blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
659b97170e91eb4703f9660aadb5dcd49a9e1982
|
b2de2d078b2fbf97b4d98aa806d590dd4535a068
|
/NBAStats/R/DailyOdds.R
|
2724f45971932eb045e03e16332eb88892d0e5cd
|
[] |
no_license
|
dchen36/r-package-teamivy
|
8ff1d298f3e7945a5ebc56fd07cebb4ced9e205d
|
885ae8819a5f84abb0c75be42409875eaf16ac2d
|
refs/heads/master
| 2020-04-22T22:48:53.134820
| 2017-12-21T22:56:09
| 2017-12-21T22:56:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,099
|
r
|
DailyOdds.R
|
##DailyOdds: scrape the daily NBA betting odds from donbest.com.
##Pulls three pages for the requested date -- point spreads, money lines and
##over/under totals -- cleans each into a tidy table and joins them into one
##data frame with a single row per game.
##
##Argument BettingDate2: the date of interest (anything as.Date() accepts).
##donbest only publishes lines for today and tomorrow, so later dates just
##print a message.
##Returns: a data frame with home/away teams, rotation numbers, spreads,
##money lines, totals and Pinnacle-derived win probabilities (WP.H, WP.V).
##NOTE(review): relies on lubridate (ymd/today/hm), rvest, dplyr/tibble and
##stringr being attached by the package -- confirm in DESCRIPTION/NAMESPACE.
##NOTE(review): scraping is brittle -- the XPath below depends on the exact
##donbest page structure at the time this was written.
DailyOdds <- function(BettingDate2){
BettingDate2 <- as.Date(BettingDate2)
#donbest URLs key each page by the date formatted as YYYYMMDD
BettingDate <- format(BettingDate2, "%Y%m%d")
#Lines are only released for today and tomorrow; bail out for later dates
if(ymd(BettingDate) > today() +1) {
print("Sorry, the betting lines are not set yet! Lines are only released for today and the following day. Check back again!")
} else {
##Spreads Tables
url.spread <- paste0("http://www.donbest.com/nba/odds/spreads/", BettingDate, ".html")
#Scrape the odds table and coerce it to a tibble
SpreadsTable <- url.spread %>%
read_html %>%
html_nodes(xpath = '//*[@id="oddsHolder"]/div[1]/table') %>%
html_table(header = F, fill =T) %>%
data.frame() %>%
tbl_df()
#Drop the page header row, promote the next row to column names, drop it
SpreadsTable <- SpreadsTable[-1, ]
names(SpreadsTable) <- SpreadsTable[1,]
SpreadsTable <- SpreadsTable[-1, ]
SpreadsTable <- SpreadsTable %>%
select(Rot, Opener, Team, Time, Bovada, Pinnacle, Mirage)
#Rot is a 6-digit string: first 3 digits = away rotation, last 3 = home
SpreadsTable <- SpreadsTable %>%
mutate(Rot1 = as.character(Rot),
Rot2 = as.character(Rot))
SpreadsTable$Rot1 <- str_sub(SpreadsTable$Rot1, 1, 3)
SpreadsTable$Rot2 <- str_sub(SpreadsTable$Rot2, 4, 6)
#We need to create a regex pattern to pull the away spread out of the
#concatenated odds cell
SpreadsTable <- SpreadsTable %>%
mutate(OS = Opener)
#Opening Spread: signed number (optionally with a decimal) followed by
#whitespace, e.g. "-3.5 "
pattern <- str_c("[+-]", "[:digit:]+", ".?", "[:digit:]*","[\\s]")
#str_match returns a character matrix; column 1 is extracted further below
SpreadsTable$OSAway <- str_match(SpreadsTable$OS, pattern)
SpreadsTable$OSAway <- sub("\\s+", "", SpreadsTable$OSAway)
SpreadsTable$Bovada <- str_match(SpreadsTable$Bovada, pattern)
SpreadsTable$Bovada <- sub("\\s+", "", SpreadsTable$Bovada)
SpreadsTable$Pinnacle <- str_match(SpreadsTable$Pinnacle, pattern)
SpreadsTable$Pinnacle <- sub("\\s+", "", SpreadsTable$Pinnacle)
SpreadsTable$Mirage <- str_match(SpreadsTable$Mirage, pattern)
SpreadsTable$Mirage <- sub("\\s+", "", SpreadsTable$Mirage)
#Teams: the Team cell contains "AwayCity AwayName HomeCity HomeName";
#peel the away side off the front, leaving the home team behind
#Team Away City (one- or two-word city)
patternteam <- str_c("((^[:upper:][:lower:]+)[:blank:][:upper:][:lower:]+[:blank:])|((^[:upper:][:lower:]+)[:blank:])")
SpreadsTable$TeamAwayCity <- str_match(SpreadsTable$Team, patternteam)
SpreadsTable$Team <- str_replace(SpreadsTable$Team, patternteam, "")
#Team Away nickname ("76ers" is special-cased: it starts with a digit)
patternteam2 <- str_c("(^[:upper:][:lower:]+|76ers)")
SpreadsTable$TeamAway <- str_match(SpreadsTable$Team, patternteam2)
SpreadsTable$Team <- str_replace(SpreadsTable$Team, patternteam2, "")
#Put the city and nickname together; first flatten the str_match matrix
#columns down to plain character vectors (column 1 = full match)
SpreadsTable <- as.data.frame(SpreadsTable)
SpreadsTable$Bovada <- SpreadsTable$Bovada[,1]
SpreadsTable$Mirage <- SpreadsTable$Mirage[,1]
SpreadsTable$Pinnacle <- SpreadsTable$Pinnacle[,1]
SpreadsTable$OSAway <- SpreadsTable$OSAway[,1]
SpreadsTable$TeamAwayCity <- SpreadsTable$TeamAwayCity[,1]
SpreadsTable$TeamAway <- SpreadsTable$TeamAway[,1]
SpreadsTable <- SpreadsTable %>%
mutate(Away = paste0(TeamAwayCity, TeamAway))
SpreadsTable <- SpreadsTable %>%
select(Team, Time, Bovada, Pinnacle, Mirage, Rot1, Rot2, OSAway, Away)
names(SpreadsTable)[1] <- "Home"
names(SpreadsTable)[6] <- "RotAway"
names(SpreadsTable)[7] <- "RotHome"
#MoneyLine Table: same scrape-and-clean procedure as the spreads table
url.ML <- paste0("http://www.donbest.com/nba/odds/money-lines/", BettingDate, ".html")
MoneyLineTable <- url.ML %>%
read_html %>%
html_nodes(xpath = '//*[@id="oddsHolder"]/div[1]/table') %>%
html_table(header = F, fill =T) %>%
data.frame() %>%
tbl_df()
MoneyLineTable <- MoneyLineTable[-1, ]
names(MoneyLineTable) <- MoneyLineTable[1,]
MoneyLineTable <- MoneyLineTable[-1, ]
MoneyLineTable <- MoneyLineTable %>%
select(Rot, Opener, Team, Time, Bovada, Pinnacle, Mirage)
#Rot works the same way: away digits 1-3, home digits 4-6
MoneyLineTable <- MoneyLineTable %>%
mutate(Rot1 = as.character(Rot),
Rot2 = as.character(Rot))
MoneyLineTable$Rot1 <- str_sub(MoneyLineTable$Rot1, 1, 3)
MoneyLineTable$Rot2 <- str_sub(MoneyLineTable$Rot2, 4, 6)
#Money Line Away: the signed integer at the START of the cell
pattern <- str_c("^[+-]", "[:digit:]+")
MoneyLineTable$OpenerAwayML <- str_match(MoneyLineTable$Opener, pattern)
MoneyLineTable$BovadaAwayML <- str_match(MoneyLineTable$Bovada, pattern)
MoneyLineTable$PinnacleAwayML <- str_match(MoneyLineTable$Pinnacle, pattern)
MoneyLineTable$MirageAwayML <- str_match(MoneyLineTable$Mirage, pattern)
#Money Line Home: the signed integer at the END of the cell
pattern <- str_c("[+-]", "[:digit:]+$")
MoneyLineTable$OpenerHomeML <- str_match(MoneyLineTable$Opener, pattern)
MoneyLineTable$BovadaHomeML <- str_match(MoneyLineTable$Bovada, pattern)
MoneyLineTable$PinnacleHomeML <- str_match(MoneyLineTable$Pinnacle, pattern)
MoneyLineTable$MirageHomeML <- str_match(MoneyLineTable$Mirage, pattern)
#Teams: identical away-team extraction as in the spreads table
#Team Away City
patternteam <- str_c("((^[:upper:][:lower:]+)[:blank:][:upper:][:lower:]+[:blank:])|((^[:upper:][:lower:]+)[:blank:])")
MoneyLineTable$TeamAwayCity <- str_match(MoneyLineTable$Team, patternteam)
MoneyLineTable$Team <- str_replace(MoneyLineTable$Team, patternteam, "")
#Team Away
patternteam2 <- str_c("(^[:upper:][:lower:]+|76ers)")
MoneyLineTable$TeamAway <- str_match(MoneyLineTable$Team, patternteam2)
MoneyLineTable$Team <- str_replace(MoneyLineTable$Team, patternteam2, "")
#Put the city and nickname together
MoneyLineTable <- as.data.frame(MoneyLineTable)
#Weird Structure: str_match produced matrix columns; keep only column 1
MoneyLineTable$BovadaAwayML <- MoneyLineTable$BovadaAwayML[,1]
MoneyLineTable$MirageAwayML <- MoneyLineTable$MirageAwayML[,1]
MoneyLineTable$PinnacleAwayML <- MoneyLineTable$PinnacleAwayML[,1]
MoneyLineTable$OpenerAwayML <- MoneyLineTable$OpenerAwayML[,1]
MoneyLineTable$OpenerHomeML <- MoneyLineTable$OpenerHomeML[,1]
MoneyLineTable$PinnacleHomeML <- MoneyLineTable$PinnacleHomeML[,1]
MoneyLineTable$MirageHomeML <- MoneyLineTable$MirageHomeML[,1]
MoneyLineTable$BovadaHomeML <- MoneyLineTable$BovadaHomeML[,1]
#Team
MoneyLineTable$TeamAwayCity <- MoneyLineTable$TeamAwayCity[,1]
MoneyLineTable$TeamAway <- MoneyLineTable$TeamAway[,1]
MoneyLineTable <- MoneyLineTable %>%
mutate(Away = paste0(TeamAwayCity, TeamAway))
MoneyLineTable <- MoneyLineTable %>%
select(-Rot, -Opener, -Bovada, - Pinnacle, - Mirage, - TeamAwayCity, -TeamAway)
names(MoneyLineTable)[1] <- "Home"
names(MoneyLineTable)[3] <- "RotAway"
names(MoneyLineTable)[4] <- "RotHome"
#Over/Under (totals) table: same scrape-and-clean procedure once more
url.OU <- paste0("http://www.donbest.com/nba/odds/totals/", BettingDate, ".html")
OverUnderTable <- url.OU %>%
read_html %>%
html_nodes(xpath = '//*[@id="oddsHolder"]/div[1]/table') %>%
html_table(header = F, fill =T) %>%
data.frame() %>%
tbl_df()
OverUnderTable <- OverUnderTable[-1, ]
names(OverUnderTable) <- OverUnderTable[1,]
OverUnderTable <- OverUnderTable[-1, ]
OverUnderTable <- OverUnderTable %>%
select(Rot, Opener, Team, Time, Bovada, Pinnacle, Mirage)
#Rot works
OverUnderTable <- OverUnderTable %>%
mutate(Rot1 = as.character(Rot),
Rot2 = as.character(Rot))
OverUnderTable$Rot1 <- str_sub(OverUnderTable$Rot1, 1, 3)
OverUnderTable$Rot2 <- str_sub(OverUnderTable$Rot2, 4, 6)
#OverUnder total: three digits, a dot and one decimal, e.g. "215.5"
#NOTE(review): totals below 100.0 would not match this pattern
pattern <- str_c("[:digit:]{3}\\.[:digit:]")
OverUnderTable$OpenerOU <- str_match(OverUnderTable$Opener, pattern)
OverUnderTable$BovadaOU <- str_match(OverUnderTable$Bovada, pattern)
OverUnderTable$PinnacleOU <- str_match(OverUnderTable$Pinnacle, pattern)
OverUnderTable$MirageOU <- str_match(OverUnderTable$Mirage, pattern)
#Teams
#Team Away City
patternteam <- str_c("((^[:upper:][:lower:]+)[:blank:][:upper:][:lower:]+[:blank:])|((^[:upper:][:lower:]+)[:blank:])")
OverUnderTable$TeamAwayCity <- str_match(OverUnderTable$Team, patternteam)
OverUnderTable$Team <- str_replace(OverUnderTable$Team, patternteam, "")
#Team Away
patternteam2 <- str_c("(^[:upper:][:lower:]+|76ers)")
OverUnderTable$TeamAway <- str_match(OverUnderTable$Team, patternteam2)
OverUnderTable$Team <- str_replace(OverUnderTable$Team, patternteam2, "")
#Put the city and nickname together
OverUnderTable <- as.data.frame(OverUnderTable)
#Weird Structure: flatten str_match matrix columns to their first column
OverUnderTable$BovadaOU <- OverUnderTable$BovadaOU[,1]
OverUnderTable$MirageOU <- OverUnderTable$MirageOU[,1]
OverUnderTable$PinnacleOU <- OverUnderTable$PinnacleOU[,1]
OverUnderTable$OpenerOU <- OverUnderTable$OpenerOU[,1]
#Team
OverUnderTable$TeamAwayCity <- OverUnderTable$TeamAwayCity[,1]
OverUnderTable$TeamAway <- OverUnderTable$TeamAway[,1]
OverUnderTable <- OverUnderTable %>%
mutate(Away = paste0(TeamAwayCity, TeamAway))
OverUnderTable <- OverUnderTable %>%
select(-Rot, -Opener, -Bovada, - Pinnacle, - Mirage, - TeamAwayCity, -TeamAway)
names(OverUnderTable)[1] <- "Home"
names(OverUnderTable)[3] <- "RotAway"
names(OverUnderTable)[4] <- "RotHome"
##Join the tables together on the game identity columns
BettingTable <- left_join(SpreadsTable, MoneyLineTable, by = c("Home", "Away", "Time", "RotAway", "RotHome")) %>%
left_join(OverUnderTable, by = c("Home", "Away", "Time", "RotAway", "RotHome"))
#NOTE(review): head() inside a function has no visible effect -- dead code
head(BettingTable)
#Reorder so Home/Away lead, then spreads, money lines and totals
BettingTable <- BettingTable[, c(1, 9, 2:8, 10:21)]
names(BettingTable)[4] <- "BovadaAwayS"
names(BettingTable)[5] <- "PinnacleAwayS"
names(BettingTable)[6] <- "MirageAwayS"
names(BettingTable)[9] <- "OpenerAwayS"
#Parse tip-off time into a lubridate period
BettingTable <- BettingTable %>%
mutate(Time = hm(Time))
#Coerce every odds column (4 onward) from character to numeric
for(i in 4:length(BettingTable)){
BettingTable[,i] <- as.numeric(BettingTable[,i])
}
#Create a win probability from the Pinnacle money line (the original
#comment said Bovada, but the code uses the Pinnacle columns): standard
#American-odds conversion, then average the home probability with the
#complement of the away probability to remove the vig
BettingTable <- BettingTable %>%
mutate(WP.HB = ifelse(PinnacleHomeML > 0, 100/(PinnacleHomeML + 100),
PinnacleHomeML/(PinnacleHomeML -100)),
WP.VB = ifelse(PinnacleAwayML > 0, 100/(PinnacleAwayML + 100),
PinnacleAwayML/(PinnacleAwayML -100)),
WP.H = (WP.HB + (1-WP.VB))/2,
WP.V = 1 - WP.H) %>%
select(-WP.HB, -WP.VB)
return(BettingTable)
}
}
|
af98e632b29394b671419f81961b4e4f10757fee
|
f7d693b780be70d28b68386bb1118626ad3a9190
|
/analylb.R
|
4345393727c4b666bdefeab2f5bd140f685c46fb
|
[] |
no_license
|
yuyuehill/analysislb
|
f8ead2cb7b62d23d57951d111f75282cd2e0bce3
|
1851997dad51c3e2af965c666c6cf48f38717e9b
|
refs/heads/master
| 2021-01-10T20:28:37.086164
| 2019-01-27T06:14:43
| 2019-01-27T06:14:43
| 41,676,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
analylb.R
|
## Analysis of "lucky ball" lottery draws from a local CSV export.
## Reads the draw history once and plots two row-sum series.
lbdata <- read.table('/Users/hill/git/analysislb/data/luckyball.csv', header=TRUE, sep=",")
# Sum of the six ball columns (3:8) for every draw; NAs ignored.
rs <- rowSums(lbdata[,3:8],na.rm=TRUE)
#plot the sum with time series
plot(rs)
# NOTE(review): the original re-read the identical CSV here; that second
# read.table() call was redundant (the file is not modified in between)
# and has been removed.
# Single cell: row 2, column 9. drop = FALSE keeps a 1x1 data frame --
# without it the subset collapses to a bare vector and rowSums() errors
# with "'x' must be an array of at least two dimensions".
rs <- rowSums(lbdata[2:2, 9:9, drop = FALSE], na.rm=TRUE)
#plot the sum with time series
plot(rs)
|
851557ce009114c37bea6f66b10c09df3b0cbfbd
|
cd56c57626210d836a6671300a45a7360ab6cf86
|
/scripts/01-create-data-set.r
|
891bfca157f60eb2a3b04f055793a122d5f4d079
|
[] |
no_license
|
blakeshurtz/AC_runtime_project
|
dfea3f73e970d7d32444c23fecfba7e0abd37ca3
|
e41208ed4124e5b1e316c5122692b51058efb1a3
|
refs/heads/master
| 2023-06-29T02:00:50.212886
| 2021-07-28T20:25:48
| 2021-07-28T20:25:48
| 260,077,964
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,637
|
r
|
01-create-data-set.r
|
#it is NOT recommended that you run this script in isolation
#it is recommended that you open "AC_runtime_project" R project
#and run the "primary_script.r" for all analysis
#NOTE(review): this script relies on readxl, tidyverse (dplyr/purrr/stringr/
#tidyr), lubridate and broom being attached by primary_script.r
#read in metadata
setwd("data")
customer_data <- read_excel("customer-metadata.xlsx",
col_types = c("text", "date", "date", "numeric", "numeric", "numeric"))
#add year and duration of analysis (in days)
#NOTE(review): purrr::map() makes `year` a list-column; map_dbl() would
#give a plain numeric column -- confirm downstream code tolerates a list
customer_data$year <- map(customer_data$start_date, year)
customer_data$days <- ymd(customer_data$end_date) - ymd(customer_data$start_date)
#read in logger data
setwd("logger-data")
#read one logger CSV and tag every row with its source filename
read_plus <- function(flnm) {
read_csv(flnm) %>%
mutate(name = flnm)
}
#stack all logger CSVs in this directory into one data frame
d <-
list.files(pattern = "*.csv",
full.names = T) %>%
map_df(~read_plus(.))
#strip the leading "./" and trailing ".csv" from the filename tag
d$name <- str_sub(d$name, start = 3, end = -5)
#measure logger sampling_frequency, typically 1 measurement per minute
#(difference of the first two timestamps per logger, converted to minutes)
customer_data$sampling_frequency <- d %>%
group_by(name) %>%
slice(1:2) %>%
summarise(sr = (as.numeric(mdy_hms(datetime))[2] - as.numeric(mdy_hms(datetime))[1])/60) %>%
ungroup() %>%
select(-name) %>%
unlist() %>%
as.numeric()
#NOTE(review): this assignment assumes customer_data rows and the grouped
#summary rows align in the same (name) order -- verify
#if system is 2 stage, calculate low stage capacity (70% of high stage)
customer_data$low_stage_capacity = if_else(customer_data$num_stages == 1,
NA_real_,
customer_data$high_stage_capacity * .7)
#join customer metadata with logger data
d <- customer_data %>%
select(name, num_stages, cooling_load, high_stage_capacity, low_stage_capacity, sampling_frequency) %>%
left_join(d, by = "name") %>%
select(-c(index, datetime))
rm(customer_data)
#define low amp draw as equipment being off
d$on <- if_else(d$amps < 2, 0, 1) #on/off
#kmeans model determines if two-stage system is in low stage based on amperage:
#fit 4 clusters per logger, sort the centers, keep the 3rd (c3) as the
#low/high stage amperage threshold
#NOTE(review): mutate(cluster = rep(...)) assumes exactly 4 centers per name
#and that rows stay grouped by name after arrange() -- verify
d <- d %>%
group_by(name) %>%
do(model = kmeans(.['amps'], 4, nstart = 50, iter.max = 10)) %>%
ungroup() %>% group_by(name) %>%
do(map_df(.$model, broom::tidy)) %>% ungroup() %>%
select(name, x1) %>%
arrange(name, x1) %>%
ungroup() %>%
mutate(cluster = rep(c("c1", "c2", "c3", "c4"), length(unique(name)))) %>%
spread(cluster, x1) %>%
left_join(d, by = "name") %>%
select(-c(c1, c2, c4))
#define variable capacity staging based on amerage:
#0 = off (<2A), 1 = low stage (or single-stage on), 2 = high stage (>c3)
d$stage <- d %>%
mutate(stage = if_else(amps < 2, 0,
if_else(amps > 2 & amps < c3 | num_stages == 1, 1,
if_else(amps > c3, 2, NA_real_)))) %>%
select(stage) %>%
unlist() %>% as.numeric()
#determine system capacity based on sampling frequency and system staging
#(capacity delivered per sample = tons * minutes-per-sample / 60)
#NOTE(review): `num_stages == 60` below looks like a typo or a special
#sentinel code for a stage count -- confirm against the metadata spreadsheet
d$capacity <- if_else(d$stage == 1, d$sampling_frequency * d$low_stage_capacity/60, 0)
d$capacity <- if_else(d$stage == 2 & d$num_stages == 2, d$sampling_frequency * d$high_stage_capacity/60, d$capacity)
d$capacity <- if_else(d$stage == 2 & d$num_stages == 60, d$sampling_frequency * d$high_stage_capacity/60, d$capacity)
d$capacity <- if_else(d$num_stages == 1 & d$on == 1, d$sampling_frequency * d$high_stage_capacity/60, d$capacity)
d$capacity <- if_else(d$num_stages == 60 & d$on == 1, d$sampling_frequency * d$low_stage_capacity/60 * .5, d$capacity)
#count number of cycles for each system: each run of identical on/off
#values gets one cycle number (run-length encoding)
d$cycle_no <- with(rle(d$on), rep(seq_along(lengths), lengths))
#aggregate capacity for each system cycle, normalised by cooling load
summary <- d %>%
group_by(name, cycle_no) %>%
summarise(ccl = sum(capacity)/mean(cooling_load)) %>%
filter(ccl != 0) %>%
arrange(desc(ccl))
#export data (one level up from logger-data/)
write.csv(summary, file = "../ccl.csv", row.names = FALSE)
#clean environment
rm(d)
rm(summary)
rm(read_plus)
#set wd back (undo the two setwd() calls above)
setwd("../../")
|
7c20780e4c8873aa334a56da50a0ace9971d7289
|
c76c4d07b646ec0898674adca61bcd4f5bb2ce2f
|
/R/bufferTimeMthly.R
|
f890f019c3979b1f4bac0cb95017d9b0380a370b
|
[] |
no_license
|
DanBoyB/m50-perf-ind
|
95d883e21376674f1b9c04dfe9cfd9799781f490
|
89ed3964bbac4cc81a70dbf94fe0bb2df56ebfeb
|
refs/heads/master
| 2020-12-31T06:46:31.855877
| 2017-09-13T11:17:30
| 2017-09-13T11:17:30
| 86,582,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,079
|
r
|
bufferTimeMthly.R
|
#bufferTimeMthly.R: compute monthly buffer-time and misery indices for M50
#motorway links from 5-minute traffic counts and journey times stored in a
#local SQLite database, weighting by vehicle-km travelled (vkt).
library(knitr)
library(dplyr)
library(tidyr)
library(readr)
library(purrr)
library(lubridate)
path <- "/var/datastore/"
db <- src_sqlite(paste(path, "/SQLite/M50.sqlite", sep = ""), create = FALSE)
#lane metadata; the paired sites 1504/1509 and 1506/1507 are merged into
#single combined site ids
laneNum <- read_csv("data/m50laneNum.csv") %>%
mutate(siteID = ifelse(siteID %in% c(1504,1509), 15041509,
ifelse(siteID %in% c(1506,1507), 15061507, siteID)))
#day-type lookup restricted to 2015 (the epoch bounds appear to be
#2015-01-01 00:00:00 to 2015-12-31 23:59:59 UTC -- verify)
dayTypes <- tbl(db, "dayTypes1219") %>%
filter(date >= 1420070400, date <= 1451606399) %>%
mutate(date = sql(datetime(date, 'unixepoch'))) %>%
select(-day)
links <- read_csv("data/links.csv") %>%
rename(sectionName = Section)
#lookup of the 288 five-minute slots in a day -> sequential index
times <- format(seq.POSIXt(ISOdatetime(2015, 1, 1, 0, 0, 0),
ISOdatetime(2015, 1, 1, 23, 55, 0),
by = "5 min"),
"%H:%M") %>%
data_frame(timeSeq = seq_along(.), time = .)
##---------------------------------------------------------------------------------##
#5-minute traffic volumes for 2015, joined to lane/link metadata; `ref`
#uniquely keys a (link, dayType, 5-min slot) combination for later joins
traffic <- tbl(db, "min5_50_2015") %>%
rename(dateTime = time) %>%
filter(siteID != 1012) %>%
mutate(time = sql(strftime('%H:%M', datetime(dateTime, 'unixepoch')))) %>%
mutate(siteID = ifelse(siteID %in% c(1504,1509), 15041509,
ifelse(siteID %in% c(1506,1507), 15061507, siteID))) %>%
left_join(times, by = "time", copy = TRUE) %>%
left_join(laneNum, by = "Code", copy = TRUE) %>%
rename(siteID = siteID.y) %>%
left_join(links, by = "sectionName", copy = TRUE) %>%
collect(n = Inf) %>%
mutate(month = month(as.POSIXct(dateTime, origin = "1970-01-01")),
dayType = as.integer(dayType),
ref = paste(linkID, dayType, timeSeq, sep = "_"))
##---------------------------------------------------------------------------------##
#journey times, northbound
jTNB <- tbl(db, "journeyTimes_2015_NB") %>%
select(linkID, time, journeyTime, dayType) %>%
rename(dateTime = time) %>%
collect(n = Inf) %>%
mutate(time = format(as.POSIXct(dateTime, origin = "1970-01-01"), "%H:%M")) %>%
left_join(times, by = "time") %>%
left_join(links, by = "linkID")
#journey times, southbound
jTSB <- tbl(db, "journeyTimes_2015_SB") %>%
select(linkID, time, journeyTime, dayType) %>%
rename(dateTime = time) %>%
collect(n = Inf) %>%
mutate(time = format(as.POSIXct(dateTime, origin = "1970-01-01"), "%H:%M")) %>%
left_join(times, by = "time") %>%
left_join(links, by = "linkID")
#combine directions; derive month, join key and implied speed (km/h)
jTime <- jTNB %>%
bind_rows(jTSB) %>%
mutate(month = month(as.POSIXct(dateTime, origin = "1970-01-01")),
ref = paste(linkID, dayType, timeSeq, sep = "_"),
hour = hour(as.POSIXct(dateTime, origin = "1970-01-01"))) %>%
mutate(speed = lengthKm / (journeyTime / 60))
#drop invalid observations: non-positive times, implausible speeds
#(>150 km/h) and two excluded links
jTimeClean <- jTime %>%
filter(journeyTime > 0,
speed <= 150,
!linkID %in% c(76,77))
##---------------------------------------------------------------------------------##
#total traffic volume per (site, link, ref) for month x
monthlyTraf <- function(x) {
traffic %>%
filter(month == x) %>%
rename(siteID = siteID.x) %>%
group_by(siteID, linkID, ref) %>%
summarise(month = first(month), traffic = sum(volume))
}
#per-km journey-time statistics and reliability indices for month x;
#planTime/longTime are the 95th/97.5th percentile times per km
monthlyJT <- function(x) {
jTimeClean %>%
filter(month == x) %>%
group_by(siteID, linkID, dayType, timeSeq, ref, direction, sectionName, hour) %>%
summarise(month = first(month),
medianJt = median(journeyTime / lengthKm),
meanJt = mean(journeyTime / lengthKm),
stdevJt = ifelse(is.na(sd(journeyTime / lengthKm)), 0, sd(journeyTime / lengthKm)),
planTime = quantile(journeyTime / lengthKm, 0.95),
longTime = quantile(journeyTime / lengthKm, 0.975),
lengthKm = first(lengthKm)) %>%
ungroup() %>%
mutate(jt100kph = ((lengthKm / 100) * 60) / lengthKm,
percVar = (stdevJt / medianJt) * 100,
buffTime = planTime - medianJt,
buffTimeIndex = (buffTime / medianJt) * 100,
planTimeIndex = (planTime / medianJt) * 100,
miseryIndex = ((longTime) / medianJt) * 100)
}
#join monthly traffic to journey-time stats, derive vkt and label each
#observation with a time-of-day/day-type period
joinCalc <- function(traffic, jTime) {
jTime %>%
left_join(traffic, by = "ref") %>%
mutate(vkt = traffic * lengthKm) %>%
mutate(period = ifelse(dayType %in% c(5:6, 12:14), "Off Peak",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(0:6, 19:23), "Off Peak",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(7, 9), "AM Peak Shoulders",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(16, 18), "PM Peak Shoulders",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(8), "AM Peak Hour",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(17), "PM Peak Hour",
ifelse(dayType %in% c(0:4, 7:11) & hour %in% c(10:16), "Inter Peak", NA)))))))) %>%
na.omit()
}
#vkt-weighted mean indices per site/section/month/period/direction
sumStats <- function(stats) {
stats %>%
rename(siteID = siteID.x) %>%
group_by(siteID, sectionName, month.x, period, direction) %>%
summarise(buffTimeIndex = weighted.mean(buffTimeIndex, vkt),
miseryIndex = weighted.mean(miseryIndex, vkt))
}
#run the whole pipeline for each of the 12 months
monthly <- data_frame(monthNo = c(1:12)) %>%
mutate(traffic = map(monthNo, monthlyTraf),
jTime = map(monthNo, monthlyJT),
stats = map2(traffic, jTime, joinCalc),
sumStats = map(stats, sumStats))
stats <- monthly %>%
select(stats) %>%
unnest() %>%
rename(siteID = siteID.x, month = month.x)
saveRDS(stats, "output/bufferMisery/bufferMisery.rds")
##---------------------------------------------------------------------------------##
#NOTE(review): `d` is never defined in this script -- the two summaries
#below will error if the file is run top-to-bottom; presumably they were
#run interactively against an object from another session
total <- d %>%
summarise(buffTimeIndex = weighted.mean(buffTimeIndex, vkt),
miseryIndex = weighted.mean(miseryIndex, vkt))
#NOTE(review): `jtC2join` is also undefined here -- same caveat as above
period <- jtC2join %>%
group_by(period) %>%
summarise(buffTimeIndex = weighted.mean(buffTimeIndex, vkt),
miseryIndex = weighted.mean(miseryIndex, vkt))
|
1ed3a50e29effba8a930d413529dea462214600e
|
e7707dccad6153d86227f5634ffb5ac42d9c7dc1
|
/R/convert.R
|
cfab59a8799f5ae0b8017f7259f799131a5c80da
|
[
"MIT"
] |
permissive
|
shahcompbio/signals
|
d93a144b4bcdbcefdf1fe6221adc0543f60be319
|
c5ebe5f644f55c231b2e14c2e6fe78fcc987a8cd
|
refs/heads/master
| 2023-06-25T10:02:38.279104
| 2023-06-13T17:06:30
| 2023-06-13T17:06:30
| 227,641,994
| 3
| 0
|
NOASSERTION
| 2023-06-13T17:06:31
| 2019-12-12T15:49:51
|
R
|
UTF-8
|
R
| false
| false
| 1,133
|
r
|
convert.R
|
#' Assign a haplotype-bin label to every SNP position
#'
#' Bins the genome into windows of `hapbinsize` bp (via the package's
#' `getBins()`), overlap-joins the haplotype calls onto those bins and
#' returns one row per distinct (chr, position) with its bin label.
#'
#' @param haplotypes A data.table (the non-equi join below requires
#'   data.table subsetting -- NOTE(review): confirm callers pass a
#'   data.table, the function does not coerce) whose first five columns
#'   are renamed, in order, to chr, pos, cell_id, allele0, allele1.
#' @param hapbinsize Bin width in base pairs (default 50kb).
#' @return A table of distinct chr, position, hap_label rows.
#' @export
assign_haplotype_label <- function(haplotypes, hapbinsize = 50e3){
  colnames(haplotypes) <- c("chr", "pos", "cell_id", "allele0", "allele1")
  haplotypes$chr <- as.character(haplotypes$chr)
  # keep a copy of pos: the non-equi join consumes the `pos` column
  haplotypes$pos2 <- haplotypes$pos
  # genome bins of hapbinsize bp, each numbered with a sequential label
  bins <- getBins(binsize = hapbinsize) %>%
    dplyr::rename(start_bins = start, end_bins = end, chr_bins = chr) %>%
    dplyr::select(-width) %>%
    as.data.table() %>%
    .[, hap_label := 1:.N]
  # non-equi join: attach the hap_label of the bin containing each position
  haplotypes <- haplotypes[bins, on = .(chr == chr_bins, pos > start_bins, pos < end_bins)]
  haplotypes <- na.omit(haplotypes)
  hap_labels <- dplyr::distinct(haplotypes, chr, pos2, hap_label) %>% dplyr::rename(position = pos2)
  # BUG FIX: the original returned `hap_label`, which is only a column name
  # and not an object in this scope, so the call failed with
  # "object 'hap_label' not found". Return the computed table instead.
  return(hap_labels)
}
#' Assign a sequential group label to every distinct SNP
#'
#' Converts the input to a data.table and adds a `hap_label` column that
#' numbers each unique (chr, position) pair (data.table's `.GRP` group
#' counter), so every SNP shares one label across cells.
#'
#' @param haplotypes Table with at least chr and position columns.
#' @param hapbinsize Unused here; kept for signature parity with the
#'   bin-based labelling function.
#' @return A data.table copy of the input with a hap_label column.
#' @export
assign_label_persnp <- function(haplotypes, hapbinsize = 50e3){
  labelled <- as.data.table(haplotypes)
  labelled[, hap_label := .GRP, by = .(chr, position)]
  return(labelled)
}
#' Bin haplotype positions into fixed-width genomic windows
#'
#' Replaces the `position` column with 1-based `start`/`end` bin bounds:
#' a position p falls in the window [k*binsize + 1, (k+1)*binsize] where
#' k = floor(p / binsize). Columns are added by reference (data.table `:=`),
#' so the input table itself is modified, as in the original.
#'
#' @param haplotypes A data.table with a numeric position column.
#' @param binsize Bin width in base pairs (default 0.5Mb).
#' @return The table with start/end columns and position removed.
#' @export
assign_bins_haplotypes <- function(haplotypes, binsize = 0.5e6){
  binned <- haplotypes
  binned[, start := floor(position / binsize) * binsize + 1]
  binned[, end := start + binsize - 1]
  binned[, position := NULL]
  return(binned)
}
|
6d02aace33b486f1c894448ac839987d15b8c9d2
|
1651425837dc73a61101a43242ced7c9ade54681
|
/R/fitvolkov.R
|
35ca9b28212c121726f226498d58b5da4575a624
|
[] |
no_license
|
cran/sads
|
7b72ce8762a2be115351cb41c782d4a47755991a
|
52b5d7b6a7ea38a29b23467c5539654305b48bb6
|
refs/heads/master
| 2021-01-18T23:41:28.702286
| 2018-06-16T21:47:56
| 2018-06-16T21:47:56
| 21,417,370
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
fitvolkov.R
|
#fitvolkov: maximum-likelihood fit of Volkov et al.'s neutral-theory species
#abundance distribution to a vector of abundances.
#
#Arguments:
#  x            vector of species abundances; must be positive integers.
#  trunc        optional truncation point; all data must exceed it.
#  start.value  optional starting values c(theta, m); if missing they are
#               estimated via maxLikelihood.ESF (NOTE(review): presumably
#               from an imported package -- confirm in NAMESPACE).
#  ...          forwarded to mle2 (e.g. method, lower, upper).
#Returns: a "fitsad" S4 object wrapping the mle2 fit.
fitvolkov <- function(x, trunc, start.value, ...){
dots <- list(...)
if (any(x <= 0) | any(!is.wholenumber(x))) stop ("All x must be positive integers")
if(missing(start.value)){
#redirect console output to a temp file while estimating start values
tmp <- tempfile()
sink(tmp) # as the following function outputs lots of garbage...
start.value <- maxLikelihood.ESF(c(5, 0.5), x)$par
sink()
file.remove(tmp) ## as sink("dev/null") does not work in al OS'
}
thetahat <- start.value[1]
mhat <-start.value[2]
#default optimizer: box-constrained L-BFGS-B with bounds around the
#starting values (theta within a factor of 5, m inside (0,1))
if(!"method" %in% names(dots)){
dots$method <- "L-BFGS-B"
if(!"lower" %in% names(dots)) dots$lower=c(theta=thetahat/5, m=1e-4)
if(!"upper" %in% names(dots)) dots$upper=c(theta=thetahat*5, m=0.9999)
}
if (!missing(trunc)){
if (min(x)<=trunc) stop("truncation point should be lower than the lowest data value")
}
#negative log-likelihood: full distribution, or truncated via dtrunc
if (missing(trunc)){
LL <- function(theta, m, J) -sum(dvolkov(x, theta = theta, m = m, J = J, log = TRUE))
}
else {
LL <- function(theta, m, J) {
-sum(dtrunc("volkov", x = x,
coef = list(J = J, m = m, theta = theta),
trunc = trunc, log = TRUE))
}
}
#community size J is fixed at the total abundance; theta and m are fitted
result <- do.call("mle2", c(list(minuslogl=LL, start = list(theta = thetahat, m = mhat), fixed=list(J=sum(x)), data = list(x = x)), dots))
#trunc is stored as NaN when the fit was untruncated
new("fitsad", result, sad="volkov", distr = distr.depr, trunc = ifelse(missing(trunc), NaN, trunc))
}
|
d7eb774bc2c1fa45c7eb9cbc95a3accd7a6ea958
|
21d0756e90c916684d531edc45ba6a4707c654f4
|
/python_rules/rulemsiAddKeyValToMspStr.r
|
4cf090da476b48180f86e1382c5e67631f86071f
|
[] |
no_license
|
irods/irods_rule_engine_plugin_python
|
8db27464a1898613849d4f216a60e20afb5665d8
|
773105f6c429ad708adfbf292164dfc9aaf3e3cf
|
refs/heads/main
| 2023-08-06T19:30:05.326697
| 2023-07-28T20:30:13
| 2023-07-28T20:51:16
| 37,076,977
| 7
| 13
| null | 2023-07-28T20:51:17
| 2015-06-08T15:55:22
|
C++
|
UTF-8
|
R
| false
| false
| 358
|
r
|
rulemsiAddKeyValToMspStr.r
|
# iRODS Python rule-engine rule: equivalent of msiAddKeyValToMspStr --
# joins an attribute name and value into "name=value" and writes it to
# stdout through the rule-engine callback.
def main(rule_args, callback, rei):
# global_vars holds the INPUT parameters declared below; values arrive
# wrapped in double quotes, so [1:-1] strips the surrounding quotes
attr_name = global_vars['*AttrName'][1:-1]
attr_value = global_vars['*AttrValue'][1:-1]
# build the key=value pair
out_str = '='.join([attr_name, attr_value])
callback.writeLine('stdout', 'The string now contains')
callback.writeLine('stdout', out_str)
# Rule-engine directives (not Python): declared inputs and output target.
INPUT *AttrName="destRescName", *AttrValue="demoResc"
OUTPUT ruleExecOut
|
bc54c2ed31770a215ffe4c26269ddca0f4ade395
|
1fc421ae8d2d0cc87944ec21ea53b37b1ef02544
|
/man/Full.DevFactors.Rd
|
245c008e8c96bf47e6a638623b111252ad9d635c
|
[] |
no_license
|
EduardoRamosP/MackNet
|
5f3df28a30385e83c4d3de0eb10606a416499c92
|
1281f90ccad86df2f496b6e1a33aeab18cf81807
|
refs/heads/master
| 2022-12-18T22:17:47.097987
| 2020-09-21T20:30:55
| 2020-09-21T20:30:55
| 296,931,038
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 478
|
rd
|
Full.DevFactors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Full.DevFactors.R
\name{Full.DevFactors}
\alias{Full.DevFactors}
\title{Full.DevFactors}
\usage{
Full.DevFactors(Cumulative.T)
}
\arguments{
\item{Cumulative.T}{Upper and lower triangle used for estimating the predictive development factors.}
}
\value{
Full development factors
}
\description{
Calculation of development factors taking into consideration both the observed and the predicted data.
}
|
a9b29714d69cd82d4e951d450e79714c4fd7c7a3
|
48c65804caa6d684455b066b762283fa2c538d87
|
/clinical_trials.R
|
f324bdc6e3e2708119dc9b778dd0a3c617fbf3ca
|
[] |
no_license
|
wesenu/MITx-Analytics-Edge-Coursework
|
9ffa6c3c0f59d7d45972f540c8fe2b08c249581e
|
a7b6d1a04d494f4ff3292c74b0bb174afd295b41
|
refs/heads/master
| 2023-03-16T11:20:31.655383
| 2015-09-12T11:16:32
| 2015-09-12T11:16:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,492
|
r
|
clinical_trials.R
|
#clinical_trials.R: text-mine clinical-trial titles/abstracts with the tm
#package and fit a CART classifier predicting whether a paper describes a
#clinical trial (MITx Analytics Edge exercise).
trials <- read.csv("clinical_trial.csv", stringsAsFactors = F)
str(trials)
# Pre process data
library(tm)
# Create Corpus -- one corpus for titles, one for abstracts
corpusTitle <- Corpus(VectorSource(trials$title))
corpusAbstract <- Corpus(VectorSource(trials$abstract))
# Convert to lower case
corpusTitle <- tm_map(corpusTitle, tolower)
corpusAbstract <- tm_map(corpusAbstract, tolower)
#Convert to text document (needed after tolower, which returns plain text)
corpusTitle = tm_map(corpusTitle, PlainTextDocument)
corpusAbstract = tm_map(corpusAbstract, PlainTextDocument)
# Remove punctuation
corpusTitle <- tm_map(corpusTitle, removePunctuation)
corpusAbstract <- tm_map(corpusAbstract, removePunctuation)
# Remove Stop words
corpusTitle <- tm_map(corpusTitle, removeWords, stopwords("english"))
corpusAbstract <- tm_map(corpusAbstract, removeWords, stopwords("english"))
# Stem the words
corpusTitle <- tm_map(corpusTitle, stemDocument)
corpusAbstract <- tm_map(corpusAbstract, stemDocument)
# Look at the first document
corpusTitle[[1]]
# Create document-term matrices
dtmTitle <- DocumentTermMatrix(corpusTitle)
dtmAbstract <- DocumentTermMatrix(corpusAbstract)
#Retain only terms appearing in at least 5% of documents
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
#building a df of term counts per document
abstractDf = as.data.frame(as.matrix(dtmAbstract))
titleDf = as.data.frame(as.matrix(dtmTitle))
# We want to combine dtmTitle and dtmAbstract into a single data frame to make predictions. However, some of the variables in these data frames have the same names.
# Prefix title terms with "T" and abstract terms with "A" to disambiguate.
colnames(titleDf) <- paste0("T", colnames(titleDf))
colnames(abstractDf) <- paste0("A", colnames(abstractDf))
colnames(titleDf)
colnames(abstractDf)
# Combine the two dataframes
dtm <- cbind(titleDf, abstractDf)
# Add the trial variable (the outcome to predict)
dtm$trial <- trials$trial
# Load CaTools for the stratified train/test split
library(caTools)
set.seed(144)
spl <- sample.split(dtm$trial, SplitRatio = 0.7)
train <- subset(dtm, spl == T)
test <- subset(dtm, spl == F)
# baseline model accuracy on the training set (always predict the
# majority class, i.e. the proportion of the first outcome level)
table(train$trial)[1] / sum(table(train$trial))
library(rpart)
library(rpart.plot)
# CART classification tree on all term-count predictors
nkCART = rpart(trial ~. ,data = train, method = "class")
prp(nkCART)
# predicted probability of class 1 on the training data
predTrain <- predict(nkCART)[,2]
# Accuracy on the training set (confusion matrix at a 0.5 cutoff)
t1 <- table(train$trial, predTrain >= 0.5)
trialsCART= nkCART
# out-of-sample accuracy on the test set
predTest <- predict(trialsCART, newdata = test)[,2]
t2 <- table(test$trial, predTest >= 0.5)
(t2[1,1] + t2[2,2])/(sum(t2))
# ROC curve and AUC on the test set
library(ROCR)
predROCR = prediction(predTest, test$trial)
perfROCR = performance(predROCR, "tpr", "fpr")
auc = performance(predROCR, "auc")
|
4d541313940b1400f708d77af2cf900232b14f26
|
808d4e3289a28ab439109c5c703e49911c0cc640
|
/R/functions/stan-data-preparation.R
|
ba02642e7ae2153b46e28771c9fdc9a5ee106368
|
[] |
no_license
|
odaniel1/track-cycling
|
164e2c8d1ad58b8438a85dfa115d402a34762d6f
|
8f617c5e5f5961b6236221d148769dba31f75de0
|
refs/heads/master
| 2021-09-20T13:05:20.089377
| 2021-09-05T17:18:30
| 2021-09-05T17:18:30
| 210,910,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
stan-data-preparation.R
|
#' Prepare the data list passed to the Stan match-outcome model.
#'
#' Training matches are sorted ahead of evaluation matches so that the
#' first T entries of every match-level vector form the training split.
#'
#' @param riders_df Data frame with one row per rider.
#' @param matches_df Data frame with one row per match; must contain split,
#'   round, event_id, winner_id/loser_id, sprints, winner/loser date
#'   numbers, home flags and winner_qual_time_diff.
#' @param pairing_df Currently unused; retained so existing call sites
#'   keep working.
#' @param days_df Data frame with one row per rider-day (rider_id,
#'   years_to_start), rows grouped contiguously by rider.
#' @param training Logical; forwarded to Stan as a 0/1 flag.
#' @return A named list matching the Stan model's data block.
prepare_stan_data <- function(riders_df, matches_df, pairing_df, days_df, training = TRUE){
  # arrange so all training data precedes evaluation data
  # (original comment said "preceeds"; also, the original computed a
  # split/round start-row index here that was never used -- dead code,
  # removed)
  matches_df <- matches_df %>%
    arrange(desc(split), round)

  # first row index of each rider's run of dates; with nrow+1 appended
  # below this forms a CSR-style offset vector into rider_dates
  rider_date_start <- days_df %>%
    mutate(row_no = 1:n()) %>%
    group_by(rider_id) %>%
    summarise(pos = min(row_no)) %>%
    ungroup() %>%
    arrange(rider_id)

  stan_data <- list(
    training = 1 * training,                 # logical coerced to 0/1
    R = nrow(riders_df),                     # No. Riders
    M = nrow(matches_df),                    # No. Matches
    T = sum(matches_df$split == "training"), # No. training matches
    D = nrow(days_df),                       # No. rider-days
    E = max(matches_df$event_id),            # No. events
    winner_id = matches_df$winner_id,        # ID of first rider
    loser_id = matches_df$loser_id,          # ID of second rider
    sprints = matches_df$sprints,            # No. Sprints in match, models >= 2.0
    winner_date_no = matches_df$winner_date_no,
    loser_date_no = matches_df$loser_date_no,
    winner_at_home = matches_df$winner_at_home,
    loser_at_home = matches_df$loser_at_home,
    date_index_R = c(rider_date_start$pos, nrow(days_df) + 1),
    rider_dates = days_df$years_to_start,
    B = 10,                                  # NOTE(review): meaning fixed by the Stan model -- confirm
    qual_diff = matches_df$winner_qual_time_diff,
    event = matches_df$event_id
  )
  return(stan_data)
}
|
fb9ad5fcde19c4c401d4b5de66426662de393f5f
|
af363179d727883d63776a93b9b08e614d5a2b45
|
/bin/genomic-signatures-cohort.R
|
19e9a9c4d3b66fb39c0f20d343e8284644660562
|
[] |
no_license
|
stevekm/tmb-signatures-analysis
|
113f801562d7c37768441e61d36b09a56abe40da
|
7b92d03d6b02386a2874912105bb0732497378cc
|
refs/heads/master
| 2020-04-01T22:03:31.316759
| 2019-09-04T23:32:30
| 2019-09-04T23:32:30
| 153,689,116
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,406
|
r
|
genomic-signatures-cohort.R
|
#!/usr/bin/env Rscript
# Script for running deconstructSigs to produce genomic signatures
# pooled across a whole cohort of VCF files (one combined "sample").
# need at least 55 variants per sample !!
# https://cancer.sanger.ac.uk/cosmic/signatures
library("BSgenome.Hsapiens.UCSC.hg19")
library("deconstructSigs")
# signature reference set and trinucleotide normalisation method
signature_type <- 'signatures.cosmic'
tri.counts.method <- 'exome'
# CLI: cohortID outRdata sigPlotPdf piePlotPdf weightsTsv vcf1 [vcf2 ...]
args <- commandArgs(TRUE)
cohortID <- args[1]
output_Rdata <- args[2]
signatures_plot_pdf <- args[3]
signatures_pieplot_pdf <- args[4]
signatures_weights_tsv <- args[5]
input_vcfs <- args[6:length(args)]
cohort_label <- sprintf("Cohort.%s", cohortID)
vcf_colnames <- c("CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample")
# read and stack all input VCFs, skipping '#' header lines
vcf_df <- do.call('rbind', lapply(input_vcfs, function(x){
df <- read.delim(file = x, header = FALSE, sep = '\t',
comment.char = '#', col.names = vcf_colnames, check.names = FALSE)
return(df)
}))
# subset cols; all variants are pooled under one cohort-level sample ID
vcf_df <- vcf_df[, c("CHROM", "POS", "REF", "ALT")]
vcf_df[["SampleID"]] <- cohort_label
# keep only entries with chroms in the reference data
genome_seqnames <- seqnames(BSgenome.Hsapiens.UCSC.hg19::Hsapiens)
vcf_df <- vcf_df[which(as.character(vcf_df[["CHROM"]]) %in% genome_seqnames), ]
# remove duplicates (identical variants seen in multiple VCFs)
vcf_df <- vcf_df[which(!duplicated(vcf_df)), ]
# convert to signatures format (96-element trinucleotide context counts)
sigs.input <- mut.to.sigs.input(mut.ref = vcf_df,
sample.id = "SampleID",
chr = "CHROM",
pos = "POS",
ref = "REF",
alt = "ALT")
# decompose the cohort's mutation spectrum into COSMIC signature weights
signatures <- whichSignatures(tumor.ref = sigs.input,
signatures.ref = signatures.cosmic, # signature_type
sample.id = cohort_label,
contexts.needed = TRUE,
tri.counts.method = tri.counts.method)
signature_weights <- signatures[["weights"]]
signature_weights[["SampleID"]] <- cohort_label
signature_weights[["SignatureType"]] <- signature_type
write.table(x = signature_weights, file = signatures_weights_tsv, sep = '\t', row.names = FALSE, col.names = TRUE)
# signature bar plot and pie chart PDFs
pdf(file = signatures_plot_pdf)
print(plotSignatures(signatures, sub = signature_type))
dev.off()
pdf(file = signatures_pieplot_pdf)
print(makePie(signatures, sub = signature_type))
dev.off()
# snapshot the whole session for downstream inspection
save.image(file = output_Rdata)
|
4266eceae77c623b68f5265e7be0d08e164834b6
|
5a7b15eb2a3453475ee70bb56e19a7bb2751db89
|
/code/NOT_USED/optimization/evaluate/analyze.R
|
e82baf1921a6f9e89de205a44135ad650498572b
|
[] |
no_license
|
m-hahn/memory-surprisal
|
8db19bc86ada9c352feb66859f718749623700b6
|
1b3d680836ba87fb9186741a8d4f184fda35b122
|
refs/heads/master
| 2022-04-30T16:01:39.323884
| 2022-03-25T04:10:12
| 2022-03-25T04:10:12
| 156,466,125
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,376
|
r
|
analyze.R
|
# Compare grammar parameters obtained under different optimization objectives
# (dependency-length minimization, memory-surprisal / I1, neural language
# modeling) against parameters estimated from the observed English corpus.
library(dplyr)
library(tidyr)

dlm_grammar    <- read.csv("/u/scr/mhahn/deps/locality_optimized_dlm/manual_output_funchead_coarse_depl/English_optimizeDependencyLength.py_model_3137767.tsv", sep="\t")
i1_grammar     <- read.csv("/u/scr/mhahn/deps/locality_optimized_i1/English_optimizeGrammarForI1_6_Long.py_model_876472065.tsv", sep="\t")
neural_grammar <- read.csv("/u/scr/mhahn/deps/locality_optimized_neural/manual_output_funchead_langmod_coarse_best_ud/English_optimizePredictability.py_model_2194458393.tsv", sep="\t")
ground_grammar <- read.csv("~/scr/CODE/memory-surprisal/results/manual_output_ground_coarse/English_inferWeightsCrossVariationalAllCorpora_NoPunct_NEWPYTORCH_Coarse.py_model_3723683.tsv", sep="\t")
ground_grammar <- ground_grammar %>% rename(CoarseDependency = Dependency)

# Join each optimized grammar with the corpus-estimated grammar by dependency type.
i1_vs_ground     <- merge(i1_grammar, ground_grammar, by = c("CoarseDependency"))
neural_vs_ground <- merge(neural_grammar, ground_grammar, by = c("CoarseDependency"))
dlm_vs_ground    <- merge(dlm_grammar, ground_grammar, by = c("CoarseDependency"))

# Correlation p-values for the distance (ordering) weights ...
cor.test(i1_vs_ground$DistanceWeight, i1_vs_ground$Distance_Mean_NoPunct)$p.value
cor.test(neural_vs_ground$DistanceWeight, neural_vs_ground$Distance_Mean_NoPunct)$p.value
cor.test(dlm_vs_ground$DistanceWeight, dlm_vs_ground$Distance_Mean_NoPunct)$p.value
# ... and for the head-direction weights
cor.test(i1_vs_ground$DH_Weight, i1_vs_ground$DH_Mean_NoPunct)$p.value
cor.test(neural_vs_ground$DH_Weight, neural_vs_ground$DH_Mean_NoPunct)$p.value
cor.test(dlm_vs_ground$DH_Weight, dlm_vs_ground$DH_Mean_NoPunct)$p.value

# Stability check: correlate distance weights across two independent I1 runs.
i1_run_a <- read.csv("/u/scr/mhahn/deps/locality_optimized_i1/English_optimizeGrammarForI1_3.py_model_281176292.tsv", sep="\t")
i1_run_b <- read.csv("/u/scr/mhahn/deps/locality_optimized_i1/English_optimizeGrammarForI1_5.py_model_180268043.tsv", sep="\t")
both_runs <- merge(i1_run_a, i1_run_b, by = c("CoarseDependency"))
cor(both_runs$DistanceWeight.x, both_runs$DistanceWeight.y)
|
f4ef6dbd1e8d7b1a10c8455bf9ea32300d494098
|
d6a87f1960383e6f8014c0925b67d926ca916eb6
|
/R/lsem_fitsem.R
|
e2959240e0e163bf8e3a6eeb5d5313849e604847
|
[] |
no_license
|
rosefu79/sirt
|
6e573c082be1cfda8084f6cc6d89ba354afade7e
|
be5bba70c3141f3f5ae9f90088dea695c72757eb
|
refs/heads/master
| 2020-04-13T09:35:44.970577
| 2018-12-10T14:13:24
| 2018-12-10T14:13:24
| 163,115,635
| 1
| 0
| null | 2018-12-25T22:43:40
| 2018-12-25T22:43:39
| null |
UTF-8
|
R
| false
| false
| 3,726
|
r
|
lsem_fitsem.R
|
## File Name: lsem_fitsem.R
## File Version: 0.42
##############################################################
# lsem_fitsem: core fitting loop for local structural equation models (LSEM).
# For each grid point gg on the moderator grid, the lavaan model is refitted
# with observation weights weights[, gg], either via lavaan.survey on the
# weighted raw data or from weighted sufficient statistics (mean/covariance).
#
# Arguments:
#   dat                   - data frame with raw observations (gains a 'weight' column)
#   weights               - N x G matrix; column gg holds weights at grid point gg
#   lavfit                - fitted lavaan object used as template for lavaan.survey
#   fit_measures, NF      - names / number of fit measures to extract per grid point
#   G, moderator.grid     - number and values of moderator grid points
#   verbose               - print a textual progress bar
#   pars                  - character vector of parameter labels ("lhs op rhs") to keep
#   standardized          - also extract the standardized solution (prefixed "std__")
#   variables_model       - manifest variables entering the model
#   sufficient_statistics - if TRUE, fit from weighted means/covariances
#   lavaan_fct            - which lavaan interface to use: "sem" or "lavaan"
#   lavmodel              - lavaan model syntax
#   ...                   - passed on to lavaan::sem / lavaan::lavaan
# Returns: list(parameters = <parameter and fit-measure table stacked over the grid>)
lsem_fitsem <- function( dat, weights, lavfit,
        fit_measures, NF, G, moderator.grid, verbose,
        pars, standardized, variables_model,
        sufficient_statistics, lavaan_fct, lavmodel,
        ... )
{
    parameters <- NULL
    fits <- NULL
    pars0 <- pars
    env_temp <- environment()
    # progress bar header: one "-" will be printed for each of up to 10 ticks
    if (verbose){
        cat( "** Fit lavaan model\n")
        G1 <- min(G,10)
        pr <- round( seq(1,G, len=G1) )
        cat("|")
        cat( paste0( rep("*",G1), collapse="") )
        cat("|\n")
        cat("|")
    }
    # loop over the moderator grid, refitting the model once per grid point
    for (gg in 1:G){
        # gg <- 1
        dat$weight <- weights[,gg]
        #***** fit the model using weighted data
        if (! sufficient_statistics){
            datsvy <- survey::svydesign(id=~index, weights=~weight, data=dat)
            # assign(x="lavmodel__", value=lavmodel, pos=1)
            # lavaan.survey looks the model up in the global workspace
            assign_args <- list( x="lavmodel__", value=lavmodel, pos=1)
            res0 <- do.call( what="assign", args=assign_args)
            survey.fit <- lavaan.survey::lavaan.survey(lavaan.fit=lavfit,
                                survey.design=datsvy )
        }
        #***** fit the model using sufficient statistics
        if (sufficient_statistics){
            res <- lsem_weighted_mean( x=dat[, variables_model], weights=dat$weight )
            wmean <- res$mean
            res <- lsem_weighted_cov( x=dat[, variables_model], weights=dat$weight )
            wcov <- res$cov
            Nobs <- round( res$Nobs )
            if (lavaan_fct=="sem"){
                survey.fit <- lavaan::sem(model=lavmodel, sample.cov=wcov,
                                    sample.mean=wmean, sample.nobs=Nobs, ... )
            }
            if (lavaan_fct=="lavaan"){
                survey.fit <- lavaan::lavaan(model=lavmodel, sample.cov=wcov,
                                    sample.mean=wmean, sample.nobs=Nobs, ... )
            }
        }
        dfr.gg <- pars <- lavaan::parameterEstimates(survey.fit)
        # optionally append standardized estimates, labeled with a "std__" prefix
        if (standardized){
            sol <- lavaan::standardizedSolution( survey.fit )
            colnames(sol)[ which( colnames(sol)=="est.std" ) ] <- "est"
            sol$lhs <- paste0( "std__", sol$lhs)
            pars <- sirt_rbind_fill( x=pars, y=sol )
            # pars <- plyr::rbind.fill( pars, sol )
            dfr.gg <- pars
        }
        # reorder rows to match the requested parameter labels pars0
        pars <- paste0( pars$lhs, pars$op, pars$rhs )
        NP <- length(pars0)
        ind <- match( pars0, pars )
        dfr.gg <- dfr.gg[ ind, ]
        dfr.gg <- data.frame("grid_index"=gg, "moderator"=moderator.grid[gg],
                        "par"=pars0, "parindex"=1:NP, dfr.gg )
        # fit measures are appended as extra rows with op == "fit"
        dfr.gg0 <- data.frame("grid_index"=gg, "moderator"=moderator.grid[gg],
                        "par"=fit_measures, "parindex"=NP + 1:NF,
                        "est"=lavaan::fitMeasures(survey.fit, fit.measures=fit_measures ),
                        "op"="fit" )
        # pad the fit-measure rows with NA for columns they do not have
        vars <- setdiff( colnames(dfr.gg), colnames(dfr.gg0) )
        for (vv in vars){ dfr.gg0[,vv] <- NA }
        dfr.gg <- rbind( dfr.gg, dfr.gg0[, colnames(dfr.gg) ] )
        parameters <- rbind( parameters, dfr.gg )
        # fits <- rbind( fits, dfr.gg )
        if (verbose){
            if ( gg %in% pr ){
                cat("-")
                utils::flush.console()
            }
        }
    }
    if (verbose){
        cat("|\n")
        utils::flush.console()
    }
    # sort so that all grid points of a parameter are contiguous
    parameters <- parameters[ order(parameters$parindex), ]
    #--- OUTPUT
    res <- list( parameters=parameters )
    return(res)
}
#######################################################################
# backward-compatible dotted alias
lsem.fitsem <- lsem_fitsem
|
77059b04a65617c43494305da7ad36916ab59f2d
|
584fd6953d01b615c904f6b4ad9d6cb591bdb133
|
/data_3/source/scr.R
|
375a00ffb8b158e8b058da6d7e14a0fa14d2af7e
|
[] |
no_license
|
sebastian-ospina-cuartas/taller_r-202102
|
4f5e04d4d362e9fc3d62e2afb77befa491afe364
|
5d3d66621b044d6b669f9c21b58a7a75c821c838
|
refs/heads/master
| 2023-09-04T02:30:31.899937
| 2021-11-03T21:45:05
| 2021-11-03T21:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,835
|
r
|
scr.R
|
# Author: Eduard Martinez
# Collaborators:
# Created: 25/08/2021
# Last modified: 25/08/2021

# Initial configuration
rm(list = ls()) # clear the R environment (kept for teaching purposes; avoid in production code)
# Bootstrap pacman, then use it to load/install the remaining packages.
# (fixed: install.packages() needs the package NAME as a string --
# `install.packages(pacman)` failed because no object `pacman` exists yet)
if(!require(pacman)) install.packages("pacman")
require(pacman)
p_load(dplyr,data.table)
print('No fijamos un WD porque estamos trabajando en un R-project')
#============================#
# [1.] Working directory     #
#============================#
getwd() # get the path of the current working directory
list.files() # vector with the names of the files in the wd
list.files(path = '.' , pattern = '.md') # vector with names of markdown (.md) files
#====================#
# [2.] Data types    #
#====================#
# logical
vector_l = c(NA,TRUE,FALSE)
is.logical(vector_l)
# character
vector_c = c("hola",'a',"2") # character strings use '' or ""
is.character(vector_c)
# Numeric data
# numeric
vector_n = c(5,2)
is.numeric(vector_n)
# integer
vector_i = -5:5
is.integer(vector_i)
# double
vector_d = c(1e6,2e6)
is.double(vector_d)
cat("puedo consultar el tipo de datos que contiene un objeto usando la función is()")
#==========================#
# [3.] Data structures     #
#==========================#
#--------------------------------------
#  Dim  |  Homogeneous  | Heterogeneous |
#--------------------------------------
#  1d   |  Vector       | Lists        |
#  2d   |  Matrix       | Dataframe    |
#  nd   |  Array        |              |
#--------------------------------------
# Homogeneous: every element of the object must be of the same type (numeric, character, or logical).
# Heterogeneous: these objects can store different data types (numeric, character, and logical).
# Dimensions: the number of dimensions (rows and/or columns) of an object.
## Vector & Matrix
# Vectors
abe = c("a","b","c","d","e","f")
# NOTE(review): `log` shadows base::log within this script
log = c(TRUE,FALSE,TRUE,TRUE,FALSE,FALSE)
num = c(2,4,6,7,8,9) # the numbers do not have to be consecutive
files = list.files()
# Matrix (filled column-wise from num)
matriz = matrix(num, ncol = 3, nrow = 2 )
df = data.frame(matriz)
matriz
## Lists
# list: heterogeneous container; elements may be vectors, matrices, data frames...
lista = list()
lista[[1]] = abe
lista[[2]] = log
lista[[3]] = num
lista[[4]] = matriz # matrix created above
lista[[5]] = df # dataframe created above
lista
## Dataframes
cat("Además de los datos que están en esas filas y columnas un data.frame tiene dos atributos (metadatos):
los nombres de columna y los nombres de filas.")
# dataframe: columns are the vectors/matrix columns defined above
dataframe = data.frame(log, abe, num, matriz)
dataframe
str(dataframe)
## Manipulating vectors
abe[5]
abe
abe[-5] # drop element 5
abe[1:3] # first 3 elements
## Manipulating matrices
matriz[1,] # all elements in row 1
matriz[,2] # all elements in column 2
matriz[1,2] # element at row 1, column 2
# Replace the elements of a row
matriz[1,] = c(3,7,9)
matriz
# replace a single element
matriz[1,2] = 5
matriz
# column names
colnames(matriz)
# row names
rownames(matriz)
# Assign names to several columns at once
colnames(matriz) = c("col_1" , "col_2", "col_3")
matriz
# Assign a name to one specific row
rownames(matriz)[2] = "row_2"
matriz
rownames(matriz) = c("row_1","new_row_2")
matriz
## Manipulating dataframes
dataframe[1,]# view one row
dataframe
dataframe[,3]# view one column
dataframe$num
# change the row names
rownames(dataframe) = c("row_1","row_2","row_3","row_4","row_5","row_6")
rownames(dataframe)# view the row names
dataframe
## Manipulating lists
lista
lista[[4]] # extract the element we want
lista[[4]][,2] # select a column inside the matrix
names(lista) = c("vector_character","vector_logical","vector_numerico","matriz","df")
lista
lista$vector_character
lista["df"]
bd3ec9cc8831e7a73e4b7d47f45c09e6fd7b9e3b
|
577f03954ec69ed82eaea32c62c8eba9ba6a01c1
|
/R/h2o-package/man/h2o.naiveBayes.Rd
|
0e828043218db279ba86cb5e04b8ebd02e23fbf2
|
[
"Apache-2.0"
] |
permissive
|
ledell/h2o
|
21032d784a1a4bb3fe8b67c9299f49c25da8146e
|
34e271760b70fe6f384e106d84f18c7f0adb8210
|
refs/heads/master
| 2020-02-26T13:53:01.395087
| 2014-12-29T04:14:29
| 2014-12-29T04:14:29
| 24,823,632
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,915
|
rd
|
h2o.naiveBayes.Rd
|
\name{h2o.naiveBayes}
\alias{h2o.naiveBayes}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
H2O: Naive Bayes Classifier
}
%% ~~function to do ... ~~
\description{Builds a naive Bayes classifier on a parsed data set, using conditional probabilities of the predictors given each class of the response.
}
\usage{
h2o.naiveBayes(x, y, data, key = "", laplace = 0, dropNACols = FALSE)
}
\arguments{
\item{x}{
A vector containing the names of the predictors in the model.
}
\item{y}{
The name of the response variable in the model.
}
\item{data}{
An \code{\linkS4class{H2OParsedData}} (\code{version = 2}) object containing the variables in the model.
}
\item{key}{
(Optional) The unique hex key assigned to the resulting model. If none is given, a key will automatically be generated.
}
\item{laplace}{
(Optional) A positive number controlling Laplace smoothing. The default (0) disables Laplace smoothing.
}
\item{dropNACols}{
(Optional) A logical value indicating whether to drop predictor columns with >= 20\% NAs.
}
}
\details{
The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset.
When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability calculation during prediction.
}
\value{
An object of class \code{\linkS4class{H2ONBModel}} with slots key, data, and model, where the last is a list of the following components:
\item{laplace }{A positive number controlling Laplace smoothing. The default (0) disables Laplace smoothing.}
\item{levels }{Categorical levels of the dependent variable.}
\item{apriori }{Total occurrences of each level of the dependent variable.}
\item{apriori_prob }{A-priori class distribution for the dependent variable.}
\item{tables }{A list of tables, one for each predictor variable. For categorical predictors, the table displays, for each attribute level, the conditional probabilities given the target class. For numeric predictors, the table gives, for each target class, the mean and standard deviation of the variable.}
}
\seealso{
For more information see: http://docs.h2o.ai
}
\examples{
library(h2o)
localH2O = h2o.init()
# Build naive Bayes classifier with categorical predictors
votesPath = system.file("extdata", "housevotes.csv", package="h2o")
votes.hex = h2o.importFile(localH2O, path = votesPath, header = TRUE)
summary(votes.hex)
h2o.naiveBayes(y = 1, x = 2:17, data = votes.hex, laplace = 3)
# Build naive Bayes classifier with numeric predictors
irisPath = system.file("extdata", "iris.csv", package="h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath)
h2o.naiveBayes(y = 5, x = 1:4, data = iris.hex)
}
|
e9b085dc66dabe1270a307eb5a9925e73f81910f
|
1b4f0bc2361d4d2b27aed0f7e00832945acc4e48
|
/R/manageTags.R
|
928fe250cdbb2d66363a393f5094ed88fad5e366
|
[] |
no_license
|
Lchiffon/Rweixin
|
5016200d68c58372be04f56cd8f93a3b36f3b4d6
|
7d75e7c6e534d75ef8c0de591ea56b9989fadc97
|
refs/heads/master
| 2022-06-28T14:51:51.117647
| 2022-06-13T08:50:18
| 2022-06-13T08:50:18
| 101,831,958
| 22
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,861
|
r
|
manageTags.R
|
# Retrieve the full list of user tags for a WeChat official account.
# obj : a "weixin" object carrying the OAuth access token.
# ... : forwarded to the HTTP helper .getURL().
getTagsList <- function(obj, ...) {
  if (!inherits(obj, "weixin")) stop("A weixin object is required!")
  endpoint <- paste0("https://api.weixin.qq.com/cgi-bin/tags/get",
                     "?access_token=", obj$oauthToken)
  .getURL(endpoint, ...)
}
# Retrieve all follower openids carrying a given tag, following WeChat's
# cursor-based pagination until next_openid equals the last openid returned.
#
# obj   : a "weixin" object carrying the OAuth access token.
# tagid : id of the tag whose members are requested.
# ...   : forwarded to the HTTP helper .postURL().
# Returns a vector of openids accumulated over all pages.
getTagUsers <- function(obj, tagid, ...){
    if (!inherits(obj, "weixin")) stop("A weixin object is required!")
    url = "https://api.weixin.qq.com/cgi-bin/user/tag/get"
    requestURL = paste0(url, "?access_token=", obj$oauthToken)
    params <- list(tagid = tagid)
    out <- .postURL(requestURL, params, ...)
    OUT <- out$data$openid
    # Page through until the cursor catches up with the last id returned.
    # (fixed: the original appended the whole response object `out` instead of
    #  the page's openids, corrupting the result on multi-page tags; the
    #  redundant outer `if` with the same condition was also removed)
    while (tail(out$data$openid, n=1) != out$next_openid) {
        params <- list(tagid = tagid, next_openid = out$next_openid)
        out <- .postURL(requestURL, params, ...)
        OUT <- c(OUT, out$data$openid)
    }
    return(OUT)
}
# Attach a tag to a set of followers, batching the openids 50 per API call
# (the WeChat batchtagging endpoint's limit). Returns the last HTTP response.
postTags <- function(obj, tagid, openids, ...) {
  if (!inherits(obj, "weixin")) stop("A weixin object is required!")
  requestURL = paste0("https://api.weixin.qq.com/cgi-bin/tags/members/batchtagging",
                      "?access_token=", obj$oauthToken)
  # Chunk the ids into consecutive groups of at most 50
  # (the [1:n] indexing is kept as-is to preserve the original edge behavior)
  n <- length(openids)
  batch_id <- rep(1:(n %/% 50 + 1), each = 50)[1:n]
  batches <- split(openids, f = batch_id)
  for (batch in batches) {
    out = .postURL(requestURL, list(openid_list = batch, tagid = tagid), ...)
  }
  return(out)
}
# Remove a tag from a set of followers, batching the openids 50 per API call
# (the WeChat batchuntagging endpoint's limit). Returns the last HTTP response.
postUnTags <- function(obj, tagid, openids, ...) {
  if (!inherits(obj, "weixin")) stop("A weixin object is required!")
  requestURL = paste0("https://api.weixin.qq.com/cgi-bin/tags/members/batchuntagging",
                      "?access_token=", obj$oauthToken)
  # Chunk the ids into consecutive groups of at most 50
  # (the [1:n] indexing is kept as-is to preserve the original edge behavior)
  n <- length(openids)
  batch_id <- rep(1:(n %/% 50 + 1), each = 50)[1:n]
  batches <- split(openids, f = batch_id)
  for (batch in batches) {
    out = .postURL(requestURL, list(openid_list = batch, tagid = tagid), ...)
  }
  return(out)
}
|
11f9e1a63767541b95af0502e08b9dabfa69fd05
|
77eebb3e0eb6c2bdf477fba817d67dfbad94a03c
|
/first attempt.R
|
f5160523d08f1573e071b8b3a08231efd797be9e
|
[] |
no_license
|
phileas-condemine/moskitoes_kaggle
|
eb5041a4661fe5de2820aba2393e2790904fb48a
|
9f10466e8ebfb3056e48ecd0de5ab6b266168048
|
refs/heads/master
| 2021-07-16T00:16:16.226036
| 2017-10-18T15:42:31
| 2017-10-18T15:42:31
| 107,428,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,348
|
r
|
first attempt.R
|
# ---- Kaggle West Nile Virus: data loading and spatial join ------------------
# NOTE(review): setwd() and absolute paths make this script machine-specific.
setwd("/Users/p-condemine/Documents/kaggle moskitoes/")
library(data.table)
require("rgdal") # requires sp, will use proj.4 if installed
require("maptools")
require("ggplot2")
require("dplyr")
require("rgdal")
# Training data: collapse duplicated trap rows (same date/place/species) into
# total mosquito count, any-WNV flag, and a duplicate-row count.
db=fread("/Users/p-condemine/Documents/kaggle moskitoes/train.csv")
db[,c("NumMosquitos","WnvPresent","count"):=list(sum(NumMosquitos),max(WnvPresent),.N),
   by=c("Date","Longitude","Latitude","Species")]
db=unique(db)
# Police-district shapefile ("poldist"), prepared for ggplot
map=readShapePoly("poldist")
map@data$id = rownames(map@data)
map.points = fortify(map, region="id")
# NOTE(review): join() is plyr's, but plyr is not loaded here (dplyr is) --
# confirm this line actually runs
map.df = join(map.points, map@data, by="id")
# Point-in-polygon: attach the district attributes of the polygon containing
# each trap location
coordinates(db) <- ~ Longitude + Latitude
proj4string(db) <- proj4string(map)
db=cbind(db,over(x=db,y = map))
db=data.table(db)
map.df=data.table(map.df)
map.df=map.df[map.df$order,]
ggplot(data=map.df)+geom_polygon(aes(long,lat,group=group,fill=group))
# Same treatment for the test set (only a duplicate-row count is available)
test=fread("/Users/p-condemine/Documents/kaggle moskitoes/test.csv")
test[,c("count"):=list(.N),
     by=c("Date","Longitude","Latitude","Species")]
coordinates(test) <- ~ Longitude + Latitude
proj4string(test) <- proj4string(map)
test=cbind(test,over(x=test,y = map))
test=data.table(test)
# ---- Spray and weather data -------------------------------------------------
spray=fread("/Users/p-condemine/Documents/kaggle moskitoes/spray.csv")
weather=fread("/Users/p-condemine/Documents/kaggle moskitoes/weather.csv")
# Raw weather columns come in as character; coerce the numeric ones.
weather$Tmax=as.numeric(weather$Tmax)
weather$Tmin=as.numeric(weather$Tmin)
weather$AvgSpeed=as.numeric(weather$AvgSpeed)
weather$ResultSpeed=as.numeric(weather$ResultSpeed)
weather$PrecipTotal=as.numeric(weather$PrecipTotal)
# Recode precipitation: 0 -> -1 (truly dry day) and NA (trace codes that failed
# numeric conversion) -> 0, so trace rain sits between dry and measurable rain.
weather$PrecipTotal=ifelse(weather$PrecipTotal==0,-1,weather$PrecipTotal)
weather$PrecipTotal=ifelse(is.na(weather$PrecipTotal),0,weather$PrecipTotal)
# Departure from the monthly mean temperature
weather$Depart=as.numeric(weather$Depart)
# Dew point: a pressure/humidity indicator
weather$DewPoint=as.numeric(weather$DewPoint)
# Wet-bulb temperature: similar indicator
weather$WetBulb=as.numeric(weather$WetBulb)
weather
# Drop mostly-missing/unused columns.
# (fixed: the original call had a trailing comma after -Sunset, a parse error)
weather=select(weather,-Water1,-SnowFall,-Depth,-Heat,-Cool,-Sunrise,-Sunset)
# NOTE(review): a broken exploratory line `weather[as.numeric(weather$)>0]`
# was removed here -- it did not parse and had no effect on the pipeline.
summary(factor(weather$CodeSum))
# For each weather-code pattern, flag the days it occurs and compute, per
# station, the number of days since it last occurred.
library(pbapply)
patterns=c("TS ","RA","DZ","HZ","BR","FU","FG ","FG+","TSRA","GR","VCTS")
weather$Date_num=as.numeric(as.Date(weather$Date))
for (pattern in patterns){
  weather$x=1*grepl(pattern=pattern,x=weather$CodeSum)
  temp=subset(weather,weather$x==1)
  temp1=subset(temp,temp$Station=="1")$Date_num
  temp2=subset(temp,temp$Station=="2")$Date_num
  # days since last occurrence at this row's station (NA if never before)
  # (cleaned: the original print()-ed -- and double-printed -- every value,
  # flooding the console; the computed values are unchanged)
  weather$last_x=pbapply(weather[,c("Date_num","Station"),with=FALSE],1,function(y){
    if(y[2]==1){
      if(length(temp1[temp1<=y[1]])>0){
        y[1]-max(temp1[temp1<=y[1]])
      } else NA
    } else {
      if(length(temp2[temp2<=y[1]])>0){
        y[1]-max(temp2[temp2<=y[1]])
      } else NA
    }
  })
  setnames(weather,c("x","last_x"),c(pattern,paste0("last_",pattern)))
}
str(weather)
# Total precipitation over the trailing 10 days (the -1 "dry" code zeroed out)
temp=weather[,c("Date_num","PrecipTotal"),with=FALSE]
temp$PrecipTotal=ifelse(temp$PrecipTotal<0,0,temp$PrecipTotal)
weather$Precip_10days=pbsapply(weather$Date_num,function(x){
  sum(subset(temp,temp$Date_num<=x&temp$Date_num>x-10)$PrecipTotal)
})
# Station 1: CHICAGO O'HARE INTERNATIONAL AIRPORT Lat: 41.995 Lon: -87.933 Elev: 662 ft. above sea level
# Station 2: CHICAGO MIDWAY INTL ARPT Lat: 41.786 Lon: -87.752 Elev: 612 ft. above sea level
# Attach each weather station's coordinates
weather$Longitude=ifelse(weather$Station==1,-87.933,-87.752)
weather$Latitude=ifelse(weather$Station==1,41.995,41.786)
library(ggplot2)
library(ggmap)
mapdata <- readRDS(file.path("mapdata_copyright_openstreetmap_contributors.rds"))
# Per-location risk summary: total mosquitos (risk) and WNV frequency (risk2)
local=db[,list("risk"=sum(NumMosquitos),"risk2"=mean(WnvPresent)),by=c("Longitude","Latitude")]
library(Hmisc)
# Bin WNV frequency into quantile-based risk classes for plotting
local$risk_range=cut2(local$risk2,cuts = quantile(local$risk2,c(0.2,0.5,0.8,0.9,0.95)))
g<-ggmap(get_googlemap(center = c(-87.72,41.86),zoom = 11))
g+geom_point(data=local,aes(x=Longitude,y=Latitude,color=factor(risk_range),size=log(log(1+risk))))+
  geom_text(data=local,aes(x=Longitude,y=Latitude,label=paste0(round(10000*risk2/sqrt(risk)))))
# Calendar features for the training set
# (13662 is a hard-coded as.Date numeric offset, around mid-2007 -- TODO confirm;
# day/week are day-of-season offsets modulo 365)
db$month=month(as.Date(db$Date))
db$weekday=factor(weekdays(as.Date(db$Date)))
db$week=factor(round(((as.numeric(as.Date(db$Date))-13662)%%365)/7))
db$Species=factor(db$Species)
db$Trap=factor(db$Trap)
db$year=year(as.Date(db$Date))
db$Species_coherent=db$Species
db$day=(as.numeric(as.Date(db$Date))-13662)%%365
# db$year_spray=1*(year(as.Date(db$Date))>=2010)
# Same calendar features for the test set
test$month=month(as.Date(test$Date))
test$weekday=factor(weekdays(as.Date(test$Date)))
test$week=factor(round(((as.numeric(as.Date(test$Date))-13662)%%365)/7))
test$Species=factor(test$Species)
test$Trap=factor(test$Trap)
test$year=year(as.Date(test$Date))
test$Species_coherent=test$Species
test$day=(as.numeric(as.Date(test$Date))-13662)%%365
# test$year_spray=1*(year(as.Date(test$Date))>=2010)
# Collapse rare species levels to NA: only the three Culex categories are kept
levels(db$Species_coherent)<-c(NA,"CULEX PIPIENS","CULEX PIPIENS/RESTUANS", "CULEX RESTUANS",NA,NA,NA)
# risk in previous year ?
# CREE DE L'OVERFITTING  [French: "creates overfitting" -- deliberately disabled]
# ownrisk=subset(db,year(as.Date(db$Date))<2013)[,list("risk_wnv"=sum(WnvPresent),"risk_freq"=100*mean(WnvPresent),
#                                                      "risk_freq_w"=10000*sum(WnvPresent)/sum(NumMosquitos),
#                                                      "risk_freq_exposed"=10000*sum(WnvPresent)/log(sum(NumMosquitos))),
#                                                by=c("Longitude","Latitude")]
# ownrisk[ownrisk==0]<-NA
# db=merge(db,ownrisk,by=c("Longitude","Latitude"),all.x=T,all.y=F)
#
# Feature ideas (not implemented):
# Distance time & space to last
# -alert
# -seasonality
# KNN risk
# gradient of risk to neighbors
# Add data about PoI
# Match each observation to its nearest weather station, then merge weather for
# the day before (Date_next join) and the same day (Date.x join).
# NOTE(review): get.knnx() is from package FNN, which is never loaded -- confirm
KNN=get.knnx(data=weather[1:2,c("Longitude","Latitude"),with=F],query=db[,c("Longitude","Latitude"),with=F],k=2)
# NOTE(review): stats::dist() takes no x=/y= pair like this; this call likely
# errors (fields::rdist may have been intended) -- confirm
dist_station=dist(y=weather[1:2,c("Longitude","Latitude"),with=F],x=db[,c("Longitude","Latitude"),with=F])
weather$Date_next=as.numeric(as.Date(weather$Date))-1
db$Date_next=as.numeric(as.Date(db$Date))
db$dist_station1=dist_station[,1]
db$dist_station2=dist_station[,2]
db$Station=KNN$nn.index[,1]
db=merge(db,weather,by=c("Date_next","Station"),all.x=T,all.y=F)
weather$Date.x=as.numeric(as.Date(weather$Date))
db$Date.x=as.numeric(as.Date(db$Date.x))
db=merge(db,weather,by=c("Date.x","Station"),all.x=T,all.y=F)
# Same weather merges for the test set
KNN2=get.knnx(data=weather[1:2,c("Longitude","Latitude"),with=F],query=test[,c("Longitude","Latitude"),with=F],k=2)
dist_station=dist(y=weather[1:2,c("Longitude","Latitude"),with=F],x=test[,c("Longitude","Latitude"),with=F])
test$Date_next=as.numeric(as.Date(test$Date))
test$dist_station1=dist_station[,1]
test$dist_station2=dist_station[,2]
test$Station=KNN2$nn.index[,1]
test=merge(test,select(weather,-Date.x),by=c("Date_next","Station"),all.x=T,all.y=F)
test$Date.x=as.numeric(as.Date(test$Date.x))
test=merge(test,weather,by=c("Date.x","Station"),all.x=T,all.y=F)
# Year-level WNV base rate, and year as a factor predictor
db[,c("year_risk"):=list(mean(WnvPresent)),by=year]
db$year_factor=factor(db$year)
# train=db
# Train on 2007/2009/2011, validate on 2013
train=subset(db,year(db$Date.y)%in%c("2007","2009","2011"))
validation=subset(db,year(db$Date.y)%in%c("2013"))
# cut=sample(1:nrow(db))
# train=db[cut[1:5000]]
# validation=db[cut[5001:nrow(db)]]
# ---- GBM model, validation AUC and submission file --------------------------
library(gbm)
# Predictors kept for the model (the response WnvPresent is included too)
keep=c("WnvPresent","month","weekday","week","Longitude","Latitude",
       "Species_coherent","ownrisk_range","day","count",
       "AvgSpeed.y","PrecipTotal.y",
       "PrecipTotal.x","Tmin.x","Tmax.x","Tmin.y","Tmax.y","ResultSpeed.y","year_factor")
# "risk_freq_w","risk_freq_exposed")
# Hyperparameters (named for readability; accessed by position below)
param=c(ntree=300,depth=15,shrinkage=0.005,train.fraction=0.8,minsize=10)
modeldp=gbm(WnvPresent~.,cv.folds = 10,
            data=train[,which(colnames(train)%in%keep),with=FALSE],
            n.trees = param[1],interaction.depth = param[2],
            shrinkage = param[3],train.fraction = param[4],
            n.minobsinnode=param[5],bag.fraction=0.5,verbose = TRUE)
# (fixed: a stray `,weights=log(train$NumMosquitos)` line sat OUTSIDE the call
# above and did not parse; to weight observations, add that argument inside gbm())
summary(modeldp)  # relative influence of each predictor
colnames(train[,which(colnames(train)%in%keep),with=FALSE])
# Partial-dependence plots for selected predictors
plot(modeldp,i.var=c(2,1),type="response")
plot(modeldp,i.var=6,type="response")
n.trees=200
library(verification)
# ROC curve / AUC on the held-out 2013 validation year
roc=roc.plot(x=validation$WnvPresent,
             pred=predict(modeldp,newdata = validation[,which(colnames(validation)%in%keep),
                                                       with=FALSE],type="response"))
roc$roc.vol
pSubmit<-predict(modeldp, newdata = test, type = "response")
## look at the predicted distribution (AUC doesn't care about probabilities; just ordering. It's still a good diagnostic)
summary(modeldp)
# Kaggle submission file
submissionFile<-cbind(test$Id,pSubmit)
colnames(submissionFile)<-c("Id","WnvPresent")
options("scipen"=100, "digits"=8)
write.csv(submissionFile,"submitGBM.csv",row.names=FALSE,quote=FALSE)
# ANNEXES ---------------------------------------------------------------------
# SPRAY IS USELESS BECAUSE WE DON'T KNOW THE USE IN TEST SET
# Compare alerts/mosquito counts before vs after one spray date, restricted to
# observations within ~0.01 degrees of a sprayed location.
i=3
lev=levels(factor(spray$Date))
spray_sub=subset(spray,spray$Date==lev[i])
ggmap(mapdata) +
  # geom_point(data=spray_sub,aes(x=Longitude,y=Latitude))+
  geom_point(data=local,aes(x=Longitude,y=Latitude,color=factor(risk_range),
                            size=log(log(1+risk))))
# Observations within 15 days of the spray date
sub=subset(db,abs(as.numeric(as.Date(db$Date))-as.numeric(as.Date(lev[i])))<15)
# sub=subset(sub,(sub$Longitude<max(spray_sub$Longitude)&sub$Longitude>min(spray_sub$Longitude))&
#              (sub$Latitude<max(spray_sub$Latitude)&sub$Latitude>min(spray_sub$Latitude)))
# Distance from each observation to the nearest sprayed point
sub$dist_spray=get.knnx(query=sub[,c("Longitude","Latitude"),with=FALSE],
                        data=spray_sub[,c("Longitude","Latitude"),with=FALSE],
                        k=1)$nn.dist[,1]
sub=subset(sub,sub$dist_spray<0.01)
sub$before=1*(as.numeric(as.Date(sub$Date))<as.numeric(as.Date(lev[i])))
sub[,list("alerts"=sum(WnvPresent),"mosquitos"=sum(NumMosquitos),"counts"=.N),by="before"]
sub[sub$WnvPresent==1]
lev[i]
################################################################
# WEATHER PATTERNS ?
# Per year: WNV count/frequency among observations matching each weather code
# ("RAS" row = baseline, rows whose CodeSum contains a space).
temp=data.table(pattern=c("RAS",patterns))
for (y in c(2007,2009,2011,2013)){
  # (fixed: dbtemp must be defined BEFORE the baseline counts below; the
  # original used dbtemp from the previous iteration / errored on the first)
  dbtemp=db[db$year==y]
  counts=length(dbtemp[grepl(pattern = " ",x = dbtemp$CodeSum.y,fixed = TRUE)]$WnvPresent)
  freqs=mean(dbtemp[grepl(pattern = " ",x = dbtemp$CodeSum.y,fixed = TRUE)]$WnvPresent)
  for (i in seq_along(patterns)){
    pattern=patterns[i]
    counts=c(counts,length(dbtemp[grepl(pattern = pattern,x = dbtemp$CodeSum.y,fixed = FALSE)]$WnvPresent))
    freqs=c(freqs,mean(dbtemp[grepl(pattern = pattern,x = dbtemp$CodeSum.y,fixed = FALSE)]$WnvPresent))
  }
  temp=data.table(cbind(temp,freq=freqs,count=counts))
  setnames(temp,c("freq","count"),c(paste("freq",y),paste("count",y)))
}
temp=temp[order(temp[["freq 2011"]],decreasing=TRUE)]
temp
db[,list(mean(WnvPresent),sum(NumMosquitos),.N),by=year]
# NOTE(review): the following working notes were plain unquoted text in the
# source and did not parse; kept here as comments (translated from French):
# Big gap between years... try to understand why with a clustering on weather?
# A few simple stats should be enough? Airport migration flows per year in
# Chicago? Or prevention budget?
summary(test[,list("number"=.N),by=c("Date","Species")]$number)
summary(test[,list("number"=.N),by=c("Date","Species")]$number)
|
bf43acbb9099d1c3fbbe1ae306cfef4e0768ec68
|
2cdb57f6c9ff73d2fb3ed4e3fbc52143f8014f21
|
/man/DataStructure.Rd
|
0593e79f08a50d72d60e56cd8247fc3e48b789d7
|
[] |
no_license
|
cran/LGS
|
29afe18731c88aeaac52055377eae7808bafd6c1
|
e2e178de5d7c3a59ee1a9d4960433f9bdc841f94
|
refs/heads/master
| 2021-01-10T20:39:12.074818
| 2008-12-12T00:00:00
| 2008-12-12T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
rd
|
DataStructure.Rd
|
\name{DataStructure}
\alias{DataStructure}
\title{ Data structure of LGS output }
\description{
  \code{DataStructure} describes the data structure of LGS output objects; summary statistics about the allele ratio and populations of an LGS object are provided by \code{summaryLGS}.
}
\author{ Dario Beraldi <\email{dario.beraldi@ed.ac.uk}> }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ manip }
|
a4389ca4a170e67379259990db1cf5823dd04f43
|
9b55a2a720fbc00b9fe014099734ef9d7b560870
|
/Air Passengers Time Series.R
|
60885b6f528f4232d114cbebdd273d695cabf7b2
|
[] |
no_license
|
mauryask07/Air-Passengers-Time-Series
|
67298b512638bd8f5b0895c901ab98ebb0b3d0d8
|
82f80fd1a2c0cb2f80ace91b8d67e945728286b7
|
refs/heads/master
| 2021-05-13T16:31:57.966520
| 2018-01-09T09:47:50
| 2018-01-09T09:47:50
| 116,796,466
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,453
|
r
|
Air Passengers Time Series.R
|
# ---- AirPassengers seasonal ARIMA forecasting -------------------------------
# Explore the classic monthly AirPassengers series, fit a seasonal ARIMA on the
# log-transformed data, and forecast future passenger counts.

# Load the data
data("AirPassengers")
AirPassengers
class(AirPassengers)
# "ts": the data is already a time series object
frequency(AirPassengers)
# The cycle of this time series is 12 months in a year
plot(AirPassengers)
summary(AirPassengers)
# Overlay the fitted linear trend on the plot
abline(reg = lm(AirPassengers~time(AirPassengers)))
# Box plot across months gives a sense of the seasonal effect
boxplot(AirPassengers~cycle(AirPassengers))
# Differencing the log series removes trend and stabilizes variance;
# the result is stationary enough for ARIMA modelling.
plot(diff(log(AirPassengers)))
# (removed unused `library(tseries)`: acf/pacf/arima/predict are all in stats)
acf(diff(log(AirPassengers)))
# ACF suggests the MA order q
pacf(diff(log(AirPassengers)))
# PACF suggests the AR order p
# Fit ARIMA(0,1,1)(0,1,1)[12] on the log series and forecast 5 years ahead.
# (fixed: `seasonal = (order=c(0,1,1))` relied on an accidental assignment
# inside parentheses and created a stray global `order`; the documented form
# is a list with an explicit period, as used later in this script)
fit <- arima(log(AirPassengers), c(0,1,1), seasonal = list(order=c(0,1,1), period=12))
pred <- predict(fit, n.ahead = 5*12)
# Back-transform from the log scale with exp()
# (fixed: the original used 2.718^x, an imprecise approximation of e)
pred1 <- exp(pred$pred)
ts.plot(AirPassengers, exp(pred$pred), log="y", lty=c(1,3))
# Testing our model: train on 1949-1959 and predict 1960 for comparison
model1 <- ts(AirPassengers, frequency = 12, start = c(1949,1), end=c(1959,12))
fit <- arima(log(model1), c(0,1,1), seasonal = list(order=c(0,1,1), period=12))
pred <- predict(fit, n.ahead = 10*12)
pred1 <- exp(pred$pred)
data1 <- head(pred1, 12)
predict_1960 <- round(data1, digits = 0)
original_1960 <- tail(AirPassengers, 12)
ts.plot(AirPassengers, exp(pred$pred), log = "y", lty=c(1,3))
|
686a1ab6b4089368ce2fa7978f084ed9f0305cb1
|
2fed785e9aa814dffca8c87319984891cec0dbdb
|
/ui.R
|
d898a7b68bc14d727809e81127ab7e5839d3692a
|
[] |
no_license
|
HunterRatliff1/STEP2Reddit
|
5f57ccb529579748f2551b96911349e6dbb5af50
|
ad5234b00dd4b14fe553cdb1ad76c943bb8d2640
|
refs/heads/master
| 2022-02-18T04:03:58.451152
| 2019-09-06T17:49:07
| 2019-09-06T17:49:07
| 197,857,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,494
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# ---- Packages
require(shiny)
suppressPackageStartupMessages(require(tidyverse))
require(ggthemes)
require(lubridate)
require(stringr)
require(ggrepel)
require(readr)
# ---- Load data
reddit <- readr::read_csv("Step2CKSurvey.csv", na = c("", "NA", "#N/A"))
reddit <- reddit %>%
mutate(
Date_StudyStart = mdy(Date_StudyStart), # Sys.setenv(TZ="America/Chicago")
STEP2.date = mdy(STEP2.date),
UW_FirstPass = UW_FirstPass/100
) %>%
rename(UW_FP=UW_FirstPass)
shinybootstrap2::withBootstrap2(shinyUI(fluidPage(
# Google analytics (see http://shiny.rstudio.com/articles/google-analytics.html)
tags$head(includeScript("google-analytics.js"),
includeScript("https://www.googletagmanager.com/gtag/js?id=UA-144372385-1")),
# Application title
titlePanel("STEP 2 CK Score Linear Model (2018)"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
shiny::sliderInput("ci", "Confidence interval", 50, 100, 95, 1),
h3("Your scores"),
numericInput("UW_1", label="UWSA 1", value = 251),
numericInput("UW_2", label="UWSA 2", value = NA),
numericInput("UW_FP", label="UW First pass (%)", value = NA, min=1, max=100),
numericInput("Goal", label="Goal/target score", value = NA),
numericInput("NBME_6", label="NBME 6", value = NA),
numericInput("NBME_7", label="NBME 7", value = NA),
numericInput("NBME_8", label="NBME 8", value = NA),
p(a(href="https://www.reddit.com/r/Step2/comments/ansibd/step2_ck_2018_correlation_survey_results/",
"Source/Reddit thread")),
p(HTML("<u>Contact:</u>"), "/u/MomsAgainstMedAdvice"),
actionButton("help", "Help")
),
# Show a plot of the generated distribution
mainPanel(
fluidRow(
column(6,
# checkboxGroupInput("modelVars", h3("Score(s) to include in model"),
# inline=F, selected = c("UWSA 1"="UW_1"),
# choices = c("UWSA 1"="UW_1", "UWSA 2"="UW_2",
# "UW First pass"="UW_FP", "Goal score"="Goal",
# "NBME #6"="NBME_6", "NBME #7"="NBME_7",
# "NBME #8"="NBME_8"))
uiOutput("dynamicCheckbox"),
hr(),
plotOutput("graphDrop", height = "75px"),
p("Based on", strong(textOutput("numDrop", inline=T)), " total survey responses")
),
column(6,
p("Below are the calculated scores based on the equations from Reddit:"),
htmlOutput("redditModel"),
br(),
htmlOutput("complexWarning"),
modalDialog(title="How this works", size="m", easyClose=T,
p("First, input your scores from all of the practice tests you've taken",
"in their respective boxes on the left of the page."),
p("You can then generate a linear model that predicts your score by checking the",
"checkboxes at the top of the page for whichever practice tests you want to include.",
strong("Entering your scores in the sidebar does", tags$u("not"),
"include them in the model;"),"you must", strong("use the checkboxes."),
"The confidence interval can be adjusted using the slider in the left-upper corner"),
hr(),
p("Keep in mind that there is a tradeoff between a more complex model and",
"the sample size. More complex models (e.g. including all five practice tests)",
"have fewer responses included, because they only include survey responses from",
"people who took all five practice tests (in this case, only 15% of the dataset).",
"So it's best to stick to one or two variables (e.g.", code("UW_1"),"+", code("UW_2"),
", which includeds 80% of the dataset)."
)
)
)
),
hr(),
p("Using the model formula", strong("STEP2 ="), code(textOutput("modFormula", inline=T)), " as input:"),
h4("Estimated score: ", strong(style="color:red", textOutput("estScore", inline=T)),
textOutput("estCI", inline=T)),
plotOutput("graph"),
p("Your projected score is in red. The grey points/bars represent the data from the reddit
survey for comparison"),
hr(),
h3("Summary of the model"),
p("Below is a summary of the current model, which isn't very easy to read,
so here's the TL;DR:"),
p(strong("Call:"), "This is the formula which is shown up above. It does",
HTML("<u>not</u>"), "include an intercept"),
p(strong("Residuals:"), "Self-explanatory"),
p(strong("Coefficients:"), "See table below. Variables that have a colon
between them represents the interaction between/among variables (e.g.",
code("UW_1 : UW_2"), "represents the interactions between these two tests)."),
tableOutput("coeffDf"),
verbatimTextOutput("modSummary")
)
)
)))
|
8ae909d82bd710ca823e0d9e7c0071708e9fdddd
|
22e805419af43a4cde1cb5dd1dbd0b237f79f564
|
/german credit data analysis.R
|
c530687d94627fed92596b3ba88cca9e3d83f1ab
|
[] |
no_license
|
Shixi99/German-Credit-Data-Analysis
|
dc3e9cf19f673925c97aad00c0f73364fdb24dd0
|
9ddb482943b863f3679e5019d88342cf15a90f00
|
refs/heads/master
| 2022-04-29T01:09:46.725122
| 2020-04-27T08:52:34
| 2020-04-27T08:52:34
| 259,257,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,655
|
r
|
german credit data analysis.R
|
# install.packages(c('caret','ROSE'))
# options(repos='http://cran.rstudio.com/')
library(data.table)
library(tidyverse)
library(scorecard)
library(inspectdf)
library(h2o)
library(highcharter)
library(stats)
library(DMwR)
library(ROSE)
library(caTools)
library(caret)
dat <- fread("D://DATASETS//credit_german.csv")
dat <- dat %>% rename('target' = 'creditability')
dat %>% glimpse()
dat$target <- as.factor(dat$target)
dat$target <- as.numeric(dat$target)
dat$target <- ifelse(dat$target>1, 1, 0)
# =====================================================================================
# Filling missing values ----
dat %>% inspect_na %>% view()
# dat %>%
# inspect_na() %>%
# as.data.frame() %>%
# filter(cnt>0)
#
# unique_numeric_values_tbl <- raw %>%
# select_if(is.numeric) %>%
# map_df(~ unique(.) %>% length()) %>%
# gather() %>%
# arrange(value) %>%
# mutate(key = as_factor(key))
#
# num_unique_number <- unique_numeric_values_tbl %>%
# filter(value < 7) %>%
# arrange(desc(value)) %>%
# pull(key) %>%
# as.character()
#
# for (v in num_unique_number) {
# raw[[v]] <- as.character(raw[[v]])
# }
#
# string_factor_names <- raw %>%
# select_if(is.character) %>%
# names()
#
#
# rec_obj <- recipe(~ ., data = raw) %>%
# step_string2factor(string_factor_names) %>%
# step_meanimpute(all_numeric()) %>%
# step_modeimpute(all_nominal()) %>%
# prep(stringsAsFactors = FALSE)
#
# df <- bake(rec_obj, raw)
#
# df %>% inspect_na()
# Outliers ----
num_vars <- dat %>%
select(-target) %>%
select_if(is.numeric) %>%
names()
num_vars
for_vars <- c()
for (b in 2:length(num_vars)) {
OutVals <- boxplot(dat[[num_vars[b]]])$out
if(length(OutVals)>0){
for_vars[b] <- num_vars[b]
}
}
for_vars <- for_vars %>% as.data.frame() %>% drop_na() %>% pull(.) %>% as.character()
for_vars
for (o in for_vars) {
OutVals <- boxplot(dat[[o]])$out
mean <- mean(dat[[o]],na.rm=T)
o3 <- ifelse(OutVals>mean,OutVals,NA) %>% na.omit() %>% as.matrix() %>% t() %>% .[1,]
o1 <- ifelse(OutVals<mean,OutVals,NA) %>% na.omit() %>% as.matrix() %>% t() %>% .[1,]
val3 <- quantile(dat[[o]],0.75,na.rm = T) + 1.5*IQR(dat[[o]],na.rm = T)
dat[which(dat[[o]] %in% o3),o] <- val3
val1 <- quantile(dat[[o]],0.25,na.rm = T) - 1.5*IQR(dat[[o]],na.rm = T)
dat[which(dat[[o]] %in% o1),o] <- val1
}
# ======================================================================================================
# BINNING ----
dat %>% glimpse()
# IV (important variables) ---
iv <- dat %>%
iv(y = 'target') %>%
as_tibble() %>%
mutate( info_value = round(info_value, 3)) %>%
arrange( desc(info_value) )
# Exclude not important variables ---
ivars <- iv %>%
filter(info_value>0.02) %>%
select(variable) %>%
.[[1]]
data_iv <- dat %>%
select(ivars,target)
data_iv %>% dim()
# breaking dt into train and test ---
dt_list <- split_df(data_iv, "target", ratio = 0.8, seed=123)
train <- dt_list$train
test <- dt_list$test
# woe binning ---
bins <- data_iv %>% woebin("target")
# converting train and test into woe values
train_woe <- train %>% woebin_ply(bins)
test_woe <- test %>% woebin_ply(bins)
names <- train_woe %>% names()
names <- gsub("_woe","",names)
names(train_woe) <- names
names(test_woe) <- names
train_woe %>% inspect_na(show_plot = F)
test_woe %>% inspect_na(show_plot = F)
# # Check normality
# num_vars <- train_woe %>%
# select(-target) %>%
# names()
# num_vars
#
# norm <- c()
# for (s in 1:length(num_vars)) {
# val <- round(e1071::skewness(train_woe[[num_vars[s]]]), 2)
# norm[s] <- val
# }
#
# par(mfrow=c(5, 10)) # divide graph area in 2columns & 2rows (number of variables)
#
# for (s in 1:length(num_vars)) {
# var.name = num_vars[s]
# plot(density(train_woe[[num_vars[s]]]),
# main=glue('{enexpr(var.name)}'),
# ylab="Frequency",
# sub=paste("Skewness:", round(e1071::skewness(train_woe[[num_vars[s]]]), 2)))
# polygon(density(train_woe[[num_vars[s]]]), col="red")
# }
# Logistic Linear Regression Diagnostics ----
outcome <- 'target'
features <- train_woe %>% select(-target) %>% names()
f <- as.formula(paste(outcome, paste(features, collapse = " + "), sep = " ~ "))
glm <- glm(f, data = train_woe)
glm %>% summary()
# Select a formula-based model by AIC
step <- glm %>% stats::step()
step$call # copy paste
glm2 <- glm(formula = target ~ status.of.existing.checking.account +
duration.in.month + credit.history + age.in.years + savings.account.and.bonds +
purpose + present.employment.since + housing + other.installment.plans +
credit.amount + other.debtors.or.guarantors + installment.rate.in.percentage.of.disposable.income,
data = train_woe)
glm2 %>% summary()
glm2 %>%
coefficients() %>%
as.data.frame() %>%
rownames() %>%
.[-1] %>%
as.factor() -> all.vars
all.vars %>% length()
all.vars_char <- all.vars %>% as.character()
glm2 %>% vif() %>% arrange(desc(gvif)) %>%
pull(variable) -> all_vars
# Multicollinrarity
hchart(cor(
train_woe %>%
select(target,all.vars) %>%
mutate_if(is.factor,as.numeric)) %>%
round(.,2),label = T)
# VIF - glm2
# https://www.statisticshowto.datasciencecentral.com/variance-inflation-factor/
# glm2 %>% vif() %>% arrange(desc(gvif)) %>%
# filter(gvif<1.24) %>%
# pull(variable) -> afterVIF
#
# f <- as.formula(paste(outcome, paste(afterVIF, collapse = " + "), sep = " ~ "))
# glm3 <- glm(f, data = train_woe)
#
# glm3 %>% summary()
glm2 %>% scorecard::vif() %>%
pull(variable) -> selected
step$call
hchart(cor(
train_woe %>%
select(target,selected)) %>%
round(.,2),label = T)
# ================================================================================
rose_sample_train_data <- ROSE(target ~ ., data = train_woe, seed=123)$data
print('Number of transactions in train dataset after applying ROSE sampling method')
print(table(rose_sample_train_data$target))
rose_sample_test_data <- ROSE(target ~ ., data = test_woe, seed=123)$data
print('Number of transactions in train dataset after applying ROSE sampling method')
print(table(rose_sample_test_data$target))
# Modeling with GLM ----
h2o.init()
train_h2o <- as.h2o(train_woe %>% select(target,selected))
test_h2o <- as.h2o(test_woe %>% select(target,selected))
outcome <- "target"
features <- train_woe %>% select(selected) %>%
names()
model <- h2o.glm(
x = features,
y = outcome,
training_frame = train_h2o,
family = "binomial",
seed = 123,
nfolds = 5, #Number of folds for K-fold cross-validation
remove_collinear_columns = T, #Collinear columns can cause problems during model fitting. This option can only be used with the 'IRLSM' solver
#balance_classes = T,
max_runtime_secs = 180
)
model %>% h2o.auc() %>% round(2)
#model %>% h2o.giniCoef() %>% round(2)
model %>% h2o.performance(newdata = test_h2o) %>% h2o.auc() %>% round(2)
#model %>% h2o.performance(newdata = test_h2o) %>% h2o.giniCoef() %>% round(2)
# before balancing train_auc = 83, test_auc = 80
# after balancing train_auc = 81, test_auc = 76
model %>% h2o.std_coef_plot()
model@model$coefficients %>% as.data.frame() %>%
mutate(names = rownames(model@model$coefficients %>% as.data.frame())) %>%
`colnames<-`(c('coefficients','names')) %>%
select(names,coefficients) %>%
filter(coefficients != 0) %>%
arrange(desc(coefficients))
h2o.varimp(model) %>% as.data.frame() %>%
pull(percentage) %>% sum()
h2o.varimp(model) %>% as.data.frame() %>% .[.$percentage>0,] %>%
pull(variable) -> imp.vars
imp.vars %>% length()
h2o.varimp(model) %>% as.data.frame() %>% .[.$percentage != 0,] %>%
select(variable, percentage) %>%
hchart("pie", hcaes(x = variable, y = percentage)) %>%
hc_colors(colors = 'orange') %>%
hc_xAxis(visible=T) %>%
hc_yAxis(visible=T)
model %>% h2o.performance(newdata = test_h2o) %>%
h2o.find_threshold_by_max_metric('f1')
pred <- model %>% h2o.predict(newdata = test_h2o) %>% as.data.frame()
pred %>% select(predict) %>% table()
# scorecard
card <- bins %>% scorecard(model@model)
# credit score, only_total_score = TRUE
train_score <- train %>% scorecard_ply(card)
test_score <- test %>% scorecard_ply(card)
# psi
psi <- perf_psi(
score = list(train = train_score, test = test_score),
label = list(train = train$target, test = test$target)
)
psi$psi
#psi$pic
# only_total_score = FALSE
train_score2 <- train %>% scorecard_ply(card, only_total_score=FALSE)
test_score2 <- test %>% scorecard_ply(card, only_total_score=FALSE)
# psi
psi2 <- perf_psi(
score = list(train = train_score2, test = test_score2),
label = list(train = train$target, test = test$target)
)
psi2$psi
# AUC
perf <- h2o.performance(model, train_h2o)
train_auc<-h2o.auc(perf, xval = TRUE)
perf <- h2o.performance(model, test_h2o)
test_auc<-h2o.auc(perf, xval = TRUE)
tibble(train_auc, test_auc)
# H2O.AUTOML MODEL
data_iv$target <- as.factor(data_iv$target)
h2o.init()
h2o_data <- as.h2o(data_iv)
# Splitting the data
h2o_data <- h2o.splitFrame(h2o_data,ratios = c(0.7,0.15),seed=123)
train<-h2o_data[[1]]
validation<-h2o_data[[2]]
test<-h2o_data[[3]]
outcome <- 'target' # y - variable which we want to predict
features <- data_iv %>% select(-target) %>% names() # by using x variables predict y
# Fitting h2o model
model_automl <- h2o.automl(
x = features,
y = outcome,
training_frame = train,
validation_frame = validation,
leaderboard_frame = test,
stopping_metric = "AUC",
balance_classes = T,
seed = 123,
max_runtime_secs = 180)
model_automl@leader
|
626cee41b15ee62775ab9abd2ef4d3e12102722d
|
d42d1cd25bb3726c7a42caf4195eca2f2ac43c6e
|
/Pterois_volitans_ctrl_parameters.R
|
556de1539bdf4d0f36d6ef1cfcf651c382321b80
|
[] |
no_license
|
population-genetic-modeling-dis/seaINVADERS
|
e6f1b29eb7b8b0c96ec3a0f70c32406236714754
|
c6e7d06122bd61fd7c89320ac92f5fab52873d7e
|
refs/heads/master
| 2020-04-20T22:58:48.515603
| 2019-06-07T17:05:26
| 2019-06-07T17:05:26
| 169,156,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,438
|
r
|
Pterois_volitans_ctrl_parameters.R
|
#### Parameters for space_invaders.R model #####
NP <- 7 #Number of processors to use
MODEL_DURATION <- 50 #Number of years to run the model
NUM_BOOTSTRAPS <- 10 #Number of simulations to run with each source
#### Population Genetic Parameters ####
species <- "PtrVol"
source.name <- "Atlantic"
source.theta <- 7.63797
source.theta.sd <- 2.65535
source.hap <-c(194,172,24,19,14,19,15,1,1)
destination.name <- "Carribean"
destination.hap <-c(159,405,3,34,0,0,0,0,0)
#### Demographic Parameters for Model ####
MONTHS <- 12 #Number of "months" or portions to divide a year into
thin <- FALSE #Save only first and last month of simulated pops?
min_prop <- .25 #This is the minimum proportion of successful recruits
max_prop <- 1 #This is the maximum proportion of successful
prop_bins <- 2 #ex. If prop_increment=4, then 4 different props will be run, ex: (.25,.5,.75,1)
min_f_number <- 5 #This is the minimum # of females.
max_f_number <- 1000 #This is the maximum # of females.
f_bins <- 3 #ex. If f_increment=3, then three different numbers of female colonists will be run (min, min+(max-min)/2,max)
BIN <- 12 # Number of different age-classes
JUVI.MORT <- 0.165 # Juvenile mortality
ADULT.MORT <- 0.052 # Adult mortality
ADULT.FRAC <- 1-0.96 # Fraction of starting population that are adults*
# * Based on the empirical estimates of Belize 2014 lionfish sample
# on the forereef.
## Calculating Recruit per Individual
# This section takes egg and larval demographic parameters and calculates
# the monthly number of recruits per individual adult lionfish.
ADULT.FEM.FRAC <- 0.49 # Proportion of adults that are females
ADULT.FEM.MAT <- 0.79 # Proportion of females that are mature
FE <- 194577 # Fecundity: number of eggs per female per month
ME <- 0.31 # Egg mortality (days)
DE <- 3 # Egg duration (days)
ML <- 0.35 # Larval mortality (days)
DL <- 27 # Larval duration (days)
K <- 100000000 #Carrying capacity, used to modulate "birth rate" for logistic pop growth
#FE.sd <- 1 # Standard dev in fecundity, if enabled and >0 then fecundity will be stochastic within a bootstrap (same fecundity per year for a rep)
|
4fad1c91bce7c68164a1cce22361ed9739ead7d4
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkItemFactoryDeleteEntry.Rd
|
dd1aaef9b10bbbb50f0c99a6271b5461b7451e87
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 655
|
rd
|
gtkItemFactoryDeleteEntry.Rd
|
\alias{gtkItemFactoryDeleteEntry}
\name{gtkItemFactoryDeleteEntry}
\title{gtkItemFactoryDeleteEntry}
\description{
Deletes the menu item which was created from \code{entry} by the given
item factory.
\strong{WARNING: \code{gtk_item_factory_delete_entry} has been deprecated since version 2.4 and should not be used in newly-written code. }
}
\usage{gtkItemFactoryDeleteEntry(object, entry)}
\arguments{
\item{\code{object}}{[\code{\link{GtkItemFactory}}] a \code{\link{GtkItemFactory}}}
\item{\code{entry}}{[\code{\link{GtkItemFactoryEntry}}] a \code{\link{GtkItemFactoryEntry}}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
d07b6c0f5908363c496bf1f8bc1f1cad55cf68d4
|
ad25c879bd226c9a90609905732e3f11a83ca18d
|
/subsetting using dplyr.r
|
bf5d386a5b1cbd87c14d3a3a8b3286f439e1075d
|
[] |
no_license
|
VinayRana5674/R-Programming
|
e8556f707747b323f9b4552042d0557092b7a605
|
5a0f05c301e5eb8553028cda69cd1fd12ee52c4d
|
refs/heads/master
| 2020-06-16T02:32:59.331994
| 2019-10-21T07:52:09
| 2019-10-21T07:52:09
| 195,456,086
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,603
|
r
|
subsetting using dplyr.r
|
#Titanic Dataset
customerdata["cust1","prod"]
#List
l1=list(customerdata,b=c(20,30),mtcars)
#Accessing element from list
l1[[1]]
l1[[2]][1]
l1[[3]][2,1]
#Adding elemnt from list
l1[4]='a'
l1
#Replacing element
l1[[2]][3]=60
l1[[2]]
#Removing elements from list
l1=l1[-4]
l1[[2]][-3]
#-----------------titanic------------------------------------------
Titanic_train=read.csv(file.choose(),sep=",",header=T)
View(Titanic_train)
Titanic_test=read.csv(file.choose(),sep=",",header=T)
View(Titanic_test)
Titanic_gender=read.csv(file.choose(),sep=",",header=T)
View(Titanic_gender)
str(Titanic_train)
str(Titanic_test)
str(Titanic_gender)
ncol(Titanic_train)
ncol(Titanic_test)
ncol(Titanic_gender)
titanic_data=cbind(Titanic_test,Titanic_gender$Survived)
View(titanic_data)
titanic_data=cbind(Titanic_test,Survived=Titanic_gender$Survived)
View(titanic_data)
titanic_data=rbind(Titanic_train,titanic_data)
dim(Titanic_train)
dim(titanic_data)
summary(titanic_data)
subset(titanic_data,Survived==1)
subset(titanic_data,Survived==1,select = Age)
subset(titanic_data,Survived==1,select=c(Age,Sex))
subset(titanic_data,Survived==1 & Sex=='male')
nrow(subset(titanic_data,Survived==1 & Sex=='male'))
#percentage of mael survived out of whole dataset
nrow(subset(titanic_data,Survived==1 & Sex=='male'))/nrow(titanic_data)*100
#percentage of male survived
nrow(subset(titanic_data,Survived==1 & Sex=='male'))/nrow(subset(titanic_data,Sex=='male'))*100
#percentage of female survived
nrow(subset(titanic_data,Survived==1 & Sex=='female'))/nrow(subset(titanic_data,Sex=='female'))*100
#percentage of female survived out of all persons survived
nrow(subset(titanic_data,Survived==1 & Sex=='female'))/nrow(subset(titanic_data,Survived==1))*100
#find which class of passengers have given preference for survival
nrow(subset(titanic_data,Survived==1 & Pclass==1)) #this is only giving the number of Pclass==1
nrow(subset(titanic_data,Survived==1 & Pclass==2)) #this is only giving the number of Pclass==2
nrow(subset(titanic_data,Survived==1 & Pclass==3)) #this is only giving the number of Pclass==3
nrow(subset(titanic_data,Survived==1 & Pclass==1))/nrow(subset(titanic_data,Pclass==1))*100
nrow(subset(titanic_data,Survived==1 & Pclass==2))/nrow(subset(titanic_data,Pclass==2))*100
nrow(subset(titanic_data,Survived==1 & Pclass==3))/nrow(subset(titanic_data,Pclass==3))*100
for(i in 1:max(titanic_data$Pclass)){
print(nrow(subset(titanic_data,Survived==1 & Pclass==i))/nrow(subset(titanic_data,Pclass==i))*100)
}
|
bb644c8368759413f917b14ba4cb3f73f2fe2aa0
|
9e87c2e512327fafab69efea70dcd5a735878e9f
|
/server.R
|
166bd06552469b7487112f1d723109015d0a2cff
|
[] |
no_license
|
gracemshea/Coursera-Clickstream-ShinyApp
|
ed14935fa0773be8d8aaed62c382839da8cbbe4c
|
5bb6c752fdcacce4b5c09771b1a53b6021a6204d
|
refs/heads/master
| 2020-07-25T15:59:10.635677
| 2017-05-10T18:12:37
| 2017-05-10T18:12:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,353
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$intro_info = renderText({
"Datasets Description"
})
output$map <- renderLeaflet({
leaflet() %>%
addTiles(
urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
) %>%
setView(
lng = -93.85, lat = 37.45, zoom = 4
) %>%
addCircles(lng = location$lon, lat = location$lat, weight = 1, radius = location$location.agg*50,
popup = paste("Location:", location$V4,"<br>",
"Participation:", location$location.agg, "<br>",
"Percentage:", 100*(location$location.agg/119891),"%"))
})
output$cumulative <- renderPlotly({
temp <- NewData[which(NewData$key!='heartbeat'),]
temp <- temp[which(temp$key!='start'),]
temp <- temp[which(temp$key!='seek'),]
sub = subset(temp, temp$video_name == input$video)
#plot(ecdf(sub$timecode),main="Empirical Cumulative Distribution of the time by video", xlab="time by seconds")
den <- density(na.omit(sub$timecode), bw = 1,from = 0, to = max(sub$timecode))
plot_ly(x = ~den$x, y = ~den$y, type = 'scatter', mode = 'lines', fill = 'tozeroy') %>%
layout(
title = "Activity density plot (exclude the heartbeat) of the selected Video",
xaxis = list(title = 'Timecode'),
yaxis = list(title = 'Density'))
})
output$basic <- renderPrint({
sub = subset(NewData, NewData$video_name == input$video)
summary(sub$timecode)
})
#output$hist1 <- renderHighchart({
# sub = subset(NewData, NewData$video_name == input$video)
# hchart(sub$timecode) %>%
# hc_title(text = "Histogram of the time user spent by video") %>%
# hc_add_theme(
# hc_theme_flatdark(
# chart = list(
# backgroundColor = "transparent",
# divBackgroundImage = "http://www.wired.com/images_blogs/underwire/2013/02/xwing-bg.gif"
# )
# )
# )
#})
output$hist1 <- renderPlotly({
sub =subset(NewData[NewData$key == input$key1,], NewData[NewData$key == input$key1,]$video_name == input$video1)
#den <- density(na.omit(sub$timecode), bw = 0.04, from = 0, to = 1)
#density <- density(na.omit(NewData$timecode))
plot_ly(x = sub$timecode, type = "histogram", autobinx = T) %>%
#add_trace(density, x= density$x, y = density$y, type = 'scatter', mode = 'lines') %>%
layout(
title = "Histogram of the selected Video",
xaxis = list(
title = "Timecode",
showgrid = F),
yaxis = list(
title = "Count")
)
})
#output$boxplot1 <- renderPlotly({
# sub =subset(NewData, NewData$video_name == input$video2)
# sub <- sub %>%
# arrange(key)
##
# plot_ly(sub, color = ~key, x = ~timecode, type = 'box') %>%
# layout(autosize = T,
# title = "Boxplot for all the keys for selected video") %>%
# hide_legend()
# })
output$boxplot1 <- renderHighchart({
sub =subset(NewData, NewData$video_name == input$video2)
hcboxplot(x = sub$timecode, var = sub$key,name = "Length", color = "#2980b9")
})
output$plot2 <- renderPlotly({
sub2 =subset(NewData[NewData$key == input$key2,], NewData[NewData$key == input$key2,]$video_name == input$video1)
plot_ly(sub2, x = ~timecode, y = ~key, color = ~key, type = 'scatter')
})
#output$BoxPlot <- renderPlot({
# sub = subset(NewData, NewData$video_name == input$video)
# #par(mar=c(5.1,8.1,4.1,2.1))
# boxplot(timecode~key, data=sub, horizontal = TRUE, names, outline = TRUE, col="bisque",las=1,
# at=rank(-tapply(sub$timecode, sub$key, mean),na.last=NA))
# title("Comparing boxplot()s by different keys")
#})
})
|
801b2315bfafaa04fab75485dd115c3655bf63ca
|
48cbb955ea27365c1266b6bedd1f2f56288615d1
|
/test_sampling_r_package.R
|
1231d923ff80b88ca35b4b6d1ffab04048cb62c4
|
[
"CC0-1.0"
] |
permissive
|
fschirr/sampling_r_package
|
015dc1ed44834c043d57f742998133735d789d9c
|
4703c8c2dc8b2cdd7d5d1f773fa96eb3444081e1
|
refs/heads/master
| 2021-01-17T04:48:23.200150
| 2016-06-13T13:38:07
| 2016-06-13T13:38:07
| 38,422,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,534
|
r
|
test_sampling_r_package.R
|
# Prepare and run simulations
volunteerdata <- CreateEcologist(50, 50, 60, 50, 10)
expertdata <- CreateEcologist(50, 80, 10, 0, 200)
system.time(samplingresult <- Sampling(Papilio, 100, expertdata, volunteerdata,
100, 1, 0, 0, outputall=F))
system.time(evaluationresult <- Evaluation(Papilio, 100, expertdata,
volunteerdata, 0, 1, 0, 0,
outputall=F, 100, 5))
evaluationresult <- NULL
evaluationresult <- Evaluation(Papilio, 100, expertdata, volunteerdata,
20, 1, 0, 0, outputall=F, 10, 5)
volunteer.evalu <-evaluationresult
expert.evalu <-evaluationresult
volunteer.evalu$slope.reduction <- evaluationresult[, 4] /
evaluationresult[, 2] * -100 * 30
expert.evalu$slope.reduction <- evaluationresult[, 4] /
evaluationresult[, 2] * -100 * 30
evaluationresult$slope.reduction <- evaluationresult[, 4] /
evaluationresult[, 2] * -100 * 30
dataoutput100
#summaryresult <- summarySE(evaluationresult, measurevar = "num.of.individuals",
#groupvars = c("num.plots","years"))
summaryresult <- summarySE(volunteer.evalu[-1, ],
measurevar = "slope.reduction",
groupvars = "num.plots")
ggplot(summaryresult, aes(x = num.plots, y = slope.reduction)) +
geom_errorbar(aes(ymin = slope.reduction - sd, ymax = slope.reduction + sd),
width = .1, colour="blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Percent of lost population") +
geom_hline(yintercept = volunteer.evalu[1, 7]) +
coord_cartesian(ylim = c(45, 85))
summaryresult <- summarySE(expert.evalu[-1, ],
measurevar = "slope.reduction",
groupvars = "num.plots")
ggplot(summaryresult, aes(x = num.plots, y = slope.reduction)) +
geom_errorbar(aes(ymin = slope.reduction - sd, ymax = slope.reduction + sd),
width = .1, colour="blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Percent of lost population") +
geom_hline(yintercept = expert.evalu[1, 7]) +
coord_cartesian(ylim = c(45, 85))
#plot "truth"
boxplot(num.of.individuals ~ year,data = Papilio)
plot(Papilio$year,Papilio$num.of.individuals, xlab = "Time in years",
ylab = "Number of individuals")
reg1 <- lm(Papilio$num.of.individuals ~ Papilio$year)
summary(reg1)
coef(reg1)
abline(reg1)
ggplot(Papilio, aes(x = year, y = num.of.individuals)) +
geom_point(size = 3) +
xlab("Time in years") +
ylab("Number of individuals") +
stat_smooth(method = "lm", se = FALSE, size = 2)
#plot sampling
#evaluation
ggplot(summaryresult, aes(x = num.plots, y = slope.reduction)) +
geom_errorbar(aes(ymin = slope.reduction - sd, ymax = slope.reduction + sd),
width = .1, colour="blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Percent of lost population") +
geom_hline(yintercept = volunteer.evalu[1, 7]) #+
coord_cartesian(ylim = c(45, 85))
ggplot(evaluationresult, aes(x = num.plots, y = slope.in.percent)) +
geom_point()
ggplot(summaryresult, aes(x = num.plots, y = slope.in.percent)) +
geom_errorbar(aes(ymin = slope.in.percent - ci, ymax = slope.in.percent + ci),
width = .1, colour = "blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Quotient of sample and real slope with 95% confidence interval") #+
# coord_cartesian(ylim = c(93, 107))
ggplot(summaryresult, aes(x = num.plots, y = slope.in.percent)) +
geom_errorbar(aes(ymin = slope.in.percent - se, ymax = slope.in.percent + se),
width = .1, colour="blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Quotient of sample and real slope with 95% confidence interval") #+
# coord_cartesian(ylim = c(93, 107))
ggplot(summaryresult, aes(x = num.plots, y = slope.in.percent)) +
geom_errorbar(aes(ymin = slope.in.percent-sd, ymax = slope.in.percent+sd),
width = .1, colour="blue") +
geom_line(colour = "red") +
geom_point(size = 3) +
xlab("Number of plots") +
ylab("Quotient of sample and real slope in percent with standard deviation") #+
# coord_cartesian(ylim = c(90, 110))
debug(Evaluation)
undebug(Evaluation)
debug(Sampling)
undebug(Sampling)
|
3e12d4c3970ac1c451abc473d47db376a187e0af
|
5bd7f231f3d30e08bca887eb92c115fbc1d01eae
|
/Data cleansing/data_cleaning2.R
|
f32833297c83bfb7e476b79d680fd9b003f271c5
|
[] |
no_license
|
yhuai001/DataScience-JohnsHopkins-R
|
cbc9f7844c49c2b995eca3e75f6e8b1215dfe6d1
|
aa92a8320e986adf470046fcc576493c865e3097
|
refs/heads/master
| 2020-03-19T17:43:44.910748
| 2018-06-10T03:35:22
| 2018-06-10T03:35:22
| 136,774,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,227
|
r
|
data_cleaning2.R
|
#MySQL
ucscDb <- dbConnect(MySQL(), user="genome",
host="genome-mysql.cse.ucsc.edu")
result <- dbGetQuery(ucscDb, "show databases;"); dbDisconnect(ucscDb);
head(result)
hg19 <- dbConnect(MySQL(), user="genome", db="hg19",
host="genome-mysql.cse.ucsc.edu")
allTables <- dbListTables(hg19)
length(allTables)
allTables[1:5]
dbListFields(hg19,"affyU133Plus2")
dbGetQuery(hg19, "select count(*) from affyU133Plus2")
affyData <- dbReadTable(hg19, "affyU133Plus2")
head(affyData)
query <- dbSendQuery(hg19, "select*from affyU133Plus2 where misMatches between 1 and 3")
affyMis <- fetch(query); quantile(affyMis$misMatches)
affyMisSmall <- fetch(query, n=10)
dbClearResult(query);
dim(affyMisSmall)
dbDisconnect(hg19)
#hdf5
source("http://bioconductor.org/biocLite.R")
biocLite("rhdf5")
library(rhdf5)
created = h5createGroup("example.h5", "foo")
created = h5createGroup("example.h5", "baa")
created = h5createGroup("example.h5", "foo/foobaa")
h5ls("example.h5")
A = matrix(1:10, nr=5, nc=2)
h5write(A, "example.h5", "foo/A")
B = array(seq(0.1, 2.0, by=0.1), dim=c(5,2,2))
attr(B, "scale") <- "liter"
h5write(B, "example.h5", "foo/foobaa/B")
h5ls("example.h5")
df = data.frame(1L:5L, seq(0,1,length.out = 5),
c("ab","cde","fghi", "a", "s"), stringAsFactors = FALSE)
h5(df, "example.5", "df")
h5ls("example.h5")
readA = h5read("example.h5", "foo/A")
readB = h5read("example.h5", "foo/foobaa/B")
readdf = h5read("example.h5", "df")
readA
h5write(c(12,13,14), "example.h5", "foo/A", index=list(1:3,1))
h5read("example.h5", "foo/A")
#Web
library(XML)
url <- "https://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html <- htmlTreeParse(url, useInternalNodes = T)
xpathSApply(html, "//title", xmlValue)
xpathSApply(html, "//td[@id='col-citedby']", xmlValue)
library(httr)
html2 = GET(url)
content2 = content(html2, as="text")
parsedHtml = htmlParse(content2, asText=TRUE)
xpathSApply(parsedHtml, "//title", xmlValue)
pg1 = GET("http://httpbin.org/basic-auth/user/passwd")
pg1
pg2 = GET("http://httpbin.org/basic-auth/user/passwd",
authenticate("user","passwd"))
pg2
names(pg2)
google = handle("http://google.com")
pg1 = GET(handle = google, path='search')
#API
myapp = oauth_app("twitter",
key="qKkJ4rNbLZJI42h4w72LlA799", secret="Jx505cjdsm")
sig = sign_oauth1.0(myapp,
token = "yourTokenHere",
token_secret = "yourTokenSecretHere")
homeTL = GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)
json1 = content(homeTL)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1, 1:4]
#other
library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at
# https://github.com/settings/developers. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#
# Replace your key and secret below.
myapp <- oauth_app("github",
key = "fe6aeefadf80893f482b",
secret = "fa6f93542b8baeb8710a64248ef00c856c6161db")
# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/rate_limit", gtoken)
stop_for_status(req)
content(req)
#quiz2
library(sqldf)
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv", destfile = "quiz2data.csv")
acs <- read.csv("quiz2data.csv")
sqldf("select pwgtp1 from acs where AGEP < 50")
sqldf("select distinct AGEP from acs")
#quiz4
require(httr);require(XML)
URL <- url("http://biostat.jhsph.edu/~jleek/contact.html")
lines <- readLines(URL)
close(URL)
c(nchar(lines[10]), nchar(lines[20]), nchar(lines[30]), nchar(lines[100]))
#quiz5
quiz5 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
widths <- c(1, 9, 5, 4, 1, 3, 5, 4, 1, 3, 5, 4, 1, 3, 5, 4, 1, 3)
fixed <- read.fwf(quiz5, widths, header = FALSE, skip = 4)
sum(fixed$V8)
|
0a4cfc187d5950cb875b293eb0976c113885585d
|
8876242d9cbaa59a781aba848b1a2213aa14e16c
|
/leaflet.R
|
381b5dc0ed34222e8f178c4445fb34a2d33f1f26
|
[
"Apache-2.0"
] |
permissive
|
ajsmit/SACTN
|
fb4d21da869ea2f16eda97a65b701e51969602c3
|
fc933ce22082a4b431cb75285bc13f74d3e4ef8c
|
refs/heads/master
| 2021-01-17T00:05:59.831413
| 2018-06-19T14:54:54
| 2018-06-19T14:54:54
| 46,488,915
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 699
|
r
|
leaflet.R
|
library(leaflet)
sites <- read.csv("metadata/site_list_v4.2.csv")
pal <- colorFactor(palette = c("yellow", "orange", "green", "red", "turquoise", "navy", "blue"),
domain = c("DAFF", "DEA", "EKZNW", "KZNSB", "SAEON", "SAWS", "UWC"))
content <- paste(sep = "<br/>", "<b>", sites$site, "</b>", sites$src, sites$type)
m1 = leaflet() %>%
addProviderTiles("Esri.OceanBasemap",
options = providerTileOptions(opacity = 1.0)) %>%
addCircleMarkers(lng = sites$lon, lat = sites$lat, group = sites$src,
popup = content, radius = 5, color = pal(sites$src),
stroke = FALSE, fillOpacity = 0.8) %>%
setView(24, -31, zoom = 6)
m1
|
b9885a967d848b2e4613d0cedc599e38c93d65f6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GENLIB/examples/gen.max.Rd.R
|
a0eeafafaa0b05a2736ab8160765a692406483b7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
gen.max.Rd.R
|
library(GENLIB)
### Name: gen.max
### Title: Maximum number of generations
### Aliases: gen.max
### Keywords: manip
### ** Examples
data(geneaJi)
genJi<-gen.genealogy(geneaJi)
gen.min(genJi,c(17,26))
gen.mean(genJi,c(17,26))
gen.max(genJi,c(17,26))
data(genea140)
gen140<-gen.genealogy(genea140)
gen.min(gen140,c(18311,18430))
gen.mean(gen140,c(18311,18430))
gen.max(gen140,c(18311,18430))
|
1e6e56cab9d768a9866b3f6659e30fec1955ac5b
|
94ad4efa72e9ac54513b259dc4c665d49fec80ee
|
/iif/brainstorm_examples/pageresize.R
|
4cb835c064ebe12086d37c35de9bbd6fb8a3be2c
|
[] |
no_license
|
schwarzm3283/shiny-server
|
e4f67a9c6b471125b31b8d4cea5385de4e2426c2
|
0e576165d8d7afddd50e7c4159ac0c05e1b0298e
|
refs/heads/master
| 2021-01-17T05:07:53.447205
| 2017-06-27T17:52:27
| 2017-06-27T17:52:27
| 83,227,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
pageresize.R
|
library(shiny)
library(plotly)
# make a chart that we will use throughout
# this is just for convenience in this example
p <- ggplotly(
ggplot(mtcars, aes(x=mpg,y=hp,color=factor(cyl))) +
geom_point() +
facet_wrap(~cyl, ncol=1)
) %>%
layout(margin=list(r=100, l=70, t=20, b=70))
# for better layout
# fluidPage and flexdashboard offers lots of helpers
# but let's see how we can do it in old-school html/css
ui <- tagList(
numericInput("nplot","Number of plots",2),
uiOutput(
'chartcontainer'
)
)
server <- function(input, output, session) {
output$chartcontainer <- renderUI({
tagList(
lapply(
seq_len(input$nplot),
function(x){
htmltools::tags$div(
style="display:block;float:left;width:45%;height:50%;",
tags$h3(paste0("plot #",x)),
#NOTE: inside of renderUI, need to wrap plotly chart with as.tags
htmltools::as.tags(p)
)
}
)
)
})
}
shinyApp(ui,server)
|
600464e8168b499ee32c96df01d2bf3709bca098
|
2c86fa2bfd5796570aeca475f86b7d7d6a052ddb
|
/app.R
|
49dfdd49e6ce2c1c2a7bab0d755636067aad4a48
|
[] |
no_license
|
lakshyaag/CrimeInIndia
|
f873eed3b8362d1bccf8ec8a6df69de53dc0422d
|
0947caa704e3de235919e8f3c5e542e4d6e3e5b4
|
refs/heads/master
| 2020-07-14T22:57:03.641824
| 2020-03-18T08:17:31
| 2020-03-18T08:17:31
| 205,421,045
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,483
|
r
|
app.R
|
library(dplyr)
library(tidyverse)
library(ggplot2)
library(ggalt)
library(forcats)
library(R.utils)
library(png)
library(grid)
library(ggpubr)
library(scales)
library(bbplot)
library(ggthemes)
library(colourpicker)
library(DT)
library(shinythemes)
crime <- read.csv('crime_2016_tidy.csv')
# List of crimes
crime_list <- unique(crime$crime)
# List of states
state_list <- unique(crime$state)
# Overview data
overview_data <- crime %>% group_by(state) %>% filter(crime == 'Total Cognizable IPC Crime')
# IPC Table
ipc_main <- as.data.frame(list(unique(crime$crime)), col.names = c('main')) %>%
mutate(Section = main %>% str_extract('(Sec.*)') %>% str_replace('(IPC.*)', ''), Description = main %>% str_replace('(Sec.*IPC)', '') %>% str_remove_all('-')) %>%
select(-main) %>%
drop_na()
# Plot Crime Data
plot_crime <- function(crime_type, color){
plot <- ggplot(crime %>% filter(crime == crime_type), aes(x=reorder(state, incidents), y=incidents)) +
geom_col(fill=color) +
geom_hline(yintercept = 0, size = 1, colour="#333333") +
guides(fill=F) +
bbc_style() +
labs(title=paste('Incidents of', crime_type), subtitle = 'Crime Statistics, 2016', y='Number of incidents') +
coord_flip() +
scale_y_continuous(labels = scales::comma) +
theme(axis.text.x = element_text(margin=margin(t = 13, b = 10)), panel.grid.major.x = element_line(color="#cbcbcb"),
panel.grid.major.y=element_blank())
finalise_plot(plot, source = 'Source: National Crime Record Bureau', save_filepath = paste(crime_type, '.jpg', sep = ''),
width_pixels = 1000, height_pixels = 1000)
}
# Plot State Data
plot_state <- function(state_name, color, numberOfCrimes){
# Filtering top 10 crimes by incidents in selected state
state_crime_data <- crime %>% filter(state == state_name & !crime %in% c('Total Cognizable IPC Crime', 'Other IPC Crimes')) %>%
top_n(numberOfCrimes, wt = incidents) %>% mutate(crime = str_extract(crime, '(Sec.*)'))
plot <- ggplot(state_crime_data, aes(x=reorder(crime, incidents), y=incidents)) +
geom_col(fill=color) +
geom_hline(yintercept = 0, size = 1, colour="#333333") +
guides(fill=F) +
bbc_style() +
labs(title=paste('Incidents in', state_name), subtitle = 'Crime Statistics, 2016', y='Number of incidents') +
scale_y_continuous(labels = scales::comma) +
coord_flip() +
theme(axis.text.x = element_text(margin=margin(t = 13, b = 10)), panel.grid.major.x = element_line(color="#cbcbcb"),
panel.grid.major.y=element_blank())
finalise_plot(plot, source = 'Source: National Crime Record Bureau', save_filepath = paste(state_name, '.jpg', sep = ''),
width_pixels = 1000, height_pixels = 1000)
}
# App begins here
library(shiny)
# Define UI for application
ui <- fluidPage(theme = shinytheme('flatly'),
# Application title
titlePanel("Crime In India"),
tabsetPanel(
tabPanel('Overview',
sidebarLayout(
sidebarPanel(
h2('Overview of Crimes'),
p('This chart shows the total number of cognizable IPC crimes in every state, sorted by number of incidents'),
DT::dataTableOutput('stateOverviewTable')
),
# Overivew
mainPanel(
plotOutput("overviewPlot", height = '750px')
)
)
),
tabPanel('Crime-wise',
# Crime-Wise
sidebarLayout(
sidebarPanel(
selectizeInput('typeCrime', 'Type of crime', crime_list, multiple = F),
colourInput('colorPlotCrime', 'Select color', value='#BA1A52')
),
# Show a plot
mainPanel(
plotOutput("crimePlot", height = '750px')
)
)
),
tabPanel('State-Wise',
# State-Wise
sidebarLayout(
sidebarPanel(
selectizeInput('whichState', 'Select State/UT', state_list, multiple = F),
sliderInput('numberOfCrimes', 'Select maximum number of crimes', min=5, max=20, value = 10, step=1),
colourInput('colorPlotState', 'Select color', value='#BA1A52')
),
# Show a plot
mainPanel(
plotOutput("statePlot", height = '750px')
)
)
),
tabPanel('Indian Penal Code - Section Descriptions',
# State-Wise
sidebarLayout(
sidebarPanel(
h3('The table on the right gives a description of the sections of the Indian Penal Code'),
p('Use the search box to filter a particular section of the IPC')
),
# Show a plot
mainPanel(
DT::dataTableOutput("IPCTable")
)
)
)
)
)
# Define server logic
server <- function(input, output) {
output$crimePlot <- renderPlot({
plot_crime(input$typeCrime, input$colorPlotCrime)
})
output$statePlot <- renderPlot({
plot_state(input$whichState, input$colorPlotState, input$numberOfCrimes)
})
output$overviewPlot <- renderPlot({
overview_plot <- ggplot(overview_data,
aes(x=reorder(state, -incidents), y=incidents)) +
geom_col(fill='#F63231') +
geom_hline(yintercept = 0, size = 1, colour="#333333") +
guides(fill=F) +
bbc_style() +
labs(title='Overview of crimes', subtitle = 'Crime Statistics, 2016', y='Number of incidents') +
scale_y_continuous(labels = scales::comma, breaks = c(0, 50000, 100000, 150000, 200000, 250000)) +
scale_x_discrete(labels=abbreviate) +
theme(axis.text.x = element_text(angle = 90))
finalise_plot(overview_plot, source = 'Source: National Crime Record Bureau', save_filepath = 'overview_plot.jpg',
width_pixels = 1500, height_pixels = 1000)
})
output$stateOverviewTable <- DT::renderDataTable({overview_data %>% select(state, incidents) %>%
rename('State' = state, 'No. of incidents' = incidents)},
selection = 'none',
options = list(order = list(list(1, 'desc')), dom='pt', pagingType = 'numbers'),
rownames = F)
output$IPCTable <- DT::renderDataTable({ipc_main},
selection = 'none', options = list(dom='ptf', pagingType='numbers'),
rownames = F)
}
# Run the application
shinyApp(ui = ui, server = server)
|
3eb306579dd8b70b1a989e3cfaaf904f951b7921
|
36e3621c72b88f1d13f40acde975161e9361146d
|
/4_file_paths/summarise_locations.R
|
4fb4144b9dbfd246c40656ca25c7d7652c312241
|
[] |
no_license
|
ghoresh11/ecoli_genome_collection
|
65d84deb833593e861a507f9505d1e295a83983a
|
a8bf70b812ca7fca48e6fafbf01c0768a4a0b313
|
refs/heads/master
| 2023-01-18T15:25:15.042316
| 2020-11-27T07:38:34
| 2020-11-27T07:38:34
| 171,271,168
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,880
|
r
|
summarise_locations.R
|
library(ggplot2)
library(RColorBrewer)
setwd("/Users/gh11/e_colis/genomes/4_file_paths/")
md_with_locs_prev = read.table("metadata_fixed_with_loc_prev.csv",
sep = "\t", stringsAsFactors = F, comment.char = "", header =T)
md_with_locs = read.table("metadata_fixed_with_loc.csv",
sep = "\t", stringsAsFactors = F, comment.char = "", header =T)
## print out what the options are for the location columns
assembly_fails = sort(table(md_with_locs$Assembly_Location), decreasing = T)[1:2]
annot_fails = sort(table(md_with_locs$Annotation_Location), decreasing = T)[1]
read_fails = sort(table(md_with_locs$Reads_Location), decreasing = T)[1]
### summary of the assemblies
summary = data.frame( ID = md_with_locs$ID,
name = md_with_locs$Name,
make_artificial = md_with_locs$Make_artificial,
read_ids = md_with_locs$Run_ID,
source = md_with_locs$Publication,
loc = md_with_locs$Assembly_Location,
cat = rep("NA", dim(md_with_locs)[1]), stringsAsFactors = F)
summary$cat[which(summary$make_artificial == "No" &
summary$loc != "No assembly")] = "Reads and assembly available"
summary$cat[which(summary$make_artificial == "No" &
summary$loc == "No assembly")] = "Reads available, assembly required" ##these should be gone
summary$cat[which(summary$make_artificial == "Yes" &
summary$loc != "Not found")] = "Assembly available, artifical reads required"
summary$cat[which(summary$make_artificial == "Yes" &
summary$loc == "Not found")] = "Problem: neither reads or assembly available" ## these were removed entirely
for_plot = data.frame(table(summary$cat))
for_plot$Var1 = factor(for_plot$Var1,
c( "Reads and assembly available", "Reads available, assembly required",
"Assembly available, artifical reads required","Problem: neither reads or assembly available"))
ggplot(for_plot, aes(x = Var1, y = Freq)) + geom_bar(stat = "identity") +
xlab("") + ylab("Number of genomes")+ theme_bw(base_size = 16) +
theme(panel.border = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black")) + theme(axis.text.x = element_text(angle = 40, hjust = 1)) +
scale_y_continuous(expand = c(0,0)) + ggtitle(paste("Total number of genomes:", sum(for_plot$Freq)))
### here: make sure I have the location of everything!! There should only be a few that failed and cannot be located!!
## how many are still missing assemblies?
missing_loc = md_with_locs[which(md_with_locs$Assembly_Location == "No assembly" |
md_with_locs$Annotation_Location == "No annotation" |
md_with_locs$Reads_Location == "No reads"),c(1, 2, 16, 18, 19, 20)]
write.table(x = no_assembly, file = "no_assembly.csv", sep = ",",
col.names = T, row.names = F, quote = F)
missing_loc = which(md_with_locs$Assembly_Location == "No assembly" |
md_with_locs$Annotation_Location == "No annotation" |
md_with_locs$Reads_Location == "No reads")
md_with_locs = md_with_locs[-missing_loc,]
### final list of genomes with all the locations of the relevant files
write.table(x = md_with_locs, file = "../final_metadata_with_loc.csv", sep = "\t",
col.names = T, row.names = F, quote = F)
#### ALl of the below add up EXACTLY to the dimentions of the missing files DF ####
### 1. Have assembly + annotation but no reads
no_reads = which(missing_loc$Assembly_Location != "No assembly" &
missing_loc$Annotation_Location != "No annotation")
length(no_reads)
write.table(unlist(strsplit(missing_loc$Assembly_Location[no_reads], split = ",")),
file = "missing/artificial_reads.txt", col.names = F,
row.names = F, quote = F)
## have annotation but no assembly
no_assembly_w_annot = which(missing_loc$Assembly_Location == "No assembly" &
missing_loc$Annotation_Location != "No annotation")
length(no_assembly_w_annot)
write.table(unlist(strsplit(missing_loc$Annotation_Location[no_assembly_w_annot], split = ",")),
file = "missing/retrieve_assembly.txt", col.names = F,
row.names = F, quote = F)
### have annotation but no assembly -> require to run prokka
no_annot_w_assembly = which(missing_loc$Assembly_Location != "No assembly" &
missing_loc$Assembly_Location != "Not found" &
missing_loc$Annotation_Location == "No annotation")
length(no_annot_w_assembly)
write.table(unlist(strsplit(missing_loc$Assembly_Location[no_annot_w_assembly], split = ",")),
file = "missing/run_prokka.txt", col.names = F,
row.names = F, quote = F)
## have reads but no assembly or annotation -> need to run assembly and annotation
no_assembly_or_annot_but_reads = which(missing_loc$Assembly_Location == "No assembly" &
missing_loc$Annotation_Location == "No annotation" &
missing_loc$Reads_Location != "No reads")
length(no_assembly_or_annot_but_reads)
write.table(unlist(strsplit(missing_loc$Reads_Location[no_assembly_or_annot_but_reads], split = ",")),
file = "missing/run_assembly_prokka.txt", col.names = F,
row.names = F, quote = F)
## can't help with these
nothing = which(missing_loc$Assembly_Location == "No assembly" &
missing_loc$Annotation_Location == "No annotation" &
missing_loc$Reads_Location == "No reads")
print(length(nothing))
|
dbeced23958ac8d40b9f8a509c8d4c13555cfbf9
|
7f676cf7d612fd55e4f4bd55c7bb6460d5159907
|
/gibbs/ui.R
|
7bd73499ac1da17993c81df0f41346d65025b660
|
[] |
no_license
|
Delvis/tutorial_bayesiano
|
f5f8e86b01e9b1eca42ad2f3f3cb6bde9496b0da
|
d22af15823ca8ba76f4b87febc762d61719c7b50
|
refs/heads/master
| 2021-01-15T08:43:22.045915
| 2014-10-22T08:18:44
| 2014-10-22T08:18:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
ui.R
|
library(shiny)
shinyUI(fluidPage(
#h1("GIBBS SAMPLER"),
absolutePanel(
bottom = 20, right = 0, width = 300,
draggable = TRUE,
wellPanel(
sliderInput("thin",
"Número de Adelgazamiento(thinning):",
value = 1,
min = 1,
max = 10),
sliderInput("pasos",
"Número de Pasos:",
value = 100,
min = 100,
max = 10000)
),
style = "opacity: 0.92"
),
absolutePanel(
top = 0, left = 0, right = 0,
fixed = TRUE,
tabsetPanel(type = "tabs",
tabPanel("2D", plotOutput("D2")),
tabPanel("3D", plotOutput("D3"))
)
)
)
)
|
247e04b8ebcd44d2a6f45bf9101b88e2262a96ca
|
455672ec0a682cc0aa628b77b308a32b25cbe667
|
/Codes/Project.R
|
3a08a7fe51b591e7c8a5bb2126edd37a8c7dd182
|
[] |
no_license
|
the-eon-flux/Regression_Models
|
36678361e24957bec079a780f92161df23e5450a
|
18bd17ffbbefeeed24382f4a3446fc8edf78f074
|
refs/heads/master
| 2023-01-02T11:35:20.009163
| 2020-10-23T17:15:41
| 2020-10-23T17:15:41
| 299,348,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
Project.R
|
# Load data
data("mtcars")
mtcars$am_X <- 1 * mtcars$am ==0
fit <- lm(mpg~factor(am)-1, mtcars)
summary(fit)
anova(fit)
plot(fit)
|
fe5a9e0a712f9844a2ea0f8f60525c7ec339ee37
|
4571cdfa21e4150b68ef8b15103032d354ab4a54
|
/man/listMissing.Rd
|
1389701e8a1baec172ea509a5038a545bbe956ed
|
[] |
no_license
|
shraddhapai/dataExplore
|
97a0eb7767a1228455208406cbfb09f4a119ee5d
|
047baadf8efdc1f24bf16ab7c987cac9e9747034
|
refs/heads/master
| 2021-03-13T02:03:55.047042
| 2020-03-06T01:12:02
| 2020-03-06T01:12:02
| 91,477,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 981
|
rd
|
listMissing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listMissing.R
\name{listMissing}
\alias{listMissing}
\title{flags variables and samples exceeding certain level of missingness}
\usage{
listMissing(x, maxMissVar = 0.25, maxMissSamp = 0.25,
rmMissing = FALSE)
}
\arguments{
\item{x}{(matrix or data.frame) rows are variables, columns are samples
rownames must be var names and colnames must be sample names}
\item{maxMissVar}{(numeric (0.01,1)) vars with greater than maxMiss samples
missing will be flagged}
\item{maxMissSamp}{(numeric (0.01,1)) samples with greater than maxMissSamp
vars missing will be flagged}
\item{rmMissing}{(logical) if TRUE remove vars and samples that are missing}
}
\value{
if rmMissing is set to TRUE, returns the matrix after removing offending
variables and samples.
Otherwise prints names of such variables and samples to console.
}
\description{
flags variables and samples exceeding certain level of missingness
}
|
10cb84b255077a09ce06d5c1e725a810bcf26172
|
0bdf2f0ec3515abc0800d74fa74b16a2206e5187
|
/man/total_plastic.Rd
|
5359658e4e19e5192f0d88b836ea87fa6ec0f846
|
[] |
no_license
|
LiamDBailey/NatGeoDataViz
|
af533d31099e2c608c13046271a5ff846c26a516
|
3bff826f20f1a5cd689f34453012ade0c55d2fa7
|
refs/heads/master
| 2020-05-30T12:24:05.026020
| 2019-06-10T17:24:28
| 2019-06-10T17:24:28
| 189,733,378
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 417
|
rd
|
total_plastic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_data.R
\name{total_plastic}
\alias{total_plastic}
\title{Extract table with total plastic production over time}
\usage{
total_plastic(extracted_data)
}
\arguments{
\item{extracted_data}{Raw scraped data generated by package tabulizer.}
}
\value{
A dataframe.
}
\description{
Extract table with total plastic production over time
}
|
539f88591ad4f9c4867c7b025d5f3dcf633aa682
|
317bba03e6bb5d3e959890569a89c2fbb2b378c2
|
/Test/Two-Sample-Test/t-test.R
|
5eb597612d2393360d5ddf120ca9851c9501906c
|
[] |
no_license
|
jimcrozier/rattle
|
5d402963be454fb7d619490008dc773ed91453f7
|
14e873cdfe68a3549887e2300d7dee1a3f6c2197
|
refs/heads/master
| 2021-01-21T04:03:25.890240
| 2014-02-14T17:53:53
| 2014-02-14T17:53:53
| 16,208,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
t-test.R
|
# Use the fBasics package for statistical tests.
require(fBasics, quietly=TRUE)
# Perform the test.
locationTest(na.omit(crs$dataset[, "Sepal.Length"]), na.omit(crs$dataset[, "Petal.Length"]))
|
cd509eb42d8a1858dd0106d8179da218c1687fd3
|
8b5997cd77638ba7df64adf28e8fc81b7954ebb1
|
/clustering.based.on.direction.of.expression.R
|
9eb4cd178c1244ee0e90d9686994ea613e9353f0
|
[] |
no_license
|
imjoohyu/R.scripts.for.RNA-seq
|
6fefe2476ef6eaee71c5786e4bf6411469a668fd
|
b299a54f4e095025fc2818dbbb84401405e67cce
|
refs/heads/master
| 2020-06-10T20:45:49.736589
| 2018-01-29T23:36:40
| 2018-01-29T23:36:40
| 75,879,268
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,966
|
r
|
clustering.based.on.direction.of.expression.R
|
####################################################
#Cluster samples based on the direction of expression
#September 7, 2016
#Joo Hyun Im (ji72)
####################################################
#Create a table of direction of expression for each gene in each condition
#1 = Up, 0 = EE, -1 = Down
#This method reduces the degree of elevation or reduction in expression level because a 2-fold increase is labeled the same was as 4-fold increase, for instance.
#delete any previous input
rm(list=ls(all=TRUE))
#Load the data. Get the samples ready for DE analysis.
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/clustering/clustering.based.on.edgeR.results/")
########################################################################################################
#Part I. Unchallenged vs Infected dataset
########################################################################################################
#Read in the data file
total.DE.with.name.and.id = read.table("edgeR_unchallenged_vs_infected_all_genes_FC.txt", header=T) #11911 x 64
print("Filtering out the genes that have NA for p-val ..."); total.DE.with.name.and.id <- na.omit(total.DE.with.name.and.id)
total.with.names.sig.indicated = read.table("edgeR_unchallenged_vs_infected_all_genes_FC_sig_changed_to_Y_or_N.txt", header=T) #11911 x 64
#1. Process data
####################################################
#Convert 'Up' to 1, 'EE' to 0, 'Down' to -1
print("Converting the FC information to Up, Down, or Same depending on their degree of significance ...")
length.of.table <- as.numeric(dim(total.with.names.sig.indicated)[1]); width.of.table <- as.numeric(dim(total.with.names.sig.indicated)[2])
indicator <- NULL
for (i in 1:length.of.table){ #1, 2, 3, ... 11911
#cat("The value for k is: ", k, "\n" )
for (s in seq(3, width.of.table, 2)){ #3, 5, 7, ... 64
if (total.with.names.sig.indicated[i,s+1] == "Y"){ #if significant
indicator <- isTRUE(total.DE.with.name.and.id[i,s] > 0) #indicator shows that the direction is positive
#cat("indicator: ", indicator, "\n")
if (indicator == TRUE) { #If the case is Up-DEG
total.with.names.sig.indicated[i,s] = 1
}
else { #If the caseis Down-DEG
total.with.names.sig.indicated[i,s] = -1
}
}
else { #if not significant
total.with.names.sig.indicated[i,s] = 0
}
}
}
write.table(total.with.names.sig.indicated, file="individual.time.points/edgeR_unchallenged_vs_infected_all_genes_FC_converted_to_numeric_direction.txt", quote = F, row.names = F) #11911 x 64
#Only pull out directions and discard FDR Y/N
total.with.names.sig.indicated.dir.only = total.with.names.sig.indicated[,c(seq(3,63,2))] #all infections including virulent infections that only have 12hr time point. Exclude gene.name and gene.id.
#2. Cluster the conditions (samples) based on direction of expression
####################################################
#Traqnspose the dataframe -- rows: condition, columns: each gene's expression value
total.with.names.sig.indicated.dir.only.t = t(total.with.names.sig.indicated.dir.only)
clusters = hclust(dist(total.with.names.sig.indicated.dir.only.t))
plot(clusters, main="Hierarchical Clustering of DE between unchallenged and infected")
########################################################################################################
#Part II. Previous time point infected vs Present time point dataset
########################################################################################################
#delete any previous input
rm(list=ls(all=TRUE))
#Load the data. Get the samples ready for DE analysis.
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/clustering/clustering.based.on.edgeR.results/")
#Read in the data file
total.DE.with.name.and.id = read.table("edgeR_prev_infected_vs_present_infected_all_genes_FC.txt", header=T) #11911 x 56 -- excluding conditions that only have 12hr time point
print("Filtering out the genes that have NA for p-val ..."); total.DE.with.name.and.id <- na.omit(total.DE.with.name.and.id)
total.with.names.sig.indicated = read.table("edgeR_prev_infected_vs_present_infected_all_genes_FC_sig_changed_to_Y_or_N.txt", header = T) #11911 x 56
####################################################
#1. Process data
#Convert 'Up' to 1, 'EE' to 0, 'Down' to -1
print("Converting the FC information to Up, Down, or Same depending on their degree of significance ...")
length.of.table <- as.numeric(dim(total.with.names.sig.indicated)[1]); width.of.table <- as.numeric(dim(total.with.names.sig.indicated)[2])
indicator <- NULL
for (i in 1:length.of.table){ #1, 2, 3, ... 11911
#cat("The value for k is: ", k, "\n" )
for (s in seq(3, width.of.table, 2)){ #3, 5, 7, ... 56
if (total.with.names.sig.indicated[i,s+1] == "Y"){ #if significant
indicator <- isTRUE(total.DE.with.name.and.id[i,s] > 0) #indicator shows that the direction is positive
#cat("indicator: ", indicator, "\n")
if (indicator == TRUE) { #If the case is Up-DEG
total.with.names.sig.indicated[i,s] = 1
}
else { #If the caseis Down-DEG
total.with.names.sig.indicated[i,s] = -1
}
}
else { #if not significant
total.with.names.sig.indicated[i,s] = 0
}
}
}
write.table(total.with.names.sig.indicated, file="individual.time.points/edgeR_prev_infected_vs_present_infected_all_genes_FC_converted_to_numeric_direction.txt", quote = F, row.names = F) #11911 x 56
#Only pull out directions and discard FDR Y/N
total.with.names.sig.indicated.dir.only = total.with.names.sig.indicated[,c(seq(3,56,2))] #all infections excluding virulent infections that only have 12hr time point. Exclude gene.name and gene.id.
####################################################
#2. Cluster the conditions (samples) based on direction of expression
#Traqnspose the dataframe -- rows: condition, columns: each gene's expression value
total.with.names.sig.indicated.dir.only.t = t(total.with.names.sig.indicated.dir.only)
clusters = hclust(dist(total.with.names.sig.indicated.dir.only.t))
plot(clusters, main="Hierarchical Clustering of DE between n time point and n+1 timpe point")
########################################################################################################
#Part III. Factor analysis?
########################################################################################################
#3. Factor Analysis
fit <- factanal(total.with.names.sig.indicated.dir.only.t, 3, rotation="varimax")
print(fit, digits=2, cutoff=.3, sort=TRUE)
load <- fit$loadings[,1:2]
plot(load,type="n") # set up plot
#PCA?
fit = princomp(total.with.names.sig.indicated.dir.only, cor=T)
summary(fit)
loadings(fit)
fit$scores
|
00a94c6763ccbff94938bf85ea47da9a909796b6
|
5e5cf4ceec1ec61d94f31b066acde03313d34ab7
|
/rColorTable.R
|
0e868997c37c3e93d4df0d0b514dbd75a6d1d850
|
[] |
no_license
|
FloraKrivakTetley/rColorTable
|
db7fde18c0d8e2c0903889f71c87a39d5237bd8c
|
07a0aeabeaa64bfa956526b36cebb4654030c8e2
|
refs/heads/master
| 2021-06-06T23:24:12.254521
| 2016-07-08T13:17:23
| 2016-07-08T13:17:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,909
|
r
|
rColorTable.R
|
#### Make 2-page color table of all colors available in R ####
# R color convention is color, color1, color2, color3, and color4
# with shades getting progressively darker
# For example skyblue, skyblue1, skyblue2, skyblue3, skyblue4
# This table prints only the full name, and fullname4 if available
# This table does not show greyscale colors grey0:grey100
# R colors minus 100 shades of grey
# (presumably indices 152:361 of colors() are the grey/gray ramp -- TODO confirm)
cols = colors()[c(2:151,362:657)]
# Duplicate colors
# dupCols[[i]]: all names containing the digit i, i.e. the shade-i variants
# (assumes digits only ever appear as the trailing shade number -- TODO confirm)
dupCols = list()
for (i in 1:4){
dupCols[[i]] <- cols[grep(pattern = i,x = cols)]
}
# Family base names, obtained by stripping the "1" from the shade-1 names
dupNames = gsub(pattern = '1',replacement = '',x = dupCols[[1]])
# All Color Names (base names only: anything containing a shade digit 1-4 is excluded)
allNames = cols[!grepl('1',cols) & !grepl('2',cols) & !grepl('3',cols) & !grepl('4',cols)]
# Row position of each shaded family within allNames
indx = match(dupNames,allNames)
# Output color table: one row per base color, columns c1..c4 hold shade names.
# NOTE(review): the loop below assumes each dupCols[[i]] has exactly
# length(indx) elements, i.e. every family has all four shades -- verify.
output = data.frame(col = allNames,c1=NA,c2=NA,c3=NA,c4=NA,stringsAsFactors = F)
for (i in 1:4){
output[,i+1][indx] = dupCols[[i]]
}
# Set rectangles x-axis start and end points
# (one wide swatch for the base color, four narrow swatches for the shades)
x1 = c(0,3,3.5,4,4.5)
x2 = c(3,3.5,4,4.5,5)
# 2 page color chart
pdf('rColorTable.pdf',height = 10)
par(mar=c(0,6,0,6))
# First page: rows 1-69; base names on left axis, "<name>4" on right axis
plot(0, type="n", ylab="", xlab="",axes=FALSE, ylim=c(69,0), xlim=c(1,5))
axis(2,at = 1:69,labels = allNames[1:69],las=1,cex.axis=0.6)
axis(4,at= indx[indx<70],labels = paste(allNames[indx[indx<70]],4,sep=''),cex.axis=0.6,las=1)
# Draw one filled rectangle per non-NA cell of the color table
for (j in 1:69) {
for (i in 1:5) {
#k = j*5 + i
if (!is.na(output[j,i])){
rect(x1[i],j-0.5, x2[i],j+0.5, border="black", col=output[j,i])
}
}
}
# Second Page: rows 70-138, same layout as page one
plot(0, type="n", ylab="", xlab="",axes=FALSE, ylim=c(138,70),xlim=c(1,5))
axis(2,at = 70:138,labels = allNames[70:138],las=1,cex.axis=0.6)
axis(4,at= indx[indx>=70],labels = paste(allNames[indx[indx>=70]],4,sep=''),cex.axis=0.6,las=1)
for (j in 70:138) {
for (i in 1:5) {
#k = j*5 + i
if (!is.na(output[j,i])){
rect(x1[i],j-0.5, x2[i],j+0.5, border="black", col=output[j,i])
}
}
}
dev.off()
|
f81677db25f9c742f01b1443810daa04dec8cdbd
|
dd46e7fb0c626ff4ebdebc7c7108ed5f6575b11c
|
/man/plot_mlLoganfit.Rd
|
4e7a0de217b5bd00661bdf9e395067d0a5786eb7
|
[] |
no_license
|
eebrown/kinfitr
|
3222443d03ed17bf7c9a54d7681ab26d03383c77
|
3aa34c83dc77fb995841a36f65055e7769a90d9a
|
refs/heads/master
| 2020-04-08T13:38:25.448190
| 2018-04-05T17:36:01
| 2018-04-05T17:36:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 653
|
rd
|
plot_mlLoganfit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinfitr_mlloganplot.R
\name{plot_mlLoganfit}
\alias{plot_mlLoganfit}
\title{Plot: Multilinear Logan Plot}
\usage{
plot_mlLoganfit(mlloganout, roiname = NULL)
}
\arguments{
\item{roiname}{Optional. The name of the Target Region to see it on the plot.}
\item{mlloganout}{The output object of the multilinear Logan Plot fitting procedure.}
}
\value{
A ggplot2 object of the plot.
}
\description{
Function to visualise the fit of the multilinear Logan Plot model to data.
}
\examples{
plot_mlLoganfit(mlloganout)
}
\author{
Granville J Matheson, \email{mathesong@gmail.com}
}
|
40a3d19cec42b0d8fcbbdd537803c054b5794748
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggquickeda/examples/geom_kmband.Rd.R
|
2301852dc88dc222af0e6495ad8b10c218d6362a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
geom_kmband.Rd.R
|
library(ggquickeda)

### Name: GeomKmband
### Title: Display Kaplan Meier Curve
### Aliases: GeomKmband geom_kmband
### Keywords: internal

### ** Examples

library(ggplot2)

# Simulate 250 subjects: a binary group, log-normal event times whose mean
# depends on the group, and a ~75% event indicator.
grp <- rbinom(250, 1, .5)
surv_df <- data.frame(
  time = exp(rnorm(250, mean = grp)),
  status = rbinom(250, 1, .75),
  sex = grp
)

# Kaplan-Meier curves per group, with confidence bands overlaid.
km_plot <- ggplot(surv_df, aes(time = time, status = status, color = factor(sex)))
km_plot + geom_km() + geom_kmband()
|
ee97b0f31e8ce4f9fa8299affc81e8b1c2bcbdb0
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/hist.ungrouped.R
|
5b39694a8279d6fbe3bf80069e7ec55b663e6f1d
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 1,311
|
r
|
hist.ungrouped.R
|
# Histogram for "ungrouped" (discrete-resolution) data: one bin is created per
# distinct observed value, the x-axis is labeled at the observed values, and
# optional specification lines (LSL / target / USL) can be overlaid.
#
# Arguments:
#   x                  numeric vector of observations.
#   col                bar fill color (passed to hist).
#   main               plot title.
#   stat.lsl           lower specification limit; NA suppresses the line.
#   stat.lsl.label     label drawn at the LSL line.
#   stat.target        target value; NA suppresses the line.
#   stat.target.label  label drawn at the target line.
#   stat.usl           upper specification limit; NA suppresses the line.
#   stat.usl.label     label drawn at the USL line.
#   freq               TRUE for counts, FALSE for densities (passed to hist).
#   after.plot         hook invoked after plotting; receives x, freq, and the
#                      computed bin resolution.
#   ...                further arguments forwarded to hist().
#
# Returns, invisibly, the "histogram" object produced by hist().
hist.ungrouped <- function(
   x
  ,col = "lightblue"
  ,main = "Ungrouped Histogram"
  ,stat.lsl = NA
  ,stat.lsl.label = "LSL"
  ,stat.target = NA
  ,stat.target.label = "TGT"
  ,stat.usl = NA
  ,stat.usl.label = "USL"
  ,freq = TRUE                      # was T; TRUE is safer (T can be rebound)
  ,after.plot = function (x, ...) {}
  ,...
  ) {
  argext <- list(...)

  # Bin width ("resolution") = smallest gap between distinct observed values,
  # floored at 1e-7 to avoid a zero width. With a single distinct value,
  # diff() is empty and min() would return Inf; fall back to a width of 1.
  dist.ungrouped <- frequency.dist.ungrouped(x, na.rm = TRUE)
  values <- dist.ungrouped$value
  gaps <- diff(unique(sort(values)))
  resolution <- if (length(gaps) == 0) 1 else max(min(gaps), 1e-7)
  breaks.tentative <- seq(min(values) - resolution, max(values) + resolution, resolution)

  # NOTE(review): if the caller passes xaxt via ..., hist() receives the
  # argument twice (once explicitly, once through ...) -- confirm intended use.
  ret <- hist(x, col = col, breaks = breaks.tentative, main = main,
              xaxt = "n", freq = freq, ...)

  # Draw our own axis, labels centered under the bars, unless the caller
  # explicitly suppressed the axis with xaxt = "n".
  if (!identical(argext[["xaxt"]], "n")) {
    axis(1, at = breaks.tentative - .5 * resolution, labels = breaks.tentative)
  }

  # Optional specification lines.
  if (!is.na(stat.lsl)) {
    hist.add.spec.line.simple(at = stat.lsl, label = stat.lsl.label)
  }
  if (!is.na(stat.target)) {
    hist.add.spec.line.simple(at = stat.target, label = stat.target.label)
  }
  if (!is.na(stat.usl)) {
    hist.add.spec.line.simple(at = stat.usl, label = stat.usl.label)
  }

  after.plot(x, freq = freq, resolution = resolution)
  invisible(ret)
}
|
4a003b61fd3f7c0f9050ff86154cde2a0559e7f3
|
ff9eb712be2af2fa24b28ecc75341b741d5e0b01
|
/man/gpqCiNormCensored.Rd
|
b8fe93bd28b7ad3a208ac8cd27338e445a31c555
|
[] |
no_license
|
alexkowa/EnvStats
|
715c35c196832480ee304af1034ce286e40e46c2
|
166e5445d252aa77e50b2b0316f79dee6d070d14
|
refs/heads/master
| 2023-06-26T19:27:24.446592
| 2023-06-14T05:48:07
| 2023-06-14T05:48:07
| 140,378,542
| 21
| 6
| null | 2023-05-10T10:27:08
| 2018-07-10T04:49:22
|
R
|
UTF-8
|
R
| false
| false
| 9,207
|
rd
|
gpqCiNormCensored.Rd
|
\name{gpqCiNormCensored}
\alias{gpqCiNormCensored}
\alias{gpqCiNormSinglyCensored}
\alias{gpqCiNormMultiplyCensored}
\title{
Generalized Pivotal Quantity for Confidence Interval for the Mean of a Normal Distribution Based on Censored Data
}
\description{
Generate a generalized pivotal quantity (GPQ) for a confidence interval for the
mean of a \link[stats:Normal]{Normal distribution} based on singly or multiply
censored data.
}
\usage{
gpqCiNormSinglyCensored(n, n.cen, probs, nmc, method = "mle",
censoring.side = "left", seed = NULL, names = TRUE)
gpqCiNormMultiplyCensored(n, cen.index, probs, nmc, method = "mle",
censoring.side = "left", seed = NULL, names = TRUE)
}
\arguments{
\item{n}{
positive integer \eqn{\ge 3} indicating the sample size.
}
\item{n.cen}{
for the case of singly censored data, a positive integer indicating the number of
censored observations. The value of \code{n.cen} must be between \code{1} and
\code{n-2}, inclusive.
}
\item{cen.index}{
for the case of multiply censored data, a sorted vector of unique integers
indicating the indices of the censored observations when the observations are
\dQuote{ordered}. The length of \code{cen.index} must be between \code{1} and
\code{n-2}, inclusive, and the values of \code{cen.index} must be between
\code{1} and \code{n}.
}
\item{probs}{
numeric vector of values between 0 and 1 indicating the confidence level(s)
associated with the GPQ(s).
}
\item{nmc}{
positive integer \eqn{\ge 10} indicating the number of Monte Carlo trials to run
in order to compute the GPQ(s).
}
\item{method}{
character string indicating the method to use for parameter estimation. \cr
\cr
For singly censored data, possible values are \code{"mle"} (the default),
\code{"bcmle"}, \code{"qq.reg"}, \code{"qq.reg.w.cen.level"},
\code{"impute.w.qq.reg"}, \cr
\code{"impute.w.qq.reg.w.cen.level"},
\code{"impute.w.mle"}, \cr
\code{"iterative.impute.w.qq.reg"},
\code{"m.est"}, and \code{"half.cen.level"}. See the help file for
\code{\link{enormCensored}} for details. \cr
\cr
For multiply censored data, possible values are \code{"mle"} (the default),
\code{"qq.reg"}, \code{"impute.w.qq.reg"}, and \code{"half.cen.level"}.
See the help file for \code{\link{enormCensored}} for details.
}
\item{censoring.side}{
character string indicating on which side the censoring occurs. The possible
values are \code{"left"} (the default) and \code{"right"}.
}
\item{seed}{
positive integer to pass to the function \code{\link{set.seed}}. This argument is
ignored if \code{seed=NULL} (the default). Using the \code{seed} argument lets you
reproduce the exact same result if all other arguments stay the same.
}
\item{names}{
a logical scalar passed to \code{\link{quantile}} indicating whether to add a
names attribute to the resulting GPQ(s). The default value is \code{names=TRUE}.
}
}
\details{
The functions \code{gpqCiNormSinglyCensored} and \code{gpqCiNormMultiplyCensored}
are called by \cr
\code{\link{enormCensored}} when \code{ci.method="gpq"}. They are
used to construct generalized pivotal quantities to create confidence intervals
for the mean \eqn{\mu} of an assumed normal distribution.
This idea was introduced by Schmee et al. (1985) in the context of Type II singly
censored data. The function
\code{gpqCiNormSinglyCensored} generates GPQs using a modification of
Algorithm 12.1 of Krishnamoorthy and Mathew (2009, p. 329). Algorithm 12.1 is
used to generate GPQs for a tolerance interval. The modified algorithm for
generating GPQs for confidence intervals for the mean \eqn{\mu} is as follows:
\enumerate{
\item Generate a random sample of \eqn{n} observations from a standard normal
(i.e., N(0,1)) distribution and let \eqn{z_{(1)}, z_{(2)}, \ldots, z_{(n)}}
denote the ordered (sorted) observations.
\item Set the smallest \code{n.cen} observations as censored.
\item Compute the estimates of \eqn{\mu} and \eqn{\sigma} by calling
\code{\link{enormCensored}} using the method
specified by the \code{method} argument, and denote these estimates as
\eqn{\hat{\mu}^*, \; \hat{\sigma}^*}.
\item Compute the t-like pivotal quantity
\eqn{\hat{t} = \hat{\mu}^*/\hat{\sigma}^*}.
\item Repeat steps 1-4 \code{nmc} times to produce an empirical distribution of
the t-like pivotal quantity.
}
A two-sided \eqn{(1-\alpha)100\%} confidence interval for \eqn{\mu} is then
computed as:
\deqn{[\hat{\mu} - \hat{t}_{1-(\alpha/2)} \hat{\sigma}, \; \hat{\mu} - \hat{t}_{\alpha/2} \hat{\sigma}]}
where \eqn{\hat{t}_p} denotes the \eqn{p}'th empirical quantile of the
\code{nmc} generated \eqn{\hat{t}} values.
Schmee et al. (1985) derived this method in the context of Type II singly censored
data (for which these limits are exact within Monte Carlo error), but state that
according to Regal (1982) this method produces confidence intervals that are
close approximations to the correct limits for Type I censored data.
The function
\code{gpqCiNormMultiplyCensored} is an extension of this idea to multiply censored
data. The algorithm is the same as for singly censored data, except
Step 2 changes to: \cr
2. Set observations as censored for elements of the argument \code{cen.index}
that have the value \code{TRUE}.
The functions \code{gpqCiNormSinglyCensored} and \code{gpqCiNormMultiplyCensored} are
computationally intensive and provided to the user to allow you to create your own
tables.
}
\value{
a numeric vector containing the GPQ(s).
}
\references{
Krishnamoorthy K., and T. Mathew. (2009).
\emph{Statistical Tolerance Regions: Theory, Applications, and Computation}.
John Wiley and Sons, Hoboken.
Regal, R. (1982). Applying Order Statistic Censored Normal Confidence Intervals
to Time Censored Data. Unpublished manuscript, University of Minnesota, Duluth,
Department of Mathematical Sciences.
Schmee, J., D.Gladstein, and W. Nelson. (1985). Confidence Limits for Parameters
of a Normal Distribution from Singly Censored Samples, Using Maximum Likelihood.
\emph{Technometrics} \bold{27}(2) 119--128.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\seealso{
\code{\link{enormCensored}}, \code{\link{estimateCensored.object}}.
}
\examples{
# Reproduce the entries for n=10 observations with n.cen=6 in Table 4
# of Schmee et al. (1985, p.122).
#
# Notes:
# 1. This table applies to right-censored data, and the
# quantity "r" in this table refers to the number of
# uncensored observations.
#
# 2. Passing a value for the argument "seed" simply allows
# you to reproduce this example.
# NOTE: Here to save computing time for the sake of example, we will specify
# just 100 Monte Carlos, whereas Krishnamoorthy and Mathew (2009)
# suggest *10,000* Monte Carlos.
# Here are the values given in Schmee et al. (1985):
Schmee.values <- c(-3.59, -2.60, -1.73, -0.24, 0.43, 0.58, 0.73)
probs <- c(0.025, 0.05, 0.1, 0.5, 0.9, 0.95, 0.975)
names(Schmee.values) <- paste(probs * 100, "\%", sep = "")
Schmee.values
# 2.5% 5% 10% 50% 90% 95% 97.5%
#-3.59 -2.60 -1.73 -0.24 0.43 0.58 0.73
gpqs <- gpqCiNormSinglyCensored(n = 10, n.cen = 6, probs = probs,
nmc = 100, censoring.side = "right", seed = 529)
round(gpqs, 2)
# 2.5% 5% 10% 50% 90% 95% 97.5%
#-2.46 -2.03 -1.38 -0.14 0.54 0.65 0.84
# This is what you get if you specify nmc = 1000 with the
# same value for seed:
#-----------------------------------------------
# 2.5% 5% 10% 50% 90% 95% 97.5%
#-3.50 -2.49 -1.67 -0.25 0.41 0.57 0.71
# Clean up
#---------
rm(Schmee.values, probs, gpqs)
#==========
# Example of using gpqCiNormMultiplyCensored
#-------------------------------------------
# Consider the following set of multiply left-censored data:
dat <- 12:16
censored <- c(TRUE, FALSE, TRUE, FALSE, FALSE)
# Since the data are "ordered" we can identify the indices of the
# censored observations in the ordered data as follow:
cen.index <- (1:length(dat))[censored]
cen.index
#[1] 1 3
# Now we can generate a GPQ using gpqCiNormMultiplyCensored.
# Here we'll generate a GPQs to use to create a
# 95% confidence interval for left-censored data.
# NOTE: Here to save computing time for the sake of example, we will specify
# just 100 Monte Carlos, whereas Krishnamoorthy and Mathew (2009)
# suggest *10,000* Monte Carlos.
gpqCiNormMultiplyCensored(n = 5, cen.index = cen.index,
probs = c(0.025, 0.975), nmc = 100, seed = 237)
# 2.5% 97.5%
#-1.315592 1.848513
#----------
# Clean up
#---------
rm(dat, censored, cen.index)
}
\keyword{ distribution }
\keyword{ htest }
|
1be6d49e4d166bae471cf5d894bbeb896d8c3c41
|
f02a8015cc634a67758876bb1b8f5d06948d9c50
|
/bis557/man/ridge_regression.Rd
|
7aab2e4ef00b3a0a910aa092a22ee874efb72879
|
[] |
no_license
|
Klong0725/bis557
|
54650d1f2d71038e88dd2a9eb030c49df86001f1
|
589f46487b38da7c77d5ce184ad2b717cbcc6149
|
refs/heads/master
| 2020-08-27T00:40:06.938950
| 2019-10-24T03:23:00
| 2019-10-24T03:23:00
| 217,196,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 569
|
rd
|
ridge_regression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ridge_regression.R
\name{ridge_regression}
\alias{ridge_regression}
\title{Implement ridge regression}
\usage{
ridge_regression(X, y, lambda = 0)
}
\arguments{
\item{X}{numeric data matrix of predictors in a ridge regression model}
\item{y}{the response vector}
\item{lambda}{a hyperparameter that controls the penalty term}
}
\value{
Components of the fitted ridge regression model
}
\description{
Fits a ridge regression model and returns the estimated coefficients
}
|
d4ebc44bc16a08302e6c99287b8d58fac0c1b56a
|
fcb8f1ff74fd9de6d27399c73300580d68365c16
|
/indagridat/man/indiastat.Rd
|
a2c9869f46f071b71dbd651e8b5b9511b055be4d
|
[] |
no_license
|
simonmoulds/r_indagridat
|
bae0d7f3c58764bf1ffcfdce89690012d06c30ea
|
b1942703f023be330221322f42aaf3cdebb74529
|
refs/heads/master
| 2021-01-17T09:00:58.282188
| 2019-12-16T11:45:08
| 2019-12-16T11:45:08
| 40,711,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
rd
|
indiastat.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{indiastat}
\alias{indiastat}
\title{District level agricultural statistics for India}
\format{A data.frame}
\usage{
indiastat
}
\description{
Dataset containing various... TODO
}
\examples{
data(indiastat)
}
\keyword{datasets}
|
a88f2d6b68ff76d9727607badb504aa7c5b77897
|
d60483c8da8c1ad871bdf6a6ba9c918b3bae33a5
|
/R/AKm02a0.R
|
ca3d2b30c68f3624fcc9e7ad4ddfafdf102a7391
|
[] |
no_license
|
cran/MortHump
|
b8f02f191ff5eb6fa41d8a9251a27930e7b0a9ce
|
f080bed7bb8fcc6165546e7d74735710d999a886
|
refs/heads/master
| 2021-05-03T11:44:10.798604
| 2018-01-24T11:56:41
| 2018-01-24T11:56:41
| 64,298,178
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,827
|
r
|
AKm02a0.R
|
#' @title \code{AKq02a0} estimates a0 using the Andreev-Kingkade rule of thumb.
#'
#' @description \code{AKq02a0} is an auxiliary function used by version 6 of the four HMD lifetable functions, \code{ltper_AxN()}, \code{ltcoh_AxN()}, \code{ltperBoth_AxN()}, \code{ltcohBoth_AxN()}.
#'
#' @param q0 a value or vector of values of q0, the death probability for age 0 infants.
#' @param sex either "m" or "f"
#'
#' @return a0, the estimated average age at death of those dying in the first year of life, either a single value or a vector of a_0 values.
#'
#' @author Tim Riffe
#'
#' @export

AKq02a0 <- function(q0, sex = "m"){
  # Recycle sex to the length of q0 so the elementwise selection lines up.
  sex <- rep(sex, length(q0))
  # Evaluate the piecewise-linear Andreev-Kingkade segments for both sexes,
  # then pick the male or female value per element. Any sex other than "m"
  # takes the female coefficients, exactly as in the original nested ifelse.
  a0.male   <- ifelse(q0 < 0.0226, 0.1493 - 2.0367 * q0,
               ifelse(q0 < 0.0785, 0.0244 + 3.4994 * q0, 0.2991))
  a0.female <- ifelse(q0 < 0.0170, 0.1490 - 2.0867 * q0,
               ifelse(q0 < 0.0658, 0.0438 + 4.1075 * q0, 0.3141))
  ifelse(sex == "m", a0.male, a0.female)
}
#'
#' @title AKm02q0 derive q0 from m0 using the Andreev-Kingkade rule of thumb.
#'
#' @description Solves for q0 given m0 on one linear segment of the Andreev-Kingkade a0 formula (a0 = constant + slope * q0), using the analytic solution of the m0/q0/a0 identity. This formula is only necessary because AK start with q0 whereas the HMD starts with m0, so we needed to adapt. This is an auxiliary function, and not likely needed for direct use.
#'
#' @param m0 the event / exposure infant mortality rate (not IMR)
#' @param constant the intercept of the relevant Andreev-Kingkade segment
#' @param slope the slope of the relevant Andreev-Kingkade segment
#'
#' @return q0 the estimate of q0 according to the identity between a0, m0, q0
#'
#' @author Tim Riffe
#'
#' @export
#'
#' @details This is based on an MPIDR Working Paper: Andreev, Evgueni M and Kingkade, Ward W (2011) "Average age at death in infancy and infant mortality level: reconsidering the Coale-Demeny formulas at current levels of low mortality". short link: http://goo.gl/b5m5pg.
AKm02q0 <- function(m0, constant, slope){
  # q0 solves the quadratic  slope*m0*q0^2 + b*q0 + m0 = 0,
  # where b = m0*(constant - 1) - 1. The expression below is the root taken
  # by the original one-line closed form, rewritten via the quadratic formula.
  b <- m0 * (constant - 1) - 1
  discriminant <- b^2 - 4 * slope * m0^2
  -(b + sqrt(discriminant)) / (2 * slope * m0)
}
#' @title \code{AKm02a0} estimates a0 using the Andreev-Kingkade rule of thumb.
#'
#' @description
#' \code{AKm02a0} is an auxiliary function used by version 6 of the four HMD lifetable functions, \code{ltper_AxN()}, \code{ltcoh_AxN()}, \code{ltperBoth_AxN()}, \code{ltcohBoth_AxN()}. This function calls \code{AKm02q0()} to help get the work done, since the HMD needed to adapt the Andreev-Kingkade formulas to work with the period lifetable flow.
#'
#' @param m0 the event / exposure infant mortality rate (not IMR)
#' @param sex either "male" or "female"
#'
#' @return a0, the estimated average age at death of those dying in the first year of life, either a single value or a vector of a_0 values.
#'
#' @author Tim Riffe
#'
#' @export
#'
#' @details This is based on an MPIDR Working Paper: Andreev, Evgueni M and Kingkade, Ward W (2011) "Average age at death in infancy and infant mortality level: reconsidering the Coale-Demeny formulas at current levels of low mortality". short link: http://goo.gl/b5m5pg.
AKm02a0 <- function(m0,sex="male"){
# Recycle sex so the elementwise ifelse below lines up with m0.
sex <- rep(sex,length(m0))
# Each branch applies one linear Andreev-Kingkade segment,
# a0 = intercept + slope * q0, with q0 recovered from m0 via AKm02q0().
# The m0 cutpoints (0.02306737, ...) are presumably the m0-equivalents of the
# q0 cutpoints used in AKq02a0() -- TODO confirm.
# NOTE(review): any sex value other than "male" takes the female branch.
ifelse(sex == "male",
ifelse(m0 < 0.02306737, 0.1493 - 2.0367 * AKm02q0(m0, 0.1493, -2.0367),
ifelse(m0 < 0.0830706, 0.0244 + 3.4994 * AKm02q0(m0, 0.0244, 3.4994), .2991)),
ifelse(m0 < 0.01725977, 0.1490 - 2.0867 * AKm02q0(m0, 0.1490, -2.0867),
ifelse(m0 < 0.06919348, 0.0438 + 4.1075 * AKm02q0(m0, 0.0438, 4.1075), 0.3141))
)
}
|
7eafd0a16d4dbec7b515eab54bcffdbf50a47951
|
edc33b49b028ef4446a33cf42ac444cebb48dc22
|
/test.R
|
db758dd65324f237e6517a321ea6f827c46e0332
|
[] |
no_license
|
mayamathur/time-varying-covariates
|
228d425ac4d17e8fbae2e6250cc40ffc40a51678
|
71241a938edaa220895396a3c5bf77adbd324c1c
|
refs/heads/master
| 2020-09-21T13:35:26.670422
| 2015-02-24T19:09:46
| 2015-02-24T19:09:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24
|
r
|
test.R
|
# here is some test code
|
4968b62a67218ca3e578a51d3fcc31f7c989ab33
|
3dc38709642fb1556a29feb21951e666c12d8bc9
|
/man/subsetDim.Rd
|
0a0ff98678c969345ed56b7b808b37b3c9eebec6
|
[
"BSD-3-Clause"
] |
permissive
|
nuno-agostinho/cTRAP
|
010dfff8cc02f110926e73a80a39b3125aa53473
|
491b0e9d41ddc313c3deb95b04fe84e9d44bc985
|
refs/heads/master
| 2023-09-01T05:14:34.383038
| 2023-02-15T09:00:59
| 2023-02-15T09:00:59
| 150,247,015
| 5
| 1
|
NOASSERTION
| 2023-02-15T09:01:01
| 2018-09-25T10:23:35
|
R
|
UTF-8
|
R
| false
| true
| 338
|
rd
|
subsetDim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{subsetDim}
\alias{subsetDim}
\title{Subset rows or columns based on a given index}
\usage{
subsetDim(k, dims, nargs, areCols = TRUE)
}
\value{
Subset rows/columns
}
\description{
Subset rows or columns based on a given index
}
\keyword{internal}
|
18db6799e30b081224a05d9fb28c1e78ced97c61
|
716fe3ec13ba4cd2a77dfa4253c0c62df02a5c2b
|
/man/KennardStone.Rd
|
373c74a50bac079856a6c57286562f073f6f8ffc
|
[] |
no_license
|
HMzhu/CTVPdtw
|
7d9b76bf30c96ea9b945b87b77caf894ec33aae9
|
dad77175e6148ce587d49f2f1979a7d6de3271c7
|
refs/heads/master
| 2021-07-07T06:15:00.079466
| 2020-10-21T02:25:55
| 2020-10-21T02:25:55
| 199,740,121
| 3
| 4
| null | 2020-10-21T02:25:57
| 2019-07-30T23:06:11
|
R
|
UTF-8
|
R
| false
| false
| 1,273
|
rd
|
KennardStone.Rd
|
\name{KennardStone}
\alias{KennardStone}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
KennardStone sample partitioning
}
\description{
Use the Euclidean distance between samples to do sample partitioning
% Input
}
\usage{
KennardStone(x,num)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{the matrix of the sample spectra}
\item{num}{the number of the sample spectra you want select }
}
\details{
KennardStone selects the samples xSelected so that they are uniformly distributed across the space of the experimental data x
}
\value{
Returns a list of results
\item{vSelectedRowIndex }{the row indices of the selected samples in the x matrix }
\item{xSelected }{the sample spectra selected from x}
\item{xRest }{the sample spectra remaining in x after selection}
\item{vNotSelectedSample }{the row indices of the samples remaining in x after selection}
}
\references{
R.W. Kennard, L.A. Stone, Computer Aided Design of Experiments, Technometrics, 11 (1969) 137-148.
}
\author{
ZHuiMin
}
\examples{
data(corn)
x = corn$X.MP5
result<-KennardStone (x,num=64)
X1selected=result$xSelected
X1Rest=result$xRest
vSelectedRowIndex=result$vSelectedRowIndex
vNotSelectedSample=result$vNotSelectedSample
}
\keyword{ ks }
\keyword{ KennardStone }
|
018de5c952d6082ff490f10bc0a52cb9c3a62c05
|
3dd0a2a2da595180a3dbc0476a441e21948bad47
|
/DesarrolloPreguntas.R
|
5863a3af42e4c694df809c893c6c6279ca72b2de
|
[] |
no_license
|
Rafael1107/CIERRE_SEMESTRE_S2
|
c60bc819643b67ea54e0ba1d8fd2877563d19aaa
|
413cab36db56a4e679f68c6397a25c6f6df1db00
|
refs/heads/master
| 2020-11-26T03:30:14.554226
| 2020-01-09T16:00:24
| 2020-01-09T16:00:24
| 228,953,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,726
|
r
|
DesarrolloPreguntas.R
|
# DesarrolloPreguntas.R
# (BUG FIX: the original first line was a bare `DesarrolloPreguntas.R` token,
# which fails at run time; it is kept here as a comment instead.)
#
# End-of-semester exercise solutions: random sampling, category counting,
# Hi-Lo card counting, and basic text mining with the tm package.

######################### EXERCISE 1 ####################################
##########################################################################
Ejemplos <- sample(c("Positivo", "Negativo", "Neutros"), 100, replace = TRUE)

################# Count the positives ################
n_positivos <- 0
for (i in seq_along(Ejemplos)) {     # seq_along() is safe for empty vectors
  if ("Positivo" == Ejemplos[i]) { n_positivos <- n_positivos + 1 }
}

#################### Count the negatives ##############
n_negativos <- 0
for (i in seq_along(Ejemplos)) {
  if ("Negativo" == Ejemplos[i]) { n_negativos <- n_negativos + 1 }
}

#################### Count the neutrals #################
n_neutros <- 0
for (i in seq_along(Ejemplos)) {
  if ("Neutros" == Ejemplos[i]) { n_neutros <- n_neutros + 1 }
}

################ Check that the counts add up to 100 ################
TOTAL <- n_negativos + n_positivos + n_neutros

######################### EXERCISE 2 ####################################
##########################################################################
# BUG FIX: set.seed() only affects draws made AFTER it is called; the
# original called it after sample(), which left the draw irreproducible.
set.seed(10)
Ejemplos <- sample(c("Positivo", "Negativo", "Neutros"), 10, replace = TRUE)

######################### EXERCISE 3 ####################################
##########################################################################
set.seed(66)                         # BUG FIX: seed moved before sample()
Ejemplos <- sample(c("Positivo", "Negativo", "Neutros"), 10, replace = TRUE)
# The counts from exercise 1 are reused to compute the proportions,
# with and without the neutral category.
Porcentajes_incluye_neutros <- c(n_negativos / TOTAL, n_positivos / TOTAL, n_neutros / TOTAL)
Porcentajes_no_neutros <- c(n_negativos / (TOTAL - n_neutros), n_positivos / (TOTAL - n_neutros))

######################### EXERCISES 4 AND 5 ##############################
##########################################################################
set.seed(31)                         # BUG FIX: seed moved before sample()
Cartas_Sacadas <- sample(c("A", 2:10, "J", "Q", "K"), 31, replace = TRUE)

# Hi-Lo card counting: 2-6 count +1, 10/J/Q/K/A count -1, 7-9 count 0.
Cuenta <- 0
for (i in seq_along(Cartas_Sacadas)) {
  if (Cartas_Sacadas[i] %in% c(2:6)) {
    Cuenta <- Cuenta + 1
  } else if (Cartas_Sacadas[i] %in% c("A", "J", "Q", "K", 10)) {
    Cuenta <- Cuenta - 1
  }
  # 7, 8 and 9 leave the running count unchanged
}

# Same count computed a second way, using vectorized membership tests in
# place of the original pair of nested index loops (identical result).
mas1 <- c(2:6)
menos1 <- c("A", "J", "Q", "K", 10)
neutros <- c(7:9)
cuenta2 <- sum(Cartas_Sacadas %in% mas1) - sum(Cartas_Sacadas %in% menos1)

######################### EXERCISE 6 ####################################
##########################################################################
# BUG FIX: the original path used single backslashes ("C:\Users\..."), which
# are invalid escape sequences in an R string and fail to parse.
setwd("C:/Users/Rafae/Documents/CIERRE_SEMESTRE_S2")

# Required packages
library(tm)
library(wordcloud)
library(ggplot2)
library(reshape2)

# Read the corpus of Spanish-language text files
textos <- VCorpus(DirSource("Textos", encoding = "UTF-8"),
                  readerControl = list(language = "spa"))
inspect(textos)
writeLines(as.character(textos[[1]]))
writeLines(as.character(textos[[2]]))

# Build the term-document matrix.
# BUG FIX: the control option is spelled "stemming"; the original "steming"
# was silently ignored by tm.
tdm <- TermDocumentMatrix(textos,
                          control = list(stopwords = TRUE,
                                         removePunctuation = TRUE,
                                         removeNumbers = TRUE,
                                         stemming = TRUE))
matrix_tdm <- melt(as.matrix(tdm), value.name = "Count")
head(matrix_tdm)

##################################### METHOD 2 ###############################################
path <- "C:/Users/Rafae/Documents/CIERRE_SEMESTRE_S2"
setwd("C:/Users/Rafae/Documents/CIERRE_SEMESTRE_S2")
# BUG FIX: the original paste(path, "pos/", sep = "") produced ".../CIERRE_SEMESTRE_S2pos/"
# (missing path separator before the "pos" subdirectory).
dir <- DirSource(paste0(path, "/pos/"), encoding = "UTF-8")
corpus <- Corpus(dir)
|
9e22ca7c230e3be3bfbb10b90defa9d794af8c1a
|
65b7c1a0100da9d016bbbe49648b71dd026f2601
|
/5-10 Reshape包:数据融合(melt)和重铸(cast).R
|
89e5cc86fff319794b88862b8e347ca51c0ba34e
|
[] |
no_license
|
xush65/R_in_Action
|
a3b0f8659abe7b807b9e244dc3f9e518a15dd2bf
|
228538c3affca78464fa63b97cba3fb1d1d335ec
|
refs/heads/master
| 2021-06-10T10:14:37.119012
| 2017-02-16T03:10:26
| 2017-02-16T03:10:26
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 651
|
r
|
5-10 Reshape包:数据融合(melt)和重铸(cast).R
|
# merge: see section 4-9
# Use R's built-in airquality data to illustrate melt and cast:
# First inspect the airquality data
head(airquality)
# Use the melt function.
# NOTE(review): both reshape and reshape2 are attached; reshape2 (attached
# last) masks melt(), so the reshape2 version is dispatched below.
library("reshape")
library("reshape2")
aql <-melt(airquality, id.vars = c("Month","Day"), variable.name ="climate_variable", value.name ="climate_value")
# Long format: the weather measurements are split out, one row per
# measurement, keyed by month and day.
head(aql)
# NOTE(review): melt() above renamed the molten columns to
# "climate_variable"/"climate_value", yet cast() refers to a column named
# "variable" -- this looks inconsistent; verify it runs as intended.
aqw <- cast(aql, Month + Day ~ variable)
# Reassemble the data back to wide format by month and day
head(aqw)
# Compute means (if this errors, try clearing the workspace; the usual
# cause is a conflicting variable name)
cast(aql, Month ~ variable, mean, na.rm=T)# monthly mean of each weather variable
|
054ec246581f3aed8819fa4570ee7b10a6246b63
|
0cf89b25d1b7d0ba7696a34b9287389d216c2149
|
/man/numextract.Rd
|
c0f9a364b7a77bc59918f6de34257a57fef67f7c
|
[
"MIT"
] |
permissive
|
rpkyle/cscmisc
|
790567b8453d71e99cf08d90e03faa1991f6f493
|
afcec1f4b310390f5853bf8d7c1f7cd62df094bf
|
refs/heads/master
| 2020-03-28T06:37:43.305367
| 2019-03-08T17:00:01
| 2019-03-08T17:00:01
| 147,845,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,958
|
rd
|
numextract.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numextract.R
\name{numextract}
\alias{numextract}
\title{Extract Numeric Values from Character Strings}
\usage{
numextract(string, sequence = "first")
}
\arguments{
\item{string}{A single string of \code{class} string to parse for digits.}
\item{sequence}{A second character string, matching one of the following: \code{"first"}, \code{"last"}, \code{"collapse"}, or \code{"midpoint"}.}
}
\value{
Numeric value(s) occurring in \code{string}, or the midpoint of the first and last digits
within the string.
}
\description{
Given a character string, this function will attempt to extract digits and return the result as
a numeric value.
}
\details{
All functions used are available in base R; no additional packages are required.
If one matching sequence is identified, but the \code{sequence} argument is \code{"midpoint"}
or \code{"collapse"}, the function attempts to return a "safe" value. In this case, the only
numeric match is returned. If no matches are found, the function returns \code{numeric()}.
}
\examples{
example_string1 <- "12-15 HOURS"
example_string2 <- "DAY -1"
# Returns 12.
numextract(example_string1)
numextract(example_string1, sequence="first")
# Returns -15, a negative numeric value.
numextract(example_string1, sequence="last")
# Returns 1215, compressing two sequences into one.
numextract(example_string1, sequence="collapse")
# Returns 13.5, which is the midpoint of 15 and 12
# (assumes the second sequence does not correspond to a negative numeric value).
numextract(example_string1, sequence="midpoint")
# All return -1
numextract(example_string2)
numextract(example_string2, sequence="first")
numextract(example_string2, sequence="last")
numextract(example_string2, sequence="midpoint")
numextract(example_string2, sequence="collapse")
}
\author{
Ryan Kyle, \email{ryan.kyle@mail.mcgill.ca}
}
\keyword{utilities}
|
9cbf6b155b196cc4cc902b8a1975c9d7074416b2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sapa/examples/taper.Rd.R
|
305a2b958ffb9a6dad0c9148fe2843ec96b4bb73
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
taper.Rd.R
|
library(sapa)

### Name: taper
### Title: Oracle function for obtaining a particular taper/window
### Aliases: taper as.matrix.taper plot.taper print.taper
### Keywords: ts

### ** Examples

require(ifultools)

## switch to a 4x4 panel layout with a small gap between panels
panel.gap <- 0.11
old.plt <- splitplot(4, 4, 1, gap = panel.gap)

## every supported taper/window type, one per panel
taper.types <- c("rectangle", "triangle", "raised cosine",
                 "hanning", "hamming", "blackman",
                 "nuttall", "gaussian", "kaiser",
                 "chebyshev", "born jordan", "sine",
                 "parzen", "papoulis", "daniell", "dpss")

for (k in seq_along(taper.types)) {
  ## the first panel was already opened by the splitplot() call above
  if (k > 1) {
    splitplot(4, 4, k, gap = panel.gap)
  }
  plot(taper(type = taper.types[k]))
}

## restore plot layout to initial state
par(old.plt)
|
c5195f08fc59a9c2bf6dcd2c5bb80be98f682efe
|
8df49c76b6f4a9856dbb93658cb2aa70c7cdb821
|
/run_analysis.R
|
698a0c68ffa4849cf5a6c9c8399a8a4e20bb1f83
|
[] |
no_license
|
ctangar/Gett_and_Clean_data_project
|
7dc9d7ed19fcd9bc721234d01b7ee773b0b442e4
|
2e30e18a8548c0657950356ada88725bd190de69
|
refs/heads/master
| 2021-01-18T14:09:53.570852
| 2014-11-23T16:41:18
| 2014-11-23T16:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,196
|
r
|
run_analysis.R
|
# Carlos Tangarife-Course Project (run_analysis.R)
# Script that does the following:
#1) Merges the training and the test sets to create one data set.
#2) Extracts only the measurements on the mean and standard deviation for each measurement.
#3) Uses descriptive activity names to name the activities in the data set
#4) Appropriately labels the data set with descriptive variable names.
#5) From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#---------------------------------------------------------------------------------------
# Checks required libraries
#---------------------------------------------------------------------------------------
if(require("data.table")){
print("data.table is loaded correctly")
} else {
print("trying to install data.table")
install.packages("data.table")
if(require("data.table")){
print("data.table installed and loaded")
} else {
stop("could not install data.table")
}
}
if(require("reshape2")){
print("reshape2 is loaded correctly")
} else {
print("trying to install reshape2")
install.packages("reshape2")
if(require("reshape2")){
print("reshape2 installed and loaded")
} else {
stop("could not install reshape2")
}
}
# Load activity labels
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Load data column names
features_file <- read.table("./UCI HAR Dataset/features.txt")[,2]
extract_features <- grepl("mean|std", features_file)
#---------------------------------------------------------------------------------------
# Load and process X_test & y_test data.
#---------------------------------------------------------------------------------------
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(X_test) = features_file
#---------------------------------------------------------------------------------------
# Extract only the measurements on the mean and standard deviation for each measurement.
#---------------------------------------------------------------------------------------
X_test = X_test[,extract_features]
# Load activity labels
y_test[,2] = activity_labels[y_test[,1]]
#--------------------------------------------------------------------------
# Uses descriptive activity names to name the activities in the data set
#--------------------------------------------------------------------------
names(y_test) = c("Activity_ID", "Activity_Label")
names(subject_test) = "subject"
# Bind data
test_data <- cbind(as.data.table(subject_test), y_test, X_test)
# Load and process X_train & y_train data.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(X_train) = features_file
#---------------------------------------------------------------------------------------
# Extract only the measurements on the mean and standard deviation for each measurement.
#---------------------------------------------------------------------------------------
X_train = X_train[,extract_features]
# Load activity data
y_train[,2] = activity_labels[y_train[,1]]
names(y_train) = c("Activity_ID", "Activity_Label")
names(subject_train) = "subject"
# Bind data
train_data <- cbind(as.data.table(subject_train), y_train, X_train)
#------------------------------------------
# Merge test and training data
#------------------------------------------
data = rbind(test_data, train_data)
#--------------------------------------------------------------
# Appropriately labels the data set with descriptive names.
#--------------------------------------------------------------
colnames(data) <- gsub('\\(|\\)',"",colnames(data), perl = TRUE)
# Make clearer names
colnames(data) <- gsub('Acc',"Acceleration",colnames(data))
colnames(data) <- gsub('GyroJerk',"AngularAcceleration",colnames(data))
colnames(data) <- gsub('Gyro',"AngularSpeed",colnames(data))
colnames(data) <- gsub('Mag',"Magnitude",colnames(data))
colnames(data) <- gsub('^t',"TimeDomain.",colnames(data))
colnames(data) <- gsub('^f',"FrequencyDomain.",colnames(data))
colnames(data) <- gsub('\\-mean',".Mean",colnames(data))
colnames(data) <- gsub('\\-std',".StDev",colnames(data))
colnames(data) <- gsub('Freq\\.',"Frequency.",colnames(data))
colnames(data) <- gsub('Freq$',"Frequency",colnames(data))
id_labels = c("subject", "Activity_ID", "Activity_Label")
data_labels = setdiff(colnames(data), id_labels)
melt_data = melt(data, id = id_labels, measure.vars = data_labels)
# Calculate mean using dcast function
tidy_data = dcast(melt_data, subject + Activity_Label ~ variable, mean)
#--------------------------------------------------------------
# Tidy data set with the average of each variable for each activity and each subject.
#--------------------------------------------------------------
write.table(tidy_data, file = "./tidy_data_set.txt")
|
ad342a9f985a7e641baad948c1a9542852293359
|
e535d498001519774956adcc5b0106a5f4e555ac
|
/misc/R/gDNA_mappingBias.R
|
9bcbd339c4a19192770073d99af7fb7db58de25d
|
[] |
no_license
|
kraigrs/thesis_work
|
f73c6f130a0cf33ed079acb35208bff9cb85d4d1
|
bcc8e46b5c65f08c61d5beb8e29ac7e4df101cff
|
refs/heads/master
| 2021-01-22T16:18:29.372793
| 2015-09-10T18:48:11
| 2015-09-10T18:48:11
| 34,088,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 344
|
r
|
gDNA_mappingBias.R
|
exons <- read.table("/Users/kraigrs/Wittkopp/mel_mel_data/zhr_z30.SD_cut20_sepPar2.mosaik.exons.gDNA.txt",header=TRUE,sep="\t");
genes <- read.table("/Users/kraigrs/Wittkopp/mel_mel_data/zhr_z30.SD_cut20_sepPar2.mosaik.genes.gDNA.txt",header=TRUE,sep="\t");
hist(log2(exons[,2]/exons[,3]),breaks=50);
hist(log2(genes[,2]/genes[,3]),breaks=50);
|
b3652fc8ab0eb242ce09dd9192060cbf8d28507d
|
c143a79fc65c3628133f98e20fd79a065edb91f2
|
/output/JingMu/ui.R
|
ea0355101f0f80238ff8b50bf4d26f9cb872e636
|
[] |
no_license
|
elk2142/nycrestaurants
|
d55f4a1943f2bc23294200616c25c67bf07c0bc8
|
7058be54ff9ab6d6c1ebba6f662834ba16de95a8
|
refs/heads/master
| 2021-01-12T12:12:02.357978
| 2016-10-30T14:12:11
| 2016-10-30T14:12:11
| 72,353,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
ui.R
|
library(shiny)
library(leaflet)
library(shinythemes)
#GRADE <- levels(bind$GRADE)
GRADE <- c('All','','A','B','C','Not Yet Graded','P','Z')
shinyUI(fluidPage(
titlePanel('Restaurant Grades'),
tabPanel('Dynamic Map',
div(class='outer',
tags$head(
includeCSS('styles.css')
),
leafletOutput('map'),
absolutePanel(id = 'controls', class='panel panel-default', fixed = T,
draggable = T, top = 60, left = 'auto', right = 20, bottom='auto',
width='auto', height = 'auto',
h2('Restaurant Grades')),
selectInput(inputId = 'GRADE',label = 'Grade',
choices = GRADE,
selected = "All")
))
))
# navbarPage('Grade', id = 'Grade',
# tabPanel('Interactive map',
# div(class='outer',
# leafletOutput('map', width = '100%', height = '100%')))
# )
|
9c9df14c6de8fbacadedd4162ba2867e84362a07
|
5d18a2394e2d9d0798b3df16e327a8684fda2b72
|
/run_analysis.R
|
4516c8f95347048b4461407a3677a78bc4f8fc94
|
[] |
no_license
|
blink-student/Getting-and-Cleaning-Data-Course-Project
|
1d699585cda6f0053d205c0a3985e80f1d202942
|
fb5608fee1016d82c432c325a8b297d07259ea5d
|
refs/heads/master
| 2020-02-26T17:25:19.807972
| 2016-10-23T22:33:03
| 2016-10-23T22:33:03
| 71,733,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,973
|
r
|
run_analysis.R
|
## run_analysis.R
## Peer-graded Assignment: Getting and Cleaning Data Course Project
## DBoiko 2016-10
library(data.table);
library(plyr);
library(dplyr);
## it is assumed that the working data is already prepared and is in working directory
## but the script can download it and unzip
if (!file.exists("UCI HAR Dataset")) {
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
f <- file.path(getwd(), "dataset.zip");
download.file(url, f);
unzip ("dataset.zip", exdir = ".")
}
dataDir <- file.path(getwd(), "UCI HAR Dataset");
## Step 1. Merge the training and the test sets to create one data set.
# training data
activityLabels <- read.table(file.path(dataDir,"activity_labels.txt"), header = FALSE);
features <- read.table(file.path(dataDir,"features.txt"), header = FALSE);
subject_train <- read.table(file.path(dataDir,"train","subject_train.txt"), header = FALSE);
x_train <- read.table(file.path(dataDir,"train","x_train.txt"), header = FALSE);
y_train <- read.table(file.path(dataDir,"train","y_train.txt"), header = FALSE);
colnames(activityLabels) <- c("activityId", "activityLabel");
colnames(subject_train) <- "subjectId";
colnames(x_train) <- features[,2];
colnames(y_train) <- "activityId";
training_dataset <- cbind(y_train, subject_train, x_train);# merging all training data
# test data
subject_test <- read.table(file.path(dataDir,"test","subject_test.txt"), header = FALSE);
x_test <- read.table(file.path(dataDir,"test","x_test.txt"), header = FALSE);
y_test <- read.table(file.path(dataDir,"test","y_test.txt"), header = FALSE);
colnames(subject_test) <- "subjectId";
colnames(x_test) <- features[,2];
colnames(y_test) <- "activityId";
test_dataset <- cbind(y_test, subject_test, x_test);# merging all test data
full_dataset <- rbind(training_dataset, test_dataset);# merging training and test data
## Step 2. Extract only the measurements on the mean and standard deviation for each measurement.
cl <- colnames(full_dataset);
full_dataset <- full_dataset[
(grepl("activity..",cl)
|grepl("subject..",cl)
|grepl("-mean..",cl) & !grepl("-meanFreq..",cl)
|grepl("-std..",cl))
];
## Step 3. Uses descriptive activity names to name the activities in the data set
full_dataset <- merge(full_dataset, activityLabels, by="activityId", all.x=TRUE);
## Step 4. Appropriately labels the data set with descriptive variable names.
cl <- colnames(full_dataset);
for (i in 1:length(cl))
{
#cl[i] <- gsub("activityId","activityId",cl[i])
#cl[i] <- gsub("activityLabel","activityLabel",cl[i])
#cl[i] <- gsub("subjectId","subjectId",cl[i])
cl[i] <- gsub("^t","time",cl[i])
cl[i] <- gsub("^f","freq",cl[i])
cl[i] <- gsub("\\(\\)","",cl[i])
cl[i] <- gsub("-mean","Mean",cl[i])
cl[i] <- gsub("-std","StdDev",cl[i])
cl[i] <- gsub("-[Xx]","X",cl[i])
cl[i] <- gsub("-[Yy]","Y",cl[i])
cl[i] <- gsub("-[Zz]","Z",cl[i])
cl[i] <- gsub("[Gg]ravity","Gravity",cl[i])
cl[i] <- gsub("([Bb]ody){1,2}","Body",cl[i])
cl[i] <- gsub("[Gg]yro","Gyroscope",cl[i])
cl[i] <- gsub("[Mm]ag","Magnitude",cl[i])
cl[i] <- gsub("[Aa]cc","Accelerometer",cl[i])
cl[i] <- gsub("[Jj]erk","Jerk",cl[i])
};
colnames(full_dataset) <- cl;
## Step 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
fdWO <- full_dataset[,names(full_dataset) != "activityLabel"];# remove activityLabel
tidyDataset <- aggregate(fdWO[,names(fdWO) != c("activityId","subjectId")],by=list(activityId=fdWO$activityId,subjectId = fdWO$subjectId),mean);
tidyDataset <- merge(tidyDataset,activityLabels,by='activityId',all.x=TRUE);# add activityLabel
tidyDataset <- tidyDataset[,names(tidyDataset) != "activityId"];# remove activityId
#not the most elegant solution :(
write.table(tidyDataset, file = file.path(getwd(),"tidyData.txt"),row.names=FALSE)
|
e0311c461a0fb5517264d6434e82b3c2abae2e2e
|
5d0bc9fa9c48a468d115e9930f5eac66a0764789
|
/man/OldFaithful1.Rd
|
c4387a679a63ed8171492bd5f6527312488aa9a0
|
[] |
no_license
|
rpruim/ISIwithR
|
a48aac902c9a25b857d2fd9c81cb2fc0eb0e848e
|
7703172a2d854516348267c87319ace046508eef
|
refs/heads/master
| 2020-04-15T20:36:55.171770
| 2015-05-21T09:20:21
| 2015-05-21T09:20:21
| 21,158,247
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
rd
|
OldFaithful1.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{OldFaithful1}
\alias{OldFaithful1}
\title{OldFaithful1}
\format{A dataset with 222 observations on the following 1 variables.
\describe{
\item{time}{integer}
}}
\description{
A data set
}
\keyword{datasets}
|
fb95238534f59f1ba608d16ccb05686cb13428cd
|
e26d6b79d2b30fd89ddb7dc5d05ad8e08b16a185
|
/plot3.R
|
be925c4917e9d86860c424b342b6ed447a987b0c
|
[] |
no_license
|
emma1225/Project2
|
2d4b914b43b3fc20ce56d634efa7afdf655b849e
|
0be323026d0e7528c55dfc0c71b381bd86b8933c
|
refs/heads/master
| 2021-01-19T11:35:18.580753
| 2015-02-22T08:19:23
| 2015-02-22T08:19:23
| 31,157,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
plot3.R
|
#Load Data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Calculate Total Emissions from four types of sources of Every Year
NEIBAL<-subset(NEI,fips == "24510")
SumType<-tapply(NEIBAL$Emissions,list(NEIBAL$year,NEIBAL$type),sum,simplify=TRUE)
SumType<-as.numeric(SumType)
Year<-c("1999","2002","2005","2008")
Type<-c("Non-Road","Non-Road","Non-Road","Non-Road",
"NonPoint","NonPoint","NonPoint","NonPoint",
"On-Road","On-Road","On-Road","On-Road",
"Point","Point","Point","Point")
SumType2<-data.frame(Year,Type,SumType)
colnames(SumType2)=c("Year","Type","Sum")
#ggplot
library(ggplot2)
png(file="plot3.png",width=960,height=480)
qplot(Year,Sum,data=SumType2,facets=.~Type)
dev.off()
#Three sources(Non-Road,NonPoint,On-Road) have seen decreases in emissions for Baltimore City from 1999 to 2008.
#One source(Point) have seen increases in emissions for Baltimore City from 1999 to 2008.
|
8d13cba123e4acd9006204b9b5f4b4adbaa1de2b
|
b2e2f737bee0614571ecce3743438fd0d92f5d53
|
/man/RMrotat.Rd
|
a03376f0ad2c076393280e3be92f63eb90b55744
|
[] |
no_license
|
mistletoe999/RandomFields
|
9bfceaf4ba855abb5f878ee57282a995d81492fd
|
e5a7a2f272b7834f96c925ced7acfa0c6456a87f
|
refs/heads/master
| 2020-03-10T14:34:03.979780
| 2017-04-17T22:09:51
| 2017-04-17T22:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
rd
|
RMrotat.Rd
|
\name{RMrotat}
\alias{RMrotat}
\alias{RMrotation}
\title{Rotation matrices}
\description{
\command{RMrotat} and \command{RMrotation} are
auxiliary space-time functions that create some rotation
\deqn{f(h, t) = s (\cos(\phi t) h_1 + \sin(\phi t) h_2) / \|h\|}
and
\deqn{f(h, t) = (\cos(\phi t) h_1 + \sin(\phi t) h_2,
- \sin(\phi t) h_1 + \cos(\phi t) h_2, t)}
respectively
}
\usage{
RMrotat(speed, phi)
RMrotation(phi)
}
\arguments{
\item{speed}{real value \eqn{s} }
\item{phi}{angle}
}
\details{
\command{\link{RMrotat}} and \command{\link{RMrotation}} are
space-time models for
two-dimensial space.
}
\value{
\command{\link{RMrotat}} and \command{\link{RMrotation}} return an object of class \code{\link[=RMmodel-class]{RMmodel}}
}
\author{Martin Schlather, \email{schlather@math.uni-mannheim.de}
}
\seealso{
\command{\link{RMmodel}},
\link{S10}
}
\keyword{spatial}
\keyword{models}
\examples{
# see S10
\dontshow{FinalizeExample()}
}
|
18d764722eee5249aa005f769ce19824ff3496eb
|
53e3ee7943214d302f386b6ff832c85cc1a8154a
|
/Rscripts/chapter5/6_get_gtfs_from_shapefiles_2014plan.R
|
348b73e50a88a7ad389178b216b80bbfb720f1ab
|
[] |
no_license
|
rafapereirabr/thesis
|
39a269ea9b55fe5c2b28d8ca0dc7e5c78add0b9b
|
61fd8ced9151fcbbe1b1e2db35698a4d9d9fd072
|
refs/heads/master
| 2023-04-18T06:43:11.937046
| 2021-04-11T22:22:59
| 2021-04-11T22:22:59
| 113,566,756
| 24
| 19
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,413
|
r
|
6_get_gtfs_from_shapefiles_2014plan.R
|
# set working Directory
setwd("R:/Dropbox/Dout/Data Dout")
##################### Load packages -------------------------------------------------------
source("./R scripts/00_LoadPackages.R")
library(sp)
library(chron)
library(stplanr)
library(lubridate)
library(rgeos)
caju <- "partial"
caju <- ""
paradas_depois_caju <- c("transbra_137","transbra_138","transbra_139","transbra_140","transbra_141","transbra_142")
######## read shapefiles and reproject data -------------------
shapes_sp <- readOGR(dsn="./shapes_brt_transbrasil", layer="Trajetos_BRT__Visualizacao") #read shape file
stops_sp <- readOGR(dsn="./shapes_brt_transbrasil", layer="Estacoes_BRT__Visualizacao") #read shape file
#### change projection
proj4string(shapes_sp)
proj4string(stops_sp)
myCRlatlong <- "+proj=longlat +zone=23 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
shapes_sp <- spTransform(shapes_sp, CRS(myCRlatlong))
stops_sp <- spTransform(stops_sp, CRS(myCRlatlong))
# change stops names to avoid conflict with other GTFS stops
stops_sp@data$OBJECTID <- paste0('transbra_',stops_sp@data$OBJECTID)
# subset BRT Transbrasil
#transbrasil_sp <- subset(shapes_sp, Corredor == "TransBrasil")
transbrasil_sp <- subset(shapes_sp, Nome == "TransBrasil")
plot(stops_sp, col="gray")
plot(shapes_sp, add=T)
plot(transbrasil_sp, col="red", add=T)
#### select stops from Transbrasil
# create buffer around transbrasil
buff_brt <- gBuffer(transbrasil_sp, width= .00286 ) # aprox. 200 meters
# select stops within buffer
stops_brt_sp <- stops_sp[ buff_brt, ]
plot(shapes_sp)
plot(buff_brt, col="red", add=T)
plot(stops_brt_sp, col="green", add=T)
# # interactivelt view spatial oject
# mapview::mapview(buff_brt)
######## stops.txt ----------------------
# convert to sp to data frame
stops_brt_df <- as.data.frame(stops_brt_sp) %>% setDT()
head(stops_brt_df)
# subset and rename columns
stops_brt_df <- stops_brt_df[,.(OBJECTID, Nome, coords.x1, coords.x2)]
names(stops_brt_df) <- c('stop_id','stop_name','stop_lon','stop_lat')
# Encoding of names
stops_brt_df[, stop_name := as.character(stop_name)]
Encoding(stops_brt_df$stop_name) <- "UTF-8"
# add empty columns
stops_brt_df[, c('stop_code','stop_desc','zone_id','stop_url','location_type','parent_station') := NA ]
# reorder columns
setcolorder(stops_brt_df, c('stop_id','stop_name','stop_lat','stop_lon','stop_code','stop_desc','zone_id','stop_url','location_type','parent_station'))
head(stops_brt_df)
# add parent_station
stops_brt_df[ , parent_station := ifelse(stop_id=="transbra_142", "38734844", # Metro Uruguaiana
ifelse(stop_id=="transbra_117", "9629", NA ))] # Trem Deodoro
# add Transcarioca stations
penha <- matrix(data=c(33154600,"BRT Transcarioca - Penha I",-22.841819,-43.275043,NA,NA,NA,NA,NA,NA), nrow = 1) %>% as.data.frame()
names(penha) <- names(stops_brt_df)
stops_brt_df <- rbind(stops_brt_df, penha)
fundao <- matrix(data=c(33469197,"BRT Transcarioca - Fundão",-22.839559,-43.239663,NA,NA,NA,NA,NA,NA), nrow = 1) %>% as.data.frame()
names(fundao) <- names(stops_brt_df)
stops_brt_df <- rbind(stops_brt_df, fundao)
# lat long numeric
stops_brt_df$stop_lat <- as.numeric( as.character(stops_brt_df$stop_lat) )
stops_brt_df$stop_lon <- as.numeric( as.character(stops_brt_df$stop_lon) )
# save stops.txt
fwrite(stops_brt_df, "./gtfs_brt_transbrasil/stops.txt")
### reorder stopds according to TransBrasil stops order
# subset of columns
stop_sequence_all <- data.table(stop_id = as.character( stops_brt_df$stop_id),
stop_name = as.character( stops_brt_df$stop_name))
# add stops sequence following diagram in Figura 28 from plan
stop_sequence_all$sequence <- c(1, 2, 3, 4, 5, 7, 8, 6, 9, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 24, 27, 25, 26, 28, 16, 14 )
# reorder
stop_sequence_all <- stop_sequence_all[order(sequence)]
######## shape.txt ----------------------
# sample points regularly spaced over transbrasil_sp with a distance of 30 meters between them
shapes_points <- spatialEco::sample.line(x=transbrasil_sp, type="regular", longlat=T, d=.03)
plot(transbrasil_sp)
plot(shapes_points, col="red", add=T)
# convert do data frame
shapes_points_df <- as.data.frame(shapes_points) %>% setDT()
# chage colnames
names(shapes_points_df) <- c('shape_id', 'shape_pt_lon', 'shape_pt_lat')
# create shape_id and shape_pt_sequence
shapes_points_df[, shape_id := as.character(shape_id)][, shape_id := as.character("shapeid_transbra")]
shapes_points_df[, shape_pt_sequence := 0:nrow(shapes_points_df)]
# calculate distances between points
# Function with adapted version of geosphere::distHaversine that fits into a data.table := \\\ https://stackoverflow.com/questions/36817423/how-to-efficiently-calculate-distance-between-pair-of-coordinates-using-data-tab
# it returns distance in meters
dt.haversine <- function(lat_from, lon_from, lat_to, lon_to, r = 6378137){
radians <- pi/180
lat_to <- lat_to * radians
lat_from <- lat_from * radians
lon_to <- lon_to * radians
lon_from <- lon_from * radians
dLat <- (lat_to - lat_from)
dLon <- (lon_to - lon_from)
a <- (sin(dLat/2)^2) + (cos(lat_from) * cos(lat_to)) * (sin(dLon/2)^2)
return(2 * atan2(sqrt(a), sqrt(1 - a)) * r)
}
# calculate distances
shapes_points_df[, shape_dist_traveled := dt.haversine(shape_pt_lat, shape_pt_lon,
data.table::shift(shape_pt_lat, type = "lag"),
data.table::shift(shape_pt_lon, type = "lag"))]
shapes_points_df[1, shape_dist_traveled := 0] # add 0 to first point
shapes_points_df[, shape_dist_traveled := cumsum(shape_dist_traveled)] # cumulative distance
# set colorder
setcolorder(shapes_points_df, c('shape_id','shape_pt_sequence','shape_pt_lat','shape_pt_lon','shape_dist_traveled'))
head(shapes_points_df)
# save shape.txt
fwrite(shapes_points_df, "./gtfs_brt_transbrasil/shapes.txt")
######## Agency.txt ----------------------
info <- paste0("Opplan2014_" ,caju)
agency_df <- data.table( agency_id = 'agency_rafapereira'
, agency_name = info
, agency_url = 'https://urbandemographics.blogspot.com/'
, agency_timezone = 'America/Sao_Paulo'
)
# save Agency.txt
fwrite(agency_df, "./gtfs_brt_transbrasil/agency.txt")
######## Calendar.txt ----------------------
# empty data.frame
calendar_df <- data.frame( service_id = character()
, monday = integer()
, tuesday = integer()
, wednesday = integer()
, thursday = integer()
, friday = integer()
, saturday = integer()
, sunday = integer()
, start_date = character()
, end_date = character(),
stringsAsFactors=FALSE)
# add servic of trip_id running on weekdays
calendar_df[1,] <- c("transbra",1,1,1,1,1,0,0,"20170101","20190101")
head(calendar_df)
# save routes.txt
fwrite(calendar_df, "./gtfs_brt_transbrasil/calendar.txt")
######## routes.txt ----------------------
# Read table with services
routes <- readxl::read_xlsx("./R scripts/paper_4_BRT_Transbrasil/BRT_operational_plan.xlsx", sheet="data")
setDT(routes)
# add string to route_id
routes[, route_id := paste0('id_', id)]
# calculate average speeds of each route
routes[, avg_speed := km / (ciclo_min /60) ]
# create routes.txt
routes_df <- data.table( route_id = routes$route_id
, agency_id = 'agency_rafapereira'
, route_short_name = routes$route_id
, route_long_name = routes$route_long_name
, route_type = 3 )
# save routes.txt
fwrite(routes_df, "./gtfs_brt_transbrasil/routes.txt")
######## Trips.txt ----------------------
# function to get a route_id and create a two-trip data.table
# One trip heading towards the city center, the other towards Deodoro
get_trip <- function(i) {
data.table( route_id = rep(paste0('id_',i), 2)
, service_id = rep('transbra', 2)
, trip_id = c(paste0('tripid_',i,'_0'), paste0('tripid_',i,'_1'))
, trip_headsign = c('centro', 'deodoro')
, direction_id = c(0,1)
, shape_id = rep('shapeid_transbra',2))
}
# create trip.txt
trips_df <- lapply( routes$id, get_trip) %>% rbindlist()
# save Trips.txt
fwrite(trips_df, "./gtfs_brt_transbrasil/trips.txt")
######## Frequencies.txt ----------------------
# Table with morning peak headways
frequencies_peak_morning <- data.table( route_id = trips_df$route_id
, trip_id = trips_df$trip_id
, start_time = c('07:00:00')
, end_time = c('11:00:00')
)
# get headways reported in the plan
frequencies_peak_morning[ routes, on= "route_id", headway := i.headway ]
# convert headway to seconds
frequencies_peak_morning[, headway := format(headway, "%H:%M:%S")]
frequencies_peak_morning[, headway_secs := lubridate::hms(headway) %>% as.numeric() ]
# remove columns we won't use in the file
frequencies_peak_morning[, c('route_id', 'headway') := NULL]
# generate heads for off-peak ???
frequencies_df <- copy(frequencies_peak_morning)
head(frequencies_df)
# save frequencies.txt
fwrite(frequencies_df, "./gtfs_brt_transbrasil/frequencies.txt")
######## stop_times.txt ----------------------
stop_seq_1_0 <- stop_sequence_all$stop_id[c(1,2,3,4,5,7,8,10,11,12,13,15,17,18,19,20,21,22,23)]
stop_seq_1_1 <- rev(stop_seq_1_0)
stop_seq_2_0 <- stop_sequence_all$stop_id[c(1, 2, 3, 4, 5, 24)]
stop_seq_2_1 <- rev(stop_seq_2_0)
stop_seq_3_0 <- stop_sequence_all$stop_id[c(1, 2, 3, 4, 5, 27 )]
stop_seq_3_1 <- rev(stop_seq_3_0)
stop_seq_4_0 <- stop_sequence_all$stop_id[c(1, 2, 3, 4, 5, 25, 26, 28 )]
stop_seq_4_1 <- rev(stop_seq_4_0)
stop_seq_5_0 <- stop_sequence_all$stop_id[c(6, 7, 8, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23 )]
stop_seq_5_1 <- rev(stop_seq_5_0)
stop_seq_6_0 <- stop_sequence_all$stop_id[c(6, 7, 8, 24)]
stop_seq_6_1 <- rev(stop_seq_6_0)
stop_seq_7_0 <- stop_sequence_all$stop_id[c(6, 7, 8, 27)]
stop_seq_7_1 <- rev(stop_seq_7_0)
stop_seq_8_0 <- stop_sequence_all$stop_id[c(6, 7, 8, 25, 26, 28)]
stop_seq_8_1 <- rev(stop_seq_8_0)
stop_seq_9_0 <- stop_sequence_all$stop_id[c(9, 10, 11, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23 )]
stop_seq_9_1 <- rev(stop_seq_9_0)
stop_seq_10_0 <- stop_sequence_all$stop_id[c(9, 10, 11, 12, 13, 24)]
stop_seq_10_1 <- rev(stop_seq_10_0)
stop_seq_11_0 <- stop_sequence_all$stop_id[c(9, 10, 11, 12, 13, 27)]
stop_seq_11_1 <- rev(stop_seq_11_0)
stop_seq_12_0 <- stop_sequence_all$stop_id[c(9, 10, 11, 12, 13, 25, 26, 28)]
stop_seq_12_1 <- rev(stop_seq_12_0)
stop_seq_13_0 <- stop_sequence_all$stop_id[c(14, 17, 18, 19, 20, 21, 22, 23, 24 )]
stop_seq_13_1 <- rev(stop_seq_13_0)
stop_seq_14_0 <- stop_sequence_all$stop_id[c(14, 17, 18, 19, 20, 21, 22, 23, 27 )]
stop_seq_14_1 <- rev(stop_seq_14_0)
stop_seq_15_0 <- stop_sequence_all$stop_id[c(14, 17, 18, 19, 20, 21, 22, 23, 25, 26, 28 )]
stop_seq_15_1 <- rev(stop_seq_15_0)
stop_seq_16_0 <- stop_sequence_all$stop_id[c(14, 28 )]
stop_seq_16_1 <- rev(stop_seq_16_0)
stop_seq_17_0 <- stop_sequence_all$stop_id[c(16, 17, 18, 19, 20, 21, 22, 23 )]
stop_seq_17_1 <- rev(stop_seq_17_0)
stop_seq_18_0 <- stop_sequence_all$stop_id[c(16, 24 )]
stop_seq_18_1 <- rev(stop_seq_18_0)
stop_seq_19_0 <- stop_sequence_all$stop_id[c(16, 25, 26, 28 )]
stop_seq_19_1 <- rev(stop_seq_19_0)
### Make changes if working on partial scenario --------------
if (caju == "partial"){
# update stop sequences
for( i in ls(pattern="stop_seq_")){
temp <- get(i) # get vector
temp <- temp[ !(temp %in% paradas_depois_caju)] # remove stops after caju
assign(i, temp) %>% return() # update vector of stops
}
# update files
# remove routes that would not exist in the partial scenario
remove(stop_seq_16_0, stop_seq_16_1, stop_seq_18_0, stop_seq_18_1, stop_seq_19_0, stop_seq_19_1)
# save stops.txt
stops_brt_df <- subset(stops_brt_df, !(stop_id %in% paradas_depois_caju ))
fwrite(stops_brt_df, "./gtfs_brt_transbrasil/stops.txt")
# save routes.txt
routes_df <- subset(routes_df, !(route_id %in% c('id_16', 'id_18', 'id_19')))
fwrite(routes_df, "./gtfs_brt_transbrasil/routes.txt")
# save Trips.txt
trips_df <- subset(trips_df, !(route_id %in% c('id_16', 'id_18', 'id_19')))
fwrite(trips_df, "./gtfs_brt_transbrasil/trips.txt")
# save frequencies.txt
frequencies_df <- subset(frequencies_df,trip_id %in% trips_df$trip_id)
fwrite(frequencies_df, "./gtfs_brt_transbrasil/frequencies.txt")
}
# list of all service seqs
ls(pattern="stop_seq_")
# function to get stop times
get_stoptimes <- function(i){
# i = "stop_seq_9_0"
id <- substr(i, 9, 13)
temp_seq <- get(i)
stops_times_df <- data.frame(trip_id = rep(paste0('tripid',id), length(temp_seq))
, stop_id = temp_seq
, stop_sequence = 1:length(temp_seq)
, timepoint = rep(0, length(temp_seq)) )
# # create stoptimes
# stops_times_df0 <- data.frame(trip_id = rep('tripid_transbra0', length(stop_sequence_0))
# , stop_id = stop_sequence_0
# , stop_sequence = 1:length(stop_sequence_0)
# , timepoint = rep(0, length(stop_sequence_0))
# )
#
# stops_times_df1 <- data.frame(trip_id = rep('tripid_transbra1', length(stop_sequence_1))
# , stop_id = stop_sequence_1
# , stop_sequence = 1:length(stop_sequence_1)
# , timepoint = rep(0, length(stop_sequence_1)))
#
# # rbind all stoo_times
# stops_times_df <- rbind(stops_times_df0, stops_times_df1) %>% setDT()
## add empty arrival and departure times
setDT(stops_times_df)[, c('arrival_time', 'departure_time') := times("00:00:00")]
head(stops_times_df)
# add first departure time to each trip
stops_times_df[, arrival_time := c(times("07:00:00"), arrival_time[-1]), by = trip_id]
stops_times_df[, departure_time := c(times("07:01:00"), departure_time[-1]), by = trip_id]
# get distances between stops
# add lat long
stops_times_df <- left_join(stops_times_df, stops_brt_df[, .(stop_id, stop_lat, stop_lon)], by ="stop_id")
setDT(stops_times_df)
# calculate distances in km
stops_times_df[, dist := dt.haversine(stop_lat, stop_lon,
data.table::shift(stop_lat, type = "lag"),
data.table::shift(stop_lon, type = "lag"))/1000, by=trip_id]
stops_times_df[, dist := c(0L, dist[-1]), by = trip_id] # distance to first stop = 0
# remove lat long
stops_times_df[, c('stop_lat', 'stop_lon') := NULL]
# get travel times between stops in minutes
embarcation_time = chron::times( 1 / 1440) # 1 minute in 1440 minutes in a day
# calculate travel_time in seconds
# add route_id
stops_times_df[trips_df, on="trip_id", route_id := i.route_id]
# add average speed
stops_times_df[routes, on="route_id", avg_speed := i.avg_speed]
stops_times_df[, travel_time := (dist / avg_speed) * 60 ]
stops_times_df[, travel_time := c(0L, travel_time[-1]), by = trip_id] # travel time to first stop = 0
# convert seconts to time object
stops_times_df[, travel_time := chron::times( travel_time / 1440)] # 1440 minutes in a day
head(stops_times_df)
# generate sample departure times
stops_times_df[,departure_time := cumsum(arrival_time + ifelse(travel_time==0, embarcation_time, travel_time + embarcation_time )), by = trip_id]
stops_times_df[, arrival_time := departure_time - ifelse(travel_time == 0 , embarcation_time, embarcation_time )]
return(stops_times_df)
}
# get all stoo_times tables
all_stopstimes <- lapply( ls(pattern="stop_seq_") , get_stoptimes) %>% rbindlist()
# check total distance and total travel time by trip
# all_stopstimes[ trip_id =="tripid_10_1"]
all_stopstimes[, .(dist= sum(dist),
time= sum(travel_time)), by = trip_id]
# final edits
# organize columns
all_stopstimes[, c('dist', 'route_id', 'avg_speed', 'travel_time') := NULL] # drop columns
setcolorder(all_stopstimes, c('trip_id', 'arrival_time', 'departure_time', 'stop_id', 'stop_sequence', 'timepoint'))
head(all_stopstimes)
# change times back to character
all_stopstimes[, c('arrival_time', 'departure_time') := .(as.character(arrival_time), as.character(departure_time))]
# save stop_times.txt
fwrite(all_stopstimes, "./gtfs_brt_transbrasil/stop_times.txt")
################ Zip FTGS file -------------------------
setwd("R:/Dropbox/Dout/Data Dout/gtfs_brt_transbrasil")
# get .txt files in folder
txt_files <-list.files(path = ".", pattern = ".txt", full.names = T)
# Save zip files
zip(zipfile = paste0('./gtfs_brt_transbrasil_',info), files = txt_files)
|
0c1bf753118b38424a89d32ef57908aac35cf797
|
1ced37e5243dfc53d7e3631408d0f55b9b109b11
|
/plot3.R
|
1b32a104b4341e59545c70f1977625108350bfea
|
[] |
no_license
|
sjkim76/ExData_Plotting1
|
b15652427703424b8439f3d7baeba53b8a3a897c
|
77f79c67acbfffce9f05346aa5db97b57c8aaf9d
|
refs/heads/master
| 2022-09-11T11:20:19.483799
| 2020-05-28T09:43:10
| 2020-05-28T09:43:10
| 267,216,449
| 0
| 0
| null | 2020-05-27T04:01:00
| 2020-05-27T04:00:59
| null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series for 1-2 Feb 2007,
# drawn on screen and copied to plot3.png (course project ExData_Plotting1).

# Read the full power-consumption file; "?" marks missing values
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

# Keep only the two target days, then drop incomplete rows
two_days <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
two_days <- two_days[complete.cases(two_days), ]

# Make sure the first two sub-metering columns are numeric
two_days$Sub_metering_1 <- as.numeric(two_days$Sub_metering_1)
two_days$Sub_metering_2 <- as.numeric(two_days$Sub_metering_2)

# Combine the Date and Time strings into one timestamp column
two_days$CDate <- strptime(paste(two_days$Date, two_days$Time), format = "%d/%m/%Y %H:%M:%S")

# English locale so weekday labels match the reference figure
Sys.setlocale("LC_TIME", "English")

# Convert the timestamp to POSIXct and drop the now-redundant columns
two_days$CDate <- as.POSIXct(two_days$CDate)
two_days$Date <- NULL
two_days$Time <- NULL

# Plot the three series on a common time axis
plot(two_days$Sub_metering_1 ~ two_days$CDate, type = "l", col = "black",
     xlab = "", ylab = "Energy sub metering")
lines(two_days$Sub_metering_2 ~ two_days$CDate, col = "red")
lines(two_days$Sub_metering_3 ~ two_days$CDate, col = "blue")
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the screen device to a 480x480 PNG and close that device
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
|
9289207811ae9c4cd6e0e6c8ba03a50c452c2f54
|
e20709b1da0e2522b14d31a0b6dfcf0956841f54
|
/man/DirichReg.Rd
|
ba3b1f501103d0e1bd37a84efada9ee326d671f5
|
[
"MIT"
] |
permissive
|
bcbritt/stepseg
|
b6dbe6368277dcc10959e193dd2bdbc218742bad
|
f01f7364af083c010feda08722c1e865168fbd12
|
refs/heads/master
| 2023-05-16T21:42:01.312708
| 2023-05-01T07:02:31
| 2023-05-01T07:02:31
| 264,266,384
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,077
|
rd
|
DirichReg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stepseg.R
\name{DirichReg}
\alias{DirichReg}
\title{Fitting a Dirichlet Regression}
\usage{
DirichReg(
formula,
data,
model = c("common", "alternative"),
subset,
sub.comp,
base,
weights,
control,
verbosity = 0
)
}
\arguments{
\item{formula}{The formula specifying the regression model}
\item{data}{The data set from which the model is constructed}
\item{model}{The parameterization of the model}
\item{subset}{Not used}
\item{sub.comp}{Not used}
\item{base}{Not used}
\item{weights}{Not used}
\item{control}{A list of variables to control the convergence process}
\item{verbosity}{Not used}
}
\value{
A Dirichlet regression model as specified by
\code{\link[DirichletReg]{DirichReg}}
}
\description{
This is a modified form of the
\code{\link[DirichletReg]{DirichReg}} function, adjusted to avoid "model
frame and formula mismatch in model.matrix()" errors that frequently result
when the formula is long and the original function is called from within
another function.
}
|
6f0a3aaf91c74a8d7076d2ee39185945bab54a7e
|
561beed458dfcf06de55c8b9145613adf3e3dad6
|
/admm_lasso_multi_test.r
|
21bd2edae51a64b5fc0968e761e476e3107a5057
|
[] |
no_license
|
deniseduma/respls
|
e75dafd68ecd0fa5905667b78f2a36f44d8947da
|
f326cf1a84ab4734a67a156917f8a2752597be68
|
refs/heads/master
| 2016-09-14T05:36:51.454059
| 2016-05-23T22:30:59
| 2016-05-23T22:30:59
| 59,517,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,859
|
r
|
admm_lasso_multi_test.r
|
# Test driver for the block-sparse ADMM lasso solvers; compares against glmnet.
# NOTE(review): admm_lasso_multi2() and factor() come from the sourced file;
# Matrix/Diagonal come from the Matrix package (loaded as a dependency) -- confirm.
library("glmnet")
library("elasticnet")
source("admm_lasso_multi2.r")
# Problem dimensions: m observations, K blocks of ni variables each (n total)
m <- 200
K <- 20
ni <- 1000
n <- ni*K
# Probability that a given block of the true solution is non-zero
p <- 10/K
# ADMM penalty (rho) and over-relaxation (alpha) parameters
rho <- 1
alpha <- 1.8
#generate block sparse solution vector: each block non-zero with probability p
x <- matrix(0, ni , K)
for (i in 1:K)
if (runif(1) < p)
x[, i] <- rnorm(ni)
xtrue <- x <- Matrix(as.vector(x))
#generate random data matrix
A <- Matrix(rnorm(m*n), nrow=m, ncol=n)
#normalize columns of A
A <- A %*% Diagonal(n, 1/sqrt(colSums(A^2)))
#generate measurement b with noise
b <- A %*% x + Matrix(rnorm(m, 0, sqrt(1)))
#lambda max: largest per-block norm of A'b (smallest lambda giving an all-zero solution)
nrms = numeric(K)
for (i in 1:K) {
Ai <- A[ ,((i-1)*ni+1):(i*ni)]
Aitb <- t(Ai) %*% b
nrms[i] <- sqrt(sum(Aitb^2))
}
lam_max <- max(nrms)
#lam_max <- max(abs(t(A) %*% b))
#regularization parameter
lam <- 0.01*lam_max #best 0.01 for both admm_lasso_multi and admm_lasso_multi2
#Precompute per-block matrix factorizations (factor() is defined in the sourced solver file)
decomps <- vector("list", K)
for (i in 1:K) {
#Ai <- A
Ai <- A[ ,((i-1)*ni+1):(i*ni)]
res<-factor(Ai, rho)
decomps[[i]]<-list(L=res$L, U=res$U)
}
init_x <- Matrix(0, n , 1)
# Time the ADMM solve and report norms of the recovered vs. true solution
start <- proc.time()
#x <- admm_lasso_multi(A,b,init_x,decomps,lam,rho,alpha,F,ni,20)
x <- admm_lasso_multi2(A,b,init_x,lam,rho,alpha,F,ni,20)
elapsed <- proc.time() - start
print(paste("Elapsed time ", elapsed[3], sep=""))
cat("\n")
print(paste("norm(xtrue)=", sqrt(sum(xtrue^2)), ", norm(x)=", sqrt(sum(x^2)), ", length(x)=",length(x), sep=""))
# Reference solution from glmnet at the same lambda
print("glmnet solution")
r <- glmnet(A, b, family="gaussian", nlambda=1, lambda=lam, standardize=FALSE, intercept=FALSE)
print(paste("norm(beta)=", sqrt(sum(r$beta^2)),", length(r$beta)=",length(r$beta),sep=""))
#print("enet solution")
#r <- enet(as.matrix(A), as.matrix(b), normalize=FALSE, intercept=FALSE)
#t <- t(r$beta.pure)
##t <- (t!=0)
##beta <- rowMeans(t)
##beta[beta<0.6] <- 0
#print(paste("norm(beta)=", sqrt(sum(t[, ncol(t)]^2)),", length(beta)=",length(t[ , ncol(t)]),sep=""))
|
3de0623e1143439be0ebe7b5dc5ee68440ad9c7b
|
97b4693734e41e212ce68e00a9c5fab0b4c99aa9
|
/SC_Pre_Process.R
|
d40bcdb41b165f94e73746955c8d36adffc5a9b5
|
[] |
no_license
|
Yuzi-Liu/Airline-Customer-Segmentation
|
f3359ab4dc74759dde3f3109757aef4f19a255b9
|
805c1dd9cf1a3ac48a47efff7191a071bcb4cfa7
|
refs/heads/master
| 2020-06-20T23:06:14.300863
| 2019-07-21T22:12:24
| 2019-07-21T22:12:24
| 197,281,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,638
|
r
|
SC_Pre_Process.R
|
# This program contains the R code for data clean-up and pre-processing for the Sun Country Airlines case
# We start by reading the data file, which is a 1% random sample of the original airline reservations data
# We set the working directory using setwd()
setwd("/Users/yuziliu/Desktop/BANA200")
# Then we read the data using the read.csv function
data <- read.csv("SC_Data.csv")
# Use head() to see the first 6 rows of the data
head(data)
# We will be using the dplyr package for clean up and pre-processing
# (install only when missing instead of reinstalling on every run)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# FIX: in the original script most filter()/mutate() results below were never
# assigned back to `data`, so those clean-up steps silently had no effect.
# Only keep records where we know the birthdate
data <- filter(data, !is.na(birthdateid))
# Only retain records where we know the gender
data$GenderCode <- as.character(data$GenderCode)
data <- filter(data, GenderCode != "")
data$GenderCode <- as.factor(data$GenderCode)
# Some odd age values... we'll replace out-of-range ages with the median
# (assumes Age has no NAs -- otherwise median() would need na.rm = TRUE)
data$Age[data$Age < 0] <- median(data$Age)
data$Age[data$Age > 120] <- median(data$Age)
# If there is no reward number we assign it a 0
data$UFlyRewardsNumber[is.na(data$UFlyRewardsNumber)] <- 0
# We construct a reward status factor variable
data$UflyMemberStatus <- as.character(data$UflyMemberStatus)
data$UflyMemberStatus[data$UflyMemberStatus == ''] <- "non-ufly"
# Discard duplicate records (keep coupon rows that occur exactly once)
data <- group_by(data, PNRLocatorID, CouponSeqNbr, PaxName, ServiceStartCity, ServiceEndCity, ServiceStartDate)
data <- filter(data, n() == 1)
data <- ungroup(data)
# Replace odd one-off booking channels with 'Other'
data$BookingChannel <- as.character(data$BookingChannel)
data$BookingChannel[data$BookingChannel != "Outside Booking" & data$BookingChannel != "SCA Website Booking" & data$BookingChannel != "Tour Operator Portal" & data$BookingChannel != "Reservations Booking" & data$BookingChannel != "SY Vacation"] <- "Other"
data$BookingChannel <- as.factor(data$BookingChannel)
# Only keep records that involve SunCountry airlines tickets, for which MarketingAirlineCode=="SY"
data$MarketingAirlineCode <- as.character(data$MarketingAirlineCode)
data <- filter(data, MarketingAirlineCode == "SY")
data$MarketingAirlineCode <- as.factor(data$MarketingAirlineCode)
# Delete PNRs whose coupon sequence does not start at 1 (data errors)
data <- group_by(data, PNRLocatorID)
data <- mutate(data, error = ifelse(min(CouponSeqNbr) != 1, 1, 0))
data <- filter(data, error == 0)
data <- ungroup(data)
# Create a unique customer ID by concatenating name, gender and birthday
data <- mutate(data, customerID = paste(PaxName, GenderCode, as.character(birthdateid), sep = "-"))
# Create Age buckets: "0-17", "18-24", "25-34", "35-54", "55+" and "N/A".
# FIX: the original nested ifelse() ended with ifelse("N/A") (missing yes/no
# arguments, a runtime error) and was never assigned; "N/A" is now the plain
# final fallback value.
data <- mutate(data, age_group =
  ifelse(Age >= 0 & Age <= 17, "0-17",
    ifelse(Age > 17 & Age <= 24, "18-24",
      ifelse(Age > 24 & Age <= 34, "25-34",
        ifelse(Age > 34 & Age <= 54, "35-54",
          ifelse(Age >= 55, "55+", "N/A"))))))
# For a given PNR, figure out the true origin city (source of first leg)
true_origins <- data %>%
  arrange(PNRLocatorID, CouponSeqNbr) %>%
  group_by(PNRLocatorID, PaxName) %>%
  do(data.frame(true_origin = first(.$ServiceStartCity)))
data <- merge(data, true_origins, by.x = c("PNRLocatorID", "PaxName"), by.y = c("PNRLocatorID", "PaxName"))
# For a given PNR, figure out the final destination (target of last leg)
final_destination <- data %>%
  arrange(PNRLocatorID, CouponSeqNbr) %>%
  group_by(PNRLocatorID, PaxName) %>%
  do(data.frame(final_destination = last(.$ServiceEndCity)))
data <- merge(data, final_destination, by.x = c("PNRLocatorID", "PaxName"), by.y = c("PNRLocatorID", "PaxName"))
# Now we can remove the helper data frames
rm(true_origins)
rm(final_destination)
# We will use the lubridate package for operations involving date strings
if (!requireNamespace("lubridate", quietly = TRUE)) install.packages("lubridate")
library(lubridate)
# "True" destination: the city in which the customer spent the most time
# (stay = days until the next leg; the trailing `default = 0` just creates a
# throwaway column that select() drops -- NAs are zeroed explicitly below)
diff1 <- data %>% arrange(PNRLocatorID, CouponSeqNbr) %>%
  group_by(PNRLocatorID, PaxName) %>%
  mutate(stay = lead(date(ServiceStartDate)) - date(ServiceStartDate), default = 0) %>%
  select(PNRLocatorID, PaxName, ServiceStartCity, ServiceEndCity, ServiceStartDate, stay)
diff1$stay[is.na(diff1$stay)] <- 0
diff1$stay <- as.numeric(diff1$stay)
true_destination <- diff1 %>%
  group_by(PNRLocatorID, PaxName) %>%
  do(data.frame(true_destination = first(.$ServiceEndCity[.$stay == max(.$stay)])))
data <- merge(data, true_destination, by.x = c("PNRLocatorID", "PaxName"), by.y = c("PNRLocatorID", "PaxName"))
rm(diff1)
rm(true_destination)
# Is the booking a round-trip or one-way?
# FIX: the original compared the (already removed) data frames
# true_origins/final_destination; the merged *columns* are true_origin and
# final_destination.
data <- data %>%
  mutate(round_trip = ifelse(as.character(true_origin) == as.character(final_destination), 1, 0))
# What is the size of the travelling group on each PNR?
data <- data %>% mutate(uid = paste(EncryptedName, GenderCode, birthdateid, sep = ""))
data <- data %>%
  group_by(PNRLocatorID) %>%
  mutate(group_size = length(unique(uid)))
# Binary indicator "group": group vs single party travelling
data <- data %>%
  group_by(PNRLocatorID) %>%
  mutate(group = ifelse(group_size > 1, 1, 0))
data <- ungroup(data)
# Calendar quarter of travel: "Q1" months 1-3, ..., "Q4" months 10-12.
# (The original also carried an unassigned, broken nested-ifelse "method 1";
# only the working assignment form is kept. Initialize the column first so the
# subset-assignments below cannot fail on a nonexistent column.)
data$Seasonality <- NA_character_
data$Seasonality[month(data$ServiceStartDate) >= 1 & month(data$ServiceStartDate) <= 3] <- "Q1"
data$Seasonality[month(data$ServiceStartDate) >= 4 & month(data$ServiceStartDate) <= 6] <- "Q2"
data$Seasonality[month(data$ServiceStartDate) >= 7 & month(data$ServiceStartDate) <= 9] <- "Q3"
data$Seasonality[month(data$ServiceStartDate) >= 10 & month(data$ServiceStartDate) <= 12] <- "Q4"
# How many days in advance was the trip booked?
data$PNRCreateDate <- as.Date(data$PNRCreateDate)
data$ServiceStartDate <- as.Date(data$ServiceStartDate)
data <- data %>%
  mutate(days_pre_booked = as.numeric(floor(difftime(ServiceStartDate, PNRCreateDate, units = c("days")))))
# Persist the cleaned data set
write.csv(data, "SC_data_CleanedUp.csv")
|
54b2be292d11c6b45c2b07f851318fbfbd76305a
|
bbaebbebb04cb38585f0870801a72c13f6d37487
|
/plot4.R
|
cb6c1319d9ad8f6fdae02f89bea57c5bde9c4015
|
[] |
no_license
|
njkranton/ExData_Plotting1
|
614fe3ff4a76714e8cedbaa78d098b3e605080a7
|
1ab0e357301a57bc0c5fb9820c62c80a3b740aa2
|
refs/heads/master
| 2021-01-17T20:10:37.613549
| 2015-10-10T14:43:07
| 2015-10-10T14:43:07
| 43,971,994
| 0
| 0
| null | 2015-10-09T18:06:51
| 2015-10-09T18:06:51
| null |
UTF-8
|
R
| false
| false
| 2,421
|
r
|
plot4.R
|
plot4 <- function() {
  ## Render the 2x2 panel of household power plots for 2007-02-01/02 into
  ## plot4.png (default 480x480 png device) in the working directory.
  ## Called for its side effect only; reads household_power_consumption.txt.

  ## Day names must render in US English to match the reference plots.
  ## FIX: set LC_TIME only (the only category that matters here, instead of
  ## LC_ALL) and restore the caller's locale on exit.
  old_locale <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", old_locale), add = TRUE)
  Sys.setlocale("LC_TIME", "English_United States.1252")

  ## Read the raw data. FIX: declaring "?" in na.strings lets read.table parse
  ## the measurement columns as numeric directly, so the per-column
  ## as.numeric() coercions of the original are no longer needed.
  rawdata <- read.table("household_power_consumption.txt", header = TRUE,
                        sep = ";", na.strings = "?", stringsAsFactors = FALSE)

  ## Subset only the data we need, i.e. days 01-02-2007 and 02-02-2007
  subdata <- rawdata[rawdata$Date == "1/2/2007" | rawdata$Date == "2/2/2007", ]

  ## Build a proper timestamp column from the Date and Time strings
  datetime <- as.POSIXct(paste(subdata$Date, subdata$Time), format = "%d/%m/%Y %H:%M:%S")
  data <- cbind(subdata, datetime)

  ## Open the PNG device; FIX: close it via on.exit so the device is not
  ## leaked if any plotting call fails.
  png(filename = "plot4.png")
  on.exit(dev.off(), add = TRUE)

  ## 2x2 layout, filled column-wise as in the reference figure
  par(mfcol = c(2, 2))
  with(data, {
    ## Top-left: global active power over time
    plot(datetime, Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
    ## Bottom-left: the three sub-metering series with a legend
    plot(datetime, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
    lines(data$datetime, data$Sub_metering_2, col = "red")
    lines(data$datetime, data$Sub_metering_3, col = "blue")
    legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n",
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    ## Top-right: voltage
    plot(datetime, Voltage, type = "l", ylab = "Voltage")
    ## Bottom-right: global reactive power
    plot(datetime, Global_reactive_power, type = "l")
  })
}
|
3fa33b4069b0ecb335273f86a719ae35e07e73a4
|
bec2aef1fa0722ad373f0a51bcf119c3028855aa
|
/chap06/exam04.R
|
733d1259d8c44f4685553183129fd136de34f901
|
[] |
no_license
|
floraOuO/R
|
751a275163758716806752e98f1a4cd3f06f6cc2
|
4cace0f4158513b08701c2c4f9987d2c1803d8f6
|
refs/heads/master
| 2020-04-28T04:02:56.088121
| 2019-03-22T09:07:50
| 2019-03-22T09:07:50
| 174,962,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
exam04.R
|
library(dplyr)
library(sqldf)
library(ggplot2)
# mpg ships with ggplot2; work on a local copy.
# Exercise: top 3 cars by the mean of city (cty) and highway (hwy) mileage,
# solved three equivalent ways (sqldf, dplyr, base R).
df_mpg = mpg
#1.sqldf
sqldf("
select df_mpg.*, cty+hwy as total, (cty+hwy)/2. as mean from df_mpg
order by mean desc limit 3
")
#2.dplyr
df_mpg %>% mutate(total = cty + hwy) ->df_mpg
df_mpg %>% mutate(mean = total/2) -> df_mpg
df_mpg %>% arrange(desc(mean)) %>% head(3)
# same dplyr answer written as a single pipeline
df_mpg %>%
mutate(total = cty + hwy,
mean = total/2) %>%
arrange(desc(mean)) %>% head(3)
#3. base R syntax
df_mpg$total = df_mpg$cty + df_mpg$hwy
df_mpg$mean = df_mpg$total/2
head(df_mpg[order(-df_mpg$mean),],3)
|
8bf52395b76c253f7745b12605a4b462f3498764
|
a6b21b7097134ebed49f92f9255d59515c274235
|
/Composition.R
|
a329ba21bddc7d94d2b7ae6aaff841bf5f92afab
|
[] |
no_license
|
bsalehe/NWT_MovingUphill3
|
c46f3495e004db7bc77996179fcc80800bd679c1
|
d6a17722a31688bceb5b1cd3b9e8e07ab22b6d98
|
refs/heads/master
| 2020-12-22T23:30:27.798146
| 2019-01-24T17:42:52
| 2019-01-24T17:42:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,252
|
r
|
Composition.R
|
###### Change in relative abundance ######
# NOTE(review): datEukS3k2/datEukN3k2/datBacS3k2/datITSS3k2 and biogeo6 are
# created elsewhere in the project; the indexing below assumes sample metadata
# in the first 31 columns and taxon relative abundances after that -- confirm.
datEukS3k2
datEukN3k2
datBacS3k2
datITSS3k2
#change a colname, it can't have a dash in it
colnames(datBacS3k2)[78]<-"WPS.2"
# bacterial phyla with summed relative abundance > 2 (printed for inspection)
names(which(colSums(datBacS3k2[,32:81])>2))
# long-format table of the selected bacterial phyla
relBac<-datBacS3k2 %>%
dplyr::select(Sample_name,Acidobacteria,Actinobacteria,Bacteroidetes,Cyanobacteria,Gemmatimonadetes,Heterotrophic_Chloroflexi,Heterotrophic_Planctomycetes,Heterotrophic_Proteobacteria,Verrucomicrobia,WPS.2) %>%
gather(Taxa,abun,Acidobacteria:WPS.2) %>%
mutate(type="A. Bacteria")
#relBac<-datBacS3k2 %>%
# dplyr::select(Sample_name,Acidobacteria,Actinobacteria,AD3,Armatimonadetes,Bacteroidetes,Cyanobacteria,Gemmatimonadetes,Heterotrophic_Chloroflexi,Heterotrophic_Planctomycetes,Heterotrophic_Proteobacteria,Verrucomicrobia,WPS.2) %>%
# gather(Taxa,abun,Acidobacteria:WPS.2) %>%
# mutate(type="A. Bacteria")
names(which(colSums(datITSS3k2[,32:42])>.5))
relITS<-datITSS3k2 %>%
dplyr::select(Sample_name,Ascomycota,Basidiomycota,Glomeromycota,Mortierellomycota) %>%
gather(Taxa,abun,Ascomycota,Basidiomycota,Glomeromycota,Mortierellomycota) %>%
mutate(type="B. Fungi")
#if the label has the word "unknown" in it, then I don't want to plot it. it means that it is unknown at a level higher than phylum
sort(colSums(datEukS3k2[,32:60]))#46
names(which(colSums(datEukS3k2[,32:60])>1))#,Alveolata,Archaeplastida,Photosynthetic_Stramenopiles,Rhizaria
relEukS<-datEukS3k2 %>%
dplyr::select(Sample_name,Ciliophora,Cercozoa,Chlorophyta,Photosynthetic_Stramenopiles,Heterotrophic_Euglenozoa,Charophyta,Nonphotosynthetic_Stramenopiles) %>%
gather(Taxa,abun,Ciliophora,Cercozoa,Chlorophyta,Photosynthetic_Stramenopiles,Heterotrophic_Euglenozoa,Charophyta,Nonphotosynthetic_Stramenopiles) %>%
mutate(type="C. Small Eukaryotes")
names(which(colSums(datEukN3k2[,32:39])>1))
relEukN<-datEukN3k2 %>%
dplyr::select(Sample_name,Arthropoda,Nematoda,Rotifera,Tardigrada) %>%
gather(Taxa,abun,Arthropoda,Nematoda,Rotifera,Tardigrada) %>%
mutate(type="D. Soil mesofauna")
# stack the four groups into one long table
relALL1<-rbind(relBac,relITS,relEukS,relEukN)#
head(relALL1)
#merge with biogeo6 to get pca1, some samples will be lost b/c we are going from 90 to 75 plots
relALL<-merge(relALL1,biogeo6,"Sample_name")
head(relALL)
#plotdata<-relALL %>%
# mutate(typeTaxa=paste(type,Taxa)) %>%
# group_by(Taxa,lomehi,type,typeTaxa) %>%
# summarise(mean_abun = mean(abun),se_abun=std.error(abun))
# #%>%filter(mean_abun>.04)
#this was weird, maybe something changed in ggplot or dplyr because the colors were messing up and it was listing the legend in alphabetical order by taxa rather than the order in the "plotdata" dataframe. the workaround was to set the levels of plotdata$Taxa so they were correct
plotdata<-relALL %>%
mutate(typeTaxa=paste(type,Taxa)) %>%
group_by(typeTaxa,Taxa,lomehi,type) %>%
summarise(mean_abun = mean(abun),se_abun=std.error(abun))
plotdata$Taxa<-factor(plotdata$Taxa,levels=unique(plotdata$Taxa))
as.data.frame(plotdata)
plotdata$lomehi<-factor(plotdata$lomehi,levels=c("lo","me","hi"))
# NOTE(review): scale_color_manual below uses mycols, which is only defined
# *after* this plot in the file -- this script was presumably run interactively
# out of order; running it top-to-bottom would fail here.
pdf("/Users/farrer/Dropbox/EmilyComputerBackup/Documents/Niwot_King/Figures&Stats/kingdata/Figs/relabuntaxavsplantdensitygroupsR2.pdf",width=6.5,height=4.3)#,width=4.3, height=5.3
ggplot(plotdata,aes(x=lomehi,y=mean_abun,group=typeTaxa,color=Taxa))+
labs(x = "",y="Relative abundance")+
theme_classic()+
theme(line=element_line(size=.3),text=element_text(size=10),strip.background = element_rect(colour="white", fill="white"),axis.line=element_line(color="gray30",size=.3),legend.key.size = unit(.6, "line"))+
geom_line(stat = "identity", position = "identity",size=.5)+
geom_point(size=2)+
geom_errorbar(aes(ymax = mean_abun+se_abun, ymin=mean_abun-se_abun),width=.15,size=.5)+
scale_color_manual(values=mycols) +
facet_wrap(~type,nrow=3,scales="free")+
guides(col = guide_legend(ncol = 1))
dev.off()
#10 bacteria, 4 fungi, 7 small euks, 4 large euks
# manual palette: one color per taxon, in the factor-level order set above
mycols<-c("#4BC366",#light green
"#D9A125",#yellow
"#6F94DE",#light blue
"#B4405E",#red
"#D185E0",
"#ff99a4",#light pink,
"#659125",#green
"#cf6f23",#orange
"#5C426C",#dark purple
"#6768A3",#medium blue last bact
"#D9A125",#yellow
"#B4405E",#red
"#659125",
"#6768A3",
"#6F94DE",#light blue
"#5C426C",#dark purple
"#D185E0",#light purple
"#cf6f23",#orange
"#D9A125",#yellow
"#659125",#green
"#ff99a4",#light pink,
"#B4405E", #red
"#4BC366", #light green
"#5C426C", #dark purple
"#D9A125") #yellow
#scatter plots of relative abundance vs log plant density, one panel per group
head(relALL)
plotdata<-relALL %>%
mutate(typeTaxa=paste(type,Taxa))
plotdata$Taxa<-factor(plotdata$Taxa,levels=unique(plotdata$Taxa))
pdf("/Users/farrer/Dropbox/EmilyComputerBackup/Documents/Niwot_King/Figures&Stats/kingdata/Figs/relabuntaxavsplantdensitygroupsBFSLENscatter.pdf",width=6.5,height=6)#,width=4.3, height=5.3
ggplot(plotdata,aes(x=log10(Plant_Dens+1),y=abun,group=typeTaxa,color=Taxa))+
labs(x = "",y="Relative abundance")+
theme_classic()+
theme(line=element_line(size=.3),text=element_text(size=10),strip.background = element_rect(colour="white", fill="white"),axis.line=element_line(color="gray30",size=.3),legend.key.size = unit(.6, "line"))+
scale_color_manual(values=mycols) +
geom_point(size=.2)+
geom_smooth(method=lm,se=F,size=.8) +
facet_wrap(~type,nrow=3,scales="free")+
guides(col = guide_legend(ncol = 1))
dev.off()
#Doing anova on all of the above taxa groups: abundance ~ plant-density class
# (lo/me/hi), one model per taxon, with FDR-adjusted p-values
ind<-length(unique(relALL$Taxa))
anovaoutput<-data.frame(Taxa=rep(NA,ind),F=rep(NA,ind),P=rep(NA,ind))
for(i in 1:ind){
current.taxa<-unique(relALL$Taxa)[i]
temp<-relALL %>%
filter(Taxa==current.taxa)
mod<-anova(lm(abun~lomehi,data=temp))
anovaoutput[i,1]<-current.taxa
anovaoutput[i,2]<-mod$`F value`[1]
anovaoutput[i,3]<-mod$`Pr(>F)`[1]
}
anovaoutput$qval<-p.adjust(anovaoutput$P,method="fdr")
anovaoutput$qval<-format(anovaoutput$qval,scientific=F)
anovaoutput$Taxa<-factor(anovaoutput$Taxa,levels=unique(plotdata$Taxa))
# print the results in plotting order
anovaoutput[order(anovaoutput$Taxa),]
#90 samples, relative abundance otu table, grouped by "kingdom" aka "labels"
datBacS3k2
#dim(comm.dataBac)
#comm.dataBac[1:10,1:32]
#colnames(datBac3fk2)[78]<-"WCHB1.60"
# bacterial phyla with summed relative abundance > 3 (printed for inspection)
names(which(colSums(datBacS3k2[,32:74])>3))
relBac<-datBacS3k2 %>%
dplyr::select(lomehi,Sample_name, Plant_Div, Plant_Dens,Acidobacteria,Actinobacteria,Bacteroidetes,Heterotrophic_Chloroflexi,Cyanobacteria,Planctomycetes,Proteobacteria,Verrucomicrobia) %>%
gather(Taxa,abun,Acidobacteria:Verrucomicrobia) %>%
mutate(type="A. Bacteria")
# mean and standard error of abundance per taxon and plant-density class
plotdata<-relBac %>%
group_by(Taxa,lomehi) %>%
summarise(mean_abun = mean(abun),se_abun=std.error(abun))
plotdata$lomehi<-factor(plotdata$lomehi,levels=c("lo","me","hi"))
data.frame(plotdata)
# FIX: the original read "ggplot(plotdata,aes(...)+" -- the closing parenthesis
# of the ggplot() call was missing, leaving the call unclosed and making this
# statement (and everything after it in the file) unparseable.
ggplot(plotdata,aes(x=lomehi,y=mean_abun,color=Taxa))+
labs(x = "",y="Relative abundance")+
theme_classic()+
#theme(line=element_line(size=.3),text=element_text(size=10),strip.background = element_rect(colour="white", fill="white"),axis.line=element_line(color="gray30",size=.3),legend.key.size = unit(.6, "line"))+
geom_line(stat = "identity", position = "identity",size=.5)
# manual palette for the eight bacterial phyla
mycols<-c("#4BC366",
"#D9A125",
"#659125",
"#6768A3",
"#5C426C",
"#D185E0",
"#6F94DE",
"#B4405E")
ggplot(plotdata,aes(x=lomehi,y=mean_abun,color=Taxa,group=Taxa))+
labs(x = "",y="Relative abundance")+
theme_classic()+
theme(line=element_line(size=.3),text=element_text(size=10),strip.background = element_rect(colour="white", fill="white"),axis.line=element_line(color="gray30",size=.3),legend.key.size = unit(.6, "line"))+
geom_line(size=.5)+
geom_point(size=2)+
geom_errorbar(aes(ymax = mean_abun+se_abun, ymin=mean_abun-se_abun),width=.15,size=.5)+
scale_color_manual(values=mycols) +
facet_wrap(~type,nrow=3,scales="free")+
guides(col = guide_legend(ncol = 1))
# NMDS ordination (Bray-Curtis) of the bacterial community table
mynmds<-metaMDS(comm.dataBac[,32:3430],dist="bray",trymax = 1000)
#old lomehi: color points by plant-density class
col=ifelse(comm.dataBac$lomehi=="lo","lightblue",NA)
col[which(comm.dataBac$lomehi=="me")]<-"dodgerblue"
col[which(comm.dataBac$lomehi=="hi")]<-"darkblue"
plot(scores(mynmds),col=col,pch=21,bg=col)#-scores(mynmds)[,1],scores(mynmds)[,2]
ordiellipse(mynmds,groups=col,col=c("darkblue","dodgerblue","lightblue"),conf=.99999,kind="se",lwd=2)#
legend("bottomright",c("Early","Mid","Late"),col=c("#ab3b57","#5268cb","#639e51"),pch=21,pt.bg=c("#ab3b57","#5268cb","#639e51"),lty=1,bty="n")
#contains all data plus biogeochemistry and plant cover, 75 samples
#biogeo info 53
#N 143
#S 1124
#Bact 3399
#ITS 1122
ordi.bact<-cbind(comm.bio[,1:53],comm.bio[,1321:4719])
mynmds<-metaMDS(ordi.bact[,54:3452],dist="bray",trymax = 1000)
#new lomehi: recompute colors for the 75-sample table
col=ifelse(ordi.bact$lomehi=="lo","lightblue",NA)
col[which(ordi.bact$lomehi=="me")]<-"dodgerblue"
col[which(ordi.bact$lomehi=="hi")]<-"darkblue"
plot(scores(mynmds),col=col,pch=21,bg=col)#-scores(mynmds)[,1],scores(mynmds)[,2]
ordiellipse(mynmds,groups=col,col=c("darkblue","dodgerblue","lightblue"),conf=.99999,kind="se",lwd=2)#
# sanity check that the metadata/taxa column offsets line up between tables
comm.dataBac[1:5,32:33]
ordi.bact[1:5,54:55]
|
0cca76fdf6f49fd87c550e45fd3b3564c41dfdb3
|
a9fdb6d54b83f7fae941d919086a349768ececb2
|
/man/dm.Rd
|
78c34eaab65bbdd40e51f7f87c3ab044da1edfaf
|
[] |
no_license
|
agbaca/twitteR
|
68b57039066dac5802487d8c5a1caadaa8f60c34
|
8cd265b919f8d24950904b87cc36c223e3699f51
|
refs/heads/master
| 2021-01-12T12:26:08.574034
| 2017-02-26T03:50:24
| 2017-02-26T03:50:24
| 72,490,855
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
rd
|
dm.Rd
|
\name{dmGet}
\alias{dmGet}
\alias{dmSent}
\alias{dmDestroy}
\alias{dmSend}
\title{ Functions to manipulate Twitter direct messages }
\description{
These functions allow you to interact with, send, and delete direct
messages (DMs) in Twitter.
}
\usage{
dmGet(n=25, sinceID=NULL, maxID=NULL, ...)
dmSent(n=25, sinceID=NULL, maxID=NULL, ...)
dmDestroy(dm, ...)
dmSend(text, user, ...)
}
\arguments{
\item{text}{The text of a message to send}
\item{user}{The user to send a message to, either \code{character} or
an \code{\link{user}} object.}
\item{dm}{The message to delete, an object of class \code{\link{directMessage}}}
\item{n}{ The maximum number of direct messages to return }
\item{sinceID}{If not \code{NULL}, an ID representing the earliest
boundary}
\item{maxID}{If not \code{NULL}, an ID representing the newest ID you
wish to retrieve}
\item{...}{Further arguments to pass along the communication chain}
}
\value{
These functions will not work without \code{OAuth} authentication.
The \code{dmGet} and \code{dmSent} functions will return a list of
\code{\link{directMessage}} objects. The former will retrieve DMs
sent to the user while the latter retrieves messages sent from the user.
The \code{dmDestroy} function takes a \code{\link{directMessage}}
object (perhaps from either \code{dmGet} or \code{dmSent}) and will
delete it from the Twitter server.
The \code{dmSend} function will send a message to another Twitter user.
}
\author{Jeff Gentry}
\seealso{\code{\link{directMessage}}, \code{\link{registerTwitterOAuth}}}
\examples{
\dontrun{
dms <- dmGet()
dms
## delete the first one
dms[[1]]$destroy()
dmDestroy(dms[[2]])
## send a DM
dmSend('Testing out twitteR!', 'twitter')
}
}
\keyword{ interface }
|
527b884fa53ed37e747c8d728e7894ba4b08676e
|
55ba4622941c73a4f1002f9c2b57bf46b614aa3d
|
/code/Fibroblast_analysis/Fibroblast_DM.R
|
a3c3318a4577aad806d35f13b31012787d94239e
|
[
"MIT"
] |
permissive
|
malihhhh/CUIMC-NYP_COVID_autopsy_lung
|
2089693eeb0bd08973026578b285a8a16f266ec0
|
bf6fc4add36095c7bdc12b6e6ede33d768530bb7
|
refs/heads/main
| 2023-04-24T00:46:49.969287
| 2021-05-03T14:42:08
| 2021-05-03T14:42:08
| 548,264,345
| 1
| 0
|
MIT
| 2022-10-09T08:02:03
| 2022-10-09T08:02:02
| null |
UTF-8
|
R
| false
| false
| 2,379
|
r
|
Fibroblast_DM.R
|
#!/usr/bin/env Rscript
#### Fibroblast analysis: Diffusion maps
#### Author: Jana Biermann, PhD
library(Seurat)
library(destiny)
library(dplyr)
library(ggplot2)
library(cowplot)
library(scater)
library(SingleCellExperiment)
library(gplots)
library(viridis)
library(scales)
# Color palette used for the disease-status plot below
colors <- c('#006E82', '#AA0A3C', '#F0E442', '#00A0FA', '#FA5078', '#005AC8', '#CC79A7', '#FAE6BE', '#0072B2', '#A0FA82', '#F0F032', '#0AB45A', '#FA7850', '#14D2DC', '#FA78FA')
# Read in file (integrated Seurat object of all lung samples)
patient <- readRDS('data/lungs_all/data_lungs_all.rds')
# Subset to fibroblasts
patient <- subset(patient, cell_type_fine %in% c('Adventitial FB', 'Alveolar FB', 'Intermediate pathological FB', 'Other FB', 'Pathological FB'))
# Rerun Seurat workflow (scaling, PCA, UMAP) on the fibroblast subset only
patient <- ScaleData(object = patient)
patient <- RunPCA(object = patient)
patient <- RunUMAP(object = patient, dims = 1:30)
# Diffusion component analysis: build an ExpressionSet (cells x genes) from the
# integrated assay and attach the Seurat metadata as phenotype data
es <- as.ExpressionSet(as.data.frame(t(patient@assays$integrated@data)))
es@phenoData@data <- patient@meta.data
# Make diffusion map on the first 30 PCs
dm <- DiffusionMap(es, verbose = T, n_pcs = 30)
# Save diffusion maps
# NOTE(review): ifelse() is used purely for its dir.create() side effect;
# a plain if (!dir.exists(...)) dir.create(...) would be clearer.
ifelse(!dir.exists(file.path('data/lungs_all/diffusion')), dir.create(file.path('data/lungs_all/diffusion')), FALSE)
ifelse(!dir.exists(file.path('data/lungs_all/diffusion/fibroblasts')), dir.create(file.path('data/lungs_all/diffusion/fibroblasts')), FALSE)
pdf('data/lungs_all/diffusion/fibroblasts/!dm_fibroblasts_cov_ctr.pdf')
# Diffusion maps: one page colored by cell type, one by disease status
par(mar = c(5.1, 4.1, 4.1, 2.1), xpd = TRUE)
palette(c('#8214A0', '#00A0FA', '#0AB45A', '#14D2DC', '#FA7850'))
plot(dm, col = as.factor(es@phenoData@data$cell_type_fine), main = 'Cell type (COVID-19 and control cells)', pch = 20)
legend('bottomright', inset = c(-0.05, -0.12), legend = levels(as.factor(es@phenoData@data$cell_type_fine)), pch = 16, col = as.factor(levels(as.factor(es@phenoData@data$cell_type_fine))), bty = 'n', cex = 0.85)
palette(colors)
plot(dm, col = as.factor(es@phenoData@data$group), main = 'Disease status (COVID-19 and control cells)', pch = 20)
legend('bottomright', inset = c(0, 0), legend = levels(as.factor(es@phenoData@data$group)), pch = 16, col = as.factor(levels(as.factor(es@phenoData@data$group))), bty = 'n')
dev.off()
# Save the full workspace image plus the processed Seurat object
save.image('data/lungs_all/diffusion/fibroblasts/image_fibroblasts.RData')
saveRDS(patient, 'data/lungs_all/diffusion/fibroblasts/data_fibroblasts_cov_ctr.rds')
|
50675003c47abaf4b2f7bc6fb7d08294baff8d30
|
88863cb16f35cd479d43f2e7852d20064daa0c89
|
/Ford/analysis/thresholdV11.R
|
69636ada6d43f90d1f6eb28646596d249b89731e
|
[] |
no_license
|
chrishefele/kaggle-sample-code
|
842c3cd766003f3b8257fddc4d61b919e87526c4
|
1c04e859c7376f8757b011ed5a9a1f455bd598b9
|
refs/heads/master
| 2020-12-29T12:18:09.957285
| 2020-12-22T20:16:35
| 2020-12-22T20:16:35
| 238,604,678
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
thresholdV11.R
|
library(caTools)
# Read the Ford driver-alertness training data and draw a 10k-row random sample.
train <- read.csv("~/Ford/download/fordTrain.csv")
train.samp <- train[sample(nrow(train), 10000), ]

# pdf(file="thresholdROCs.pdf")

# Variable under study: map its values to empirical percentiles via the ECDF.
var.name <- "V11"
data <- train.samp[[var.name]]
data.percentiles <- ecdf(data)(data)

# Build one 0/1 indicator column per decile threshold (10%..90%): the
# indicator is 1 when the observation falls below that percentile cutoff.
pct.thresholds <- (1:9) / 10.0
df <- data.frame(data.percentiles)
for (pct.thresh in pct.thresholds) {
  thresh.label <- paste0("thresh_", pct.thresh)
  thresh.data <- (data.percentiles < pct.thresh) * 1
  df[[thresh.label]] <- thresh.data
}

# AUC of the raw percentiles and of each thresholded version against IsAlert.
aucs <- colAUC(df, train.samp$IsAlert, plotROC = TRUE)
print(var.name)
print(aucs)
|
6d5aa4564602cc3b3f777ff0667ab15cc297127d
|
3e3a9e3883d505b12924e9764b0639a56a8fc876
|
/boats.r
|
bfaa3f5826a3d29707a75e85d0a1bf7d8f38068d
|
[] |
no_license
|
djhunter/quatview
|
26d31240810b679a722506934d70ae2dd860481b
|
ff4b915426937e78330c0b683a203a6a8b0ccbfd
|
refs/heads/master
| 2020-12-24T18:12:38.072546
| 2020-08-01T17:09:22
| 2020-08-01T17:09:22
| 20,194,872
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,568
|
r
|
boats.r
|
# Draw simple 3D sailboats at given positions with given 3D orientations,
# rendered with either rgl or scatterplot3d.
#
# Arguments (as used below):
#   orientation - object coercible to class 'rotmatrix' (one rotation per boat;
#                 NOTE(review): 'rotmatrix' appears to come from an orientation/
#                 rotation package loaded elsewhere -- confirm)
#   x, y, z     - boat positions; y, z, scale and col are recycled to the
#                 number of orientations
#   scale       - per-boat size multiplier
#   col         - per-boat colour
#   add         - FALSE for a fresh plot; or the (invisible) return value of a
#                 previous boat3d() call to add boats to that plot
#   box, axes   - decoration switches (box is only used by scatterplot3d)
#   graphics    - preferred renderer, 'rgl' or 'scatterplot3d'
#   ...         - extra arguments forwarded to scatterplot3d()
# Returns, invisibly, a renderer handle (rgl device id or scatterplot3d object)
# tagged with attr 'graphics', suitable for passing back as 'add'.
boat3d <- function(orientation, x=1:length(orientation), y = 0, z = 0, scale = 0.25,
col = 'red', add = FALSE, box = FALSE, axes = TRUE,
graphics = c('rgl', 'scatterplot3d'), ...) {
# When adding to an existing plot without an explicit renderer choice,
# reuse the renderer recorded on the 'add' handle
if (!missing(add) && missing(graphics)) graphics <- attr(add, 'graphics')
orientation <- as(orientation, 'rotmatrix')
len <- length(orientation)
# Recycle per-boat parameters so each boat has its own value
y <- rep(y, length=len)
z <- rep(z, length=len)
scale <- rep(scale, length=len)
col <- rep(col, length=len)
# Boat geometry in model coordinates: 4 triangles (stern, bows, sail)...
# Back Left bow Rt bow Sail
tx <- c(-1, 1, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0)
ty <- c( 4, 4, 4, 0, 1,1.5,0,1.5, 1, 1, 1, 4)-2
tz <- c( 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 4, 0)-1
# ...plus 2 quads for the hull sides
# Rt side Lt side
qx <- c(-1, 0, 0, -1, 1, 0, 0, 1)
qy <- c( 4, 4, 1, 1.5,1.5,1, 4, 4)-2
qz <- c( 1, 0, 0, 1, 1, 0, 0, 1)-1
# Keep only renderers that are actually installed; first available wins
graphics <- basename(find.package(graphics, quiet = TRUE))
if (!length(graphics)) stop('Need 3D renderer: rgl or scatterplot3d')
graphics <- graphics[1]
require(graphics, character.only = TRUE)
if (graphics == 'rgl') {
# add == FALSE: open a new rgl device (or clear the current one);
# otherwise 'add' is an rgl device id to switch to
if (is.logical(add)) {
if (!add) {
if (is.null(rgl.cur())) {
open3d()
}
else
{
clear3d()
}
bg3d(col='white')
}
}
else rgl.set(add)
nx <- length(x)
# Draw the triangles: scale, rotate and translate each boat's vertices
verts <- rbind(tx,ty,tz)
for (i in 1:nx) {
newv <- verts*scale[i]
newv <- t(orientation[[i]]) %*% newv
newv[1,] <- newv[1,] + x[i]
newv[2,] <- newv[2,] + y[i]
newv[3,] <- newv[3,] + z[i]
triangles3d(newv[1,],newv[2,],newv[3,],col=col[i])
}
# Same transform for the hull quads
verts <- rbind(qx,qy,qz)
for (i in 1:nx) {
newv <- verts*scale[i]
newv <- t(orientation[[i]]) %*% newv
newv[1,] <- newv[1,] + x[i]
newv[2,] <- newv[2,] + y[i]
newv[3,] <- newv[3,] + z[i]
quads3d(newv[1,],newv[2,],newv[3,],col=col[i])
}
if (axes) decorate3d()
# Return the rgl device id so later calls can add to this scene
context <- rgl.cur()
attr(context, 'graphics') <- 'rgl'
invisible(context)
}
else if (graphics == 'scatterplot3d') {
# scatterplot3d has no filled-polygon primitive, so draw each face as a
# closed polyline: repeat the first vertex of every triangle/quad
tindices <- rep(c(1:3,1), 4) + rep(3*(0:3), each = 4)
verts <- rbind(tx[tindices],ty[tindices],tz[tindices])
qindices <- rep(c(1:4,1), 2) + rep(4*(0:1), each = 5)
verts <- cbind(verts, rbind(qx[qindices],qy[qindices],qz[qindices]))
ntv <- length(tindices)
nqv <- length(qindices)
nv <- ntv+nqv
nx <- length(x)
# p holds the transformed vertices of every boat, nv columns per boat
p <- matrix(NA, 3, nx*nv)
for (i in 1:nx) {
newv <- verts*scale[i]
newv <- t(orientation[[i]]) %*% newv
newv[1,] <- newv[1,] + x[i]
newv[2,] <- newv[2,] + y[i]
newv[3,] <- newv[3,] + z[i]
p[,(nv*(i-1)+1):(nv*i)] <- newv
}
# Choose cubic axis limits so boats are not distorted
# (note: local 'range' shadows base::range only after base::range is used)
xrange <- diff(range(p[1,]))
yrange <- diff(range(p[2,]))
zrange <- diff(range(p[3,]))
range <- max(xrange,yrange,zrange)
xlim <- mean(range(p[1,]))+c(-range/2,range/2)
ylim <- mean(range(p[2,]))+c(-range/2,range/2)
zlim <- mean(range(p[3,]))+c(-range/2,range/2)
if (is.logical(add)) {
if (!add) splot <- scatterplot3d(t(p),type='n',xlim=xlim, ylim=ylim, zlim=zlim, box=box, axis=axes, ...)
else stop('Must set add to result of previous call to add to boat3d plot.')
}
else splot <- add
# points3d closure from the scatterplot3d object draws in its coordinates
pfun <- splot$points3d
for (i in 1:nx) {
# draw triangles
for (j in 1:4) pfun(t(p[,(nv*(i-1)+4*(j-1)+1):(nv*(i-1)+4*j)]), type='l', col=col[i])
# draw quads
for (j in 1:2) pfun(t(p[,(nv*(i-1)+5*(j-1)+17):(nv*(i-1)+5*j+16)]), type='l', col=col[i])
}
attr(splot, 'graphics') <- 'scatterplot3d'
invisible(splot)
} else
stop('Need rgl or scatterplot3d')
}
|
d0c44bdea51bd01f0f92049391a6a49c8d2df444
|
2cc56a6341f179923977128ad90bb31419e033d0
|
/man/format_pd.Rd
|
11f1959d7da3886608f44cfb2b6738f13f792f95
|
[] |
no_license
|
cran/insight
|
5e1d2d1c46478c603b491f53aa80de57bc8f54b4
|
247206683ad374a1ba179356410d095f6861aede
|
refs/heads/master
| 2023-07-19T11:33:37.490704
| 2023-06-29T13:30:02
| 2023-06-29T13:30:02
| 174,554,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
format_pd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_pd.R
\name{format_pd}
\alias{format_pd}
\title{Probability of direction (pd) formatting}
\usage{
format_pd(pd, stars = FALSE, stars_only = FALSE, name = "pd")
}
\arguments{
\item{pd}{Probability of direction (pd).}
\item{stars}{Add significance stars (e.g., p < .001***).}
\item{stars_only}{Return only significance stars.}
\item{name}{Name prefixing the text. Can be \code{NULL}.}
}
\value{
A formatted string.
}
\description{
Probability of direction (pd) formatting
}
\examples{
format_pd(0.12)
format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), name = NULL)
format_pd(c(0.12, 1, 0.9999, 0.98, 0.995, 0.96), stars = TRUE)
}
|
4facc61b27455139013b0d36407858d049331714
|
a6916d3ff111685b26cf44ca006fb5e621eaaa75
|
/R/mean_sd.R
|
e9adb1b8d757585d7f934264f2fde2153a6c41e6
|
[] |
no_license
|
spalmas/RIL-Simulator
|
2ee8227412b8ef8fd2e8483d3fa20261213406dd
|
d0c5e901644b32f0cb69381f608d701119e6b8fd
|
refs/heads/master
| 2021-01-17T04:12:36.474527
| 2017-11-19T22:03:06
| 2017-11-19T22:03:06
| 53,466,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,092
|
r
|
mean_sd.R
|
#' Estimate mean and standard deviation from a list correcting for iterations
#'
#' It esimtaes mean and sd correcting for iterations
#'
#' @param X X column of localization of plots
#' @param Y Y column of localization of plots
#'
#' @references
#' Ninguna por ahora
#' @return A formated mean and sd from the given column
#'
#' @examples
#' source('startup.R')
#' trees.tab <- stand.randomizer()
#' table.results <- simulator(sy = 15,
#' it = 2,
#' rotation = 10,
#' intensity = 'Normal',
#' enrich.bosquete = TRUE,
#' w.dist = 10,
#' dir.felling = TRUE,
#' improved.trail = TRUE,
#' lower.impact = TRUE,
#' trees.tab = trees.tab)
#'
#' mean.sd(results.column = 'EMISSIONS', data = table.results, scenario = 'RIL')
#'
mean.sd <- function(results.column, data, scenario, na.rm = TRUE, zeros = TRUE){
tabla <- aggregate(get(results.column) ~ IT , data=data, sum, na.action = na.omit)
string <- paste0(round(mean(tabla[,2], na.rm = TRUE), digits = 1),' (', round(sd(tabla[,2], na.rm = TRUE), digits = 1), ')')
return(string)
}
|
cf7ce1aeb1c86328f514f5bfb8920c0f8bc561d6
|
d4799676996d077a81bb6baa4460ac086d4a9b69
|
/man/topVar.Rd
|
51a1de614ee3b50e6cb089a0d128b03f6b718c57
|
[] |
no_license
|
mengchen18/omicade4
|
3cf9ebb1269c475a7a818f0ff1914ed347275354
|
622f3e0fb350d92e6cc85f991418e5336accc376
|
refs/heads/master
| 2021-01-20T04:57:20.933891
| 2020-10-26T07:38:27
| 2020-10-26T07:38:27
| 101,398,525
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,068
|
rd
|
topVar.Rd
|
\name{topVar}
\alias{topVar}
\title{
Selecting top weighted variables (genes) from result of MCIA or CIA
}
\description{
The user level function calls \code{topVar.mcia} or
\code{topVar.cia}. This function provides a method selecting
top weighted variables (genes) on an axis (either positive side or negative side or both).
}
\usage{
topVar(x, axis = 1, end = "both", topN = 5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object of class \code{mcia} or \code{cia}
}
\item{axis}{
an interger to sepecify which axis to check
}
\item{end}{
which end of the axis to check, could be \code{positive},
\code{negative} or \code{both}. Any unambiguous substring can be given.
}
\item{topN}{
An integer. The number of top weighted variable to return.
}
}
\value{
Returns a data.frame contains selected variables.
}
\author{
Chen Meng
}
\examples{
data(NCI60_4arrays)
mcoin <- mcia(NCI60_4arrays)
topVar(mcoin, axis = 1, end = "both", topN = 3)
}
|
b1ea2cc2ecbc6047350d26eb8ee9d57857731698
|
9f8e62d6b63ce4fe67321effcb1e8bddf89a80f1
|
/Titanic competition/NN/learningCurveNN.R
|
9aad40f442e1e13ea8a00e44c27c2e88cb2c421b
|
[] |
no_license
|
jsaadani/kaggle
|
bd19088ca46846bf3c24988002ebf594deb89eaf
|
6fb3a4111c608b5a78af7c6b3ae7800e65b240d1
|
refs/heads/master
| 2021-01-17T17:05:01.489686
| 2013-11-26T22:04:05
| 2013-11-26T22:04:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,065
|
r
|
learningCurveNN.R
|
learningCurveNN <- function(X,y,Xval,yval,lambda,
input_layer_size,
hidden_layer_size,
num_labels){
source("nnCostFunction.R")
source("trainNNReg.R")
m=nrow(X)
error_train <- matrix(0,m-1,1)
error_val <- matrix(0,m-1,1)
for(i in 2:m){
nn_params_train <- trainNNReg(X[1:i,],y[1:i,],lambda,
input_layer_size,
hidden_layer_size,
num_labels)
error_train[i-1] <- nnCostFunction(nn_params_train,input_layer_size,
hidden_layer_size,num_labels,
X[1:i,],y[1:i],0)
error_val[i-1] <- nnCostFunction(nn_params_train,input_layer_size,
hidden_layer_size,num_labels,
Xval,yval,0)
}
learningCurve <- list("error_train"=error_train,"error_val"=error_val)
return(learningCurve)
}
|
8a7be55408f22506d83a6eea7956e5f36160a46f
|
fbbd587d98043696ba3723e04d6e74349123e5ee
|
/R/patternPermuteLRTUpOrDownPerSubject.R
|
6ae56aaef3bd7eaf52f4c20399f74ed41716a7bd
|
[] |
no_license
|
cran/clustDRM
|
0c4c475fc845e523e0eba44a762b47f8e4b59011
|
7dfe6fc5d1e4fe9c02abf4292f2f46bb0b52abdc
|
refs/heads/master
| 2020-12-21T21:46:41.662087
| 2019-03-15T15:53:44
| 2019-03-15T15:53:44
| 236,572,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
patternPermuteLRTUpOrDownPerSubject.R
|
# TODO: Add comment
#
# Author: Vahid Nassiri
###############################################################################
#' performs LRT for non-flat profiles per subject
#' @param inputDataSubject
#' @param doseLevels
#' @param nPermute
#' @param numReplications
#' @param clusteringResult
#' @param useSeed
#' @return
#'
#' @import ORIClust
#' @importFrom readr parse_number
#' @author Vahid Nassiri
#' @noRd
patternPermuteLRTUpOrDownPerSubject <- function(inputDataSubject, doseLevels,
nPermute, numReplications, clusteringResult,
useSeed){
repeatedDose <- rep(doseLevels, numReplications)
# obtain the first word in the clusteringResult to determine the pattern.
firstWordClusteringResult <- gsub("([A-Za-z]+).*", "\\1", clusteringResult)
if(firstWordClusteringResult == "down"){
profilePattern = c("down.up")
} else if(firstWordClusteringResult == "up"){
profilePattern = c("up.down")
} else {
stop("Please give a clusteringResult beginning with words 'up' or 'down'.")
}
# obtain the numric part of clusteringResult which is then the maximum (or minimum).
profileMaxOrMin <- parse_number(clusteringResult)
# making all indexes
if (!is.null(useSeed)){
set.seed(useSeed)
}
indexAll <- apply(matrix(rep(1:length(inputDataSubject), nPermute),
length(inputDataSubject), nPermute), 2, sample, size = length(inputDataSubject))
ObservedStat <- computeLRTestStatUpOrDown(inputDataSubject, doseLevels,
profilePattern, nPermute, numReplications, profileMaxOrMin)
PermutedStat <- apply(matrix(inputDataSubject[indexAll],nrow(indexAll), ncol(indexAll)), 2, computeLRTestStatUpOrDown, doseLevels,
profilePattern, nPermute, numReplications, profileMaxOrMin)
estimatedPvalue <- (1 + sum(abs(PermutedStat)>abs(ObservedStat)))/(nPermute+1)
# computing adjusted p-values
return(estimatedPvalue)
}
|
7f3006c8e38c5bc8d3e6708eb427747772923685
|
d5334e07da75dc0d44fb3c47f674e078c687631a
|
/R/create_biv_palette.R
|
fddd06edfda41930bf8e38bd9a33377b38ddf518
|
[] |
no_license
|
mtennekes/cols4all
|
bf63720108541d5286fedaf146f9c9310260f571
|
ce7ca55c57f281da387f4d503030213a07abe821
|
refs/heads/main
| 2023-07-06T16:55:52.730989
| 2023-06-30T09:31:32
| 2023-06-30T09:31:32
| 457,766,023
| 247
| 11
| null | 2022-10-27T10:26:56
| 2022-02-10T12:12:59
|
R
|
UTF-8
|
R
| false
| false
| 3,637
|
r
|
create_biv_palette.R
|
aregreys = function(x) {
all(get_hcl_matrix(x)[,2] < 1)
}
# ensure the diagonal are pure greys
convert2grey = function(x) {
greys = rowSums(attr(hex2RGB(x), "coords")) / 3
rgb(greys, greys, greys)
}
create_biv_palette = function(palette, biv.method) {
if (!(biv.method %in% c("div2seqseq", "div2catseq", "seq2uncseq", "byrow", "bycol"))) {
n = as.integer(substr(biv.method, nchar(biv.method), nchar(biv.method)))
biv.method = substr(biv.method, 1, nchar(biv.method) - 1)
if (!(biv.method %in% c("div2seqseq", "byrow", "bycol"))) stop("Invalid biv.method", call. = FALSE)
} else {
np = length(palette)
if (biv.method == "div2seqseq") {
if ((np %% 2) != 1) stop("n is even but should be odd", call. = FALSE)
n = (np - 1L)/2 + 1L
m = n
} else if (biv.method == "div2catseq") {
if ((np %% 2) != 1) stop("n is even but should be odd", call. = FALSE)
n = (np - 1L)/2 + 1L
m = n - 1L
} else if (biv.method == "seq2uncseq") {
n = np
m = 5
} else {
n = round(sqrt(length(palette)))
m = round(length(palette) / n)
if ((n * m) != length(palette)) stop("Please set the biv.method to \"", biv.method, "X\", where X is the number of columns", call. = FALSE)
}
}
if (biv.method == "div2seqseq") {
a = get_hcl_matrix(palette)
# hue matrix
h1 = matrix(a[n:1, 1], nrow = n, ncol = n, byrow = FALSE)
h2 = matrix(a[n:np, 1], nrow = n, ncol = n, byrow = TRUE)
h = matrix(a[n, 1], nrow = n, ncol = n)
h[lower.tri(h)] = h1[lower.tri(h)]
h[upper.tri(h)] = h2[upper.tri(h)]
# chroma matrix
cr1 = a[n:1, 2]
cr2 = a[n:np, 2]
cr = matrix(0, nrow = n, ncol = n)
for (i in 2:n) {
cr[i, 1:i] = seq(cr1[i], 0, length.out = i)
cr[1:i, i] = seq(cr2[i], 0, length.out = i)
}
# limunance matrix
la1 = a[n:1, 3]
la2 = a[n:np, 3]
la0 = (la1 + la2) / 2
la0b = local({
x = 1:n
y = la0
fit2 <- stats::lm(y~poly(x,2,raw=TRUE))
xx <- seq(1,n*sqrt(2),length.out=n)
stats::predict(fit2, data.frame(x=xx))
})
l = matrix(la0[1], nrow = n, ncol = n)
for (i in 2:n) {
l[i, 1:i] = seq(la1[i], la0b[i], length.out = i)
l[1:i, i] = seq(la2[i], la0b[i], length.out = i)
}
l[l<0] = 0
l[l>100] = 100
# l = matrix(la0, nrow = n, ncol = n)
# l1 = t(mapply(seq, la1, la0, length.out = n))
# l2 = mapply(seq, la2, la0, length.out = n)
# l[lower.tri(l)] = l1[lower.tri(l1)]
# l[upper.tri(l)] = l2[upper.tri(l2)]
mat = t(matrix(do.call(grDevices::hcl, list(h = h, c = cr, l = l)), ncol = n, byrow = TRUE))
diag(mat) = convert2grey(diag(mat))
mat
} else if (biv.method == "div2catseq") {
a = get_hcl_matrix(palette)
l1 = a[m:1, 3]
l2 = a[(m+2):np, 3]
l0 = (l1 + l2) / 2
h1 = a[m:1, 1]
h2 = a[(m+2):np, 1]
c1 = a[m:1, 2]
c2 = a[(m+2):np, 2]
h = matrix(c(h1, h1, h1, h2, h2), ncol = n, byrow = FALSE)
cr = matrix(c(c1, c1/2, rep(0,m), c2/2, c2), ncol = n, byrow = FALSE)
l = matrix(c(l1, (l1+l0)/2, l0, (l2+l0)/2, l2), ncol = n, byrow = FALSE)
mat = matrix(do.call(grDevices::hcl, list(h = h, c = cr, l = l)), ncol = n, byrow = FALSE)
# convert middle col to pure greys if they are
mid_col = mat[, (n+1)/2]
if (aregreys(mid_col)) {
mat[, (n+1)/2] = convert2grey(mid_col)
}
mat
} else if (biv.method == "seq2uncseq") {
a = get_hcl_matrix(palette)
b = a
b[,2] = 0
pa = grDevices::hcl(h = a[,1], c = a[,2], l = a[,3])
pb = grDevices::hcl(h = b[,1], c = b[,2], l = b[,3])
pb = convert2grey(pb)
unname(do.call(rbind, mapply(function(ca, cb) {
colorRampPalette(c(ca, cb))(m)
}, pa, pb, SIMPLIFY = FALSE)))
} else {
matrix(palette, ncol = n, byrow = biv.method == "byrow")
}
}
|
e856443ad98afab4f821a99380a4d3e6d3e6c792
|
6f930adb2fea0155113a1e4a84bec37afa71c8bf
|
/world3.R
|
a39953925b1e0045b6721cea154f44c0fc18097d
|
[] |
no_license
|
DAWOODSKYM/R-scrips
|
99b180127e162d0de929b1a0c6cedde358ede1f1
|
2916d50d2f7f12079f59d8b6a608e5c4e443588f
|
refs/heads/main
| 2023-03-29T17:18:49.647194
| 2021-04-03T20:42:26
| 2021-04-03T20:42:26
| 354,388,197
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,006
|
r
|
world3.R
|
#load the required libraries
library(sf)
library(RColorBrewer)
library(tmap)
library(dplyr)
library(mapview)
#Read the shapefile and csv and print their coloumns names
SA<-st_read("world2/World_Countries.shp")
cols<-names(SA)
Data<-read.csv("SA4.csv")
print(Data)
#names(Data)[COUNTRY]="XCOUNTRY"
#Join csv to shapefile
Datajoin<-left_join(SA,Data,by=c('COUNTRY'='ï..COUNTRY'))
#set view mode and print
map1<-tm_shape(Datajoin)+tm_polygons("X1960",palette="Reds",contrast=1)+tm_layout(legend.frame="Red",title="David M. ENC222-0131/2018")
map2<-tm_shape(Datajoin)+tm_polygons("X1990",palette="Reds",contrast=1)+tm_layout(legend.frame="Red",title="David M. ENC222-0131/2018")
map3<-tm_shape(Datajoin)+tm_polygons("X2000",palette="Reds",contrast=1)+tm_layout(legend.frame="Red",title="David M. ENC222-0131/2018")
map4<-tm_shape(Datajoin)+tm_polygons("X2017",palette="Reds",contrast=1)+tm_layout(legend.frame="Red",title="David M. ENC222-0131/2018")
tmap_mode("view")
print(tmap_arrange(map1,map2,map3,map4))
|
d3482196efce2fca4d7a057dd51935170ece0d33
|
131454ec12f05c8de22b91d8f7f146f1990b3371
|
/afegir_coordenades.R
|
9710f589d4e2046c1bdd471720b06544b5d17412
|
[] |
no_license
|
pere-prlpz/toponims
|
1bcd28e2322f82f079d2635a61638e0b251c287f
|
df94177b54c14a62c0125197217082111f29de40
|
refs/heads/master
| 2023-08-30T20:40:07.174614
| 2023-08-29T20:03:45
| 2023-08-29T20:03:45
| 238,566,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
afegir_coordenades.R
|
# nuclis existents
library(readr)
nuclis <- read_delim("C:\\Users\\Pere\\Documents\\DADES\\pere\\varis\\nuclis_wd.tsv",
"\t", escape_double = FALSE, trim_ws = TRUE)
# nomenclàtor
library(readxl)
index_nomenclator_2009 <- read_excel("~/DADES/pere/varis/index_nomenclator_2009.xls")
nomred <- index_nomenclator_2009[index_nomenclator_2009$Concepte=="nucli", 1:13]
nomred <- nomred[!is.na(nomred$`UTM X`),]
# canvi coordenades nomenclàtor
library(rgdal)
coords <- as.matrix(nomred[, c("UTM X","UTM Y")])*100
#sputm <- SpatialPoints(coords, proj4string=CRS("+proj=utm +zone=31N +datum=WGS84"))
sputm <- SpatialPoints(coords,
proj4string=
CRS("+init=epsg:23031 +proj=utm +zone=31 +ellps=intl +towgs84=-87,-98,-121,0,0,0,0 +units=m +no_defs"))
spgeo <- spTransform(sputm, CRS("+proj=longlat +datum=WGS84"))
spgeo.df <- as.data.frame(spgeo)
names(spgeo.df) <- c("lon", "lat")
nomred <- cbind(nomred, spgeo.df)
# regularització del nom
arreglanom <- function(nom) {
nom <- gsub(" *", "", nom, fixed = TRUE)
trossos <- unlist(strsplit(nom, ", "))
n <- length(trossos)
nom <- ifelse(n==1,
nom,
paste(c(trossos[n], trossos[1:(n-1)]), collapse = " ")
)
nom <- gsub("' ","'", nom)
return (nom)
}
nomred$nom <- sapply(nomred$Topònim, arreglanom)
# nom dels existents
treupar <- function(nom) {
trossos <- strsplit(nom," (", fixed = TRUE)
return(trossos[[1]][1])
}
nuclis$nomnet <- sapply(nuclis$name, treupar)
# noms per enganxar
nuclis$nomrel <- tolower(nuclis$nomnet)
nomred$nomrel <- tolower(nomred$nom)
nuclis$munrel <- tolower(nuclis$nmun)
nomred$munrel <- tolower(nomred$`Municipi 1`)
nomred$munrel <- gsub(" *","",nomred$munrel, fixed = TRUE)
units <- merge(nuclis, nomred, by=c("nomrel", "munrel"), all.y=TRUE)
# distància
dist <- function(lat.x, lon.x, lat.y, lon.y) {
sqrt(((6371*(lat.x-lat.y)*pi/180)^2+(6371*cos(lat.x/180*pi)*(lon.x-lon.y)*pi/180)^2))
}
# més allunyats
with(units, units[dist(lat.x, lon.x, lat.y, lon.y)>5 & !is.na(lat.x),])
# busco on afegir coordenades
afegircoor <- units[is.na(units$lat.x) & !is.na(units$lat.y) & !is.na(units$item), ]
afegircoor$item <- gsub("http://www.wikidata.org/entity/", "",
afegircoor$item , fixed=TRUE)
# preparar quickstatemens
quick <- function(fila) {
instr <- list()
instr[[1]] <- c(fila$item, "P625",
paste0("@", fila$lat.y,"/", fila$lon.y))
instr <- sapply(instr, FUN=paste, collapse="\t")
return (instr)
}
instruccions <- unlist(lapply(1:nrow(afegircoor), function(i) {quick(afegircoor[i,])}))
cat(paste(instruccions, collapse="\n")) #pantalla
|
88e7a7ad2e97234133bcc921342c9a34dba0bcba
|
8dff28544e8dc1ba210cfdb22a4e82ce97a5d30b
|
/quiz_W2.R
|
092d3c2214d1c194880d346f2edccfcc93359a46
|
[] |
no_license
|
GaelleLouise/GettingCleaningData
|
f5f4e56b5c1e07c16ffba184f3afce66aca5b8fa
|
954f27bacac24bede45e2b756ebfef4366b4d348
|
refs/heads/master
| 2023-01-29T07:36:53.811404
| 2020-12-10T14:56:19
| 2020-12-10T14:56:19
| 319,390,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 894
|
r
|
quiz_W2.R
|
# lecture JSON d'une API (question 1) :
jsonData <- fromJSON("https://api.github.com/users/jtleek/repos")
names(jsonData) # donne les différentes "colonnes"
Nom_repo <- jsonData$name # stocke les différents noms de repository dans un vecteur
date_repo <- jsonData$created_at # stocke les différentes dates de création de repository dans un vecteur
date_repo[Nom_repo == "datasharing"] # affiche la date de création du repository intitulé "datasharing"
# lecture d'une page web :
connexion = url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlCode = readLines(connexion)
close(connexion)
# lecture d'un fichier en format fixe :
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
data <- read.fwf(file = fileURL, widths = c(10, 9, 4, 9, 4, 9, 4, 9, 4), # on peut spéciier des nb <0 pour sauter des colonnes
skip = 4, dec = ".")
sum(data$V4)
|
3845120e7b4d0a6a57ed94d82a6d1f1c2b89c7c3
|
b654d0764d7d9353363903253efc194ecce95a89
|
/Corte2/Logistic Regression/LogisticRegression.R
|
29c6b9fdcfb40926b61339c0d3bf690d086cc47e
|
[] |
no_license
|
johnj-hernandez/Ciencia-de-los-Datos-R
|
c77d043f514ff82edb053ff4c584081fd1b0c81b
|
476bc02226dbeaf268911b9a5ff9ad6465b23b44
|
refs/heads/master
| 2020-07-07T04:16:09.131452
| 2019-08-19T20:37:25
| 2019-08-19T20:37:25
| 203,246,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,793
|
r
|
LogisticRegression.R
|
#Por medio de la regresion logistica podremos predecir entre decisiones binarias la pertenencia
#de un valor , me devuelve 0 o 1 , usando la funcion sigmoide la cual me da valores entre 0 a 1
#luego por medio de un umbral vamos a definir cuales son 0 y 1
#leemos los datos
donors<- read.csv('donors.csv')
donors
str(donors)
#realizamos el modelo de regresion logistica relacionando 3 variables y family binomial para 0 y 1
donation_model <-
glm(donated ~ bad_address + interest_religion
+ interest_veterans,
data = donors, family = "binomial")
donation_model
#vemos cuales son los coeficientes de la ecuacion
donation_model$coefficients
#vemos el resumen del modelo en donde podemos ver la significancia de estos
summary(donation_model)
#ahora vemos cuantos donaron realmente (reales)
table(donors$donated)
#en los datos de donors le agregamos una matriz con las probabilidades de que donden
#arrojadas por la prediccion y el modelo de regresion lineal
donors$donation_prob <-
predict(donation_model, type = "response")
donors$donation_prob
colnames(donors)
#calculamos la proporcion de los datos reales y con esto vamos a probar si las probabildiades
#calculadas fueron adecuadas
mean(donors$donated)
donors$donation_pred <- ifelse(donors$donation_prob > 0.05040551,1,0)
donors$donation_pred
mean(donors$donated == donors$donation_pred)
#probabilidad de que no donen y donen
mean(donors$donated == 0)
mean(donors$donated == 1)
#preguntar porque igual a 0
#matriz de confusion
table(x= donors$donated,y = donors$donation_pred)
table(donors$donated,rep(0,length(donors$donated)))
install.packages("pROC")
library(pROC)
ROC <- roc(donors$donated, donors$donation_prob)
ROC
auc(ROC)
plot(ROC, col = "blue")
str(donors)
# * son interacciones
#tenemos el modelo con la nueva combinacion de variables
rfm_model <- glm(donated ~ money + recency*frequency, donors, family = "binomial")
#Vemos las probabilidades
rfm_prob <- predict(rfm_model, type = "response")
summary(rfm_model)
#realizamos las predicciones con el nuevo modelo
rfm_pred<- ifelse(rfm_prob>0.05040551,1,0)
#la precision con el nuevo modelo disminuyo sin embargo hay que verificar si la sensitividad mejoro
#sensitividad aumento, primero era 0.19 y luego 0.555 osea mejor
#osea va a darme mas valores positivos aun si se equivoca con los negativos
mean(donors$donated==rfm_pred)
#grafico inicial de roc
ROC<- roc(donors$donated, donors$donation_prob)
plot(ROC,col="blue",main="Grafico inicial")
#grafico con nuevo modelo
ROC2<- roc(donors$donated, rfm_prob)
plot(ROC2,col="red",main="Grafico con modelo rfm")
|
0f31b8efd38435867995c32c14cfdb5523d1d729
|
bd0637d80e2f9742aff94fe1b77cc25268b121ec
|
/Word_cloud.R
|
8c0e704a90e17805f8a8f7fefb0a609190a13e20
|
[] |
no_license
|
Niteshiya/Word_cloud_with_R_-_sentiments
|
27dbf0438e4a99211151cb45c7b611ee9acd7753
|
ab94b7a5f37d1b3db2ff82eb0b2a0c1ef2cfdc0a
|
refs/heads/master
| 2020-07-29T16:20:38.555303
| 2019-09-20T21:50:19
| 2019-09-20T21:50:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,036
|
r
|
Word_cloud.R
|
#load data
library(pdftools)
data <- pdf_text("nc.pdf")
library(tm)
options(header=F,stringsAsFactors = F,FileEncoding="latin1")
#make corpus
corpus <- Corpus(VectorSource(data))
#data cleaning
corpus <- tm_map(corpus,tolower)
corpus <- tm_map(corpus,removePunctuation)
corpus <- tm_map(corpus,removeNumbers)
corpus <- tm_map(corpus,removeWords,stopwords("english"))
remove_specials <- function(x) gsub("\r?\n|\r"," ",x)
corpus <- tm_map(corpus,content_transformer(remove_specials))
corpus <- tm_map(corpus,stripWhitespace)
#finally clean text
cleantxt <- tm_map(corpus,PlainTextDocument)
cleantxt <- Corpus(VectorSource(cleantxt))
#term document matrix
tdm <- TermDocumentMatrix(cleantxt,control=list(minWordLength=c(1,Inf)))
word_mat <- as.matrix(tdm)
#inspecting frequency words
findFreqTerms(tdm,lowfreq = 10)
#Bar plot
term_freq <- rowSums(word_mat)
term_freq <- subset(term_freq,term_freq>=7)
library(ggplot2)
barplot(term_freq,las=2,col=rainbow(20))
#if any unwanted word comes use tm_map(corpus,removeWords,c("","",""))
#word cloud
library(wordcloud)
wordFreq <- sort(rowSums(word_mat),decreasing = T)
grayLevel <- gray((wordFreq+10)/(max(wordFreq)+10))
#1st word cloud with gray level
wordcloud(words=names(wordFreq),freq=wordFreq,min.freq=5,random.order = F,color=grayLevel)
#2nd with colors
wordcloud(words=names(wordFreq),freq=wordFreq,min.freq=5,random.order = F,color=brewer.pal(11,"Paired"))
library(wordcloud2)
df <- data.frame(names(wordFreq),wordFreq)
colnames(df) <- c("Word","Frequency")
head(df)
#remove all non utf-8 characters
df$Word <- iconv(df$Word,from="UTF-8",to="UTF-8",sub="")
wordcloud2(df,size=0.8,shape="star")
#sentiments analysis
library(syuzhet)
library(reshape2)
library(dplyr)
library(scales)
library(ggplot2)
library(lubridate)
sentiment_score <- get_nrc_sentiment(as.character(cleantxt))
#my pdf is alone so it takes all the text data in the 1st row
sentiment_score <- sentiment_score[1,]
sentiment_score
#bar plot
barplot(colSums(sentiment_score),las=2,ylab="Score",col=rainbow(10))
|
54af0377583d8447f26ef85758c1a3bbdb98ca9f
|
ec636517d71057cd78774dcfc040be6e0adc29a0
|
/R/02-create_modeling_dataset.R
|
1719d1b8676ea2a5ea744648d42a0dfa5fdaa085
|
[] |
no_license
|
JiaxiangBU/neural_networks_longitudinal
|
3723f0a49d6852804b7d2ecaf33b5c3b1db83d95
|
4e96861e486445c737544807e4b29003a28644e0
|
refs/heads/master
| 2021-03-25T05:04:18.054122
| 2020-03-16T02:59:49
| 2020-03-16T02:59:49
| 247,591,312
| 1
| 0
| null | 2020-03-16T02:19:27
| 2020-03-16T02:19:27
| null |
UTF-8
|
R
| false
| false
| 5,589
|
r
|
02-create_modeling_dataset.R
|
message('Bring in data sources')
bstats <- battingStats() %>%
arrange(playerID, yearID) %>%
filter(stint==1 & yearID >= min_year)
### Filter on plate appearances to focus on batters
## Filling in empty years
pa_padded <- bstats %>%
select(playerID, yearID, PA) %>%
arrange(playerID, yearID) %>%
mutate(date = as.Date(paste0(yearID, '-12-31'))) %>%
arrange(playerID, date) %>%
pad(interval='year', by='date', group='playerID') %>%
mutate(yearID = year(date), PA = replace_na(PA, 0)) %>%
select(-date)
## Get windows that show 3 seasons + 1 follow-up season
pa_window <- pa_padded %>%
group_by(playerID) %>%
rename(PA_current = PA) %>%
mutate(PA_minus1 = lag(PA_current, 1),
PA_minus2 = lag(PA_current, 2),
PA_plus1 = lead(PA_current, 1)
) %>%
select(playerID, yearID, PA_minus2, PA_minus1, PA_current, PA_plus1) %>%
filter(!is.na(PA_minus1) & !is.na(PA_minus2) & !is.na(PA_plus1)) %>%
ungroup
### Apply plate appearances filter
# This table contains the final list of players to include in the analysis.
message('Apply plate appearances filter')
pa_filtered_raw <- pa_window %>%
filter(PA_minus1 >= min_plate_appearances &
PA_minus2 >= min_plate_appearances &
PA_current >= min_plate_appearances &
PA_plus1 >= min_plate_appearances) %>%
apply_data_filters
# Training/testing/validation split on player IDs so as to avoid data leakage between test/validation and training sets.
message('Training/testing/validation split on player IDs')
set.seed(random_seed)
## Vector of all players
players_vec <- unique(pa_filtered_raw$playerID)
nplayers <- length(players_vec)
ntrain <- ceiling(pct_train * nplayers)
## Training set
playerIDs_train <- sample(x=players_vec, size=ntrain, replace=F)
## Testing and validations sets
playerIDs_testvalid <- players_vec[players_vec %in% playerIDs_train == F]
ntest <- ceiling(pct_test * length(playerIDs_testvalid))
playerIDs_valid <- sample(playerIDs_testvalid, size=ntest, replace=F)
playerIDs_test <- playerIDs_testvalid[playerIDs_testvalid %in% playerIDs_valid == F]
## All players accounted for?
all_accounted_for <- length(playerIDs_train) + length(playerIDs_test) + length(playerIDs_valid) == nplayers
cat('All players accounted for?', all_accounted_for, '\n')
# Add train/test/validation split labels to the filtering dataframe
pa_filtered <- pa_filtered_raw %>%
mutate(data_split = case_when(playerID %in% playerIDs_train ~ 'train',
playerID %in% playerIDs_test ~ 'test',
playerID %in% playerIDs_valid ~ 'valid',
TRUE ~ 'other'),
min_year = yearID - 2,
max_year = yearID + 1)
# For each player, determine the years of data that contain the minimum amount of plate appearances
message('Find players with correct number of plate appearances')
player_years <- lapply(players_vec, get_player_years) %>% bind_rows
player_years %>% head
message('Batting Stats table...')
batting <- bstats %>%
rename(Batting_AtBats = AB,
Batting_Runs = R, #RunsCreated=RC, RunsProduced=RP,
Batting_BaseOnBalls = BB,
Batting_Hits = H,
Batting_HomeRuns = HR,
Batting_Games = G,
Batting_Doubles = X2B,
Batting_Triples = X3B,
Batting_RunsBattedIn = RBI,
Batting_StolenBases = SB,
Batting_CaughtStealing = CS,
Batting_Strikeouts = SO,
Batting_IntentionalWalks = IBB,
Batting_HitByPitch = HBP,
Batting_SacrificeHits = SH,
Batting_Sacrifice_Flies = SF,
Batting_GroundedIntoDoublePlays = GIDP,
Batting_PlateAppearances = PA,
Batting_BattingAverage = BA,
Batting_OnBasePct = OBP,
Batting_SlugPct = SlugPct
) %>%
select(playerID, lgID, yearID, teamID,
Batting_AtBats, Batting_Runs, Batting_BaseOnBalls, Batting_Hits, # Batting
Batting_HomeRuns, Batting_Games, Batting_BattingAverage, Batting_PlateAppearances,
Batting_BattingAverage, Batting_OnBasePct, Batting_SlugPct,
Batting_Doubles, Batting_Triples, Batting_RunsBattedIn, Batting_StolenBases,
Batting_CaughtStealing, Batting_Strikeouts, Batting_IntentionalWalks,
Batting_HitByPitch, Batting_SacrificeHits, Batting_Sacrifice_Flies,
Batting_GroundedIntoDoublePlays) %>%
distinct()
baseball_raw <- batting %>% # base table
inner_join(player_years, by=c('playerID', 'yearID')) %>% # filters players of primary interest
arrange(playerID, yearID) %>%
select(playerID, lgID, yearID, teamID, data_split,
Batting_AtBats, Batting_Runs, Batting_BaseOnBalls, Batting_Hits, # Batting
Batting_HomeRuns, Batting_Games, Batting_BattingAverage, Batting_PlateAppearances,
Batting_BattingAverage, Batting_OnBasePct, Batting_SlugPct,
Batting_Doubles, Batting_Triples, Batting_RunsBattedIn, Batting_StolenBases,
Batting_CaughtStealing, Batting_Strikeouts, Batting_IntentionalWalks,
Batting_HitByPitch, Batting_SacrificeHits, Batting_Sacrifice_Flies,
Batting_GroundedIntoDoublePlays
) %>%
apply_data_filters() %>%
#select_at(cols_include) %>%
replace(is.na(.), 0) %>% # replace missing values with zeroes
mutate(idx = as.numeric(as.factor(playerID))) %>%
select_at(c('playerID', 'idx', colnames(.)[colnames(.) %in% c('playerID', 'idx') == F])) %>%
distinct()
#Export to CSV
message('Export to CSV')
baseball_raw %>%write_csv('baseball_dat_modeling.csv')
|
d0e00c65f020828f1d19e38a8d17c5a94f017ba0
|
f9b7b6c992654c72be8cb9087f3a38636cc36c2b
|
/plot_functions.R
|
a476802a6fa3383e4f701505861831d6fdfd9ab7
|
[] |
no_license
|
leosouliotis/AZ_data.challenge
|
5cd20d27f174c9763f9d8e1bdea4a78fdfac02ef
|
4b5898c34dbbb4a45756e2a7c731635698d3025f
|
refs/heads/master
| 2021-05-20T00:28:42.651039
| 2020-04-01T07:57:17
| 2020-04-01T07:57:17
| 252,107,326
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
plot_functions.R
|
# plot_functions.R
# Exploratory plots of the country-by-reaction adverse-event data:
# a heatmap of reaction proportions per country, Kruskal-Wallis tests on
# counts per country, and a scatter plot of drugs vs reactions per report.
setwd('~/Downloads')
library('gplots')
library('ggplot2')

# Country x reaction count matrix; the first column ("X") holds country names.
country2reactions <- read.csv('country2reactions.csv', stringsAsFactors = FALSE)
head(country2reactions[1:5, 1:5])
rownames(country2reactions) <- country2reactions$X
country2reactions$X <- NULL

most_freq_responses <- read.csv('max_20_reactions.csv')    # most frequent reactions
most_countries <- read.csv('country2counts_max20.csv')     # best-represented countries

# Normalise each row to proportions, then restrict the matrix to the most
# frequent reactions and the countries with the most reports.
country2reactions <- country2reactions / rowSums(country2reactions)
country2reactions <- country2reactions[, colnames(country2reactions) %in% most_freq_responses$X]
country2reactions <- country2reactions[rownames(country2reactions) %in% most_countries$X, ]
which(rowSums(country2reactions) == 0)  # sanity check: flag all-zero rows

crp <- colorRampPalette(c("blue", "red", "orange", "yellow"))(100)
# NOTE(review): 'dendrogram' and 'trace' are gplots::heatmap.2() arguments;
# base heatmap() does not define them — confirm whether heatmap.2() was intended.
heatmap(as.matrix(country2reactions), dendrogram = 'none', trace = 'none', col = crp)
#dev.off()

noreactions2countries <- read.csv('no_reactions2country.csv')
kruskal.test(noreactions2countries$no_reactions, noreactions2countries$Country)
#ggplot(noreactions2countries,aes(x=Country))+
#  geom_bar(stat='count')
nodrugs2countries <- read.csv('no_drugs2country.csv')
kruskal.test(nodrugs2countries$no_drugs, nodrugs2countries$Country)

# Spelled out 'no_drugs' in full: the original '$no_drug' only worked through
# R's partial matching on data frames.
cor.test(noreactions2countries$no_reactions, nodrugs2countries$no_drugs, method = 'spearman')

# BUG FIX: the original computed cor_line with nrow(df_nodrugs_noreactions)
# BEFORE that data frame existed, which errors on a clean run. Build the data
# frame first, then attach the (constant) Spearman correlation column.
df_nodrugs_noreactions <- data.frame(
  no_drugs = nodrugs2countries$no_drugs,
  no_reactions = noreactions2countries$no_reactions
)
df_nodrugs_noreactions$spearman_cor <- cor(
  noreactions2countries$no_reactions,
  nodrugs2countries$no_drugs,
  method = 'spearman'
)

ggplot(df_nodrugs_noreactions, aes(x = no_drugs, y = no_reactions)) +
  geom_point() +
  labs(x = 'No of drugs', y = 'No of reactions')
ggsave('~/Documents/no.drugs_no.responses.pdf')
|
a8fc8a14d3a83f1d242d342d75dcb6ada77025e1
|
79b7742a7f5483bd829f6dc3589f6b345024fa49
|
/Learning_structure.R
|
b38ae84a996adc713c059b7f227f4826d12e2749
|
[] |
no_license
|
vungan19/Temporal-Bayesian-network
|
d7aaf01912255d85e74efea114675bbac2ddab8b
|
85c9f8c25a9ccbefa636b8424ab79da4149d995b
|
refs/heads/master
| 2020-04-13T16:24:05.702922
| 2018-12-27T17:38:39
| 2018-12-27T17:38:39
| 163,320,080
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,194
|
r
|
Learning_structure.R
|
# Learning_structure.R
# Learn Bayesian-network structures over a 6-step sliding window of the
# AWSUM_feature series (columns y_t-5 ... y_t), fit the network, and assess a
# linear prediction of y_t on held-out rows. Requires the AWSUM_feature
# object to already exist in the workspace.
library(bnlearn)
#data(coronary)
#bn_df<- data.frame(coronary)
#res <- hc(bn_df)
#res$arcs <- res$arcs[-which((res$arcs[,'from'] == "M..Work" & res$arcs[,'to'] == "Family")),]

## Create node rows: every run of 6 consecutive columns of AWSUM_feature
## becomes one observation (a sliding 6-step window).
## NOTE(review): growing newdata with rbind() in a loop is O(n^2); consider
## preallocating or lapply() + do.call(rbind, ...) if this becomes slow.
newdata <- c()
for (i in seq_len(nrow(AWSUM_feature))) {          # seq_len() is safe for 0 rows
  for (j in seq_len(ncol(AWSUM_feature) - 5)) {
    add <- AWSUM_feature[i, j:(j + 5)]
    names(add) <- c("y_t-5", "y_t-4", "y_t-3", "y_t-2", "y_t-1", "y_t")
    newdata <- rbind(newdata, add)
  }
}
data <- newdata

# 60/40 train/test split with a fixed seed for reproducibility
samplesize <- 0.60 * nrow(data)
set.seed(80)
index <- sample(seq_len(nrow(data)), size = samplesize)
# Create training and test set
datatrain <- data[index, ]
datatest <- data[-index, ]

res <- hc(datatrain)  # Learn the structure by hill climbing
plot(res)             # Plot the learned structure
# NOTE(review): add_arc is built but never applied to the network — confirm
# whether these arcs were meant to be forced (e.g. via a whitelist in hc()).
add_arc <- data.frame("from" = c("y_t-5", "y_t-5", "y_t-4", "y_t-4", "y_t-3", "y_t-2", "y_t-2"),
                      "to" = c("y_t-3", "y_t-2", "y_t-1", "y_t", "y_t", "y_t-1", "y_t"))
plot(res)

# Fit parameters, then predict y_t on the test rows from the fitted node's
# coefficients (intercept plus the terms applied to y_t-5, y_t-3, y_t-1).
fittedbn <- bn.fit(res, data = datatrain)
coef_t <- fittedbn[["y_t"]][["coefficients"]]
y_t_train <- coef_t[1] + coef_t[2]*datatest[["y_t-5"]] + coef_t[3]*datatest[["y_t-3"]] + coef_t[4]*datatest[["y_t-1"]]
summary(fittedbn)
# BUG FIX: plot()'s axis-label argument is 'ylab'; the original 'y_lab' was
# silently swallowed by '...', leaving the y axis unlabeled.
plot(datatest[["y_t"]], y_t_train, col = "blue", pch = 16,
     ylab = " Predicted value at next window", xlab = "real value")
abline(0, 1)
plot(datatest[["y_t"]], type = "l")
lines(y_t_train, col = "red")
plot(newdata[["y_t"]], type = "p")
# NOTE(review): y_t_new is never defined in this script — presumably
# y_t_train (or a prediction over newdata) was intended; confirm.
points(y_t_new, col = "red")
##=====================================================================================================================##
## Learning Bayesian Structure [12/12/2018] with bnstruct
# Call library:
library(bnstruct)
data <- AWSUM_feature
## Create a BNDataset: 6 continuous variables, 5 discretization levels each
dataset <- BNDataset(data = data,
                     discreteness = rep('c', 6),
                     variables = c("y_t-5", "y_t-4", "y_t-3", "y_t-2", "y_t-1", "y_t"),
                     node.sizes = c(5, 5, 5, 5, 5, 5))
net.1 <- learn.network(dataset, initial.network = "random.chain")
net.2 <- learn.network(dataset, initial.network = net.1)
net.3 <- learn.network(dataset, algo = "mmhc", initial.network = net.2)
# NOTE(review): net.1 is reassigned here, discarding the chain-initialized
# network learned above — confirm this overwrite is intended.
net.1 <- learn.network(dataset,
                       algo = "sem",
                       scoring.func = "AIC")
|
7635652532381ead7bbd242f3318841202f19682
|
4e3bd5287f24ddfe9b08f9751710c98615f1cd7a
|
/solution.R
|
6bd3244ce2a9a3e6a2389b8244474f7e3fd4c0c4
|
[] |
no_license
|
nilmadhab/PML_assignment
|
fcf5db8c3bc655250bd38cf9c0c486eef91d8b11
|
26022dffc1a4f189737d3baa5a3babc062656bb7
|
refs/heads/master
| 2021-01-10T06:24:25.949567
| 2015-09-26T04:16:51
| 2015-09-26T04:16:51
| 43,191,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
r
|
solution.R
|
# solution.R — Practical Machine Learning course project.
# Fits a random forest to the cleaned "pml" training data (classe ~ .),
# tunes mtry via 2-fold cross-validation, and predicts the 20 test cases.
# NOTE(review): rm(list = ls()) plus a hard-coded setwd() makes this script
# non-portable; acceptable for a one-off analysis, avoid in shared code.
rm(list = ls())
# Read cleaned training and testing data
#set working directory
setwd("/home/nilmadhab/Desktop/data/Practical-Machine-Learning/")
training <- read.table(file = "pml-training2.csv",
                       header = TRUE, sep = ",", quote = "")
testing <- read.table(file = "pml-testing2.csv",
                      header = TRUE, sep = ",", quote = "")
# Change the numeric type to integer type to make sure
# the same data type in training data and testing data
training$magnet_dumbbell_z <- as.integer(training$magnet_dumbbell_z)
training$magnet_forearm_y <- as.integer(training$magnet_forearm_y)
training$magnet_forearm_z <- as.integer(training$magnet_forearm_z)
# Align factor levels so predict() does not fail on levels unseen in testing
levels(testing$new_window) <- levels(training$new_window)
# Install randomForest package
# install.packages("randomForest")
library(randomForest)
# install.packages("caret")
library(caret)
set.seed(111)
# Define cross-validation experiment: 2-fold CV
fitControl = trainControl( method = "cv", number = 2)
# Perform the cross validation (tunes mtry for the random forest)
cv <- train(classe ~ ., data = training, method = "rf",
            trControl = fitControl)
cv$bestTune$mtry  # CV-selected number of variables tried at each split
library(rattle)
# NOTE(review): fancyRpartPlot() expects an rpart tree; cv$finalModel here is
# a randomForest object, so this call likely fails — confirm intent.
fancyRpartPlot(cv$finalModel)
# Refit a random forest on the full training set using the tuned mtry
RandomForest = randomForest(classe ~ ., data = training,
                            mtry = cv$bestTune$mtry)
PredictForTrain = predict(RandomForest)    # out-of-bag predictions
table(PredictForTrain, training$classe)    # confusion counts on training data
PredictForest = predict(RandomForest, newdata = testing)
PredictForest
# Write each element of x to its own "problem_id_<i>.txt" file in the current
# working directory (course submission format: one answer per file, no
# quoting, no row/column names).
#
# Args:
#   x: vector of predictions, one per test case.
# Returns: nothing (called for its file-writing side effect).
pml_write_files = function(x){
  n = length(x)
  # seq_len() is safe for empty input; the original 1:n would iterate
  # over c(1, 0) when n == 0 and write bogus files.
  for(i in seq_len(n)){
    filename = paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}
# Call the function
pml_write_files(PredictForest)
|
ad435a623701b03c71df612ef9dfa07114c5bc96
|
d8db4472082424fd086a85753c95b247c7fd3949
|
/functions/eFormsOrganizeData_byTable_NCCA.r
|
139ae10ebdd8ffe5b3eaffa576a9c08976705950
|
[
"CC0-1.0"
] |
permissive
|
kblocksom/eForms_NARS
|
5dba55cfdb8f4efcdd848d9f7d0b1ba7d2594018
|
15c73cd647bf4e24cf06749348cdaf7928fe8b89
|
refs/heads/master
| 2023-04-09T10:08:24.243145
| 2021-04-14T16:47:36
| 2021-04-14T16:47:36
| 256,043,383
| 0
| 1
|
CC0-1.0
| 2020-11-03T19:48:34
| 2020-04-15T21:43:37
|
R
|
UTF-8
|
R
| false
| false
| 8,290
|
r
|
eFormsOrganizeData_byTable_NCCA.r
|
# eFormsOrganizeData_byTable.r
# Purpose: For each type of data, organize into data frames
# First figure out the type of data by sample type
#
# Created 2/27/2019 by Karen Blocksom
###############################################################
# Organize one parsed NCCA eForm into long format.
# rawData is a named list: elements 1-7 hold the visit identifiers and the
# 8th element holds the sample data; the NAME of the 8th element encodes the
# sample type and selects which organize*() function below is applied.
# Returns a list: [[1]] = visit info cbind-ed to the organized long data,
# [["SAMPLE_TYPE"]] = the sample type string.
eFormsOrganize_byTable.ncca <- function(rawData){
  # Extract visit info
  visitinfo <- as.data.frame(rawData[1:7],stringsAsFactors=F)
  # Extract sample type from 8th element in each file
  sampletype <- names(rawData)[8]
  # Create data frame of parsed data to start with, making them all character variables
  parsedData <- as.data.frame(rawData[8])
  parsedData[,names(parsedData)] <- lapply(parsedData[,names(parsedData)], as.character)
  # run parsed data through organizing function, based on sample type
  switch(sampletype,
    ASSESSMENT = {rr <- organizeAssessment.ncca(parsedData)},
    CALIBRATION = {rr <- organizeCalibration.ncca(parsedData)},
    VERIFICATION = {rr <- organizeVerification.ncca(parsedData)},
    SAMPLES = {rr <- organizeSamples.ncca(parsedData)},
    PROFILE = {rr <- organizeProfile.ncca(parsedData)},
    ECOFISH = {rr <- organizeEcofish.ncca(parsedData)},
    HHFISH = {rr <- organizeHHfish.ncca(parsedData)},
    SAMPLE_PROCESS = {rr <- organizeSampProc.ncca(parsedData)}
  )
  ss <- list(cbind(visitinfo, rr))
  # Add new object to list with sample type name
  ss[["SAMPLE_TYPE"]] <- sampletype
  return(ss)
}
#############################################################################################################
# This begins the section which organizes the parsed data by sample type
# Reshape VERIFICATION data to long format (SAMPLE_TYPE/PARAMETER/RESULT).
# Habitat-related parameters are reassigned to sample type 'SHAB';
# REVIEW fields are dropped from the output.
organizeVerification.ncca <- function(parsedIn){
  # Simply melt these data and clean up parameter names
  aa <- parsedIn
  aa$SAMPLE_TYPE <- 'VERIF'
  varLong <- names(parsedIn)
  aa.long <- reshape(aa, idvar=c('SAMPLE_TYPE'), varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'PARAMETER', direction = 'long')
  # Strip the "VERIFICATION." prefix the parser adds to every column name
  aa.long$PARAMETER <- with(aa.long, gsub('VERIFICATION\\.', '', PARAMETER))
  # Habitat/SAV/debris parameters belong to the SHAB sample type
  aa.long$SAMPLE_TYPE <- with(aa.long, ifelse(grepl('MACRO_ALGAE|BOTTOM_TYPE|HABITAT|MACRO_ABUNDANCE|MACROALGAE|MARINE_DEBRIS|MARINE_DEBRIS_TYPE|SAV|SAV_ABUNDANCE', PARAMETER), 'SHAB', 'VERIF'))
  aa.long <- subset(aa.long, str_detect(PARAMETER, 'REVIEW')==FALSE)
  aa.out <- base::subset(aa.long, select = c('SAMPLE_TYPE','PARAMETER','RESULT'))
  return(aa.out)
}
# Reshape SAMPLES data to long format. The 4-character sample type is
# embedded in each original column name at positions 9-12 (after the
# "SAMPLES." prefix); the parameter name follows the type prefix.
# REVIEW fields are dropped from the output.
organizeSamples.ncca <- function(parsedIn){
  # Simply melt these data by SAMPLE_TYPE and clean up parameter names
  aa <- parsedIn
  aa$SAMPLE_TYPE <- 'SAMPLES'
  varLong <- names(parsedIn)
  aa.long <- reshape(aa, idvar = 'SAMPLE_TYPE', varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'variable', direction = 'long')
  # Sample type is the 4-character code embedded in the original column name
  aa.long$SAMPLE_TYPE <- with(aa.long, substring(as.character(variable), 9, 12))
  aa.long$variable <- with(aa.long, gsub('SAMPLES\\.', '', variable))
  # Comment fields and BENTHIC_DISTANCE keep their full name; otherwise the
  # 5-character type prefix is stripped from the parameter name
  aa.long$PARAMETER <- with(aa.long, ifelse(str_detect(variable, '\\_COMMENT')|variable=='BENTHIC_DISTANCE',
                                            variable, substring(as.character(variable),6,nchar(as.character(variable)))))
  aa.long <- subset(aa.long, str_detect(variable, 'REVIEW')==FALSE)
  aa.out <- base::subset(aa.long, select = c('SAMPLE_TYPE','PARAMETER','RESULT'))
  return(aa.out)
}
# Reshape ASSESSMENT data to long format (SAMPLE_TYPE/PARAMETER/RESULT),
# stripping the "ASSESSMENT." column-name prefix and dropping REVIEW fields.
organizeAssessment.ncca <- function(parsedIn){
  assessDat <- parsedIn
  assessDat$SAMPLE_TYPE <- 'ASSESS'
  meltVars <- names(parsedIn)
  assessLong <- reshape(assessDat, idvar = 'SAMPLE_TYPE', varying = meltVars,
                        times = meltVars, v.names = 'RESULT',
                        timevar = 'PARAMETER', direction = 'long')
  # Column names arrive prefixed with "ASSESSMENT."; strip that prefix
  assessLong$PARAMETER <- gsub("ASSESSMENT\\.", "", assessLong$PARAMETER)
  # Review-only fields are not part of the organized output
  keep <- which(str_detect(assessLong$PARAMETER, 'REVIEW') == FALSE)
  assessLong <- assessLong[keep, ]
  assessLong[, c('SAMPLE_TYPE', 'PARAMETER', 'RESULT')]
}
# Reshape PROFILE data to long format with a LINE index.
# Columns named PROFILE.<letter...> are station-level values (LINE = '0');
# columns named PROFILE.<digit>_... carry a line number in the name.
# Secchi-related parameters are reassigned to sample type 'SECC';
# REVIEW fields are dropped.
organizeProfile.ncca <- function(parsedIn){
  # NEED TO FIND PARAMETERS THAT START WITH CHARACTER VS. NUMBER
  aa <- subset(parsedIn, select=str_starts(names(parsedIn),'PROFILE\\.[:alpha:]'))
  aa$SAMPLE_TYPE <- 'HYDRO'
  aa$LINE <- '0'
  varLong <- names(aa)[!(names(aa) %in% c('SAMPLE_TYPE','LINE'))]
  aa.long <- reshape(aa, idvar = c('SAMPLE_TYPE','LINE'), varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'PARAMETER', direction = 'long')
  aa.long$PARAMETER <- with(aa.long, str_replace(PARAMETER, "PROFILE\\.",""))
  # Water-clarity (Secchi) parameters belong to the SECC sample type
  aa.long$SAMPLE_TYPE <- with(aa.long, ifelse(str_starts(PARAMETER,'CLEAR_TO_BOTTOM|DISAPPEARS|REAPPEARS|SECCHI'),'SECC', SAMPLE_TYPE))
  aa.long <- subset(aa.long, str_detect(PARAMETER, 'REVIEW')==FALSE)
  aa.out <- subset(aa.long, select = c('SAMPLE_TYPE','LINE','PARAMETER','RESULT'))
  # bb pulls out and formats species by line number and sample type
  bb <- subset(parsedIn, select=str_starts(names(parsedIn), 'PROFILE\\.[:digit:]'))
  bb$SAMPLE_TYPE <- 'HYDRO'
  varLong <- names(bb)[names(bb)!='SAMPLE_TYPE']
  bb.long <- reshape(bb, idvar='SAMPLE_TYPE', varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'variable', direction = 'long')
  bb.long$variable <- with(bb.long, gsub('PROFILE\\.', '', variable))
  # LINE is the leading digit run; PARAMETER is whatever follows "<digits>_"
  bb.long$LINE <- str_extract(bb.long$variable, '[:digit:]+')
  bb.long$PARAMETER <- str_replace(bb.long$variable, '[:digit:]+\\_', '')
  bb.long <- subset(bb.long, str_detect(variable, 'REVIEW')==FALSE)
  bb.out <- base::subset(bb.long, select=c('SAMPLE_TYPE','LINE','PARAMETER','RESULT'))
  # Combine station-level (aa) and per-line (bb) records
  cc <- rbind(aa.out, bb.out)
  return(cc)
}
# Reshape CALIBRATION data to long format (SAMPLE_TYPE/PARAMETER/RESULT),
# stripping the "CALIBRATION." column-name prefix and dropping REVIEW fields.
organizeCalibration.ncca <- function(parsedIn){
  calibDat <- parsedIn
  calibDat$SAMPLE_TYPE <- 'CALIB'
  meltVars <- names(calibDat)[names(calibDat) != 'SAMPLE_TYPE']
  calibLong <- reshape(calibDat, idvar = 'SAMPLE_TYPE', varying = meltVars,
                       times = meltVars, v.names = 'RESULT',
                       timevar = 'PARAMETER', direction = 'long')
  # Column names arrive prefixed with "CALIBRATION."; strip that prefix
  calibLong$PARAMETER <- gsub('CALIBRATION\\.', '', calibLong$PARAMETER)
  # Review-only fields are not part of the organized output
  keep <- which(str_detect(calibLong$PARAMETER, 'REVIEW') == FALSE)
  calibLong <- calibLong[keep, ]
  calibLong[, c('SAMPLE_TYPE', 'PARAMETER', 'RESULT')]
}
# Reshape ECOFISH data to long format. The 4-character sample type is
# embedded in each column name at positions 9-12, and the parameter name
# starts at position 14. REVIEW fields are dropped.
organizeEcofish.ncca <- function(parsedIn){
  aa <- parsedIn
  aa$SAMPLE_TYPE <- 'EINF'
  varLong <- names(aa)[names(aa)!='SAMPLE_TYPE']
  aa.long <- reshape(aa, idvar = 'SAMPLE_TYPE', varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'PARAMETER', direction = 'long')
  # Overwrite the default type with the code embedded in the column name
  aa.long$SAMPLE_TYPE <- substring(aa.long$PARAMETER, 9, 12)
  aa.long <- subset(aa.long, str_detect(PARAMETER, 'REVIEW')==FALSE)
  # Drop the "ECOFISH.<TYPE>_" lead-in, keeping only the parameter name
  aa.long$PARAMETER <- with(aa.long, substring(PARAMETER, 14, nchar(PARAMETER)))
  aa.out <- base::subset(aa.long, select = c('SAMPLE_TYPE', 'PARAMETER', 'RESULT'))
  return(aa.out)
}
# Reshape HHFISH data to long format. Mirrors organizeEcofish.ncca but the
# shorter "HHFISH." prefix shifts the offsets: the 4-character sample type
# sits at positions 8-11 and the parameter name starts at position 13.
# REVIEW fields are dropped.
organizeHHfish.ncca <- function(parsedIn){
  aa <- parsedIn
  aa$SAMPLE_TYPE <- 'HINF'
  varLong <- names(aa)[names(aa)!='SAMPLE_TYPE']
  aa.long <- reshape(aa, idvar = 'SAMPLE_TYPE', varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'PARAMETER', direction = 'long')
  # Overwrite the default type with the code embedded in the column name
  aa.long$SAMPLE_TYPE <- substring(aa.long$PARAMETER, 8, 11)
  aa.long <- subset(aa.long, str_detect(PARAMETER, 'REVIEW')==FALSE)
  # Drop the "HHFISH.<TYPE>_" lead-in, keeping only the parameter name
  aa.long$PARAMETER <- with(aa.long, substring(PARAMETER, 13, nchar(PARAMETER)))
  aa.out <- base::subset(aa.long, select = c('SAMPLE_TYPE', 'PARAMETER', 'RESULT'))
  return(aa.out)
}
# Reshape SAMPLE_PROCESS data to long format. After stripping the
# "SAMPLE_PROCESS." prefix, the first 4 characters of each column name give
# the sample type; BENTHIC_DISTANCE keeps its full name as the parameter.
# REVIEW fields are dropped.
organizeSampProc.ncca <- function(parsedIn){
  # Simply melt these data by SAMPLE_TYPE and clean up parameter names
  aa <- parsedIn
  aa$SAMPLE_TYPE <- 'SAMPLES'
  varLong <- names(parsedIn)
  aa.long <- reshape(aa, idvar = 'SAMPLE_TYPE', varying = varLong, times = varLong,
                     v.names = 'RESULT', timevar = 'variable', direction = 'long')
  aa.long$variable <- with(aa.long, gsub('SAMPLE\\_PROCESS\\.', '', variable))
  # Sample type is the leading 4-character code of the cleaned column name
  aa.long$SAMPLE_TYPE <- with(aa.long, substring(as.character(variable), 1, 4))
  aa.long <- subset(aa.long, str_detect(variable, 'REVIEW')==FALSE)
  # BENTHIC_DISTANCE keeps its full name; otherwise strip the 5-char prefix
  aa.long$PARAMETER <- ifelse(aa.long$variable=='BENTHIC_DISTANCE', aa.long$variable,
                              substring(aa.long$variable, 6, nchar(aa.long$variable)))
  aa.out <- base::subset(aa.long, select = c('SAMPLE_TYPE','PARAMETER','RESULT'))
  return(aa.out)
}
|
8f3f978c6f403fa1505de5e0b9732e4992eedd95
|
53435fba931963d96958afe6287278f4d575385d
|
/man/divat.Rd
|
d25eb7e05f63d8e632d5efafefbc2d6d6b5d6223
|
[] |
no_license
|
cran/wally
|
370aef3bfc3845b6c45a230ee07fe430377e05ef
|
dc5208dc815b878ca6d91266bc92ed2bb7f8ae4d
|
refs/heads/master
| 2020-12-30T11:16:11.093328
| 2019-10-30T13:40:02
| 2019-10-30T13:40:02
| 91,548,791
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,569
|
rd
|
divat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wally-package.R
\docType{data}
\name{divat}
\alias{divat}
\title{divat data}
\format{A subsample consisting of 1300 observations on the following 3 variables.
\describe{ \item{pi}{5-year risk prediction of kidney graft failure.}
\item{status}{0=censored, 1=kidney graft failure}
\item{time}{time to event (i.e., time to kidney graft failure or loss of follow-up)}}}
\description{
Extracted data from a French population-based cohort (DIVAT cohort). The dataset includes
follow-up information on kidney graft failure outcome and predicted 5-year risks
based on the subject-specific information, which includes age, gender,
cardiovascular and diabetes histories, monitoring of the evolution of the kidney function
measured via serum creatinine and relevant characteristics of his or her kidney donor.
Graft failure is defined as either death with functioning kidney graft or return to dialysis.
The prediction model from which the predictions have been computed has been previously fitted
using an independent training sample from the DIVAT data. Details about data and modeling can
be found in Fournier et al. (2016).
}
\examples{
data(divat)
}
\references{
Fournier, M. C., Foucher, Y., Blanche, P., Buron, F., Giral, M., & Dantan, E. (2016).
A joint model for longitudinal and time-to-event data to better assess the specific
role of donor and recipient factors on long-term kidney transplantation outcomes.
European journal of epidemiology, 31(5), 469-479.
}
\keyword{datasets}
|
6faf879e5f029ba81a36e93d6b1c2a0bb0463fa9
|
1689524e7bcaf7c53cc85538d34a673f43ca74f3
|
/class-3/multiMat.R
|
1f4213ea2d5ebdfe9602f322f822111402275c31
|
[
"MIT"
] |
permissive
|
lidkalee/r-class-igib
|
4b5b9b2e5445ee29c75dcf33cc274dc27bbf87a7
|
ead2c091a048af93609d14c8d89118db58fa49d6
|
refs/heads/master
| 2020-08-09T23:19:58.403508
| 2017-10-16T04:45:40
| 2017-10-16T04:45:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 289
|
r
|
multiMat.R
|
# Multiply every element of a matrix by a scalar.
#
# Args:
#   mat:   a numeric matrix.
#   multi: a single numeric multiplier.
# Returns:
#   A matrix of the same dimensions with each entry scaled by `multi`
#   (dimnames, if present, are preserved — the original loop dropped them).
#
# R's `*` is elementwise and vectorized, so the original double loop over
# rows and columns is unnecessary and O(n*m) interpreted-code slow.
multiMat <- function(mat, multi){
  mat * multi
}
|
927c0fee25a43306ecd1b91b99132d1e1bbebca9
|
b12b3e7f2a6527065feac603e5bf4d1165a0af4c
|
/ui.R
|
cb0677085050bc3b3dedce141d42920a9a09dbe3
|
[] |
no_license
|
areino42/tweetR
|
2b7abc61e840c0839b104949c109ef93ceba835f
|
a8127987b337cf295b95ac720467755d08562be4
|
refs/heads/main
| 2023-03-26T04:15:51.168658
| 2021-03-26T15:40:10
| 2021-03-26T15:40:10
| 351,812,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,504
|
r
|
ui.R
|
library(shiny)
shinyUI(
navbarPage("tweetR",
theme = shinythemes::shinytheme("cerulean"),
collapsible = TRUE,
fluid = TRUE,
position = "fixed-top",
tabPanel("home", icon = icon("home"),
tags$head(
shinyjs::useShinyjs(),
includeCSS("style.css")
),
fluidRow(
tags$header(
HTML('
<header class="masthead">
<div class="container">
<div class="intro-text">
<span class="name">tweetR</span>
<hr>
<span class="subname">Una aplicación de text mining desarrollada en lenguaje de programación <strong>R</strong>.</span>
</div>
</div>
</header>
<section class="success" id="about">
<div class="container">
<h2 class="text-center">por qué <strong>R</strong>?</h2>
<br>
<br>
<div class="row">
<div class="col-lg-6">
<p>R es un ambiente y lenguaje de programación orientado a objetos con enfoque estadístico, siendo uno de
los <strong>open source</strong> más usados por la comunidad científica.</p>
</div>
<div class="col-lg-6">
<p>Es multidisciplinario, siendo muy popular en los campos de minería de datos, matemáticas financieras,
aprendizaje de máquina, bioinformática y en la <strong>minería de textos</strong>.</p>
</div>
</div>
</div>
</section>
')
)
),
fluidRow(
column(width = 4,
br(),
img(src='cell-phone-1245663_640.jpg', align = "left", width = 400, height = 300)
),
column(width = 8,
br(),
br(),
br(),
HTML('
<blockquote class="blockquote blockquote-reverse">
<p>La minería de textos o "text mining" es el conjunto de herramientas que nos permiten obtener conocimiento automático a partir datos no estructurados, en este caso
aplicado a los tweeters correspondientes a los <strong>8 candidatos presidenciales Chile 2017</strong>.
Esta aplicación nos permite abordar las principales herramientas del análisis de textos de manera interactiva,
con el objetivo de que el usuario puede aprender sobre dichas herramientas o técnicas estadísticas y descubrir
que es lo que la gente está diciendo, cuáles son los temas más frecuentes o de interés en las redes sociales.
La aplicación nos permite la descarga de los tweets correspondientes a los <strong>últimos 7 días
</strong>.</p></blockquote>
')
)
),
fluidRow(
column(width = 8,
br(),
br(),
br(),
HTML('
<blockquote class="blockquote">
<p>Los avances tecnológicos han permitido que el área de text mining progrese rápido en la última década, esta área
comprende modelos estadísticos, aprendizaje de máquina, lingüística computacional, entre otras, necesarias para analizar una gran cantidad de información
no estructurada o de texto abierto, que suele ser muy valiosa para la toma de decisiones, pero que muchas veces es pasada por alto o que simplemente queda
almacenada. Hoy softwares de código abierto u <strong>"open source"</strong> pueden automatizar la extracción y análisis de esta información
en ambientes integrados de desarrollo o "IDE" de manera fácil de comprender, en este caso lenguaje científico <strong>"R"</strong>.
Este tipo de tecnologías nos permiten hoy desarrollar prácticamente cualquier cosa.</p>
</blockquote>
')
),
column(width = 4,
br(),
img(src='code-820275_640.jpg', align = "right", width = 400, height = 300)
)
),
fluidRow(
column(width = 4,
br(),
img(src='thought-2123970_640.jpg', align = "left", width = 400, height = 300)
),
column(width = 8,
br(),
br(),
br(),
HTML('
<blockquote class="blockquote blockquote-reverse">
<p>El área de text mining es relativamente nueva en la ciencia computacional, las aplicaciones hoy son varias,
van desde el análisis de redes sociales, inteligencia de negocio, mercadeo, gerencia de riesgo, hasta la prevención de
cibercrímenes. Técnicas como la conglomeración, análisis de sentimientos, entre otras, son muy útiles
para extraer el conocimiento escondido en datos no estructurados, técnicas le permiten a la organización a
incrementar su competitividad.
</p></blockquote>
<blockquote class="blockquote blockquote-reverse">
<i>"La creatividad es simplemente conectar cosas. Cuando le preguntas a las personas creativas como hicieron algo,
se sienten un poco culpables, porque en realidad no lo hicieron, simplemente vieron algo. Algo que después de
algún tiempo, les pareció obvio."</i>
<footer class="blockquote-footer"><cite title="Source Title">STEVE JOBS</cite></footer>
</blockquote>
')
)
),
fluidRow(
HTML('
<footer class="footer-distributed">
<div class="footer-left">
<h3>developed thanks to</h3>
<p class="footer-links">
<a href="https://www.r-project.org/">The R Project for Statistical Computing</a>
<br>
<a href="https://www.rstudio.com/">RStudio – Open source and enterprise-ready professional software for R</a>
<br>
<a href="https://shiny.rstudio.com/">Shiny from RStudio</a>
<br>
<a href="https://www.shinyapps.io/">shinyapps.io by RStudio</a>
<br>
<a href="https://startbootstrap.com/">Start Bootstrap</a>
<br>
<a href="https://tutorialzine.com/">Tutorialzine!</a>
</p>
<p class="footer-company-name">tweetR ® 2017</p>
</div>
<div class="footer-center">
<div>
<i class="fa fa-map-marker"></i>
<p><span>Santa Isabel 353</span> Santiago, Chile</p>
</div>
<div>
<i class="fa fa-phone"></i>
<p>+56964835724</p>
</div>
<div>
<i class="fa fa-envelope"></i>
<p><a href="mailto:andresr424242@gmail.com">andresr424242@gmail.com</a></p>
</div>
</div>
<div class="footer-right">
<p class="footer-company-about">
<span>about the app</span>
Aplicación web interactiva en "Shiny" para el análisis la red social Twitter, Magister en Estadística 2016, Instituto de Estadística, PUCV.
</p>
<a href="http://www.estadistica.cl/">
<img src="logo.png" width="250" height="200">
</a>
</div>
</footer>
')
)
),#--------------> tabPanel
tabPanel("tweets",icon = icon("twitter"),
fluidRow(
column(width = 3,
HTML('
<h3>tweets</h3>
<p>Selecciona el twitter a analizar y la cantidad de tweets a descargar, recuerda
que la aplicación permite descargar contenido de los <strong>últimos 7 días</strong>.
La distribución de los tweets por día puede variar según la frecuencia con la que se
escriba en el mismo.
</p>
'),
br(),
sidebarPanel(width = 12,
selectInput("target", "twitter a analizar:",
c(
"CAROLINA GOIC @carolinagoic" = "carolinagoic",
"JOSE ANTONIO KAST @joseantoniokast" = "joseantoniokast",
"SEBASTIAN PINERA @sebastianpinera" = "sebastianpinera",
"ALEJANDRO GUILLIER @guillier" = "guillier",
"BEATRIZ SANCHEZ @labeasanchez" = "labeasanchez",
"MARCO ENRIQUEZ OMINAMI @marcoporchile" = "marcoporchile",
"EDUARDO ARTES @eduardo_artes" = "eduardo_artes",
"ALEJANDRO NAVARRO @navarrobrain" = "navarrobrain"
)
),
br(),
sliderInput("n", "tweets:", min = 1000, max = 3000, value = 2000),
br(),
dateRangeInput("daterange", "fecha:",
start = since,
end = until),
br(),
HTML('
<button id="go" type="button" class="btn action-button btn btn-primary btn-lg btn-block"><i class="fa fa-fw fa-twitter"></i> get tweets</button>
')
)
),
column(width = 9,
br(),
DT::dataTableOutput("mytable")
)
) #----------> row
), #----------> tabPanel
tabPanel("top 10 retweets",icon = icon("reply-all"),
fluidRow(
column(width = 3,
HTML('
<h3>top 10 retweets</h3>
<p>De los tweets descargados tenemos aquellos 10 que registran el conteo más alto en el
campo <strong>"retweetCount"</strong>. Dar Re tweet significa copiar el tweet de otra
persona en nuestro perfil para que nuestros seguidores lo lean, se distingue por las
iniciales <strong>"RT"</strong> al comienzo del texto. Twitter almacena el número de
veces que el tweet es re twitteado.</p>
')
),
column(width = 9,
br(),
DT::dataTableOutput("mytable_b")
)
) #----------> row
), #----------> tabPanel
tabPanel("frequent terms",icon = icon("bar-chart"),
fluidRow(
column(width = 3,
HTML('
<h3>frequent terms</h3>
<p>Los términos frecuentes o "frequent terms" son aquellas palabras que más se repiten en los
tweets descargados. Se puede ajustar la frecuencia mínima de los términos a visualizar.</p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("lowfreq", "frecuencia mínima:", min = 50, max = 150, value = 75)
)
),
column(width = 9,
br(),
plotOutput("frequent",width = "100%", height = "800px")
)
) #----------> row
),#----------> tabPanel
tabPanel("wordcloud", icon = icon("cloud"),
fluidRow(
column(width = 3,
HTML('
<h3>wordcloud</h3>
<p>La nube de palabras o "wordcloud" es una representación visual de los términos que componen
el texto de los tweets, pero ordenados en tamaño según su frecuencia, es decir las palabras
de mayor frecuencia poseen un mayor tamaño y viceversa.
</p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("freq", "frecuencia mínima:", min = 1, max = 150, value = 1),
br(),
sliderInput("max","numero máximo de palabras:",min = 10, max = 300, value = 200)
)
),
column(width = 9,
br(),
plotOutput("cloud",width = "100%", height = "800px")
)
) #-----------> row
),#----------> Tabpanel
tabPanel("network of terms",icon = icon("share-alt"),
fluidRow(
column(width = 3,
HTML('
<h3>network of terms</h3>
<p>La red de términos o "network of terms" es la representación visual de las correlaciones
existentes entre las palabras más frecuentes, se ajusta la <strong>frecuencia mínima</strong>
y la <strong>correlación límite</strong> o "corThreshold", las relaciones más fuertes
entre términos resaltan más y viceversa.
</p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("lowfreq2", "frecuencia mínima:", min = 50, max = 150, value = 50),
br(),
sliderInput("corThreshold", "correlación límite:", min = 0.01, max = 1, value = 0.25)
),
br(),
HTML('
<p><small><h3>search correlations</h3></small></p>
<p><small>Busca las correlaciones de los términos presentados en "network of terms".</small></p>
'),
sidebarPanel(width = 12,
textInput("search", 'ingresa el término:', ''),
br(),
HTML('
<button id="go_b" type="button" class="btn action-button btn btn-primary btn-lg btn-block"><i class="fa fa-fw fa-search"></i> search correlations</button>
')
)
),
column(width = 9,
br(),
plotOutput("network",width = "100%", height = "600px"),
br(),
verbatimTextOutput("network_b")
)
) #----------> row
), #----------> tabpanel
tabPanel("cluster",icon = icon("sitemap"),
br(),
tabsetPanel(
tabPanel("hierarchical cluster",
fluidRow(
column(width = 3,
HTML('
<h3>hierarchical cluster</h3>
<p>Un dendrograma es un diagrama de árbol que nos muestra los conglomerados y sus niveles de distancia, con el objetivo de crear grupos similares u homogéneos.
Esta herramienta utiliza el algoritmo de conglomerado jerárquico o <strong>"hierarchical cluster"</strong> que se basa en el cálculo de distancias, la distancia por defecto es la <strong>"euclidiana"</strong>.
Existen varios métodos que se pone a disposición.
</p>
<p><small><h3>remove sparse terms</h3></small></p>
<p><small>La eliminación de términos dispersos o "remove sparse terms" depura la matriz para el análisis
de aquellas palabras o términos con poca frecuencia, es decir palabras que son dispersas. Dispersión o <strong>"sparse"</strong>
es un valor numérico para la espacialidad o dispersión máxima permitida, a mayor nivel de espacialidad,
mayor el número de palabras que conserva la matriz y viceversa.
Por defecto se fija al 0.99, permitiendo disminuir hasta un 0.95.
</small></p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("h_sparse","removeSparseTerms:", value = 0.99, min = 0.95, max = 0.99, step = 0.01),
br(),
selectInput("h_distance", "distancia a usar:",
c(
"euclidean" = "euclidean",
"maximum" = "maximum",
"manhattan" = "manhattan",
"canberra" = "canberra",
"binary" = "binary",
"minkowski" = "minkowski"
)
),
br(),
selectInput("method", "método de aglomeración a usar:",
c(
"ward.D" = "ward.D",
"ward.D2" = "ward.D2",
"single" = "single",
"complete" = "complete",
"average" = "average",
"mcquitty" = "mcquitty",
"median" = "median",
"centroid" = "centroid"
)
)
)
),
column(width = 9,
br(),
plotOutput("hcluster",width = "100%", height = "600px")
)
) #----------------> Row
),#---------------> Tab
tabPanel("kmeans cluster",
fluidRow(
column(width = 3,
HTML('
<h3>kmeans cluster</h3>
<p>K-medias o "kmeans" es un método para agrupar <strong>n</strong> observaciones en <strong>k</strong> grupos,
en donde cada grupo posee observaciones con valores medios cercanos o similares.
Selecciona el número de grupos a formar.
</p>
<p><small><h3>remove sparse terms</h3></small></p>
<p><small>La eliminación de términos dispersos o "remove sparse terms" depura la matriz para el análisis
de aquellas palabras o términos con poca frecuencia, es decir palabras que son dispersas. Dispersión o <strong>"sparse"</strong>
es un valor numérico para la espacialidad o dispersión máxima permitida, a mayor nivel de espacialidad,
mayor el número de palabras que conserva la matriz y viceversa.
Por defecto se fija al 0.99, permitiendo disminuir hasta un 0.95.
</small></p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("k_sparse","removeSparseTerms:", value = 0.99, min = 0.95, max = 0.99, step = 0.01),
br(),
sliderInput("k", "número de clúster:", min = 1, max = 10, value = 4, step = 1)
)
),
column(width = 9,
fluidRow(
column(width = 6,
br(),
plotOutput("kcluster_c",width = "100%", height = "400px")
),
column(width = 6,
br(),
plotOutput("kcluster_d",width = "100%", height = "400px")
)
),
verbatimTextOutput("kcluster_b")
)
)#-------------> Row
)#-------------> Tab
) #----------> row
), #----------> tabpanel
tabPanel("topic modeling",icon = icon("area-chart"),
fluidRow(
column(width = 3,
HTML('
<h3>topic modeling</h3>
<p>El modelador de tópicos o "topic modeling" es una manera sencilla de analizar grandes
volúmenes de texto. Un <strong>tópico</strong> consiste en una conglomeración de palabras
que frecuentemente aparecen juntas. Se usa para descubrir estructuras semánticas en un vector
de texto. Se puede modelar ajustando el número de tópicos y los términos por tópico.
</p>
<p><small><h3>Latent Dirichlet Allocation (LDA)</h3></small></p>
<p><small>En el procesamiento de lenguaje natural, LDA es un modelo estadístico generativo,
en el cual cada documento se ve como una mezcla de temas, el modelo propone que cada palabra
en el documento es explicado o atribuido a un tema o tópico en particular.
En el modelo generativo los documentos son mezclas aleatorias de temas <strong>latentes</strong>,
donde cada tema tienen su distribución sobre las palabras que lo componen.
</small></p>
'),
br(),
sidebarPanel(width = 12,
sliderInput("topics", "tópicos:", 4, min = 2, max = 8, step = 1),
br(),
sliderInput("term", "término por tópico:", 4, min = 2, max = 6, step = 1)
)
),
column(width = 9,
br(),
plotlyOutput("topics_a",width = "100%", height = "500px"),
br(),
verbatimTextOutput("topics_b")
)
) #----------> row
), #----------> tabpanel
tabPanel("devices",icon = icon("feed"),
fluidRow(
column(width = 3,
HTML('
<h3>devices</h3>
<p>Twitter guarda el canal por el que fue enviado el tweet, que son los dispositivos o "devices".
La gráfica nos indica que dispositivos son los más usados.
A continuación te presentamos las definiciones de los canales menos comunes:</p>
<p><small><h3>twitter web client</h3></small></p>
<p><small>Es una herramienta web de gestión para clientes de Twitter,
con funciones de automatización, seguimiento, entre otras.</small></p>
<p><small><h3>tweetdeck</h3></small></p>
<p><small>Es una herramienta para gestionar y controlar notificaciones, mensajes y
la actividad en general de una o varias cuentas de Twitter.</small></p>
<p><small><h3>twitter lite</h3></small></p>
<p><small>Es una versión reducida de la aplicación de Twitter, que ocupa menos almacenamiento y
gasta menos datos.</small></p>
<p><small><h3>hootsuite</h3></small></p>
<p><small>Es una plataforma diseñada para gestionar redes sociales, usada por personas
u organizaciones. </small></p>
')
),
column(width = 9,
br(),
plotlyOutput("devices",width = "100%", height = "500px")
)
) #----------> row
) #----------> tabpanel
)# ---------------> nav bar pages
)#----------> ui
|
50d2dc1da5408a823200a87824f0faf0dce5ba4b
|
3fb3e85afa180f302aebee631e532f1ad6222120
|
/Mapping_3D_gb_data.R
|
dfcc66c8e6af7624ce7eb69c738fa4245ae38ad6
|
[
"CC0-1.0"
] |
permissive
|
RonaldVisser/Roads-Subsurface-in-3-Dimensions-RSI3D
|
039d8bae58d666dbf37a7957ba4fefc607150699
|
3dff3d825175174a0cf3de3538e3b3384037328c
|
refs/heads/main
| 2023-04-11T12:21:01.652501
| 2023-01-20T14:41:15
| 2023-01-20T14:41:15
| 581,506,362
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,548
|
r
|
Mapping_3D_gb_data.R
|
# Mapping_3D_gb_data.R
# Build a 3D road-surface table (gb_wegvak_3d) in PostGIS: derive a base
# elevation from the 3D BAG buildings inside the study area, attach pavement
# ("verharding") and bedding-layer ("straatlaag") thicknesses to the road
# segments, then extrude the segments into 3D solids.
# input vector layer should be polygon (not multipolygon!)
source("Connect_db.R") # also loads library(RPostgres); provides the `con` connection
source("ColumnExists.R") # load function to check if column exists
# View of the 3D BAG buildings falling inside the study area polygon
SQL_3D_view <- "create or replace view \"griffiersveld_3d\" as
select * from \"3dbag_v210908_lod22_3d\"
where ST_Within(\"3dbag_v210908_lod22_3d\".geom, (select geom from onderzoeksgebied));"
dbExecute(con,SQL_3D_view)
# Base elevation = mean of the buildings' minimum Z values, rounded to 2 dp
SQL_minimal_z <- "Select ST_ZMin(geom) from \"griffiersveld_3d\" "
z_values <- dbGetQuery(con, SQL_minimal_z)
mean_z <- round(mean(z_values$st_zmin),2)
#wegvlak <- dbGetQuery(con, "select * from gb_wegvakonderdeel")
# simplify road-layer to prevent errors in 3D geometry and speed up processes, 0.005 m resolution
dbExecute(con, "Update gb_wegvakonderdeel set geom = ST_SimplifyPreserveTopology(geom,0.005)")
# Pavement thicknesses per surface type: Sier/BSS = 8.8 cm, concrete tiles (Tegels) = 7.3 cm
# add column for dikte_verharding (pavement thickness) if it does not exist yet
if (ColumnExists(con, "gb_wegvakonderdeel", "dikte_verharding") == FALSE) {
  dbExecute(con, "Alter table gb_wegvakonderdeel add column dikte_verharding numeric")
}
# set pavement thickness (in metres) according to the surface-type prefix
dbExecute(con, "UPDATE gb_wegvakonderdeel set dikte_verharding = 0.073 where verhardi00 like 'Tegels%';")
dbExecute(con, "UPDATE gb_wegvakonderdeel set dikte_verharding = 0.088 where verhardi00 like 'BSS%';")
dbExecute(con, "UPDATE gb_wegvakonderdeel set dikte_verharding = 0.088 where verhardi00 like 'Sier%';")
# Bedding-layer thickness: mean of the point measurements (straatlaag, in cm,
# converted to metres) falling within each road-segment polygon
if (ColumnExists(con, "gb_wegvakonderdeel", "dikte_straatlaag") == FALSE) {
  dbExecute(con, "Alter table gb_wegvakonderdeel add column dikte_straatlaag numeric")
}
dbExecute(con, "UPDATE gb_wegvakonderdeel set dikte_straatlaag = avg_dikte FROM
(select gb_wegvakonderdeel.gid, round(avg(straatlaag.dikte::numeric)/100,3) as avg_dikte
from straatlaag,gb_wegvakonderdeel
where st_within(straatlaag.geom, gb_wegvakonderdeel.geom)
group by gb_wegvakonderdeel.gid) as straatlaagdikte
WHERE straatlaagdikte.gid = gb_wegvakonderdeel.gid")
# create layer (first delete if exists)
if (dbExistsTable(con,"gb_wegvak_3d" )) {
  dbExecute(con, "drop table \"gb_wegvak_3d\"")
}
# Pavement layer: extrude each segment by its pavement thickness and translate
# it down by (mean_z - thickness) so the solid's top sits at the base elevation
SQL_bestrating <- paste0("create table gb_wegvak_3d as select gid, verharding, verhardi00, std_verhar, 'bestrating' as laag3D, ",
                         "st_translate(ST_Extrude(geom,0,0,dikte_verharding),0,0,", mean_z,
                         "-dikte_verharding) as geom from gb_wegvakonderdeel where dikte_verharding is not null;")
# Code for multipolygon input
#SQL_bestrating <- paste0("create table gb_wegvak_3d as select gid, verharding, verhardi00, std_verhar, 'bestrating' as laag3D,
#ST_CollectionExtract(st_translate(ST_Extrude(geom,0,0,dikte_verharding),0,0,", mean_z,
# "-dikte_verharding)) as geom from gb_wegvakonderdeel where dikte_verharding is not null;")
dbExecute(con,SQL_bestrating)
dbExecute(con, "ALTER TABLE gb_wegvak_3d RENAME COLUMN gid TO gid_2d")
dbExecute(con, "ALTER TABLE gb_wegvak_3d add column gid bigserial")
# dbExecute(con, "UPDATE gb_wegvak_3d set gid = row_number()")
# st_volume is slow, store volumes in table:
if (ColumnExists(con, "gb_wegvak_3d", "volume_m3") == FALSE) {
dbExecute(con, "Alter table gb_wegvak_3d add column volume_m3 numeric")
}
if (ColumnExists(con, "gb_wegvak_3d", "time_vol_calc") == FALSE) {
dbExecute(con, "Alter table gb_wegvak_3d add column time_vol_calc numeric")
}
# loop to store volumes in DB, because slow process and can be stopped per iteration (record/volume) now
gid_n <- dbGetQuery(con, "select max(gid) from gb_wegvak_3D")
for (i in 0:as.integer(gid_n$max)){
start_time <- Sys.time()
dbExecute(con, paste0("UPDATE gb_wegvak_3d set volume_m3 = st_volume(geom) where gid = ", i, " and volume_m3 is null;"))
dbExecute(con, paste0("UPDATE gb_wegvak_3d set time_vol_calc = ", as.numeric(difftime(Sys.time(), start_time, units="secs")), "where gid = ", i, "and time_vol_calc is null;"))
cat('Processing record', i+1, 'of', as.integer(gid_n$max)+1,'\n')
}
#dbExecute(con, "UPDATE gb_wegvak_3d set volume_m3 = st_volume(geom)")
# dikte straatlaag invoegen
SQL_straatlaag <- paste0("INSERT INTO gb_wegvak_3d select gid, verharding, verhardi00, std_verhar, 'straatlaag' as laag3D,
ST_CollectionExtract(st_translate(ST_Extrude(geom,0,0,dikte_straatlaag),0,0,", mean_z,
"-dikte_verharding-dikte_straatlaag)) as geom from gb_wegvakonderdeel where dikte_straatlaag is not null;")
dbExecute(con,SQL_straatlaag)
dbDisconnect(con)
|
18ee3da3fb98a41125917eee8e4f244ce478407c
|
d56ed82f814dd7a299ee6caf8a52ac381bc8bd93
|
/R/packages/topocatch/R/topocatch.r
|
fee71231405506c3433f3a38bd607f2c137f2b85
|
[
"MIT"
] |
permissive
|
echse/echse_tools
|
db69845e5e9d26f4a8f68a3ae7a9622b28fbce88
|
4e236a426d261f3750a2f31bb7fdab61335ed676
|
refs/heads/master
| 2021-06-03T10:05:55.907513
| 2017-11-08T13:28:41
| 2017-11-08T13:28:41
| 39,069,970
| 1
| 2
| null | 2015-11-09T12:04:53
| 2015-07-14T11:03:07
|
R
|
UTF-8
|
R
| false
| false
| 23,809
|
r
|
topocatch.r
|
#' Pre-processing of geo data for hydrological catchment modeling
#'
#' Type \code{help(package="topocatch")} to inspect the package description.
#'
#' @name topocatch-package
#' @aliases topocatch
#' @docType package
{}
#' @import shapefiles
library("shapefiles")
#' @import foreign
library("foreign")
#' @import sp
library("sp")
#' @import maptools
library("maptools")
################################################################################
#' Filling of sinks in a digital elevation model (DEM)
#'
#' See the workhorse function \code{\link{sinkfill}} for details.
#'
#' @param fileIn Name/path of input file (ASCII grid).
#' @param fileOut Name/path of output file (ASCII grid).
#' @inheritParams sinkfill
#' @inheritParams geogrid.readAscii
#' @inheritParams geogrid.writeAscii
#'
#' @return \code{NULL}
#'
#' @author David Kneis \email{david.kneis@@uni-potsdam.de}
#'
#' @export
dem.fill= function(fileIn, fileOut, ndigits, replace=FALSE, silent=TRUE
) {
  # Local helper: progress messages, suppressed when silent=TRUE
  say= function(msg) {
    if (!silent) print(msg)
  }
  # --- Argument validation (flags, files, numbers) ---
  checkArg(arg=replace, len=1, type="logical")
  checkArg(arg=silent, len=1, type="logical")
  checkFileIn(fileIn)
  checkFileOut(fileOut, replace=replace)
  checkArg(arg=ndigits, len=1, type="integer")
  # --- Read the DEM, remove sinks, write the result ---
  say("Reading DEM grid...")
  rawGrid= geogrid.readAscii(fileIn)
  say("Filling sinks...")
  filledGrid= sinkfill(grid=rawGrid, ndigits=ndigits, silent=silent)
  say("Writing sink-filled DEM grid...")
  geogrid.writeAscii(grid=filledGrid, file=fileOut, replace=replace)
  invisible(NULL)
}
################################################################################
#' Analyze a digital elevation model (DEM)
#'
#' See the workhorse functions \code{\link{flowdir}} and
#' \code{\link{concTimeIndex}} for details.
#'
#' @param fileDEM Name/path of the INPUT file containing the \emph{sink-filled}
#' DEM (ASCII grid).
#' @param fileDIR Name/path of the OUTPUT file containing flow direction codes
#' (ASCII grid).
#' @param fileACC Name/path of the OUTPUT file containing flow accumulation data
#' (ASCII grid).
#' @param fileCTI Name/path of the OUTPUT file containing values of the
#' concentation time index (ASCII grid).
#' @param fileSHP Name/path of the OUTPUT file containing the generated river
#' net (shape file format).
#' @param minlength_reach Minimum length of a river section to be classified as
#' a reach. Shorter sections are considered as so-called 'mini-reaches'.
#' @param classname_reach Class name for reach objects.
#' @param classname_minireach Class name for mini-reach objects (see the
#' \code{minlength_reach} argument).
#' @param id_field Name of the ID field in the output shape file.
#' @param class_field Name of the class field in the output shape file.
#' @inheritParams flowdir
#' @inheritParams concTimeIndex
#' @inheritParams flowPaths
#' @inheritParams geogrid.readAscii
#' @inheritParams geogrid.writeAscii
#'
#' @return ID of the system's outlet, i.e. the most downstream reach (integer).
#'
#' @author David Kneis \email{david.kneis@@uni-potsdam.de}
#'
#' @export
dem.analyze= function(fileDEM, fileDIR, fileACC, fileCTI, fileSHP,
crit_source_area,
x_inBasin=NULL, y_inBasin=NULL, id_field="id", class_field="class",
minlength_reach=100, classname_reach="rch", classname_minireach="minirch",
dz_min=0.1, replace=FALSE, silent=TRUE
) {
# --- Check args ---
# Logical flags
checkArg(arg=replace, len=1, type="logical")
checkArg(arg=silent, len=1, type="logical")
# Files: input must exist, outputs must not (unless replace=TRUE)
checkFileIn(fileDEM)
checkFileOut(fileDIR, replace=replace)
checkFileOut(fileACC, replace=replace)
checkFileOut(fileCTI, replace=replace)
checkFileOut(fileSHP, replace=replace)
# Field and class names
checkArg(arg=id_field, len=1, type="character")
checkArg(arg=class_field, len=1, type="character")
checkArg(arg=classname_reach, len=1, type="character")
checkArg(arg=classname_minireach, len=1, type="character")
# Numeric thresholds
checkArg(arg=crit_source_area, len=1, type="numeric")
checkArg(arg=minlength_reach, len=1, type="numeric")
checkArg(arg=dz_min, len=1, type="numeric")
# --- Process ---
# Step 1: flow direction codes from the (sink-filled) DEM
if (!silent) print("Reading DEM grid...")
dem= geogrid.readAscii(fileDEM)
if (!silent) print("Computing flow direction codes...")
fdir= flowdir(grid=dem, silent=silent)
if (!silent) print("Writing grid of flow direction codes...")
geogrid.writeAscii(grid=fdir, file=fileDIR, replace=replace)
# Step 2: flow accumulation derived from flow directions
if (!silent) print("Computing flow accumulation...")
facc= flowacc(grid=fdir, silent=silent)
if (!silent) print("Writing grid of flow accumulation...")
geogrid.writeAscii(grid=facc, file=fileACC, replace=replace)
# Step 3: concentration time index grid
if (!silent) print("Computing concentration time indices...")
cti= concTimeIndex(grid_dem=dem, grid_flowdir=fdir, grid_flowacc=facc,
crit_source_area=crit_source_area, dz_min=dz_min, silent=silent)
if (!silent) print("Writing grid of concentration time indices...")
# Rounded to 1 decimal before writing -- presumably to keep the ASCII grid compact
cti$matrix= round(cti$matrix,1)
geogrid.writeAscii(grid=cti, file=fileCTI, replace=replace)
rm(cti) # Clean up
# Step 4: derive the drainage network (flow paths) as line features
if (!silent) print("Computing flow paths...")
tmp= flowPaths(grid_flowdir=fdir, grid_flowacc=facc, crit_source_area=crit_source_area,
x_inBasin=x_inBasin, y_inBasin=y_inBasin, silent=silent)
id_outlet= tmp$id_outlet # Set outlet ID (returned to the caller at the end)
rm(fdir) # Clean up
rm(facc) # Clean up
# Step 5: attribute table; features are classified as reach or mini-reach by length
if (!silent) print("Assembling attribute table...")
tmp= tmp$shpTable # Only keep the table in tmp
# Per-feature length: encode coordinates as "x_y" strings, then let lineLength
# accumulate segment lengths per feature id
tmp$xy= paste(tmp$x, tmp$y, sep="_")
len= tapply(X=tmp$xy, INDEX=tmp$id, FUN=lineLength, sepchar="_")
tmp$xy= NULL
attr= data.frame(id=as.integer(names(len)), len=as.numeric(len), class=NA)
attr$class[attr$len >= minlength_reach]= classname_reach
attr$class[attr$len < minlength_reach]= classname_minireach
attr$len= NULL
# Rename columns to the caller-supplied field names
names(attr)[which(names(attr) == "id")]= id_field
names(attr)[which(names(attr) == "class")]= class_field
if (!silent) print(paste("Identified ",sum(attr[,class_field]==classname_reach),
" object(s) of class '",classname_reach,"'",sep=""))
if (!silent) print(paste("Identified ",sum(attr[,class_field]==classname_minireach),
" object(s) of class '",classname_minireach,"'",sep=""))
# Step 6: write the network as a polyline shape file (shape type 3)
if (!silent) print("Assembling shape file...")
names(tmp)[which(names(tmp)== "id")]= id_field
s= convert.to.shapefile(shpTable=tmp, attTable=attr, field=id_field, type=3)
# Strip a trailing ".shp" because write.shapefile appends the extensions itself
if (nchar(fileSHP) > 4) {
if (substr(fileSHP,nchar(fileSHP)-3,nchar(fileSHP)) == ".shp")
fileSHP= substr(fileSHP,1,nchar(fileSHP)-4)
}
if (file.exists(paste(fileSHP,".shp",sep="")) && (!replace))
stop(paste("Shape file '",paste(fileSHP,".shp",sep=""),"' already exists.",sep=""))
if (!silent) print("Writing shape file...")
write.shapefile(shapefile=s, out.name=fileSHP, arcgis=TRUE)
# Return outlet ID
return(id_outlet)
}
################################################################################
#' Derive input for hydrological catchment modeling from pre-processed geo data
#'
#' The function identifies the relevant objects for object-based hydrological
#' catchment modeling from pre-processed geo data sets. It creates several output
#' files which can directly serve as an input for hydrological models, namely those
#' build with the ECHSE simulation environment.
#'
#' @param fileSHP Name/path of an INPUT shape file with line features representing
#' drainage lines. This file is either an output of \code{\link{dem.analyze}},
#' a manually edited version of such output or an external file (possibly
#' created by manual digitizing). If \code{updateSHP} is \code{TRUE}, some new
#' fields will be appended to the shape file's attribute table (making it an
#' input \emph{and} output).
#' @param fileDEM Name/path of the INPUT file containing the \emph{sink-filled}
#' DEM (ASCII grid). It is used here to estimate the bed slope of river reaches.
#' If the shape file supplied as \code{fileSHP} was created by a call to
#' \code{\link{dem.analyze}}, the same DEM as in this call should be used.
#' @param fileDIR Name/path of the INPUT file containing flow direction codes
#' computed by \code{\link{dem.analyze}} (ASCII grid).
#' @param fileCAT Name/path of the OUTPUT grid file showing shape, position and
#' extent of the created catchments (ASCII grid). Each catchment is identified
#' by an ID number. This integer code is derived from the IDs of the
#' corresponding features in the input shape file (\code{fileSHP}).
#' @param fileAttrCAT Name/path of a tabular OUTPUT file listing basic attributes
#' of the generated catchments such as the areal extent (in field 'area') and the
#' positions of the center of gravity ('x' and 'y' fields). The unit of the area
#' is square of the basic length unit used in the input files. See also the
#' \code{findAffectedGages} argument.
#' @param fileAttrRCH Name/path of a tabular OUTPUT file listing basic attributes
#' of reach objects (incl. 'mini-reaches'). The attributes include the reach
#' length (field 'length'), the elevation of the two reach ends (fields
#' 'elev_min' and 'elev_max'), a rough estimate of the bed slope derived from
#' the former information ('slope' field) as well as the total area of the
#' reach's upstream catchment (in field 'upstreamArea'). See also the
#' \code{findAffectedGages} argument.
#' @param fileObjDecl Name/path of a tabular OUTPUT file to be used as an input
#' by ECHSE-based hydrological models. This file represents the so-called
#' 'object declaration table' holding IDs and class info for all objects.
#' @param fileObjLink Name/path of a tabular OUTPUT file to be used as an input
#' by ECHSE-based hydrological models. This file represents the so-called
#' 'object linkage table'. It provides info on object interactions.
#' @param id_outlet ID of the line feature representing the system's outlet
#' (ID of the most downstream reach). The value must exist in the ID field of
#' the shape file's attribute table specified by \code{id_field}. If the shape
#' file supplied as \code{fileSHP} was created by a call to \code{\link{dem.analyze}},
#' without subsequent modification, the return value of this function is an
#' appropriate input.
#' @param id_field Name of the field in the shape file's attribute table
#' containing feature IDs.
#' @param class_field Name of the field in the shape file's attribute table
#' holding information on the the features' classes. This field is used, for
#' example, to distinguish between reaches, pipes, reservoirs, etc. See the
#' \code{classes_with_catchment} argument for further details.
#' @param classname_reach Class name used for reach objects. If the shape
#' file supplied as \code{fileSHP} was created by a call to
#' \code{\link{dem.analyze}}, the value should be consistent with the one used
#' in that call.
#' @param classname_minireach Class name used for very short reach objects with
#' negligible travel time. If the shape file supplied as \code{fileSHP} was
#' created by a call to \code{\link{dem.analyze}}, the value should be
#' consistent with the one used in that call.
#' @param classname_node Class name to be assigned to node objects (junctions).
#' @param classname_catch Class name to be assigned to catchment objects.
#' @param classname_gage Class name to be assigned to gage objects, if existent.
#' @param classes_with_catchment A vector of class names (strings). Catchments are
#' generated only for features belonging to those classes. These class names are
#' expected to exist in the attribute table's field specified by \code{class_field}.
#' Typically, catchments are to be generated for objects of the reach class, at
#' least. Lake and reservoir objects, usually have a catchment on its own as well.
#' @param nbuffer An integer value to control the thickness of the rasterized
#' lines during vector-to-raster conversion. The converted lines will be
#' \eqn{nbuffer * 2 + 1} cells wide. Thus, the default value of 1 results in
#' rasterized lines being 3 cells wide.
#' @param coord_tol A small distance value (same spatial unit as used in the
#' shape file) to account for precision problems. The positions of two lines'
#' end points are assumed to be identical if their distance is <= the value of
#' \code{coord_tol}. The default is 1.e-03.
#' Shape files created by a call to \code{\link{dem.analyze}} are always
#' \emph{exact} in this respect and the small default value is appropriate.
#' If the shape file was created manually in a GIS without the use of a snapping
#' mechanism (which is \emph{not} recommended), a larger value might be required.
#' The value of \code{coord_tol} is also used when gage objects are specified by
#' position via the \code{gageLocations} argument.
#' @param prefix_node String used as a prefix when generating the IDs of node
#' objects based on the features ID of the respective downstream objects.
#' @param prefix_catch String used as a prefix when generating the IDs of
#' catchment objects based on the IDs of the corresponding features from the
#' shape file.
#' @param min_slope Minimum slope of river bed (dimensionless). This value is
#' used as a substitute for zero-slope values in the reach attributes table.
#' Such zero-values typically exist in nearly flat areas.
#' @param updateSHP If \code{TRUE}, fields will be appended to the shape file's
#' attribute table. Those fields contain information for reach-like objects
#' such as the length and the total upstream area. The value of -9999 is used
#' in these fields for non-reach-like objects and those objects which were
#' dropped from the system because of missing inflow from upstream.
#' @param namesIO A data frame with 2 columns named 'target' and 'source'. Each
#' record represents the names of a pair of corresponding input-output variables. A typical
#' example would be a data frame with just a single record and the strings
#' "inflow" and "outflow" in the 'target' and 'source' column, respectively.
#' Additional records in the data frame declare additional I/O variables whose
#' values are exchanged between interacting objects.
#' @param gageLocations A data frame with (at least) 3 columns named 'id',
#' 'x' and 'y'. Each record defines the position of a gage. By default, this
#' data frame is empty. The positions given by x and y \emph{must} coindide
#' with the coordinates of the start or end point of a line feature in the
#' shape file. Thus, a gage can only be located at either end of a reach but not
#' somewhere in the mid. Note that the coordinates must match to the precision
#' defined by \code{coord_tol}.
#' @param findAffectedGages If \code{TRUE}, the function identifies the gage
#' objects being affected by each particular object. For each gage, an additional
#' column is appended to the output files \code{fileObjDecl}, \code{fileAttrCAT},
#' and \code{fileAttrRCH}. Column names are taken from the 'id'
#' field of \code{gageLocations}. The values in those columns are 1 or 0, where
#' 1 means that the gage (column) is affected by the simulated object (row) and
#' 0 indicates the opposite (no interaction). The default for \code{findAffectedGages}
#' is \code{FALSE} because the current algorithm may be slow for very large systems.
#' If \code{gageLocations} is empty this argument setting has no effect.
#' @param replace Should existing output files be silently replaced?
#' @param silent Logical value to turn diagnostic messages on/off.
#'
#' @return The function returns \code{NULL}. All computed information is written
#' to the respective output files.
#'
#' @note Not all of the features in the shape file are retained as reach-like
#' objects. Those features that do not have a catchment upstream are dropped
#' automatically during creation of the various output tables. This is true in
#' particular for head-reaches.
#'
#' If gage objects are to be considered, an iterative approach may be convienent.
#' In the 1st step, this function is called without any gage specifications (default for
#' \code{gageLocations}). Suitable gage positions can then be identified in the GIS
#' based on the updated shape file if \code{updateSHP} was set to \code{TRUE}
#' (coordinates are available as attributes and can be queried). In the 2nd step,
#' the function is called with gage positions supplied in \code{gageLocations}.
#'
#' @author David Kneis \email{david.kneis@@uni-potsdam.de}
#'
#' @export
hydroModelData= function(
fileSHP,
fileDEM,
fileDIR,
fileCAT,
fileAttrCAT,
fileAttrRCH,
fileObjDecl,
fileObjLink,
id_outlet,
id_field="id",
class_field="class",
classname_reach="rch",
classname_minireach="minirch",
classname_node="node",
classname_catch="cat",
classname_gage="gage",
classes_with_catchment= c(classname_reach),
nbuffer=1,
coord_tol=1.e-03,
prefix_node="node_",
prefix_catch="cat_",
min_slope= 0.0001,
updateSHP= FALSE,
namesIO= data.frame(target=c("qi"),source=c("qx")),
gageLocations= data.frame(id=c(),x=c(),y=c()),
findAffectedGages= FALSE,
replace=FALSE,
silent=TRUE
){
# --- Check args: inputs must exist, outputs must not (unless replace=TRUE) ---
checkFileIn(fileSHP)
checkFileIn(fileDEM)
checkFileIn(fileDIR)
checkFileOut(fileCAT, replace=replace)
checkFileOut(fileAttrCAT, replace=replace)
checkFileOut(fileAttrRCH, replace=replace)
checkFileOut(fileObjDecl, replace=replace)
checkFileOut(fileObjLink, replace=replace)
checkArg(arg=id_outlet, len=1, type="integer")
checkArg(arg=id_field, len=1, type="character")
checkArg(arg=class_field, len=1, type="character")
checkArg(arg=classname_reach, len=1, type="character")
checkArg(arg=classname_minireach, len=1, type="character")
checkArg(arg=classname_node, len=1, type="character")
checkArg(arg=classname_catch, len=1, type="character")
checkArg(arg=classname_gage, len=1, type="character")
checkArg(arg=classes_with_catchment, len=NULL, type="character")
checkArg(arg=nbuffer, len=1, type="integer")
checkArg(arg=coord_tol, len=1, type="numeric")
checkArg(arg=prefix_node, len=1, type="character")
checkArg(arg=prefix_catch, len=1, type="character")
checkArg(arg=min_slope, len=1, type="numeric")
checkArg(arg=updateSHP, len=1, type="logical")
checkArg(arg=namesIO, len=NULL, type="data.frame")
checkArg(arg=gageLocations, len=NULL, type="data.frame")
checkArg(arg=findAffectedGages, len=1, type="logical")
checkArg(arg=replace, len=1, type="logical")
checkArg(arg=silent, len=1, type="logical")
# Step 1: create catchment grid (one catchment per qualifying line feature)
if (!silent) print("Computing catchments...")
catchments(fileDIR=fileDIR, fileSHP=fileSHP, fileCAT=fileCAT,
id_field=id_field, class_field=class_field,
classes_with_catchment= classes_with_catchment,
nbuffer=nbuffer, replace=replace, silent=silent
)
# Step 2: create data frame with basic catchment attributes (area, centroid)
if (!silent) print("Computing catchment attributes...")
tab_catchAttribs= catchAttribs(
fileCAT=fileCAT, prefix=prefix_catch, silent=silent
)
# Step 3: analyze drainage network; 'info' carries the object declaration
# table (objDecl), the object linkage table (objLink), and downstream links (downObj)
if (!silent) print("Analyzing drainage network...")
info= hydroLinkage(shapefile=fileSHP, id_field=id_field, class_field=class_field,
coord_tol=coord_tol, id_outlet=id_outlet,
classes_with_catchment=classes_with_catchment,
classname_reach=classname_reach, classname_minireach=classname_minireach,
classname_node=classname_node, classname_catch=classname_catch, classname_gage=classname_gage,
prefix_node=prefix_node, prefix_catch=prefix_catch,
tab_var=namesIO, tab_gages=gageLocations, silent=silent
)
# Step 4: create data frame with reach attributes (length, elevations, slope, upstream area)
if (!silent) print("Computing reach attributes...")
tab_reachAttribs= reachAttribs(
tab_objDecl=info$objDecl,
tab_downObj=info$downObj,
tab_catchAttribs=tab_catchAttribs,
fileDEM=fileDEM, fileSHP=fileSHP, id_field=id_field, class_field=class_field,
classname_reach=classname_reach, classname_minireach=classname_minireach,
updateSHP=updateSHP, min_slope=min_slope, silent=silent
)
# Step 5 (optional): mark, for every object, which gages it affects
if (findAffectedGages) {
colname_affectedGagesCode= "code_affectedGages"
if (!silent) print("Identifying affected gages...")
# Identify the gages affected by each object
if (nrow(gageLocations) > 0) {
# Force string conversion
gageLocations$id= as.character(gageLocations$id)
# Check for name conflicts: gage names become new column names, so they must
# not collide with any existing column in the output tables
reserved= unique(c(id_field,class_field,
names(info$objDecl), names(tab_catchAttribs), names(tab_reachAttribs)))
if (any(gageLocations$id %in% reserved))
stop(paste("Name conflict. A gage name must not be one of '",
paste(reserved,collapse="', '"),"'.",sep=""))
if (colname_affectedGagesCode %in% reserved)
stop(paste("Error in package's source code. Column '",colname_affectedGagesCode,
"' already exists.",sep=""))
# Update the object declaration table: one 0/1 column per gage
info$objDecl= assignGages(tab_objDecl= info$objDecl, tab_objLink= info$objLink,
gageObjects= gageLocations$id)
# Add the info also merged into a single field. Let it be a character field to avoid
# problems with the representation of very large numbers (in the case of many gages)
info$objDecl[,colname_affectedGagesCode]= apply(X=info$objDecl[,gageLocations$id],
MARGIN=1,FUN=paste,collapse="")
info$objDecl[,colname_affectedGagesCode]= paste("code_",info$objDecl[,colname_affectedGagesCode],sep="")
# Update the catchment attribute table with the per-gage columns
tab_catchAttribs= merge(x=tab_catchAttribs, y=info$objDecl[,c("object",gageLocations$id,colname_affectedGagesCode)],
by="object", all.x=TRUE, all.y=FALSE)
if (any(is.na(tab_catchAttribs)))
stop("Failed to add gage info to catchment attribute table.")
# Update the reach attribute table with the per-gage columns
tab_reachAttribs= merge(x=tab_reachAttribs, y=info$objDecl[,c("object",gageLocations$id,colname_affectedGagesCode)],
by="object", all.x=TRUE, all.y=FALSE)
if (any(is.na(tab_reachAttribs)))
stop("Failed to add gage info to reach attribute table.")
# Update the shape file's attribute table if requested
if (updateSHP) {
if (!silent) print("Updating shape file's attribute table...")
# Read attr. table (the .dbf companion of the .shp file)
dbfname= paste(substr(fileSHP,1,nchar(fileSHP)-3),"dbf",sep="")
attTab= shapefiles::read.dbf(dbfname, header=TRUE)
attTab= attTab$dbf
# Delete fields that will be updated if they already exist (may happen in repeated calls, for example)
del= which(names(attTab) %in% c(gageLocations$id,colname_affectedGagesCode))
attTab[,del]=NULL
# Add gage info
newTab= merge(x=attTab, y=info$objDecl[,c("object",gageLocations$id,colname_affectedGagesCode)], by.x=id_field,
by.y="object", all.x=TRUE, all.y=FALSE)
# Set info for non-simulated objects to special value (-9999 = no data)
for (i in 1:nrow(gageLocations)) {
newTab[is.na(newTab[,gageLocations$id[i]]),gageLocations$id[i]]= -9999
}
newTab[is.na(newTab[,colname_affectedGagesCode]),colname_affectedGagesCode]= -9999
foreign::write.dbf(newTab, dbfname) # The 'foreign' package is loaded via 'shapefiles' (but this function is masked, therefore we use ::)
}
}
}
# Step 6: write the four tab-separated output tables
write.table(x=tab_catchAttribs, file=fileAttrCAT, sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(x=tab_reachAttribs, file=fileAttrRCH, sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(x=info$objDecl, file=fileObjDecl, sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(x=info$objLink, file=fileObjLink, sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
# Return nothing
return(invisible(NULL))
}
|
c14fbc1de9a90702badf3fab71b52d5060e2727f
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/EFAtools/tests/testthat/test-PROMAX.R
|
9bf6f08287d1c0c3e0eb0ea594d15ceebe51a1f6
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,815
|
r
|
test-PROMAX.R
|
# Fixtures: an unrotated 3-factor EFA solution, rotated with the three
# supported promax flavours (EFAtools, psych, SPSS), plus a single-factor
# solution whose rotation is expected to warn and fall through.
unrot <- EFA(test_models$baseline$cormat, 3, N = 500)
prom <- .PROMAX(unrot, type = "EFAtools")
prom_psych <- .PROMAX(unrot, type = "psych")
prom_spss <- .PROMAX(unrot, type = "SPSS")
unrot_1 <- EFA(test_models$baseline$cormat, 1, N = 500)
prom_1 <- suppressWarnings(.PROMAX(unrot_1, type = "EFAtools"))
# Structure of the returned list; for the single-factor case all rotation
# by-products (Phi, Structure, rotmat, vars_accounted_rot) are NA.
test_that("output class and dimensions are correct", {
expect_is(prom, "list")
expect_is(prom_1, "list")
expect_named(prom, c("rot_loadings", "Phi", "Structure", "rotmat",
"vars_accounted_rot", "settings"))
expect_named(prom_1, c("rot_loadings", "Phi", "Structure", "rotmat",
"vars_accounted_rot", "settings"))
expect_is(prom$rot_loadings, "LOADINGS")
expect_is(prom$Phi, "matrix")
expect_is(prom$Structure, "matrix")
expect_is(prom$rotmat, "matrix")
expect_is(prom$vars_accounted_rot, "matrix")
expect_is(prom$settings, "list")
expect_is(prom_1$rot_loadings, "LOADINGS")
expect_equal(prom_1$Phi, NA)
expect_equal(prom_1$Structure, NA)
expect_equal(prom_1$rotmat, NA)
expect_equal(prom_1$vars_accounted_rot, NA)
expect_is(prom_1$settings, "list")
})
# Each type maps to a fixed combination of settings (P_type, order_type,
# varimax_type differ between the flavours; normalize, precision and k do not).
test_that("settings are returned correctly", {
expect_named(prom$settings, c("normalize", "P_type", "precision",
"order_type", "varimax_type", "k"))
expect_named(prom_psych$settings, c("normalize", "P_type", "precision",
"order_type", "varimax_type", "k"))
expect_named(prom_spss$settings, c("normalize", "P_type", "precision",
"order_type", "varimax_type", "k"))
expect_named(prom_1$settings, c("normalize", "P_type", "precision",
"order_type", "varimax_type", "k"))
expect_equal(prom$settings$normalize, TRUE)
expect_equal(prom_psych$settings$normalize, TRUE)
expect_equal(prom_spss$settings$normalize, TRUE)
expect_equal(prom_1$settings$normalize, TRUE)
expect_equal(prom$settings$P_type, "norm")
expect_equal(prom_psych$settings$P_type, "unnorm")
expect_equal(prom_spss$settings$P_type, "norm")
expect_equal(prom_1$settings$P_type, "norm")
expect_equal(prom$settings$precision, 1e-05)
expect_equal(prom_psych$settings$precision, 1e-05)
expect_equal(prom_spss$settings$precision, 1e-05)
expect_equal(prom_1$settings$precision, 1e-05)
expect_equal(prom$settings$order_type, "eigen")
expect_equal(prom_psych$settings$order_type, "eigen")
expect_equal(prom_spss$settings$order_type, "ss_factors")
expect_equal(prom_1$settings$order_type, "eigen")
expect_equal(prom$settings$varimax_type, "svd")
expect_equal(prom_psych$settings$varimax_type, "svd")
expect_equal(prom_spss$settings$varimax_type, "kaiser")
expect_equal(prom_1$settings$varimax_type, "svd")
expect_equal(prom$settings$k, 4)
expect_equal(prom_psych$settings$k, 4)
expect_equal(prom_spss$settings$k, 4)
expect_equal(prom_1$settings$k, 4)
})
# Invalid 'type' must error; overriding a type-implied setting must warn
# (exact message text is part of the contract and checked verbatim).
test_that("errors etc. are thrown correctly", {
expect_error(.PROMAX(unrot, type = "none"), ' One of "P_type", "order_type", "varimax_type", or "k" was NA and no valid "type" was specified. Either use one of "EFAtools", "psych", or "SPSS" for type, or specify all other arguments\n')
expect_warning(.PROMAX(unrot, type = "EFAtools", normalize = FALSE), " Type and normalize is specified. normalize is used with value ' FALSE '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "EFAtools", P_type = "norm"), " Type and P_type is specified. P_type is used with value ' norm '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "EFAtools", order_type = "ss_factors"), " Type and order_type is specified. order_type is used with value ' ss_factors '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "EFAtools", k = 2), " Type and k is specified. k is used with value ' 2 '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "psych", normalize = FALSE), " Type and normalize is specified. normalize is used with value ' FALSE '. Results may differ from the specified type.\n")
expect_warning(.PROMAX(unrot, type = "psych", P_type = "norm"), " Type and P_type is specified. P_type is used with value ' norm '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "psych", order_type = "ss_factors"), " Type and order_type is specified. order_type is used with value ' ss_factors '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "psych", k = 2), " Type and k is specified. k is used with value ' 2 '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "SPSS", normalize = FALSE), " Type and normalize is specified. normalize is used with value ' FALSE '. Results may differ from the specified type.\n")
expect_warning(.PROMAX(unrot, type = "SPSS", P_type = "unnorm"), " Type and P_type is specified. P_type is used with value ' unnorm '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "SPSS", order_type = "eigen"), " Type and order_type is specified. order_type is used with value ' eigen '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "SPSS", k = 2), " Type and k is specified. k is used with value ' 2 '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot, type = "SPSS", varimax_type = "svd"), " Type and varimax_type is specified. varimax_type is used with value ' svd '. Results may differ from the specified type\n")
expect_warning(.PROMAX(unrot_1, type = "EFAtools"), " Cannot rotate single factor. Unrotated loadings returned.\n")
})
# Tidy up the shared fixtures
rm(unrot, prom, unrot_1, prom_1, prom_psych, prom_spss)
|
1f89fddf91bbf8e74c3ef0e74b2c077562a4af01
|
a444bf00dff232cef1319968cea655cffbcf0903
|
/functions/smooth_update.R
|
b9229bbd7a90e5785b76145fa5a2c07ea73d13a5
|
[] |
no_license
|
Allisterh/dsge_m
|
472a9cc5521a0d9c067d6b8ad9e8243f52c0f103
|
3325745e005baf019f66a3f07f2a000c4c959fb5
|
refs/heads/main
| 2023-05-14T20:20:27.224434
| 2021-06-09T20:57:26
| 2021-06-09T20:57:26
| 446,838,437
| 1
| 0
| null | 2022-01-11T13:46:08
| 2022-01-11T13:46:07
| null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
smooth_update.R
|
smooth_update <- function(xsmooth_future, Vsmooth_future, xfilt, Vfilt, Vfilt_future, VVfilt_future, A, Q, B, u) {
  # One backwards step of the Rauch-Tung-Striebel (RTS) smoothing recursion.
  #
  # Arguments:
  #   xsmooth_future = E[X_t+1|T]
  #   Vsmooth_future = Cov[X_t+1|T]
  #   xfilt          = E[X_t|t]
  #   Vfilt          = Cov[X_t|t]
  #   Vfilt_future   = Cov[X_t+1|t+1]
  #   VVfilt_future  = Cov[X_t+1,X_t|t+1]
  #   A, Q           = system matrix and system covariance for time t+1
  #   B, u           = input matrix and input vector for time t+1 (B all-NA if none)
  #
  # Returns a list with:
  #   xsmooth         = E[X_t|T]
  #   Vsmooth         = Cov[X_t|T]
  #   VVsmooth_future = Cov[X_t+1,X_t|T]
  #
  # One-step prediction E[X(t+1)|t]; the control term is dropped when no
  # input matrix is supplied (signalled by an all-NA B).
  xpred <- if (all(is.na(B))) {
    A %*% xfilt
  } else {
    A %*% xfilt + B %*% u
  }
  # Predicted covariance Cov[X(t+1)|t]
  Vpred <- A %*% Vfilt %*% t(A) + Q
  # Smoother gain matrix (pseudo-inverse tolerates a singular Vpred)
  gain <- Vfilt %*% t(A) %*% pracma::pinv(Vpred)
  # Smoothed state, smoothed covariance, and lag-one covariance
  list(
    xsmooth = xfilt + gain %*% (xsmooth_future - xpred),
    Vsmooth = Vfilt + gain %*% (Vsmooth_future - Vpred) %*% t(gain),
    VVsmooth_future = VVfilt_future +
      (Vsmooth_future - Vfilt_future) %*% pracma::pinv(Vfilt_future) %*% VVfilt_future
  )
}
|
bc020fa99c9af1a91cc2b4aa81414f770f484715
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RadTran/R/RnDifAdv.hom.R
|
2df200c47697178cd45ac80f80d5d8f361df8853
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,985
|
r
|
RnDifAdv.hom.R
|
# Steady-state radon (Rn) transport by diffusion and advection in a
# homogeneous 2D porous medium, solved on a rectangular grid with the
# ReacTran finite-volume machinery (setup.grid/prop, tran.2D, steady.2D).
#
# Arguments:
#   lx, ly   - domain size in x and y
#   nx, ny   - number of grid cells in x and y
#   e        - porosity
#   m        - moisture (water saturation fraction)
#   bdc_top  - concentration imposed at the top (y.up) boundary
#   rn_lam   - radon decay constant
#   rn_sol   - radon solubility (water/air partition coefficient)
#   k_soil   - soil permeability
#   d_bulk   - bulk diffusion coefficient (y direction)
#   miu      - dynamic viscosity of the soil gas
#   dp       - pressure difference driving advection over the height Ly
#   solution - only "steady" triggers a return value; otherwise NULL is returned
#   ...      - unused; kept for interface compatibility
RnDifAdv.hom <-
function (lx, ly, nx, ny, e, m,
bdc_top, rn_lam, rn_sol, k_soil, d_bulk, miu, dp, solution, ...)
{
# Rename arguments to the symbols used in the model formulas
Lx = lx
Ly = ly
Nx = nx
Ny = ny
dx = Lx/Nx
dy = Ly/Ny
# Cell-centre coordinates
X = seq(dx/2,by=dx,len=Nx)
Y = seq(dy/2,by=dy,len=Ny)
A = Lx*Ly
# 1D grids combined into the 2D computational grid
xgrid=setup.grid.1D(N=Nx,L=Lx)
ygrid=setup.grid.1D(N=Ny,L=Ly)
xygrid=setup.grid.2D(x.grid=xgrid,y.grid=ygrid)
x.axis = X
y.axis.conc = Y
# Flux axis at cell interfaces; negated -- presumably to express depth, TODO confirm
y.axis.flux = -ygrid$x.int
poro = e
hum = m
Bc = bdc_top
lambda = rn_lam
L = rn_sol
k = k_soil
D = d_bulk
vis = miu
deltaP = dp
# Partition-corrected pore fraction (air + dissolved radon)
beta = (1-hum+L*hum)*poro
# Source term; the factor 10000 looks like a unit conversion -- TODO confirm
G = lambda*10000
# Darcy velocity from the pressure difference over the column height
v = (k/vis)*deltaP/Ly
# Property grids: diffusion and advection act in the y direction only (value=0 in x)
Dgrid=setup.prop.2D(value=0,grid=xygrid,y.value=D)
VFgrid=setup.prop.2D(value=poro,grid=xygrid,y.value=poro)
Agrid=setup.prop.2D(value=1,grid=xygrid,y.value=1)
AFDWgrid=setup.prop.2D(value=1,grid=xygrid,y.value=1)
vgrid=setup.prop.2D(value=0,grid=xygrid,y.value=v)
# Model function for steady.2D: transport + first-order decay + production
Diff2D=function(t,y,parms){
CONC=matrix(nrow=Nx,ncol=Ny,y)
Tran=tran.2D(CONC,C.x.up=0,C.x.down=0,C.y.up=Bc,
C.y.down=0,flux.x.up=0,flux.x.down=0,a.bl.x.up=0,
a.bl.x.down=0,D.grid=Dgrid,v.grid=vgrid,
AFDW.grid=AFDWgrid,VF.grid=VFgrid,grid=xygrid,
A.grid=Agrid,full.output=TRUE)
dCONC=Tran$dC-lambda*beta*CONC+poro*G
xFlux=Tran$x.flux*A
yFlux=Tran$y.flux*A
yTopFlux=Tran$flux.y.up
yBottFlux=Tran$flux.y.down
return(list(as.vector(dCONC),yFlux=yFlux,xFlux=xFlux,
yTopFlux=yTopFlux,yBottFlux=yBottFlux))
}
y=runif(Nx*Ny) # random initial condition for the steady-state solver
std2=steady.2D(func=Diff2D,y=as.vector(y),time=0,
positive=TRUE,parms=NULL,lrw=9e7,dimens=c(Nx,Ny))
# Reshape solver output into concentration and flux matrices
mat=matrix(nrow=Nx,ncol=Ny,std2$y)
mat2=matrix(nrow=Nx,ncol=Ny+1,std2$yFlux)
RnDifAdv.hom=list()
RnDifAdv.hom$conc=mat
RnDifAdv.hom$flux=mat2
# Only the steady-state result is supported; any other 'solution' yields NULL
if (solution == "steady") {
return(list(x.axis=x.axis,y.axis.conc=y.axis.conc,
y.axis.flux=y.axis.flux,conc=RnDifAdv.hom$conc,
flux=RnDifAdv.hom$flux))
}
}
|
fb15d94663bd49d84fdde9fab421cf426173f638
|
4804e4a4166a33faf98e9ad3df60757d94a0f1d9
|
/R/Q.R
|
355ddf6d5e507a1d7d445697eab0f5d484ff33dc
|
[
"MIT"
] |
permissive
|
zozlak/MLAK
|
958cb673939b684657ff88f141145f038ed2d89a
|
89e88050814b2ff2594669eb38ad198163e13b87
|
refs/heads/master
| 2021-06-01T11:34:57.797493
| 2020-07-09T08:51:11
| 2020-07-09T08:51:11
| 23,737,268
| 2
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
Q.R
|
#' @title Requested quantile
#' @description
#' Several quantiles can be computed at once - pass a vector as \code{q}
#' and a vector of the corresponding quantiles is returned.
#' @param x vector of values
#' @param q quantile number(s), from 0 (minimum) to \code{n} (maximum);
#'   may be a vector
#' @param n number of quantiles
#' @param wyrownaj whether to pad the result to a common length (if NA, the
#'   choice is made automatically)
#' @param dokl number of decimal places the result is rounded to
#' @return NULL
#' @export
Q = function(x, q, n, wyrownaj = NA, dokl = 2){
  stopifnot(
    is.numeric(x),
    is.numeric(q),
    is.numeric(n),
    length(n) == 1,
    all(q >= 0),  # a negative q would silently drop elements via tmp[q + 1]
    all(n >= q),
    n >= 1
  )
  wyrownaj = ustawWyrownaj(wyrownaj)
  f = function(x){
    # n + 1 evenly spaced probabilities give the boundaries of n quantile
    # bins; element q + 1 is the q-th boundary (q = 0 -> min, q = n -> max).
    tmp = stats::quantile(x, seq(0, 1, length.out = n + 1), na.rm = TRUE)
    return(tmp[q + 1])
  }
  return(statWektor(x, f, sys.call(), wyrownaj, dokl))
}
|
2d5dff75c96d0a84eee50e402a474e5709ed8a51
|
662395fae8ce93a6f721910a39ce76f9ba1868b6
|
/man/make_html_file.Rd
|
2e75fad0ce187c3b91e6238e74e1e2c7c6a404ec
|
[
"MIT"
] |
permissive
|
LucyMcGowan/statwords
|
fae7def1ce10984cd3495ccf98f8f9f4ad278205
|
462f28f6f7108d137a866e36b6fb5cad2d38b382
|
refs/heads/master
| 2020-05-04T18:00:07.469434
| 2019-04-10T20:25:36
| 2019-04-10T20:25:36
| 179,335,394
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 302
|
rd
|
make_html_file.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_html_file.R
\name{make_html_file}
\alias{make_html_file}
\title{Make html file}
\usage{
make_html_file(html_text, file)
}
\arguments{
\item{html_text}{HTML text}
\item{file}{file name}
}
\description{
Make html file
}
|
f92fa6f76b381395086cd09ac06daca78b18e48c
|
f041362e25f899a58dfae9b60da2e64b214a95d8
|
/Old_moved2bitemodelr/Fogel2017_ParamDist_now_raw-data.R
|
03409e0ca1719442098e5170fc269060cd68663f
|
[] |
no_license
|
alainapearce/LODEModel_SimStudy
|
bbf9b7b0ba993d0665013ae3a2fed4d0dcb3ed0a
|
7adf168b033d2ef564710172232cd433f7923522
|
refs/heads/master
| 2022-12-29T01:33:44.594998
| 2020-10-20T19:23:24
| 2020-10-20T19:23:24
| 267,889,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,976
|
r
|
Fogel2017_ParamDist_now_raw-data.R
|
# This script was written by Alaina Pearce in 2020
# to create:
# 1) a databased based on the mean and correlational
# structure of average meal microstructure in children
# reported in Fogel et al., 2017.
# 2) generate and save parameter distributes from generated
# database
#
# Copyright (C) 2012 Alaina L Pearce
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
###now a script in raw-data called DataGen_SimDat_Fogel2017
############ Basic Data Load/Setup ############
library(bitemodelr)
# Fogel2017_simDat() is defined in this sourced helper script.
source('Fogel2017_SimDat.R')
#only need to do once
# Simulate 500 participants' meal-level summary data.
SimDat_Fogel2017 = Fogel2017_simDat(500)
write.csv(SimDat_Fogel2017, 'Data/SimDat_Fogel2017.csv', row.names = FALSE)
#### Generate bite data ####
#sample bite timing from a logistic curve and use average bite size to get cumulative intake
source('simBitesLogit.R')
# One simulated bite sequence per participant; mapply iterates the per-subject
# meal duration, bite count, total intake and ID in parallel.
SimBites_Fogel2017_list = t(mapply(simBitesLogit, mealdur = SimDat_Fogel2017$MealDur_min, nBites = SimDat_Fogel2017$nBites, Emax = SimDat_Fogel2017$TotalIntake_g, id = SimDat_Fogel2017$ID))
# Flatten the list output into one long (ID, Bite, time, intake) table.
SimBites_Fogel2017 = data.frame(matrix(c(unlist(SimBites_Fogel2017_list)), byrow = FALSE, ncol = 4))
names(SimBites_Fogel2017) = c('ID', 'Bite', 'SampledTime', 'EstimatedCumulativeIntake')
write.csv(SimBites_Fogel2017, 'Data/SimBites_Fogel2017.csv', row.names = FALSE)
#### FPM Model ####
#fit parameters to the bite datasets
FPM_SimBites_Fogel2017_params = IntakeModelParams(data = SimBites_Fogel2017, timeVar = 'SampledTime', intakeVar = 'EstimatedCumulativeIntake', fit_fn = FPM_Fit, idVar = 'ID', CI = FALSE)
#### Kissileff Model ####
#fit parameters to the bite datasets
Kissileff_SimBites_Fogel2017_params = IntakeModelParams(data = SimBites_Fogel2017, timeVar = 'SampledTime', intakeVar = 'EstimatedCumulativeIntake', fit_fn = Kissileff_Fit, idVar = 'ID', CI = FALSE)
#### Add parameters to data ####
SimDat_Fogel2017 = merge(SimDat_Fogel2017, FPM_SimBites_Fogel2017_params, by = 'ID')
# NOTE(review): hard-coded column indices assume a fixed column count/order
# after each merge -- fragile if IntakeModelParams output changes; verify.
names(SimDat_Fogel2017)[12:16] = c('FPM_value', 'FPM_counts', 'FPM_counts_gradiant', 'FPM_convergence', 'FPM_method')
SimDat_Fogel2017 = merge(SimDat_Fogel2017, Kissileff_SimBites_Fogel2017_params, by = 'ID')
names(SimDat_Fogel2017)[20:24] = c('Kissileff_value', 'Kissileff_counts', 'Kissileff_counts_gradiant', 'Kissileff_convergence', 'Kissileff_method')
write.csv(SimDat_Fogel2017, 'Data/ParamDat_Fogel2017.csv', row.names = FALSE)
|
11c9a38098ddc3b7e45ab5b182ed94ca37774f04
|
468283f49c0f482119963b3aef343f90f4bee5a2
|
/Vocera-Voice-Call-Summary-NIH-REPORTING.R
|
0622e34caf0cac66778bfb1bf95b3dd0dc13045e
|
[] |
no_license
|
mo-g/vocera-messaging
|
397d9cd9bfdad5438b7d642af4dde667fece062f
|
d5e5e73a38b6f3724b852c5b4230d7931f00156f
|
refs/heads/master
| 2021-07-18T21:20:43.906107
| 2017-10-28T02:49:22
| 2017-10-28T02:49:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,893
|
r
|
Vocera-Voice-Call-Summary-NIH-REPORTING.R
|
# VOCERA VOICE STATS
# Gustavo Gonzalez
# Generates summary data into a file on Vocera Voice usage details
# install.packages("RMySQL")
library("RMySQL")
library("dplyr")
library("reshape2")
#GLOBAL VARIABLES#
# DB connection details.
# SECURITY NOTE(review): credentials and host are hard-coded in source
# control; move them to environment variables (Sys.getenv) or an external
# config file kept out of version control.
dbhandle <- dbConnect(MySQL(), user='vocera', password='vocera', dbname = 'vocera_reports', host='10.134.21.14')
# Get call data -- one reporting window per site.
rs <- dbSendQuery(dbhandle, "select * from Calls where TxDate > '2017-09-10'")
CRG_ds_voice_calls <- fetch(rs, n=-1)
dbClearResult(rs)  # free the result set before reusing `rs` (avoids leaked/open result warnings)
rs <- dbSendQuery(dbhandle, "select * from Calls where TxDate > '2017-09-25'")
CVH_ds_voice_calls <- fetch(rs, n=-1)
dbClearResult(rs)
#2017-09-21 - Gina. S advised that this extract will no longer be needed, commenting out
# #Get Login Logout Data
# rs <- dbSendQuery(dbhandle, "select * from loginlogout where loginlogout.TxDate > '2017-09-09' AND loginlogout.UserSite = 'CRG' AND loginlogout.Operation = 'login'")
# ds_voice_loginlogout <- fetch(rs, n=-1)
#Close DB Connection
dbDisconnect(dbhandle)
# Include only accepted calls from non-B2000 badges for each site listed below.
CRG_daily_voice_call_summary <- filter(CRG_ds_voice_calls, CRG_ds_voice_calls$Accepted==1 & CRG_ds_voice_calls$UserSite=="CRG" & CRG_ds_voice_calls$DeviceType!="B2000")
CVH_daily_voice_call_summary <- filter(CVH_ds_voice_calls, CVH_ds_voice_calls$Accepted==1 & CVH_ds_voice_calls$UserSite=="CVH" & CVH_ds_voice_calls$DeviceType!="B2000")
# Count calls per day per device type.
CRG_daily_voice_call_summary <- data.frame(table(CRG_daily_voice_call_summary$TxDate, CRG_daily_voice_call_summary$DeviceType))
CVH_daily_voice_call_summary <- data.frame(table(CVH_daily_voice_call_summary$TxDate, CVH_daily_voice_call_summary$DeviceType))
# Pivot to one column per device type, then append daily and grand totals.
################# CRG ##############################################
CRG_daily_voice_call_summary <- dcast(CRG_daily_voice_call_summary, Var1 ~ Var2)
CRG_daily_voice_call_summary$daily_total <- rowSums(CRG_daily_voice_call_summary[,2:ncol(CRG_daily_voice_call_summary)])
# Grand-total row; the date column is left NA.
CRG_daily_voice_call_summary[nrow(CRG_daily_voice_call_summary)+1,] <- c(NA, sum(CRG_daily_voice_call_summary$Apple), sum(CRG_daily_voice_call_summary$B3000), sum(CRG_daily_voice_call_summary$Phone), sum(CRG_daily_voice_call_summary$daily_total))
# paste0 (not paste): paste's default sep = " " injected spaces into the file name.
write.csv(CRG_daily_voice_call_summary, paste0('C:/Users/Public/NIH-Reporting/CRG_daily_voice_call_summary_', format(Sys.time(), "%Y_%m_%d"), ".csv"))
################# CVH ##############################################
CVH_daily_voice_call_summary <- dcast(CVH_daily_voice_call_summary, Var1 ~ Var2)
CVH_daily_voice_call_summary$daily_total <- rowSums(CVH_daily_voice_call_summary[,2:ncol(CVH_daily_voice_call_summary)])
CVH_daily_voice_call_summary[nrow(CVH_daily_voice_call_summary)+1,] <- c(NA, sum(CVH_daily_voice_call_summary$Apple), sum(CVH_daily_voice_call_summary$B3000), sum(CVH_daily_voice_call_summary$Phone), sum(CVH_daily_voice_call_summary$daily_total))
write.csv(CVH_daily_voice_call_summary, paste0('C:/Users/Public/NIH-Reporting/CVH_daily_voice_call_summary_', format(Sys.time(), "%Y_%m_%d"), ".csv"))
#2017-09-21 - Gina. S advised that this extract will no longer be needed, commenting out
#Find unique logins by user by date
# daily_loginlogout_summary <- ds_voice_loginlogout
# daily_loginlogout_summary <- unique(daily_loginlogout_summary[c("TxDate", "UserID")])
#
# #Summarize and create log in file summary
# daily_loginlogout_summary <- data.frame(table(daily_loginlogout_summary$TxDate))
# colnames(daily_loginlogout_summary) <- c("date", "#_login_logout")
# daily_loginlogout_summary[nrow(daily_loginlogout_summary)+1,] <- c(NA, sum(daily_loginlogout_summary$`#_login_logout`))
# write.csv(daily_loginlogout_summary, paste('reporting/daily_voice_loginlogout_summary_', format(Sys.time(), "%Y_%m_%d"), ".csv"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.