blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
332b19ad069227c09cf8d5918a733bf424546cd9
|
646b9ff26a4338cb4674fbe32a819b9be9d3870d
|
/src/2json_parse.R
|
e7973613be2be43a22853b42123159f0856c2edd
|
[] |
no_license
|
jtgrassel/DHS_Data_Analysis
|
8bcbda9f701b4af9e045de7f19637eadeec44238
|
82e0fe989bd4c907f404d03d96e46aa0eee5a7b0
|
refs/heads/main
| 2023-07-20T02:57:58.569241
| 2021-09-02T04:31:27
| 2021-09-02T04:31:27
| 325,148,676
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,397
|
r
|
2json_parse.R
|
library(tidyverse)
# Parse the json files into tables, then remove the raw lists.
# Each table has one row per (participant, prompt) with columns:
#   Test, UserID, Prompt, Q1, Q2, Q3, Time, Gender, Age, Edu
# (the 3_x / 4_x tables additionally carry click coordinates x, y).

#---- Parse the Pilot data ----#
# Pilot prompts are stored under the string keys "0".."13" of each
# participant record; demographics sit under $demographic.
jsonTablePilot <- tibble(Test=character(), UserID=character(), Prompt=numeric(), Q1=numeric(), Q2=numeric(), Q3=numeric(), Time=numeric(), Gender=character(), Age=numeric(), Edu=character())
# seq_along() instead of seq(1, length(...)): an empty input list yields
# zero iterations rather than the bogus indices c(1, 0).
for (i in seq_along(jsonRawPilot)) {
  person <- jsonRawPilot[[i]]
  for (j in 0:13) {
    rec <- person[[as.character(j)]]
    jsonTablePilot <- add_row(jsonTablePilot,
      Test = "Pilot",
      Prompt = j,
      UserID = person[["name"]],
      Q1 = rec$q1,
      Q2 = rec$q2,
      Q3 = rec$q3,
      Time = rec$time,
      Gender = person$demographic$gender,
      Age = as.numeric(person$demographic$age),
      Edu = person$demographic$education
    )
  }
}
rm(i, j, person, rec)
rm(jsonRawPilot)
#---- Parse the 2_x data ----#
# All three "test 2" files share one layout: each participant list holds its
# per-prompt records in positions 1..(length - 2); the remaining two entries
# are "name" and "demographic".

# Parse one "test 2" raw list into a tidy table.
#   raw   : list of participants as loaded from JSON
#   label : value stored in the Test column ("2_1", "2_2", "2_3")
parse_test2 <- function(raw, label) {
  out <- tibble(Test=character(), UserID=character(), Prompt=numeric(), Q1=numeric(), Q2=numeric(), Q3=numeric(), Time=numeric(), Gender=character(), Age=numeric(), Edu=character())
  for (i in seq_along(raw)) {
    person <- raw[[i]]
    # seq_len() instead of seq(1, n - 2): a participant with no prompt
    # records contributes zero rows rather than the bogus indices c(1, 0).
    for (j in seq_len(length(person) - 2)) {
      rec <- person[[j]]
      out <- add_row(out,
        Test = label,
        Prompt = j,
        UserID = person[["name"]],
        Q1 = rec$q1,
        Q2 = rec$q2,
        Q3 = rec$q3,
        Time = rec$time,
        Gender = person$demographic$gender,
        Age = as.numeric(person$demographic$age),
        # education is absent from some records (observed in the 2_2 data):
        # store NA instead of erroring on NULL
        Edu = as.character(ifelse(is.null(person$demographic$education), NA,
                                  person$demographic$education))
      )
    }
  }
  out
}
jsonTable2_1 <- parse_test2(jsonRaw2_1, "2_1")
rm(jsonRaw2_1)
jsonTable2_2 <- parse_test2(jsonRaw2_2, "2_2")
rm(jsonRaw2_2)
jsonTable2_3 <- parse_test2(jsonRaw2_3, "2_3")
rm(jsonRaw2_3)
rm(parse_test2)
#---- Parse the 3_x data ----#
# Same layout as the 2_x files, plus optional click coordinates x / y on
# each prompt record.

# NULL-safe scalar: return NA when `v` is NULL (field absent from the JSON).
or_na <- function(v) if (is.null(v)) NA else v

# Parse one "test 3" raw list into a tidy table (see parse logic below).
#   raw   : list of participants as loaded from JSON
#   label : value stored in the Test column ("3_1", "3_2", "3_3")
parse_test3 <- function(raw, label) {
  out <- tibble(Test=character(), UserID=character(), Prompt=numeric(), Q1=numeric(), Q2=numeric(), Q3=numeric(), Time=numeric(), x=numeric(), y=numeric(), Gender=character(), Age=numeric(), Edu=character())
  for (i in seq_along(raw)) {
    person <- raw[[i]]
    # seq_len() instead of seq(1, n - 2): zero prompt records -> zero rows,
    # never the bogus indices c(1, 0).
    for (j in seq_len(length(person) - 2)) {
      rec <- person[[j]]
      out <- add_row(out,
        Test = label,
        Prompt = j,
        UserID = person[["name"]],
        Q1 = rec$q1,
        Q2 = rec$q2,
        Q3 = rec$q3,
        Time = rec$time,
        # x/y are only present on click prompts
        x = or_na(rec$x),
        y = or_na(rec$y),
        # demographics are missing for some participants (observed in 3_2)
        Gender = or_na(person$demographic$gender),
        Age = as.numeric(or_na(person$demographic$age)),
        Edu = or_na(person$demographic$education)
      )
    }
  }
  out
}
jsonTable3_1 <- parse_test3(jsonRaw3_1, "3_1")
rm(jsonRaw3_1)
jsonTable3_2 <- parse_test3(jsonRaw3_2, "3_2")
rm(jsonRaw3_2)
jsonTable3_3 <- parse_test3(jsonRaw3_3, "3_3")
rm(jsonRaw3_3)
rm(parse_test3, or_na)
#---- Parse the 4_1_x data ----#
# Same columns as the 3_x tables, but prompt records are keyed by the
# *string* form of the prompt number ("1", "2", ...) instead of position.

# NULL-safe scalar: return NA when `v` is NULL (field absent from the JSON).
null_to_na <- function(v) if (is.null(v)) NA else v

# Parse one "test 4" raw list into a tidy table.
#   raw   : list of participants as loaded from JSON
#   label : value stored in the Test column ("4_1_1".."4_1_4")
parse_test4 <- function(raw, label) {
  out <- tibble(Test=character(), UserID=character(), Prompt=numeric(), Q1=numeric(), Q2=numeric(), Q3=numeric(), Time=numeric(), x=numeric(), y=numeric(), Gender=character(), Age=numeric(), Edu=character())
  for (i in seq_along(raw)) {
    person <- raw[[i]]
    # seq_len() instead of seq(1, n - 2): zero prompt records -> zero rows.
    for (j in seq_len(length(person) - 2)) {
      rec <- person[[as.character(j)]]  # prompts keyed by string name
      out <- add_row(out,
        Test = label,
        Prompt = j,
        UserID = person[["name"]],
        Q1 = rec$q1,
        Q2 = rec$q2,
        Q3 = rec$q3,
        Time = rec$time,
        # x/y are only present on click prompts
        x = null_to_na(rec$x),
        y = null_to_na(rec$y),
        Gender = person$demographic$gender,
        Age = as.numeric(person$demographic$age),
        Edu = person$demographic$education
      )
    }
  }
  out
}
jsonTable4_1_1 <- parse_test4(jsonRaw4_1_1, "4_1_1")
rm(jsonRaw4_1_1)
jsonTable4_1_2 <- parse_test4(jsonRaw4_1_2, "4_1_2")
rm(jsonRaw4_1_2)
jsonTable4_1_3 <- parse_test4(jsonRaw4_1_3, "4_1_3")
rm(jsonRaw4_1_3)
jsonTable4_1_4 <- parse_test4(jsonRaw4_1_4, "4_1_4")
rm(jsonRaw4_1_4)
rm(parse_test4, null_to_na)
|
d96b34acdcfeda557ba9e4c27f45e88090818f52
|
ec2ff78f3166e3f63c7fd406c36d652aeee7e42c
|
/man/interpolatePlateData.Rd
|
dac53c48675acb299b2765638b1a99b331102d74
|
[
"MIT"
] |
permissive
|
raim/platexpress
|
a5178654240136e38f9697e7b9145d663e65c2d8
|
e6c5dcfce56cad0554fc6b059114a5cde51c3326
|
refs/heads/master
| 2022-02-04T07:24:26.681050
| 2022-01-12T15:30:54
| 2022-01-12T15:30:54
| 69,260,226
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,137
|
rd
|
interpolatePlateData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/platereader.R
\name{interpolatePlateData}
\alias{interpolatePlateData}
\title{Interpolate Plate Data}
\usage{
interpolatePlateData(data, xid, yids, n, xout)
}
\arguments{
\item{data}{the list of measurement data as provided by
\code{\link{readPlateData}}}
\item{xid}{ID of the data set which should serve as the new x-axis
all data will be interpolated to equi-spaced points along the range
of measured values}
\item{yids}{restrict interpolation to these data IDs}
\item{n}{specify the number of interpolation points, if missing the
original number of rows will be used}
\item{xout}{specify the interpolation points directly}
}
\description{
interpolate one dataset to common points of another
data set. This is used to change the x-axis of a data set, e.g.,
the measured OD values can become the new x-axis and all fluorescence
values will be interpolated to common OD values, using the
R base function \code{\link[stats:splinefun]{spline}}, the same way
as in \code{\link{interpolatePlateTimes}}.
}
\author{
Rainer Machne \email{raim@tbi.univie.ac.at}
}
|
a5f130a0673d9a74daa8a714ea25ca689caaa0b9
|
9a8ed43c4934898f43023646051278872887781f
|
/scriptr/illust.R
|
e02f40f2890176f2336a37985710791fd10077c5
|
[] |
no_license
|
urc-pen/urushidani
|
ce34e772f13ea999e0dc32d073112d14f7d615ff
|
53dda09560be93efd2cb5d6cb5b5a5512570788c
|
refs/heads/master
| 2020-03-30T08:37:24.390811
| 2018-11-01T05:35:56
| 2018-11-01T05:35:56
| 151,028,823
| 1
| 0
| null | 2018-10-03T02:59:35
| 2018-10-01T02:44:37
|
Python
|
UTF-8
|
R
| false
| false
| 607
|
r
|
illust.R
|
# Heatmap of the 1000 most variable rows of the mutation matrix, with row
# side colours marking row names starting with "D" (blue) vs "N" (LightGrey).
X <- as.matrix(read.csv("mutationplot_cycle1.csv", header=TRUE,row.names=1))
# Keep the 1000 rows with the largest variance across samples.
# (Drops the original's unused tmp/tmp2/dr intermediates.)
keep <- names(rev(sort(apply(X, 1, var))))[1:1000]
X <- X[keep, ]
# Vectorized replacement of the original per-row loops: colour by the first
# character of the row name.  Rows starting with anything other than "D" or
# "N" get no entry, exactly as before.
initial <- substring(keep, 1, 1)
rcol <- setNames(ifelse(initial == "D", "blue", "LightGrey"), keep)[initial %in% c("D", "N")]
heatmap(X, col=c("LightGrey", "red"), hclustfun = function(X) { hclust(X, method = "ward.D") }, scale = c("none"), RowSideColors=rcol)
|
b24a166085dce9bac591e61063e374150e60821a
|
d9f440d53216258b3f1d407003f117265909beff
|
/unit6/Unit6-HW-Stocks.R
|
48e984166eb4a5b78f09d92ccf6a64606523e293
|
[] |
no_license
|
hmartineziii/15.071x
|
88fa934ff7988d27e2f30c8799d0165272fd805b
|
22214ace9bfe3982f6da829245d533d7cbb04d46
|
refs/heads/master
| 2016-09-12T10:36:37.459666
| 2016-06-05T23:57:51
| 2016-06-05T23:57:51
| 56,651,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,809
|
r
|
Unit6-HW-Stocks.R
|
#problem1
# Exploratory summary of the monthly stock-return data.  Each row is one
# stock-year; columns 1:11 are monthly returns and PositiveDec flags a
# positive December return.
stocks <- read.csv("StocksCluster.csv")
str(stocks)
nrow(stocks)
# Proportion of observations with a positive December return
# (three equivalent computations).
prop.table(table(stocks$PositiveDec))
sum(stocks$PositiveDec) / nrow(stocks)
mean(stocks$PositiveDec)
cm <- cor(stocks[1:11])
diag(cm) <- -1 # overwrite the diagonal so max() finds the largest *pairwise* correlation
max(cm)
summary(stocks)
# `cm` is reused here to hold the column means of the return variables.
cm <- colMeans(stocks[1:11])
which.max(cm)
which.min(cm)
#problem2
# Logistic-regression baseline: 70/30 stratified train/test split on
# PositiveDec, then accuracy at a 0.5 probability threshold.
library(caTools)
set.seed(144)
spl = sample.split(stocks$PositiveDec, SplitRatio = 0.7)
stocksTrain = subset(stocks, spl == TRUE)
stocksTest = subset(stocks, spl == FALSE)
StocksModel = glm(PositiveDec ~ ., data=stocksTrain, family=binomial)
PredictTrain = predict(StocksModel, type="response")
# Training-set confusion matrix and accuracy.
cmat_LR <-table(stocksTrain$PositiveDec, PredictTrain > 0.5)
cmat_LR
accu_LR <-(cmat_LR[1,1] + cmat_LR[2,2])/sum(cmat_LR)
accu_LR
PredictTest = predict(StocksModel, newdata=stocksTest, type="response")
# Test-set confusion matrix using the same 0.5 threshold
# (cmat_LR / accu_LR are reused for the test-set values).
cmat_LR<-table(stocksTest$PositiveDec, PredictTest > 0.5)
cmat_LR
accu_LR <-(cmat_LR[1,1] + cmat_LR[2,2])/sum(cmat_LR)
accu_LR
sum(diag(cmat_LR))/nrow(stocksTest)
# Baseline: always predict the majority class of the test set.
baseline<-table(stocksTest$PositiveDec)
baseline
accu_baseline <- max(baseline)/sum(baseline)
accu_baseline
baseline[2] / sum(baseline)
#problem3
# K-means clustering of the returns.  The outcome column is removed first
# so clustering uses only the predictors.
limitedTrain = stocksTrain
limitedTrain$PositiveDec = NULL
limitedTest = stocksTest
limitedTest$PositiveDec = NULL
library(caret)
# Normalize train AND test using statistics estimated from the training
# set only (preproc is fit once, applied to both).
preproc = preProcess(limitedTrain)
normTrain = predict(preproc, limitedTrain)
normTest = predict(preproc, limitedTest)
mean(normTrain$ReturnJan)
mean(normTest$ReturnJan)
set.seed(144)
km <- kmeans(normTrain, centers=3, iter.max=1000)
km$size
table(km$cluster)
library(flexclust)
# Convert to a kcca object so cluster assignments can be predicted for
# new (test) observations.
km.kcca = as.kcca(km, normTrain)
clusterTrain = predict(km.kcca)
clusterTest = predict(km.kcca, newdata=normTest)
table(clusterTest)
sum(clusterTest == 2)
#problem4
# Cluster-then-predict: fit one logistic regression per k-means cluster and
# compare overall test accuracy against the single-model result of problem 2.
stocksTrain1 = subset(stocksTrain, clusterTrain == 1)
stocksTrain2 = subset(stocksTrain, clusterTrain == 2)
stocksTrain3 = subset(stocksTrain, clusterTrain == 3)
stocksTest1 = subset(stocksTest, clusterTest == 1)
stocksTest2 = subset(stocksTest, clusterTest == 2)
stocksTest3 = subset(stocksTest, clusterTest == 3)
mean(stocksTrain1$PositiveDec)
mean(stocksTrain2$PositiveDec)
mean(stocksTrain3$PositiveDec)
# split()-based equivalent of the six subset() calls above.
stocksTrain11 <- split(stocksTrain, clusterTrain)
stocksTest11 <- split(stocksTest, clusterTest)
sapply(stocksTrain11, function(s){ mean(s$PositiveDec) })
StocksModel1 = glm(PositiveDec ~ ., data=stocksTrain1, family=binomial)
StocksModel2 = glm(PositiveDec ~ ., data=stocksTrain2, family=binomial)
StocksModel3 = glm(PositiveDec ~ ., data=stocksTrain3, family=binomial)
summary(StocksModel1)
summary(StocksModel2)
summary(StocksModel3)
stocksModels <- lapply(stocksTrain11, function(s){
# Fix: the original formula was `s$PositiveDec ~ .`.  With the outcome
# referenced as `s$PositiveDec` (not a column name), `.` expands to every
# column of `s` *including* PositiveDec, leaking the outcome into the
# predictors.  Naming the column matches StocksModel1-3 above.
glm(PositiveDec ~ ., family=binomial, data=s)
})
sapply(stocksModels, function(m){ m$coefficients })
# Per-cluster test predictions, confusion matrices and accuracies.
PredictTest1 = predict(StocksModel1, newdata = stocksTest1, type="response")
PredictTest2 = predict(StocksModel2, newdata = stocksTest2, type="response")
PredictTest3 = predict(StocksModel3, newdata = stocksTest3, type="response")
cmat1<-table(stocksTest1$PositiveDec, PredictTest1 > 0.5)
cmat1
cmat2<-table(stocksTest2$PositiveDec, PredictTest2 > 0.5)
cmat2
cmat3<-table(stocksTest3$PositiveDec, PredictTest3 > 0.5)
cmat3
# Same computation done functionally over the split lists.
predictions <- sapply(1:3, function (i) {
p <- predict(stocksModels[[i]], newdata=stocksTest11[[i]], type="response")
(conf.mat <- table(stocksTest11[[i]]$PositiveDec, p > 0.5))
accuracy <- sum(diag(conf.mat)) / sum(conf.mat)
list(predict=p, accuracy=accuracy)
})
predictions
# Overall accuracy: pool the three clusters' predictions and outcomes.
AllPredictions = c(PredictTest1, PredictTest2, PredictTest3)
AllOutcomes = c(stocksTest1$PositiveDec, stocksTest2$PositiveDec, stocksTest3$PositiveDec)
cmatoverall<-table(AllOutcomes, AllPredictions > 0.5)
cmatoverall
accu_overall<- (cmatoverall[1,1] + cmatoverall[2,2])/sum(cmatoverall)
accu_overall
|
3897ac6e4bad5e941f9b03048050f2cba1f5a870
|
17ecc993d930afb29a3c835b32b380048b76cd75
|
/project-prototype/DATA_VIS_FINAL.R
|
a38aa47236aa69e89eed15fa9277de0eb61f9847
|
[] |
no_license
|
chrono721/msan622
|
a0ddda2fe3affba54162deebad85018d85a6f098
|
77baa81a631808eb5b8386ca116381266178151f
|
refs/heads/master
| 2021-01-22T16:38:58.421764
| 2014-05-15T23:43:01
| 2014-05-15T23:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,360
|
r
|
DATA_VIS_FINAL.R
|
library(ggplot2)
library(scales)
library(reshape)
library(wordcloud)
library(network)
library(gridExtra)
# Word-usage visualizations for the Federalist Papers.  Expects three CSVs
# in the working directory:
#   counts.csv - one row per paper, one column per word (word counts)
#   POS.csv    - Word -> part-of-speech lookup (columns Word, POS)
#   basic.csv  - per-paper metadata (PAPER_NAME, AUTHOR, DATE, ...)
curr_dir <- getwd()
myData_count <- read.csv("counts.csv")
myData_POS <- read.csv("POS.csv")
myData_basic <- read.csv("basic.csv")
# TRANSFORM BASIC DATA: add per-paper word-count totals, overall and per
# part of speech (word columns selected through the POS lookup).
myData_basic$PAPER_NAME <- as.character(myData_basic$PAPER_NAME)
myData_basic$DATE <- as.Date(myData_basic$DATE, format = "%m/%d/%Y")
myData_basic$WC_TOTAL <- rowSums(myData_count)
myData_basic$WC_NOUN <- rowSums(myData_count[,subset(myData_POS, POS=="noun")$Word])
myData_basic$WC_ADVERB <- rowSums(myData_count[,subset(myData_POS, POS=="adverb")$Word])
myData_basic$WC_ADJECTIVE <- rowSums(myData_count[,subset(myData_POS, POS=="adjective")$Word])
myData_basic$WC_VERB <- rowSums(myData_count[,subset(myData_POS, POS=="verb")$Word])
myData_basic$WC_OTHER <- rowSums(myData_count[,subset(myData_POS, POS %in% c("CD", "modal","PRP", "RBR"))$Word])
# TRANSFORM MYDATA_COUNT: key the count matrix by paper name.
rownames(myData_count) <- myData_basic$PAPER_NAME
# Plot1: Bar Multiples - top-5 words overall, faceted over papers 5..13.
common_words <- names(sort(colSums(myData_count), decreasing=T)[1:5])
common_words_counts <- as.vector(sort(colSums(myData_count), decreasing=T)[1:5])
titles <- myData_basic$PAPER_NAME[5:13]
authors <- myData_basic$AUTHOR[5:13]
# Facet labels of the form "Title (Author)".
title_author <- paste(titles,' (',authors,')', sep="")
word_df <- myData_count[titles,common_words]
word_df$papers <- title_author
# melt() (reshape) -> long format: one row per (paper, word).
bar_df <- melt(word_df, id="papers")
bar_df$papers <- factor(bar_df$papers, levels = title_author)
# Summary table (word, total count), drawn later in its own viewport.
mytable <- tableGrob(cbind(words=common_words,total_counts = common_words_counts),
gpar.rowfill = gpar(fill = "grey90", col = "white"))
# Build the faceted bar chart: dodged bars for the five words, one facet
# per paper.  x is the constant "i" so each facet has a single bar group.
plt1 <- ggplot(bar_df, aes(x="i", y=value))
plt1 <- plt1 + geom_bar(aes(fill=variable),position ="dodge",
stat="identity", width=.25)
# Strip all axes and gridlines; the chart reads from bar heights + legend.
plt1 <- plt1 + theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(fill="white"),
plot.background = element_blank(),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.title = element_blank(),
legend.text = element_text(size=14, face="bold"))
plt1 <- plt1 + ggtitle("Top 5 Most Frequent Words in the Federalist Papers\n")
#plt1 <- plt1 + scale_fill_manual(values=palette[1:3])
plt1 <- plt1 + facet_wrap( ~ papers, ncol=3)
plt1 <- plt1 + theme(strip.background = element_rect(fill = "tan"),
strip.text = element_text(size = 10, face="italic"))
#plt1 <- plt1 + annotation_custom(tableGrob(mytable, gpar.rowfill = gpar(fill = "grey90", col = "white")),
# xmin=0, xmax=0,ymin=0, ymax=0)
# Extract Legend
# Pull the legend ("guide-box") grob out of a ggplot object so it can be
# drawn into its own grid viewport.
g_legend <- function(a.gplot) {
  gtab <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- sapply(gtab$grobs, function(x) x$name)
  gtab$grobs[[which(grob_names == "guide-box")]]
}
legend <- g_legend(plt1)
# Compose the page: main plot on the left 75%, legend top-right, summary
# table bottom-right, each in its own grid viewport.
grid.newpage()
vp1 <- viewport(width = 0.75, height = 1, x = 0.375, y = .5)
vpleg <- viewport(width = 0.25, height = 0.5, x = 0.85, y = 0.75)
subvp <- viewport(width = 0.3, height = 0.3, x = 0.85, y = 0.25)
# NOTE(review): opts() is from pre-0.9.2 ggplot2 and has since been removed;
# modern ggplot2 needs theme(legend.position = "none") here -- confirm the
# installed ggplot2 version before running.
print(plt1 + opts(legend.position = "none"), vp = vp1)
upViewport(0)
pushViewport(vpleg)
grid.draw(legend)
# Make the table viewport active and draw the summary table into it.
upViewport(0)
pushViewport(subvp)
grid.draw(mytable)
#print(plt1)
# Plot2: Wordcloud - compare word usage across the three authors.
#WORD_TOTALS<- apply(counts, 2, function(x) info$Year[which(x == max(x))[1]])
#TOTALS$YEARS <- t(t(WORD_TOTALS))[,1]
authors <- c("HAMILTON", "JAY", "MADISON")
# Build a word x author matrix: each column holds that author's total count
# for every word, summed over the papers attributed to the author.
word_group <- data.frame(WORDS = names(myData_count))
for (author in authors){
papers <- myData_basic$PAPER_NAME[which(myData_basic$AUTHOR==author)]
word_group[,author] <- t(t(colSums(myData_count[papers,])))
}
rownames(word_group)<- word_group$WORDS
word_group$WORDS <- NULL
word_group <- as.matrix(word_group)
# comparison.cloud (wordcloud) highlights words over-used per author.
comparison.cloud(word_group, scale=c(4,.5), title.size=2)
###################################
#Not Done
#Plot3: Lines Plot
# NOTE(review): this section is explicitly marked "Not Done" and references
# `counts` and `info`, which are never defined in this script (the loaded
# data is in myData_count / myData_basic) -- it will error if run as-is.
# The column numbers appear to select specific words by position.
lines <- data.frame(state=counts[,1695], power=counts[,1333],constitution=counts[,385],
world=counts[,1943], america=counts[,97], new=counts[,1207])
lines$Year <- info$Year
lines_melted <- melt(lines, id="Year") # convert to long format
# Tag each row's word as increasing or declining in use; drives the facet
# split below.
Type = character(0)
for (i in 1:length(lines_melted$variable)){
if(lines_melted$variable[i] == "state"){Type <- cbind(Type, "Declined Use of Word")}
if(lines_melted$variable[i] == "constitution"){Type <- cbind(Type, "Declined Use of Word")}
if(lines_melted$variable[i] == "power"){Type <- cbind(Type, "Declined Use of Word")}
if(lines_melted$variable[i] == "america"){Type <- cbind(Type, "Increased Use of Word")}
if(lines_melted$variable[i] == "new"){Type <- cbind(Type, "Increased Use of Word")}
if(lines_melted$variable[i] == "world"){Type <- cbind(Type, "Increased Use of Word")}
}
lines_melted$Type <- as.factor(Type[1,])
# Two-panel line chart: one facet per usage trend, one line per word.
plt2 <- ggplot(data = lines_melted, aes(x=Year, y=value))
plt2 <- plt2 + geom_line(aes(group = variable, color = variable))
plt2 <- plt2 + facet_wrap(~Type, nrow = 2)
plt2 <- plt2 + ggtitle("Word Usage in Inaugural Speeches over the Years")
plt2 <- plt2 + xlab("Year") + ylab("Count of Usage")
plt2 <- plt2 + labs(color="Word Used")
plt2 <- plt2 + scale_x_continuous(breaks = seq(1789,2009,20))
plt2 <- plt2 + scale_y_continuous(breaks = seq(0,60,10))
plt2 <- plt2 + theme(axis.title=element_text(size=12,face="bold"), title = element_text(size=18))
plt2 <- plt2 +theme(panel.background = element_rect(fill="grey60"))
#print(plt2)
#Plot4: Network Graph
# Draw an undirected network with ggplot2: grey segments for the edges and
# points coloured by the "elements" vertex attribute.
plotg <- function(net, value=NULL) {
m <- as.matrix.network.adjacency(net) # get sociomatrix
# get coordinates from Fruchterman and Reingold's force-directed placement algorithm.
plotcord <- data.frame(gplot.layout.fruchtermanreingold(m, NULL))
# or get them from Kamada-Kawai's algorithm:
# plotcord <- data.frame(gplot.layout.kamadakawai(m, NULL))
colnames(plotcord) = c("X1","X2")
edglist <- as.matrix.network.edgelist(net)
# Edge endpoint coordinates (both vertices of every edge) plus midpoints.
edges <- data.frame(plotcord[edglist[,1],], plotcord[edglist[,2],])
plotcord$elements <- as.factor(get.vertex.attribute(net, "elements"))
colnames(edges) <- c("X1","Y1","X2","Y2")
edges$midX <- (edges$X1 + edges$X2) / 2
edges$midY <- (edges$Y1 + edges$Y2) / 2
# NOTE(review): opts()/theme_blank()/theme_rect() and `breaks = NA` are
# pre-0.9.2 ggplot2 API, since removed; this function requires an old
# ggplot2 (or a port to theme()/element_blank()) -- confirm before use.
pnet <- ggplot() +
geom_segment(aes(x=X1, y=Y1, xend = X2, yend = Y2),
data=edges, size = 0.5, colour="grey") +
geom_point(aes(X1, X2,colour=elements), data=plotcord) +
scale_colour_brewer(palette="Set1") +
scale_x_continuous(breaks = NA) + scale_y_continuous(breaks = NA) +
# discard default grid + titles in ggplot2
opts(panel.background = theme_blank()) + opts(legend.position="none")+
opts(axis.title.x = theme_blank(), axis.title.y = theme_blank()) +
opts( legend.background = theme_rect(colour = NA)) +
opts(panel.background = theme_rect(fill = "white", colour = NA)) +
opts(panel.grid.minor = theme_blank(), panel.grid.major = theme_blank())
return(print(pnet))
}
# Demo: random 150-node network; each vertex gets a 0-3 "elements" class.
g <- network(150, directed=FALSE, density=0.03)
classes <- rbinom(150,1,0.5) + rbinom(150,1,0.5) + rbinom(150,1,0.5)
set.vertex.attribute(g, "elements", classes)
plotg(g)
|
cde5e37ad6be714b7ecbf3f99232ee6334e6405f
|
ec2bbe190cb92da561484e88b8ae1a6db88e8238
|
/Plot3.R
|
4507db90a5e62f9ecf456a9d16c3558391fb7a61
|
[] |
no_license
|
HumbertoCanon/ExData_Plotting1
|
55e94879930f89a0e14b8b1afa648a6db35143f9
|
063d9430f08bc1b7d3649273187851eeba6835e3
|
refs/heads/master
| 2022-12-06T15:31:45.181215
| 2020-08-31T05:58:56
| 2020-08-31T05:58:56
| 291,408,843
| 0
| 0
| null | 2020-08-30T05:48:37
| 2020-08-30T05:48:36
| null |
UTF-8
|
R
| false
| false
| 1,253
|
r
|
Plot3.R
|
## Downloading file
# Household power-consumption data; plots energy sub-metering over two days.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "FullData.zip", method = "curl")
## Unzip files
# NOTE(review): the guard tests for "./Electricpowerconsumption", but the
# code below reads "household_power_consumption.txt" -- if unzip() never
# creates that directory the archive is re-extracted on every run; verify.
if (!file.exists("./Electricpowerconsumption"))
{ unzip("FullData.zip") }
## Reading Table and sub setting dates 1/2/2007 and 2/2/2007
FullData <- read.table("household_power_consumption.txt", header = TRUE,
sep=";")
Data <- subset(FullData, FullData$Date=="1/2/2007" |
FullData$Date =="2/2/2007")
## Formatting Data
# Combine the Date and Time text columns into a single POSIXlt timestamp.
Data$TimeDate <- strptime(paste(Data$Date, Data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Convert the sub-metering columns to numeric for plotting (presumably read
# as text because the raw file contains non-numeric missing markers --
# confirm; non-numeric entries become NA with a warning).
Data$Sub_metering_1 <- as.numeric(Data$Sub_metering_1)
Data$Sub_metering_2 <- as.numeric(Data$Sub_metering_2)
Data$Sub_metering_3 <- as.numeric(Data$Sub_metering_3)
## Drawing Plot3 = Plot "Energy sub metering by day"
# 480x480 PNG: sub-metering 1 (black), 2 (red), 3 (blue) over time.
png("plot3.png", width=480, height=480)
with(Data, plot(TimeDate, Sub_metering_1, type="l", xlab= "", ylab="Energy sub metering"))
with(Data, lines(TimeDate, Sub_metering_2, col= "red"))
with(Data, lines(TimeDate, Sub_metering_3, col= "blue"))
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black","red","blue"), lty=1)
dev.off()
|
d55007f983562f56df3aad8a42f876070ad572a5
|
58457e825f17984f737d70cfb1094c8fcd9a094a
|
/man/nested.stdsurv.Rd
|
bcc9f03e57e3226a71e1fa9f71dd61dcd9e7628e
|
[] |
no_license
|
cran/NestedCohort
|
7f305c0365ea99b933f0ffc007eb2c2bda67716c
|
6ce4e5fdb409ab8a1b1d4da34c2908d97bec114f
|
refs/heads/master
| 2016-09-02T04:25:48.397651
| 2012-12-21T00:00:00
| 2012-12-21T00:00:00
| 17,681,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,828
|
rd
|
nested.stdsurv.Rd
|
\name{nested.stdsurv}
\alias{nested.stdsurv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Estimate Standardized Survivals and Attributable Risks for
covariates with missing data }
\description{
The function \code{nested.stdsurv} fits the Cox model to estimate
standardized survival curves and attributable risks for covariates
that are missing data on some cohort members. All covariates must be
factor variables.
\code{nested.stdsurv} requires knowledge of the variables that
missingness depends on, with missingness probability modeled through a
\code{\link{glm}} sampling model. Often, the data is in the form of a
case-control sample taken within a cohort. \code{nested.stdsurv} allows
cases to have missing data, and can extract efficiency from auxiliary
variables by including them in the sampling model. \code{nested.stdsurv}
requires \code{\link{coxph}} from the survival package.
}
\usage{
nested.stdsurv(outcome, exposures, confounders, samplingmod, data,
exposureofinterest = "", timeofinterest = Inf,cuminc=FALSE,
plot = FALSE, plotfilename = "", glmlink = binomial(link = "logit"),
glmcontrol = glm.control(epsilon = 1e-10, maxit = 10, trace = FALSE),
coxphcontrol = coxph.control(eps = 1e-10, iter.max = 50),
missvarwarn = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
Required arguments:
\item{outcome}{ Survival outcome of interest, must be a
\code{\link{Surv}} object }
\item{exposures}{ The part of the right side of the Cox model that parameterizes the
exposures. Never use '*' for interaction, use
\code{\link{interaction}}. Survival probabilities will be computed
for each level of the exposures. }
\item{confounders}{ The part of the right side of the Cox model that
parameterizes the confounders. Never use '*' for interaction, use
\code{\link{interaction}}. }
\item{samplingmod}{ Right side of the formula for the \code{glm}
sampling model that models the probability of missingness }
\item{data}{ Data Frame that all variables are in }
Optional arguments:
\item{exposureofinterest}{ The name of the level of the exposures for which
attributable risk is desired. Default is the first level of the exposure. }
\item{timeofinterest}{ The time at which survival probabilities and
attributable risks are desired. Default is the last event time. }
\item{cuminc}{ Set to T if you want output as cumulative incidence, F
for survival }
\item{plot}{ If T, plot the standardized survivals. Default is F. }
\item{plotfilename}{ A string for the filename to save the plot as }
\item{glmlink}{ Sampling model link function, default is logistic regression }
\item{glmcontrol}{ See \code{\link{glm.control}} }
\item{coxphcontrol}{ See \code{\link{coxph.control}} }
\item{missvarwarn}{ Warn if there is missing data in the sampling
variable. Default is TRUE }
\item{\dots}{ Any additional arguments to be passed on to \code{glm}
or \code{coxph} }
}
\details{
If \code{nested.stdsurv} reports that the sampling model "failed to converge",
the sampling model will be returned for your inspection. Note that if
some sampling probabilities are estimated at 1, the model technically
cannot converge, but you get very close to 1, and \code{nested.stdsurv}
will not report non-convergence for this situation.
Note the following issues.
The data must be in a dataframe and specified in the data statement.
No variable can be named 'o.b.s.e.r.v.e.d.' or 'p.i.h.a.t.'.
Cases and controls cannot be finely matched on time, but
matching on time within large strata is allowed.
strata(), cluster() or offset() statements in exposures
or confounders are not allowed.
Everyone must enter the cohort at the same time on the
survival time scale.
Must use Breslow Tie-Breaking.
All covariates must be factor variables, even if binary.
Do not use '*' to mean interaction in exposures or
confounders, use \code{\link{interaction}}.
}
\value{
A List with the following components:
\item{coxmod }{The fitted Cox model}
\item{samplingmod }{The fitted glm sampling model}
\item{survtable }{Standardized survival (and inference) for each
exposure level}
\item{riskdifftable }{Standardized survival (risk) differences (and
inference) for each exposure level, relative to the exposure of
interest. }
\item{PARtable }{Population Attributable Risk (and inference) for the
exposure of interest}
If plot=T, then the additional component is included:
\item{plotdata }{A matrix with data needed to plot the survivals:
time, standardized survival for each exposure level, and crude
survival. Name of each exposure level is converted to a proper R
variable name (these are the column labels).}
}
\references{
Katki HA, Mark SD. Survival Analysis for Cohorts with Missing
Covariate Information. R-News, 8(1) 14-9, 2008.
http://www.r-project.org/doc/Rnews/Rnews_2008-1.pdf
Mark, S.D. and Katki, H.A. Specifying and Implementing Nonparametric and
Semiparametric Survival Estimators in Two-Stage (sampled) Cohort Studies with
Missing Case Data. Journal of the American Statistical Association, 2006, 101,
460-471.
Mark SD, Katki H. Influence function based variance estimation and
missing data issues in case-cohort studies. Lifetime Data Analysis,
2001; 7; 329-342
Christian C. Abnet, Barry Lai, You-Lin Qiao, Stefan Vogt,
Xian-Mao Luo, Philip R. Taylor, Zhi-Wei Dong, Steven D. Mark,
Sanford M. Dawsey. Zinc concentration in esophageal biopsies measured
by X-ray fluorescence and cancer risk. Journal of the National Cancer
Institute, 2005; 97(4) 301-306
}
\author{ Hormuzd A. Katki }
\note{
Requires the MASS library from the VR bundle that is available from
the CRAN website.
}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{ See Also: \code{\link{nested.coxph}}, \code{\link{zinc}},
\code{\link{nested.km}}, \code{\link{coxph}}, \code{\link{glm}}
}
\examples{
## Simple analysis of zinc and esophageal cancer data:
## We sampled zinc (variable znquartiles) on a fraction of the subjects, with
## sampling fractions depending on cancer status and baseline histology.
## We observed the confounding variables on almost all subjects.
data(zinc)
mod <- nested.stdsurv(outcome="Surv(futime01,ec01==1)",
exposures="znquartiles",
confounders="sex+agestr+smoke+drink+mildysp+moddysp+sevdysp+anyhist",
samplingmod="ec01*basehist",exposureofinterest="Q4",data=zinc)
# This is the output:
# Standardized Survival for znquartiles by time 5893
# Survival StdErr 95% CI Left 95% CI Right
# Q1 0.5443 0.07232 0.3932 0.6727
# Q2 0.7595 0.07286 0.5799 0.8703
# Q3 0.7045 0.07174 0.5383 0.8203
# Q4 0.8911 0.06203 0.6863 0.9653
# Crude 0.7784 0.02491 0.7249 0.8228
# Standardized Risk Differences vs. znquartiles = Q4 by time 5893
# Risk Difference StdErr 95% CI Left 95% CI Right
# Q4 - Q1 0.3468 0.10376 0.143412 0.5502
# Q4 - Q2 0.1316 0.09605 -0.056694 0.3198
# Q4 - Q3 0.1866 0.09355 0.003196 0.3699
# Q4 - Crude 0.1126 0.06353 -0.011871 0.2372
# PAR if everyone had znquartiles = Q4
# Estimate StdErr 95% PAR CI Left 95% PAR CI Right
# PAR 0.5084 0.2777 -0.4872 0.8375
}
\keyword{ models }% at least one, from doc/KEYWORDS
|
22027569f4ba8f3c880ec07a1656b3793ce09ad6
|
959d194222c345f792db3ab838ce65dd2ba2c6af
|
/man/human_anno.Rd
|
0d5e71657e309d18b797d07e5a26036f5ee3e4b4
|
[] |
no_license
|
crukci-bioinformatics/qPLEXanalyzer
|
c5dc6275ad6bd8ce3fbb7ec94d343be633073a3c
|
9780505602b31e30ee4c3c5972fecf4a35058415
|
refs/heads/master
| 2023-04-29T23:57:22.170033
| 2023-04-17T10:59:32
| 2023-04-17T10:59:32
| 130,681,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 374
|
rd
|
human_anno.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qPLEXanalyzer-package.R
\docType{data}
\name{human_anno}
\alias{human_anno}
\title{human_anno dataset}
\format{
An object of class \code{\link{data.frame}} consisting of uniprot
human protein annotation.
}
\description{
Uniprot Human protein annotation table.
}
\keyword{data}
\keyword{datasets}
|
8169b9df5b6506659ef59b103ea18fdba4e73018
|
d62421e292d672e6cc430db51211eaa778e5bc07
|
/Natural Language Processing.R
|
9a567facb829a9b3a822997dfb7672649304f36e
|
[] |
no_license
|
sainath7276331303/sainath
|
e25aa0d172067bb2aebaff84e7d5652168adac59
|
518ca085bd4f738901d511b1dc4644a583d83059
|
refs/heads/master
| 2021-03-10T20:06:12.529471
| 2020-08-30T08:40:16
| 2020-08-30T08:40:16
| 246,482,240
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,451
|
r
|
Natural Language Processing.R
|
# ===========================================================================
# Twitter sentiment-analysis pipeline (WIP script).
# Steps: authenticate against the Twitter API, download a user's timeline,
# clean the text corpus, fit an LDA topic model, cluster the topic table,
# score sentiment with syuzhet, and draw a word cloud.
# FIXES over the original:
#   * bare "-----" section separators were syntax errors -> now comments
#   * read.csv("File.choose()") passed a string literal -> file.choose()
#   * the corpus was re-wrapped with Corpus(VectorSource(document)),
#     which deparses the corpus object -> clean the existing corpus
#   * the term matrix now uses the cleaned corpus, not the raw one
#   * packages are installed only when missing
# ===========================================================================

# Load a package, installing it first only if it is not available.
load_pkg <- function(pkg) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
  library(pkg, character.only = TRUE)
}

load_pkg("twitteR")
load_pkg("ROAuth")
load_pkg("httpuv")
load_pkg("base64enc")

# SECURITY: the hard-coded API credentials below should be revoked and
# supplied via environment variables (e.g. Sys.getenv("TWITTER_API_KEY")).
cred <- OAuthFactory$new(consumerKey='FXTquJNbgDG2dH81XYVqNZFAb', # Consumer Key (API Key)
                         consumerSecret='3y0ALNFzJ8JKyxzFd0ba9FWSUpNSWhPisEIZOB6WCTtcGvP6SO', # Consumer Secret (API Secret)
                         requestURL='https://api.twitter.com/oauth/request_token',
                         accessURL='https://api.twitter.com/oauth/access_token',
                         authURL='https://api.twitter.com/oauth/authorize')
save(cred, file="twitter authentication.Rdata")
load("twitter authentication.Rdata")
setup_twitter_oauth("FXTquJNbgDG2dH81XYVqNZFAb",
                    "3y0ALNFzJ8JKyxzFd0ba9FWSUpNSWhPisEIZOB6WCTtcGvP6SO",
                    "529590041-qOXLd769cQEUTbXg3iRqCd33pC1K6xoORrGOMJDh",
                    "WlqZJwXFQzf64IuojkbKh1jdT5cnSY8U44pqmz6Sc1d4A")

# ------------------------- Twitter extraction ------------------------------
tweets_ext <- userTimeline("iamsrk", n = 1000)
tweets_df <- twListToDF(tweets_ext)
write.csv(tweets_df, "iamsrk.csv")
getwd()

load_pkg("tm")
load_pkg("wordcloud")
load_pkg("topicmodels")
library(RColorBrewer)

# FIX: the original called read.csv("File.choose()") -- a string, not a call.
text <- read.csv(file.choose())
View(text)
document <- Corpus(VectorSource(text$text))
inspect(document[10])

# ------------------- Clean the corpus --------------------------------------
# FIX: clean `document` itself; wrapping it in Corpus(VectorSource(...))
# deparsed the corpus object rather than using its text.
mydata.corpus <- tm_map(document, removePunctuation)
my_stopwords <- c(stopwords('english'),"brothers", "sisters", "the", "due", "are", "not", "for", "this", "and", "that", "there", "new", "near", "beyond", "time", "from", "been", "both", "than", "has","now", "until", "all", "use", "two", "ave", "blvd", "east", "between", "end", "have", "avenue", "before", "just", "mac", "being", "when","levels","remaining","based", "still", "off", "over", "only", "north", "past", "twin", "while","then")
mydata.corpus <- tm_map(mydata.corpus, removeWords, my_stopwords)
mydata.corpus <- tm_map(mydata.corpus, removeNumbers)
mydata.corpus <- tm_map(mydata.corpus, stripWhitespace)
inspect(mydata.corpus[[10]])

# ------------------------- Document term matrix ----------------------------
# FIX: build the matrix from the cleaned corpus (the original used the raw
# corpus, discarding all of the cleaning above).
doctm <- TermDocumentMatrix(mydata.corpus)
dim(doctm)
ctdm <- as.DocumentTermMatrix(doctm)
# Drop documents that became empty after cleaning.
rowtotals <- apply(ctdm, 1, sum)
ctdm.new <- ctdm[rowtotals > 0, ]
lda <- LDA(ctdm.new, 10)        # 10-topic LDA model
terms <- terms(lda, 5)          # top 5 terms per topic
terms
topic <- terms(lda)             # most likely topic term per document
tab <- table(names(topic), unlist(topic))
head(tab)

load_pkg("cluster")
load_pkg("dendextend")
cluster <- hclust(dist(tab), method = "ward.D2")
col_bran <- color_branches(cluster, k = 3)
plot(col_bran)
plot(cluster)

# ------------------------ Language filtering -------------------------------
load_pkg("textcat")
table(textcat(document))
consider <- c(which(textcat(document) == "english"))
documen2 <- document[consider]
documen3 <- as.character(documen2)
load_pkg("syuzhet")
SRK_tweets <- get_sentences(documen3)

# ----------------------- Sentiment analysis --------------------------------
sentiment_vector <- get_sentiment(SRK_tweets, method = "bing")
head(sentiment_vector)
afinn_SRK_tweets <- get_sentiment(SRK_tweets, method = "afinn")
head(afinn_SRK_tweets)
nrc_data <- get_nrc_sentiment(documen3)
head(nrc_data)
sum(sentiment_vector)
mean(sentiment_vector)
summary(sentiment_vector)
plot(sentiment_vector, type = "l", main = "Plot Trajectory",
     xlab = "Narrative Time", ylab = "Emotional Valence")
abline(h = 0, col = "red")
nrc_data <- get_nrc_sentiment(SRK_tweets)
barplot(sort(colSums(prop.table(nrc_data[, 1:10]))), horiz = TRUE, cex.names = 0.7,
        las = 1, main = "Emotions", xlab = "Percentage",
        col = 1:8)

# ----------------------- Word cloud ----------------------------------------
freq <- rowSums(as.matrix(doctm))
length(freq)
ord <- order(freq, decreasing = TRUE)
freq[head(ord)]
freq[tail(ord)]
df <- data.frame(word = names(freq), freq = freq)
windows()
wordcloud(words = df$word, freq = df$freq, min.freq = 3, max.words = 100,
          random.order = FALSE, col = brewer.pal(8, "Dark2"))
findFreqTerms(doctm, lowfreq = 8)
findAssocs(doctm, terms = "happiness", corlimit = 0.3)
head(df, 10)
barplot(df[1:10, ]$freq, names.arg = df[1:10, ]$word, col = "forestgreen",
        main = "most used terms", ylab = "word frequency")
|
ac8e5cab2482e00833625ad0999d934d2cead65e
|
a6f483e7b06810d109add106ed70991b4c67b2b6
|
/elephant-function.R
|
648fae694d5c611788104ec54b491ac656b50eaa
|
[] |
no_license
|
jdyen/coding-club-plotting
|
0e0e18f5a802091f73306e0439c70c3dfa9676d9
|
74dde0850077e460e7f479088ee462c8be5b442a
|
refs/heads/master
| 2020-04-27T10:39:11.364282
| 2019-03-07T03:22:06
| 2019-03-07T03:22:06
| 174,263,351
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
r
|
elephant-function.R
|
# R code to draw an elephant from four parameters (five if you want an eye and a wiggling trunk)
# (see Mayer et al. 2010 [Am. J. Phys. 78:648-649] for details)
# elephant_parameters <- c(50 - 30i, 18 + 8i, 12 - 10i, -14 - 60i, 40 + 20i)
# helper function to calculate a Fourier series
# Evaluate a Fourier series at the points t_param.
# The k-th complex coefficient contributes Re(c_k)*cos(k*t) + Im(c_k)*sin(k*t).
#
# Args:
#   t_param: numeric vector of evaluation points.
#   c_param: complex coefficient vector (harmonic k = index k).
# Returns a numeric vector the same length as t_param.
fourier_series <- function(t_param, c_param) {
  cos_coef <- Re(c_param)
  sin_coef <- Im(c_param)
  harmonics <- lapply(seq_along(c_param), function(k) {
    cos_coef[k] * cos(k * t_param) + sin_coef[k] * sin(k * t_param)
  })
  Reduce(`+`, harmonics, rep(0, length(t_param)))
}
# function to create the elephant outline with an eye
# Build the elephant outline (Mayer et al. 2010 four-parameter elephant)
# plus an eye point derived from the fifth parameter.
#
# Args:
#   t_param:    numeric vector of curve parameter values.
#   parameters: complex vector of (at least) five model parameters.
# Returns a list with outline coordinates x, y and eye coordinates
# eye_x, eye_y.
elephant <- function(t_param, parameters) {
  # Six harmonics; entries not set below stay at 0+0i.
  n_harmonics <- 6
  coef_x <- complex(n_harmonics)
  coef_y <- complex(n_harmonics)
  coef_x[c(1, 2)] <- Re(parameters[1:2]) * 1i
  coef_x[3] <- Re(parameters[3])
  coef_x[5] <- Re(parameters[4])
  coef_y[1] <- Im(parameters[4]) + Im(parameters[1]) * 1i
  coef_y[c(2, 3)] <- Im(parameters[2:3]) * 1i
  # Outline: note the deliberate swap (x from the y-coefficients and vice
  # versa) and the sign flip on y, exactly as in the original.
  outline_x <- fourier_series(t_param, coef_y)
  outline_y <- fourier_series(t_param, coef_x)
  # Eye position: both coordinates come from Im(parameters[5]).
  pupil <- Im(parameters[5])
  list(x = outline_x, y = -outline_y, eye_x = pupil, eye_y = pupil)
}
|
a586e4616e55d1578ec401a9bcde56ce57fb270b
|
d11ba1f0a6f085812fff4a4acbd303f2a6023b96
|
/carOrder.r
|
c3fc65e895b59eb74df50f637e3ba082815ecad6
|
[] |
no_license
|
fangju2013/JDcode
|
8d24f7f3be1cfa49540c7152290b651da083f252
|
1b29edf9010c2587ff633360e346aa5df0fa2007
|
refs/heads/master
| 2021-01-19T21:59:07.484115
| 2017-04-19T10:39:17
| 2017-04-19T10:39:17
| 88,733,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,153
|
r
|
carOrder.r
|
# Analysis of JD April-2016 user-action logs: for category-8 products, flag
# users whose add-to-cart (type 2) in one time window is paired with an
# order (type 4) in the adjacent window (see cartOrderFunc / tmpFunc below).
setwd('E:\\JDproject\\JData')  # NOTE(review): hard-coded path; avoid setwd() in scripts
library(data.table)
library(dtplyr)
library(dplyr)
library(stringr)
raw.data <- fread('JData_Action_201604.csv',header = T,stringsAsFactors = F)
analysisData <- raw.data
setkey(analysisData,time,type,cate)
# Keep only add-to-cart (2) and order (4) actions for category 8 in April.
testData <- analysisData[type%in%c(2,4)&cate==8&time>='2016-04-01']
# For cate 8: after an add-to-cart, does an order follow in the adjacent
# time window? (Encoded per user by cartOrderFunc.)
g.p <- group_by(testData,user_id)
sumResult <- summarise(g.p,value = cartOrderFunc(cate,time,type))
sumResult <- data.frame(sumResult,stringsAsFactors = F)
# Split the encoded "cate#flag1#flag2" string back into columns.
# NOTE(review): this assumes exactly one category (three '#'-fields) per
# user; multi-category users are joined with '/' by cartOrderFunc and
# would break this rbind -- confirm.
tmpdf <- do.call(rbind,str_split(sumResult$value,'#'))
colnames(tmpdf) <- c('cate','timeLag1','timeLag2')
tmpdf <- cbind(user_id = sumResult$user_id,tmpdf)
result <- data.frame(tmpdf,stringsAsFactors = F)
result <- data.table(result)
setkey(result,timeLag1,timeLag2)
# Users with a cart->order flag in either window pair (character "1"
# compared against numeric 1; R coerces).
result <- result[timeLag1 == 1 | timeLag2 == 1]
result
# For one user's action records: summarise, per product category, the
# cart-then-order flags produced by tmpFunc, then encode each category as
# "cate#flags" and join multiple categories with '/'.
#
# Args:
#   cate: category codes, one per action.
#   time: action timestamps.
#   type: action-type codes.
# Returns a single encoded character string.
cartOrderFunc <- function(cate, time, type) {
  actions <- data.frame(cate = cate, time = time, type = type,
                        stringsAsFactors = FALSE)
  per_cate <- summarise(group_by(actions, cate), value = tmpFunc(type, time))
  per_cate <- data.frame(per_cate, stringsAsFactors = FALSE)
  encoded <- paste(per_cate$cate, per_cate$value, sep = '#')
  paste(encoded, collapse = '/')
}
# stats whether will order in the next window according to add the cart
# Bucket one user's action history into `win`-day windows counted back from
# the most recent action (window 1 = latest `win` days), then for each
# window j (except the last) emit a 0/1 flag: does window j contain an
# add-to-cart (type 2) while window j+1 contains an order (type 4)?
# Flags are joined with '#'.
# NOTE(review): window j+1 covers *earlier* calendar dates than window j --
# confirm the intended cart->order temporal direction.
#
# Args:
#   type:     action-type codes (2 = add to cart, 4 = order).
#   time:     action timestamps (coercible via as.Date).
#   win:      window width in days.
#   cutGroup: window indices to build.
tmpFunc <- function(type,time,win = 5,cutGroup = c(1,2,3)){
time <- as.Date(time)
end <- max(time)
tmpdf <- data.frame(type = type,time = time)
# Assign each action to a window index.
# NOTE(review): cutTime is built window-by-window, so its order only lines
# up with tmpdf's rows if `time` arrives grouped by window -- confirm.
cutTime <- c()
for(i in cutGroup){
tmptime <- time[time > end - i*win & time <= end - (i-1)*win]
tmp <- c(rep(i,length(tmptime)))
cutTime <- append(cutTime,tmp)
}
tmpdf$cutTime <- cutTime
cartOrderCount <- c()
for(j in cutGroup[-length(cutGroup)]){
cutTmpdf <- tmpdf[tmpdf$cutTime == j,]
type2 <- which(cutTmpdf$type == 2)
cutNextTmpdf <- tmpdf[tmpdf$cutTime == (j+1),]
type4 <- which(cutNextTmpdf$type == 4)
# NOTE(review): this intersects row *positions* from two different
# subsets, not shared record identities -- verify this is intended.
type24 <- intersect(type2,type4)
# if_else() is from dplyr; base ifelse() would avoid the dependency.
tmp <- if_else(length(type24)>0,1,0)
cartOrderCount <- append(cartOrderCount,tmp)
}
result <- paste(cartOrderCount,collapse = '#')
return(result)
}
|
6d62034735622791d9a5ea81ce929f37fbbeaec8
|
be5d16a8bfbd1f721d20bfc1596ab6ebca674f48
|
/ecopackage/R/reali.R
|
7819f08b763924f52bdad2dcb9d2ea6710ff7e6f
|
[] |
no_license
|
gabriellecy/ecopackage
|
24e162535517d1a2004d6a808a772e2f7ee3713f
|
a7b0273dc7a17d4ab7b88ad8a393244b1f9b2f8d
|
refs/heads/main
| 2023-03-18T17:18:18.482087
| 2021-03-13T23:52:50
| 2021-03-13T23:52:50
| 347,504,248
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
reali.R
|
#Functions
#Macro-economics functions - reali
reali <- function(nominalrate, inflationrate) {
  # Exact real interest rate: (1 + nominal) / (1 + inflation) - 1.
  #
  # Args:
  #   nominalrate:   nominal rate (e.g. 0.05 for 5%).
  #   inflationrate: inflation rate over the same period.
  # Returns the real rate; vectorised over both arguments.
  growth_ratio <- (1 + nominalrate) / (1 + inflationrate)
  growth_ratio - 1
}
|
5baa0fd48fa42ffc9a76162dc21442a525927f1b
|
dab5210c836ab2801c620035c2ae414407c8f0d4
|
/tests/testthat.R
|
c3b0d2134f9fae469076c61d4538db084ec5e11f
|
[] |
no_license
|
clawilso15/Text.Replace
|
32820a6e6a3c2a193d6806f3afb50c41b8b22fc0
|
fe6292471a3e22c5c766ddb63c75b36c83d10447
|
refs/heads/master
| 2021-07-18T07:19:39.362768
| 2021-02-04T20:59:02
| 2021-02-04T20:59:02
| 236,492,662
| 0
| 2
| null | 2020-03-17T01:50:27
| 2020-01-27T13:07:15
|
HTML
|
UTF-8
|
R
| false
| false
| 87
|
r
|
testthat.R
|
# Test runner entry point (tests/testthat.R) for the Text.Replace package:
# loads testthat plus the package under test, then executes the suite under
# tests/testthat/ via test_check().
library(testthat)
library(Text.Replace)
library(rprojroot)  # NOTE(review): not used in this file; presumably required by individual tests -- confirm
test_check("Text.Replace")
|
d190b6eaad998d19f6b540076e21e2bdd931291b
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131110-test.R
|
7c0162838095e752fab7b1e163665881913d90ba
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
1610131110-test.R
|
# Auto-generated fuzzer (libFuzzer/valgrind) regression input: replays a
# recorded argument list against the internal grattan:::anyOutside routine
# and prints the structure of the result. The literal values must stay
# byte-identical to reproduce the original crash/finding.
testlist <- list(a = 0L, b = 0L, x = c(1769471L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -2049L, -250L, 50331647L, -193L, -14211840L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
10a8b9925174964f6f88b7d79020e51d9947c3b8
|
26987921c09ebdaf2cee68514d74895b65b7a9dd
|
/AEMC Consulatation/SAandQLD check.R
|
06e85997faa3751fc85c255edd9e38325c13a791
|
[] |
no_license
|
MatthewKatzen/NEM_LMP
|
fdab3d89dd3ef30b8660263b8513895a8c6eecaf
|
1602e96eb38ab903f8a6634f3583ce52d6bcb27c
|
refs/heads/master
| 2020-12-27T03:02:59.350375
| 2020-04-21T07:15:19
| 2020-04-21T07:15:19
| 237,742,407
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,253
|
r
|
SAandQLD check.R
|
# Cross-check SA and QLD generator DUIDs against NEMSIGHT station metadata,
# then total TObar for the QLD stations from the 2019 step-4 summaries.
# NOTE(review): relies on objects defined elsewhere (NEMSIGHT_Details,
# summary_table_2) and on data.table/dplyr/purrr being attached.

# South Australian generator DUIDs of interest.
temp1 <- c("NBHWF1", "BLUFF1", "BNGSF1", "BNGSF2", "SNOWNTH1", "SNOWSTH1", "CLEMGPWF", "HDWF1", "HDWF2", "HDWF3", "HALLWF1", "LGAPWF1", "LKBONNY2", "LKBONNY3", "HALLWF2", "SNOWTWN1", "TBSF1", "WGWF1", "WATERLWF")
# FIX: the original filtered on `temp`, which is not defined in this script
# (a stale name); filter on temp1.
temp1.1 <- NEMSIGHT_Details %>% filter(duid %in% temp1) %>% select(station) %>% .[["station"]]

# Queensland generator DUIDs of interest.
temp2 <- c("BARCALDN", "LILYSF1", "BARRON-1", "BARRON-2", "CALL_B_1", "CALL_B_2", "CPP_3", "CPP_4", "DAYDSF1", "HAYMSF1", "CLARESF1", "CSPVPS1", "YABULU2", "GSTONE1", "GSTONE2", "GSTONE3", "GSTONE4", "GSTONE5", "GSTONE6", "HAUGHT11", "KAREEYA1", "KAREEYA2", "KAREEYA3", "KAREEYA4", "EMERASF1", "CLERMSF1", "MACKAYGT", "RUGBYR1", "MSTUART1", "MSTUART2", "MSTUART3", "KSP1", "RRSF1", "QROW1K", "QROW2K", "HAMISF1", "WHITSF1", "STAN-1", "STAN-2", "STAN-3", "STAN-4", "YABULU", "SMCSF1", "MEWF1")
temp2.1 <- NEMSIGHT_Details %>% filter(duid %in% temp2) %>% select(station, fuel_type) %>% .[["station"]]

# Load all step-4 cleaned INITIALMW files for 2019 and summarise by station.
Step_4_Location <- "D:/AEMC Consultation/Data/Cleaned/INITIALMW/2019/Step 4 - Mutated/"
Step_4_files <- paste0(Step_4_Location, list.files(paste0(Step_4_Location)))
data_2019 <- Step_4_files %>% map(~fread(.x)) %>% rbindlist() %>% group_by(station) %>% summary_table_2()

# Total TObar across the QLD stations.
data_2019 %>% filter(station %in% temp2.1) %>% summarise(sum(TObar))
|
58f776ae8a07ef81a6ae544df4ef96ad59cee84e
|
7a5c2b74b4e3d2648f237a0a50821f8eafbf7748
|
/Unsupervised Learning/kMeans.R
|
256c25523598b853d1f6e7b9aea8ad13171063f4
|
[] |
no_license
|
TYSingh/Unsupervised-Learning
|
2687d915405632424805fe22a8865be138a51c30
|
45f8f94e6425cc1c0f146db6c1755ef1be50ec50
|
refs/heads/master
| 2021-01-17T12:02:49.155116
| 2016-07-13T17:36:48
| 2016-07-13T17:36:48
| 63,266,338
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,033
|
r
|
kMeans.R
|
# K-means clustering of the protein-consumption dataset: cluster countries
# on their numeric protein-source columns and plot RedMeat vs WhiteMeat
# coloured by cluster. The CSV (country name in column 1, numeric features
# after) is chosen interactively via file.choose().
library(graphics)   # FIX: library() errors on failure; require() only warns
library(cluster)
# FIX: install fpc only when missing instead of reinstalling on every run.
if (!requireNamespace("fpc", quietly = TRUE)) {
  install.packages("fpc")
}
library(fpc)

# NOTE(review): clearing the workspace inside a script is hostile to anyone
# sourcing it; kept from the original for identical behaviour.
rm(list = ls())
food <- read.csv(file.choose(), header = TRUE)
set.seed(1)  # reproducible cluster assignment
grpProtein <- kmeans(food[, -1], centers = 5, nstart = 10)
grpProtein
o <- order(grpProtein$cluster)
o
# Countries listed in cluster order.
data.frame(food$Country[o], grpProtein$cluster[o])
windows()
plot(food$RedMeat, food$WhiteMeat, type = "n", xlim = c(3, 19),
     xlab = "Red meat", ylab = "White Meat")
# FIX: use one colour per cluster (5) rather than a stale hard-coded 7
# left over from an earlier 7-cluster run.
text(x = food$RedMeat, y = food$WhiteMeat, labels = food$Country,
     col = rainbow(5)[grpProtein$cluster])
|
af87b24313cce6b70651669f52e4772dbaf026ef
|
fde275b085fe2ee9fadebf0773144b9c8392189f
|
/getScriptsGH.R
|
445dcd4cd82c72ec2127b10c5cb11dab4734408d
|
[] |
no_license
|
roosadoekemeijer/Rscripts
|
21e76eb73aa1829a078ec7a89b1bac6ea030a25a
|
613eb6764d9905f393dfd7489d9f7273679f3396
|
refs/heads/master
| 2023-04-07T20:50:59.841942
| 2023-03-17T16:32:45
| 2023-03-17T16:32:45
| 235,764,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
getScriptsGH.R
|
get.myGHscripts <- function(files="ALL",
GH=c("user"="roosadoekemeijer",
"repos"="Rscripts",
"branch"="master")){
## Source R scripts from a personal GitHub repository into the global env.
## Input: names of scripts on GH that need to be loaded ("ALL" loads every
## file in the repo tree); GH details (username, repository, and branch).
## Returns: NA (called for its side effect of defining objects globally).
# NOTE(review): require()+install.packages() inside a function is fragile;
# prefer library() at script level, or requireNamespace() for optional deps.
if(!require(httr)){install.packages("httr");library(httr)}
# List every path in the repository via the GitHub git/trees API (recursive).
GHfiles <- unlist(lapply(content(GET(paste0(
"https://api.github.com/repos/",GH['user'],"/",GH['repos'],
"/git/trees/", GH['branch'],"?recursive=1")))$tree,"[", "path"))
if(any(files=="ALL")) {files<-GHfiles}
# Snapshot of local names, used later to detect what the sourced code added.
ls <- ls()
for(f in files) {
# NOTE(review): curl() belongs to the curl package, which library(httr)
# does not attach -- confirm curl is on the search path for callers.
con <- curl(paste0(
"https://raw.githubusercontent.com/",
GH['user'],"/",GH['repos'],"/",GH['branch'],"/",f))
eval(parse(text = readLines(con)))
close(con)
}
# Copy every newly created object into the global environment.
loaded_f <- setdiff(ls(),ls)
for (f in loaded_f) assign(f,get(f),envir = .GlobalEnv)
# NOTE(review): ls/f/con are local to this function; rm(..., envir =
# .GlobalEnv) targets same-named *global* objects and warns when they do
# not exist -- verify the intended environment.
rm(ls,f,con,envir = .GlobalEnv)
}
|
08369bc385332f4156cffd62d84c1b47741552db
|
10b908437ccb5123218ee56191cd4bf42c6051df
|
/Bacillus/HGT_position/Functions/otherfunction_WIP.R
|
783248e742974bc4735ec70c44d69d493e657038
|
[] |
no_license
|
AlexanderEsin/Scripts
|
da258f76c572b50da270c66fde3b81fdb514e561
|
b246b0074cd00f20e5a3bc31b309a73c676ff92b
|
refs/heads/master
| 2021-01-12T15:10:47.063659
| 2019-03-10T15:09:38
| 2019-03-10T15:09:38
| 69,351,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,103
|
r
|
otherfunction_WIP.R
|
## // Subdivision functions // ##
## // Functional analysis // ##
## For a datatype and penalty (if not "All") extract COG list and total number
# Tabulate COG-category proportions for one data type (and optionally one
# penalty) from the global `perTypeData` structure.
#
# Args:
#   dataType: name of the data type to look up in perTypeData.
#   Penalty:  penalty level; NA means use the type-level data and label
#             the proportion column "Prop_All".
# Returns a list: propDF (COG + proportion column) and totalCOGs (count).
getPropCOG <- function(dataType, Penalty = NA) {
  if (is.na(Penalty)) {
    cogs <- unlist(perTypeData[[dataType]]$allPosData$COGcat)
    prop_col <- "Prop_All"
  } else {
    cogs <- unlist(perTypeData[[dataType]][[Penalty]]$allPosData$COGcat)
    prop_col <- paste0(dataType, "_T", Penalty)
  }
  # Naming the table dimension "COG" keeps the original column name.
  counts <- as.data.frame(table(COG = cogs), stringsAsFactors = FALSE)
  n_total <- sum(counts$Freq)
  counts$Prop <- counts$Freq / n_total
  names(counts)[3] <- prop_col
  # Drop the raw Freq column (position 2), keep COG + proportion.
  list(propDF = counts[, -2], totalCOGs = n_total)
}
## Calculate the enrichment of COGs in one Class (and penalty) over another
## For comparison against Vertical - use penalty 3 as this is the most stringent set
# Log-ratio enrichment of per-COG proportions of one class/penalty relative
# to a background, read from the global `perTypeCOGprop` structure.
# For comparison against Vertical, penalty 3 is used as the most stringent
# baseline.
#
# Args:
#   data:       name of the class to compare (e.g. "lHGT").
#   background: "Ver" (vertical, penalty-3 baseline) or "All".
#   penalty:    penalty level of `data` to use.
# Returns a two-column data frame: COG and Diff_T<penalty> =
# log(data proportion / background proportion).
calcPropEnrich <- function(data = "lHGT", background = "Ver", penalty = "3") {
	if (identical(data, background)) {
		stop("Can't compare to itself")
	} else if (identical(background, "Ver")) {
		bg_df <- perTypeCOGprop$Ver$'3'$propCOGdf
	} else if (identical(background, "All")) {
		bg_df <- perTypeCOGprop$All$propCOGdf
	} else {
		# FIX: previously an unsupported background fell through silently and
		# surfaced later as a confusing "object 'bg_df' not found" error.
		stop("Unsupported background: ", background)
	}

	data_df <- perTypeCOGprop[[data]][[penalty]]$propCOGdf
	join_df <- inner_join(data_df, bg_df, by = "COG")
	join_df$Diff <- log(join_df[, 2] / join_df[, 3])
	names(join_df)[4] <- paste0("Diff_T", penalty)
	return(join_df[, c(1, 4)])
}
## Calculate the per-COG ranges of enrichment values (across penalties)
## Produce coordinates to pass to geom_rect
# Per-COG min/max range of enrichment values across penalty columns,
# expressed as rectangle coordinates for geom_rect.
#
# Args:
#   df:        data frame with a COG column followed by numeric enrichment
#              columns (one per penalty); cells may be NA.
#   molten_df: molten data frame whose COG factor levels fix the x-axis
#              order (one df row is expected per level).
#   box_width: half-width of each rectangle around its integer x position.
# Returns a data frame with columns x1, x2, y1, y2 (one row per COG level).
calcEnrichRangesForPlot <- function(df, molten_df, box_width = 0.2) {
	cog_levels <- levels(molten_df$COG)
	rows <- lapply(seq_along(cog_levels), function(i) {
		row_number <- which(df$COG == cog_levels[i])
		## Do not count NAs in identifying min/max values ##
		data_line <- df[row_number, -1]
		data_line_clean <- data_line[, as.vector(!is.na(data_line))]
		c(x1 = i - box_width, x2 = i + box_width,
		  y1 = min(data_line_clean), y2 = max(data_line_clean))
	})
	# FIX: bind all rows in one step instead of growing the data frame one
	# row per iteration (O(n^2) copying), and iterate with seq_along rather
	# than 1:length().
	coordinate_df <- as.data.frame(do.call(rbind, rows))
	return(coordinate_df)
}
## // ggplot // ##
# Centre a ggplot legend title over the legend keys by redistributing the
# trailing spacer column in the legend's gtable. Takes a ggplot object and
# returns the modified gtable (draw it with grid::grid.draw()).
centerLegendTitle <- function(plot) {
## NOTE(review): gtable_add_cols() comes from the gtable package, not
## gtools as originally stated -- confirm gtable is loaded by callers.
# extract legend
g <- ggplotGrob(plot)
grobs <- g$grobs
legend_index <- which(sapply(grobs, function(x) x$name) == "guide-box")
legend <- grobs[[legend_index]]
# extract guides table
guides_index <- which(sapply(legend$grobs, function(x) x$name) == "layout")
guides <- legend$grobs[[guides_index]]
# add extra column for spacing
# guides$width[5] is the extra spacing from the end of the legend text
# to the end of the legend title. If we instead distribute it 50:50 on
# both sides, we get a centered legend
guides <- gtable_add_cols(guides, 0.5*guides$width[5], 1)
guides$widths[6] <- guides$widths[2]
# Stretch the title cell across the widened layout so it re-centres.
title_index <- guides$layout$name == "title"
guides$layout$l[title_index] <- 2
# reconstruct legend and write back
legend$grobs[[guides_index]] <- guides
g$grobs[[legend_index]] <- legend
return(g)
}
|
7be460bc3cdba75a8f26c20f424ffb9fd336837f
|
3bdf32a8a82312ac3027da069665356553b775e2
|
/cachematrix.R
|
e5989721dd8d06bbd571fae295c6feb530d63da5
|
[] |
no_license
|
rolaren/ProgrammingAssignment2
|
d21789a7f847c4a206a1d6929a31b17602aeeb1f
|
c4792592b0061826849a96c68dc5c462f6054682
|
refs/heads/master
| 2020-05-29T11:46:12.955119
| 2015-01-23T12:10:51
| 2015-01-23T12:10:51
| 29,544,899
| 0
| 0
| null | 2015-01-20T18:30:02
| 2015-01-20T18:30:02
| null |
UTF-8
|
R
| false
| false
| 2,016
|
r
|
cachematrix.R
|
## Description of cachematrix.R
## Function: makeCacheMatrix
## Arguments: a square matrix to be evaluated.
## Returns: list of functions
## Function: cacheSolve
## Arguments: a matrix
## Returns: the inverse of square matrix from calculus or cached data
## How to use cachematrix.R
## Load the file ## source("cachematrix.R")
## Create a matrix ## mymatrix <- matrix(c(9,1,3,6,13,11,7,0,5,7,4,7,2,6,1,10), ncol = 4, nrow=4)
## Store the matrix ## mCM <- makeCacheMatrix(mymatrix)
## Solve the inverse ## cacheSolve(mCM)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  #
  # Arg:
  #   x: the matrix to wrap.
  #
  # Return:
  #   A list of accessor closures: set/get for the matrix itself, and
  #   setinverse/getinverse for the cached inverse.
  inv_cache <- NULL
  list(
    # Replacing the matrix invalidates any previously cached inverse.
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    get = function() x,
    setinverse = function(value) inv_cache <<- value,
    getinverse = function() inv_cache
  )
}
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped with makeCacheMatrix(),
  # computing it at most once: reuse the cached value when present,
  # otherwise solve() the matrix and store the result in the cache.
  #
  # Arg:
  #   x: a cache-aware matrix wrapper created with makeCacheMatrix().
  #      Extra arguments in ... are forwarded to solve().
  #
  # Return:
  #   The inverse of the wrapped (square, invertible) matrix.
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    message("Setting a new data in cache")
  } else {
    message("Using cached data")
  }
  cached
}
|
804b2795ee9dc9bed903b0c7035e6b40b45b2efd
|
c6e7d4ca46733b76dbd0ee2bd1f9eb5d8b6e9553
|
/man/ISOP-package.Rd
|
75dcad5eab737c3870ac3689db6395cc5230d245
|
[] |
no_license
|
nghiavtr/ISOP
|
b58b8a3203b05edcca5fd5200f098cc995de7082
|
99b7b00729d5310eacd9dbf85397dac820c7f7e5
|
refs/heads/master
| 2023-05-01T00:06:44.598945
| 2023-04-20T11:16:57
| 2023-04-20T11:16:57
| 46,860,810
| 12
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 622
|
rd
|
ISOP-package.Rd
|
\name{ISOP-package}
\alias{ISOP-package}
\alias{ISOP}
\docType{package}
\title{
Isoform-level Expression Patterns in Single-cell RNA-sequencing Data
}
\description{
Isoform-level Expression Patterns in Single-cell RNA-sequencing Data
}
\details{
\tabular{ll}{
Package: \tab ISOP\cr
Type: \tab Package\cr
Version: \tab 0.99.1\cr
Date: \tab 2017-02-24\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
}
}
\author{
Trung Nghia Vu, Yudi Pawitan and Mattias Rantalainen
Maintainer: Trung Nghia Vu <nghiavtr@gmail.com>
}
\examples{
# See the user guide of ISOP via vignette documents
vignette("ISOP")
}
\keyword{ package }
|
743361e856e27a60b368b260b9eec34704851946
|
b3b1b011ab46f024467282baeff0f160e2e91e31
|
/man/createLearningCurvePar.Rd
|
358e4e3c654b3209992f910d71a2ceb9b034aa11
|
[
"Apache-2.0"
] |
permissive
|
schuemie/PatientLevelPrediction
|
5265629020a2406f9f96a4975aa3ab35c9663b92
|
0b59c97a53ab4c6aaf6236048d5bcc9363c2716e
|
refs/heads/master
| 2020-09-05T00:50:10.021513
| 2019-11-06T07:46:44
| 2019-11-06T07:46:44
| 88,721,641
| 0
| 1
| null | 2019-05-01T04:30:23
| 2017-04-19T08:40:26
|
R
|
UTF-8
|
R
| false
| true
| 2,881
|
rd
|
createLearningCurvePar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LearningCurve.R
\name{createLearningCurvePar}
\alias{createLearningCurvePar}
\title{createLearningCurvePar}
\usage{
createLearningCurvePar(population, plpData, modelSettings,
testSplit = "person", testFraction = 0.25, trainFractions = c(0.25, 0.5,
0.75), splitSeed = NULL, nfold = 3, indexes = NULL,
minCovariateFraction = 0.001)
}
\arguments{
\item{population}{The population created using \code{createStudyPopulation()}
that will be used to develop the model.}
\item{plpData}{An object of type \code{plpData} - the patient level
prediction data extracted from the CDM.}
\item{modelSettings}{An object of class \code{modelSettings} created using
one of the function. Currently only one model is supported:
\itemize{
\item{\code{setLassoLogisticRegression} - a lasso logistic regression
model}
}}
\item{testSplit}{Specifies the type of evaluation used. Can be either
\code{'person'} or \code{'time'}. The value \code{'time'} finds the date
that splits the population into the testing and training fractions
provided. Patients with an index after this date are assigned to the test
set and patients with an index prior to this date are assigned to the
training set. The value \code{'person'} splits the data randomly into
testing and training sets according to fractions provided. The split is
stratified by the class label.}
\item{testFraction}{The fraction of the data, which will be used as the
testing set in the patient split evaluation.}
\item{trainFractions}{A list of training fractions to create models for.}
\item{splitSeed}{The seed used to split the testing and training set when
using a 'person' type split}
\item{nfold}{The number of folds used in the cross validation (default =
\code{3}).}
\item{indexes}{A dataframe containing a rowId and index column where the
index value of -1 means in the test set, and positive integer represents
the cross validation fold (default is \code{NULL}).}
\item{minCovariateFraction}{Minimum covariate prevalence in population to
avoid removal during preprocessing.}
}
\value{
A learning curve object containing the various performance measures
obtained by the model for each training set fraction. It can be plotted
using \code{plotLearningCurve}.
}
\description{
Creates a learning curve in parallel, which can be plotted using
the \code{plotLearningCurve()} function. Currently this functionality is
only supported by Lasso Logistic Regression.
}
\examples{
\dontrun{
# define model
modelSettings = setLassoLogisticRegression()
# register parallel backend
registerParallelBackend()
# create learning curve
learningCurve <- createLearningCurvePar(population,
plpData,
modelSettings)
# plot learning curve
plotLearningCurve(learningCurve)
}
}
|
97bbaab8090d0e39fe98a4e1968e3925e579000a
|
7cd73a98b9fe0d9bc3dc0c074a5eba9550250373
|
/R/SIexplorer.R
|
284c7b129883db37829713d4ee1ef1c13b73159b
|
[] |
no_license
|
kosticlab/ogttMetrics
|
e062e5afe204e8c2de386295172f8a41c115a82f
|
172acde694c266b41da061649d888b508a1ebde4
|
refs/heads/master
| 2021-12-14T21:49:30.883714
| 2017-06-08T12:13:05
| 2017-06-08T12:13:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,554
|
r
|
SIexplorer.R
|
#' interactive multipanel visualization for ogttCohort instance
#'
#' Launches a shiny app with four tabs: a cohort-wide SI vs Matsuda scatter,
#' PCA biplots of the glucose and insulin OGTT curves, and a per-subject
#' minimal-model fit produced by \code{minmodByID}.
#'
#' @import shiny
#' @import ggbiplot
#' @importFrom plotly renderPlotly ggplotly plotlyOutput
#' @importFrom ggplot2 scale_x_sqrt scale_y_log10
#' @param oc ogttCohort instance
#' @param winsorizeSI if TRUE, move negative estimates of SI to smallest positive value
#' @param \dots passed to \code{\link{minmodByID}}
#' @examples
#' if (interactive()) {
#' if (options()$example.ask) stop("must set options(example.ask=FALSE) before running example")
#' data(obaSamp)
#' SIexplorer(obaSamp)
#' }
#' @export
SIexplorer = function(oc=obaSamp, winsorizeSI=TRUE, ...) {
# validate input: the cohort must carry Matsuda (Mats120) and SI experiments
stopifnot(is(oc, "ogttCohort"))
eln = names(experiments(oc))
stopifnot("Mats120" %in% eln)
stopifnot("SI" %in% eln)
# get dataset name, sample names
dstxt = deparse(substitute(oc))
allids = colnames(oc)$glucose
#
# build up information for biplots: transpose assay matrices so rows are
# subjects, drop subjects with incomplete curves, then PCA each assay
#
a = assays(experiments(oc))
ins = na.omit(data.frame(t(a$insulin)))
insdrop = attributes(ins)$na.action
insids = colnames(oc)[[1]]
# keep only IDs whose insulin curve survived na.omit
if (length(insdrop)>0) insids = colnames(oc)[[1]][-insdrop]
glu = na.omit(data.frame(t(a$glucose)))
gludrop = attributes(glu)$na.action
gluids = colnames(oc)[[1]]
# keep only IDs whose glucose curve survived na.omit
if (length(gludrop)>0) gluids = colnames(oc)[[1]][-gludrop]
pins = prcomp(ins)
pglu = prcomp(glu)
#
# start app design
#
ui = fluidPage(
sidebarLayout(
sidebarPanel(
textOutput("datasetName"),
textOutput("nrec"),
helpText("Select ID for OGTT component plotting"),
selectInput("idpick", "id", choices=allids,
selected=allids[1]),
selectInput("pcbipick_1", "PC for biplot X", choices=as.character(1:5),
selected="1"),
selectInput("pcbipick_2", "PC for biplot Y", choices=as.character(1:5),
selected="2"),
width=3), # end sidebarPanel
mainPanel(
tabsetPanel(
tabPanel("SI vs Matsuda",
fluidRow(helpText("Cohort-wide SI vs Matsuda, hover over for ID, stats")),
fluidRow(plotlyOutput("sivmat"))
),
tabPanel("Glucose biplot", plotlyOutput("biplots_glucose")),
tabPanel("Insulin biplot", plotlyOutput("biplots_insulin")),
tabPanel("minmod fit",
fluidRow(textOutput("idtext")),
fluidRow(textOutput("sitext")),
fluidRow(plotOutput("demo"))
) # end fit panel
)
) #end mainpanel
) # end layout
) # end page
server = function(input, output, session) {
output$datasetName = renderText(paste("Dataset:", dstxt))
output$nrec = renderText(paste("# OGTT:", length(colnames(oc)[[1]])))
output$idtext = renderText(paste("ID =", input$idpick))
output$sitext = renderText(paste("SI =", round(assays(experiments(oc))$SI[1, input$idpick], 6)))
# cohort-wide SI vs Matsuda scatter with per-subject hover text
output$sivmat = renderPlotly( {
newdf = data.frame(id=colnames(oc)[[1]],
mats120=as.numeric(assays(experiments(oc))$Mats120),
SI=as.numeric(assays(experiments(oc))$SI[1,]),
converged=as.logical(as.numeric(assays(experiments(oc))$SI[2,])))
# optionally replace negative SI estimates with the smallest positive SI
if (winsorizeSI) newdf$SI = ifelse(newdf$SI < 0,
min(newdf$SI[newdf$SI > 0]), newdf$SI)
newdf$text =
as.character(paste0("ID=",newdf$id,
"<br>Mats=", round(newdf$mats120,3),
"<br>SI=", round(newdf$SI,6)))
ggplotly(ggplot(newdf, aes(x=mats120, y=SI,
colour=converged, text=text)) +
geom_point() + scale_x_sqrt() + xlab("mats120") +
scale_y_log10(), tooltip="text")
} )
# minimal-model fit for the selected subject; ... is forwarded to minmodByID
output$demo = renderPlot( {
fit1 = minmodByID(oc, input$idpick, ...)
plot_OGTT_fit(fit1)
} )
# PCA biplot of glucose curves on the user-selected pair of components
output$biplots_glucose = renderPlotly( {
CH1 = gsub("%%N%%", input$pcbipick_1, "PC%%N%%")
CH2 = gsub("%%N%%", input$pcbipick_2, "PC%%N%%")
choices = as.numeric(c(input$pcbipick_1, input$pcbipick_2))
ggplotly(ggbiplot(pglu, choices = choices, labels=gluids) + xlab(CH1) + ylab(CH2) +
theme_gray())
} )
# PCA biplot of insulin curves on the user-selected pair of components
output$biplots_insulin = renderPlotly( {
CH1 = gsub("%%N%%", input$pcbipick_1, "PC%%N%%")
CH2 = gsub("%%N%%", input$pcbipick_2, "PC%%N%%")
choices = as.numeric(c(input$pcbipick_1, input$pcbipick_2))
ggplotly(ggbiplot(pins, choices = choices, labels=insids) + xlab(CH1) + ylab(CH2) +
theme_gray())
} )
}
shinyApp(ui, server)
}
|
7349d3a1588f2bbe3968ac94cebf8374bf7ed543
|
5e5a463bac7dae4ea4ccfaad03f666ae445a6bb9
|
/codes/apt_rate.R
|
1c6423bb5f07873d77851d8641627606e162357c
|
[] |
no_license
|
harryyang1982/geoje
|
fa125a9d2371808a93fd3acf9ede76c6cb79760f
|
6267a6723f4e11099442edfa4860a29a248d60ed
|
refs/heads/master
| 2021-04-28T06:25:10.429828
| 2018-11-18T13:10:36
| 2018-11-18T13:10:36
| 105,009,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 480
|
r
|
apt_rate.R
|
library(tidyverse)
library(ggthemes)
library(readxl)

# Apartment price index for Geoje, one column per year (2003-2017).
APT_rate <- read_excel("datasets/APT_rate.xlsx")

# Reshape to long format once and reuse it for both the plot and the CSV
# export (the original computed the same gather() twice).
# pivot_longer() replaces the superseded gather().
apt_long <- APT_rate %>%
  pivot_longer(`2003`:`2017`, names_to = "year", values_to = "value")

# Line chart of the yearly trend with value labels above each point.
apt_long %>%
  ggplot(aes(x = year, y = value)) +
  geom_line(group = 1) +
  geom_text(aes(label = value), vjust = -1) +
  theme_economist() +
  labs(title = "거제 아파트 가격 추이", subtitle = "2003~2017, 출처: 통계청 e-지방지표")

# Export the long-format data for later use.
apt_long %>%
  write_csv("3장_1.csv")
|
5c79bbb6ad988f83731cb5b5c4a4be2a46166347
|
fd754fd3d3b1a4653456e77295a2aa6079224674
|
/man/flipn.Rd
|
866292b7bc17539d400fc9806abeaa3590c36a4f
|
[] |
no_license
|
cran/nptest
|
aeaf83979c692038aff68f6da5e350c4330167fb
|
f471e4e4ac5a5789bc610da5d3e15ff32342964c
|
refs/heads/master
| 2023-04-28T05:14:58.839986
| 2023-04-14T18:50:02
| 2023-04-14T18:50:02
| 182,997,519
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,292
|
rd
|
flipn.Rd
|
\name{flipn}
\alias{flipn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Generate All Sign-Flips of n Elements
}
\description{
Generates all \eqn{2^n} vectors of length \eqn{n} consisting of the elements -1 and 1.
}
\usage{
flipn(n)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Number of elements.
}
}
\details{
Adapted from the "bincombinations" function in the \href{https://CRAN.R-project.org/package=e1071}{e1071} R package.
}
\value{
Matrix of dimension \eqn{n} by \eqn{2^n} where each column contains a unique sign-flip vector.
}
\references{
Meyer, D., Dimitriadou, E., Hornik, K., Weingessel, A., & Leisch, F. (2018). e1071: Misc Functions of the Department of Statistics, Probability Theory Group (Formerly: E1071), TU Wien. R package version 1.7-0. https://CRAN.R-project.org/package=e1071
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\note{
Used for exact tests in \code{\link{np.loc.test}} and \code{\link{np.reg.test}}.
}
\section{Warning }{
For large \code{n} this function will consume a lot of memory and may even crash R.
}
\examples{
flipn(2)
flipn(3)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }% use one of RShowDoc("KEYWORDS")
|
812b4c997c6f9ce78f8ab3eb6677937c6b6b8e10
|
c9342eb0e09061d4fb8b072a04a3a95a822fef5c
|
/d010_comparando_dos_muestras.r
|
756048967366915615ae432ef217cf51c193059a
|
[] |
no_license
|
dmontaner-teaching/intuitive_stats
|
8c9ee7ddce616b3d9a96020485534d75d62426d5
|
390d6fa763fc8cc377ed7ac5e9c993fd30885321
|
refs/heads/master
| 2021-01-09T20:19:54.794339
| 2016-05-27T09:20:07
| 2016-05-27T09:20:07
| 59,821,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,750
|
r
|
d010_comparando_dos_muestras.r
|
# NOTE(review): rm(list = ls()) wipes the workspace; acceptable in a teaching
# script, but avoid it in reusable code.
rm (list = ls ())
################################################################################
### Two populations
# Two large "populations", both standard normal (mean 0, sd 1).
X <- rnorm (1000000, mean = 0, sd = 1)
Y <- rnorm (1000000, mean = 0, sd = 1)
length (X)
length (Y)
# Peek at a few values from each population.
X[1:10]
Y[1:10]
Y[1000:1010]
summary (X)
summary (Y)
hist (X)
x11()  # open a second graphics device so both histograms stay visible
hist (Y)
# Side-by-side boxplots of the two populations.
boxplot (X, Y)
################################################################################
## RANDOM SAMPLING
N <- 100 ## sample size
N
# Draw one random sample of size N from each population.
x <- sample (X, size = N)
y <- sample (Y, size = N)
x
y
################################################################################
## Explore the samples
summary (x)
summary (y)
boxplot (x, y)
## measure of central tendency
mx <- mean (x)
my <- mean (y)
mx
my
## difference of means
mx - my
## fold change
mx/my
## log fold change
log (mx/my)
## variance
var (x)
var (y)
## standard deviation
sd (x)
sd (y)
################################################################################
## Inference about the population from the sample
## t statistic (more or less): standardized difference of means
t <- (mx - my) * sqrt (N/2) / sd (c (x, y))
t
## built-in R function
t.test (x, y)
## CHANGE THE POPULATION PARAMETERS
## AND WATCH THE SIGN OF THE STATISTIC
################################################################################
## distribution of the statistic under the NULL HYPOTHESIS
R <- 1000
# NOTE(review): growing vectors with c() inside the loop is O(R^2); fine for a
# demo of this size, but preallocate in production code.
rept <- NULL
repmd <- NULL
reped <- NULL
for (i in 1:R) {
print (i)
## sampling
x <- sample (X, size = N)
y <- sample (Y, size = N)
## summary: searching for a sufficient statistic
mx <- mean (x)
my <- mean (y)
md <- mx - my
ed <- sd (c(x, y)) / sqrt (N/2)
t <- md / ed
## store the results
rept <- c (rept, t)
repmd <- c (repmd, md)
reped <- c (reped, ed)
}
rept[1:10]
hist (rept)
plot (repmd, reped)
# flag "extreme" statistics (|t| > 2, roughly the 5% two-sided cutoff)
extremo <- abs (rept) > 2
table (extremo)
plot (repmd, reped, col = c ("blue", "red")[extremo + 1])
################################################################################
## P-VALUES
# for a single sample
x <- sample (X, size = N)
y <- sample (Y, size = N)
## summary: searching for a sufficient statistic
mx <- mean (x)
my <- mean (y)
md <- mx - my
ed <- sd (c(x, y)) / sqrt (N/2)
t <- md / ed
t
# place the observed statistic on the simulated null distribution
hist (rept)
abline (v = t)
abline (v = 2, col = "red")
## the p-value is the tail area below t if t is negative, or above t if positive
rept < t
sum (rept < t)
sum (rept < t) / length (rept)
sign (t)
# two-sided empirical p-value against the simulated null distribution
if (sign (t) == 1) {
pvalor <- 2 * sum (rept > t) / length (rept)
} else {
pvalor <- 2 * sum (rept < t) / length (rept)
}
pvalor
## built-in R function
t.test (x, y)
################################################################################
### REPEAT THE WHOLE EXPERIMENT
tt <- t.test (x, y)
tt
# components of the htest object
tt$statistic
tt$p.value
tt$estimate[1]
tt$estimate[2]
tt$estimate[1] - tt$estimate[2]
R <- 1000
rept <- NULL
repp <- NULL
for (i in 1:R) {
print (i)
## sampling
x <- sample (X, size = N)
y <- sample (Y, size = N)
## t-test
tt <- t.test (x, y)
## store the results
rept <- c (rept, tt$statistic)
repp <- c (repp, tt$p.value)
}
# p-value vs statistic; 5% threshold and +-1.96 reference lines
plot (rept, repp)
abline (h = 0.05, col = "red")
abline (v = c (-1.96, 1.96), col = "grey")
### WHY USE THE P-VALUE AND NOT THE STATISTIC
# repeat with a much smaller sample size to compare the two scales
N
N2 <- 5
R <- 1000
rept2 <- NULL
repp2 <- NULL
for (i in 1:R) {
print (i)
## sampling
x <- sample (X, size = N2)
y <- sample (Y, size = N2)
## t-test
tt <- t.test (x, y)
## store the results
rept2 <- c (rept2, tt$statistic)
repp2 <- c (repp2, tt$p.value)
}
#plot (rept2, repp2, col = "green", pch = 20)
points (rept2, repp2, col = "green", pch = 20)
### FALSE POSITIVES
# proportion of tests below 0.05 for each sample size
table (repp2 < 0.05) / length (repp2)
table (repp < 0.05) / length (repp)
|
e64c07565d0684ea1feaad3b56dc71e00ba305aa
|
1e1fce89e08356bb57fffe5f55ec14d4a5a49d84
|
/man/dataList-class.Rd
|
c5c801e623cc1f9c32c70491f86db93b9261d9d6
|
[
"MIT"
] |
permissive
|
Khlick/jonesDataClass
|
8a6a537e2119946cf8b7528ee760e16eb3b4a337
|
7f28df5f9826460ac45c0c745c29f4accf960fc3
|
refs/heads/master
| 2020-03-23T06:19:05.415572
| 2018-08-10T15:31:49
| 2018-08-10T15:31:49
| 141,203,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 982
|
rd
|
dataList-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataList.R
\docType{class}
\name{dataList-class}
\alias{dataList-class}
\alias{dataList}
\alias{plot,dataList,ANY-method}
\title{dataList}
\usage{
\S4method{plot}{dataList,ANY}(x, y = NULL, xlab = "Time", ylab = "",
axes = FALSE, ...)
}
\arguments{
\item{x}{dataList object, required}
\item{y}{ANY: best if a vector of ints or names}
\item{xlab}{X-axis label (units will be appended)}
\item{ylab}{Y-axis label (units will be appended)}
\item{axes}{bool, draw with or without axes (FALSE by default)}
\item{...}{for plot: passed to plot.window. otherwise unused.}
}
\description{
Class Definition for Storing formatted data with methods to access data,
subset traces, append/remove files,
plot
}
\section{Fields}{
\describe{
\item{\code{X}}{vector or matrix}
\item{\code{Y}}{coerced to matrix, if no names present, CELL\%d will be applied}
\item{\code{META}}{storage of units and names}
}}
|
b73422e06ee1ae46c45c742f386190ed708da2d2
|
8b3776c40a59286e53305437b335a3235b4e150e
|
/R/Temp_dependence_off_mass_jan_23_github.R
|
1c10eb74aede8c8626a65a751eec6fa3b6b22085
|
[] |
no_license
|
daan4786/fish-egg-and-temperature
|
235c14e3528e681675acc9a8902afb2de13ced0a
|
502442e178b2cc644eb56aac6c87f4466c214978
|
refs/heads/master
| 2020-12-19T21:50:37.743067
| 2020-11-27T17:22:25
| 2020-11-27T17:22:25
| 235,863,436
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,326
|
r
|
Temp_dependence_off_mass_jan_23_github.R
|
#Necessary packages.
library(ggplot2)
library(cowplot)
library(dplyr)
library(rfishbase)
library(MCMCglmm)
library(HDInterval)
############################################
# The egg mass - temp relationship
############################################
#Note - this is the code used to combine egg masses from Barneche et al. 2018 & Sibly et al. 2018 with temperature data from Fishbase. Temperature data were accessed from Fishbase in March of 2019.
#To convert to mm diameter to wet mass, use same conversion factor as Sibly et al. 2018 (assume eggs are spherical and have the density of water)
#barneche <- read.csv("~Barneche et al. 2018 Glob. Ecol. Biogeog..csv", stringsAsFactor=F) %>% group_by(Species) %>% mutate(barneche_mean_eggsize_g = 0.52*(mean(eggSize_mm)/10)^3) %>% group_by(Species) %>% slice(1L) %>% data.frame() %>% select(Species, barneche_mean_eggsize_g)
#sibly<-read.csv("~Sibly et al. 2018 RSOS.csv", stringsAsFactor=F) %>% mutate(sibly_egg_size_g = Offspring.wet.weight..g, sibly_adult_mass = Adult.wet.weight..g) %>% tidyr::unite_("Species", c("Genus", "Species"), " ") %>% filter(Class != "Elasmobranchii") %>% select(Class,Species, sibly_egg_size_g, sibly_adult_mass)
#This function accesses temperature data from Fishbase:
#fishbase <- popgrowth() %>% data.frame()%>% group_by(SpecCode) %>% mutate(Temp_mean = mean(Temperature, na.rm=T)) %>% slice(1L) %>% ungroup() %>% data.frame() %>% select(SpecCode, Species, Temp_mean) %>% filter(!is.na(Temp_mean))
#dat <- full_join(sibly, fishbase, by="Species")
#dat <- full_join(dat, barneche)
#dat <- dat %>% mutate(egg_mass_g = ifelse(is.na(sibly_egg_size_g), barneche_mean_eggsize_g, sibly_egg_size_g)) %>% select(Class, Species, Temp_mean, egg_mass_g) %>% filter(!is.na(egg_mass_g), !is.na(Temp_mean))
#This is the collated fish egg mass-temperature dataset shown in Fig. 2
#Here and elsewhere in this R file, the .csv files called in the read.csv() function can be found in the /data file on github (https://github.com/daan4786/fish-egg-and-temperature/tree/master/data)
# Collated fish egg mass-temperature dataset shown in Fig. 2
# (columns used below: Egg.mass..g., Preferred.temp).
d<-read.csv("~collated fish egg mass-temperature data.csv",stringsAsFactor=F)
head(d)
# Observed relationship: ln(egg mass) declines linearly with temperature.
modd <- lm(log(Egg.mass..g.)~ Preferred.temp, data = d)
summary(modd)
############################################
#The size and temperature dependencies of egg and larval growth and mortality rates.
############################################
#egg mortality
# Fit: ln(daily egg mortality) ~ temperature (Pepin 1991 data); MCMCglmm gives
# posterior draws for intercept (Sol[,1]) and temperature slope (Sol[,2]).
egg_mort <- read.csv("~Pepin 1991 Can. J. Fish. Aquat. Sci. egg mortality.csv")
head(egg_mort)
egg_mort_mod_mcmc <- MCMCglmm(log(egg.mortality.rate..1.day.)~Temp.C, data = egg_mort, nitt=100000, burnin=10000)
summary(egg_mort_mod_mcmc)
#plot(egg_mort_mod_mcmc) #inspect trace and density plots to ensure convergence.
posterior_egg_mort_temp_dep <- egg_mort_mod_mcmc$Sol[,2]
posterior_egg_mort_int <- egg_mort_mod_mcmc$Sol[,1]
# Point estimates used by the model below: be = intercept on the natural
# scale, Eze = temperature slope.
be <- exp(mean(posterior_egg_mort_int))
Eze <- mean(posterior_egg_mort_temp_dep)
#egg development
# Fit: ln(development time) ~ ln(egg mass) + temperature (Pauly & Pullin 1988).
egg_dev<-read.csv("~Pauly & Pullin 1988 Enviro. Biol. Fish. egg development time.csv", header=T)
egg_dev_mod_mcmc <- MCMCglmm(ln.dt~ln.egg.mass.g+temp.c, data = egg_dev, nitt=100000, burnin=10000)
summary(egg_dev_mod_mcmc)
#plot(egg_dev_mod_mcmc) #inspect trace and density plots to ensure convergence.
posterior_egg_dev_mass_dep <- egg_dev_mod_mcmc$Sol[,2]
posterior_egg_dev_temp_dep <- egg_dev_mod_mcmc$Sol[,3]
posterior_egg_dev_int <- egg_dev_mod_mcmc$Sol[,1]
# ae = intercept on the natural scale; Ege = temperature slope; ne = mass exponent.
ae <- exp(mean(posterior_egg_dev_int))
Ege <- mean(posterior_egg_dev_temp_dep)
ne <- mean(posterior_egg_dev_mass_dep)
#larval growth
# Fit: ln(growth rate) ~ temperature + ln(mass) (Houde 1989 data).
# stringsAsFactors spelled out (the original relied on partial matching of
# the misspelled `stringsAsFactor`).
larv_growth <- read.csv("~houde 1989 Bull. Mar. Sci. larval growth.csv", stringsAsFactors = TRUE)
head(larv_growth)
# FIX: the model was fit to `barn_l_g`, a data frame never defined in this
# script; the data read above is `larv_growth`, so use it here.
l_growth_mod_mcmc <- MCMCglmm(ln.gr~temp+ln.mass, data = larv_growth, nitt=100000, burnin=10000)
summary(l_growth_mod_mcmc)
#plot(l_growth_mod_mcmc)
# Sol columns: 1 = intercept, 2 = temp slope, 3 = ln.mass slope.
posterior_l_growth_mass_dep <- l_growth_mod_mcmc$Sol[,3]
posterior_l_growth_temp_dep <- l_growth_mod_mcmc$Sol[,2]
posterior_l_growth_int <- l_growth_mod_mcmc$Sol[,1]
# al = intercept on the natural scale; Eg = temperature slope; nl = mass exponent.
al <- exp(mean(posterior_l_growth_int))
Eg <- mean(posterior_l_growth_temp_dep)
nl <- mean(posterior_l_growth_mass_dep)
#larval mortality
# Fit: ln(daily larval mortality) ~ ln(mass) + temperature (Pepin 1991 data).
larv_mort <- read.csv("~Pepin 1991 Can. J. Fish. Aquat. Sci. larval mortality.csv", stringsAsFactor=F)
l_mort_mod_mcmc <- MCMCglmm(log(mort.rate.1.day)~ln.mass.g+Temp, data = larv_mort, nitt=100000, burnin=10000)
summary(l_mort_mod_mcmc)
#plot(l_mort_mod_mcmc)
# Sol columns: 1 = intercept, 2 = ln.mass slope, 3 = temperature slope.
posterior_l_mort_mass_dep <- l_mort_mod_mcmc$Sol[,2]
posterior_l_mort_temp_dep <- l_mort_mod_mcmc$Sol[,3]
posterior_l_mort_int <- l_mort_mod_mcmc$Sol[,1]
# bl = intercept on the natural scale; Ez = temperature slope; xl = mass exponent.
bl <- exp(mean(posterior_l_mort_int))
Ez <- mean(posterior_l_mort_temp_dep)
xl <- mean(posterior_l_mort_mass_dep)
#Mass at hatch
# Fit: ln(mass at hatch) ~ ln(egg mass) (Pepin 1991 data).
# stringsAsFactors spelled out (original relied on partial matching).
larv_hat <- read.csv("~Pepin 1991 Can. J. Fish. Aquat. Schi. egg mass-mass at hatch.csv", stringsAsFactors = FALSE)
# FIX: the model referenced `larv_met`, which is never defined in this script;
# the data frame read above is `larv_hat`.
l_met_mcmc <- MCMCglmm(log(mass.at.hatch)~log(egg.mass), data = larv_hat, nitt=100000, burnin=10000)
summary(l_met_mcmc)
#plot(l_met_mcmc)
# NOTE(review): despite the `_temp_dep` suffix, Sol[,2] is the ln(egg.mass)
# slope (used below as exponent q), not a temperature effect.
posterior_l_met_temp_dep <- l_met_mcmc$Sol[,2]
posterior_l_met_int <- l_met_mcmc$Sol[,1]
# Hatch mass model: mass at hatch = I * (egg mass)^q.
I<-exp(mean(posterior_l_met_int))
q<-mean(posterior_l_met_temp_dep)
############################################
#The model
############################################
# Parental fitness: expected number of offspring surviving the egg and larval
# stages per unit of clutch mass invested, as a function of egg mass m (g) and
# temperature Te (C). The remaining arguments are the fitted rate coefficients
# estimated above. Returns a one-column data.frame (survival_best); vectorized
# over m.
my_mod <- function(m, Te, bl, al, xl, nl, I, q, be, ae, Eze, Ege, ne, Ez, Eg) {
  # combined mass exponent from integrating mortality over growth in the
  # larval stage
  k <- xl - nl + 1
  # larval-stage mortality integral between hatch mass (I * m^q) and 0.1 g
  larval <- (bl / al) * (1 / k) * exp((Ez - Eg) * Te) * ((I * m^q)^k - 0.1^k)
  # mortality accumulated over the temperature- and mass-dependent egg stage
  egg <- (be * ae) * exp((Eze + Ege) * Te) * m^ne
  # survival probability times fecundity per clutch mass (1/m)
  survival_best <- exp(larval - egg) * (1 / m)
  data.frame(survival_best)
}
############################################
#Make Fig. 2
############################################
# Candidate egg masses spanning many orders of magnitude, written in
# scientific notation (each segment runs start..start*1000 in steps of start).
mass000000 <- seq(1e-29, 1e-26, by = 1e-29)
mass00000 <- seq(1e-26, 1e-23, by = 1e-26)
mass0000 <- seq(1e-23, 1e-20, by = 1e-23)
mass000 <- seq(1e-20, 1e-17, by = 1e-20)
mass00 <- seq(1e-17, 1e-14, by = 1e-17)
mass0 <- seq(1e-14, 1e-11, by = 1e-14)
mass1 <- seq(1e-11, 1e-08, by = 1e-11)
mass2 <- seq(1e-08, 1e-05, by = 1e-08)
mass3 <- seq(1e-05, 0.01, by = 1e-05)
mass4 <- seq(0.01, 10, by = 0.01)
mass5 <- seq(10, 100, by = 1)
# NOTE(review): mass000 (1e-20..1e-17) is built above but not included here,
# leaving a gap in the search grid -- kept as-is to preserve the original result.
mass <- c(mass000000, mass00000, mass0000, mass00, mass0, mass1, mass2, mass3, mass4, mass5)
temp <- seq(0, 30, by = 1)
# One row per temperature; the optimization loop below fills in the optimum
# egg mass and the fitness at that optimum.
temp_dep <- data.frame(
  temperature = temp,
  optimal_mass_best = numeric(length(temp)),
  optimal_mass_low = numeric(length(temp)),
  optimal_mass_high = numeric(length(temp)),
  survival = numeric(length(temp))
)
size_pd <- 50000
# Long-format container: size_pd posterior draws of the optimal egg mass at
# each integer temperature 0..30. rep(0:30, each = size_pd) replaces the
# original hand-written chain of 31 rep() calls (identical values).
posterior_distribution_predictions <- data.frame(
  temp = rep(0:30, each = size_pd),
  optimal_mass_pred = rep(0, size_pd * length(temp))
)
#For each temperature, I find the egg mass that maximizes survivorship per
#clutch mass, described by the my_mod() function. This egg mass is the optimal
#mass, stored in the temp_dep df. seq_along()/seq_len() replace the fragile
#1:length()/1:n idiom.
for (i in seq_along(temp)) {
  # Parental fitness ("s") across the candidate-mass grid ("l") at temp[i].
  d_d_best <- data.frame(
    l = mass,
    s = my_mod(mass, temp[i], bl, al, xl, nl, I, q, be, ae, Eze, Ege, ne, Ez, Eg)[, 1],
    ss = my_mod(mass, temp[i], bl, al, xl, nl, I, q, be, ae, Eze, Ege, ne, Ez, Eg)[, 1] * mass
  )
  # Store the mass that maximizes fitness, and survivors-per-clutch there.
  temp_dep$optimal_mass_best[i] <- d_d_best$l[d_d_best$s == max(d_d_best$s)]
  temp_dep$survival[i] <- d_d_best$ss[d_d_best$s == max(d_d_best$s)]
  x <- rep(0, size_pd)
  for (j in seq_len(size_pd)) {
    # Propagate parameter uncertainty: one random draw from each posterior,
    # then recompute the optimal mass under those parameters.
    d_d_pd <- data.frame(
      l = mass,
      s = my_mod(mass, temp[i],
                 exp(sample(posterior_l_mort_int, 1)),
                 exp(sample(posterior_l_growth_int, 1)),
                 sample(posterior_l_mort_mass_dep, 1),
                 sample(posterior_l_growth_mass_dep, 1),
                 exp(sample(posterior_l_met_int, 1)),
                 sample(posterior_l_met_temp_dep, 1),
                 exp(sample(posterior_egg_mort_int, 1)),
                 exp(sample(posterior_egg_dev_int, 1)),
                 sample(posterior_egg_mort_temp_dep, 1),
                 sample(posterior_egg_dev_temp_dep, 1),
                 sample(posterior_egg_dev_mass_dep, 1),
                 sample(posterior_l_mort_temp_dep, 1),
                 sample(posterior_l_growth_temp_dep, 1))[, 1]
    )
    x[j] <- d_d_pd$l[d_d_pd$s == max(d_d_pd$s)]
  }
  posterior_distribution_predictions$optimal_mass_pred[posterior_distribution_predictions$temp == (i - 1)] <- x
  print(i)
}
#This is the predicted relationship
mod <- lm(log(optimal_mass_best) ~ temperature, data = temp_dep)
summary(mod)
# Lower and upper 95% highest-density intervals of the 50,000 optimal-mass
# predictions generated from the parameter posteriors, per temperature.
uncertainty_df <- posterior_distribution_predictions %>%
  group_by(temp) %>%
  summarize(ci_low = hdi(log(optimal_mass_pred))[1],
            ci_high = hdi(log(optimal_mass_pred))[2])
# Observed egg mass vs temperature (points + black lm fit) overlaid with the
# model prediction (solid red) and its 95% HDI (dashed red).
# FIX: the original passed method = "lm" to geom_line(), which is not a
# geom_line() argument and only produced an "unknown parameters" warning.
d %>%
  ggplot(aes(x = Preferred.temp, y = log(Egg.mass..g.))) +
  geom_point(shape = 21, stroke = 1.1) +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  ylab("ln egg mass (g)") +
  xlab(expression(paste("temperature (", ""^{o}, "C)"))) +
  theme(legend.position = c(0.5, 0.9), legend.title = element_blank()) +
  geom_line(data = temp_dep, aes(x = temperature, y = log(optimal_mass_best)),
            size = 1, color = "red") +
  geom_line(data = uncertainty_df, aes(x = temp, y = ci_low),
            color = "red", linetype = 2) +
  geom_line(data = uncertainty_df, aes(x = temp, y = ci_high),
            color = "red", linetype = 2) +
  annotate("text", x = 4.5, y = -23.5, label = "y == 0.003 * e^{-0.09*x}",
           parse = TRUE, size = 5) +
  annotate("text", x = 4.5, y = -22, label = "y == 0.013 * e^{-0.11*x}",
           parse = TRUE, size = 5, color = "red")
############################################
#Make Fig. 1
############################################
# Candidate egg masses for the fitness curves (coarser grid than Fig. 2).
mass2 <- seq(1e-08, 1e-05, by = 1e-08)
mass3 <- seq(1e-05, 0.01, by = 1e-05)
mass4 <- seq(0.01, 10, by = 0.01)
mass5 <- seq(10, 100, by = 1)
# NOTE(review): mass2 is rebuilt here but not used below; only mass3:mass5
# enter the grid (kept as in the original).
mass <- c(mass3, mass4, mass5)
# Fitness (survivors per clutch mass) across egg mass at 0, 5, 10 and 15 C.
surv_data <- data.frame(
  mass = rep(mass, 4),
  temp = rep(c(0, 5, 10, 15), each = length(mass))
) %>%
  rowwise() %>%
  mutate(survival_per_fecund_best =
           my_mod(mass, temp, bl, al, xl, nl, I, q, be, ae,
                  Eze, Ege, ne, Ez, Eg)[, 1]) %>%
  data.frame()
head(surv_data)
# One fitness curve per temperature, with a point marking each optimum
# (temp_dep rows 1, 6, 11, 16 correspond to 0, 5, 10 and 15 C).
surv_data %>%
  ggplot(aes(x = log(mass), y = log(survival_per_fecund_best))) +
  geom_line(aes(color = as.factor(temp)), size = 1) +
  theme(legend.position = c(0, 0.9)) +
  labs(color = expression(paste("Temp ", ""^{o}, "C"))) +
  guides(color = guide_legend(ncol = 2)) +
  scale_color_manual(values = c("blue", "green2", "red", "orange")) +
  xlab("ln egg mass (g)") +
  ylab("ln number surviving per clutch mass") +
  geom_point(x = log(temp_dep$optimal_mass_best[1]),
             y = log(max(filter(surv_data, temp == 0)$survival_per_fecund_best)),
             color = "blue", size = 2) +
  geom_point(x = log(temp_dep$optimal_mass_best[6]),
             y = log(max(filter(surv_data, temp == 5)$survival_per_fecund_best)),
             color = "green2", size = 2) +
  geom_point(x = log(temp_dep$optimal_mass_best[11]),
             y = log(max(filter(surv_data, temp == 10)$survival_per_fecund_best)),
             color = "red", size = 2) +
  geom_point(x = log(temp_dep$optimal_mass_best[16]),
             y = log(max(filter(surv_data, temp == 15)$survival_per_fecund_best)),
             color = "orange", size = 2) +
  xlim(c(-10, 0)) +
  ylim(c(-6, 0))
############################################
#Make Fig. 3
############################################
# Size and temperature dependence of egg and larval mortality, growth and
# development, evaluated at two temperatures to help visualize why smaller
# eggs are favored in warmer environments.
# NOTE(review): uses the fitted coefficients (ae, ne, Ege, be, Eze, al, nl,
# Eg, bl, xl, Ez) defined earlier in this script.
rates <- function(m, t) {
  # egg development time: inverse of the mass- and temperature-scaled rate
  dt <- 1 / (ae * m^ne * exp(Ege * t))
  # egg mortality rate (mass-independent)
  emr <- be * exp(Eze * t)
  # mass-specific larval growth rate
  lgr <- al * (m^nl) * exp(Eg * t) / m
  # larval mortality rate
  lmr <- bl * (m^xl) * exp(Ez * t)
  cbind(mass = m, temp = t, dt = dt, emr = emr, lgr = lgr, lmr = lmr)
}
# Mass grid (0.0001-0.1 g) over which the vital rates are evaluated.
mass <- seq(0.0001, 0.1, by = 0.0001)
#Make a df to help plot: stack the mass grid at 5 and 10 C and attach the four
#rates (columns 3-6 of rates(): dt, emr, lgr, lmr) row by row.
rd <- data.frame(mass = c(mass,mass), temp = c(rep(5, length(mass)), rep(10, length(mass))) ) %>% rowwise() %>% mutate(dt = rates(mass, temp)[,3], emr = rates(mass, temp)[,4], lgr = rates(mass, temp)[,5], lmr = rates(mass, temp)[,6]) %>% data.frame()
#Make plots.
# Each panel shows mortality (solid) and growth/development (dashed) against
# mass, with a dotted vertical line at the predicted optimal egg mass for
# that temperature.
larvae_hot <- ggplot() +
  geom_line(data = filter(rd, temp == 10),
            aes(x = mass, y = lmr, linetype = "1"), color = "red", size = 1) +
  geom_line(data = filter(rd, temp == 10),
            aes(x = mass, y = lgr, linetype = "2"), color = "red", size = 1) +
  ggtitle(expression(paste("larvae at ", 10^{o}, "C"))) +
  geom_vline(xintercept = temp_dep$optimal_mass_best[temp_dep$temperature == 10],
             linetype = "dotted", size = 0.8, color = "red") +
  ylab("") + xlab("") +
  scale_linetype_manual(name = "", values = c("1" = 1, "2" = 2),
                        labels = c("mortality", "growth")) +
  theme(legend.position = c(0.5, 0.95), legend.key.width = unit(1, "cm"),
        plot.title = element_text(face = "plain")) +
  scale_x_continuous(breaks = c(0, 0.01, 0.02), limits = c(0, 0.02)) +
  scale_y_continuous(breaks = c(0.05, 0.15, 0.25), limits = c(0.05, 0.25))
egg_hot <- ggplot() +
  geom_line(data = filter(rd, temp == 10),
            aes(x = mass, y = dt, linetype = "2"), color = "red", size = 1) +
  geom_line(data = filter(rd, temp == 10),
            aes(x = mass, y = emr, linetype = "1"), color = "red", size = 1) +
  ggtitle(expression(paste("eggs at ", 10^{o}, "C"))) +
  geom_vline(xintercept = temp_dep$optimal_mass_best[temp_dep$temperature == 10],
             color = "red", linetype = "dotted", size = 0.8) +
  ylab("") + xlab("") +
  scale_linetype_manual(name = "", values = c("1" = 1, "2" = 2),
                        labels = c("mortality", "development")) +
  theme(legend.position = c(0.5, 0.95), legend.key.width = unit(1, "cm"),
        plot.title = element_text(face = "plain")) +
  scale_x_continuous(breaks = c(0, 0.01, 0.02), limits = c(0, 0.02)) +
  scale_y_continuous(breaks = c(0.05, 0.15, 0.25), limits = c(0.05, 0.25))
larvae <- ggplot() +
  geom_line(data = filter(rd, temp == 5),
            aes(x = mass, y = lmr), color = "blue", size = 1) +
  geom_line(data = filter(rd, temp == 5),
            aes(x = mass, y = lgr), color = "blue", linetype = 2, size = 1) +
  theme(legend.position = "none", plot.title = element_text(face = "plain")) +
  ggtitle(expression(paste("larvae at ", 5^{o}, "C"))) +
  geom_vline(xintercept = temp_dep$optimal_mass_best[temp_dep$temperature == 5],
             color = "blue", linetype = "dotted", size = 0.8) +
  ylab("") + xlab("") +
  scale_x_continuous(breaks = c(0, 0.01, 0.02), limits = c(0, 0.02)) +
  scale_y_continuous(breaks = c(0.05, 0.15, 0.25), limits = c(0.05, 0.25))
egg <- ggplot() +
  geom_line(data = filter(rd, temp == 5),
            aes(x = mass, y = dt), color = "blue", linetype = 2, size = 1) +
  geom_line(data = filter(rd, temp == 5),
            aes(x = mass, y = emr), color = "blue", size = 1) +
  scale_color_manual(values = c("green2", "orange")) +
  theme(legend.position = "none", plot.title = element_text(face = "plain")) +
  ggtitle(expression(paste("eggs at ", 5^{o}, "C"))) +
  geom_vline(xintercept = temp_dep$optimal_mass_best[temp_dep$temperature == 5],
             color = "blue", linetype = "dotted", size = 0.8) +
  ylab("") + xlab("") +
  scale_x_continuous(breaks = c(0, 0.01, 0.02), limits = c(0, 0.02)) +
  scale_y_continuous(breaks = c(0.05, 0.15, 0.25), limits = c(0.05, 0.25))
#put plots together and add shared axis labels.
p <- plot_grid(egg_hot, larvae_hot, egg, larvae, align = c("l", "r"),
               labels = "AUTO", label_x = 0.23, label_y = 0.89,
               label_fontface = "plain")
ggdraw(p) +
  draw_label("mass (g)", x = 0.5, y = 0.03, size = 14) +
  draw_label("rate (1/day)", x = 0.03, y = 0.5, angle = 90, size = 14)
|
bdb3312b25de821edd9361a2e2ca7b319d54f77d
|
da9f0de57073624cd26d9e8edd47fc47ed0845a0
|
/xml_parsing.R
|
fa01f90f409fd19778077b47b5f60f3b1fd21129
|
[] |
no_license
|
drhwint/SLBM
|
26811ad7dc058c6f938abe6bc97d329bd5541c5a
|
89bd277530c7662e39120597d76fbcfd5beca0e2
|
refs/heads/master
| 2020-03-14T00:27:28.235754
| 2018-04-28T03:24:55
| 2018-04-28T03:24:55
| 131,356,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
xml_parsing.R
|
## XML parsing code: extract article titles from a PubMed XML export.
# Only install the XML package when it is missing, instead of
# unconditionally reinstalling it on every run.
if (!requireNamespace("XML", quietly = TRUE)) install.packages("XML")
library(XML)
file_name <- 'pubmed_result.xml'
# Parse once into an internal (C-level) DOM so XPath queries are fast.
original_text <- xmlTreeParse(file_name, useInternalNodes = TRUE, encoding = 'UTF-8')
# set root node
rootNode <- xmlRoot(original_text)
# Inspect the first record's title via positional indexing (fragile: the
# position of the title node can differ between records).
rootNode[[1]][[1]][[3]][[2]]
xmlValue(rootNode[[1]][[1]][[3]][[2]])
# Print titles for up to the first 1000 records. Bounding the loop by the
# actual number of child records avoids a subscript-out-of-bounds error
# when the file contains fewer than 1000 entries.
for (i in seq_len(min(1000L, xmlSize(rootNode)))) {
  title <- xmlValue(rootNode[[i]][[1]][[3]][[2]])
  print(title)
}
# relative position differ....try with node name:
# an XPath query collects every ArticleTitle regardless of its position.
title <- data.frame(xpathSApply(rootNode, "//ArticleTitle", xmlValue))
head(title)
colnames(title) <- c('title')
|
7172f1946bc296c84a66ab531581db4ea5a6ce0c
|
86d14b29645ee6f841fb1dcea9a9456a551ebb58
|
/PythonBenchmark/discretize.R
|
0a77c61f9dc6579061f5a219be7f82756419810e
|
[
"BSD-3-Clause"
] |
permissive
|
dongwang218/ExpediaPersonalizedSortCompetition
|
cb3c22189ed94a6ea30ef5054b451d3d7d574e39
|
0d6415e79f6d3df20ab014759cd9cb8734bf0d4b
|
refs/heads/master
| 2021-01-21T08:21:20.117078
| 2013-11-22T06:27:45
| 2013-11-22T06:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,558
|
r
|
discretize.R
|
# Discretize Expedia search/booking features and encode them as expected
# relevance scores, then train a pairwise-ranking GBM. Helper functions
# (split_data, my_dist, my_exp1, my_ndcg) come from gbm_utils.R.
source('gbm_utils.R')
# Row layout of the training file: skip `offset-1` rows, then carve out
# train / validation / test / statistics slices of the sizes below.
offset <- 1
num_train <- num_validate <- 1000000
num_test <- 1000000
num_stats <- 4000000 #917530
num_submission <- 6622629 #min(0, 6622629)
# Smoothing pseudo-count for the expected-score encoder, scaled with the
# amount of data available for statistics and clamped to [10, 60].
prior_cnt <- max(10, min(60, as.integer((num_train+num_stats) / 2000000 * 60)))
drop_cnt <- 10
# Never read past the known file sizes.
read_train <- min(9917530, offset-1+num_train+num_validate+num_test+num_stats)
read_test <- max(1, num_submission)
train <- read.csv('../data/train.csv', na.string = "NULL", nrows=read_train)
test <- read.csv('../data/test.csv', na.string = "NULL", nrows=read_test)
# Relevance label: 5 for a booking (capped, since bookings also count as
# clicks), 1 for a click only, 0 otherwise. Submission rows get -1.
train$score <- pmin(5.0, train$booking_bool * 5 + train$click_bool)
prior <- mean(train$score)
test$score <- -1
# Drop label-leaking columns that exist only in the training file.
train <- subset(train, select = -c(position, click_bool, gross_bookings_usd, booking_bool))
data<-rbind(train, test)
# split_data tags each row with its slice (train/valid/stats/test/submission).
data <- split_data(data, offset, num_train, num_validate, num_stats, num_test, num_submission)
dim(data)
# Per-feature encoding: for each raw feature, optionally bucket it with
# my_dist(), then replace it with the expected relevance score of its bucket
# (my_exp1, computed on rows with split < 3, i.e. train+validation+stats,
# smoothed towards the global prior). Inline number comments are the
# original author's notes on each feature's cardinality/range.
# Booking week-of-year (check-in date = search date + booking window).
data$date_time_dist <- as.numeric(strftime(as.Date(data$date_time) + data$srch_booking_window, format = "%W"))
data$date_time_exp <- my_exp1(data, "date_time_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$site_id_exp <- my_exp1(data, "site_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 230
data$visitor_location_country_id_exp <- my_exp1(data, "visitor_location_country_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 323, 1 to 5
data$visitor_hist_starrating_dist <- my_dist(data, "visitor_hist_starrating", method="distance")
data$visitor_hist_starrating_exp <- my_exp1(data, "visitor_hist_starrating_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# max price 2768.93
data$visitor_hist_adr_usd_dist <- my_dist(data, "visitor_hist_adr_usd")
data$visitor_hist_adr_usd_exp <- my_exp1(data, "visitor_hist_adr_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 230
data$prop_country_id_exp <- my_exp1(data, "prop_country_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# too many
data$prop_id_exp <- my_exp1(data, "prop_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# "0" "1" "2" "3" "4" "5"
data$prop_starrating_exp <- my_exp1(data, "prop_starrating", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# "0" "1" "1.5" "2" "2.5" "3" "3.5" "4" "4.5" "5"
data$prop_review_score_exp <- my_exp1(data, "prop_review_score", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# keep brand_bool
#<400 score, 0 to 7
data$prop_location_score1_dist <- my_dist(data, "prop_location_score1")
data$prop_location_score1_exp <- my_exp1(data, "prop_location_score1_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# many 0 to 1
data$prop_location_score2_dist <- my_dist(data, "prop_location_score2")
data$prop_location_score2_exp <- my_exp1(data, "prop_location_score2_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 390 0 to 6.21
data$prop_log_historical_price_dist <- my_dist(data, "prop_log_historical_price", method="distance")
data$prop_log_historical_price_exp <- my_exp1(data, "prop_log_historical_price_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$price_usd_dist <- my_dist(data, "price_usd")
data$price_usd_exp <- my_exp1(data, "price_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# keep promotion_flag, cross with brand
# many
data$srch_destination_id_exp <- my_exp1(data, "srch_destination_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 1 to 59
data$srch_length_of_stay_exp <- my_exp1(data, "srch_length_of_stay", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# 0 to 498
data$srch_booking_window_dist <- my_dist(data, "srch_booking_window")
data$srch_booking_window_exp <- my_exp1(data, "srch_booking_window_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# cross this with prop_id
data$srch_adults_count_exp <- my_exp1(data, "srch_adults_count", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$srch_children_count_exp <- my_exp1(data, "srch_children_count", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$srch_room_count_exp <- my_exp1(data, "srch_room_count", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# keep srch_saturday_night_bool
# srch_query_affinity_score is a log10 probability; undo the log
# (tiny probability values).
data$srch_query_affinity_score_exp <- 10 ** data$srch_query_affinity_score
data$orig_destination_distance_dist <- my_dist(data, "orig_destination_distance")
data$orig_destination_distance_exp <- my_exp1(data, "orig_destination_distance_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# Competitor features comp1..comp8 all receive the same treatment:
#   * expected-score encoding of the rate flag and the inventory flag,
#   * distance-bucketing of the percent price difference, then
#     expected-score encoding of those buckets.
# A single loop replaces eight near-identical copy-pasted stanzas; columns
# are created with the same names and in the same order as before, so the
# later grep(".*_exp$") feature selection is unaffected.
for (k in 1:8) {
  rate_col <- paste0("comp", k, "_rate")
  inv_col  <- paste0("comp", k, "_inv")
  diff_col <- paste0("comp", k, "_rate_percent_diff")
  data[[paste0(rate_col, "_exp")]] <- my_exp1(data, rate_col, "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
  data[[paste0(inv_col, "_exp")]] <- my_exp1(data, inv_col, "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
  data[[paste0(diff_col, "_dist")]] <- my_dist(data, diff_col)
  data[[paste0(diff_col, "_exp")]] <- my_exp1(data, paste0(diff_col, "_dist"), "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
}
# Total competitor-rate and competitor-inventory signals, summed over
# comp1..comp8 with NA flags counted as 0. rowSums(..., na.rm = TRUE) is
# exactly equivalent to summing ifelse(is.na(x), 0, x) per column, without
# eight copy-pasted ifelse() terms.
comp_rate_cols <- paste0("comp", 1:8, "_rate")
comp_inv_cols <- paste0("comp", 1:8, "_inv")
data$num_comp_rate <- rowSums(data[, comp_rate_cols], na.rm = TRUE)
data$num_comp_inv <- rowSums(data[, comp_inv_cols], na.rm = TRUE)
# A zero total is treated as missing (no competitor information).
data$num_comp_rate <- ifelse(data$num_comp_rate == 0, NA, data$num_comp_rate)
data$num_comp_inv <- ifelse(data$num_comp_inv == 0, NA, data$num_comp_inv)
# Expected-score encoding of the competitor totals.
# NOTE(review): unlike every other encoder in this script, these two use
# all labeled rows (score >= 0) and a fixed pseudo-count of 10 instead of
# (split < 3, prior_cnt) — confirm whether that asymmetry is intentional.
data$num_comp_rate_exp <- my_exp1(data, "num_comp_rate", "score", data$score >= 0, 10, prior, 0.0, drop_cnt)
data$num_comp_inv_exp <- my_exp1(data, "num_comp_inv", "score", data$score >= 0, 10, prior, 0.0, drop_cnt)
# Two-way cross features: expected score conditioned on pairs of buckets
# (my_exp2). The if (TRUE) wrapper is a manual toggle to skip this section.
if (TRUE) {
data$prop_id_date_time_exp <- my_exp2(data, "date_time_dist", "prop_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_srch_children_count_exp <- my_exp2(data, "prop_id", "srch_children_count", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_vistor_location_country_id_exp <- my_exp2(data, "prop_id", "visitor_location_country_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_visitor_hist_starrating_exp <- my_exp2(data, "prop_id", "visitor_hist_starrating_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_visitor_hist_adr_usd_exp <- my_exp2(data, "prop_id", "visitor_hist_adr_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_srch_length_of_stay_exp <- my_exp2(data, "prop_id", "srch_length_of_stay", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_promotion_flag_exp <- my_exp2(data, "prop_id", "promotion_flag", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_id_srch_destination_id_exp <- my_exp2(data, "prop_id", "srch_destination_id", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$visitor_hist_starrating_promotion_flag_exp <- my_exp2(data, "visitor_hist_starrating_dist", "promotion_flag", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
# compute avg of comp rate, inv, percent_diff
data$prop_starrating_visitor_hist_starrating_exp <- my_exp2(data, "prop_starrating", "visitor_hist_starrating_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_log_historical_price_visitor_hist_adr_usd_exp <- my_exp2(data, "prop_log_historical_price_dist", "visitor_hist_adr_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$price_usd_visitor_hist_adr_usd_exp <- my_exp2(data, "price_usd_dist", "visitor_hist_adr_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$num_comp_rate_visitor_hist_adr_usd_exp <- my_exp2(data, "num_comp_rate", "visitor_hist_adr_usd_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
data$prop_review_score_prop_location_score1_exp <- my_exp2(data, "prop_review_score", "prop_location_score1_dist", "score", data$split < 3, prior_cnt, prior, 0.0, drop_cnt)
}
# Checkpoint the fully featurized frame before the (long) model training.
save(data, file = "../data/train_test_disc_crossings.RData")
# Feature set: all expected-score encodings created above, except one that
# was presumably found unhelpful or unstable.
#exp_names = names(data)[grep(".*(prop_brand_bool|srch_saturday_night_bool|_exp)$", names(data))]
exp_names = names(data)[grep(".*_exp$", names(data))]
exp_names = exp_names[exp_names != "prop_id_visitor_hist_adr_usd_exp"]
# Model formula = all raw features + all *_exp encodings.
gbm_formula <- paste("score ~ date_time_dist+site_id+visitor_location_country_id+visitor_hist_starrating+visitor_hist_adr_usd+prop_country_id+prop_id+prop_starrating+prop_review_score+prop_brand_bool+prop_location_score1+prop_location_score2+prop_log_historical_price+price_usd+promotion_flag+srch_destination_id+srch_length_of_stay+srch_booking_window+srch_adults_count+srch_children_count+srch_room_count+srch_saturday_night_bool+srch_query_affinity_score+orig_destination_distance+comp1_rate+comp1_inv+comp1_rate_percent_diff+comp2_rate+comp2_inv+comp2_rate_percent_diff+comp3_rate+comp3_inv+comp3_rate_percent_diff+comp4_rate+comp4_inv+comp4_rate_percent_diff+comp5_rate+comp5_inv+comp5_rate_percent_diff+comp6_rate+comp6_inv+comp6_rate_percent_diff+comp7_rate+comp7_inv+comp7_rate_percent_diff+comp8_rate+comp8_inv+comp8_rate_percent_diff+", paste(exp_names, collapse="+"))
# Train a pairwise LambdaMART-style GBM optimizing NDCG@38 grouped by
# search id, on the train (split==1) + stats (split==3) rows; half is held
# out via train.fraction for early stopping.
train_sample <- data[(data$split==1) | (data$split==3), ]
gbm.ndcg <- gbm(as.formula(gbm_formula), data=train_sample, train.fraction=0.5, n.trees=2000, interaction.depth=8, n.minobsinnode=20, shrinkage=0.005, bag.fraction=0.5, verbose=TRUE, cv.folds=0, keep.data=TRUE, n.cores=16, distribution=list(name="pairwise", metric="ndcg", max.rank=38, group="srch_id"))
# Pick the iteration with the best held-out NDCG.
best.iter.ndcg <- gbm.perf(gbm.ndcg, method='test')
title('Training of pairwise model with ndcg metric')
summary(gbm.ndcg, n.trees=best.iter.ndcg, main='pairwise (ndcg)')
save(gbm.ndcg, file="../Models/gbm.ndcg.exp.no_cross.heavy.RData")
# Evaluate NDCG@38 on the held-out test split (split==4).
test <- data[data$split == 4, ]
predict.test <- predict(gbm.ndcg, test, best.iter.ndcg)
my_ndcg(test$score, test$srch_id, -predict.test, 38)
# Write the Kaggle submission file: properties sorted by descending score
# within each search.
if (num_submission > 0) {
submission <- data[data$split == 5, ]
predict.submission <- predict(gbm.ndcg, submission, best.iter.ndcg)
submission_p <- data.frame("srch_id" = submission$srch_id, "prop_id" = submission$prop_id, "pred" = predict.submission)
submission_p <- submission_p[with(submission_p, order(srch_id, -pred)), ]
#submission_p <- subset(submission_p, select = -c(pred))
names(submission_p) <- c("SearchId","PropertyId","Prediction")
write.table(submission_p, "../Submissions/submssision_gbm.ndcg.exp.no_cross.heavy.csv", sep = ",", row.names=FALSE, quote=FALSE)
}
#gbm.ndcg1 <- gbm.more(gbm.ndcg, 1000)
|
b230eafea0f9194bb42fd1a68c44b8ac23db220b
|
40ffcc2188952550ea8db4b0fc940abd9e01e5d4
|
/R/11_SummarisePredictions.R
|
f01d083d845c03d9b60be06ba5e9ece0624d0dfb
|
[] |
no_license
|
chantalhari/BioScen1.5_SDM
|
7d2d5f7b998dafa0779daeabec6c8a697b9e0981
|
7aa9ed086a0a3e6c03216dfc6aa05e7b0550ddc1
|
refs/heads/master
| 2023-03-16T00:45:45.769026
| 2020-07-29T09:48:22
| 2020-07-29T09:48:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,083
|
r
|
11_SummarisePredictions.R
|
#' Code to summarise the model output: average the per-block SDM prediction
#' files per species into one csv per species/model-type.
# NOTE(review): rm(list=ls()) wipes the whole workspace — standard advice is
# to avoid this in scripts; kept here because the workflow relies on it.
rm(list=ls()); gc()
library(dplyr)
library(tidyr)
library(readr)
library(ggplot2)
library(snowfall)
# Set file directory
filedir <- "I:/"
# Taxon and model type are selected manually by index before each run.
taxa <- c("Amphibian", "Mammal", "Bird", "Reptile")
i <- 3
# Model type
k <- 4; model_type <- c("GAM", "GBM", "MaxEnt", "RF")[k]
# Time steps (reptiles were only modelled for three periods).
if(taxa[i]=="Reptile"){timesteps <- c(1995, 2050, 2080)} else{
timesteps <- c(1845, 1990, 1995, 2009, 2010, 2020, 2026, 2032, 2048, 2050,
2052, 2056, 2080, 2100, 2150, 2200, 2250)
}
# One prediction directory per timestep.
predictsPaths <- sapply(timesteps, function(x){
paste0(filedir, "/", taxa[i], "_", model_type, "_predictions_", x)
})
# Output directory
sum_predPath <- paste0(filedir, "/", taxa[i], "_", model_type, "_predictions")
if(!dir.exists(sum_predPath)){dir.create(sum_predPath)}
# Flat list of all prediction files across timesteps.
Modelfiles <- lapply(predictsPaths, function(x) list.files(x, full.names=TRUE))
Modelfiles <- do.call("c", Modelfiles)
# Check one output
Model1File <- read.csv(list.files(predictsPaths[[1]], full.names=TRUE)[1])
#head(Model1File)
# Aggregate the per-iteration AUC values per species and keep only species
# whose mean AUC >= 0.7 in *every* available model type (2 for reptiles,
# 4 otherwise).
if(taxa[i]=="Reptile"){
# Extract species names
AUC_data <- lapply(c("GAM", "GBM"), function(model_type){
read.csv(paste0(filedir, "/AUCvalues_All_",
model_type, "_", taxa[i], ".csv.xz"))})
AUC_data <- do.call(rbind, AUC_data)
AUC_sum <- AUC_data %>% group_by(Species, taxa, model_type) %>%
dplyr::summarise(mean = mean(AUC, na.rm=T)) %>% filter(mean >= 0.7) %>% ungroup() %>%
group_by(Species, taxa) %>% dplyr::summarise(n = n()) %>% filter(n == 2)
} else{
# Extract species names
AUC_data <- lapply(c("GAM", "GBM", "MaxEnt", "RF"), function(model_type){
read.csv(paste0(filedir, "/AUCvalues_All_",
model_type, "_", taxa[i], ".csv.xz"))})
AUC_data <- do.call(rbind, AUC_data)
AUC_sum <- AUC_data %>% group_by(Species, taxa, model_type) %>%
dplyr::summarise(mean = mean(AUC, na.rm=T)) %>% filter(mean >= 0.7) %>% ungroup() %>%
group_by(Species, taxa) %>% dplyr::summarise(n = n()) %>% filter(n == 4)
}
spNames <- unique(AUC_sum$Species)
length(spNames)
# Only (re)process species whose summary file does not exist yet, so the
# script can resume after an interruption.
names_mis <- lapply(spNames, function(x){
if(!file.exists(paste0(sum_predPath, "/", x, "_", model_type, "_predict.csv.xz"))){
return(x)
}
})
names_mis <- unlist(Filter(Negate(is.null), names_mis))
# Remove Ursus maritimus
names_mis <- names_mis[!names_mis %in% "Ursus_maritimus"]
length(names_mis)
#' Check of model prediction files
# List all prediction files
#files <- unlist(lapply(dir(filedir, pattern=paste0(model_type, "_predictions_"), full.names = T),
# function(x){list.files(x, full.names=T)}))
#length(files)
##Get all files for one species
#files <- unlist(lapply(names_mis, function(species){files[grepl(files, pattern=species)]}))
#length(files)
#head(files)
# Check for corrupt files
#snowfall::sfInit(parallel=TRUE, cpus=ceiling(0.25*parallel::detectCores()))
#corrupt_files <- snowfall::sfLapply(files, function(x){
# data <- tryCatch(readr::read_csv(x), error=function(e) e) #The prediction files
# if(inherits(data, "error")){
# return(x)
# }
#}); snowfall::sfStop()
#corrupt_files <- unlist(Filter(Negate(is.null), corrupt_files))
#length(corrupt_files)
#file.remove(corrupt_files) # Remove corrupt files
#' Loop through all species names and save a summarized prediction csv
#' (per-cell mean across blocks/pseudo-absence runs, one column per
#' model_rcp_year combination).
n <- 10
sfInit(parallel=TRUE, cpus=n)
sfLibrary(dplyr); sfLibrary(readr); sfLibrary(tidyr)
sfExport(list=c("Modelfiles", "sum_predPath", "model_type", "timesteps"))
# NOTE(review): a snowfall cluster is initialized above but plain lapply()
# is used below, so this actually runs serially — presumably sfLapply()
# was intended; confirm before changing.
lapply(names_mis, function(species){
##Get all files for one species
spFiles <- Modelfiles[grepl(Modelfiles, pattern=paste0(species, "_"))]
avg_data <- lapply(timesteps, function(z){
print(paste(species, z))
##Import data for all species separated by timeframe
Files <- grep(spFiles, pattern=z, value=TRUE)
# Read data and merge into one dataframe
data <- readr::read_csv(Files)
# Undo R's name mangling of '-' to '.' in GCM names.
colnames(data) <- gsub("[.]", "-", colnames(data))
#data <- lapply(Files, function(x){read.csv(paste0(x))})
#data <- do.call("bind_rows", data)
## Determine which model_rcp combinations exist for this timestep.
model <- c("GFDL-ESM2M", "HadGEM2-ES", "IPSL-CM5A-LR", "MIROC5")
if(z == 1845){
rcp <- "piControl"
model_rcp <- expand.grid(model=model, rcp=rcp)
model_rcp <- tidyr::unite(model_rcp, "model_rcp", c(model,rcp))
model_rcp <- as.vector(model_rcp$model_rcp)
} else if(z == 1990){
rcp <- "historical"
model_rcp <- expand.grid(model=model, rcp=rcp)
model_rcp <- tidyr::unite(model_rcp, "model_rcp", c(model,rcp))
model_rcp <- as.vector(model_rcp$model_rcp)
} else if(z == 1995){
model_rcp <- "EWEMBI"
} else if(model_type %in% c("GAM", "GBM") & z %in% c(2050, 2080)){
rcp <- c("rcp26", "rcp60", "rcp85")
model_rcp <- expand.grid(model=model, rcp=rcp)
model_rcp <- tidyr::unite(model_rcp, "model_rcp", c(model,rcp))
model_rcp <- as.vector(model_rcp$model_rcp)
} else if(z < 2100){
rcp <- c("rcp26", "rcp60")
model_rcp <- expand.grid(model=model, rcp=rcp)
model_rcp <- tidyr::unite(model_rcp, "model_rcp", c(model,rcp))
model_rcp <- as.vector(model_rcp$model_rcp)
} else{
model_rcp <- c("HadGEM2-ES_rcp26", "IPSL-CM5A-LR_rcp26", "MIROC5_rcp26")
}
period <- z
# Some raw files lack headers; repair the column names in place (and
# write the repaired file back) based on the known column layout for
# each period. y = number of model/rcp combinations in the file.
if(colnames(data)[1]!="x"){
n <- ncol(data)-2
if(period == 1995){
y <- 1
colnames(data)[3:length(colnames(data))] <- paste0(model_type, "_EWEMBI_1995_block_", seq(1:(n/y)))
colnames(data)[1] <- "x"
colnames(data)[2] <- "y"
} else if(period == 1845){
y <- 4
colnames(data)[3:length(colnames(data))] <- c(paste0(model_type, "_GFDL-ESM2M_piControl_block_", seq(1:(n/y))),
paste0(model_type, "_HadGEM2-ES_piControl_block_", seq(1:(n/y))),
paste0(model_type, "_IPSL-CM5A-LR_piControl_block_", seq(1:(n/y))),
paste0(model_type, "_MIROC5_piControl_block_", seq(1:(n/y))))
colnames(data)[1] <- "x"
colnames(data)[2] <- "y"
} else if(period == 1990){
y <- 4
colnames(data)[3:length(colnames(data))] <- c(paste0(model_type, "_GFDL-ESM2M_historical_block_", seq(1:(n/y))),
paste0(model_type, "_HadGEM2-ES_historical_block_", seq(1:(n/y))),
paste0(model_type, "_IPSL-CM5A-LR_historical_block_", seq(1:(n/y))),
paste0(model_type, "_MIROC5_historical_block_", seq(1:(n/y))))
colnames(data)[1] <- "x"
colnames(data)[2] <- "y"
}else if(period <= 2080){
y <- 8
colnames(data)[3:length(colnames(data))] <- c(paste0(model_type, "_GFDL-ESM2M_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_GFDL-ESM2M_rcp60_block_", seq(1:(n/y))),
paste0(model_type, "_HadGEM2-ES_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_HadGEM2-ES_rcp60_block_", seq(1:(n/y))),
paste0(model_type, "_IPSL-CM5A-LR_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_IPSL-CM5A-LR_rcp60_block_", seq(1:(n/y))),
paste0(model_type, "_MIROC5_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_MIROC5_rcp60_block_", seq(1:(n/y))))
colnames(data)[1] <- "x"
colnames(data)[2] <- "y"
} else{
y <- 3
colnames(data)[3:length(colnames(data))] <- c(paste0(model_type, "_HadGEM2-ES_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_IPSL-CM5A-LR_rcp26_block_", seq(1:(n/y))),
paste0(model_type, "_MIROC5_rcp26_block_", seq(1:(n/y))))
colnames(data)[1] <- "x"
colnames(data)[2] <- "y"
}
readr::write_csv(x=data, path=Files)
}
## Select all data for one model_rcp combination
## And calculate average among blocks and PAs (rounded to 3 decimals).
avg_data <- lapply(model_rcp, FUN=function(w){
sub_data <- data %>% dplyr::select(x, y, contains(w)) %>% drop_na() %>%
tidyr::gather(var, value, -c(x,y)) %>%
dplyr::group_by(x,y) %>% dplyr::summarise(mean=round(mean(value, na.rm=TRUE), 3))
colnames(sub_data) <- c("x", "y", paste0(w, "_", z))
return(sub_data)
})
# One wide frame per timestep: x, y, then one column per model_rcp.
avg_data <- Reduce(function(x, y) full_join(x, y, by=c("x","y")), avg_data)
# Keep a single all-NA row so the outer join across timesteps still works
# when this timestep produced no cells.
if(nrow(avg_data)==0){
avg_data[1,] <- NA
avg_data$x <- as.numeric(avg_data$x)
avg_data$y <- as.numeric(avg_data$y)
}
gc()
return(avg_data)
})
# Combine all timesteps into one wide frame keyed by cell (x, y).
avg_data <- Reduce(function(x,y) full_join(x, y, by=c("x", "y")), avg_data)
# Remove entries where all data is 0
avg_data <- avg_data[!!rowSums(abs(avg_data[-c(1,2)]), na.rm=T),]
# Remove entries where all data are NA
avg_data <- avg_data[rowSums(is.na(avg_data)) != ncol(avg_data), ]
# Save the per-species summary (xz-compressed csv).
readr::write_csv(avg_data, path=paste0(sum_predPath, "/", species, "_",
model_type, "_predict.csv.xz"))
return(NULL)
})
sfStop() # Close the cluster
#system('shutdown -s')
# Sanity check: read back one summary file.
test <- read.csv(list.files(sum_predPath, full.names=T)[[1]])
head(test)
# Plot prediction
#ggplot(data=test, aes(x=x, y=y, fill=MIROC5_rcp26_2020)) + geom_raster()
########################################
#' Code to save individual model predictions in one NetCDF file
#' (one file per GCM/RCP combination, one variable per species).
# NOTE(review): rm(list=ls()) wipes the workspace — kept as in the original
# workflow, but beware when sourcing this after the section above.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(ncdf4)
# File directory — the commented alternatives are the different compute
# hosts this script was run on; exactly one is active.
#filedir <- "/scratch/home/mbiber/data" # shinichi
#filedir <- "/bigdata_local/mbiber" # ceremony - Mammals
#filedir <- "/home/mbiber/data" # ceremony - Birds
filedir <- "/bigdata/mbiber/data" # Darkstar
#filedir <- "/scratch/mbiber/data"
#filedir <- "H:/"
# Taxon selected manually by index (note: no "Reptile" in this section).
taxa <- c("Amphibian", "Mammal", "Bird")
i <- 3
# Output directory
sum_predPath <- paste0(filedir, "/OutputData/biodiversity/")
if(!dir.exists(sum_predPath)){dir.create(sum_predPath, recursive=T)}
# Read the per-model AUC tables.
AUC_data <- lapply(c("GAM", "GBM", "MaxEnt", "RF"), function(model_type){
read.csv(paste0(filedir, "/AUCvalues_All_",
model_type, "_", taxa[i], ".csv.xz"))})
AUC_data <- do.call(rbind, AUC_data)
# Aggregate the per-iteration AUC values per species and keep species with
# mean AUC >= 0.7 in all 4 model types.
AUC_sum <- AUC_data %>% group_by(Species, taxa, model_type) %>%
summarise(mean = mean(AUC, na.rm=T)) %>% filter(mean >= 0.7) %>% ungroup() %>%
group_by(Species, taxa) %>% summarise(n = n()) %>% filter(n == 4)
spNames <- unique(AUC_sum$Species); rm(AUC_data, AUC_sum)
# Remove Ursus maritimus
spNames <- spNames[!spNames == "Ursus_maritimus"]
length(spNames)
# Model-type / scenario combinations to export; "1995" and "2100rcp26" are
# special pseudo-scenarios handled below.
model_type <- "RF" #c("GAM", "GBM", "MaxEnt", "RF")
df <- expand.grid(model_type=model_type,
rcp=c("historical", "piControl", "rcp26", "rcp60"))
df <- rbind(expand.grid(model_type=model_type, rcp="1995"), df)
df <- rbind(df, expand.grid(model_type=model_type, rcp="2100rcp26"))
df
#' Loop through all species names and save NetCDF file
# Run code for each model output file; rcp_mod selects one row of df and is
# set manually before each run.
rcp_mod <- 4
# Map the selected scenario to its set of 30-year-mean timesteps.
if(df$rcp[rcp_mod] == "piControl"){
year <- 1845
} else if(df$rcp[rcp_mod] == "1995"){
year <- 1995
} else if(df$rcp[rcp_mod] == "historical"){
year <- 1990
} else if(df$rcp[rcp_mod] %in% c("rcp26", "rcp60")){
year <- c(2009, 2010, 2020, 2026, 2032, 2048, 2050, 2052, 2056, 2080)
} else if(df$rcp[rcp_mod] == "2100rcp26"){
year <- c(2100, 2150, 2200, 2250)
# The "2100rcp26" pseudo-scenario is rewritten to plain "rcp26" so the
# filename construction below matches — note this mutates df in place.
df$rcp[rcp_mod] <- "rcp26"
}
# One input directory per timestep.
predictsPaths <- sapply(year, function(x){paste0(filedir, "/", taxa[i], "_",
df$model_type[rcp_mod], "_predictions_", x)})
# Build the ISIMIP-style output filename(s): one file per GCM (EWEMBI has
# no GCM; post-2100 runs exist only for three GCMs).
if(df$rcp[rcp_mod] == "1995"){
model="EWEMBI"
filename <- paste0(filedir, "/OutputData/biodiversity/bioscen1.5-sdm-",
tolower(df$model_type[rcp_mod]),
"_ewembi_nobc_hist_nosoc_co2_", tolower(taxa[i]),
"prob_global_30year-mean_1995_1995.nc4")
} else{
if(any(year >= 2100)){
model=c("MIROC5", "HadGEM2.ES", "IPSL.CM5A.LR")
filename <- sapply(model, function(m){paste0(filedir, "/OutputData/biodiversity/bioscen1.5-sdm-", tolower(df$model_type[rcp_mod]), "_",
gsub("[.]", "-", tolower(m)), "_ewembi_2100",
tolower(df$rcp[rcp_mod]), "_nosoc_co2_",
tolower(taxa[i]), "prob_global_30year-mean_",
min(year), "_", max(year),".nc4")})
} else{
model=c("MIROC5", "HadGEM2.ES", "IPSL.CM5A.LR", "GFDL.ESM2M")
filename <- sapply(model, function(m){paste0(filedir, "/OutputData/biodiversity/bioscen1.5-sdm-", tolower(df$model_type[rcp_mod]), "_",
gsub("[.]", "-", tolower(m)), "_ewembi_",
tolower(df$rcp[rcp_mod]), "_nosoc_co2_",
tolower(taxa[i]), "prob_global_30year-mean_",
min(year), "_", max(year),".nc4")})
}
}
# How many of the expected output files are still missing?
length(which(!file.exists(filename)))
if (length(which(!file.exists(filename))) > 0) {
  # NetCDF dimensions: global 0.5-degree grid, time in years since
  # 1661-01-01 (proleptic Gregorian calendar).
  dimX <- ncdim_def(name="lon", units="degrees", vals=seq(-179.75, 179.75, length = 720))
  dimY <- ncdim_def(name="lat", units="degrees", vals=seq(89.75, -89.75, length = 360))
  dimT <- ncdim_def(name="time", units="years since 1661-1-1 00:00:00",
                    vals=c(year-1661), calendar="proleptic_gregorian")
  # One variable per species holding the occurrence-probability grid.
  vard <- lapply(spNames, function(name){
    ncvar_def(as.character(name), "Probability of occurrence per cell",
              list(dimX,dimY,dimT), 1.e+20, prec="double", compression=9)})
  # Keep only model/file pairs whose file does not exist yet.
  # BUG FIX: the original filtered `filename` first and then recomputed
  # which(!file.exists(filename)) on the already-filtered vector, which
  # selects the *first* k models instead of the matching ones, misaligning
  # GCM names and output files. Compute the index once and subset both.
  missing_idx <- which(!file.exists(filename))
  model <- model[missing_idx]
  filename <- filename[missing_idx]
  # Create the (still empty) NetCDF files with global attributes.
  lapply(filename, function(x){
    nc <- nc_create(x, vard)
    ncatt_put(nc, varid=0, attname="contact", attval="Matthias Biber <matthias.biber@tum.de>")
    ncatt_put(nc, varid=0, attname="institution", attval="Technical University Munich (Germany)")
    nc_close(nc)
  })
  # Individually write data for every species.
  # NOTE(review): the start index 6770 looks like a manual restart point
  # after an interrupted run — for a fresh run this should presumably be
  # seq_along(spNames); confirm before reusing.
  for(j in 6770:length(spNames)){
    library(readr)
    print(j)
    # Collect this species' prediction csv for every requested timestep.
    files <- lapply(predictsPaths, function(x){
      list.files(x, pattern=paste0(as.character(spNames[j]), "_"), full.names=T)})
    if(length(files) == 1){
      data <- readr::read_csv(files[[1]])
      data$PA <- 1
      data$year <- year
    } else{
      data <- lapply(1:length(files), function(x){
        data <- readr::read_csv(files[[x]])
        data$PA <- 1
        data$year <- year[x]
        return(data)
      })
      data <- do.call(plyr::rbind.fill, data)
    }
    # For each (still missing) GCM file: average across blocks/PAs for that
    # model_rcp combination and write one lon x lat x time array.
    for(k in 1:length(filename)){
      nc <- nc_open(filename[k], write=T)
      data_sub <- data %>% group_by(x, y, PA, year) %>%
        select(x, y, PA, year, matches(paste(model[k], df$rcp[rcp_mod], sep="_"))) %>%
        tidyr::gather(var, value, -c(x,y,year,PA)) %>% group_by(x,y,year) %>%
        summarise(mean=mean(value, na.rm=TRUE)) %>% tidyr::spread(year, mean)
      # Expand onto the full global grid so the array has fixed dimensions
      # (cells without predictions become NA).
      df_spat <- expand.grid(x=seq(-179.75, 179.75, length = 720),
                             y=seq(89.75, -89.75, length = 360))
      data_sub <- left_join(df_spat, data_sub) %>% select(-x, -y); rm(df_spat)
      # lon x lat x time array, one slice per year column.
      data_sub <- array(unlist(data_sub), dim=c(720, 360, ncol(data_sub)),
                        dimnames=list(NULL, NULL, names(data_sub)))
      # Write this species' variable, then close to flush to disk.
      ncvar_put(nc, vard[[j]], data_sub, start=c(1,1,1), count=c(-1,-1,-1))
      nc_close(nc)
    }
  }
}
# NOTE(review): no snowfall cluster is started in this section, so this
# sfStop() appears to be a leftover from the section above.
sfStop()
#system('shutdown -s')
########################################
# Sanity check of the written NetCDF files: open a few species variables
# and plot one random time slice each.
library(raster); library(ncdf4)
filedir <- "C:/Users/admin/Documents/"
i <- list.files(filedir, pattern = ".nc4", full.names=T)
(files <- list.files(paste0(filedir, "OutputData/biodiversity/"), pattern="gam.*historical", full.names=T))
#i <- list.files(paste0(filedir, "OutputData/biodiversity/"), pattern="maxent_IPSL-CM5A-LR_rcp60", full.names=T)
#i <- files[1]
#j <- "Yunganastes_mercedesae"
for(i in files){
# 2x2 panel: one plot per checked species.
par(mfrow=c(2,2))
for(j in c("Acanthixalus_spinosus", "Limnodynastes_convexiusculus",
"Xenorhina_rostrata", "Yunganastes_mercedesae")){
nc <- nc_open(i)
# Last variable in the file (only inspected, not plotted).
v <- nc$var[[nc$nvars]]
#nc_close(nc)
#test <- stack(i, varname=v$name)
test <- stack(i, varname=j)
print(nlayers(test))
# Plot one randomly chosen time layer for a quick visual check.
plot(test[[sample(1:nlayers(test),1)]])
}
}
|
58ec437af72c2b933b0bb5e2526b264902ae5d4c
|
d9419af2c6dc6c5c1536653b27aa2ed342af3b90
|
/R/logos.R
|
cd4b66dc4e3993d46525f79e4aadf8d13e17f0b6
|
[] |
no_license
|
TGuillerme/TGuillerme.github.io
|
cc11ee45490dba142405e4c44ea330d2955b056d
|
f54b8748153236f916fe51e64b760d78812f9c7e
|
refs/heads/master
| 2023-07-09T22:30:54.861620
| 2023-07-04T14:42:04
| 2023-07-04T14:42:04
| 20,330,346
| 0
| 3
| null | 2015-01-14T15:35:00
| 2014-05-30T13:42:18
|
CSS
|
UTF-8
|
R
| false
| false
| 242
|
r
|
logos.R
|
## Colors
## Shared palette for the site's generated logos/figures; values are hex RGB.
white <- "#D9DFC0"
grey_sharp <- "#1C1C1C"
grey_light <- "#4A4A4A"
orange_sharp <- "#F65205"
orange_light <- "#F38336"
blue_sharp <- "#3E9CBA"
blue_light <- "#98D4CF"
## Path
## Output directory for rendered images, relative to this script's location.
PATH <- "../images/"
## Size
## Output dimensions in pixels.
## NOTE(review): "heigth" is a typo for "height", kept as-is because other
## scripts that source this file may reference the name -- confirm before renaming.
heigth <- 180
width <- 180
|
9ef7b37ca960ae3995450a41df48d5e59eb12071
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ClusterRankTest/R/clus.rank.sum.R
|
b439780e9983a7d16e4ba7f1839dd034760f80c5
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,269
|
r
|
clus.rank.sum.R
|
clus.rank.sum <-
function(Cluster,X,grp=NULL,Y=NULL,test=c("DS","DD","SDS")) {
  ## Rank-based two-sample / paired tests for clustered data.
  ##
  ## Cluster : integer cluster id per observation (assumed coded 1..M)
  ## X       : outcome vector (for "SDS", the first member of the pair)
  ## grp     : 0/1 group indicator; required for "DS" and "DD"
  ## Y       : second paired outcome; required for "SDS" (X - Y is tested)
  ## test    : "DS"  - clustered rank-sum test (pooled/cluster-wise ECDFs)
  ##           "DD"  - clustered rank-sum test, alternative variance construction
  ##           "SDS" - clustered signed-rank test on the differences X - Y
  ##
  ## Returns an object of class "Cluster.Test" with elements p.value and TestStat.

  ## Resolve the test choice. Fix: without match.arg(), leaving `test` at its
  ## 3-element default made `test == "DS"` a length-3 comparison, which errors
  ## inside `if()` in current R.
  test <- match.arg(test)
  ## Fix: the original only caught the both-NULL case despite the message;
  ## also fail early (with a clear message) on an unusable test/argument combo,
  ## which previously surfaced as "object 'p.value' not found".
  if (is.null(grp) && is.null(Y)) {stop("exactly one of 'grp' and 'Y' must be null")}
  if (!is.null(grp) && !is.null(Y)) {stop("exactly one of 'grp' and 'Y' must be null")}
  if (test %in% c("DS", "DD") && is.null(grp)) {stop("'grp' is required for test = \"", test, "\"")}
  if (test == "SDS" && is.null(Y)) {stop("'Y' is required for test = \"SDS\"")}
  if(test=="DS"){
    ##### calculate quantity 2 (using the pooled estimate of F)
    n<-length(X)
    F.hat<-numeric(n)
    for (i in 1:n){
      F.hat[i]<-(sum(X<=X[i])+sum(X<X[i]))/(2*n)
    }
    ##### calculate quantity 1 (using the ECDF for each cluster)
    #### M is No. of clusters, n is No. of observations
    M<-length(unique(Cluster))
    n.i<-table(Cluster)
    F.prop<-numeric(n)
    for(ii in 1:n){
      F.j<-numeric(M)
      for (i in 1:M){
        F.j[i]<-(sum(X[Cluster==i]<X[ii])+0.5*sum(X[Cluster==i]==X[ii]))/(n.i[i])
      }
      # leave-own-cluster-out sum of cluster-wise ECDFs
      F.prop[ii]<-sum(F.j[-Cluster[ii]])
    }
    ########### calculate S=E(W*|X,g)
    a<-numeric(M)
    b<-1+F.prop
    for (i in 1:M){
      a[i]<-sum((grp[Cluster==i]*b[Cluster==i])/(n.i[i]))
    }
    c<-1/(M+1)
    S<-c*sum(a)
    ######## note: for m groups maybe can use grp[Cluster==i&grp=m]
    ######### calculate E(S)=E(W*)
    n.i1<-table(Cluster[grp==1])
    d<-n.i1/n.i
    E.S<-(1/2)*sum(d)
    ####### calculate estimate of variance of S
    W.hat<-numeric(M) ##### first calculate W.hat for each cluster
    a<-n.i1/n.i
    for (i in 1:M){
      b<-1/(n.i[i]*(M+1))
      c<-(grp[Cluster==i])*(M-1)
      d<-sum(a[-i])
      W.hat[i]<-b*sum((c-d)*F.hat[Cluster==i])
    }
    a<-n.i1/n.i
    E.W<-(M/(2*(M+1)))*(a-sum(a)/M) ## second, calculate E(W)
    var.s<-sum((W.hat-E.W)^2) # calculate var(s)
    statistic <-(S-E.S)/sqrt(var.s) # calculate the test statistic
    p.value<-2*pnorm(abs(statistic),lower.tail=F)
  }
  if(test=="DD"){
    ################# new rank sum test ########################
    # rn(): for one grp==1 observation (dv = c(cluster id, value)), the
    # average of the grp-wise cluster ECDFs evaluated at that value, with
    # the observation's own cluster zeroed out.
    rn<-function(dv){
      ik=dv[1]
      x=dv[2]
      ds1=data[data[,3]==1,]
      vs1=(kh==2)*(ds1[,2]<x)+(kh==1)*(ds1[,2]<=x)
      sl1=aggregate(vs1,list(ds1[,1]),mean)[,2]
      ds2=data[data[,3]==0,]
      vs2=(kh==2)*(ds2[,2]<x)+(kh==1)*(ds2[,2]<=x)
      sl2=aggregate(vs2,list(ds2[,1]),mean)[,2]
      fg=(sl1+sl2)/2
      fg[ik]=0
      return(fg)
    }
    rst<-function(il){
      # only for variance
      ly=sum(mat[-which(dw[,1]==il),-il])
      return(ly)
    }
    data= cbind(Cluster,X, grp)
    m=length(unique(data[,1]))
    dw=data[(data[,3]==1),]
    ns=(dw[,1])
    nv=as.vector(table(ns)[match(ns,names(table(ns)))])
    kh=1   # kh toggles <= (kh==1) vs < (kh==2) in the ECDF evaluations
    mat=t(apply(cbind(dw[,1:2]),1,rn))/nv
    vf1=apply(cbind(seq(1,m)),1,rst) # variance part check
    sFs1=sum(mat) #-estimate part
    kh=2
    mat=t(apply(cbind(dw[,1:2]),1,rn))/nv
    vf2=apply(cbind(seq(1,m)),1,rst) #check
    sFs2=sum(mat)
    v1=((sFs1+sFs2)/4)+(m/2) # estimate -- matches original
    vd= ((vf1+vf2)/4)+(m-1)/2
    h=1
    statistic <- v1
    E.T<- 0.25*m*(m+1)
    # NOTE: `test` is reused here as a local work vector (jackknife-style
    # pseudo-values); it no longer refers to the function argument.
    test=(m/m^h)*v1-((m-1)/(m-1)^h)*vd
    v.test=var(test)
    v_hat=(((m^h)^2)/(m-1))*v.test
    v.hat=ifelse(v_hat==0,0.00000001,v_hat)  # guard against a zero variance estimate
    Z<- (statistic-E.T)/sqrt(v.hat)
    p.value<- 2*pnorm(abs(Z), lower.tail=FALSE)
  }
  if(test=="SDS"){
    Xij <- X-Y
    ni <- as.vector(table(Cluster))
    g <- length(ni)
    n <- sum(ni)
    cni <- cumsum(ni)
    cni <- c(0,cni)
    # Fi: mid-rank ECDF of |Xij| within cluster i; Ftot/Fcom: sum / weighted
    # average of the cluster-wise ECDFs.
    Fi <- function(x,i) { Xi <- Xij[(cni[i]+1):(cni[i+1])];
      (sum(abs(Xi)<=x)+sum(abs(Xi)<x))/(2*ni[i])}
    Ftot <- function(x) { st <- 0;
      for (i in 1:g) st <- st + Fi(x,i);
      return(st)}
    Fcom <- function(x) { st <- 0;
      for (i in 1:g) st <- st + Fi(x,i)*ni[i];
      return(st/n)}
    # SIGNED RANK TEST STATISTIC
    TS <- VTS <- 0
    for (i in 1:g) {
      Xi <- Xij[(cni[i]+1):(cni[i+1])]
      first <- (sum(Xi>0)-sum(Xi<0))/length(Xi)
      second <- 0
      third <- 0
      for (x in Xi) { second <- second + sign(x)*(Ftot(abs(x))-Fi(abs(x),i));
        third <- third + sign(x)*Fcom(abs(x))}
      TS <- TS + first+second/length(Xi)
      VTS <- VTS + (first+ (g-1)*third/length(Xi))^2
    }
    statistic=TS/sqrt(VTS) ##
    p.value= 2*(1-pnorm(abs(statistic))) ##
  }
  structure(list(p.value=p.value,TestStat=statistic),class="Cluster.Test")
}
|
98657f409ca36db1d9417815990d0e0a5b8f4dd4
|
0b1d201a7c42a6f1b017db58c608fc6c8ba00f56
|
/man/isingfit-package.rd
|
dc088b5a4bea2e3d1766d5f9706c433b09f2bcc5
|
[] |
no_license
|
cran/IsingFit
|
af6c0f1c89f28a822b919b5db228a5b7c364432d
|
0da78d72223c39edf6d1fb3d4913edc96b062f13
|
refs/heads/master
| 2020-06-06T11:52:32.747346
| 2016-09-07T13:01:58
| 2016-09-07T13:01:58
| 17,680,040
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,693
|
rd
|
isingfit-package.rd
|
\name{IsingFit-package}
\alias{IsingFit-package}
\docType{package}
\title{
Network estimation using the eLasso method
}
\description{
This network estimation procedure eLasso, which is based on the Ising model, combines l1-regularized logistic regression with model selection based on the Extended Bayesian Information Criterion (EBIC). EBIC is a fit measure that identifies relevant relationships between variables. The resulting network consists of variables as nodes and relevant relationships as edges. Can deal with binary data.
}
\details{
\tabular{ll}{
Package: \tab IsingFit\cr
Type: \tab Package\cr
Version: \tab 0.3.1\cr
Date: \tab 2016-9-6\cr
License: \tab What license is it under?\cr
}
}
\author{
Claudia D. van Borkulo, Sacha Epskamp, with contributions from Alexander Robitzsch
Maintainer: Claudia D. van Borkulo <cvborkulo@gmail.com>
}
\references{
Chen, J., & Chen, Z. (2008). Extended bayesian information criteria for model selection with large model spaces. Biometrika, 95(3), 759-771.
Foygel, R., & Drton, M. (2011). Bayesian model choice and information criteria in sparse generalized linear models. arXiv preprint arXiv:1112.5635.
Ravikumar, P., Wainwright, M. J., & Lafferty, J. D. (2010). High-dimensional Ising model selection using l1-regularized logistic regression. The Annals of Statistics, 38, 1287 - 1319.
van Borkulo, C. D., Borsboom, D., Epskamp, S., Blanken, T. F., Boschloo, L., Schoevers, R. A., & Waldorp, L. J. (2014). A new method for constructing networks from binary data. Scientific Reports 4, 5918; DOI:10.1038/srep05918.
}
% ~~ Optionally other standard keywords, one per line, from file ~~
% ~~ KEYWORDS in the R documentation directory ~~
|
85866025bcd1e48564e0bb65fd2242fa92e88496
|
a63c31da79f4e445204c1b75bfe9fd1bf5ef6141
|
/tests/testthat.R
|
6163348b664cd22aa9d92a76ed0e0e23ab4c2ef4
|
[
"MIT"
] |
permissive
|
wlandau/crew
|
f039d3316f3b8c3e4af9ff4b15da7a956fe2273b
|
0bf1273e6ed14c334dd75ba684dabee1a4d70465
|
refs/heads/main
| 2023-09-01T08:58:23.025505
| 2023-08-23T18:22:50
| 2023-08-23T18:22:50
| 452,881,964
| 71
| 3
|
NOASSERTION
| 2023-09-08T13:19:36
| 2022-01-27T23:42:41
|
R
|
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
# Standard testthat entry point: load the package under test and run every
# test file in tests/testthat/ (invoked automatically by R CMD check).
library(testthat)
library(crew)
test_check("crew")
|
7c4a417e96d739b9951cc0ad46f14fae8f9ce6f2
|
2bba5609fad00dbfc4b93c84978b08d35954d977
|
/man/SNPsites.Rd
|
515a93965726dd231a3ff6445f04106909faf586
|
[
"MIT"
] |
permissive
|
kant/sitePath
|
416d8f59a453ccd4e76ad2fdab05b03cbd905f4b
|
73a76a56cb6df52468a1243057ee1e2ccfa2b3ad
|
refs/heads/master
| 2022-10-20T17:50:14.530724
| 2020-06-16T09:28:15
| 2020-06-16T12:48:45
| 272,836,800
| 0
| 0
|
MIT
| 2020-06-16T23:58:11
| 2020-06-16T23:58:10
| null |
UTF-8
|
R
| false
| true
| 782
|
rd
|
SNPsites.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SNPsites.R
\name{SNPsites}
\alias{SNPsites}
\title{Finding sites with variation}
\usage{
SNPsites(tree, minSNP = NULL)
}
\arguments{
\item{tree}{The return from \code{\link{addMSA}} function.}
\item{minSNP}{Minimum number of amino acid variation to be a SNP.}
}
\value{
A \code{SNPsites} object
}
\description{
Single nucleotide polymorphism (SNP) in the whole package refers
to variation of amino acid. \code{SNPsite} will try to find SNP in the
multiple sequence alignment. A reference sequence and gap character may be
specified to number the site.
}
\examples{
data(zikv_tree_reduced)
data(zikv_align_reduced)
tree <- addMSA(zikv_tree_reduced, alignment = zikv_align_reduced)
SNPsites(tree)
}
|
b4c4533c3883f561c671f0ceae13ee4fb03f3aff
|
34145554609be95c59218067c0d8825f3e7d4206
|
/src/met/calculate_ET0.R
|
5de639e9dd50559df8d841414574bda1b584bb0a
|
[] |
no_license
|
cvoter/low-impact-lot-climate
|
bf1596b9124bbac4fdd51d8a4d5c6fe5dea079ee
|
4a6a73f0ee0319e9cd9aa1ffc89a1767b95a100e
|
refs/heads/master
| 2023-03-20T09:06:48.131089
| 2021-03-19T12:07:27
| 2021-03-19T12:07:27
| 271,650,657
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,170
|
r
|
calculate_ET0.R
|
#' calculate_ET0.R
#'
#' Calculates reference ET (ET0) using the FAO Penman Montieth equations
#'
#' This function calculates FAO Penman Monteith reference evapotranspiration
#' (ET0) in mm/hr for hourly time steps. Note that daily ET0 requires a
#' different equation.
#'
#' REFERENCES
#' Allen, R. G., Pereira, L. S., Raes, D., & Smith, M. (1998). Crop
#' evapotranspiration: Guidelines for computing crop water requirements. Rome:
#' FAO. Retrieved from http://www.fao.org/docrep/X0490E/x0490e00.htm
#'
#' INPUTS
#' elevation: elevation above sea level (m)
#' latitude.degrees: latitude of city (degrees)
#' longitude.timezone: longitude of timing info time zone, default: 0
#' longitude.measurement: longitude of city (degrees west of Greenwich)
#' julian.day: julian day of every hour in the time series
#' met.pressure: pressure (kPa)
#' wind.measured: wind speed (m/s)
#' temperature.K: air temperature (K)
#' Rs: incoming shortwave radiation (W/m^2)
#' humidity: specific humidity (kg/kg)
#' nhours: number of hours in time series
#'
#' OUTPUTS
#' ET0: vector with ET0 rate (mm/hr) for each input time step
calculate_ET0 <- function(elevation,
                          latitude.degrees,
                          longitude.timezone = 0,
                          longitude.measurement,
                          julian.day,
                          met.pressure,
                          wind.measured,
                          temperature.K,
                          Rs,
                          humidity,
                          nhours) {
  # INITIALIZE ----------------------------------------------------------------
  # Preallocate the result (fix: was `ET0 <- NULL`, grown element-by-element,
  # which copies the vector on every iteration).
  ET0 <- numeric(nhours)
  # define additional parameters not specific to location
  albedo <- 0.23 # albedo for green grass reference crop (p.43)
  wind.height <- 10 # distance above ground at which wind measured (m)
  temperature.C <- temperature.K - 273.15 # temperature in Celsius
  # Degrees -> radians. Fix: this is the exact conversion performed by
  # NISTunits::NISTdegTOradian, so the in-function library(NISTunits) call
  # (a third-party dependency and a search-path side effect) is removed.
  latitude <- latitude.degrees * (pi / 180)
  # LOOP THROUGH TIME SERIES --------------------------------------------------
  for (i in seq_len(nhours)) {
    # day
    this.julian <- julian.day[i]
    # inverse relative distance Earth to Sun (Eq.23, p.46)
    inverse.distance <- 1 + 0.033*cos(2*pi*this.julian/365)
    # seasonal correction for solar time (hour) (Eq.32-33, p.48)
    b <- 2*pi*(this.julian - 81)/364
    seasonal.correction <- 0.1645*sin(2*b) - 0.1255*cos(b) - 0.025*sin(b)
    # solar time angles (radians) (Eq.29-31, p.48)
    # NOTE(review): `i %% 24` treats index i as the hour of day, which assumes
    # the series starts at hour 1 of a day -- confirm against the caller.
    omega <- (pi/12)*(((i %% 24 + 0.5) +
                       0.06667*(longitude.timezone - longitude.measurement) +
                       seasonal.correction) - 12) # @ midpoint of time period
    omega.start <- omega - pi*1/24 # @ beginning of time period
    omega.end <- omega + pi*1/24 # @ end of time period
    # solar declination (radians) (Eq.24, p.46)
    solar.declination <- 0.409*sin(2*pi*this.julian/365 - 1.39)
    # sunset solar time angle (radians) (Eq. 25, p.46)
    omega.solar <- acos(-tan(latitude)*tan(solar.declination))
    omega.solar.start <- omega.solar - 0.79 # p.75
    omega.solar.end <- omega.solar - 0.52 # p.75
    # determine if it is day or night
    if ( (omega > -omega.solar) & (omega < omega.solar) ) {
      # daytime
      day = 1
      night = 0
    } else {
      # nighttime
      day = 0
      night = 1
    }
    # AIR AND HUMIDITY PARAMETERS ---------------------------------------------
    # atmospheric pressure (kPa) (Eq.7, p.31)
    atmospheric.pressure <- 101.3*((293 - 0.0065*elevation)/293)^5.26
    # psychrometric constant (kPa/degC) (Eq.8, p.32)
    gamma <- (0.665e-3)*atmospheric.pressure
    # saturation vapor pressure (kPa) (Eq.11, p.36)
    vp.saturation <- 0.6108*exp(17.27*temperature.C[i]/(temperature.C[i]+237.3))
    # slope of saturation vapor pressure curve (Eq.13, p.37)
    delta <- 4098*vp.saturation/((temperature.C[i] + 237.3)^2)
    # actual vapor pressure (kPa), based on Bolton, 1980 (Eq.16)
    # assume: (specific humidity, kg/kg)*(1000 g/kg) = (mixing ratio, g/kg)
    vp.actual <- met.pressure[i]*humidity[i]/(0.622 + humidity[i])
    # vapor pressure deficit (kPa), floored at zero
    vpd <- max(c(vp.saturation - vp.actual, 0))
    # RADIATION --------------------------------------------------------------
    # extraterrestrial radiation (MJ/m^2*hr) (Eq.28,p.47, also see note p.75)
    Ra.day <- 12*60/pi*0.0820*inverse.distance*(
      (omega.end - omega.start)*sin(latitude)*
        sin(solar.declination) +
        cos(latitude)*cos(solar.declination)*
        (sin(omega.end)-sin(omega.start)))
    Ra.night <- 12*60/pi*0.0820*inverse.distance*(
      (omega.solar.end-omega.solar.start)*sin(latitude)*
        sin(solar.declination) +
        cos(latitude)*cos(solar.declination)*
        (sin(omega.solar.end)-sin(omega.solar.start)))
    # clear sky radiation (MJ/m^2*hr) (Eq.37, p.51)
    Rso <- (0.75 + (2e-5)*elevation)*(Ra.day*day + Ra.night*night)
    # Stefan-Boltzman constant, converted to (MJ/K^4*m^2*hr) (p.74)
    stefan.boltzman <- (4.903e-9)/24
    # net longwave radiation (MJ/m^2*hr) (Eq.39, p.52)
    Rnl <- stefan.boltzman*temperature.K[i]^4*
      (0.34 - 0.14*sqrt(vp.actual))*(1.35*min(Rs[i]/Rso,1) - 0.35)
    # net shortwave radiation (MJ/m^2*hr) (Eq.38, p.51)
    Rns <- (1 - albedo)*Rs[i]
    # net radiation (MJ/m^2*hr) (Eq.40, p.53)
    Rn <- Rns - Rnl
    # Ground heat (MJ/m^2*hr) (Eq.45-46, p.55)
    G <- 0.1*Rn*day + 0.5*Rn*night
    # Wind speed at 2m off the ground (m/s) (Eq.47, p.56)
    wind.2m <- wind.measured[i]*4.87/(log(67.8*wind.height - 5.42))
    # FAO Penman-Monteith reference evapotranspiration (mm/hr) (Eq.53, p.74)
    ET0[i]<-(0.408*delta*(Rn - G) + gamma*(37/temperature.K[i])*wind.2m*vpd)/
      (delta + gamma*(1 + 0.34*wind.2m))
  }
  return(ET0)
}
|
3aa0f78ad7ef58adcdb98ac7c68e447e4c476709
|
febcd5b48398c8d0a70e578d24a7cae16344d551
|
/R/run_Rcodes.R
|
9efdc65de9d2d41fdecd6504cc36d45926b99d83
|
[] |
no_license
|
yangjl/farmeR
|
c5c68937d583139c66d9f0509441e29df459045f
|
4b6caf488212954d75694a3181e5c74293b9c8ea
|
refs/heads/master
| 2020-12-11T07:38:47.206550
| 2017-05-22T21:23:27
| 2017-05-22T21:23:27
| 53,074,155
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,946
|
r
|
run_Rcodes.R
|
#' \code{Run array job of R codes}
#'
#' Writes one small shell script per batch of \code{cmdno} rows of
#' \code{inputdf} (each script runs \code{rcodes} with the batch index as its
#' argument), then registers a SLURM array job that executes those scripts.
#'
#' @param inputdf An input data.frame, with columns of file and out.
#' @param outdir The dir of shell files.
#' @param rcodes The abosulte path of your R codes to run.
#' @param arrayshid The sbatch id.
#' @param email Your email address that farm will email to once the jobs were done/failed.
#' @param cmdno Number of commands per CPU, i.e. number of rows per inputdf.
#' @param runinfo Parameters specify the array job partition information.
#' A vector of c(FALSE, "bigmemh", "1"): 1) run or not, default=FALSE
#' 2) -p partition name, default=bigmemh and 3) --cpus, default=1.
#' It will pass to \code{set_array_job}.
#'
#' @return return a batch of shell scripts.
#'
#' @examples
#' run_Rcodes(inputdf=data.frame(file=1:11, out=10), outdir="slurm-script", cmdno=10,
#' rcodes = "lib/C_format.R", arrayshid = "slurm-script/run_rcode_array.sh",
#' email=NULL, runinfo = c(FALSE, "bigmemh", 1))
#'
#' @export
run_Rcodes <- function(
  inputdf, outdir, cmdno=100,
  rcodes = "lib/C_format.R",
  arrayshid = "slurm-script/run_bcf_query_array.sh",
  email=NULL, runinfo = c(FALSE, "bigmemh", 1)
){
  runinfo <- get_runinfo(runinfo)
  # Ensure both the shared slurm-script dir and the target dir exist.
  dir.create("slurm-script", showWarnings = FALSE)
  dir.create(outdir, showWarnings = FALSE)
  # Number of batches: one shell script per `cmdno` rows of inputdf.
  tot <- ceiling(nrow(inputdf)/cmdno)
  for (batch in 1:tot) {
    script.path <- paste0(outdir, "/run_rcode_", batch, ".sh")
    # Each script runs the R code with the batch index passed via --args.
    rcmd <- paste0('R --no-save --args ', batch, ' < ', rcodes)
    cat(paste("### run Rcode", Sys.time(), sep=" "),
        rcmd,
        file=script.path, sep="\n", append=FALSE)
  }
  # The array task id selects which batch script to run.
  set_array_job(shid=arrayshid,
                shcode=paste0("sh ", outdir, "/run_rcode_$SLURM_ARRAY_TASK_ID.sh"),
                arrayjobs=paste("1", tot, sep="-"),
                wd=NULL, jobid="rcode", email=email, runinfo=runinfo)
}
|
337a7ccf942ff924d46bbcf4975a0348f32921dc
|
fa49f6eda3c9eda86c72c0b1a853ae1585ef028f
|
/Doit_R/Chapter09.R
|
6c45bbc9c0481c00c532ac185c05ea426d6164fd
|
[] |
no_license
|
everydayspring/R-study
|
e9c0825f3efaf1a5390094ba6379c37c70c9d5b5
|
42ae3149ec2d064494a3027ce0fa94f05932552a
|
refs/heads/master
| 2022-12-05T15:56:33.672977
| 2020-08-20T12:54:58
| 2020-08-20T12:54:58
| 282,086,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,430
|
r
|
Chapter09.R
|
#------------ Data analysis project: "Understand the lives of Koreans" ------------#
#------------ Preparing the Korea Welfare Panel data for analysis ------------#
# Prepare the data
# Install and load packages
install.packages("foreign") # install the foreign package (NOTE(review): interactive install on every run; normally run once)
library(foreign) # read SPSS files
library(dplyr) # data wrangling
library(ggplot2) # visualization
library(readxl) # read Excel files
# Load the data
getwd()
raw_welfare <- read.spss(file = "Koweps_hpc10_2015_beta1.sav",
to.data.frame = T)
welfare <- raw_welfare
# Inspect the data
head(welfare)
tail(welfare)
View(welfare)
dim(welfare)
str(welfare)
summary(welfare)
# Rename variables to readable names
welfare <- rename(welfare,
sex = h10_g3, # sex
birth = h10_g4, # birth year
marriage = h10_g10, # marital status
religion = h10_g11, # religion
income = p1002_8aq1, # monthly income
code_job = h10_eco9, # job code
code_region = h10_reg7) # region code
#------------ Income differences by sex ------------#
#------------ Inspect and preprocess the sex variable ------------#
# Inspect the variable
class(welfare$sex)
table(welfare$sex)
# Preprocess
' 이상치 확인 ' # (marker string, Korean: "check outliers")
table(welfare$sex)
' 이상치 결측 처리 ' # (marker string, Korean: "recode outliers to NA"; 9 codes a non-response)
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
' 결측치 확인 ' # (marker string, Korean: "check missing values")
table(is.na(welfare$sex))
' 성별 항목 이름 부여 ' # (marker string, Korean: "assign labels to sex values")
welfare$sex <- ifelse(welfare$sex == 1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)
#------------ Inspect and preprocess the income variable ------------#
# Inspect the variable
class(welfare$income)
summary(welfare$income)
qplot(welfare$income) + xlim(0, 1000)
# Preprocess
' 이상치 확인 ' # (marker string, Korean: "check outliers")
summary(welfare$income)
' 이상치 결측 처리 ' # (marker string, Korean: "recode outliers to NA"; 0 and 9999 are invalid income codes)
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
' 결측치 확인 ' # (marker string, Korean: "check missing values")
table(is.na(welfare$income))
#------------ Analyze income differences by sex ------------#
# Build a table of mean income per sex
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mean_income = mean(income))
sex_income
# Plot the result
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
|
6385ee7049e32598f6611bd7f8f253ccc9de2980
|
7853c37eebe37fa6a0307e0dd9e197830ee6ac71
|
/man/cudaStreamDestroy.Rd
|
9bd0c76a232e0abf3a96c1584cab71b5fc5d03cf
|
[] |
no_license
|
chen0031/RCUDA
|
ab34ffe4f7e7036a8d39060639f09617943afbdf
|
2479a3b43c6d51321b0383a88e7a205b5cb64992
|
refs/heads/master
| 2020-12-22T22:47:37.213822
| 2016-07-27T03:50:44
| 2016-07-27T03:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
rd
|
cudaStreamDestroy.Rd
|
\name{cudaStreamDestroy}
\alias{cudaStreamDestroy}
\title{Destroys and cleans up an asynchronous stream}
\description{ Destroys and cleans up the asynchronous stream specified by \code{stream}.}
\usage{cudaStreamDestroy(stream)}
\arguments{
\item{stream}{Stream identifier}
}
\seealso{\code{\link{cudaStreamCreate}}
\code{\link{cudaStreamCreateWithFlags}}
\code{\link{cudaStreamQuery}}
\code{\link{cudaStreamWaitEvent}}
\code{\link{cudaStreamSynchronize}}
\code{\link{cudaStreamAddCallback}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.html}}
\keyword{programming}
\concept{GPU}
|
9bf6538ba10ac153ced347685c576d2ddb58941c
|
42896053f72650e1dab794bf8a001dc221fc5fe0
|
/plot3.R
|
d8a67727da1a131d229a6aff89d4d8c42804aef3
|
[] |
no_license
|
dcryman/ExData_Plotting1
|
d8373bcc36f4ddd527e5947031f49d8760b7329b
|
bc04cb0370fff86f2e4bbedbe524d9d725ce5b3e
|
refs/heads/master
| 2021-01-18T11:21:23.571194
| 2014-06-05T22:37:39
| 2014-06-05T22:37:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
plot3.R
|
# plot3.png: household energy sub-metering over 1-2 Feb 2007.
# NOTE(review): reads "power.txt" (';'-separated, '?' as NA) -- presumably a
# pre-extracted copy of household_power_consumption.txt; confirm the file name.
pow <- read.table('power.txt',header=TRUE,sep=';',na.strings='?',stringsAsFactors = FALSE)
# Keep only the two target days (Date is formatted d/m/Y).
pow1 <- pow[(pow$Date=='1/2/2007' | pow$Date=='2/2/2007'),]
# Combine Date and Time into a datetime column for the x-axis.
pow1$datetime <- paste(pow1$Date, pow1$Time)
pow1$datetime <- strptime(pow1$datetime, format="%d/%m/%Y %H:%M:%S")
# Draw an empty frame, then overlay the three sub-metering series.
png(filename = "plot3.png",width = 480, height = 480, units = "px")
plot(pow1$datetime,pow1$Sub_metering_1,ylab="Energy sub metering",xlab="",type = "n")
lines(pow1$datetime,pow1$Sub_metering_1,col="black")
lines(pow1$datetime,pow1$Sub_metering_2,col="red")
lines(pow1$datetime,pow1$Sub_metering_3,col="blue")
dev.off()
|
cd51279bc88739834885bf952281da5e1acd720c
|
65bfa396abb4e4d1726acca08e64c4d0d6fc3a42
|
/man/adj_bitmap.Rd
|
b8fa7e60c5ef2bcb8f84389662cab2fca7c509a0
|
[] |
no_license
|
M2UCT/RFjobart
|
df24cc871f04fffdb9d84ce47700874d966f47f4
|
8f5ec155d52a487acf934ca37a7d0dc68404e0c8
|
refs/heads/master
| 2020-04-17T04:11:03.879029
| 2019-01-23T10:56:06
| 2019-01-23T10:56:06
| 166,217,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 718
|
rd
|
adj_bitmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bitmap.R
\name{adj_bitmap}
\alias{adj_bitmap}
\title{Adjust bitmap picture}
\usage{
adj_bitmap(img.in, img.out, xpix.ex, ypix.ex, xpix.in, ypix.in)
}
\arguments{
\item{img.in}{Full path to the bitmap picture}
\item{img.out}{Full path to the adjusted bitmap picture}
\item{xpix.ex}{Minimum number of pixels to form a straight line in the x-axis}
\item{ypix.ex}{Minimum number of pixels to form a straight line in the y-axis}
\item{xpix.in}{Maximum number of pixels to connect lines in the x-axis}
\item{ypix.in}{Maximum number of pixels to connect lines in the y-axis}
}
\value{
A bitmap picture
}
\description{
Adjust bitmap picture
}
|
4db54b4a0de5fc742d844f8c098fa3c4009aa99f
|
bd1f3cbc0ccdd145be1cc43119e728f180b85fd6
|
/man/observedKnownSites.Rd
|
0a3dd952334520f262ec1d551d54eb4a575a702d
|
[] |
no_license
|
fbreitwieser/isobar
|
277118c929098690490d0e9f857dea90a42a0924
|
0779e37bf2760c3355c443749d4e7b8464fecb8a
|
refs/heads/master
| 2020-12-29T02:20:51.661485
| 2016-05-17T17:20:00
| 2016-05-17T17:20:00
| 1,152,836
| 7
| 6
| null | 2017-03-17T15:27:24
| 2010-12-09T10:45:01
|
R
|
UTF-8
|
R
| false
| false
| 1,700
|
rd
|
observedKnownSites.Rd
|
\name{observedKnownSites}
\alias{observedKnownSites}
\alias{modif.site.count}
\alias{modif.sites}
\title{
Observed modification sites.
}
\description{
Functions to display the modification sites observed for each protein isoform
and count the number of modified residues per protein.
}
\usage{
observedKnownSites(protein.group, protein.g, ptm.info, modif, modification.name = NULL)
modif.site.count(protein.group, protein.g = reporterProteins(protein.group), modif, take = max)
modif.sites(protein.group, protein.g = reporterProteins(protein.group), modif)
}
\arguments{
\item{protein.group}{ProteinGroup object.}
\item{protein.g}{protein group identifier.}
\item{ptm.info}{ptm information data.frame, see ?getPtmInfo.}
\item{modif}{Modification to track, e.g. 'PHOS'.}
\item{modification.name}{Value to filter 'modification.name' column in ptm.info.}
\item{take}{should be either max or min: When multiple isoforms are present, which value should be taken for the count?}
}
\author{
Florian P. Breitwieser
}
\examples{
data(ib_phospho)
data(ptm.info)
# Modification sites of reporter proteins:
# a list of protein groups,
# containing sub-lists of identified sites for each isoform
protein.modif.sites <- sort(modif.site.count(proteinGroup(ib_phospho),modif="PHOS"))
# Details on modification sites of proteins
# detected with most modifications
modif.sites(proteinGroup(ib_phospho),modif="PHOS",protein.g=names(tail(protein.modif.sites)))
# How many sites are known, and how many known sites have been observed?
observedKnownSites(proteinGroup(ib_phospho),modif="PHOS",protein.g=names(tail(protein.modif.sites)),ptm.info=ptm.info,modification.name="Phospho")
}
|
1eb90fbf46a8ad957713b338136d6d58a0c1f1f4
|
01eedc1fddbc6a9d597bb4b901be514f4f8b174c
|
/Đồ án R/21_Doughnutchart.R
|
abc919d523fe49820987c998a35324df19251af2
|
[] |
no_license
|
ThanhHung2112/R
|
40173de70c7e79442f3e5d8d2394571ea6e6a468
|
c136d646b89613b7ede05b15e5c6563f37d57e65
|
refs/heads/master
| 2023-06-13T03:57:30.560292
| 2021-07-12T08:46:25
| 2021-07-12T08:46:25
| 365,208,305
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,031
|
r
|
21_Doughnutchart.R
|
# Doughnut chart of cumulative COVID-19 deaths: top 5 countries plus "Others".
# Fix: dplyr (%>%, group_by, summarise) and ggplot2 were used without being
# loaded, so the script failed when run standalone.
library(dplyr)
library(ggplot2)
#Load Data
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/06-29-2021.csv"
df <- read.csv(url, header = TRUE)
# Total deaths per country
country <- df %>% group_by(Country_Region)
country <- country %>% summarise(Deaths = sum(Deaths))
# Sort descending, keep the top 5, and lump the remainder into "Others"
country <- country[order(-country$Deaths),]
country_n<-country[1:5,]
others=sum(country$Deaths)-sum(country_n$Deaths)
others<-data.frame("Others", others)
names(others)<-c("Country_Region","Deaths")
country <- rbind(country_n, others)
data <- country
# Fraction of total deaths per ring segment
data$fraction = data$Deaths / sum(data$Deaths)
# Compute the cumulative percentages (top of each rectangle)
data$ymax = cumsum(data$fraction)
# Compute the bottom of each rectangle
data$ymin = c(0, head(data$ymax, n=-1))
ggplot(data, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=Country_Region)) +
  geom_rect() +
  coord_polar(theta="y") + # Try to remove that to understand how the chart is built initially
  xlim(c(2, 4)) # the inner limit of 2 hollows the pie into a doughnut
|
0147b4d4b120de1c587b889850d18b31d4fe2268
|
990415b41c24006029aaf71d4b4602ea08ba795e
|
/scripts/FDR_facewise_m.R
|
d06b945d815ee5b6c59982f7e3bb5eea99daf92e
|
[] |
no_license
|
PennLINC/DevProps
|
73b0bf08fb830020b49661620bfd0a795d9a26cc
|
0c9f9589494b56d158659835de2c397e0c658f93
|
refs/heads/main
| 2023-04-13T20:14:22.695532
| 2023-01-16T22:52:14
| 2023-01-16T22:52:14
| 397,272,223
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
FDR_facewise_m.R
|
# Intake data from across hemispheres, FDR-correct the p-distribution pooled
# over both hemispheres, and threshold the dr2 values accordingly.
# load in facewise output from left hemi
LBP_m=readRDS('/cbica/projects/pinesParcels/results/PWs/LBUProp_mdr2.rds')
LBP_mp=readRDS('/cbica/projects/pinesParcels/results/PWs/LBUProp_mp.rds')
# and right hemi
RBP_m=readRDS('/cbica/projects/pinesParcels/results/PWs/RBUProp_mdr2.rds')
RBP_mp=readRDS('/cbica/projects/pinesParcels/results/PWs/RBUProp_mp.rds')
# combine dr2 values and p-values across hemispheres
BUs=c(LBP_m,RBP_m)
BUsP=c(LBP_mp,RBP_mp)
# FDR-correct the pooled p-values
BUsP_f=p.adjust(BUsP,method='fdr')
# mask dr2s accordingly: zero out faces that do not survive FDR q < 0.05.
# Fix: the original zeroed the corrected p-values themselves and then wrote
# out the raw, uncorrected BUsP vector, discarding the FDR correction and the
# dr2 values entirely.
BUs[BUsP_f>0.05]=0
# uncombine: separate vecs for separate hemis
BU_L=BUs[1:length(LBP_m)]
BU_R=BUs[(length(LBP_m)+1):length(BUs)]
# print out each for matlab friendly reading
write.table(BU_L,'~/results/PWs/FDRed_m_L.csv',col.names=F,row.names=F,quote=F)
write.table(BU_R,'~/results/PWs/FDRed_m_R.csv',col.names=F,row.names=F,quote=F)
|
07a847b0dbb61cf95f80f88181972be9772d9047
|
4cabda4635edd3226a371403d608b6622fb7fd71
|
/R/gs_quickread.R
|
ad002179de3e41b676d6a8b21ae3410d052e2fb4
|
[] |
no_license
|
randallhelms/cakeR
|
a67fad8e563ec9dc267c6f2aaa79b3e2fc78ae44
|
923a718c6bc8c2a9354c83373ab94587da8df815
|
refs/heads/master
| 2021-06-04T16:58:28.637311
| 2020-03-19T21:58:16
| 2020-03-19T21:58:16
| 145,759,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
gs_quickread.R
|
#' Read a Google Sheet by its title.
#'
#' Looks the sheet up in the listing returned by googlesheets::gs_ls(); when
#' several sheets share the same title, the most recently updated one is used.
#'
#' @param name Sheet title to match exactly against `sheet_title`.
#' @param worksheet Worksheet index or name passed to gs_read(); default 1.
#' @return The worksheet contents as returned by gs_read().
gs_quickread <- function(name, worksheet = 1) {
  # Fix: use library() rather than require() so a missing dependency fails
  # loudly here instead of surfacing later as "could not find function".
  library(googlesheets)
  library(tidyverse)
  xlist <- gs_ls() %>%
    filter(sheet_title == name)
  # Fix: a zero-row match previously produced a length-0 key and an obscure
  # downstream error from gs_key().
  if (nrow(xlist) == 0) {
    stop("No Google Sheet found with title: ", name, call. = FALSE)
  }
  if (nrow(xlist) > 1) {
    # Disambiguate duplicate titles by taking the most recently updated sheet.
    xlist <- xlist %>%
      arrange(desc(updated)) %>%
      slice(1)
  }
  key <- xlist[['sheet_key']]
  gkey <- gs_key(key)
  x <- gs_read(gkey, ws = worksheet, literal = TRUE)
  return(x)
}
|
c3ed5c482a6f9387aa9f531c81ddef4c3383a54f
|
fe2202c88c7e6e22ee3a95b1d829db06480f7f95
|
/i686-pc-linux-gnu/arm-xilinx-linux-gnueabi/mep/bin/arm/cortex-a9.rd
|
17fdd14cb15b0da5797687b574f20e5bd2403c8b
|
[] |
no_license
|
qiupq/Xilinx-Compile-Tools-Sourcery-CodeBench
|
109732f2f26162ede3ef8cb446f368862568b123
|
124903c7fd1309f6ceff8d5bc0bfae4737cad5e2
|
refs/heads/master
| 2021-07-01T15:25:23.316980
| 2017-09-22T06:07:42
| 2017-09-22T06:07:42
| 104,436,055
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
rd
|
cortex-a9.rd
|
// CORTEX Register Definition File
// Contains definitions for registers and fields that are not present in Cortex-A8
//
// The "cp" macro below is a built in and takes coprocessor arguments in the
// same order as they would appear in the assembly instructions. Example
//
// mrc p15, op1, <rd>, CRn, CRm, op2 -> cp(Op1, CRn, CRm, Op2)
//
// Pull in the shared Cortex-A8 definitions; this file only adds the deltas.
INCLUDE "cortex-a8.rd"
// Control register: cp15 with op1=0, CRn=c1, CRm=c0, op2=0; width 4 bytes.
REG=cp15_cntrl cp(0,1,0,0) COPROC15 4 // Control
// Field entries appear to be "name msb lsb" triples (e.g. "te 30 30" is a
// single-bit field at bit 30) -- TODO confirm against the tool's .rd grammar.
REG_FIELD=cp15_cntrl te 30 30, afe 29 29, tre 28 28, nmfi 27 27, ee 25 25, ha 17 17, rr 14 14, v 13 13, i 12 12, z 11 11, sw 10 10, c 2 2, a 1 1, m 0 0
//
// <eof>
|
ad050f9afc75e7dd5f7626c19122e6a4a30bda1b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/yuima/examples/lasso.Rd.R
|
795f7e0ae960d00516746d7ebe33abab35b7deb8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
lasso.Rd.R
|
# Auto-extracted example from the yuima package: adaptive LASSO estimation
# of the parameters of a two-dimensional stochastic differential equation.
library(yuima)
### Name: lasso
### Title: Adaptive LASSO estimation for stochastic differential equations
### Aliases: lasso
### Keywords: ts

### ** Examples

## multidimension case

# Symbolic 2x2 diffusion and drift matrices; entries are parameter names or
# expressions in the state variables, parsed by setModel() below.
diff.matrix <- matrix(c("theta1.1","theta1.2", "1", "1"), 2, 2)

drift.c <- c("-theta2.1*x1", "-theta2.2*x2", "-theta2.2", "-theta2.1")
drift.matrix <- matrix(drift.c, 2, 2)

# Two-dimensional SDE model in state variables (x1, x2).
ymodel <- setModel(drift=drift.matrix, diffusion=diff.matrix, time.variable="t",
                   state.variable=c("x1", "x2"), solve.variable=c("x1", "x2"))

# Simulate n observations on [0, n^(1/3)] from known "true" parameters.
# theta1.2 and theta2.2 are truly zero, so LASSO should shrink them away.
n <- 100
ysamp <- setSampling(Terminal=(n)^(1/3), n=n)
yuima <- setYuima(model=ymodel, sampling=ysamp)
set.seed(123)  # reproducible simulation

truep <- list(theta1.1=0.6, theta1.2=0,theta2.1=0.5, theta2.2=0)
yuima <- simulate(yuima, xinit=c(1, 1),
                  true.parameter=truep)

# Adaptive LASSO fit with box constraints, optimized via L-BFGS-B.
est <- lasso(yuima, start=list(theta2.1=0.8, theta2.2=0.2, theta1.1=0.7, theta1.2=0.1),
             lower=list(theta1.1=1e-10,theta1.2=1e-10,theta2.1=.1,theta2.2=1e-10),
             upper=list(theta1.1=4,theta1.2=4,theta2.1=4,theta2.2=4), method="L-BFGS-B")

# TRUE parameter values used in the simulation
unlist(truep)
# QMLE estimates
round(est$mle,3)
# LASSO estimates (zero parameters should be shrunk toward 0)
round(est$lasso,3)
|
f09ce59386da4bc2a9903bb5277195a6c97e5079
|
f1971a5cbf1829ce6fab9f5144db008d8d9a23e1
|
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/DiagrammeR/tests/testthat/test-set_get_node_attr.R
|
b4d5c351bdc18b9ddf2d0c7868840d91f0f872e1
|
[] |
no_license
|
harryprince/seamonster
|
cc334c87fda44d1c87a0436139d34dab310acec6
|
ddfd738999cd302c71a11aad20b3af2f4538624f
|
refs/heads/master
| 2021-01-12T03:44:33.452985
| 2016-12-22T19:17:01
| 2016-12-22T19:17:01
| 78,260,652
| 1
| 0
| null | 2017-01-07T05:30:42
| 2017-01-07T05:30:42
| null |
UTF-8
|
R
| false
| false
| 8,459
|
r
|
test-set_get_node_attr.R
|
context("Setting and getting node attributes")

test_that("setting node attributes is possible", {
  # Build a small directed graph: nodes A-F and 1-8, connected by 15 edges.
  graph <- create_graph()
  graph <- add_node(graph, node = "A")
  graph <- add_node(graph, node = "B")
  graph <- add_node(graph, node = "C")
  graph <- add_node(graph, node = "D")
  graph <- add_node(graph, node = "E")
  graph <- add_node(graph, node = "F")
  graph <- add_node(graph, node = "1")
  graph <- add_node(graph, node = "2")
  graph <- add_node(graph, node = "3")
  graph <- add_node(graph, node = "4")
  graph <- add_node(graph, node = "5")
  graph <- add_node(graph, node = "6")
  graph <- add_node(graph, node = "7")
  graph <- add_node(graph, node = "8")
  graph <- add_edge(graph, "A", "1")
  graph <- add_edge(graph, "B", "2")
  graph <- add_edge(graph, "B", "3")
  graph <- add_edge(graph, "B", "4")
  graph <- add_edge(graph, "C", "A")
  graph <- add_edge(graph, "1", "D")
  graph <- add_edge(graph, "E", "A")
  graph <- add_edge(graph, "2", "4")
  graph <- add_edge(graph, "1", "5")
  graph <- add_edge(graph, "1", "F")
  graph <- add_edge(graph, "E", "6")
  graph <- add_edge(graph, "4", "6")
  graph <- add_edge(graph, "5", "7")
  graph <- add_edge(graph, "6", "7")
  graph <- add_edge(graph, "3", "8")

  # Set attribute for named node "A"
  graph_set_a <-
    set_node_attrs(
      graph,
      nodes = "A",
      node_attr = "value",
      values = 5)

  # Expect that node "A" has node attr set for `value`
  # (the new attribute lands in column 4 of the internal node data frame,
  # stored as character "5")
  expect_equal(
    graph_set_a$nodes_df[
      which(graph_set_a$nodes_df$nodes == "A"), 4],
    "5")

  # Expect that node "A" has node attr set for `value`
  # (the same value is retrievable through the attribute cache)
  expect_equal(
    get_cache(
      cache_node_attrs(
        graph_set_a,
        node_attr = "value",
        nodes = "A")), "5")

  # Set attribute for named node "A" with a different value
  graph_set_a <-
    set_node_attrs(
      graph,
      nodes = "A",
      node_attr = "value",
      values = 8)

  # Expect that node "A" has node attr set for `value`
  expect_equal(
    get_cache(
      cache_node_attrs(
        graph_set_a,
        node_attr = "value",
        nodes = "A")), "8")

  # Select node "A"
  graph_select_a <-
    select_nodes(
      graph, nodes = "A")

  # Set attribute for selected node "A" via the with-selection variant
  graph_select_a <-
    set_node_attrs_ws(
      graph_select_a,
      node_attr = "value",
      value = 5)

  # Expect that node "A" has node attr set for `value`
  expect_equal(
    graph_select_a$nodes_df[
      which(graph_select_a$nodes_df$nodes == "A"), 4],
    "5")

  # Set attribute for all nodes (no `nodes` argument supplied)
  graph_set_all <-
    set_node_attrs(
      graph,
      node_attr = "value",
      values = 5)

  # Expect that all nodes have the attribute set
  expect_true(
    all(graph_set_all$nodes_df$value == "5"))

  # Select node "A" and apply a node attribute using that
  # node selection
  graph_node_selection <-
    graph %>% select_nodes(nodes = "A") %>%
    set_node_attrs_ws(node_attr = "value", value = 5)

  # Expect that node "A" has node attr set for `value`
  expect_equal(
    graph_node_selection$nodes_df[
      which(graph_node_selection$nodes_df$nodes == "A"), 4],
    "5")

  # Expect that getting the node attribute from a
  # selection works in the same way
  expect_equal(
    get_cache(
      cache_node_attrs_ws(
        graph_node_selection, node_attr = "value")),
    "5")

  # Get the node data frame from the graph as a separate object;
  # set_node_attrs() should also operate directly on an ndf.
  graph_node_df <- graph$nodes_df

  # Set attribute for named node "A" in the ndf
  graph_node_df_set_a <-
    set_node_attrs(
      graph_node_df,
      nodes = "A",
      node_attr = "value",
      values = 5)

  # Expect that node "A" has node attr set for `value`
  expect_equal(
    graph_node_df_set_a[
      which(graph_node_df_set_a$nodes == "A"), 4],
    "5")

  # Set attribute for named node "A" with a different value
  graph_node_df_set_a_node_attr_df <-
    set_node_attrs(
      graph_node_df_set_a,
      nodes = "A",
      node_attr = "value",
      values = 8)

  # Expect that node "A" in the ndf has node attr set for `value`
  expect_equal(
    graph_node_df_set_a_node_attr_df[
      which(graph_node_df_set_a_node_attr_df$nodes == "A"), 4],
    "8")

  # Set attribute for all nodes in the ndf
  graph_node_df_set_all <-
    set_node_attrs(
      graph_node_df,
      node_attr = "value",
      values = 5)

  # Expect that all nodes in the ndf will have the attribute set
  expect_true(all(graph_node_df_set_all$value == "5"))

  # Expect that getting the node attribute from a graph without
  # a selection will result in an error
  expect_error(cache_node_attrs_ws(graph))

  # Expect an error if the attribute selected is `nodes`
  # (the node ID column must not be overwritten)
  expect_error(
    set_node_attrs(
      graph, nodes = "A",
      node_attr = "nodes", values = "B")
  )

  # Expect an error when several values are supplied for a single node
  # (the original comment here wrongly repeated the `nodes`-attribute case)
  expect_error(
    set_node_attrs(
      graph, nodes = "A",
      node_attr = "value", values = c("1", "2"))
  )
})
test_that("setting edge attributes is possible", {
  # Build the same small directed graph as in the node-attribute test:
  # nodes A-F and 1-8, connected by 15 edges.
  graph <- create_graph()
  graph <- add_node(graph, node = "A")
  graph <- add_node(graph, node = "B")
  graph <- add_node(graph, node = "C")
  graph <- add_node(graph, node = "D")
  graph <- add_node(graph, node = "E")
  graph <- add_node(graph, node = "F")
  graph <- add_node(graph, node = "1")
  graph <- add_node(graph, node = "2")
  graph <- add_node(graph, node = "3")
  graph <- add_node(graph, node = "4")
  graph <- add_node(graph, node = "5")
  graph <- add_node(graph, node = "6")
  graph <- add_node(graph, node = "7")
  graph <- add_node(graph, node = "8")
  graph <- add_edge(graph, "A", "1")
  graph <- add_edge(graph, "B", "2")
  graph <- add_edge(graph, "B", "3")
  graph <- add_edge(graph, "B", "4")
  graph <- add_edge(graph, "C", "A")
  graph <- add_edge(graph, "1", "D")
  graph <- add_edge(graph, "E", "A")
  graph <- add_edge(graph, "2", "4")
  graph <- add_edge(graph, "1", "5")
  graph <- add_edge(graph, "1", "F")
  graph <- add_edge(graph, "E", "6")
  graph <- add_edge(graph, "4", "6")
  graph <- add_edge(graph, "5", "7")
  graph <- add_edge(graph, "6", "7")
  graph <- add_edge(graph, "3", "8")

  # Set edge attribute for edge "A" -> "1"
  graph_set_a_1 <-
    set_edge_attrs(
      graph,
      from = "A",
      to = "1",
      edge_attr = "value",
      values = 5)

  # Expect that edge "A" -> "1" has edge attr set for `value`
  # (the new attribute lands in column 4 of the internal edge data frame,
  # stored as character "5")
  expect_equal(
    graph_set_a_1$edges_df[
      which(graph_set_a_1$edges_df$from == "A" &
              graph_set_a_1$edges_df$to == "1"), 4],
    "5")

  # Get edge attribute for edge "A" -> "1" via the attribute cache
  graph_set_a_1_edge_attr <-
    get_cache(
      cache_edge_attrs(
        graph_set_a_1,
        edge_attr = "value",
        from = "A",
        to = "1"))

  # Expect that edge "A" -> "1" has edge attr set for `value`
  expect_equal(graph_set_a_1_edge_attr, "5")

  # Set attribute for named edge "A" -> "1" with a different value
  graph_set_a_1 <-
    set_edge_attrs(
      graph_set_a_1,
      from = "A",
      to = "1",
      edge_attr = "value",
      values = 8)

  # Expect that edge "A" -> "1" has edge attr set for `value`
  expect_equal(
    get_cache(
      cache_edge_attrs(
        graph_set_a_1,
        edge_attr = "value",
        from = "A",
        to = "1")),
    "8")

  # Select edge "A" -> "1"
  graph_select_a_1 <-
    select_edges(
      graph, from = "A", to = "1")

  # Set attribute for the selected edge "A" -> "1"
  # (the original comment said "selected node A", which was inaccurate)
  graph_select_a_1 <-
    set_edge_attrs_ws(
      graph_select_a_1,
      edge_attr = "value",
      value = 5)

  # Expect that edge "A" -> "1" has edge attr set for `value`
  expect_equal(
    get_cache(
      cache_edge_attrs(
        graph_select_a_1,
        edge_attr = "value",
        from = "A",
        to = "1")),
    "5")

  # Set attribute for all edges (no from/to supplied)
  graph_set_all <-
    set_edge_attrs(
      graph,
      edge_attr = "value",
      values = 5)

  # Expect that all edges have the attribute set
  expect_true(all(graph_set_all$edges_df$value == "5"))

  # Select edge "A" -> "1" and apply an edge attribute using that
  # edge selection
  graph_edge_selection <-
    graph %>%
    select_edges(from = "A", to = "1") %>%
    set_edge_attrs_ws(
      edge_attr = "value", value = 5)

  # Expect that edge "A" -> "1" has edge attr set for `value`
  expect_equal(
    graph_edge_selection$edges_df[
      which(graph_edge_selection$edges_df$from == "A" &
              graph_edge_selection$edges_df$to == "1"), 4],
    "5")
})
|
f46f367cacbe0bb2bf538f76709d5330bb8fd8af
|
d4af0301237aec6415f09101db5c6198a5e0bbf4
|
/bankruptcy_surv.R
|
20a5557fb25a526db36d07db9f5e7cdcfd85fe0e
|
[] |
no_license
|
vshah5/Survival-Analysis-of-Companies
|
db32b62b47fa1824c96df63f74c8ffdfc2c8dc92
|
4e9dbc6d9162d8108b7c930c374ac27ac8dab4ad
|
refs/heads/main
| 2023-05-04T16:46:18.623228
| 2021-05-01T17:11:22
| 2021-05-01T17:11:22
| 363,464,762
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
bankruptcy_surv.R
|
library(survival)

# Survival analysis of time-to-bankruptcy for companies.

# Survival Curves
# Exponential check: a Kaplan-Meier fit whose cumulative hazard is plotted
# below to see whether a constant-hazard (exponential) model is plausible.
banksurv <- read.csv("C:/Users/valay/OneDrive/Desktop/Ivey/Winter Term 2021/Big Data Analytics/Class 9/Assignment/Bankruptcy Data - Haxo Corporation.csv")
out<-survfit(Surv(Years_to_Bankrupt, Bankruptcy) ~ 1, data=banksurv)
y <- out$surv
y
# Cumulative hazard H(t) = -log(S(t))
y<- -log(y)
t<-out$time
plot(t,y)
# This gives the expected number of failures over the time t.
# The plot should be a straight line; otherwise the relationship is not
# exponential. After exponential, a Weibull model would be the next step
# (Weibull reduces to exponential when its shape parameter gamma = 1).

# NOTE(review): the original comment labelled the model below "the weibull
# model code", but coxph() fits a semi-parametric Cox proportional hazards
# model, not a Weibull model (a Weibull fit would use
# survreg(..., dist = "weibull")).
cox.out<-coxph(Surv(Years_to_Bankrupt, Bankruptcy) ~ Net_Debt_EBITDA +Profit_Margin +Current_Ratio +Debt_to_Cash_Flow_From_Ops +EBITDA, data=banksurv)
summary(cox.out)
# exp(coef) in the summary is the hazard ratio for each covariate: e.g. an
# exp(coef) of 0.7 means a one-unit increase in that covariate lowers the
# bankruptcy hazard to 70% of its previous value (a 30% reduction).
# (The original closing comments described a "financial aid" covariate from
# a different, recidivism example and did not match this model.)
|
5ba3d82b4f563eb629c28adb07df9d6b198b2fcd
|
144e1b215a8546d820f929055138b06eb67eda74
|
/stock_forecast.R
|
6b8e98bd5efabef0b6367b361c2a0bd78c43c3a2
|
[] |
no_license
|
Mentalaborer/TradeNow
|
9acdb1bd5a9e0822fded0ec89aab9f80771845cb
|
7b82093f0324ab2a314216fa950f7a8e42c7a5e2
|
refs/heads/master
| 2020-11-26T01:04:15.277242
| 2020-07-05T20:52:12
| 2020-07-05T20:52:12
| 228,915,006
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
stock_forecast.R
|
#### DEPRECATED #####
## Purpose: explore past performance, inspect autocorrelation, and
## forecast the adjusted stock price h days into the future.
library(zoo)
library(tseries)
library(urca)
library(PerformanceAnalytics)
library(quantmod)
library(xts)
# The following were used below but never loaded in the original script
# (tseries/quantmod/PerformanceAnalytics were also loaded twice; deduplicated):
library(forecast)  # auto.arima(), forecast(), ggAcf()
library(sweep)     # sw_sweep()
library(tibble)    # add_column()
library(dplyr)     # %>%, select(), filter()
library(ggplot2)   # ggplot() and friends

# global options:
lag.max <- 10            # days to look back
h <- 5                   # days to predict ahead (the forecast date axis below is derived from h)
historical_prices <- 30  # number of days of historical data to plot

# load symbols
# symbols is from the fetch_data script but should source directly, OR
data_env <- new.env()

# call a specific symbol directly
focal_stock <- getSymbols("FB", src = "yahoo", from = "2020-01-01",
                          auto.assign = FALSE, return.class = "xts", env = data_env)
colnames(focal_stock) <- paste("focal",
                               c("Open", "High", "Low", "Close", "Volume", "Adjusted"),
                               sep = "_")

# select adjusted column only
focal_stock_adjusted <- focal_stock$focal_Adjusted
plot(focal_stock_adjusted, col="darkred", main="focal_stock Price Series")

# Stationarity testing (augmented Dickey-Fuller on the levels)
StationarityTest <- ur.df(focal_stock_adjusted,type="none",selectlags = "AIC")
summary(StationarityTest)

# Stationarity testing on first differences
Stationarity_Diff <- ur.df(diff(focal_stock_adjusted)[2:dim(focal_stock_adjusted)[1],], type = "none", selectlags = "AIC")
summary(Stationarity_Diff)

# Plot the series on first differences
D.focal_stock_adjusted <- focal_stock_adjusted-lag(focal_stock_adjusted)
plot(D.focal_stock_adjusted, col="red4", main = "focal_stock_adjusted On First Differences")
ggAcf(D.focal_stock_adjusted, lag.max = lag.max) + theme_bw()

# Plot Autocorrelation
# How is the price today related to the past?
# Are the correlations large and positive for several lags? When decay?
acf(focal_stock_adjusted, lag.max = lag.max, plot = TRUE)

## AUTOREGRESSIVE MODEL using AUTO ARIMA
# auto.arima() searches over combinations of the AR (p), integration (d)
# and MA (q) orders and picks a good-fitting model automatically.
fitted_arima <- auto.arima(focal_stock_adjusted)
arima_forecast <- forecast(fitted_arima, h)
arima_sweep <- sw_sweep(arima_forecast)

# residual analysis: How well does model fit the data?
ts.plot(focal_stock_adjusted)
ar_focal_fitted <- focal_stock_adjusted - residuals(arima_forecast)
points(ar_focal_fitted, type = "l", col = "red", lty = 2)

# prep for visualization of predictions: extend the historical date index by
# h forecast days. (The original hard-coded +1 ... +5 and had to be edited by
# hand whenever h changed; deriving it from h removes that fragility.)
last_date <- index(focal_stock_adjusted)[length(focal_stock_adjusted)]
dates <- c(index(focal_stock_adjusted), last_date + seq_len(h))
arima_sweep <- add_column(arima_sweep, dates)

# Keep only the last `historical_prices` rows (plus the forecast) for display
arima_sweep_display<- arima_sweep[(dim(arima_sweep)[1]-historical_prices):dim(arima_sweep)[1], ]

# Visualizing the forecast
arima_sweep_display %>%
  ggplot(aes(x = dates, y = value, color = key)) +
  ## Prediction intervals
  geom_ribbon(aes(ymin = lo.95, ymax = hi.95),
              fill = "#D5DBFF", color = NA, size = 0) +
  geom_ribbon(aes(ymin = lo.80, ymax = hi.80, fill = key),
              fill = "#596DD5", color = NA, size = 0, alpha = 0.8) +
  ## Actual & Forecast
  geom_line(size = 1) +
  ## Aesthetics
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  scale_x_date(date_breaks = "2 days", date_labels = "%b %d") +
  labs(title = "focal_stock_adjusted 5 Days Ahead ARIMA Price Forecast", x = "", y = "")

# print predictions only
arima_sweep_display %>%
  select(key, value, lo.95, hi.95, dates) %>%
  filter(key == 'forecast')
|
8f156a7a5ed778da9c565a18d7125e49136974c1
|
38d127b3e63855d9897eb82f76f9def1fc381e6e
|
/R/createOutcomeGroup.R
|
3774d6daf068158e2ba27219e953f6e9dca37926
|
[] |
no_license
|
erikpal/bRush
|
ed2ac601050d82eac15211dacc3047923df413a4
|
42f47fe87ee982e9207b2f9ec6577daa0a01e764
|
refs/heads/master
| 2023-05-11T17:36:19.160006
| 2023-05-02T15:57:42
| 2023-05-02T15:57:42
| 88,922,475
| 15
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,846
|
r
|
createOutcomeGroup.R
|
#' Create a new outcome group
#'
#' Create a new subgroup inside an existing outcome group, in either a
#' course or an account context.
#' @param ID Account or course ID of the outcome group to create the outcome group in
#' @param groupID Outcome group id to create the outcome group in
#' @param type Character of "course" or "account" (default "account")
#' @param title Character of the title of the outcome group
#' @param description Character of the description of the outcome group
#' @param vendor_guid A custom GUID for the learning standard group
#' @param server Test, beta, production, or other name in R.environ OR full url of server
#' @param ... Optional page options to pass to processRequest
#' @return The result of processRequest for the created subgroup.
#' @export
createOutcomeGroup <- function(ID, groupID,
                               type = "account",
                               title,
                               description = "",
                               vendor_guid = NULL,
                               server = "test", ...){
  # Fail fast on an unsupported context; previously any other `type` silently
  # left the literal "TYPE" placeholder in the URL and produced a bad request.
  type <- match.arg(type, c("account", "course"))

  url <- loadURL(server)

  # Fill the path template, substituting the most specific placeholder first
  # so the "ID" inside "groupID" can never be clobbered.
  url$path <- "/api/v1/TYPE/ID/outcome_groups/groupID/subgroups"
  url$path <- sub("groupID", groupID, url$path)
  url$path <- sub("ID", ID, url$path)
  url$path <- sub("TYPE",
                  if (type == "course") "courses" else "accounts",
                  url$path)

  ##Build the JSON for the body of the POST.
  # jsonlite is called through :: below, so the former mid-function
  # require(jsonlite) (which attached the whole package) is unnecessary.
  body <- list(title = title,
               description = description,
               vendor_guid = vendor_guid
               )
  ##Convert to JSON
  body <- jsonlite::toJSON(body, auto_unbox = TRUE, POSIXt = "ISO8601")

  ##Pass the url to the request processor
  results <- processRequest(url, body, method = "CREATE", ...)
  return(results)
}
|
5404582608984ad78b47a4056255d2778e9c2509
|
2c3f585dbaa3b5f17faa63921d48877b9e129f58
|
/plot3.R
|
8237e07ae678656c2decd89185485649e708c223
|
[] |
no_license
|
tomtec77/ExData_Plotting1
|
d8a9cbb778607ce7b8ae0558886a1d7b0bfde986
|
b8b525c80dc1b52edc602f84f0cc13d06e1cc157
|
refs/heads/master
| 2021-01-18T18:51:38.728696
| 2015-05-10T20:38:42
| 2015-05-10T20:38:42
| 35,373,144
| 0
| 0
| null | 2015-05-10T13:58:35
| 2015-05-10T13:58:34
| null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
plot3.R
|
# Coursera Exploratory Data Analysis course
# Course Project 1 - plot 3
#
# Draws the three energy sub-metering series against date/time and saves
# the result to plot3.png.

# The prepared data lives in data/exdata1.RData (built by prepare_data.R);
# rebuild it first if it is not there yet.
if (!file.exists("data/exdata1.RData")) {
  source("prepare_data.R")
}
load("data/exdata1.RData")

# Force an English locale so date labels on the axis are not localized.
Sys.setlocale(locale="en_US.UTF-8")

# Send the plot to a 480x480 PNG file.
png(filename="plot3.png",
    width=480,
    height=480)

# One line per sub metering, in the reference plot's order and colours.
sub.cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
line.colours <- c("black", "red", "blue")

# Initialise an empty plot with the right extent, then layer each series.
plot(df$Datetime, df[[sub.cols[1]]], type="n",
     xlab="",
     ylab="Energy sub metering")
for (i in seq_along(sub.cols)) {
  lines(df$Datetime, df[[sub.cols[i]]], col=line.colours[i])
}

# Legend identifying the three series.
legend("topright", col=line.colours, lty=1, legend=sub.cols)

# Close the graphics device
dev.off()
|
062f50485e07d9c59d0e7ea4967f95e18fcca935
|
20b1b50f86dd29003c560c2e375086946e23f19a
|
/pop_gen/ihs_dgrp.R
|
02875167eb2f0793d71642feef94d3b34a518977
|
[] |
no_license
|
ericksonp/diapause-scripts-clean
|
cfc3ed0433114ee756019105b364f40e03b2419d
|
c93a052e9d63b9f7c60f7c18c1ad004385b59351
|
refs/heads/master
| 2021-07-09T07:16:03.326322
| 2020-07-31T17:00:37
| 2020-07-31T17:00:37
| 170,798,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
r
|
ihs_dgrp.R
|
# EHH scan (precursor to iHS) for the DGRP data using the rehh package:
# first build rehh map files from IMPUTE-format legends, then run scan_hh()
# per chromosome arm and combine the results.
library(rehh)
library(data.table)
library(foreach)
library(doMC)
registerDoMC(20)

#fix input files
# Build a rehh map file (ID, chr, pos, allele0, allele1) for each arm from
# the IMPUTE .legend files; alleles are coded 0/1 to match the .hap files.
# NOTE(review): both foreach loops below use %do% (sequential), so
# registerDoMC(20) has no effect here; parallelism comes only from
# scan_hh(..., threads = 20). Confirm %do% vs %dopar% is intentional.
foreach(chr=c("2L", "2R", "3L", "3R", "X"))%do%{
  inp<-fread(paste0("/mnt/pricey_2/priscilla/dgrp2.filtered.", chr, ".impute.legend"), header=T)
  # chromosome arm is the prefix of the variant ID (e.g. "2L_12345")
  inp[,chr:=tstrsplit(ID, split="_")[[1]]]
  inp[,allele0:=0]
  inp[,allele1:=1]
  write.table(inp[,.(ID, chr, pos, allele0, allele1)], paste0("/mnt/pricey_2/priscilla/dgrp2.", chr, ".inp"), quote=F, sep=" ", row.names=F, col.names=F)
}

# Run the EHH scan per chromosome arm (dropping haplotypes/SNPs with <90%
# genotyping) and collect the per-arm results into one table.
a<-foreach(chr=c("2L", "2R", "3L", "3R", "X"))%do%{
  hap<-data2haplohh(hap_file=paste0("dgrp2.filtered.", chr,".impute.hap"),map_file=paste0("dgrp2.", chr, ".inp"),min_perc_geno.hap=90, min_perc_geno.snp=90, recode.allele=TRUE, haplotype.in.columns=TRUE)
  res<-scan_hh(hap, threads=20)
  return(as.data.table(res))
}
a<-rbindlist(a)
write.table(a, "/mnt/pricey_2/priscilla/ihs.txt", quote=F, sep="\t", row.names=F)
#move ihs.txt to rivanna to work there
|
9bc3457caf4d49a5f92a092afd4fab9b65a34a32
|
a1552ffee0cc530d7a6270334e9385b8b58aff81
|
/script.R
|
402b0e785b02c400ae4d3d9971a0fb1704f1ac06
|
[] |
no_license
|
vprayagala/Handling-Class-Imbalance
|
c8d9d93f893ea190d780e1216484966c98d1a0c5
|
1bf9357f81a7c66443d483a4fdf619426f33c48e
|
refs/heads/master
| 2020-03-12T11:08:39.442604
| 2018-04-22T18:27:14
| 2018-04-22T18:27:14
| 130,589,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,497
|
r
|
script.R
|
#Credit Card Fraud Detection
###########################################################################
#Below are the methods used to treat imbalanced datasets:
#
#1.Undersampling
#2.Oversampling
#3.Synthetic Data Generation
#4.Cost Sensitive Learning
#We will be working with Synthetic Data Generation technique in this script
#ROSE (Random OverSampling Example) and SMOTE from DMwR
###########################################################################

#Import the Required Libraries
library(data.table)
library(C50)
library(DMwR)
library(caret)
library(ROCR)
library(pROC)
library(ROSE)

#Checking the working directories
# NOTE(review): rm(list = ls()) and a hard-coded setwd() inside a script are
# anti-patterns: they wipe the user's session and only work on one machine.
rm(list=ls(all=T))
getwd()
setwd("/Users/Saatwik/Documents/Kaggle/CreditCardFraud/")
getwd()

#Read Data
#Time and Amount are raw variables, V1 to V28 are principal components (PCA)
data<-fread("creditcard.csv", stringsAsFactors = F, sep = ",", header =T)

#Random check of data
dim(data)
str(data)
sum(is.na(data))

#Class distribution (heavily imbalanced; fraud is the rare class)
prop.table(table(data$Class))

#Split the data into into train and test (70/30), check the class distribution
set.seed(7)
rows<-sample(1:nrow(data),0.7*nrow(data))
train <- data[rows,]
test <- data[-rows,]

#Check Class Distribution in each split
prop.table(table(train$Class))
prop.table(table(test$Class))

#change target variable into factor for classification.
train$Class <- as.factor(train$Class)
test$Class <- as.factor(test$Class)

####################################################################
#Baseline: C5.0 on the raw (imbalanced) training data, before SMOTE
model<-C5.0(Class~.,data=train)
predicted<-predict(model,test)
caret::confusionMatrix(test$Class, predicted)
roc.curve(test$Class, predicted, plotit = T,col="blue")

####################################################################
#DMwR package and use Synthetic Minority Oversampling Technique
#(SMOTE)
# NOTE(review): SMOTE is applied to the *test* set as well. Evaluating on a
# synthetically rebalanced test set inflates apparent performance; normally
# only the training data are resampled. Confirm this is intended.
set.seed(7)
smot_train<- SMOTE(Class~., data = train, perc.over = 900, k = 5, perc.under = 850)
smot_test <- SMOTE(Class~., data = test, perc.over = 900, k = 5, perc.under = 850)
#datatable(smot_test)
prop.table(table(smot_train$Class))
prop.table(table(smot_test$Class))

smot_model<-C5.0(Class~.,data=smot_train)
p <- predict(smot_model, smot_test)

#Accuracy, Precision and Recall.
caret::confusionMatrix(smot_test$Class, p)

#Search over probability cutoffs for the one maximizing AUC
# NOTE(review): the C5.0 fit inside this loop is identical on every
# iteration (same data, same seed); fitting once before the loop and only
# varying the cutoff would give the same result much faster.
c <- c()
f <- c()
j <- 1
for(i in seq(0.01, 0.8 , 0.01)){
  set.seed(7)
  fit <- C5.0(Class~., data = smot_train)
  pre <- predict(fit, smot_test,type="prob")[,2]
  pre <- as.numeric(pre > i)
  auc <- roc(smot_test$Class, pre)
  c[j] <- i
  f[j] <- as.numeric(auc$auc)
  j <- j + 1
}
df <- data.frame(c = c, f = f)
p <- df$c[which.max(df$f)]
p

#Score the SMOTE C5.0 model with the cutoff (p) found above
#(the original comment said "randomforst", but the model is C5.0)
pre <- predict(smot_model, smot_test,type="prob")[,2]
#perf<-performance(prediction(abs(pre),smot_test$Class),"tpr","fpr")
#plot(perf,col="red")
roc.curve(smot_test$Class, pre, plotit = T,col="red",add.roc=T)

##Model performance
caret::confusionMatrix(smot_test$Class, as.factor(as.numeric(pre>p)))

###########################################################################
#Using ROSE package to deal with imbalance
#(same caveat as above: ROSE is also applied to the test set here)
rose_train <- ROSE(Class ~ ., data = train, seed = 1)$data
table(rose_train$Class)
rose_test <- ROSE(Class ~ ., data = test, seed = 1)$data
table(rose_test$Class)

rose_model<-C5.0(Class~.,data=rose_train)
p <- predict(rose_model, rose_test)

#Accuracy, Precision and Recall.
caret::confusionMatrix(rose_test$Class, p)
roc.curve(rose_test$Class, p, plotit = T,col="green",add.roc = T)
|
142dc6bf541745afcf2d10db2c1bd151d1bdee50
|
06029de83ad273a8e9a86f9e7a0ef8dadaecbac9
|
/man/bcmeta.Rd
|
ac1053e5399fa3853c5f6bdf3bf7bafe4eeca9c2
|
[] |
no_license
|
cran/jarbes
|
f5a077316096009284c677fc7d2e6a4ffaa717d7
|
bb4a2132b40f4f34635689e1227794a10cab7642
|
refs/heads/master
| 2022-05-25T11:42:52.062775
| 2022-03-10T10:00:12
| 2022-03-10T10:00:12
| 145,906,980
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,020
|
rd
|
bcmeta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bcmeta.R
\name{bcmeta}
\alias{bcmeta}
\title{Bias-Corrected Meta-Analysis for Combining Studies of Different Types and Quality}
\usage{
bcmeta(
data,
mean.mu = 0,
sd.mu = 10,
scale.sigma.between = 0.5,
df.scale.between = 1,
B.lower = 0,
B.upper = 10,
a.0 = 1,
a.1 = 1,
nu = 0.5,
nu.estimate = FALSE,
b.0 = 1,
b.1 = 2,
nr.chains = 2,
nr.iterations = 10000,
nr.adapt = 1000,
nr.burnin = 1000,
nr.thin = 1,
be.quiet = FALSE,
r2jags = TRUE
)
}
\arguments{
\item{data}{A data frame with at least two columns with the following names:
1) TE = treatment effect,
2) seTE = the standard error of the treatment effect.}
\item{mean.mu}{Prior mean of the overall mean parameter mu, default value is 0.}
\item{sd.mu}{Prior standard deviation of mu, the default value is 10.}
\item{scale.sigma.between}{Prior scale parameter for scale gamma distribution for the
precision between studies. The default value is 0.5.}
\item{df.scale.between}{Degrees of freedom of the scale gamma distribution for the precision between studies.
The default value is 1, which results in a Half Cauchy distribution for the standard
deviation between studies. Larger values e.g. 30 corresponds to a Half Normal distribution.}
\item{B.lower}{Lower bound of the bias parameter B, the default value is 0.}
\item{B.upper}{Upper bound of the bias parameter B, the default value is 10.}
\item{a.0}{Parameter for the prior Beta distribution for the probability of bias. Default value is a0 = 1.}
\item{a.1}{Parameter for the prior Beta distribution for the probability of bias. Default value is a1 = 1.}
\item{nu}{Parameter for the Beta distribution for the quality weights. The default value is nu = 0.5.}
\item{nu.estimate}{If TRUE, then we estimate nu from the data.}
\item{b.0}{If nu.estimate = TRUE, this parameter is the shape parameter of the prior Gamma distribution for nu.}
\item{b.1}{If nu.estimate = TRUE, this parameter is the rate parameter of the prior Gamma distribution for nu.
Note that E(nu) = b.0/b.1 and we need to choose b.0 << b.1.}
\item{nr.chains}{Number of chains for the MCMC computations, default 2.}
\item{nr.iterations}{Number of iterations after adapting the MCMC, default is 10000. Some models may need more iterations.}
\item{nr.adapt}{Number of iterations in the adaptation process, default is 1000. Some models may need more iterations during adaptation.}
\item{nr.burnin}{Number of iterations discarded for the burn-in period, default is 1000. Some models may need a longer burn-in period.}
\item{nr.thin}{Thinning rate, it must be a positive integer, the default value 1.}
\item{be.quiet}{Do not print warning message if the model does not adapt. The default value is FALSE. If you are not sure about the adaptation period choose be.quiet=TRUE.}
\item{r2jags}{Which interface is used to link R to JAGS (rjags and R2jags), default value is R2Jags=TRUE.}
}
\value{
This function returns an object of the class "bcmeta". This object contains the MCMC
output of each parameter and hyper-parameter in the model and
the data frame used for fitting the model.
}
\description{
This function performers a Bayesian meta-analysis to jointly
combine different types of studies. The random-effects follows a finite
mixture of normals.
}
\details{
The results of the object of the class bcmeta can be extracted with R2jags or with rjags. In addition a summary, a print and a plot functions are
implemented for this type of object.
}
\examples{
\dontrun{
library(jarbes)
# Example ppvipd data
data(ppvipd)
}
}
\references{
Verde, P. E. (2017) Two Examples of Bayesian Evidence Synthesis with the Hierarchical Meta-Regression Approach. Chap.9, pag 189-206. Bayesian Inference, ed. Tejedor, Javier Prieto. InTech.
Verde, P.E. (2021) A Bias-Corrected Meta-Analysis Model for Combining Studies of Different Types and Quality. Biometrical Journal; 1–17.
}
|
8523a0f3725fbac48e3aff652694a75f96e38f31
|
1c9c172db3320dcc371f92dc304301432d38be4c
|
/miscellaneous/dzc_messing.R
|
df524f9528438bdc5284cc7ff56c83e8f3d579b8
|
[] |
no_license
|
clarkejames944/fish-data-analysis
|
8601897b743ac0871702375031d571b6e9178125
|
281c2eaf970f5e0fb60cb4f508a2a1744b5c6ae4
|
refs/heads/master
| 2020-04-10T23:35:28.060075
| 2019-08-14T14:08:14
| 2019-08-14T14:08:14
| 161,358,998
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,265
|
r
|
dzc_messing.R
|
library(tidyverse)
## 0. preamble ----
# box-cox transformation with translation
# Two-parameter Box-Cox transformation (Box & Cox, 1964):
#   l1 == 0 : log(y + l2)
#   l1 != 0 : ((y + l2)^l1 - 1) / l1
#
# Fix: the power branch previously used (y - l2), inconsistent with the log
# branch -- as l1 -> 0, ((y - l2)^l1 - 1)/l1 tends to log(y - l2), not
# log(y + l2), so the transform was discontinuous in l1. Both branches now
# shift by +l2, matching the standard two-parameter definition. (No effect
# on this script, which only ever calls bct() with l2 = 0.)
#
# y  : numeric vector to transform
# l1 : power parameter lambda1 (0 selects the log branch)
# l2 : shift/translation parameter lambda2 (default 0)
# returns a numeric vector the same length as y
bct <- function(y, l1, l2 = 0) {
  if (l1 == 0)
    log(y + l2)
  else
    ((y + l2) ^ l1 - 1) / l1
}
# pick the subset we are working with
# NOTE(review): EBSf is not defined in this script -- it must exist in the
# calling environment (presumably a data frame of otolith measurements with
# columns Year, oto_size, prev and maturity). Confirm where it is created.
site_sex_data <- EBSf

# common transformations
# NOTE(review): this trans_data (with year_f) is immediately overwritten by
# the Box-Cox version below; year_f is only referenced in the commented-out
# random-effect term of the gam formula.
trans_data <- site_sex_data %>%
  mutate(year_f = factor(Year))

## 2. experiment with b-c transformations ----
# Apply the Box-Cox transform (lambda1 = 1.6, no shift) to both the current
# and previous otolith sizes.
lam <- 1.6
trans_data <- site_sex_data %>%
  mutate(
    oto_size = bct(oto_size, l1 = lam, l2 = 0),
    prev = bct(prev, l1 = lam, l2 = 0)
  )

# fit a flexible growth model *by maturation status
# N.B. this is just an arbitrary age threshold (>5 == 'adult')
# NOTE(review): gam(), gam.check() and s() come from mgcv, which is not
# attached here -- add library(mgcv) or confirm it is loaded by the caller.
grow_mod <-
  gam(
    oto_size ~ prev + s(prev, k = 50, by = maturity),
    #+ s(year_f, bs = "re"),
    family = gaussian(link = "identity"),
    data = trans_data
  )

# check the choice of k (needs to be higher than the default)
gam.check(grow_mod)

# quick look at the terms and R^2
summary(grow_mod)

# extract fitted values and (r)esponse (i.e. raw) residuals
trans_data <- trans_data %>%
  mutate(
    preds = predict(grow_mod, type = "response"),
    resid_r = residuals(grow_mod, type = "response")
  )

# now examine the empirical mean-variance relationship
# (log squared residuals vs fitted values; smooth vs straight-line fits)
ggplot(trans_data, aes(x = preds, y = log(resid_r ^ 2))) +
  geom_point(aes(colour = maturity), size = 1, alpha = 0.1) +
  geom_smooth(method = 'gam',
              formula = y ~ s(x, k = 100), se = FALSE) +
  geom_smooth(method = 'lm',
              se = FALSE, linetype = 2)

# calculate scaled residuals based on m-v relationship
var_mod <- gam(log(resid_r ^ 2) ~ s(preds), data = trans_data)
trans_data <- mutate(trans_data,
                     sc_resid = resid_r / sqrt(exp(predict(var_mod))))

# look at the distributional assumps. w/ scaled resids
with(trans_data, car::qqp(sc_resid[maturity == "juvenile"])) # less 'normal'
with(trans_data, car::qqp(sc_resid[maturity == "adult"])) # more 'normal'

# plot the growth data and the fitted values
ggplot(trans_data, aes(x = prev)) +
  geom_point(aes(y = oto_size, colour = maturity),
             size = 1, alpha = 0.1) +
  geom_point(aes(y = preds), size = 0.25, colour = "steelblue") +
  ylab("Box-Cox of otolith size in the following year") + xlab("Box-Cox of otolith size")+
  theme_classic()
|
9a0e3df77945dac3ed82033ca9b374daceaff978
|
e8eb7b4f65772ccab82158aaf3d119fe3b919f83
|
/PowerSupplyUtilities/man/create_bing_event_log.Rd
|
e8ab2bbd64473788df251554953a50ce728b0d66
|
[] |
no_license
|
csalvato/r-adwords-analysis
|
e5429e848840bb2efcc40825a18d3ea20e456b16
|
beaf150c82087b53bb87d6ec87caa0ec405d9e8e
|
refs/heads/master
| 2020-03-30T14:22:22.864168
| 2016-07-22T15:21:37
| 2016-07-22T15:21:37
| 151,314,728
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,065
|
rd
|
create_bing_event_log.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_bing_event_log.r
\name{create_bing_event_log}
\alias{create_bing_event_log}
\title{Create Keywords Event Log (elog) for Bing}
\usage{
create_bing_event_log(from = Sys.Date(), to = Sys.Date())
}
\arguments{
\item{from}{Start date in either format <"yyyy-mm-dd"> or <yyyymmdd>. Inclusive.}
\item{to}{End date in either format <"yyyy-mm-dd"> or <yyyymmdd>. Inclusive.}
}
\value{
A data frame with all events (Bing clicks, Transactions and Referrals).
}
\description{
Creates an event log (elog) of all transactions, referrals and keyword activity. An event log
is simply a time-series log of events that happen. When a certain row (observation) does not include
a particular variable, that variable is marked as NA. For example, a Bing keyword observation will
have information about the number of clicks, but no revenue for the transaction. Revenue data, instead
is pulled in from the order application's data.
}
\examples{
create_bing_event_log(from=20150101, to=20151231)
}
|
dfce4b6e53a0a28eaacbc8adb56e49a3e85bc85b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RVsharing/R/RVgene.R
|
1fed9dcd546b7d87947ce0659ea6bdb97fce8a43
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,600
|
r
|
RVgene.R
|
RVgene = function(ped.mat,ped.listfams,sites,fams,pattern.prob.list,nequiv.list,N.list,type="alleles",minor.allele.vec,precomputed.prob=list(0))
{
  # Gene-level rare-variant sharing analysis across families.
  #
  # ped.mat : pedigrees coded as in a ped file
  # ped.listfams : list of pedigree objects, one object for each pedigree in ped.mat
  # sites : vector of the variant sites for each family in the fams vector
  # fams : (optional) vector of families carrying the variants listed in the
  #   corresponding position in sites; if missing, carrier families are
  #   detected from ped.mat
  # pattern.prob.list : per-family vectors of sharing-pattern probabilities
  # nequiv.list : per-family counts of equivalent patterns (defaults to 1)
  # N.list : per-family numbers of carriers for each sharing pattern
  # type : "alleles" = genotype columns in ped.mat; otherwise count columns
  # minor.allele.vec : vector of the minor alleles at each site in the sites
  #   vector (defaults to allele 2)
  # precomputed.prob : optional named list of precomputed sharing probabilities
  #
  # Returns list(p, pall): the sharing p-value over informative families, and
  # the probability that all affected carriers share the rare variant.
  if (missing(nequiv.list))
  {
    nequiv.list = rep(1, length(pattern.prob.list))
    names(nequiv.list) = names(pattern.prob.list)
  }
  if (type == "alleles")
  {
    if (missing(minor.allele.vec)) minor.allele.vec = rep(2, length(sites))
    if (length(sites) != length(minor.allele.vec)) stop ("Lengths of sites and minor.allele.vec vectors differs.")
  }
  if (missing(fams))
  {
    # Detect, for each site, the families with an affected carrier
    fams.vec = sites.alongfams = NULL
    if (type == "alleles")
    {
      minor.allele.alongfams = NULL
      for (i in seq_along(sites))
      {
        # affected (col 6 == 2) and carrying the minor allele on either chromosome
        fams.site = unique(ped.mat[ped.mat[, 6] == 2 & (ped.mat[, 5 + 2 * sites[i]] == minor.allele.vec[i] | ped.mat[, 6 + 2 * sites[i]] == minor.allele.vec[i]), 1])
        fams.vec = c(fams.vec, fams.site)
        sites.alongfams = c(sites.alongfams, rep(sites[i], length(fams.site)))
        minor.allele.alongfams = c(minor.allele.alongfams, rep(minor.allele.vec[i], length(fams.site)))
      }
    }
    else
    {
      for (i in seq_along(sites))
      {
        fams.site = unique(ped.mat[ped.mat[, 6] == 2 & ped.mat[, 6 + sites[i]] > 0, 1])
        fams.vec = c(fams.vec, fams.site)
        sites.alongfams = c(sites.alongfams, rep(sites[i], length(fams.site)))
      }
    }
  }
  else
  {
    if (length(sites) != length(fams)) stop ("Lengths of fams and sites vectors differs.")
    fams.vec = fams
    sites.alongfams = sites
    if (type == "alleles") minor.allele.alongfams = minor.allele.vec
  }
  fams.vec = as.character(fams.vec)
  # Validate that every carrier family is present in all per-family lists.
  # (Fix: conditions were `length(missing.fams>0)`, i.e. the length of a
  # logical vector, instead of comparing the length to 0.)
  missing.fams = fams.vec[!(fams.vec %in% names(ped.listfams))]
  if (length(missing.fams) > 0) stop ("Families ", missing.fams, " not in ped.listfams.")
  missing.fams = fams.vec[!(fams.vec %in% names(pattern.prob.list))]
  if (length(missing.fams) > 0) stop ("Families ", missing.fams, " not in pattern.prob.list.")
  missing.fams = fams.vec[!(fams.vec %in% names(N.list))]
  if (length(missing.fams) > 0) stop ("Families ", missing.fams, " not in N.list.")
  famu = unique(fams.vec)
  famRVprob = famNcarriers = rep(NA, length(famu))
  names(famRVprob) = names(famNcarriers) = famu
  # Loop over the families: keep, per family, the variant with the lowest
  # sharing probability and its number of carriers
  for (f in seq_along(fams.vec))
  {
    # get carriers list
    if (type == "alleles")
      carriers = extract_carriers(ped.mat, sites.alongfams[f], fams.vec[f], type = type, minor.allele.alongfams[f])
    else carriers = extract_carriers(ped.mat, sites.alongfams[f], fams.vec[f], type = type)
    # Computation of RV sharing probability
    if (length(carriers) > 0)
    {
      if (fams.vec[f] %in% names(precomputed.prob))
        tmp = precomputed.prob[[fams.vec[f]]][length(carriers)]
      else tmp = RVsharing(ped.listfams[[fams.vec[f]]], carriers = carriers)@pshare
      # If the RV has lower sharing probability, we keep it for this family
      if (is.na(famRVprob[fams.vec[f]]) || tmp < famRVprob[fams.vec[f]])
      {
        famRVprob[fams.vec[f]] = tmp
        famNcarriers[fams.vec[f]] = length(carriers)
      }
    }
  }
  # Informative families: those with at least one carrier variant
  fam.info = names(famRVprob)[!is.na(famRVprob)]
  nfam.info = length(fam.info)
  if (nfam.info == 0)
  {
    # No informative family
    p = pall = 1
  }
  else if (nfam.info == 1)
  {
    # One informative family: p-value straight from its pattern distribution
    p = sum((nequiv.list[[fam.info]] * pattern.prob.list[[fam.info]])[round(pattern.prob.list[[fam.info]], 5) <= round(famRVprob[fam.info], 5) & N.list[[fam.info]] >= famNcarriers[fam.info]])
    pall = ifelse(famNcarriers[fam.info] == max(N.list[[fam.info]]), min(pattern.prob.list[[fam.info]]), 1)
  }
  else
  {
    # Two or more informative families: joint arrays of pattern probabilities,
    # equivalent-pattern counts and carrier counts across families.
    # Generalized: the original enumerated 2-6 families with copy-pasted nested
    # outer() calls (and errored beyond 6); Reduce() builds the identical
    # left-nested arrays for any number of families.
    pattern.prob.array = Reduce(outer, pattern.prob.list[fam.info])
    nequiv.array = Reduce(outer, nequiv.list[fam.info])
    N.array = Reduce(function(a, b) outer(a, b, "+"), N.list[fam.info])
    # Observed joint sharing probability, rounded to 5 digits as elsewhere
    pobs = round(prod(famRVprob[fam.info]), 5)
    # Fix: the array was previously rounded with the default digits = 0
    # (`round(pattern.prob.array) <= pobs`), which rounds nearly every
    # probability to 0 and makes the condition trivially true; it must use the
    # same 5-digit rounding as pobs and the single-family branch.
    p = sum((nequiv.array * pattern.prob.array)[round(pattern.prob.array, 5) <= pobs & N.array >= sum(famNcarriers[fam.info])])
    maxN = sapply(N.list[fam.info], max)
    # families where not all potential carriers actually carry the variant
    not = fam.info[famNcarriers[fam.info] < maxN]
    if (length(not) > 0)
    {
      pshare = list(ped.tocompute.vec = fam.info, pshare = sapply(pattern.prob.list[fam.info], min))
      pall = get.psubset(fam.info, not, pshare)
    }
    else pall = prod(sapply(pattern.prob.list[fam.info], min))
  }
  list(p = p, pall = pall)
}
|
0a8a029abd0ca63c2013899daffdc9333c9875da
|
e31959d220a6509b9b0cd0983098d4728b0aeb3a
|
/man/output.Rd
|
ddc4add1d130875c6054ec108ad1a4ab233ce642
|
[] |
no_license
|
pvrqualitasag/PedigreeFromTvdData
|
d2d226ae14c309f46c89742dfc3964705b7955f0
|
bbf1567edfee91f5c2e47deabc190e45bec9e0b0
|
refs/heads/master
| 2021-09-06T23:42:05.091546
| 2018-02-13T13:37:20
| 2018-02-13T13:37:20
| 109,243,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
output.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedigree_output.R
\name{output}
\alias{output}
\title{Output Pedigree from TVD-Data}
\usage{
output(plPedigree, psOutfile, psSepChar = "\\t")
}
\arguments{
\item{plPedigree}{processed Pedigree}
\item{psOutfile}{output Pedigree}
\item{psSepChar}{parameter by default for delimiter symbol}
}
\description{
Output Pedigree from TVD-Data
}
|
22c7a30864921bbf1d4e38ebcaeea3cda92bbebb
|
fd6caf709c8d05a2a12936e1aa591ccb2c6ff1be
|
/course project 2/plot3.r
|
720cfea3a83b1e273d9c6025b441f1377bd13f29
|
[] |
no_license
|
correnm/ExData_Plotting1
|
40a99882710c7f659d942985532f219afae8e32b
|
e657bb70e290c95520fbf29e06f4ff79fb638f05
|
refs/heads/master
| 2021-01-20T23:54:34.167964
| 2014-08-24T23:10:34
| 2014-08-24T23:10:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
plot3.r
|
# Load the library
library(ggplot2)

# Build full paths to the two data files in the working directory
dir <- getwd()
NEIfile <- paste(dir, "summarySCC_PM25.rds", sep = "/")
SCCfile <- paste(dir, "Source_Classification_Code.rds", sep = "/")

# Read in the data files
NEI <- readRDS(NEIfile)
SCC <- readRDS(SCCfile)

# Data for Baltimore City only (fips code 24510)
baltimore <- subset(NEI, fips == '24510')

# Total of emissions by source type in each year
plotData <- aggregate(baltimore["Emissions"], by = baltimore[c("type", "year")], FUN = sum)

# x-axis tick marks: every 3 years across the observed range
# (dead code removed: xrange/yrange/ymin/ymax/ybreaks were computed but never
# used, and `pretty(ymin:ymax, n=4)` allocated a huge intermediate vector)
xmin <- min(plotData$year)
xmax <- max(plotData$year)
xbreaks <- c(seq(xmin, xmax, by = 3))

# Save plot to png file
png("plot3.png", height = 480, width = 480)

# Line graph of emissions by year, one line per source type
output <- ggplot(data = plotData, aes(x = year, y = Emissions, group = type,
                                      color = type)) + geom_line() + geom_point()

# Fix: explicitly print() the plot -- ggplot objects are only auto-printed at
# the interactive top level, so the original rendered nothing when source()d.
print(output + scale_x_continuous(breaks = xbreaks, labels = xbreaks) +
        ggtitle("10-Year Summary for Baltimore City, MD"))

# close the device
dev.off()
|
619e65c78c3886db213f4b0b2162a2e5d7d7a793
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/RetroMAT/man/NoBlanks.Rd
|
a6caa336d05a135d847b8a9e0e637e3e4e723203
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
rd
|
NoBlanks.Rd
|
\name{NoBlanks}
\alias{NoBlanks}
\title{ NoBlanks(S) removes all blanks from string }
\arguments{
\item{S}{a String}
\item{ALU}{}
}
\value{List with
}
\description{
NoBlanks(S) removes all blanks from string }
\author{
}
|
9ecdd55dd76348e6233f2d85314c73370ee5dfb4
|
0a6f19d85f81b7d34b4faaa223707d1b85f95790
|
/scripts/transeqtl_permute.R
|
a814a98fbe812fa89d2d5e2044f187057e128c68
|
[] |
no_license
|
sarahodell/bg_rnaseq
|
7a20de4bfbcaf1187b40e2354375dc9c1414201d
|
b6846e434eab5fe279915f8b076d21fd9a66e434
|
refs/heads/master
| 2023-08-08T21:52:19.876322
| 2023-08-01T17:49:53
| 2023-08-01T17:49:53
| 245,015,919
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,219
|
r
|
transeqtl_permute.R
|
#!/usr/bin/env Rscript
## Trans-eQTL permutation test for one MegaLMM factor on one chromosome.
## Usage: transeqtl_permute.R <time> <factor> <chr> <cores> <reps>
args=commandArgs(trailingOnly=T)
time=as.character(args[[1]])    # timepoint label, e.g. "WD_0720"
factor=as.character(args[[2]])  # name of the MegaLMM factor column to test
chr=as.character(args[[3]])     # chromosome of the founder genotype probs
cores=as.numeric(args[[4]])     # cores for mclapply
reps=as.numeric(args[[5]])      # number of permutation replicates
library('GridLMM')
library('data.table')
library('dplyr')
library('parallel')
library('MASS')
library('stringr')
#all_reps=c()
# Phenotypes: one row per individual (IDs in V1), columns = factor scores
phenotype=fread(sprintf('MegaLMM/pheno_MegaLMM_%s_all_F_means.txt',time),data.table=F)
#factor_results=c()
#p=names(phenotype)[factor]
# Model frame for the chosen factor; drop missing values and z-score y
data=data.frame(ID=phenotype$V1,ID2=phenotype$V1,y=phenotype[,factor],stringsAsFactors=F)
data=data[!is.na(data$y),]
data$y=(data$y-mean(data$y))/sd(data$y)
#K=fread(sprintf('../GridLMM/K_matrices/K_matrix_chr%s.txt',chr),data.table=F)
# Kinship matrix (full genome); first column holds the individual IDs,
# with "-" replaced by "." to match phenotype IDs
K=fread('../GridLMM/K_matrices/K_matrix_full.txt',data.table=F)
rownames(K)=K[,1]
rownames(K)=gsub("-",".",rownames(K))
K=as.matrix(K[,-1])
colnames(K)=rownames(K)
# Founder genotype probabilities: one (individual x marker) matrix per founder
X_list=readRDS(sprintf('../genotypes/probabilities/geno_probs/bg%s_filtered_genotype_probs.rds',chr))
inds=rownames(X_list[[1]])
inter=intersect(inds,data$ID)
# Null model (kinship random effect only); reused by every permutation below
null_model = GridLMM_ML(y~1+(1|ID),data,relmat=list(ID=K),ML=T,REML=F,verbose=F)
nmarkers=dim(X_list[[1]])[2]
# Per marker and founder: number of individuals with probability > 0.75
frep2=sapply(seq(1,nmarkers),function(i) lapply(X_list,function(j) sum(j[,i]>0.75)))
founders=names(X_list)
# Keep a founder at a marker only when represented by more than 2 individuals
fkeep=apply(frep2,MARGIN=2,function(x) x>2)
markers=dimnames(X_list[[1]])[[2]]
colnames(fkeep)=markers
colnames(frep2)=markers
# Markers are processed in groups that share the same retained-founder count
fgroups=unique(colSums(fkeep))
n_reps=seq(1,reps)
## One permutation replicate: shuffle the rows (individuals) of the genotype
## probabilities -- breaking the genotype-phenotype link while preserving LD --
## rerun the founder-probability GWAS for every marker group, and return a
## one-row data.frame holding the minimum p-value observed in this replicate.
## Captures globals defined above: X_list, fkeep, fgroups, founders, inter,
## null_model, data, factor, chr, cores.
randomized_gwas<-function(rep){
  all_gwas=data.frame(matrix(ncol=26,nrow=0))
  names(all_gwas)=c('Trait','X_ID','s2','ML_logLik','ID.ML',founders,'n_steps','Df_X','ML_Reduced_logLik','Reduced_Df_X','p_value_ML')
  for(g in fgroups){
    # markers whose retained-founder count equals g
    subm=colnames(fkeep[,colSums(fkeep)==g])
    subfkeep=fkeep[,subm]
    X_list_sub=lapply(X_list,function(x) x[inter,subm])
    len=dim(X_list_sub[[1]])[1]
    # Run GridLMM
    # randomize the order of the genotypes (same shuffle for all 16 founders),
    # then restore the original row names so IDs still line up with phenotypes
    draw=sample(len,len,replace=F)
    X_list_reordered=lapply(X_list_sub,function(x) x[draw,])
    for(x in seq(1,16)){
      dimnames(X_list_reordered[[x]])[[1]]=dimnames(X_list_sub[[1]])[[1]]
    }
    # pattern string per marker: which founders are retained, joined by "-"
    pattern=apply(subfkeep,MARGIN=2,function(x) str_flatten(c(unlist(founders[x])),'-'))
    #pattern=
    fdf=data.frame(marker=subm,fpattern=pattern,stringsAsFactors=F)
    fpatterns=unique(fdf$fpattern)
    if(g==16){
      # All 16 founders retained: one GWAS call over all markers in the group
      h2_start=null_model$results[,grepl('.ML',colnames(null_model$results),fixed=T),drop=FALSE]
      names(h2_start) = sapply(names(h2_start),function(x) strsplit(x,'.',fixed=T)[[1]][1])
      h2_start
      V_setup=null_model$setup
      Y=as.matrix(data$y)
      X_cov=null_model$lmod$X
      X_list_null=NULL
      # drop the first founder to avoid collinearity with the intercept
      gwas=run_GridLMM_GWAS(Y,X_cov,X_list_reordered[-1],X_list_null,V_setup=V_setup,h2_start=h2_start,method='ML',mc.cores=cores,verbose=F)
      gwas$Trait=factor
      names(gwas)[6:21]=founders
      gwas=gwas[,c('Trait','X_ID','s2','ML_logLik','ID.ML',founders,'n_steps','Df_X','ML_Reduced_logLik','Reduced_Df_X','p_value_ML')]
      all_gwas=rbind(all_gwas,gwas)
    }else{
      # Fewer founders retained: run one GWAS per distinct founder pattern,
      # restricting to the founders present in that pattern
      for(i in fpatterns){
        subm2=fdf[fdf$fpattern==i,]$marker
        subf=subfkeep[,subm2,drop=F]
        #m=marker[i]
        fk=founders[subf[,1]]
        nfk=founders[!subf[,1]]
        X_list_sub2=X_list_reordered[ - which(names(X_list_reordered) %in% nfk)]
        X_list_sub2=lapply(X_list_sub2,function(x) x[,subm2,drop=F])
        h2_start=null_model$results[,grepl('.ML',colnames(null_model$results),fixed=T),drop=FALSE]
        names(h2_start) = sapply(names(h2_start),function(x) strsplit(x,'.',fixed=T)[[1]][1])
        h2_start
        V_setup=null_model$setup
        Y=as.matrix(data$y)
        X_cov=null_model$lmod$X
        X_list_null=NULL
        gwas=run_GridLMM_GWAS(Y,X_cov,X_list_sub2[-1],X_list_null,V_setup=V_setup,h2_start=h2_start,method='ML',mc.cores=cores,verbose=F)
        gwas$Trait=factor
        names(gwas)[6:(6+length(fk)-1)]=fk
        # absent founders get NA effect columns so all groups bind together
        gwas[,nfk]=NA
        gwas=gwas[,c('Trait','X_ID','s2','ML_logLik','ID.ML',founders,'n_steps','Df_X','ML_Reduced_logLik','Reduced_Df_X','p_value_ML')]
        all_gwas=rbind(all_gwas,gwas)
      }
    }
  }
  all_gwas=as.data.frame(all_gwas,stringsAsFactors=F)
  # Return value: the last expression is this assignment, so the function
  # (invisibly) returns tmp -- the replicate's minimum p-value record
  tmp=data.frame(chr=chr,replicate=rep,pval=min(all_gwas$p_value_ML))
}
#fwrite(all_gwas,sprintf('eqtl/trans/results/%s_c%s_pheno_%s_trans_results.txt',time,chr,factor),row.names=F,quote=F,sep='\t')
# Run all permutation replicates in parallel (timing printed for reference)
# and save the per-replicate minimum p-values for the permutation threshold.
print(system.time({
results=mclapply(n_reps,randomized_gwas,mc.cores=cores)
}))
saveRDS(results,sprintf('eqtl/trans/permute/chr%s_%s_%s_%.0frep_min_pvalues.rds',chr,time,factor,reps))
# The remainder of the file is retired code kept for reference only.
#fwrite(factor_results,sprintf('eqtl/trans/results/%s_c%s_pheno_factor_trans_eQTL.txt',time,chr),row.names=F,quote=F,sep='\t')
#fwrite(factor_results,sprintf('eqtl/trans/results/%s_c%s_factor_trans_eQTL.txt',time,chr),row.names=F,quote=F,sep='\t')
# For each snp, figure out fkeep and separate X_list into multiple based on dimensions
# parallelize
#n_reps=seq(1,reps)
#randomized_gwas<-function(rep){
#	len=dim(X_list_full[[1]])[1]
	# Run GridLMM
	# randomize the order of the genotypes
#	draw=sample(len,len,replace=F)
#	X_list_reordered=lapply(X_list_full,function(x) x[draw,])
#	for(x in seq(1,16)){
#	 	dimnames(X_list_reordered[[x]])[[1]]=dimnames(X_list_full[[1]])[[1]]
#	}
#	h2_start=null_model$results[,grepl('.ML',colnames(null_model$results),fixed=T),drop=FALSE]
#	names(h2_start) = sapply(names(h2_start),function(x) strsplit(x,'.',fixed=T)[[1]][1])
#	h2_start
#	V_setup=null_model$setup
#	Y=as.matrix(data$y)
#	X_cov=null_model$lmod$X
#	X_list_null=NULL
#	gwas=run_GridLMM_GWAS(Y,X_cov,X_list_reordered[-1],X_list_null,V_setup=V_setup,h2_start=h2_start,method='ML',mc.cores=cores,verbose=F)
#	gwas=gwas[!is.na(gwas$p_value_ML),]
#	tmp=data.frame(chr=chr,replicate=rep,pval=min(gwas$p_value_ML))
#}
#print(system.time({
#results=mclapply(n_reps,randomized_gwas,mc.cores=cores)
#}))
#saveRDS(results,sprintf('test_models/chr%s_%s_x_%s_founderprobs_%.0frep_max_pvalues.rds',chr,pheno,env,reps))
#date=format(Sys.time(),'%m%d%y')
#time="WD_0720"
#chr="10"
#library('GenomicFeatures') # write a script to get a table of start and stop sites of genes from the gtf file
#options(warn=2)
# Read in Kinship Matrix
|
faa8a16720fa0f5d5bac2ca77f7c541d09ac0b10
|
1ae1cee46cd923e56a8a99a32b20e5aa537dd9c9
|
/pm2.5AndCoalEmissions/plot2.R
|
69dab37b4484d8b183f819d78ede3b01de7252c7
|
[] |
no_license
|
yy-math/exploratoryDataAnalysis
|
83f8b8775a51091da8785f36fc94be32f333e031
|
276cfdb99386fe027eeb1ce70b76d66064aa4208
|
refs/heads/master
| 2021-01-10T00:58:27.768867
| 2015-09-24T01:41:56
| 2015-09-24T01:41:56
| 43,035,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
r
|
plot2.R
|
# Load the national PM2.5 emissions summary and source-classification table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Restrict to Baltimore City observations (fips code 24510)
baltimore <- NEI[NEI$fips == "24510", ]

# Total emissions within each year
yearlyTotals <- aggregate(Emissions ~ year, baltimore, sum)

# Render the chart straight to a 480x480 PNG device
png("plot2.png", width = 480, height = 480, units = "px")
barplot(
  yearlyTotals$Emissions,
  names.arg = yearlyTotals$year,
  xlab = "Year",
  ylab = "PM2.5 Emissions (Tons)",
  main = "Baltimore PM2.5 Emissions By Year"
)
dev.off()
|
427eee40f6b65b48bcc7a2e8133477958f0f3933
|
7a25234c2e7193bf1600ba5e7c78604b6c76e396
|
/browser_bed/create_bed_files.R
|
d8281d5678aae179b5f18037ac4c014fcf34bd06
|
[] |
no_license
|
LieberInstitute/zandiHyde_bipolar_rnaseq
|
c7fc422d7349089c288e44236d4dd3530ec961e8
|
af20f05f51e3df31726c181094ee4c8b2a61563a
|
refs/heads/master
| 2023-04-11T00:59:37.556261
| 2022-04-11T19:49:31
| 2022-04-11T19:49:31
| 88,870,656
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,852
|
r
|
create_bed_files.R
|
## Build UCSC BED tracks from eQTL results (lead SNPs only).
## Based on:
# /users/ajaffe/Lieber/Projects/RNAseq/Astrazeneca/Round3/round3_bedTrack.R
library('rtracklayer')
library('RColorBrewer')
library('jaffelab')
library('devtools')

## Load eQTL results from the 881 candidate SNPs and their proxy SNPs
efiles <- dir('../eqtl/raggr', pattern = 'raggr_881_snps_', full.names = TRUE)
# Name each file by the token between "_snps_" and "_eqtls_fdr01.csv"
# (jaffelab::ss splits on the pattern and takes the 2nd piece)
names(efiles) <- ss(gsub('_eqtls_fdr01.csv', '', efiles), '_snps_', 2)
etabs <- lapply(efiles, read.csv, header = TRUE)

## Filter to keep the "lead snps" (rows whose SNP appears as a leadVariant)
etabs <- lapply(etabs, function(e) {
    e[e$SNP %in% unique(e$leadVariant), ]
})
## Map -log10(FDR) values to "r,g,b" colour strings for the BED itemRgb field.
## Values are binned with cut() (breaks at 0,3,5,8,10,12,15,20,1000) and each
## bin is assigned a colour from a ramp built on the RdBu brewer palette.
## NOTE(review): Ngroup is the largest bin PRESENT in `pval`, so the ramp's
## resolution depends on the data passed in -- confirm that is intended.
get_col <- function(pval) {
    bedShading <- cut(pval,
        breaks = c(0,3, 5, 8, 10, 12, 15, 20, 1000), label=FALSE,
        include.lowest=TRUE)
    Ngroup = max(bedShading)
    pal = brewer.pal(7,"RdBu")
    # ramp for positive bin ids (mid-to-last palette colours)...
    posCols = colorRampPalette(pal[4:7])(Ngroup)
    names(posCols) = 1:Ngroup
    # ...and for negative ids (first-to-mid); only positive ids occur here
    # since cut() with label=FALSE returns 1..Ngroup
    negCols = colorRampPalette(pal[1:4])(Ngroup)
    names(negCols) = seq(-1*Ngroup,-1)
    cols = c(posCols, negCols)
    # look up each bin's colour, then convert hex colours to "r,g,b" text
    bedColors = cols[match(as.character(bedShading), names(cols))]
    tmp = t(col2rgb(bedColors))
    paste(tmp[,1], tmp[,2], tmp[,3], sep= ",")
}
## Convert each region's eQTL table to a BED-style data.frame, sort by score,
## and split into one data.frame per feature type (status/type columns are
## used for splitting, then dropped from the output).
ebed <- lapply(etabs, function(x) {
    df <- data.frame(
        'chr' = x$chr_hg38,
        'start' = x$feat_start,
        'end' = x$feat_end,
        'name' = x$SNP,
        'score' = -log10(x$FDR),
        'strand' = x$strand,
        'thickStart' = x$feat_start,
        'thickEnd' = x$feat_end,
        'itemRgb' = get_col(-log10(x$FDR)),
        'status' = x$Status,
        'type' = x$Type,
        stringsAsFactors = FALSE
    )
    ## Fix: the sorted data.frame was previously discarded (the result of
    ## `df[order(...), ]` was not assigned), so rows were split in input order.
    df <- df[order(df$score, decreasing = TRUE), ]
    split(df[, -which(colnames(df) %in% c('status', 'type'))], df$type)
})
## Every region must yield exactly 4 feature types
stopifnot(all(sapply(ebed, length) == 4))
## Build the UCSC track-definition line for one region/typestatus combination.
## `typestatus` looks like "<type>_<status>"; only the part before the first
## "_" (extracted via jaffelab::ss) appears in the description.
get_header <- function(region, typestatus) {
    track_name <- paste0("ZandiBipolar_eQTL_", region, "_", typestatus)
    track_desc <- paste0("ZandiBipolar eQTL hits - ", region, ", ",
        ss(typestatus, "_"))
    paste0("track name=", track_name, " description='", track_desc,
        "' visibility=2 itemRgb='On'")
}
## Write to files: one BED file per (region, feature-type) pair, each with a
## UCSC track header line followed by the BED rows.
dir.create('bed', showWarnings = FALSE)
xx <- lapply(names(ebed), function(reg) {
    lapply(names(ebed[[1]]), function(typstat) {
        bed <- paste0('bed/zandiBipolar_881lead_bedTrack_', Sys.Date(), '_', reg, '_', typstat, '.bed')
        # header line first...
        write.table(get_header(reg, typstat), file = bed,
            row.names=FALSE, col.names=FALSE, quote=FALSE)
        xx <- ebed[[reg]][[typstat]]
        # BED has no '*' strand; coerce unknown strands to '+'
        if(any(xx$strand == '*')) xx$strand[xx$strand == '*'] <- '+'
        # ...then the data rows appended below it
        write.table(xx, file = bed,
            row.names=FALSE, col.names=FALSE, quote=FALSE,append=TRUE)
        return(NULL)
    })
})

## Compress into a single tar ball for sharing
system('tar -zcvf bed.tar.gz bed')
system('wc -l bed/*bed')

## Reproducibility information
print('Reproducibility information:')
Sys.time()
proc.time()
options(width = 120)
session_info()
|
695aa62457280686ee6a933cf7e72d8b69041860
|
ea493fb53f45ebd4b3244b3d6b227e98ade09573
|
/AnalysisCode/04_Centrality.R
|
c921320505507eb2e23ca49f9a50d88d9221ecfd
|
[] |
no_license
|
vjf2/Family_Size_Effects
|
989926e155e5bd402eb3bef1c47d2c45e3c4f69d
|
aa438dd5ae6fcd357a7787dd62b282459a148fd6
|
refs/heads/main
| 2023-04-12T22:13:41.718888
| 2022-10-23T21:29:14
| 2022-10-23T21:29:14
| 556,440,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,369
|
r
|
04_Centrality.R
|
#######################################
#### Calculate Adult Network Centrality
#######################################

library(igraph)
library(SocGen)
library(foreach)
library(doParallel)

indiv_covars <- read.csv("SharedData/individual_covariates.csv")

# Focal females: well-sampled (>= 20 adult sightings, >= 35 relocations),
# genotyped with good pedigree coverage, and with known adult-entry and
# provisioning status
focal_adults <- indiv_covars[which(indiv_covars$num_sightings_adult >= 20 &
                                     indiv_covars$relocations >= 35 &
                                     indiv_covars$genotyped == "Y" &
                                     indiv_covars$prop_genotyped >= 0.70 &
                                     !is.na(indiv_covars$adult_entry) &
                                     !is.na(indiv_covars$provisioned)), ]

# sort these females into groups by available years to do calculations of the network while they are in their adult period
sightings <- read.csv("SharedData/sightings.csv")
dates <- sort(unique(sightings$Date))

# First survey date on/after each female's adult entry...
focal_adults$first_obs_date <- as.Date(sapply(1:length(focal_adults$Dolphin.ID),
                                              function(x) {dates[dates >= focal_adults$adult_entry[x]][1]}),
                                       origin = "1970-01-01")

# ...and last survey date on/before her departure
focal_adults$last_obs_date <- as.Date(sapply(1:length(focal_adults$Dolphin.ID), function(x) {
  tail(dates[dates <= focal_adults$depart[x]], 1)
}), origin = "1970-01-01")

# date range key table: one network is built per unique observation window
date_range <- unique(focal_adults[, c("first_obs_date", "last_obs_date")])

# add date diff (window length in years) and a key to join females to windows
date_range$datediff <- as.numeric((date_range$last_obs_date - date_range$first_obs_date) / 365.25)
date_range$date_key <- 1:nrow(date_range)
focal_adults <- merge(focal_adults, date_range, sort = FALSE)

# set up empty dataframe for per-female centrality metrics (filled below)
nf <- nrow(focal_adults)
real_network_metrics <- data.frame(ego = focal_adults$Dolphin.ID,
                                   degree = numeric(nf),
                                   strength = numeric(nf),
                                   eigen = numeric(nf),
                                   closeness = numeric(nf),
                                   network_size = numeric(nf),
                                   gc_size = numeric(nf))
# For each unique observation window: build the half-weight association
# network from all surveys in that window, then record centrality metrics for
# every focal female assigned to the window.
for (i in 1:nrow(date_range)) {
  centrality_surveys <- sightings[sightings$Date >= date_range$first_obs_date[i] &
                                    sightings$Date <= date_range$last_obs_date[i], ]
  central_network <- half_weight(sightings = centrality_surveys,
                                 group_variable = "Observation.ID",
                                 dates = "Date",
                                 IDs = "Dolphin.ID")
  g <- graph.adjacency(central_network, mode = "undirected", weighted = TRUE, diag = FALSE)
  network_size <- vcount(g)
  # size of the giant (largest connected) component
  graphs <- decompose.graph(g)
  largest <- which.max(sapply(graphs, vcount))
  g1 <- graphs[[largest]]
  gc_size <- vcount(g1)
  if (i == 3) {print(distance_table(g)); print(mean_distance(g))} # whole network
  egos <- focal_adults$Dolphin.ID[focal_adults$date_key == i]
  # mini_loop: fill metrics for each focal female in this window
  for (j in egos) {
    # calc network metrics here
    real_network_metrics[real_network_metrics$ego == j, "degree"] <- degree(g, j)
    real_network_metrics[real_network_metrics$ego == j, "strength"] <- strength(g, j)
    real_network_metrics[real_network_metrics$ego == j, "eigen"] <- eigen_centrality(g)$vector[j]
    # inverse weights so stronger associations mean shorter path distances
    real_network_metrics[real_network_metrics$ego == j, "closeness"] <- closeness(g,
                                                                                 v = j,
                                                                                 weights = 1 / E(g)$weight,
                                                                                 normalized = TRUE)
    # report network size and giant component size
    real_network_metrics[real_network_metrics$ego == j, "network_size"] <- network_size
    real_network_metrics[real_network_metrics$ego == j, "gc_size"] <- gc_size
    # note: closeness centrality is not well-defined for disconnected graphs
  }
}
# repeat for random: recompute the same centrality metrics on each permuted
# sighting dataset in kfinal (1000 randomizations), in parallel
load("IntermediateData/kfinal1000.RData")

cl <- makeCluster(detectCores() - 1)
clusterEvalQ(cl, {library(SocGen);library(igraph)})
clusterExport(cl, c("kfinal", "date_range", "focal_adults", "nf"))
registerDoParallel(cl)

starttime <- Sys.time()
central_rand <- foreach(n = 1:length(kfinal), .errorhandling = 'pass') %dopar% {
  kfinal1 <- kfinal[[n]]
  # group labels embed the date as their first 10 characters
  kfinal1$Date <- substring(kfinal1$group, 1, 10)
  network_metrics <- data.frame(ego = focal_adults$Dolphin.ID,
                                degree = numeric(nf),
                                strength = numeric(nf),
                                eigen = numeric(nf),
                                closeness = numeric(nf))
  # same window-by-window procedure as for the observed network above
  for (i in 1:nrow(date_range)) {
    centrality_surveys <- kfinal1[kfinal1$Date >= date_range$first_obs_date[i] &
                                    kfinal1$Date <= date_range$last_obs_date[i], ]
    central_network <- half_weight(sightings = centrality_surveys,
                                   group_variable = "group",
                                   dates = "Date",
                                   IDs = "id")
    g <- graph.adjacency(central_network, mode = "undirected", weighted = TRUE, diag = FALSE)
    egos <- focal_adults$Dolphin.ID[focal_adults$date_key == i]
    # mini_loop
    for (j in egos) {
      # calc network metrics here
      network_metrics[network_metrics$ego == j, "degree"] <- degree(g, j)
      network_metrics[network_metrics$ego == j, "strength"] <- strength(g, j)
      network_metrics[network_metrics$ego == j, "eigen"] <- eigen_centrality(g)$vector[j]
      network_metrics[network_metrics$ego == j, "closeness"] <- closeness(g,
                                                                          v = j,
                                                                          weights = 1 / E(g)$weight,
                                                                          normalized = TRUE)
      # note: closeness centrality is not well-defined for disconnected graphs
    }
  }
  # cat(paste0(" network complete for number ", n, "\n")) #if printing to file
  network_metrics
}
endtime <- Sys.time()
stopCluster(cl)
endtime - starttime # run time ~1 hour
beepr::beep(2)

# save relevant objects (drop the merged window columns from focal_adults)
focal_adults <- focal_adults[, names(indiv_covars)]
save(real_network_metrics, central_rand, focal_adults, file = "IntermediateData/centrality_networks.RData")
|
0ce8a62b4f78ef893c17fdc8c8a015825fc8c1ca
|
6c131dcadb8e738286c1ee37d08b5ccfa0e1d57e
|
/Quiz/Quiz1/Q5/ui.R
|
80f2f264364f775e4017b8bc87c81aab551acf7a
|
[] |
no_license
|
TrentLin/Developing-Data-Product
|
903591c6540125b0e87c76592ee81e582ef0fa12
|
57510658781197efae59ceecc40fe9e6790f99cd
|
refs/heads/master
| 2020-06-02T16:19:29.079955
| 2015-02-22T08:51:31
| 2015-02-22T08:51:31
| 31,157,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
ui.R
|
library(shiny)

## UI definition: a slider for the user's guess at mu plus the histogram
## rendered by the matching server.R (the plotOutput id must match the
## server's output name -- here "newHist").
shinyUI(pageWithSidebar(
  headerPanel("Example plot"),
  sidebarPanel(
    ## Fix: removed the trailing comma after `step = 0.05`, which passed an
    ## empty (missing) argument into sliderInput() and errors at app startup.
    sliderInput('mu', 'Guess at the mu', value = 70, min = 60, max = 80, step = 0.05)
  ),
  mainPanel(
    plotOutput('newHist')
  )
))
# The server.R output name isn't the same as the plotOutput
# Change myHist to newHist!
|
04c0e8c5eea6680613f5bc1a4300b05d72c35ea5
|
b4ebd95a5cfed5fc5895ff6ac193fa83c02b35b1
|
/ML_Assignment_R.R
|
533d51b09e97e20620bf3b50a8ad56ecb906ad69
|
[] |
no_license
|
ApacheStark/Machine-Learning-Assign-1
|
f96693c49fbf419db4cca3a1f288ed3c67efe247
|
bd717b13ede2a9a2785afd7ce7d826d39ab58182
|
refs/heads/master
| 2021-11-04T20:52:50.777709
| 2019-04-28T06:59:48
| 2019-04-28T06:59:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,647
|
r
|
ML_Assignment_R.R
|
# Load Packages #
library(readr)
library(readxl)
library(dplyr)
library(tidyr)
library(knitr)
library(mlr)
library(outliers)
#----------------------------------------------------------------------------------------------------------#
# Read Data #
# NOTE(review): absolute Windows path -- the script only runs as-is on the
# author's machine; consider a relative path instead of setwd().
setwd("C:\\Users\\marga\\OneDrive\\Documents\\Uni\\Machine Learning")
heart <- read.csv("heart.csv")
#----------------------------------------------------------------------------------------------------------#
# Understand the Data#
# Quick structural exploration of the raw heart-disease data
head(heart)
tail(heart)
dim(heart)
str(heart)
names(heart)
class(heart)
#Findings: 14 attributes for 303 observations
#Attributes:
#  > 1. age
#  > 2. sex
#  > 3. chest pain type (4 values)
#  > 4. resting blood pressure
#  > 5. serum cholestoral in mg/dl
#  > 6. fasting blood sugar > 120 mg/dl
#  > 7. resting electrocardiographic results (values 0,1,2)
#  > 8. maximum heart rate achieved
#  > 9. exercise induced angina
#  > 10. oldpeak = ST depression induced by exercise relative to rest
#  > 11. the slope of the peak exercise ST segment
#  > 12. number of major vessels (0-3) colored by flourosopy
#  > 13. thal: 3 = normal; 6 = fixed defect; 7 = reversable defect
#Ideas:
#  - Change Column Names
#  - Change Sex to M and F (factor)
#  - Change chest pain type to ordered factors (names? mild to severe?)
#  - Change "thal" numerical values to anmed factors normal, fixed defect, reversable defect
#----------------------------------------------------------------------------------------------------------#
# Tidy the Data #
# Give the 14 columns readable names (order matches the attribute list above)
colnames(heart) <- c('Age',
                     'Sex',
                     'Chest Pain',
                     'Rest. Blood Pressure',
                     'Cholestoral (mg/dl)',
                     'Fast. Blood Sugar (>120mg/dl)',
                     'Resting ECG',
                     'Max Heart Rate',
                     'Ex. Induced Angina',
                     'Old Peak',
                     'Slope',
                     'No. of Blood Vessels',
                     'Thalessemia',
                     'Target')
head(heart)
str(heart)
# Recode the integer-coded categorical columns as labelled factors.
# NOTE(review): Thalessemia is recoded from levels 1/2/3, although the header
# comment describes codes 3/6/7 -- confirm the dataset's actual coding; any
# value outside 1-3 (e.g. 0) becomes NA here.
heart$Sex <- factor(heart$Sex,
                    levels = c(1,0),
                    labels = c("Male", "Female"))
heart$`Chest Pain` <- factor(heart$`Chest Pain` ,
                             levels = c(0,1,2,3),
                             labels = c("Typical Angina", "Atypical Angina", "Non-Anginal", "Asymptomatic"))
heart$`Fast. Blood Sugar (>120mg/dl)` <- factor(heart$`Fast. Blood Sugar (>120mg/dl)` ,
                                                levels = c(0,1),
                                                labels = c("FALSE", "TRUE"))
heart$`Resting ECG` <- factor(heart$`Resting ECG` ,
                              levels = c(0,1,2),
                              labels = c("Normal", "ST-T Abnormal", "Hypertrophy"))
heart$`Ex. Induced Angina` <- factor(heart$`Ex. Induced Angina` ,
                                     levels = c(0,1),
                                     labels = c("No", "Yes"))
heart$Slope <- factor(heart$Slope ,
                      levels = c(0,1,2),
                      labels = c("Upsloping", "Flat", "Downsloping"))
heart$`Thalessemia` <- factor(heart$`Thalessemia` ,
                              levels = c(1,2,3),
                              labels = c("Normal", "Fixed Defect", "Reversable Defect"))
heart$Target <- factor(heart$Target,
                       levels = c(0,1),
                       labels = c("No", "Yes"))
str(heart)
head(heart)
summary(heart)
#----------------------------------------------------------------------------------------------------------#
# Scan the Data #
# Missing Values #
# Locate NAs overall and per column/row
which(is.na(heart))
colSums(is.na(heart))
rowSums(is.na(heart))
#Only a couple of NAs in the Thalessemia column. Can replace with mode value of Thalessemia.
summary(heart$`Thalessemia`)
#Mode value is "Fixed Defect".
# Simple mode imputation for the Thalessemia NAs
heart$`Thalessemia`[is.na(heart$`Thalessemia`)] <- "Fixed Defect" #Look at relationships between thal and other variables to find best mode for NAs
which(is.na(heart))
summary(heart$`Thalessemia`)
#Check for special values
is.special <- function(x){
if (is.numeric(x)) !is.finite(x)
}
sapply(heart, is.special)
#None found.
# Outliers #
str(heart)
z.scores <- heart$`Rest. Blood Pressure` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Resting Blood Pressure
z.scores <- heart$`Cholestoral (mg/dl)` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#4 Outliers for Resting Blood Pressure
z.scores <- heart$`Max Heart Rate` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#1 Outlier for Max Heart Rate
z.scores <- heart$`Old Peak` %>% scores(type = "z")
z.scores %>% summary()
which( abs(z.scores) >3 )
length (which( abs(z.scores) >3 ))
#2 Outliers for Old Peak
#Univariate Box Plots of Numeric Variables
boxplot(heart$`Rest. Blood Pressure`, main = "Resting Blood Pressure")
boxplot(heart$`Rest. Blood Pressure`, main = "Resting Blood Pressure")
boxplot(heart$`Cholestoral (mg/dl)`, main = "Cholestoral")
boxplot(heart$`Old Peak`, main = "Old Peak")
#Histograms
hist(heart$`Age`, main = "Sample Distribution of Age")
hist(heart$`No. of Blood Vessels`, main = "No. of Blood Vessels Coloured by Flourosopy")
|
9c449a84a2da73accdf5dcb490f022869bdbbe76
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/survsup/examples/theme_km.Rd.R
|
d3e39a763f71243fad05e4e9a36e14342369a42e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
theme_km.Rd.R
|
library(survsup)

### Name: theme_km
### Title: Custom ggplot theme that makes Kaplan-Meier curves look nice
### Aliases: theme_km

### ** Examples

# Demonstrate theme_km() on a simple faceted scatter plot.
library(survsup)
library(ggplot2)

p <- ggplot(mtcars) +
  geom_point(aes(x = wt, y = mpg, colour = factor(gear))) +
  facet_wrap(~am)
p + theme_km()
|
f1754d65aef367afbccd81b9fcfd1390e2521e76
|
30712e0ebb841fb214a73024acdc1ba8b571c82a
|
/gissr/gissr_part2/man/sp_convex_hull.Rd
|
2dbb51df139efc3584944d7117c6d81ee7e4a78d
|
[] |
no_license
|
karaframe/R_useful_packages
|
93bc9fe28f9411d2986c09a4529c92bf5d195583
|
b132d94eb7f54129253fc51ce1e7a01b4ee0692e
|
refs/heads/master
| 2021-01-11T06:31:41.654758
| 2016-09-26T08:12:45
| 2016-09-26T08:12:45
| 69,226,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 695
|
rd
|
sp_convex_hull.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sp_convex_hull.R
\name{sp_convex_hull}
\alias{sp_convex_hull}
\title{Function to create the smallest convex polygon that contains all input
geometries.}
\usage{
sp_convex_hull(sp, features = FALSE)
}
\arguments{
\item{sp}{Spatial object.}
\item{features}{Should the individual polygons of the features contained
within \code{sp} be calculated? Default is \code{FALSE}.}
}
\description{
\code{sp_convex_hull} is a simple wrapper for \code{rgeos::gConvexHull}.
}
\examples{
\dontrun{
# Create a boundary-polygon containing all points
sp_boundary_polygon <- sp_convex_hull(sp_points)
}
}
\author{
Stuart K. Grange
}
|
551bc11aaee94a79fa1800938b639c3e287c5cf4
|
b3528c342b78e8a50553f0b6d7758c1882caecad
|
/reviewsAnalysis/Shiny app1.R
|
40830d78ba55432d24537ff25c773065e4be68a1
|
[] |
no_license
|
DevDataAnalyst/ShinyApps
|
d6a20012c7f0317d6cc2485a90c5cad66f359cae
|
3059cc4302b3064746cff496f776538ef1ebe547
|
refs/heads/master
| 2020-04-03T08:28:22.998515
| 2018-10-30T16:06:41
| 2018-10-30T16:06:41
| 155,134,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,499
|
r
|
Shiny app1.R
|
library(shiny)
library(shinyShortcut)
library(shinythemes)
library(qdapRegex)
library(dplyr)
library(reshape2)
library(sqldf)
library(readxl)
library(sentiment)
library(stringr)
library(textstem)
library(ggplot2)
library(dygraphs)
library(networkD3)
# Shiny app: loads an Excel file of reviews, scores sentiment polarity, and
# renders a table, a bar chart, a time-series trend, and an influencer network.
shinyApp(
# UI: theme selector plus a navbar with a file upload and four result tabs.
ui = tagList(
shinythemes::themeSelector(),
navbarPage(
"Sentiment Analyser",
tabPanel("Main Analysis",
sidebarPanel(
fileInput("file1", "Choose the Excel data file",
multiple = FALSE),
actionButton("action", "Energise!")
),
mainPanel(
tabsetPanel(
tabPanel("Sentiments",h4("Polarities"),dataTableOutput("table1")),
tabPanel("Sentiments Overall View",h4("Sentiment Plot"),plotOutput("plot1")),
tabPanel("Sentiment Trend over Time",h4("Sentiment Trend"),dygraphOutput("plot2")),
tabPanel("Influencer Analysis",h4("Influential users"),simpleNetworkOutput("plot3"))
)
)
)
)
),
server = function(input, output,session) {
# Reactive pipeline: read the upload, strip URLs/mentions/tags/punctuation,
# classify polarity, and bind the scores onto the original data.
# NOTE(review): fileInput() does not supply a $sheetName field — this read
# likely needs a fixed sheet index or a separate input; confirm.
x = reactive({
d1 = read_excel(input$file1$datapath,sheet=input$file1$sheetName)
d1$reviewText <- gsub("http.*","", d1$reviewText)
d1$reviewText = gsub("@\\w+ *", "", d1$reviewText)
d1$reviewText = gsub("<\\w+ *", "", d1$reviewText)
d1$reviewText = gsub("[[:punct:]]","", d1$reviewText)
tweets = d1$reviewText
tweets = as.data.frame(tweets)
r1 = tweets
r1$CleanText <- ""
# Text preprocessing function
# NOTE(review): the first str_replace_all result is immediately overwritten
# by tolower(string), so the symbol-replacement step has no effect.
# NOTE(review): removeWords()/stopwords() come from the tm package, which is
# not loaded at the top of this file — verify it is attached elsewhere.
Clean_String <- function(string){
#symbol replace
temp = str_replace_all(string, "[^[:alnum:]]", " ")
# Lowercase
temp <- tolower(string)
# Remove everything that is not a number or letter
temp <- str_replace_all(temp,"[^a-zA-Z\\s]", " ")
# Remove stopwords
temp <- removeWords(temp, stopwords('en'))
# Shrink down to just one white space
temp <- str_replace_all(temp,"[\\s]+", " ")
# Split the string into words
#temp <- str_split(temp, " ")[[1]]
temp <- stem_words(temp)
# Get rid of trailing "" if necessary
indexes <- which(temp == "")
if(length(indexes) > 0){temp <- temp[-indexes]}
# Get unique words
return(paste(unique(temp),collapse = ' '))
}
#Clean all the texts row-wise
# NOTE(review): r1 only has columns 'tweets' and 'CleanText'; r1$text[i]
# is NULL here — presumably this should read r1$tweets[i]. Confirm.
for(i in 1:NROW(r1))
{
r1$CleanText[i] <- Clean_String(r1$text[i])
}
textdata = r1[c("tweets")]
# classify_polarity() is from the 'sentiment' package; columns 3:4 are the
# polarity ratio and best-fit label.
sentiment_scores = classify_polarity(r1)
Sentiment = as.data.frame(sentiment_scores[,3:4])
final_result = cbind(d1,Sentiment)
})
# Tab 1: raw table of reviews with their polarity columns.
observeEvent(input$action,{output$table1 <- renderDataTable({
final_result=x()
return(final_result)
})})
# Tab 2: bar chart of the BEST_FIT polarity label counts.
observeEvent(input$action,{output$plot1 = renderPlot({
final_result=x()
ggplot(final_result, aes(BEST_FIT, fill = BEST_FIT ) ) + geom_bar()
})})
# Tab 3: monthly sentiment counts as three time series in a dygraph.
# NOTE(review): month()/year() require lubridate, which is not in this
# file's library() list — verify it is loaded elsewhere.
observeEvent(input$action,{output$plot2 = renderDygraph({
final_result=x()
final_result$Months = month(final_result$reviewTime)
final_result$Years = year(final_result$reviewTime)
fr_agg = data.frame(cbind(final_result$Years,final_result$Months,as.character(final_result$BEST_FIT)))
colnames(fr_agg)=c("Years","Months","BEST_FIT")
fr_agg = data.frame(dcast(fr_agg,(Years+Months ~ BEST_FIT)))
neutral_ts = ts(as.numeric(as.character(fr_agg$neutral)),start=min(final_result$Years),frequency = 12)
positive_ts = ts(as.numeric(as.character(fr_agg$positive)),start=min(final_result$Years),frequency = 12)
negative_ts = ts(as.numeric(as.character(fr_agg$negative)),start=min(final_result$Years),frequency = 12)
sentiment_trend = cbind(positive_ts,negative_ts,neutral_ts)
dygraph(sentiment_trend, main = "Sentiment Trend over Time") %>%
dySeries("positive_ts",label = "Positive") %>%
dySeries("negative_ts",label = "Negative") %>%
dySeries("neutral_ts",label = "Neutral") %>%
dyOptions(colors = RColorBrewer::brewer.pal(3, "Set2")) %>%
dyOptions(fillGraph = TRUE, fillAlpha = 0.4)%>%
dyHighlight(highlightSeriesOpts = list(strokeWidth = 3))%>% dyRangeSelector()
})})
# Tab 4: reviewer-to-product network for the most helpful reviewers
# (helpfulness ratio parsed out of the "[pos, total]" 'helpful' field).
observeEvent(input$action,{output$plot3 = renderSimpleNetwork({
d1=x()
d1$helpful_positive=as.numeric(rm_between(d1$helpful,"[",",",extract = TRUE))
d1$total_views=as.numeric(rm_between(d1$helpful,", ","]",extract = TRUE))
d1$overall_helpful=d1$helpful_positive / d1$total_views
d1$overall_helpful = replace(d1$overall_helpful, is.na(d1$overall_helpful), 0)
book_summary = data.frame(d1%>%group_by(asin)%>%summarise(helpfulness=mean(overall_helpful)))
main_inf = data.frame(sqldf("select * from book_summary where helpfulness>0.75"))
final = merge.data.frame(d1,main_inf,by="asin",all.y = TRUE)
final = final[order(final$total_views,decreasing = TRUE),]
# NOTE(review): dropping column 13 by position is fragile — verify it is
# the intended duplicated helpfulness column for this data layout.
final = final[,-13]
final = final[1:20,]
Source = data.frame(cbind(final$reviewerID,final$asin,final$overall_helpful))
colnames(Source) = c("reviewerID","asin","overall_helpful")
simpleNetwork(Source,Source = "reviewerID",Target = "asin",width = 720, height = 720)
})})
})
|
45029360007af935830304d4b5a382d95f733356
|
12265f972c749881abbfecfd7d318a466ab387af
|
/man/Turyn_seq.Rd
|
e8af90633e1b45186898a060cd9ae15d30080ab7
|
[] |
no_license
|
cran/HadamardR
|
ab10185cfc1d91a5a98ec5c3bf5289e009564ec5
|
0c455660b1f2d8fb0ebc2ae6e62180c4454b3932
|
refs/heads/master
| 2021-05-26T03:17:35.294417
| 2020-04-07T14:10:06
| 2020-04-07T14:10:06
| 254,030,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 932
|
rd
|
Turyn_seq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Turyn_seq.R
\name{Turyn_seq}
\alias{Turyn_seq}
\title{Turyn_seq
Turyn_seq performs the selection of the Turyn sequences from dataset.
It is internal function not exported.}
\usage{
Turyn_seq(order)
}
\arguments{
\item{order}{integer}
}
\value{
Required Turyn sequences of order x
}
\description{
Turyn_seq
Turyn_seq performs the selection of the Turyn sequences from dataset.
It is internal function not exported.
}
\details{
Create Turyn sequences of given order from the internal dataset T_sequences
Turyn type-sequences are available for 28,30,34,36 in the internal table.
}
\references{
Goethals, J. M. and Seidel, J. J. (1967). Orthogonal matrices with zero diagonal. Canad. J. Math., 19, 259-264.
}
\seealso{
\code{\link{had_goethals_Turyn}} for Goethals-Seidel construction method using Turyn sequences.
#'
}
|
b02343d56994ede07c2f0ec2ceb39d9612eb4758
|
95898a6c3600190335a47355d54b07c79d5d4faa
|
/sources/nfirs/scripts/low-risk.r
|
3fb9abe7c26f017cb2099183bc7311acccf2cad9
|
[
"MIT"
] |
permissive
|
FireCARES/data
|
1af01b8bbbf7c1eda96c449a811138588fb65ff2
|
4c9d65f93f7527ed0f067fe5c966546b5f3ef483
|
refs/heads/master
| 2023-07-21T03:58:42.090007
| 2020-11-07T05:13:26
| 2020-11-07T05:13:26
| 34,053,366
| 0
| 2
|
MIT
| 2023-07-06T21:55:23
| 2015-04-16T12:02:56
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 17,514
|
r
|
low-risk.r
|
# Connect to the NFIRS PostgreSQL database (credentials via environment
# variables) and pull the analysis data set, then derive filters and a
# train/validation/test partition.
library('RPostgreSQL')
drv <- dbDriver("PostgreSQL")
conn <- dbConnect(drv, dbname=Sys.getenv("NFIRS_DATABASE_NAME"),
                       user=Sys.getenv("NFIRS_DATABASE_USER"),
                       port=Sys.getenv("NFIRS_DATABASE_PORT"),
                       host=Sys.getenv("NFIRS_DATABASE_HOST"),
                       password=Sys.getenv("NFIRS_DATABASE_PASSWORD"))
fires <- dbGetQuery( conn, 'select *, fdid as fd_id from nist.final_query' )
# Fixed seed so the random train/validation/test split below is reproducible.
set.seed( 953016876 )
# set all the fire results to zero where the query above returns a Null value.
for( i in c( 'res_all', 'res_2','res_3' ) ) fires[[ i ]][ is.na( fires[[ i ]] ) ] <- 0
# Region fix-ups: PR gets its own region; remaining NAs default to West.
fires$region[ fires$state == 'PR' ] <- 'Puerto Rico'
fires$region[ is.na( fires$region ) ] <- 'West'
# Household income enters the models on the log scale.
fires$inc_hh <- as.numeric( fires$inc_hh )
fires$inc_hh <- log( fires$inc_hh )
for( i in c( 'region', 'state', 'fd_id', 'fd_size' ) ) fires[[ i ]] <- factor( fires[[ i ]] )
fires$region <- relevel( fires$region, 'West' )
# create filters
fires$no.fire <- fires$giants <- fires$small <- fires$base <- fires$include <- TRUE
# base filter: departments of known size 3-9 with usable covariates.
fires$base <- with( fires, fd_size %in% 3:9 & ! ( is.na( fd_id ) | is.na( fd_size ) ) )
fires$base <- fires$base & ! is.na( fires$inc_hh )
fires$base <- fires$base & fires$f_located > 0
fires$base <- fires$base & ! is.na( fires$smoke_cty )
# giants filter: drop tracts in the extreme top 0.1% of population,
# housing units, males, or the 45-54 age group.
# changed geoid to tr10_fid
u <- with( fires[ fires$base, ], list( pop=tr10_fid[ pop > quantile( pop, .999) ],
                                       hse.units=tr10_fid[hse_units > quantile(hse_units, .999)],
                                       males=tr10_fid[males > quantile(males, .999)],
                                       age_45_54=tr10_fid[age_45_54 > quantile(age_45_54, .999)]))
# changed geoid to tr10_fid
v <- NULL
for( i in names( u ) ) v <- union( v, u[[ i ]] )
fires$giants <- ! fires$tr10_fid %in% v
rm( i, u, v )
# small filter: departments must report more than 25 incidents.
fires$small <- fires$dept_incidents > 25 & ! is.na( fires$dept_incidents )
fires$include <- with( fires, base & small & giants )
# define outliers: flag department-years whose incident count is not more
# than two SDs below that department's mean (kept in fires$lcl).
dept <- fires[, c( 'tr10_fid', 'year', 'fd_id', 'dept_incidents' ) ]
ddd <- unique( fires[,c( 'year', 'fd_id', 'dept_incidents' ) ] )
ddd <- aggregate( ddd$dept_incidents, list( fd_id=ddd$fd_id ), function( x ) c( mean( x, na.rm=TRUE ), sd( x, na.rm=TRUE ) ))
ddd$m <- ddd$x[,1]
ddd$sd <- ddd$x[,2]
dept$m <- ddd$m[ match( dept$fd_id, ddd$fd_id ) ]
dept$sd <- ddd$sd[ match( dept$fd_id, ddd$fd_id ) ]
dept$lg <- ! ( is.na( dept$dept_incidents ) | dept$dept_incidents < dept$m - 2 * dept$sd )
fires$lcl <- dept$lg
rm( dept, ddd )
# partition data: assign each tract at random (uniformly) to one of
# training / validation / test, constant across all its rows.
tr10_fid <- unique( fires$tr10_fid )
tr10_fid <- data.frame( tr10_fid=tr10_fid, v=floor( runif( length( tr10_fid ) ) * 3 ), set="",
                        stringsAsFactors=FALSE )
tr10_fid$set[ tr10_fid$v == 0 ] <- "training"
tr10_fid$set[ tr10_fid$v == 1 ] <- "validation"
tr10_fid$set[ tr10_fid$v == 2 ] <- "test"
tr10_fid$set <- factor( tr10_fid$set )
fires$set <- tr10_fid$set[ match( fires$tr10_fid, tr10_fid$tr10_fid ) ]
rm( tr10_fid )
# Build the model/run specification lists from the controls schema tables.
#
# Args:
#   conn  - open DBI connection to the controls schema.
#   group - character vector of controls.models 'lst' names; if given, pulls
#           every model in those groups.
#   y     - target name (used with 'mdls' when 'group' is NULL).
#   mdls  - character vector of model names for target 'y'.
#   run   - controls.runs group to load (default "short").
#
# Returns list(models=..., runs=...): 'runs' is a (possibly nested) list of
# unevaluated subset expressions keyed by tier1/tier2; each 'models' entry
# holds the fitting function (library + name) and its parsed input arguments.
f <- function( conn, group=NULL, y=NULL, mdls=NULL, run="short" )
{
# Argument validation: need either a group list or a target+model set.
if( is.null( group ) & is.null( y ) ) stop( "at least one of 'group' and 'y' must be specified" )
if( ! ( is.null( group ) | class( group ) == "character" ) ) stop( "If you use 'group' it must be a character vector" )
if( is.null( group ) & ( is.null( y ) | is.null( mdls ) ) ) stop( "both 'y' and 'mdls' must be specified" )
if( is.null( group ) & ! ( is.character( y ) & is.character( mdls ) ) ) stop( "both 'y' and 'mdls' must be character vectors" )
r0 <- dbGetQuery( conn, paste( "select * from controls.runs where grp = '", run, "' order by tier1, tier2", sep="" ) )
r <- list()
tier1 <- unique( r0$tier1 )
# Parse each run's stored expression; tier2 rows nest one level deeper.
for( i in tier1 )
{
r1 <- subset( r0, tier1 == i )
if( is.na( r1$tier2[ 1 ] ) ) r[[ i ]] <- parse( text=r1$value[ 1 ] )[[ 1 ]]
else
{
r[[ i ]] <- list()
for( j in 1:nrow( r1 ) ) r[[ i ]][[ r1$tier2[ j ] ]] <- parse( text=r1$value[ j ] )[[ 1 ]]
}
}
# Pull model definitions and their inputs either by group or by target+model.
if( ! is.null( group ) )
{
mdl <- dbGetQuery( conn, paste( "select * from controls.models where lst in ( '", paste( group, collapse="', '" ), "' )", sep="" ) )
npts <- dbGetQuery( conn, paste( "select * from controls.inputs where lst in ( '", paste( group, collapse="', '" ), "' )", sep="" ) )
}
else
{
mdl <- dbGetQuery( conn, paste( "select * from controls.models where target='", y, "' AND model in ( '", paste( mdls, collapse="', '" ), "' )", sep="" ) )
npts <- dbGetQuery( conn, paste( "select * from controls.models NATURAL JOIN controls.inputs where target='", y, "' AND model in ( '", paste( mdls, collapse="', '" ), "' )", sep="" ) )
}
models <- NULL
# Reconstruct each input in its declared class: calls and formulas are
# parsed; everything else goes through the matching as.<class> coercion.
for( i in 1:nrow( mdl ) )
{
models[[ mdl$model[ i ] ]] <- list( fn=c( library=mdl$library[ i ], ff=mdl$ff[ i ] ), inputs=list() )
npt0 <- subset( npts, lst==mdl$lst[ i ] & model==mdl$model[ i ] )
for( j in 1:nrow( npt0 ) )
{
if( npt0$class[ j ] == "call" )
models[[ i ]]$inputs[[ npt0$input[ j ] ]] <- parse( text=npt0$value[ j ] )[[ 1 ]]
else if( npt0$class[ j ] == "formula" )
models[[ i ]]$inputs[[ npt0$input[ j ] ]] <- as.formula( npt0$value[ j ], env=.GlobalEnv )
else
models[[ i ]]$inputs[[ npt0$input[ j ] ]] <- do.call( paste( "as", npt0$class[ j ], sep="." ), list( x=npt0$value[ j ] ) )
}
}
list( models=models, runs=r )
}
### Run function
### Run function
# Fit every model x run combination described by 'sets' (output of f()) on the
# training+validation partition, storing fitted models in the GLOBAL list
# 'out' (via <<-). 'sink' optionally redirects messages to a file; n > 0
# additionally bootstraps each fit.
# NOTE(review): the bootstrap branch references 'bbb' and 'pb', which are not
# defined in this file chunk (the winProgressBar lines are commented out), so
# n > 0 would fail as written — confirm before enabling bootstraps.
fn.run <- function( sets, n=0, sink=NULL )
{
require( boot )
require( utils )
# u <- Sys.time()
out <<- list()
# Open the optional message-sink file.
if( ! is.null( sink ) )
{
if( is.character( sink ) ) ff <- file( sink, "w" )
else
{
warning( "the 'sink' term must be a character" )
ff <- NULL
}
}
else ff <- NULL
for( k in names( sets$models ) )
{
# A library of "null" marks a disabled model.
if( tolower( sets$models[[k]]$fn[ 'library' ] ) == "null" ) next
out[[k]] <<- list()
require( sets$models[[k]]$fn[ 'library' ], character.only=TRUE )
fn <- sets$models[[k]]$fn[ 'ff' ]
# 'a' keeps the original args for bootstrapping; 'aa' gets the per-run
# subset spliced in.
aa <- a <- sets$models[[k]]$inputs
subset.a <- a$subset
a$subset <- NULL
data <- a$data
for( i in names( sets$runs ) )
{
out[[k]][[i]] <<- list()
if( is.list( sets$runs[[i]] ) )
{
# Nested (tier2) runs: one fit per sub-run j.
for( j in names( sets$runs[[i]] ) )
{
# u[1] <- Sys.time()
out[[k]][[i]][[j]] <<- list()
# cat( "Evaluating ", k, format( " model: ", width=16 - nchar( k ) ), i, " ", j, format( ":", width=11 - nchar( j ) ), sep="" )
# Combine run condition, model subset, and the train/validation split.
aa$subset <- substitute( u & v & set %in% c( "training", "validation" ), list( u=sets$runs[[i]][[j]], v=subset.a ) )
if( ! is.null( ff ) ) sink( ff, type="message", append=TRUE )
tryCatch(
out[[k]][[i]][[j]]$model <<- do.call( fn, aa ),
error =function( e ) cat( "ERROR in Model: ", k, ", run ", i, "-", j, ". Message: ", e$message, "\n", sep="", file=stderr() ),
message=function( e ) cat( "MESSAGE in Model: ", k, ", run ", i, "-", j, ". Message: ", e$message, "\n", sep="", file=stderr() )
)
if( ! is.null( ff ) ) sink( type="message" )
if( n > 0 )
{
dta <- do.call( "subset", list( x=data, subset=aa$subset ) )
# pb <- winProgressBar( title=paste( "Bootstrapping ", k, " model: ", i, " ", j, ": ", n, " iterations", sep="" ), label="0", max=n )
out[[k]][[i]][[j]]$boot <<- boot( dta, bbb, R=n, strata=dta$fd_id, a=a, ff=ff, fn=fn, pb=pb, nme=names( fixef( out[[k]][[i]][[j]]$model ) ) )
# close( pb )
}
# u[2] <- Sys.time()
# cat( "Elapsed time:", format( u[2] - u[1] ), "\n" )
}
}
else
{
# Flat (tier1-only) run: a single fit for this run expression.
# u[1] <- Sys.time()
# cat( "Evaluating ", k, format( " model: ", width=16 - nchar( k ) ), i, " all models:", sep="" )
aa$subset <- substitute( u & v & set %in% c( "training", "validation" ), list( u=sets$runs[[i]], v=subset.a ) )
if( ! is.null( ff ) ) sink( ff, type="message", append=TRUE )
tryCatch(
out[[k]][[i]]$model <<- do.call( fn, aa ),
error =function(e) cat( "ERROR in Model: ",k,", run ",i,"-All. Message: ",e$message,"\n",sep="",file=stderr()),
message=function(e) cat("MESSAGE in Model: ",k,", run ",i,"-All. Message: ",e$message,"\n",sep="",file=stderr() )
)
if( ! is.null( ff ) ) sink( type="message" )
if( n > 0 )
{
dta <- do.call( "subset", list( x=data, subset=aa$subset ) )
# pb <- winProgressBar( title=paste( "Bootstrapping ", k, " model: ", i, " all models: ", n, " iterations", sep="" ), label="0", max=n )
out[[k]][[i]]$boot <<- boot( dta, bbb, R=n, strata=dta$fd_id, a=a, ff=ff, fn=fn, pb=pb, nme=names( fixef( out[[k]][[i]]$model ) ) )
# close( pb )
}
# u[2] <- Sys.time()
# cat( "Elapsed time:", format( u[2] - u[1] ), "\n" )
}
}
}
if( ! is.null( ff ) ) close( ff )
}
### test function
### test function
# Score the fitted models ('output', the global list built by fn.run) on the
# held-out "test" partition described by 'input' (the list built by f()).
# 'subset' may be NULL (use the models' own subset), an expression, or a
# previous fn.test result list to accumulate onto. Returns
# list(lhs, subset, se, results): per-model RMSE plus per-row predictions.
fn.test <- function( input, output, subset=NULL )
{
# Test to see if the data and dependent variables are all identical.
# If not, throw an error
x <- unlist( lapply( input$models, function( x ) as.character( x$inputs$data ) ) )
dta <- x[1]
if( ! all( dta == x ) ) stop( "data are not all identical. Try breaking up the input and output files." )
x <- unlist( lapply( input$models, function( x ) as.character( x$inputs$formula[ 2 ] ) ) )
y <- x[1]
if( ! all( y == x ) ) stop( "Dependent variables are not all identical. Try breaking up the input and output files." )
rm( x )
# No subset given: fall back to the (first) model's own subset expression.
if( is.null( subset ) )
{
x <- unlist( lapply( input$models, function( x ) as.character( x$inputs$subset ) ) )
if( ! all( x[1] == x ) ) warning( "The subsets are not all identical. Using the first. Try specifying the subset you want.")
rm( x )
subset <- input$models[[ 1 ]]$inputs$subset
}
# A list-valued 'subset' is a previous fn.test result to extend.
if( is.list( subset ) )
{
old.res <- subset
subset <- old.res$subset
results <- old.res$results
if( y != old.res$lhs ) stop( "When 'subset' is the old results list, then the dependent variables must match." )
}
# Restrict the data set (looked up by name) to the test partition.
new.data <- do.call( "subset", list( x=get( dta ), subset=substitute( a & set == "test", list( a=subset ) ) ) )
# Fresh run: build the results frame with ids and the observed target
# (binomial targets become the observed success proportion).
if( ! exists( "old.res" ) )
{
results <- new.data[ , c( "year", "tr10_fid", "state", "region", "fd_id", "fd_size" ) ]
results$dept.new <- as.character( NA )
if( deparse( input$models[[1]]$inputs$family ) == "binomial" )
{
tmp.y <- eval( parse( text=y ), envir=new.data )
results[[ y ]] <- tmp.y[,1] / ( tmp.y[,1] + tmp.y[,2] )
}
else results[[ y ]] <- eval( parse( text=y ), envir=new.data )
}
vars <- NULL
# Predict each model on each run's rows; lme4 models also record which
# departments were unseen in training (results$dept.new), glmnet models
# need an explicit design matrix and offset.
for( k in names( input$models ) )
{
if( tolower( input$models[[k]]$fn["library"] ) == "null" ) next
vars <- c( vars, k )
require( input$models[[k]]$fn["library"], character.only=TRUE )
results[[ k ]] <- as.numeric( NA )
for( i in names( input$runs ) )
{
if( is.list( input$runs[[i]] ) )
{
for( j in names( input$runs[[i]] ) )
{
x <- eval( input$runs[[i]][[j]], envir=new.data )
if( any( x ) )
{
if( is.null( output[[k]][[i]][[j]]$model ) )
{
z <- as.numeric( NA )
warning( paste( "WARNING: Model ", k, " run ", i, "-", j, ": No model results were found.", sep="" ) )
}
else tryCatch(
{
if( input$models[[k]]$fn["library"] == "lme4" )
{
z <- predict( output[[k]][[i]][[j]]$model,newdata=new.data[x,],type="response",allow.new.levels=TRUE )
x1 <- results$fd_id %in% row.names( ranef( output[[k]][[i]][[j]]$model )$fd_id )
results$dept.new[ x & ! x1 ] <- paste( results$dept.new[ x & ! x1 ], k, sep=";" )
}
else if( input$models[[k]]$fn["library"] == "glmnet" )
{
d.f <- model.frame( formula=input$models[[k]]$inputs$formula, data=new.data[x,], na.action=na.pass )
off <- eval( input$models[[k]]$inputs$offset, new.data[x,] )
z <- predict( output[[k]][[i]][[j]]$model, newx=model.matrix( terms( d.f ), d.f ), offset=off )
}
else
{
z <- predict( output[[k]][[i]][[j]]$model, newdata=new.data[x,], type="response" )
}
},
# NOTE(review): this per-j error label says "-All" instead of "-<j>";
# probably a copy-paste from the flat branch below.
error=function( e ) stop( paste( "ERROR: Model ", k, " run ", i, "-All: ", e$message, sep="" ) )
)
results[[ k ]][x] <- z
}
}
}
else
{
x <- eval( input$runs[[i]], envir=new.data )
if( any( x ) )
{
if( is.null( output[[k]][[i]]$model ) )
{
z <- as.numeric( NA )
warning( paste( "WARNING: Model ", k, " run ", i, "-All: No model results were found.", sep="" ) )
}
else tryCatch(
{
if( input$models[[k]]$fn["library"] == "lme4" )
{
z <- predict( output[[k]][[i]]$model, newdata=new.data[x,], type="response", allow.new.levels=TRUE )
x1 <- results$fd_id %in% row.names( ranef( output[[k]][[i]]$model )$fd_id )
results$dept.new[ x & ! x1 ] <- paste( results$dept.new[ x & ! x1 ], k, sep=";" )
}
else if( input$models[[k]]$fn["library"] == "glmnet" )
{
d.f <- model.frame( formula=input$models[[k]]$inputs$formula, data=new.data[x,], na.action=na.pass )
off <- eval( input$models[[k]]$inputs$offset, new.data[x,] )
z <- predict( output[[k]][[i]]$model, newx=model.matrix( terms( d.f ), d.f ), offset=off )
}
else
{
z <- predict( output[[k]][[i]]$model, newdata=new.data[x,], type="response" )
}
},
error=function( e ) stop( paste( "ERROR: Model ", k, " run ", i, "-All: ", e$message, sep="" ) )
)
results[[ k ]][x] <- z
}
}
}
}
# Tidy the new-department marker and compute per-model RMSE over non-NA rows.
results$dept.new <- sub( "^NA;", "", results$dept.new )
# results$dept.new[ is.na( results$dept.new ) ] <- ""
s <- results[ , vars ]
s <- ( s - results[[ y ]] ) ^ 2
se <- sqrt( colSums( s, na.rm=TRUE ) / apply( s, 2, function( x ) length( x[ ! is.na( x ) ] ) ) )
if( exists( "old.res" ) ) list( lhs=y, subset=subset, se=c( old.res$se, se ), results=results )
else list( lhs=y, subset=subset, se=se, results=results )
}
# Append a naive (previous-year persistence) forecast to an fn.test() result.
#
# Args:
#   test - list(lhs, subset, se, results) as returned by fn.test().
#
# For each tract-year row the naive prediction is the observed value of the
# same tract in the previous year (rescaled by f_located if present). Adds a
# 'naive' column to test$results and a 'naive' RMSE entry to test$se, and
# returns the updated list.
naive <- function( test )
{
if( ! is.list( test ) ) stop( "this is not the output of the fn.test function" )
if( any( names( test ) != c( "lhs", "subset", "se", "results" ) ) ) stop( "this is not the output of the fn.test function" )
# Consistency fix: fn.test() emits 'tr10_fid' (this data set renamed 'geoid'
# to 'tr10_fid' — see the setup comments), so index on tr10_fid here; the
# old 'geoid' references would fail on fn.test() output.
x <- test$results[, c( "year", "tr10_fid", test$lhs ) ]
x$ndx <- paste( x$tr10_fid, x$year, sep="." )
x$match <- paste( x$tr10_fid, x$year - 1, sep="." )
# Look up each row's previous-year observation (NA when no prior year).
x$naive <- x[[ test$lhs ]][ match( x$match, x$ndx ) ]
if( "f_located" %in% names( test$results ) )
{
x$f_located <- test$results$f_located
x$naive <- x$naive * x$f_located / x$f_located[ match( x$match, x$ndx ) ]
}
test$results$naive <- x$naive
# RMSE of the naive forecast over rows where it is defined.
s <- ( test$results[[ test$lhs ]] - test$results$naive ) ^ 2
test$se <- c( test$se, naive=sqrt( sum( s, na.rm=TRUE ) / length( s[ ! is.na( s ) ] ) ))
test
}
# Driver: load the 'sz2' model specifications from the controls schema and
# fit them (populates the global 'out'). Requires the 'conn' DB handle
# opened at the top of this script.
inputs <- f(conn, 'npt.sz2.L0a', 'sz2')
fn.run(inputs)
|
b8fb5d0be9bb4258be018dcdda6cec709ec6f934
|
51de8ad74b1b9fa050e7d45020f37d6b7e3c1c00
|
/Session3_Assign1.R
|
7fd3a0b945dcf19c4862d97e6e35d92ba05ee9ed
|
[] |
no_license
|
gayuveeri/DataAnalytics_Session3_Assign1
|
ec670e38641c79f3c24bcab7b8de3d42e11e3068
|
440d46652dae9fdc11a44a9eb2170d1520e49cb7
|
refs/heads/master
| 2020-04-16T07:32:43.920012
| 2019-01-12T13:47:49
| 2019-01-12T13:47:49
| 165,390,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 480
|
r
|
Session3_Assign1.R
|
# Build a 5x5 lower-triangular matrix: the cells below the main diagonal
# receive consecutive integers (filled row by row); every other cell keeps
# its initialised value of zero.
myMat <- matrix(0, nrow = 5, ncol = 5)
myMat
counter <- 0
counter

for (row in seq_len(nrow(myMat))) {
  for (col in seq_len(ncol(myMat))) {
    if (row == col) break  # stop at the diagonal; only fill below it
    counter <- counter + 1
    myMat[row, col] <- counter
  }
  # After the inner loop breaks, 'col' sits on the diagonal, so this prints
  # the (still zero) diagonal entry of the current row.
  print(myMat[row, col])
}
counter
myMat
|
0f0d5f33f1b5eeaed6b8e77aa2a9786ea8780d61
|
c393566485ebdaadf650a721fe9db3696e127a87
|
/man/tuairim.Rd
|
486dd05529d025c53cbed2932edc6c77218ad089
|
[
"CC-BY-4.0"
] |
permissive
|
cldatascience/stad
|
537c55e8b5bcd1f8788d78d80facb60bba61f5c7
|
11ff16ecba14d77d49f831ed6c89c629b193c8bc
|
refs/heads/master
| 2021-09-20T21:21:12.119687
| 2018-08-15T09:42:46
| 2018-08-15T09:42:46
| 114,877,845
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 624
|
rd
|
tuairim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tuairim.R
\docType{data}
\name{tuairim}
\alias{tuairim}
\title{Sentiment words for Irish}
\format{A data frame with 963 rows and 2 variables:
\describe{
\item{word}{A word}
\item{sentiment}{The sentiment of the word, positive or negative}
}}
\source{
\itemize{
\item \url{https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html}
\item \url{https://github.com/juliasilge/tidytext/tree/master/data-raw}
}
}
\usage{
tuairim
}
\description{
Sentiment words for Irish, a loose translation from Bing Liu lexicon
in tidytext
}
\keyword{datasets}
|
542ee917dedc43ef1f742070c68039947792e62b
|
18072dc8abaf37e30e5e2181ead31751c6c7160e
|
/complete.R
|
b6d0deb55e658bad326024d24ef70cfd65859878
|
[] |
no_license
|
kumarpawan0522/AirPollution
|
31ea26c71a3e7316b5b26b8547e6a610903e88fd
|
484ce101cbeafb9188a383de6d4922597d207776
|
refs/heads/master
| 2021-01-12T01:32:39.658683
| 2017-01-09T07:23:19
| 2017-01-09T07:23:19
| 78,401,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
complete.R
|
complete <- function(directory, id = 1:332) {
  # Count the complete (no-NA) observations in each monitor's CSV file.
  #
  # Args:
  #   directory: passed through to obsFileName() (defined elsewhere in this
  #              project) to locate each monitor's CSV.
  #   id: integer vector of monitor ids to summarise.
  #
  # Returns a data.frame with one row per id:
  #   id   - the monitor id
  #   nobs - number of fully observed rows in that monitor's file
  if (length(id) == 0) {
    return(data.frame(id = numeric(), nobs = numeric()))
  }
  rows <- lapply(id, function(i) {
    data <- read.csv(obsFileName(directory, i))
    # sum() over the logical complete.cases vector replaces the original
    # length(v[v == TRUE]) idiom.
    data.frame(id = i, nobs = sum(complete.cases(data)))
  })
  # Single bind instead of rbind-in-a-loop (avoids quadratic copying).
  do.call(rbind, rows)
}
|
643b24cac27aeb04d0f56ce8c83df2ad60ec889f
|
fd006b7b22ec47e218ed9aae1f13131713f6d57f
|
/man/EvaluationModel.Rd
|
47734811ed88196034b0d98f32bb35ef060cf4b1
|
[] |
no_license
|
cran/Mediana
|
0651ea64e7d6ac2f47f7630fef81544bb1563b37
|
4be4b35fcbe6254b35dc1840426cd9ffc925ada9
|
refs/heads/master
| 2020-12-25T17:37:38.446245
| 2019-05-08T12:20:03
| 2019-05-08T12:20:03
| 39,719,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
rd
|
EvaluationModel.Rd
|
\name{EvaluationModel}
\alias{EvaluationModel}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
EvaluationModel object
}
\description{
\code{EvaluationModel()} initializes an object of class \code{EvaluationModel}.
}
\usage{
EvaluationModel(...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{\dots}{
defines the arguments passed to create the object of class \code{EvaluationModel}.
}
}
\details{
Evaluation models are used within the Mediana package to specify the measures (metrics) for evaluating the performance of the selected clinical scenario (combination of data and analysis models).
\code{EvaluationModel()} is used to create an object of class \code{EvaluationModel} incrementally, using the '+' operator to add objects to the existing \code{EvaluationModel} object. The advantage is to explicitly define which objects are added to the \code{EvaluationModel} object. Initialization with \code{EvaluationModel()} is highly recommended.
Object of Class \code{Criterion} can be added to an object of class \code{EvaluationModel}.
}
\references{
\url{http://gpaux.github.io/Mediana/}
}
\seealso{
See Also \code{\link{Criterion}}.
}
\examples{
## Initialize a EvaluationModel and add objects to it
evaluation.model = EvaluationModel() +
Criterion(id = "Marginal power",
method = "MarginalPower",
tests = tests("Placebo vs treatment"),
labels = c("Placebo vs treatment"),
par = parameters(alpha = 0.025))
}
|
cc3001c2558e3e5c2a3c5e3231ac3f8525641322
|
d87241791be8df80425a8afcce0ef252d6d1ff38
|
/tests/testthat/test-GpdIcm.R
|
b7f6e61f077d737a3d3cf688d3ed2ca98b08cc50
|
[] |
no_license
|
MartinRoth/gpdIcm
|
551dd3228d806dabd3658c522f693fd0a5b66e99
|
5929123e94ae4e6269e319c8c9bd352c51e8cc42
|
refs/heads/master
| 2021-01-19T01:03:00.879843
| 2016-10-29T17:39:10
| 2016-10-29T17:39:10
| 46,576,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,784
|
r
|
test-GpdIcm.R
|
# testthat suite for the gpdIcm package: GPD likelihoods, partial
# derivatives, admissibility clamping, isotonic fits, and a known
# non-convergent case. Relies on local fixtures CETvalues.rda,
# badSimulation.rda, and reference .rds files under ./outputTests/.
library(evd)
library(gpdIcm)
# Fixed seed so the simulated GPD samples (and hence assertions) are stable.
set.seed(1)
n <- 10
randomGpdGumbel  <- rgpd(n, 0, 1,  0)
randomGpdFrechet <- rgpd(n, 0, 1,  0.3)
randomGpdWeibull <- rgpd(n, 0, 1, -0.3)
randomGpdStepGumbel <- rgpd(n, 0, seq(1, 2, length.out = n), 0)
# Provides yTest and scaleTest used throughout.
load("CETvalues.rda")
newScale <- MakeScaleAdmissible(scaleTest, yTest, -0.5)
profileShape <- seq(-0.5, 0.3, by = 0.01)
context("Likelihood calculations")
test_that("Likelihood calculations", {
# Negative log-likelihood must agree with -sum(log(dgpd(...))) from evd.
expect_equal(compute_nll_gpd(randomGpdGumbel, rep(1, n), 0),
-sum(log(dgpd(randomGpdGumbel, 0, 1, 0))))
expect_equal(compute_nll_gpd(randomGpdFrechet, rep(1, n), 0.3),
-sum(log(dgpd(randomGpdFrechet, 0, 1, 0.3))))
expect_equal(compute_nll_gpd(randomGpdWeibull, rep(1, n), -0.3),
-sum(log(dgpd(randomGpdWeibull, 0, 1, -0.3))))
expect_equal(compute_nll_gpd(yTest, scaleTest, -0.3),
-sum(log(dgpd(yTest, 0, scaleTest, -0.3))))
})
context("Partial derivatives")
test_that("Partial derivative", {
# First and second partials of the NLL w.r.t. scale, checked against the
# closed-form expressions for each shape regime.
expect_equal(compute_pd1_scale_nll_gpd(randomGpdGumbel[1], 1, 0),
1 - randomGpdGumbel[1] / 1)
expect_equal(compute_pd1_scale_nll_gpd(randomGpdFrechet[1], 1, 0.3),
1 - (1 + 0.3) * randomGpdFrechet[1] / (1 + 0.3 * randomGpdFrechet[1]))
expect_equal(compute_pd1_scale_nll_gpd(randomGpdWeibull[1], 1, -0.3),
1 - (1 - 0.3) * randomGpdWeibull[1] / (1 - 0.3 * randomGpdWeibull[1]))
expect_equal(compute_pd2_scale_nll_gpd(randomGpdGumbel[1], 1, 0),
1 - 2 * randomGpdGumbel[1])
expect_equal(compute_pd2_scale_nll_gpd(randomGpdFrechet[1], 1, 0.3),
((1 - randomGpdFrechet[1])^2 - (0.3 + 1) * randomGpdFrechet[1]^2) /
(1 + 0.3 * randomGpdFrechet[1])^2)
expect_equal(compute_pd2_scale_nll_gpd(randomGpdWeibull[1], 1, -0.3),
((1 - randomGpdWeibull[1])^2 - (-0.3 + 1) * randomGpdWeibull[1]^2) /
(1 - 0.3 * randomGpdWeibull[1])^2)
})
context("Ensure admissibility")
test_that("Only admissable scale values", {
# Scale is clamped to the support boundary (plus epsilon) for each shape.
expect_equal(MakeScaleAdmissible(-1, 1, 0), 1e-8)
expect_equal(MakeScaleAdmissible(-1, 1, 0.1), 1e-8)
expect_equal(MakeScaleAdmissible(-1, 1, -0.1), 0.1 + 1e-8)
expect_equal(MakeScaleAdmissible(0.05, 1, -0.1), 0.1 + 1e-8)
expect_equal(MakeScaleAdmissible(c(0.05, 0.05, 0.05), c(1, 0.9, 2), -0.1),
c(0.1, 0.1, 0.2)+1e-8)
expect_equal_to_reference(MakeScaleAdmissible(scaleTest, yTest, -0.5), "./outputTests/AdmissableScale.rds")
})
context("Isotonic fits")
test_that("GPD scale isotonic fit", {
# ICM and projected-gradient fits must match stored references and each
# other (deviance to 1e-6, fitted values to 1e-4).
scaleFitFrechet <- FitIsoScaleFixedICM(yTest, scaleTest, 0.1)
scaleFitGumbel  <- FitIsoScaleFixedICM(yTest, scaleTest, 0.0)
scaleFitWeibull <- FitIsoScaleFixedICM(yTest, scaleTest, -0.1)
expect_equal_to_reference(scaleFitFrechet, "./outputTests/scaleFitFrechet.rds")
expect_equal_to_reference(scaleFitGumbel, "./outputTests/scaleFitGumbel.rds")
expect_equal_to_reference(scaleFitWeibull, "./outputTests/scaleFitWeibull.rds")
scaleFitFrechetPG <- FitIsoScaleFixedPG(yTest, scaleTest, 0.1)
scaleFitGumbelPG  <- FitIsoScaleFixedPG(yTest, scaleTest, 0.0)
scaleFitWeibullPG <- FitIsoScaleFixedPG(yTest, scaleTest, -0.1)
expect_equal_to_reference(scaleFitFrechetPG, "./outputTests/scaleFitFrechetPG.rds")
expect_equal_to_reference(scaleFitGumbelPG, "./outputTests/scaleFitGumbelPG.rds")
expect_equal_to_reference(scaleFitWeibullPG, "./outputTests/scaleFitWeibullPG.rds")
expect_equal(scaleFitFrechetPG$deviance, scaleFitFrechet$deviance, tolerance = 1e-6)
expect_equal(scaleFitGumbelPG$deviance, scaleFitGumbel$deviance, tolerance = 1e-6)
expect_equal(scaleFitWeibullPG$deviance, scaleFitWeibull$deviance, tolerance = 1e-6)
expect_lt(max(abs(scaleFitFrechetPG$fitted.values - scaleFitFrechet$fitted.values)), 1e-4)
expect_lt(max(abs(scaleFitGumbelPG$fitted.values - scaleFitGumbel$fitted.values)), 1e-4)
expect_lt(max(abs(scaleFitWeibullPG$fitted.values - scaleFitWeibull$fitted.values)), 1e-4)
})
test_that("Profile likelihood estimation", {
expect_equal_to_reference(FitIsoScaleGPD(yTest, -0.5, 0.3), "./outputTests/ProfileLikelihoodMaximizer.rds")
# The shape interval must straddle zero.
expect_error(FitIsoScaleGPD(yTest, 0.1, 0.3), "Zero should be included in the interval")
yTestFrechet <- rgpd(500, 0, c(rep(1, 200), seq(1,1.1, length.out = 100), rep(1.1, 200)), 0.3)
expect_equal_to_reference(FitIsoScaleGPD(yTestFrechet, -0.1, 0.4), "./outputTests/ProfileLikelihoodMaximizerFrechet.rds")
})
context("Failed Convergence")
# Provides yBadTest and shapeBadTest — a case where PG historically failed.
load("badSimulation.rda")
test_that("Convergence fails", {
startValue <- isoreg(yBadTest)$yf
tmp1 <- FitIsoScaleFixedICM(yBadTest, startValue, shapeBadTest)
#tmp2 <- FitIsoScaleFixedPG(yBadTest, startValue, shapeBadTest)
expect_true(tmp1$convergence)
#expect_false(tmp2$convergence)
})
|
a7acee83bbc2f62c0d7da8508e07ab83d4d1ee70
|
82be3695a4de47d9140700fe5b34de2ba33ee100
|
/Rcode/5 Dose map.R
|
75b0ad3ae34c599b53ad61496b578f3d4d6c3003
|
[
"CC-BY-4.0"
] |
permissive
|
javiereliomedina/Radon_Mapping
|
43ca14381512af6aa10a2685ddca901008fb11c4
|
2377e43e2659a453b1cb9f82feb47e69e02131c5
|
refs/heads/master
| 2021-06-22T15:34:30.863738
| 2021-04-09T06:05:56
| 2021-04-09T06:05:56
| 199,718,436
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,860
|
r
|
5 Dose map.R
|
# Dose map ----
summary(Grids_10km_Sum$OK_AM)
summary(Grids_10km_Sum$OK_SD)
## New dataframe with AM and SD ----
Dose <- Grids_10km_Sum %>% transmute(Id = Id,
Rn_AM = OK_AM,
Rn_SD = OK_SD,
)
## Dose [mSv/y] = CRn [Bq/m3] * FE * FO * TY [h/y] * FD [mSv / Bq.h.m-3]
# Uncertainty MC simulations
nsim <- 100
MC_Sim <- matrix(NA, nrow = length(Dose$Rn_AM), ncol = nsim)
TY <- 8760
for (i in 1:nsim) {
Rn <- truncnorm::rtruncnorm(length(Dose$Rn_AM), a = 0, b = Inf, mean = Dose$Rn_AM, sd = Dose$Rn_SD) # truncated: Rn > 0
FE <- rlnorm(1, meanlog = log(0.4), sdlog = log(1.15))
FO <- rnorm(1, 0.8, 0.03)
FD <- rnorm(1, 9e-06, 1.5e-06)
MC_Sim[,i] <- Rn * FE * FO * TY * FD
}
MC_Sim <- as.data.frame(MC_Sim)
MC_Sim$Id <- Dose$Id
MC_Sim$Dose_AM <- rowMeans(MC_Sim[,1:nsim])
MC_Sim$Dose_SD <- apply(MC_Sim[,1:nsim], 1, sd)
## Add AM and SD of the MC simulations to the dose table ----
Dose <- left_join(Dose %>% as.data.frame(),
MC_Sim[c("Id","Dose_AM","Dose_SD")] %>% as.data.frame,
by = "Id")
Dose <- Dose %>% st_sf(sf_column_name = "geometry.x")
## Dose map ----
summary(Dose)
P_Dose_AM <- ggplot() +
geom_sf(data = Country) +
geom_sf(data = Dose, aes(fill = Dose_AM)) +
scale_fill_gradient(name = "mSv/y", low = "blue", high = "red") +
ggtitle("Radiation dose - AM")
P_Dose_AM
P_Dose_SD <- ggplot() +
geom_sf(data = Country) +
geom_sf(data = Dose, aes(fill = Dose_SD)) +
scale_fill_gradient(name = "mSv/y", low = "blue", high = "red") +
ggtitle("Radiation dose - SD")
P_Dose_SD
grid.arrange(P_Dose_AM, P_Dose_SD, nrow = 1, ncol = 2)
|
d002b4c43e4224a7271de689570aa219dd606691
|
dbc18300c004cc5752629c1714f7eec4bc053501
|
/man/run_MCMC.Rd
|
2fbf45eb7b64d4000bf235ebd0cb8e3f1e804444
|
[] |
no_license
|
jmzobitz/SoilModeling
|
393983879879eb48bd90fc2c6f16332cdece8ce6
|
6b8b0020f6fcd7f5916aedec03d2bb80d01ee256
|
refs/heads/master
| 2021-06-20T18:52:26.752481
| 2019-11-24T14:22:40
| 2019-11-24T14:22:40
| 32,336,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 970
|
rd
|
run_MCMC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_MCMC.R
\name{run_MCMC}
\alias{run_MCMC}
\title{Do a metropolis chain}
\usage{
run_MCMC(param_file_name, model_in, data_in, join_key, results_file_name,
out_file_name, mcmc_parameters)
}
\arguments{
\item{param_file_name}{file name (as a string) of where the default parameters are located}
\item{model_in}{the name of the model we are using}
\item{data_in}{input data we use to compare in the likelihood function.}
\item{join_key}{The name of the variable we will join to data_in when doing likelihood}
\item{results_file_name}{The file name (as a string) where the final MCMC results will be stored.}
\item{out_file_name}{The file name (as a string) where we log various info as the mcmc proceeds}
\item{mcmc_parameters}{a data frame of how we want to estimate (can be mcmc_superfast or mcmc_defaults)}
}
\description{
\code{run_MCMC} Does a complete MCMC parameter estimation
}
|
217a7a4dcac1e2054c3d1f74f7d7244d5971a7f6
|
0b0a0170295015721a3f11f6a4f867a1f2a4e3f5
|
/man/with_groups.Rd
|
60d16e3061e3fbb6e7eed89d22ab5f181b1e0c8a
|
[] |
no_license
|
cbrown5/DataGLMRepeat
|
2cae49a4f2bede031341451918e2a113246ed21c
|
5b52eb686b8306798eb9a937eef6b56b8931e09c
|
refs/heads/master
| 2021-01-05T10:21:04.486780
| 2020-09-04T01:19:52
| 2020-09-04T01:19:52
| 240,990,950
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 735
|
rd
|
with_groups.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/with_groups.R
\name{with_groups}
\alias{with_groups}
\title{Evaluate expressoin by groups of grouped dataframe}
\usage{
with_groups(df, expr)
}
\arguments{
\item{df}{agrouped data frame}
\item{expr}{and expression, possible wrapped in curly braces.}
}
\value{
A list with the contents of the expression evaluated by
groups.
}
\description{
Evaluate expressoin by groups of grouped dataframe
}
\examples{
library(dplyr)
dat2 <- tibble(
grp = sample(c(10, 20, 30), 100, replace = TRUE),
x = 1:100,
y = x*2 + rnorm(100) + grp)
gout <- dat2 \%>\%
group_by(grp) \%>\%
with_groups(., {
lm(y ~x)
})
}
\author{
Christopher J. Brown
}
|
ee46240741733e041c728148809831175a7211c4
|
7e4d5290ca5c21a2f879818ef6828d87e75515eb
|
/Fig1_FthroughI_Fig3_AthroughD.R
|
3fb62c1c1f77147f7686b802a6d91a63a904ec46
|
[] |
no_license
|
krc3004/HRD
|
8eb7e8b7964b560c521f7c880e8794c549579c9e
|
8584467afce1ecda889a3bdf4e5dce96f0ec3aa0
|
refs/heads/master
| 2020-03-27T09:23:12.935417
| 2018-08-29T14:22:45
| 2018-08-29T14:22:45
| 146,336,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,511
|
r
|
Fig1_FthroughI_Fig3_AthroughD.R
|
#######################################################################################################################################
# Fig1_FthroughI_Fig3_AthroughD.R
# Authors: Chirag Krishna, Robert Samstein, Nadeem Riaz
# plots key survival curves for immune checkpoint blockade (ICB)-treated patients with mutations in HR genes vs those without
# Two HR gene sets are presented. For Fig1 F/G, HR_Gene_Set is BRCA1, BRCA2, PALB2, CHEK2, ATM
# For Fig 1 H/I, HR_Alternatibe_Gene_Set is BRCA1, BRCA2, PALB2, RAD50, RAD51, RAD51B, RAD51C, RAD51D, MRE11A, NBN
# Fig 1 FGHI, Fig 3 ABCD
#######################################################################################################################################
## load required packages
library(data.table)
library(survival)
library(survminer)
library(survMisc)
library(ggplot2)
## load clinical data
impact_icb = as.data.frame(fread("~/Documents/final_IMPACT_supplemental_table.txt"))
## df is the clinical data
## cancers can be any, but for paper should be "pan" or "HR-associated"
## mutation_type should be one of "BRCA1_Mutation", "BRCA2_Mutation", "HR_Gene_Set", "HR_Alternative_Gene_Set"
## plot_title can be anything
HR_survival = function(df, cancers, mutation_type, plot_title){
## first subset the clinical data on cancer types and mutation type
if(cancers == "pan"){surv_df = df[,c("Sample", "Cancer_Type", "Time_to_death", "Alive_Dead", mutation_type)]}
if(cancers == "HR-associated"){surv_df = df[which(df$Cancer_Type %in% c("Breast Cancer", "Ovarian Cancer", "Prostate Cancer", "Pancreatic Cancer")),
c("Sample", "Cancer_Type", "Time_to_death", "Alive_Dead", mutation_type)]}
if(! cancers %in% c("pan", "HR-associated")){surv_df = df[which(df$Cancer_Type %in% cancers),c("Sample", "Cancer_Type", "Time_to_death", "Alive_Dead", mutation_type)]}
## plot survival curves and log-rank p value
HR_fit = do.call(survfit, list(formula = Surv(surv_df$Time_to_death, surv_df$Alive_Dead)~surv_df[,mutation_type]))
print(ggsurvplot(HR_fit, pval = TRUE, font.x = c(14, "bold", "black"),
font.y = c(14, "bold", "black"), font.tickslab = c(12, "bold", "black"), size = 1.5, palette = c("blue", "red"), data = surv_df, title = plot_title))
## print number of patients with and without mutation
print(table(surv_df[,mutation_type]))
}
## Fig 1F: HR Gene Set, HR-associated cancers
HR_survival(impact_icb, "HR-associated", "HR_Gene_Set", "HR Gene Set HR-associated cancers: Fig 1F")
## Fig 1G: HR Gene Set, pan cancer
HR_survival(impact_icb, "pan", "HR_Gene_Set", "HR Gene Set pan cancer: Fig 1G")
## Fig 1H: HR Alternative Gene Set, HR-associated cancers
HR_survival(impact_icb, "HR-associated", "HR_Alternative_Gene_Set", "HR Alternative Gene Set HR-associated cancers: Fig 1H")
## Fig 1I: HR Alternative Gene Set, HR-associated cancers
HR_survival(impact_icb, "pan", "HR_Alternative_Gene_Set", "HR Alternative Gene Set pan cancer: Fig 1I")
## Fig 3A: BRCA1 vs WT, HR-associated cancers
HR_survival(impact_icb, "HR-associated", "BRCA1_Mutation", "BRCA1 HR-associated cancers: Fig 3A")
## Fig 3B: BRCA2 vs WT, HR-associated cancers
HR_survival(impact_icb, "HR-associated", "BRCA2_Mutation", "BRCA2 pan cancer: Fig 3B")
## Fig 3C: BRCA1 vs WT, pan cancer
HR_survival(impact_icb, "pan", "BRCA1_Mutation", "BRCA1 pan cancer: Fig 3C")
## Fig 3D: BRCA2 vs WT, pan cancer
HR_survival(impact_icb, "pan", "BRCA2_Mutation", "BRCA2 pan cancer: Fig 3D")
|
012aa87aa8f27c9c31152638c76037c2fa56ff4f
|
7ce35c255fe7506795ff7abc15b5222e582451bb
|
/4-longbow-tmle-analysis/02-coxPH_exclusive_exposures.R
|
4c7f1fa74fb8e3c751cf22abda6690ab46d96ff8
|
[] |
no_license
|
child-growth/ki-longitudinal-growth
|
e464d11756c950e759dd3eea90b94b2d25fbae70
|
d8806bf14c2fa11cdaf94677175c18b86314fd21
|
refs/heads/master
| 2023-05-25T03:45:23.848005
| 2023-05-15T14:58:06
| 2023-05-15T14:58:06
| 269,440,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,652
|
r
|
02-coxPH_exclusive_exposures.R
|
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
library(caret)
library(lmtest)
#Set adjustment covariates
Wvars <- c("sex", "tr", "brthmon", "vagbrth", "hdlvry", "single", "trth2o",
"cleanck", "impfloor", "hfoodsec", "hhwealth_quart","W_mage", "W_mhtcm", "W_mwtkg",
"W_mbmi", "W_fage", "W_fhtcm", "W_meducyrs", "W_feducyrs", "W_nrooms", "W_nhh",
"W_nchldlt5", "W_parity", "impsan", "safeh20")
#load data
d <- readRDS("/data/KI/UCB-SuperLearner/Manuscript analysis data/mortality_age_no_overlap.rds")
table(d$studyid, d$country)
table(d$studyid, d$country, d$dead)
d$sex <-factor(d$sex)
d$single <-as.numeric(d$single)
d$impsan <-as.numeric(d$impsan )
d$safeh20 <-as.numeric(d$safeh20 )
d$vagbrth <-as.numeric(d$vagbrth )
d$hdlvry <-as.numeric(d$hdlvry )
d$trth2o <-as.numeric(d$trth2o )
d$cleanck <-as.numeric(d$cleanck )
d$impfloor <-as.numeric(d$impfloor )
d$hfoodsec <-addNA(d$hfoodsec )
d$hhwealth_quart <-addNA(d$hhwealth_quart )
d <- as.data.frame(d)
for(i in Wvars){
var<-d[,i]
cat(i,": ",class(var),"\n")
print(table(var))
if(class(var)=="factor"){
d[,i] <- addNA(d[,i])
}
}
d$subjid <- as.numeric(d$subjid)
d <- d %>% arrange(studyid, subjid, agedays)
table(d$studyid)
length(unique(d$studyid))
#Drop older ages
table(d$agecat)
d <- d %>% filter(agedays <= 730)
df <- d %>% group_by(studyid, subjid) %>%
filter(agedays==max(agedays)) %>%
ungroup()
prop.table(table(paste0(df$studyid,"-",df$country), df$dead),1) * 100
table(paste0(df$studyid,"-",df$country), df$dead)
d <- droplevels(d)
#Drop imputed age of death and studies with only imputed age of death
df <- d %>% filter(dead==1)
table(df$studyid, df$imp_agedth)
d <- d %>% filter(imp_agedth==0, !(studyid %in% c("GMS-Nepal","SAS-CompFeed","SAS-FoodSuppl")))
length(unique(d$studyid))
# #Drop measures too close to event
dim(d)
d <- d %>% filter(!(dead==1 & agedth-agedays <= 7))
dim(d)
#Only keep final observations
table(d$studyid, d$dead)
d <- d %>% group_by(studyid, subjid) %>% filter(agedays==max(agedays))
table(d$studyid, d$dead)
#get the N's for the studies with enough mortality to estimate
df <- d %>% filter(studyid %in% c("Burkina Faso Zn", "iLiNS-DOSE", "iLiNS-DYAD-M", "JiVitA-3", "JiVitA-4", "Keneba","VITAMIN-A","ZVITAMBO" ))
table(df$dead)
df %>% ungroup() %>% summarize(n())
df %>% summarize(n())
X_vector <- c("stunt", "wast","underwt",
"stunt_uwt", "wast_uwt", "co")
X_vector_sev <- c("sstunt","swast","sunderwt","sstunt_suwt","swast_suwt","sev_co")
#All ages < 730 days
set.seed(12345)
res_mod <- res_sev <- NULL
res_mod <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL, agecat=NULL, no_exp_overlap=T)
res_sev <- run_cox_meta(df=d, X_vector=X_vector_sev, Y="dead", Wvars=Wvars, V=NULL, agecat=NULL, no_exp_overlap=T)
#sex stratified
res_mod_sex_strat <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex", agecat=NULL, no_exp_overlap=T)
res_sev_sex_strat <- run_cox_meta(df=d, X_vector=X_vector_sev, Y="dead", Wvars=Wvars, V="sex", agecat=NULL, no_exp_overlap=T)
res <- bind_rows(res_mod, res_sev, res_mod_sex_strat, res_sev_sex_strat)
res_adj <-res %>% filter(studyid=="pooled", is.na(region), method=="RE", adj==1)
res_adj
res_unadj <-res %>% filter(studyid=="pooled", is.na(region), method=="RE", adj==0)
res_unadj
res_unadj_FE <-res %>% filter(studyid=="pooled", is.na(region), method=="FE", adj==0)
res_unadj_FE
saveRDS(res, file=paste0(BV_dir,"/results/cox_results_no_overlap.RDS"))
|
a754ffb5eba3c2d1056e557863f274ee4ed3a410
|
b201f1f182b1828a66a2d97baf28224b39d70564
|
/R/modules/ui/analysis_modules/immune_features_ui.R
|
eb5511b436f0a388f618e98f1330290692deb8eb
|
[
"MIT"
] |
permissive
|
Drinchai/iatlas-app
|
147294b54f64925fb4ee997da98f485965284744
|
261b31224d9949055fc8cbac53cad1c96a6a04de
|
refs/heads/master
| 2023-02-08T08:17:45.384581
| 2020-07-20T23:27:08
| 2020-07-20T23:27:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
immune_features_ui.R
|
immune_features_ui <- function(id) {
ns <- shiny::NS(id)
source_files <- c(
"R/modules/ui/submodules/immune_feature_distributions_ui.R",
"R/modules/ui/submodules/immune_feature_correlations_ui.R",
"R/modules/ui/submodules/call_module_ui.R"
)
for (file in source_files) {
source(file, local = T)
}
shiny::tagList(
iatlas.app::titleBox("iAtlas Explorer — Immune Feature Trends"),
iatlas.app::textBox(
width = 12,
shiny::p(paste0(
"This module allows you to see how immune readouts vary ",
"across your groups, and how they relate to one another."
))
),
iatlas.app::sectionBox(
title = "Correlations",
call_module_ui(
ns("immune_feature_distributions"),
immune_feature_distributions_ui
)
),
iatlas.app::sectionBox(
title = "Distributions",
call_module_ui(
ns("immune_feature_correlations"),
immune_feature_correlations_ui
)
)
)
}
|
064c0efa18b8d7674f18f9d93d7b274f53935255
|
7f95a575926d8fe31fc3b22c86845c90d1cb1ade
|
/run_analysis.R
|
4b4de5fec70926289e0585120a180021f7c5964f
|
[] |
no_license
|
cjerozal/coursera-datascience-3-cp
|
22945f2ac4aec80d2017fe285d9573eeeeaf18d1
|
29d9f0faaaf7981f9c13a3ce871c1d84662d3d56
|
refs/heads/master
| 2016-09-06T18:35:23.526963
| 2014-07-27T20:17:51
| 2014-07-27T20:17:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,083
|
r
|
run_analysis.R
|
activityColumnName <- "Activity"
subjectIdColumnName <- "Subject ID"
# Read in relevant data
subjectTestData <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
xTestData <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)
yTestData <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
subjectTrainData <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
xTrainData <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
yTrainData <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
# Merge the training and the test sets to create one data set
testData <- cbind(subjectTestData, xTestData, yTestData)
trainingData <- cbind(subjectTrainData, xTrainData, yTrainData)
activityData <- rbind(testData, trainingData)
# Appropriately label the data set with descriptive variable names, and
# Extract only the measurements on the mean and standard deviation for each measurement
cleanFeatureName <- function(featureName) {
cleanFeatureName <- sub("()", "", featureName, fixed = TRUE)
cleanFeatureName <- sub("BodyBody", "Body", cleanFeatureName) # correct mistake in original data
cleanFeatureName <- sub("^t", "time", cleanFeatureName)
cleanFeatureName <- sub("^f", "freq", cleanFeatureName)
cleanFeatureName
}
featureMappings <- read.table("UCI HAR Dataset/features.txt", header = FALSE)
featureMappings <- as.character(featureMappings[,2])
colnames(activityData) <- c(subjectIdColumnName, featureMappings, activityColumnName)
meanAndStdData <- data.frame(activityData[, subjectIdColumnName], activityData[, activityColumnName])
colnames(meanAndStdData) <- c(subjectIdColumnName, activityColumnName)
for (featureName in featureMappings) {
if(grepl("std", featureName) |
(grepl("mean", featureName) & !grepl("meanFreq", featureName))) {
cleandFeatureName <- cleanFeatureName(featureName)
meanAndStdData[, cleandFeatureName] <- activityData[, featureName]
}
}
# Use descriptive activity names to name the activities in the data set
activityMappings <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE)
activityMappings <- as.character(activityMappings[,2])
for (index in seq_along(activityMappings)) {
meanAndStdData[meanAndStdData[, activityColumnName] == index, activityColumnName] <- activityMappings[[index]]
}
# Create a second, independent tidy data set with the average of each variable for each activity and each subject
numSubjects <- 30
tidyDataMatrix <- matrix(nrow = length(activityMappings)*numSubjects, ncol = ncol(meanAndStdData))
colnames(tidyDataMatrix) <- colnames(meanAndStdData)
# skip the subject and activity columns when calculating means
meanNames <- colnames(tidyDataMatrix)[3:ncol(tidyDataMatrix)]
dataSplitBySubject <- split(meanAndStdData, meanAndStdData[, subjectIdColumnName])
subjectNames <- names(dataSplitBySubject)
for (subjectIndex in seq_along(subjectNames)) {
currentSubject <- subjectNames[subjectIndex]
subjectDF <- dataSplitBySubject[[currentSubject]]
dataSplitByActivity <- split(subjectDF, subjectDF[, activityColumnName])
activityNames <- names(dataSplitByActivity)
for (activityIndex in seq_along(activityNames)) {
currentActivity <- activityNames[activityIndex]
activityDF <- dataSplitByActivity[[currentActivity]]
rowOfMeans <- vector(mode = "numeric")
for (colIndex in seq_along(meanNames)) {
colName <- meanNames[colIndex]
rowOfMeans[colIndex] <- mean(activityDF[, colName], na.rm = TRUE)
}
currentRowIndex <- ((subjectIndex - 1) * 6) + activityIndex
tidyDataMatrix[currentRowIndex, ] <- c(currentSubject, currentActivity, rowOfMeans)
}
}
tidyData <- data.frame(tidyDataMatrix, stringsAsFactors = FALSE)
for (colIndex in 1:ncol(tidyData)) {
# the second column is activity names, but all others should be numeric
if (colIndex != 2) {
tidyData[, colIndex] <- as.numeric(tidyData[, colIndex])
}
}
write.table(tidyData, file = "tidydata.txt")
|
21ac75ecfeb8a943c65fe89faf9365dbaab469fa
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.business.applications/man/chime_create_room_membership.Rd
|
e872589817260b9bf3d24fca78aea2b88b805af4
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,388
|
rd
|
chime_create_room_membership.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chime_operations.R
\name{chime_create_room_membership}
\alias{chime_create_room_membership}
\title{Adds a member to a chat room in an Amazon Chime Enterprise account}
\usage{
chime_create_room_membership(AccountId, RoomId, MemberId, Role)
}
\arguments{
\item{AccountId}{[required] The Amazon Chime account ID.}
\item{RoomId}{[required] The room ID.}
\item{MemberId}{[required] The Amazon Chime member ID (user ID or bot ID).}
\item{Role}{The role of the member.}
}
\value{
A list with the following syntax:\preformatted{list(
RoomMembership = list(
RoomId = "string",
Member = list(
MemberId = "string",
MemberType = "User"|"Bot"|"Webhook",
Email = "string",
FullName = "string",
AccountId = "string"
),
Role = "Administrator"|"Member",
InvitedBy = "string",
UpdatedTimestamp = as.POSIXct(
"2015-01-01"
)
)
)
}
}
\description{
Adds a member to a chat room in an Amazon Chime Enterprise account. A
member can be either a user or a bot. The member role designates whether
the member is a chat room administrator or a general chat room member.
}
\section{Request syntax}{
\preformatted{svc$create_room_membership(
AccountId = "string",
RoomId = "string",
MemberId = "string",
Role = "Administrator"|"Member"
)
}
}
\keyword{internal}
|
549c336261c49dde8e16ccdbbf143df82ce1f7d6
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/R/rel.risk.R
|
a0593599340266e0815a97dc2ed446353ba7d4f1
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
rel.risk.R
|
#[export]
rel.risk <- function (x, a = 0.05, logged = FALSE) {
d1 <- 1/(x[1] + x[3] )
d2 <- 1/(x[2] + x[4] )
rr <- x[1] * d1 / (x[2] * d2)
z <- log(rr)
s <- sqrt(1/x[1] + 1/x[2] - d1 - d2)
ci <- c(z - qnorm(1 - a/2) * s, z + qnorm(1 - a/2) * s)
stat <- abs(z)/s
if (logged) {
pvalue <- log(2) + pnorm(stat, lower.tail = FALSE, log.p = TRUE)
}
else pvalue <- 2 * pnorm(stat, lower.tail = FALSE)
res <- c(rr, pvalue)
names(res) <- c("relative risk", "p-value")
list( res = res, ci = exp(ci) )
}
|
bf863ee7a3abb7951864d45d45654471342aa783
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EffectLiteR/examples/effectLite.Rd.R
|
62318c9b2d95147a4d6e5aa65221e624cbdf8b36
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
effectLite.Rd.R
|
library(EffectLiteR)
### Name: effectLite
### Title: Estimate average and conditional effects
### Aliases: effectLite
### ** Examples
## Example with one categorical covariate
m1 <- effectLite(y="y", x="x", k="z", control="0", data=nonortho)
print(m1)
## Example with one categorical and one continuous covariate
m1 <- effectLite(y="dv", x="x", k=c("k1"), z=c("z1"), control="control", data=example01)
print(m1)
## Example with latent outcome and latent covariate
measurement <- '
eta2 =~ 1*CPM12 + 1*CPM22
eta1 =~ 1*CPM11 + 1*CPM21
CPM11 + CPM12 ~ 0*1
CPM21 ~ c(m,m)*1
CPM22 ~ c(p,p)*1'
m1 <- effectLite(y="eta2", x="x", z=c("eta1"), control="0",
measurement=measurement, data=example02lv)
print(m1)
## Not run:
##D ## Example with cluster variable and sampling weights
##D m1 <- effectLite(y="y", x="x", z="z", fixed.cell=TRUE, control="0",
##D syntax.only=F, data=example_multilevel,
##D ids=~cid, weights=~weights)
##D print(m1)
## End(Not run)
|
97ffb97e075fe3c83874168ba5df23992a296751
|
0e3b43de95c9649c06742fc3cb9954727150b860
|
/R/Allaccessors.R
|
8d5beac4b789aaa29c2f86c53acc8d049fe01e8c
|
[] |
no_license
|
tsjzz/exomePeak2
|
6c4181180f33e58dc55575c17b28d62339db4be9
|
1edc86cf33021f021f405c24116747dc109a88e7
|
refs/heads/master
| 2023-07-15T05:09:53.993523
| 2021-05-19T16:49:57
| 2021-05-19T16:49:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,898
|
r
|
Allaccessors.R
|
#' @title Accessor to the slot \code{Parameter} in class \code{MeripBamFileList}.
#'
#' @param x a \code{MeripBamFileList} object.
#'
#' @aliases Parameter
#'
#' @rdname Parameter-methods
#'
#' @examples
#' GENE_ANNO_GTF = system.file("extdata", "example.gtf", package="exomePeak2")
#'
#' f1 = system.file("extdata", "IP1.bam", package="exomePeak2")
#' f2 = system.file("extdata", "IP2.bam", package="exomePeak2")
#' f3 = system.file("extdata", "IP3.bam", package="exomePeak2")
#' f4 = system.file("extdata", "IP4.bam", package="exomePeak2")
#' IP_BAM = c(f1,f2,f3,f4)
#' f1 = system.file("extdata", "Input1.bam", package="exomePeak2")
#' f2 = system.file("extdata", "Input2.bam", package="exomePeak2")
#' f3 = system.file("extdata", "Input3.bam", package="exomePeak2")
#' INPUT_BAM = c(f1,f2,f3)
#'
#' f1 = system.file("extdata", "treated_IP1.bam", package="exomePeak2")
#' TREATED_IP_BAM = c(f1)
#' f1 = system.file("extdata", "treated_Input1.bam", package="exomePeak2")
#' TREATED_INPUT_BAM = c(f1)
#'
#' MeRIP_Seq_Alignment <- scanMeripBAM(
#' bam_ip = IP_BAM,
#' bam_input = INPUT_BAM,
#' paired_end = FALSE
#' )
#'
#' Parameter(MeRIP_Seq_Alignment)
#'
#' @export
#'
#' @return a list for the additional parameters of the MeRIP-seq experiment.
#'
setMethod("Parameter",
"MeripBamFileList",
function(x) {
return(x@Parameter)
})
#' @title Accessor to the slot \code{LibraryType} in class \code{MeripBamFileList}.
#'
#' @param x a \code{MeripBamFileList} object.
#'
#' @aliases LibraryType
#'
#' @rdname LibraryType-methods
#'
#' @examples
#' GENE_ANNO_GTF = system.file("extdata", "example.gtf", package="exomePeak2")
#'
#' f1 = system.file("extdata", "IP1.bam", package="exomePeak2")
#' f2 = system.file("extdata", "IP2.bam", package="exomePeak2")
#' f3 = system.file("extdata", "IP3.bam", package="exomePeak2")
#' f4 = system.file("extdata", "IP4.bam", package="exomePeak2")
#' IP_BAM = c(f1,f2,f3,f4)
#' f1 = system.file("extdata", "Input1.bam", package="exomePeak2")
#' f2 = system.file("extdata", "Input2.bam", package="exomePeak2")
#' f3 = system.file("extdata", "Input3.bam", package="exomePeak2")
#' INPUT_BAM = c(f1,f2,f3)
#'
#' f1 = system.file("extdata", "treated_IP1.bam", package="exomePeak2")
#' TREATED_IP_BAM = c(f1)
#' f1 = system.file("extdata", "treated_Input1.bam", package="exomePeak2")
#' TREATED_INPUT_BAM = c(f1)
#'
#' MeRIP_Seq_Alignment <- scanMeripBAM(
#' bam_ip = IP_BAM,
#' bam_input = INPUT_BAM,
#' paired_end = FALSE
#' )
#'
#' LibraryType(MeRIP_Seq_Alignment)
#'
#' @return a value for the library type of MeRIP-seq experiment.
#'
#' @export
#'
setMethod("LibraryType",
"MeripBamFileList",
function(x) {
return(x@LibraryType)
})
#' @title Accessor to the slot \code{GCsizeFactors} in class \code{SummarizedExomePeak}.
#'
#' @param x1 A \code{SummarizedExomePeak} object.
#'
#' @aliases GCsizeFactors
#'
#' @examples
#'
#' f1 = system.file("extdata", "sep_ex_mod.rds", package="exomePeak2")
#'
#' sep <- readRDS(f1)
#'
#' head(GCsizeFactors(sep))
#'
#' @rdname GCsizeFactors-methods
#'
#' @return a data.frame for the GC content size factors of each sample
#'
#' @export
#'
setMethod("GCsizeFactors",
"SummarizedExomePeak",
function(x1) {
return(assays(x1)$GCsizeFactors)
})
#' @title Accessor to the slot \code{GCsizeFactors} in class \code{SummarizedExomePeak}.
#'
#' @param x2 A \code{SummarizedExomePeak} object.
#' @param value A \code{matrix} object.
#'
#' @aliases GCsizeFactors<-
#'
#' @rdname GCsizeFactors-methods
#'
#' @export
#'
setMethod("GCsizeFactors<-",
"SummarizedExomePeak",
function(x2,value) {
assays(x2,withDimnames=FALSE)$GCsizeFactors <- value
return(x2)
})
#' @title Accessor to the slot \code{exomePeak2Results} in class \code{SummarizedExomePeak}.
#'
#' @param x1 A \code{data.frame} object.
#'
#' @aliases exomePeak2Results
#'
#' @rdname exomePeak2Results-methods
#'
#'
#' @examples
#'
#' f1 = system.file("extdata", "sep_ex_mod.rds", package="exomePeak2")
#'
#' sep <- readRDS(f1)
#'
#' head(exomePeak2Results(sep))
#'
#' @export
#'
setMethod("exomePeak2Results",
"SummarizedExomePeak",
function(x1) {
return(x1@exomePeak2Results)
})
#' @title Accessor to the slot \code{exomePeak2Results} in class \code{SummarizedExomePeak}.
#'
#' @param x2 A \code{SummarizedExomePeak} object.
#' @param value a \code{data.frame} object for the DESeq2 Results.
#'
#' @return A \code{data.frame} object for the DESeq2 Results.
#'
#' @aliases exomePeak2Results<-
#'
#' @rdname exomePeak2Results-methods
#'
#' @export
#'
setMethod("exomePeak2Results<-",
"SummarizedExomePeak",
function(x2,value) {
x2@exomePeak2Results <- value
return(x2)
})
|
f9bceb27787c55b65a86260e8881f8172d46b860
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query52_1344/nreachq_query52_1344.R
|
25a794eccbd836559bff7689801798cbd2ccd0fb
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,531
|
r
|
nreachq_query52_1344.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 32097
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31537
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31537
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query52_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4893
c no.of clauses 32097
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 31537
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query52_1344.qdimacs 4893 32097 E1 [7 50 92 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892] 0 16 4530 31537 RED
|
5bee96bef6696365180183cb54e4eae849b72454
|
a6512af6112ff4106f30da9261271bfb09ed6ca3
|
/man/safePOST.Rd
|
48fcacc11b0bb6df229095f99f2f5415d654e13a
|
[] |
no_license
|
hrbrmstr/urldiversity
|
11144da555135cc3521fc1a51a6691629f949eba
|
1f90bb07b5bbe15a51f1a3781435ff66d5eab91a
|
refs/heads/master
| 2020-03-15T12:30:50.457100
| 2019-02-27T22:14:14
| 2019-02-27T22:14:14
| 132,145,874
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,306
|
rd
|
safePOST.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/httr-helpers.R
\name{safePOST}
\alias{safePOST}
\title{Safer version of \code{\link[httr:POST]{httr::POST()}}}
\usage{
safePOST(url = NULL, config = list(), timeout = httr::timeout(5),
..., body = NULL, encode = c("multipart", "form", "json", "raw"),
handle = NULL)
}
\arguments{
\item{url}{the url of the page to retrieve}
\item{config}{Additional configuration settings such as http
authentication (\code{\link{authenticate}}), additional headers
(\code{\link{add_headers}}), cookies (\code{\link{set_cookies}}) etc.
See \code{\link{config}} for full details and list of helpers.}
\item{timeout}{a call to \code{\link[httr:timeout]{httr::timeout()}}. Default timeout is \code{5} seconds.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link{modify_url}}. Unnamed parameters will be combined
with \code{\link{config}}.}
\item{body}{One of the following:
\itemize{
\item \code{FALSE}: No body. This is typically not used with \code{POST},
\code{PUT}, or \code{PATCH}, but can be useful if you need to send a
bodyless request (like \code{GET}) with \code{VERB()}.
\item \code{NULL}: An empty body
\item \code{""}: A length 0 body
\item \code{upload_file("path/")}: The contents of a file. The mime
type will be guessed from the extension, or can be supplied explicitly
as the second argument to \code{upload_file()}
\item A character or raw vector: sent as is in body. Use
\code{\link{content_type}} to tell the server what sort of data
you are sending.
\item A named list: See details for encode.
}}
\item{encode}{If the body is a named list, how should it be encoded? Can be
one of form (application/x-www-form-urlencoded), multipart,
(multipart/form-data), or json (application/json).
For "multipart", list elements can be strings or objects created by
\code{\link{upload_file}}. For "form", elements are coerced to strings
and escaped, use \code{I()} to prevent double-escaping. For "json",
parameters are automatically "unboxed" (i.e. length 1 vectors are
converted to scalars). To preserve a length 1 vector as a vector,
wrap in \code{I()}. For "raw", either a character or raw vector. You'll
need to make sure to set the \code{\link{content_type}()} yourself.}
\item{handle}{The handle to use with this request. If not
supplied, will be retrieved and reused from the \code{\link{handle_pool}}
based on the scheme, hostname and port of the url. By default \pkg{httr}
requests to the same scheme/host/port combo. This substantially reduces
connection time, and ensures that cookies are maintained over multiple
requests to the same host. See \code{\link{handle_pool}} for more
details.}
}
\description{
Scraping the web is fraught with peril. URLs die; networks get disrupted
and best laid plans for building a corups from links can quickly go awry.
Use this funtion to mitigate some of the pain of retrieving web resoures.
}
\details{
This is a thin wrapper for \code{\link[httr:GET]{httr::GET()}} using \code{\link[purrr:safely]{purrr::safely()}} that will
either return a \code{httr} \code{response} object or \code{NULL} if there was an error.
If you need the reason for the error (e.g. \code{Could not resolve host...})
you should write your own wrapper.
}
|
46425c5c1ee9b66a971a8a3738599e3590e20b2e
|
6856715b807090fba52244ca10eef0fe5d074fa3
|
/scripts/extract_probabilities.R
|
94d41469141b5d6197d2c802311eaa1274d5b126
|
[] |
no_license
|
carnivorouspeanut/Leiden-2017
|
6d153bcdb2cbdf3a288dd8dc306154c3ffc6c56d
|
cd4aa2c9d65083c39f96c1993dd59317cdd872da
|
refs/heads/master
| 2021-04-28T10:54:11.800104
| 2018-02-22T07:34:53
| 2018-02-22T07:34:53
| 122,078,305
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,608
|
r
|
extract_probabilities.R
|
tab <- read.table("hits_correspondence_iHHsearch_HHsearch.txt", header = TRUE, stringsAsFactors = FALSE)
new_tab <- subset(tab, select = c("index", "prot_id_segment_id", "Hit", "Prob_ihh", "Prob_hh"))
#this is the vector of hits probabilities on subsequences
Prob_seq <- c()
for(i in (1:nrow(new_tab))){
#find a file containing information about hits found by HHsearch on current FASTA subsequence
foldername1 <- paste(new_tab[i, ]$prot_id_segment_id, new_tab[i, ]$Hit, new_tab[i, ]$index, sep = "_")
foldername1 <- paste("../FASTA_best_hit/out", foldername1, "iteration_1", sep = "/")
foldername2 <- list.files(path = foldername1, pattern = "cumulative_hits.tsv$")
tablename <- paste(foldername1, foldername2, sep = "/")
#check if we even have this file
if(!file.exists(tablename)){
print(tablename)
}
tab_PFAM_HH <- read.table(tablename, header = TRUE, stringsAsFactors = FALSE)
#extract probability of a hit we are interested in
Hit <- data.frame(tab_PFAM_HH[tab_PFAM_HH$Hit == new_tab[i, ]$Hit, ]$Prob)
Hit <- Hit[1, ] # we take only the first hit, if there is more than one
Prob_seq <- c(Prob_seq, Hit)
}
Prob_seq <- as.numeric(unlist(Prob_seq))
new_tab <- cbind(new_tab, Prob_seq)
write.csv(new_tab, file = "Probabilities_HH_iHH_partHH.txt")
#plot
pdf("iHH_HHpart.pdf")
lim_min = min(new_tab$Prob_seq, new_tab$Prob_ihh)
lim_max = max(new_tab$Prob_seq, new_tab$Prob_ihh)
plot(new_tab$Prob_ihh, new_tab$Prob_seq, pch = 16, xlab = "iHHsearch", ylab = "HHsearch_on_hit_sequence", xlim = c(lim_min, lim_max), ylim = c(lim_min, lim_max))
abline(0, 1, col = "red")
dev.off()
|
913e7e9e82a75a8b7efa260080f7ec073688b04e
|
171e369954c2e0a8160f50d7bd5daccd1cdd8a79
|
/sim_galaxies/generate_Ec.R
|
55ec406474ac9a9ce3e8e6470a67f84735a8dae2
|
[] |
no_license
|
brendanstats/ABC-Dark-Matter
|
655c22aa7712536464b055e26199cd14f3965cb0
|
adff6322e996892dae1e8208108aa6b847ef05c0
|
refs/heads/master
| 2016-09-06T11:45:37.044235
| 2015-12-12T19:03:59
| 2015-12-12T19:03:59
| 42,635,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,291
|
r
|
generate_Ec.R
|
#####################
#Advanced Data Analysis
#Brendan McVeigh
#July 28, 2015
#####################
setwd("~/Google Drive/2015_S1_Spring/ADA/2015_07_28")
library(rPython)
library(parallel)
library(doParallel)
library(foreach)
python.load("functions_0624.py")
source("ABC_Code/priors.R")
##########################################
#Generate Datasets
##########################################
generate.sample <- function(python.file,sample.fun,param,ct,prior){
python.load(python.file)
proposal <- prior(1,"s")
param[4] <- proposal
param <- as.numeric(param)
sample <- do.call(cbind,python.call(sample.fun,param,ct))
out.pair <- list(theta=proposal,sample=sample)
return(out.pair)
}
file.name <- "Ec_samples.R"
n <- 2000
ct <- 5000
param <- c(2.0,-5.3, 2.5, 0.16, 1.5, -9.0, 6.9, 0.086, 21.0, 1.5)
ncore <- detectCores()
cl <- makeCluster(ncore)
registerDoParallel(cl)
sample <- foreach(i=1:n, .packages="rPython") %dopar%
generate.sample(python.file = "functions_0624.py",
sample.fun = "sampleR",
param = param,ct = ct, prior = prior.Ec)
stopCluster(cl)
save(sample,file = file.name)
##########################################
#Distance Functions to test
##########################################
mean.x <- function(data.sim){
return(mean(data.sim[,1]))
}
mean.y <- function(data.sim){
return(mean(data.sim[,2]))
}
mean.r <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
return(mean(r))
}
mean.vz <- function(data.sim){
return(mean(data.sim[,6]))
}
m2.x <- function(data.sim){
return(mean(data.sim[,1]*data.sim[,1]))
}
m2.y <- function(data.sim){
return(mean(data.sim[,2]*data.sim[,2]))
}
m2.r <- function(data.sim){
r2 <- data.sim[,1]^2+data.sim[,2]^2
return(mean(r2))
}
m2.vz <- function(data.sim){
return(mean(data.sim[,6]*data.sim[,6]))
}
m3.x <- function(data.sim){
return(mean(data.sim[,1]*data.sim[,1]*data.sim[,1]))
}
m3.y <- function(data.sim){
return(mean(data.sim[,2]*data.sim[,2]*data.sim[,2]))
}
m3.r <- function(data.sim){
r3 <- (data.sim[,1]^2+data.sim[,2]^2)^(3/2)
return(mean(r3))
}
m3.vz <- function(data.sim){
return(mean(data.sim[,6]*data.sim[,6]*data.sim[,6]))
}
vdisp.25 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(0))
max.r <- quantile(r,probs = c(.25))
keep <- min.r <= r & r < max.r
return(var(data.sim[keep,6]))
}
vdisp.5 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.25))
max.r <- quantile(r,probs = c(.5))
keep <- min.r <= r & r < max.r
return(var(data.sim[keep,6]))
}
vdisp.75 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.5))
max.r <- quantile(r,probs = c(.75))
keep <- min.r <= r & r < max.r
return(var(data.sim[keep,6]))
}
vdisp1 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.75))
max.r <- quantile(r,probs = c(1))
keep <- min.r <= r & r < max.r
return(var(data.sim[keep,6]))
}
dens.25 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(0))
max.r <- quantile(r,probs = c(.25))
num <- dim(data.sim)[1]/4
return(num/(max.r^2-min.r^2))
}
dens.5 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.25))
max.r <- quantile(r,probs = c(.5))
num <- dim(data.sim)[1]/4
return(num/(max.r^2-min.r^2))
}
dens.75 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.5))
max.r <- quantile(r,probs = c(.75))
num <- dim(data.sim)[1]/4
return(num/(max.r^2-min.r^2))
}
dens1 <- function(data.sim){
r <- sqrt(data.sim[,1]^2+data.sim[,2]^2)
min.r <- quantile(r,probs = c(.75))
max.r <- quantile(r,probs = c(1))
num <- dim(data.sim)[1]/4
return(num/(max.r^2-min.r^2))
}
load("distance_samples_test.R")
thetas <- unlist(lapply(sample, function(x) x[["theta"]]))
mu.x <- unlist(lapply(sample,function(x) mean.x(x$sample)))
plot(thetas, mu.x, xlab = "Ec", ylab = "Mean of x coordinate")
mu.y <- unlist(lapply(sample,function(x) mean.y(x$sample)))
plot(thetas, mu.y, xlab = "Ec", ylab = "Mean of y coordinate")
mu.r <- unlist(lapply(sample,function(x) mean.r(x$sample)))
plot(thetas,mu.r, xlab = "Ec", ylab = "Mean of radius")
mu.vz <- unlist(lapply(sample,function(x) mean.vz(x$sample)))
plot(thetas,mu.vz, xlab = "Ec", ylab = "Mean of z velocity")
mom2.x <- unlist(lapply(sample,function(x) m2.x(x$sample)))
plot(thetas,mom2.x, xlab = "Ec", ylab = "Second Moment of x coordinate")
mom2.y <- unlist(lapply(sample,function(x) m2.y(x$sample)))
plot(thetas,mom2.y, xlab = "Ec", ylab = "Second Moment of y coordinate")
mom2.r <- unlist(lapply(sample,function(x) m2.r(x$sample)))
plot(thetas,mom2.r, xlab = "Ec", ylab = "Second Moment of radius")
mom2.vz <- unlist(lapply(sample,function(x) m2.vz(x$sample)))
plot(thetas,mom2.vz, xlab = "Ec", ylab = "Second Moment of z velocity")
mom3.x <- unlist(lapply(sample,function(x) m3.x(x$sample)))
plot(thetas,mom3.x, xlab = "Ec", ylab = "Third Moment of x coordinate")
mom3.y <- unlist(lapply(sample,function(x) m3.y(x$sample)))
plot(thetas,mom3.y, xlab = "Ec", ylab = "Third Moment of y coordinate")
mom3.r <- unlist(lapply(sample,function(x) m3.r(x$sample)))
plot(thetas,mom3.r, xlab = "Ec", ylab = "Third Moment of radius")
mom3.vz <- unlist(lapply(sample,function(x) m3.vz(x$sample)))
plot(thetas,mom3.vz, xlab = "Ec", ylab = "Third Moment of z velocity")
vd.25 <- unlist(lapply(sample,function(x) vdisp.25(x$sample)))
plot(thetas,vd.25, xlab = "Ec", ylab = "Sample Variance of z velocity",
main = "Sample Variance of Velocity \n 1st quartile of radius")
vd.5 <- unlist(lapply(sample,function(x) vdisp.5(x$sample)))
plot(thetas,vd.5, xlab = "Ec", ylab = "Sample Variance of z velocity",
main = "Sample Variance of Velocity \n 2nd quartile of radius")
vd.75 <- unlist(lapply(sample,function(x) vdisp.75(x$sample)))
plot(thetas,vd.75, xlab = "Ec", ylab = "Sample Variance of z velocity",
main = "Sample Variance of Velocity \n 3rd quartile of radius")
vd1 <- unlist(lapply(sample,function(x) vdisp1(x$sample)))
plot(thetas,vd1, xlab = "Ec", ylab = "Sample Variance of z velocity",
main = "Sample Variance of Velocity \n 4th quartile of radius")
rden.25 <- unlist(lapply(sample,function(x) dens.25(x$sample)))
plot(thetas,rden.25, xlab = "Ec", ylab = "Stars per unit area",
main = "Sample Density \n 1st quartile of radius")
rden.5 <- unlist(lapply(sample,function(x) dens.5(x$sample)))
plot(thetas,rden.5, xlab = "Ec", ylab = "Stars per unit area",
main = "Sample Density \n 2nd quartile of radius")
rden.75 <- unlist(lapply(sample,function(x) dens.75(x$sample)))
plot(thetas,rden.75, xlab = "Ec", ylab = "Stars per unit area",
main = "Sample Density \n 3rd quartile of radius")
rden1 <- unlist(lapply(sample,function(x) dens1(x$sample)))
plot(thetas,rden1, xlab = "Ec", ylab = "Stars per unit area",
main = "Sample Density \n 4th quartile of radius")
start <- Sys.time()
rden1 <- unlist(lapply(sample,function(x) dens1(x$sample)))
end <- Sys.time()
end-start
start <- Sys.time()
rden2 <- unlist(mclapply(sample,function(x) dens1(x$sample)))
end <- Sys.time()
end-start
|
5e8c3d6909e75911ae4181cad8122a3eca25e0da
|
9098a1a8a6272d816e820e21b98a16eb1236fead
|
/generate_graphs.R
|
c43c4612765e1bf7c3dc5a1da3fd2104d762f185
|
[] |
no_license
|
Teabeans/UW527_Crypto_Assg2_OTPToken
|
2a6fdfee458562deeaba22bcf242e3e69a0724c9
|
5d907526363babe87e8380788eb5407d0292bc47
|
refs/heads/master
| 2020-12-20T02:22:05.828129
| 2020-02-03T23:51:39
| 2020-02-03T23:51:39
| 235,931,593
| 0
| 1
| null | 2020-01-29T01:46:04
| 2020-01-24T03:09:54
|
Java
|
UTF-8
|
R
| false
| false
| 1,291
|
r
|
generate_graphs.R
|
# R script for generating graphs used in CSS527 Assignment 2
# Make sure necessary packages are installed and loaded
install.packages("ggplot2")
library (ggplot2)
# Set numeric formatting options
options(scipen=9)
# Load data for CR1 collision plot, one million iterations
cr1_1_million.data <- read.csv(file="stats_to_one_million_iterations.txt")
# Make graph
p1 <- ggplot() + geom_line(aes(y = Collisions, x = Iteration), data = cr1_1_million.data)
p1 + labs(title = "CR1 Collision Rate by Iteration", subtitle = "To One Million Iterations", x = "Iterations", y = "Collisions") + theme(plot.title = element_text(hjust = 0.5)) + theme(plot.subtitle = element_text(hjust = 0.5))
# Export the CR1 graph to an image
ggsave("cr1_to_one_million.png")
# Load data for CR1 collision plot, ten million iterations
cr1_10_million.data <- read.csv(file="stats_to_ten_million_iterations.txt")
# Make graph
p2 <- ggplot() + geom_line(aes(y = Collisions, x = Iteration), data = cr1_10_million.data)
p2 + labs(title = "CR1 Collision Rate by Iteration", subtitle = "To Ten Million Iterations", x = "Iterations", y = "Collisions") + theme(plot.title = element_text(hjust = 0.5)) + theme(plot.subtitle = element_text(hjust = 0.5))
# Export the CR1 graph to an image
ggsave("cr1_to_ten_million.png")
|
1e72b30eda5b3b1c30f62b1b4035a99b635dcb8d
|
495404e45696ddd2dfecb0a88a79e2e499354156
|
/src/project/firm_sentiments_28day_growth_rscript.R
|
aa06791a5904aee87a90bab955d24141c6221fbe
|
[] |
no_license
|
gopalpenny/firmsentiments
|
fe7fddf5d53ff79cf0e827fbbd7ef9a9dcb742c5
|
0e45c5c19a9142dff199a4529bb7ea27a890f99f
|
refs/heads/main
| 2023-04-05T16:24:53.203528
| 2021-04-19T22:56:50
| 2021-04-19T22:56:50
| 358,458,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,234
|
r
|
firm_sentiments_28day_growth_rscript.R
|
# This script prepares and joins firmrisk Sentiments and stock market growth.
# After merging the two datasets, it selects only dates within 14 days of the
# Earnings statement, and calculates average daily market growth over these
# days. It then considered three regressions to determine how closely related
# both datasets are in terms of firm Sentiment and stock market growth. The code
# follows the outline here:
# 0. Load and prep the data
# 1a. the raw data
# 1b. data binned by average market growth
# 1c. data binned by sentiment
# 2. Plot the data
# load libraries
library(data.table)
library(tidyverse)
# 0. Load and prep the data
# Load data if file exists already. Otherwise, prepare using data.table
if (file.exists("data/format/stocks_28day_avg_growth.csv")) {
stocks_28day_growth <- read_csv("data/format/stocks_28day_avg_growth.csv")
} else {
# Read firmrisk to data.table
firmrisk <- fread("data/format/firmquarter_2020q4.csv")
firmrisk <- firmrisk[, .(year_quarter = date, ticker, Risk, Sentiment, date_earningscall)]
firmrisk <- firmrisk[, RollDate := date_earningscall - 45] # Shift join data by 45 days for rolling join
# Read stock histories to data.table
stocks <- fread("data/format/yfinance_stock_histories.csv")
stocks <- stocks[order(ticker, date), .(Close, date, ticker)]
stocks <- stocks[, `:=`(RollDate = date, CloseLag = shift(Close)), by = ticker] # to calculate daily growth
stocks <- stocks[, `:=`(CloseGrowth = (Close - CloseLag)/CloseLag)]
# dt_test <- setDT(data.frame(num = 1:7, group = c("a","a","b","b","b","c","c")))
# Set join keys
setkey(stocks, ticker, RollDate)
setkey(firmrisk, ticker, RollDate)
# Join stocks and firmrisk data, using rolling date match
stocks_growth_rolljoin <- firmrisk[stocks, roll = TRUE, nomatch = 0]
# Filter stock growth data to be within 14 days of earnings call
stocks_growth_28day_all <- stocks_growth_rolljoin[abs(date_earningscall - date) <= 14]
# Get 28-day average stock growth, centered on the date of the earnings call
stocks_28day_growth <- as_tibble(stocks_growth_28day[, .(avg_growth = mean(CloseGrowth, na.rm = TRUE),
sd_growth = sd(CloseGrowth, na.rm = TRUE),
Risk = first(Risk), Sentiment = first(Sentiment)),
by = .(ticker, year_quarter, date_earningscall)])
rm(list = c("stocks", "stocks_growth_rolljoin", "stocks_growth_28day_all"))
write_csv(stocks_28day_growth, "data/format/stocks_28day_avg_growth.csv")
}
# Identify outliers in average 28day growth
stocks_28day_growth_IQR <- quantile(stocks_28day_growth$avg_growth, c(0.25, 0.75), na.rm = TRUE)
stocks_28day_growth_IQR_outliers <- stocks_28day_growth_IQR +
(stocks_28day_growth_IQR[2] - stocks_28day_growth_IQR[1]) * c(-1.5, 1.5)
stocks_28day_growth <- stocks_28day_growth %>%
mutate(outlier = !between(avg_growth, stocks_28day_growth_IQR_outliers[1], stocks_28day_growth_IQR_outliers[2]))
# 1a. Firm sentiment vs average growth using the raw 28-day averages
firms_df <- stocks_28day_growth %>% filter(!outlier)
lm_28day_all <- summary(lm(Sentiment ~ avg_growth, firms_df))
lm_28day_all
# Group the average 28day growth into 10 equal-sized bins, and do the same for Sentiment scores.
firms_df_binned <- firms_df %>%
mutate(avg_growth_bins = cut(avg_growth, quantile(avg_growth, seq(0, 1, by = 0.1), na.rm = TRUE), include.lowest = TRUE),
sentiment_bins = cut(Sentiment, quantile(Sentiment, seq(0, 1, by = 0.1), na.rm = TRUE), include.lowest = TRUE))
# Get the lower limit of each bin
avg_growth_bins_ll <- as.numeric(gsub("(\\()|(\\[)|(,.*)", "", levels(firms_df_binned$avg_growth_bins)))
sentiment_bins_ll <- as.numeric(gsub("(\\()|(\\[)|(,.*)", "", levels(firms_df_binned$sentiment_bins)))#)
## Binned average quarterly growth (1b and 1c)
# 1b. Firm sentiment vs average growth using averages of grouped data -- binned
# into 10 equal-sized groups based on **Average growth**.
firms_df_binned_growth_means <- firms_df_binned %>%
group_by(avg_growth_bins) %>%
summarize(mean_growth = mean(avg_growth, na.rm = TRUE),
mean_sentiment = mean(Sentiment, na.rm = TRUE))
lm_growth_binned <- summary(lm(mean_sentiment ~ mean_growth, firms_df_binned_growth_means))
lm_growth_binned
# 1c. Firm sentiment vs average growth using averages of grouped data -- binned
# into 10 equal-sized groups based on **Sentiment**.
firms_df_binned_sentiment_means <- firms_df_binned %>%
group_by(sentiment_bins) %>%
summarize(mean_growth = mean(avg_growth,na.rm = TRUE),
mean_sentiment = mean(Sentiment,na.rm = TRUE))
lm_sentiment_binned <- summary(lm(mean_sentiment ~ mean_growth, firms_df_binned_sentiment_means))
lm_sentiment_binned
# 2. Plot the data
## Average quarterly growth versus Sentiment
# The following plot shows average average 28-day growth versus average
# sentiment for all year-quarter combinations as well as for both summaries of
# binned data.
lm_coefficients <- rbind(with(lm_28day_all, c(coefficients[,1], coefficients[2,4], r.squared)),
with(lm_growth_binned, c(coefficients[,1], coefficients[2,4], r.squared)),
with(lm_sentiment_binned, c(coefficients[,1], coefficients[2,4], r.squared))) %>%
as_tibble() %>% set_names(c("intercept", "slope", "p_val_slope", "regression_r_squared"))
ggplot() +
geom_point(data = firms_df, aes(avg_growth, Sentiment), alpha = 0.01) +
geom_vline(data = data.frame(intercept = avg_growth_bins_ll[-1]),
aes(xintercept = intercept,color = "Growth bins"), alpha = 0.75, linetype = "dashed") +
geom_abline(data = data.frame(intercept = sentiment_bins_ll[-1], slope = 0),
aes(slope = slope, intercept = intercept,color = "Sentiment bins"), alpha = 0.75, linetype = "dashed") +
geom_point(data = firms_df_binned_sentiment_means, aes(mean_growth, mean_sentiment, color = "Sentiment bins")) +
geom_point(data = firms_df_binned_growth_means, aes(mean_growth, mean_sentiment, color = "Growth bins")) +
xlab("Average 28-day growth, [%/day]") +
coord_flip()
|
86541848a57c33581d3e9b0982f1202452f98dca
|
c91350b98d6c2d6c067cd796742795847c6fd631
|
/vignettes/man/num.assays-ProjectSummary-method.Rd
|
775e172e39e8adf81a5d6a283abbb88bbea6cff0
|
[] |
no_license
|
gccong/ddiR-sirius
|
127673f8fca158449e50dafc462ec78c234a5d55
|
6b1792d06e6ff094349e89b5cbee7144763b932d
|
refs/heads/master
| 2021-01-19T04:18:49.242352
| 2015-12-13T22:29:01
| 2015-12-13T22:29:01
| 62,987,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 449
|
rd
|
num.assays-ProjectSummary-method.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/project.R
\docType{methods}
\name{num.assays,ProjectSummary-method}
\alias{num.assays,ProjectSummary-method}
\title{Returns a project number of assays}
\usage{
\S4method{num.assays}{ProjectSummary}(object)
}
\arguments{
\item{object}{a ProjectSummary}
}
\value{
the number of assays
}
\description{
Returns a project number of assays
}
\author{
Jose A. Dianes
}
|
21298a3ab81133cbd1e5dc31e29af173c6171d42
|
dc788043ff172615f0d4cc4c4b605b1cabaf9367
|
/R/split_lines.R
|
1d13e89f921083d3e57b95f796020e37e18c6f98
|
[] |
no_license
|
bmsasilva/roadHotspots
|
06d20710dbe713af624701d3cf91664f3665389f
|
472ed19e27b66730549e364a3ca20a818ca5b104
|
refs/heads/master
| 2022-02-14T17:52:55.953894
| 2022-02-02T11:35:51
| 2022-02-02T11:35:51
| 120,631,645
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,602
|
r
|
split_lines.R
|
#' split_lines
#' @name split_lines
#' @title Split lines into same length segments
#' @description This function splits each line of an "sp" object of class SpatialLines into segments of a given length.
#' @param spatial_line An "sp" object of class SpatialLines or SpatialLinesDataFrame.
#' @param split_length The length of the segments to split the lines into, in units of the SpatialLines. Default 500.
#' @usage split_lines(spatial_line, split_length = 500)
#' @return SpatialLinesDataFrame
#' @author Duccio Aiazzi
#' @references http://math.stackexchange.com/questions/175896/finding-a-point-along-a-line-a-certain-distance-away-from-another-point?newreg=468f66d7274f449b8ecf3fa4e63f41fe
#' @references http://tutorial.math.lamar.edu/Classes/CalcII/Vectors_Basics.aspx
#' @keywords internal
split_lines <- function(spatial_line,
split_length = 500) {
linedf <- lines2df(spatial_line)
df <- data.frame(
id = character(),
fx = numeric(),
fy = numeric(),
tx = numeric(),
ty = numeric(),
stringsAsFactors = FALSE
)
for (i in 1:nrow(linedf)) {
v_seg <- linedf[i, ]
seg_length <- sqrt( (v_seg$fx - v_seg$tx) ^ 2 +
(v_seg$fy - v_seg$ty) ^ 2)
if (seg_length <= split_length) {
df[nrow(df) + 1, ] <- c(paste0(v_seg$id, "_", "0000"),
v_seg$fx,
v_seg$fy,
v_seg$tx,
v_seg$ty)
next()
}
v <- c(v_seg$tx - v_seg$fx,
v_seg$ty - v_seg$fy)
u <- c(v[1] / sqrt(v[1] ^ 2 + v[2] ^ 2),
v[2] / sqrt(v[1] ^ 2 + v[2] ^ 2))
num_seg <- floor(seg_length / split_length)
seg_left <- seg_length - (num_seg * split_length)
for (i in 0:(num_seg - 1)) {
df[nrow(df) + 1, ] <- c(
paste0(v_seg$id, "_", formatC(i, width = 4, flag = "0")),
v_seg$fx + u[1] * split_length * i,
v_seg$fy + u[2] * split_length * i,
v_seg$fx + u[1] * split_length * (i + 1),
v_seg$fy + u[2] * split_length * (i + 1)
)
}
df[nrow(df) + 1, ] <- c(
paste0(v_seg$id, "_", formatC(
num_seg, width = 4, flag = "0"
)),
v_seg$fx + u[1] * split_length * num_seg,
v_seg$fy + u[2] * split_length * num_seg,
v_seg$tx,
v_seg$ty
)
}
df$fx <- as.numeric(df$fx)
df$fy <- as.numeric(df$fy)
df$tx <- as.numeric(df$tx)
df$ty <- as.numeric(df$ty)
sl <- df2lines(df)
sl <- sp::SpatialLinesDataFrame(sl, df, match.ID = FALSE)
return(sl)
}
|
97065a2c9b34b3439d1faaf4308c6b3215021bfa
|
306a75ef2e518a02bbc7009a435a9bf4cedfa8ae
|
/man/guess_gamlss.Rd
|
c06d0e6a462a97da9309327d3961d609996ef72e
|
[] |
no_license
|
ChristK/CKutils
|
6a5abe239717af86015005c28b8bffb0cacf86ff
|
8bb4c0d085a3b0b363a129e16252a32f2e528a0a
|
refs/heads/master
| 2021-07-19T18:54:52.946453
| 2020-05-27T15:57:31
| 2020-05-27T15:57:31
| 169,543,531
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
guess_gamlss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{guess_gamlss}
\alias{guess_gamlss}
\title{Prediction from a gamlss object in parallel}
\usage{
guess_gamlss(dt, gamlss_obj, orig_data = gamlss_obj$data, nc = 1L)
}
\arguments{
\item{dt}{A data.table}
\item{gamlss_obj}{gamlss object}
\item{orig_data}{original data.table}
\item{nc}{by default = 1L}
}
\description{
`guess_gamlss` returns a data.table with the predicted
variable. `dt` needs to have a column with percentiles named `rank_y`,
where `y` the name of the predicted variable (i.e. bmi).
}
|
88f33a7e9103ef5961bad95d8f3cebc48515d674
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rcreds/vignettes/rcreds.R
|
7fff714cd6e379eee33ef10fb5a32a20ef1bff30
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,795
|
r
|
rcreds.R
|
## ---- echo=FALSE, results='asis'-----------------------------------------
some_login_function <- function(username, password, separate_param) {
## does something with username/password
## ...
message(sprintf("some_login_function() received username = '%s' and password = '%s' and separate_param = '%s'\n (obviously wouldn't normally output like this)", username, password, separate_param))
return(TRUE)
}
## ---- echo=FALSE, results='asis'-----------------------------------------
## reminder, tempdir() is only for vignette. Normally use:
# CREDS_PARENT_FOLDER <- "~/.rcreds"
CREDS_PARENT_FOLDER <- tempdir()
library(rcreds)
set_default_rcreds_folder(file.path(CREDS_PARENT_FOLDER, "credential_files"))
set_default_rcreds_folder(file.path(CREDS_PARENT_FOLDER, "db_credential_files"), DB=TRUE)
set_default_rcreds_key_folder(folder = file.path(CREDS_PARENT_FOLDER, "key_files"))
## ---- echo=FALSE, results='asis'-----------------------------------------
## reminder, tempdir() is only for vignette. Normally use:
# CREDS_PARENT_FOLDER <- "~/.rcreds"
CREDS_PARENT_FOLDER <- tempdir()
## in your .Rprofile, you should set the default folders.
## Replace CREDS_PARENT_FOLDER with correct folder, such as "~/.rcreds"
rcreds::set_default_rcreds_folder(file.path(CREDS_PARENT_FOLDER, "credential_files"))
rcreds::set_default_rcreds_folder(file.path(CREDS_PARENT_FOLDER, "db_credential_files"), DB=TRUE)
rcreds::set_default_rcreds_key_folder(folder=file.path(CREDS_PARENT_FOLDER, "key_files"))
library(rcreds)
creds_info <- "for_app123_login" ## some description that will be part of the filename
key_object <- create_key(bytes=32, depth=8, verbose=TRUE)
## Save Credentials
write_credentials_to_file(username="cosmo", password="too many secrets", key=key_object, info.file_name = creds_info, verbose=TRUE)
## Save key file to a different location.
save_key(key=key_object, zArchive_existing=FALSE, overwrite_existing=TRUE, verbose=TRUE)
## ---- echo=FALSE, results='asis'-----------------------------------------
## reminder, tempdir() is only for vignette. Normally use:
# CREDS_PARENT_FOLDER <- "~/.rcreds"
CREDS_PARENT_FOLDER <- tempdir()
key_file <- file.path(CREDS_PARENT_FOLDER, "key_files", ".crypt_key.rds")
creds_file <- file.path(CREDS_PARENT_FOLDER, "credential_files", "for_app123_login.credentials.creds")
creds <- read_credentials_from_file(file_full_path=creds_file, key=key_file)
## SHOWING CONTENTS FOR DEMO PURPOSES. NORMALLY DON'T DO THIS
print(creds)
## Use the credentials by refering to the elements by name
some_login_function(username = creds$username, password=creds$password, separate_param="plain example")
## altenatively can use do.call
do.call(some_login_function, c(creds, list(separate_param="do.call example")))
|
8d690347f4766ee48959f2b6a8228cf22ffaca71
|
783197389a6d1c087fe234b6d094d5aaf2d255ea
|
/plot1.R
|
33632d7d025df76fbde0a6733184ef4ae659c85e
|
[] |
no_license
|
tycoi2005/ExData_Plotting1
|
483504bdf7920f1601051f4b5f4dd176b832efce
|
09566e2c7c1cae48157718d220d3d8bf568bc813
|
refs/heads/master
| 2020-12-24T10:39:40.867503
| 2015-03-08T19:27:26
| 2015-03-08T19:27:26
| 31,860,259
| 0
| 0
| null | 2015-03-08T18:23:59
| 2015-03-08T18:23:59
| null |
UTF-8
|
R
| false
| false
| 517
|
r
|
plot1.R
|
data<-read.table("../household_power_consumption.txt", sep=";",
colClasses=c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric','numeric', 'numeric', 'numeric')
,header=TRUE, na.strings='?')
data1 <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
png("plot1.png", width=480, height=480)
hist(data1$Global_active_power, col='red',
xlab = 'Global Active Power (kilowatts)',
main = 'Global Active Power')
dev.off()
|
5a0f7bd1c052f0c9ab8c631642b33e1aca5f7866
|
88303591700f8064578089b1f057adf186e2a1e0
|
/man/ppMeasures-package.Rd
|
09772e8a076f55546712b3aabd6f67b7b6dac9d2
|
[] |
no_license
|
cran/ppMeasures
|
d338659bc26d1c6462af8ecf2d384f75a5ae0092
|
57e68c8be7ab2be5a8bae1299294a5098a41902c
|
refs/heads/master
| 2020-05-26T08:35:47.895833
| 2012-11-07T00:00:00
| 2012-11-07T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,786
|
rd
|
ppMeasures-package.Rd
|
\name{ppMeasures-package}
\alias{ppMeasures-package}
\alias{ppMeasures}
\docType{package}
\title{
Point pattern distances and prototypes.
}
\description{
The package focuses on distances and prototypes for point patterns. There are three algorithms provided to compute spike-time distance, and one of these algorithms is generalized to compute variations of spike-time distance. Multiple algorithms are also provided to estimate prototypes of collections of point patterns.
}
\details{
\tabular{ll}{
Package: \tab ppMeasures\cr
Type: \tab Package\cr
Version: \tab 0.2\cr
Date: \tab 2012-11-07\cr
License: \tab GPL (>= 2)\cr
LazyLoad: \tab yes\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
DM Diez, KE Tranbarger Freier, and FP Schoenberg
Maintainer: DM Diez <david.m.diez@gmail.com>
}
\references{
J Victor and K Purpura (1997). Metric-space analysis of spike trains: theory, algorithms and application. Network: Computation in Neural Systems vol. 8, pp. 127-164, 1997.
KE Tranbarger and FP Schoenberg (2010). On the computation and application of point process prototypes. Open Applied Informatics Journal 4, 1-9.
Diez DM, Schoenberg FP, and Woody CD (2012). Algorithms for computing spike time
distance and point process prototypes with application to feline neuronal responses to
acoustic stimuli. Journal of Neuroscience Methods 203(1):186-192.
}
\keyword{ package }
\keyword{ point pattern }
\keyword{ point process }
\keyword{ point pattern distance }
\keyword{ point pattern prototype }
\seealso{
\code{\link{stDist}}, \code{\link{ppColl}}, \code{\link{ppPrototype}}
}
\examples{
data(pattEx2)
x <- pattEx2[pattEx2[,1] == 1,c(2,3)]
y <- pattEx2[pattEx2[,1] == 2,c(2,3)]
(hold2 <- stDist(x, y, 2))
summary(hold2)
plot(hold2)
data(collEx2)
(ppc2 <- ppColl(collEx2[,2:3], collEx2[,1]))
summary(ppc2)
plot(ppc2, pch=0.5)
hold2 <- ppPrototype(ppc2, pm=0.05)
points(hold2, pch=20, cex=3, col='#FF000088')
#===> reproducing results from Diez et al. (in review) <===#
# results differ slightly due to default use of "margPT" algorithm
# and that cats are not weighted equally in the below analysis
par(mfrow=c(3,1))
data(neurNaive)
(ppc3 <- ppColl(neurNaive[,2], neurNaive[,1], nMissing=))
summary(ppc3)
plot(ppc3, cex=0.5)
hold3 <- ppPrototype(ppc3, pm=0.0106, bypassCheck=TRUE)
points(hold3, pch=20, cex=3, col='#FF000044')
data(neurCond)
(ppc4 <- ppColl(neurCond[,2], neurCond[,1]))
summary(ppc4)
plot(ppc4, cex=0.5)
hold4 <- ppPrototype(ppc4, pm=0.0075, bypassCheck=TRUE)
points(hold4, pch=20, cex=3, col='#FF000044')
data(neurBd)
(ppc5 <- ppColl(neurBd[,2], neurBd[,1]))
summary(ppc5)
plot(ppc5, cex=0.5)
hold5 <- ppPrototype(ppc5, pm=0.0078, bypassCheck=TRUE)
points(hold5, pch=20, cex=3, col='#FF000044')
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.