blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a37f2223f26117ff25a4019e259f77f141be8bc
|
f997169854672f36810e793a2932313f11b52139
|
/man/coins.Rd
|
6f204682f5d04a3ec611b33279ab9495505839e0
|
[] |
no_license
|
jverzani/UsingR
|
7e3fcbddae97a0ecd0268a9068af7a70ecc82907
|
d1cd49622b6e85cf26710c5747423b4ba0721ef6
|
refs/heads/master
| 2021-01-09T20:53:56.202763
| 2020-07-29T16:53:55
| 2020-07-29T16:53:55
| 57,312,995
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
rd
|
coins.Rd
|
\name{coins}
\alias{coins}
\docType{data}
\title{The coins in my change bin}
\description{
The coins in the author's change bin, with year and value.
}
\usage{data(coins)}
\format{
A data frame with 371 observations on the following 2 variables.
\describe{
\item{year}{Year of coin}
\item{value}{Value of coin: quarter, dime, nickel, or penny}
}
}
\examples{
data(coins)
years = cut(coins$year,seq(1920,2010,by=10),include.lowest=TRUE,
labels = paste(192:200,"*",sep=""))
table(years)
}
\keyword{datasets}
|
7a1090f139779644bdaf3f368ea72f96cabf7aca
|
6b5ca4ba55e404ade3d0e16d20192ace73cfe904
|
/man/overlay_spatial_prior.Rd
|
fab00cbe25b9309348bc6130b2ee7c1fbb9dcf7c
|
[
"MIT"
] |
permissive
|
Michael-Stevens-27/silverblaze
|
6715506ed23132ca9d1f71f1a208061edaa4722d
|
f0f001710589fe072545b00f0d3524fc993f4cbd
|
refs/heads/master
| 2021-08-21T20:46:40.350940
| 2021-05-26T16:47:50
| 2021-05-26T16:47:50
| 127,313,359
| 3
| 0
|
MIT
| 2021-05-26T16:47:51
| 2018-03-29T15:46:19
|
R
|
UTF-8
|
R
| false
| true
| 973
|
rd
|
overlay_spatial_prior.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{overlay_spatial_prior}
\alias{overlay_spatial_prior}
\title{Add spatial prior to dynamic map}
\usage{
overlay_spatial_prior(
myplot,
project,
col = col_hotcold(),
opacity = 0.8,
smoothing = 1
)
}
\arguments{
\item{myplot}{dynamic map produced by \code{plot_map()} function.}
\item{project}{an RgeoProfile project, as produced by the function
\code{rgeoprofile_project()}.}
\item{col}{set of plotting colours.}
\item{opacity}{opacity of spatial prior.}
\item{smoothing}{what level of smoothing to apply to the spatial prior. Smoothing
is applied using the \code{raster} function \code{disaggregate}, with
\code{method = "bilinear"}.}
}
\description{
Add spatial prior to dynamic map
}
\examples{
\dontshow{library(silverblaze)}
\dontshow{p <- rgeoprofile_file("tutorial1_project.rds")}
plot1 <- plot_map()
plot1 <- overlay_spatial_prior(myplot = plot1, project = p)
plot1
}
|
7a3d4ca9413b517b48ab2affa0a171032694a5a2
|
eaa3a59c28af9fbf27f610a54a54af8399ab4bf6
|
/lib/read_GLUE_results.R
|
3820ef7bb4546e898a160e3edf4393c2c19c1bcb
|
[] |
no_license
|
co822ee/LUR_optimization
|
419214564d0a71f4afb496032fd2029b3d862892
|
84f35535ecd67349872c5b15c801ef3b3492fc05
|
refs/heads/master
| 2021-01-05T06:38:05.204724
| 2020-05-17T07:31:34
| 2020-05-17T07:31:34
| 240,917,143
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,970
|
r
|
read_GLUE_results.R
|
# Ground-truth observations -------------------------------------------------
sensor_validation <- read.csv('../data/sensorValidateData_all.csv', header = TRUE)
sensor_train <- read.csv('../data/sensorTrainData_all.csv', header = TRUE)
sensor_all <- read.csv('../data/data_laea_new_correctTropomi.csv', header = TRUE)
tropomiValues_training <- read.table('../data/tropomiInTropomiTBlock.txt')[, 1]
tropomiValues_validation <- read.table('../data/tropomiInTropomiVBlock.txt')[, 1]

# LUR prediction realizations ------------------------------------------------
# Locate and read one GLUE evaluation file for a scenario, an evaluation
# location type ('sensor' or 'tropomi'), and a training/validation split.
readFile <- function(scenario, sensorOrTropomi, training = TRUE) {
  candidates <- list.files(path = '../data/GLUE/evaluation_allOutputData',
                           pattern = scenario)
  # drop the clipped outputs
  candidates <- candidates[grepl("^(?!.*clip)", candidates, perl = TRUE)]
  # keep only the requested train/validation split
  split_pattern <- if (training) "^(?=.*training)" else "^(?=.*validation)"
  candidates <- candidates[grepl(split_pattern, candidates, perl = TRUE)]
  if (sensorOrTropomi == 'sensor') {
    candidates <- candidates[grepl("^(?=.*SensorLocations)", candidates, perl = TRUE)]
  }
  if (sensorOrTropomi == 'tropomi') {
    candidates <- candidates[grepl("^(?=.*TropomiBlocks)", candidates, perl = TRUE)]
  }
  print(candidates)
  read.csv(paste0('../data/GLUE/evaluation_allOutputData/', candidates), header = TRUE)
}
# All the values are acquired from the validation pixels or validation
# monitoring stations.
frontierSensorTrain <- readFile('frontier', 'sensor', TRUE)
frontierSensorValidate <- readFile('frontier', 'sensor', FALSE)
frontierTropomiTrain <- readFile('frontier', 'tropomi', TRUE)
frontierTropomiValidate <- readFile('frontier', 'tropomi', FALSE)
sensorSensorTrain <- readFile('onlySensor', 'sensor', TRUE)
sensorSensorValidate <- readFile('onlySensor', 'sensor', FALSE)
sensorTropomiTrain <- readFile('onlySensor', 'tropomi', TRUE)
sensorTropomiValidate <- readFile('onlySensor', 'tropomi', FALSE)
tropomiSensorTrain <- readFile('onlyTropomi', 'sensor', TRUE)
tropomiSensorValidate <- readFile('onlyTropomi', 'sensor', FALSE)
tropomiTropomiTrain <- readFile('onlyTropomi', 'tropomi', TRUE)
tropomiTropomiValidate <- readFile('onlyTropomi', 'tropomi', FALSE)
|
a641dbeb3b381aff6131adaad31c515d48508faf
|
beed510c5f96942b0330cd7e531b84c58c18b0eb
|
/src/scripts/loadResults.R
|
10265767db84d31ad79ee88602dbc20b72f91448
|
[] |
no_license
|
pauloditarso/costmodel
|
81acde07b4dcae47789f6617f9e1710c9249957c
|
5ee907d62067345437a9d7397df97c8a5989c9ed
|
refs/heads/master
| 2022-06-20T23:00:15.107373
| 2022-05-14T18:03:51
| 2022-05-14T18:03:51
| 200,047,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,846
|
r
|
loadResults.R
|
# Aggregate per-demand experiment results (saved .RData files) into combined
# "all*" data frames, then compute confidence intervals for discount ratios.
# NOTE(review): rm(list = ls()) wipes the global environment — this script
# must be run standalone.
rm(list = ls())
demands <- c("02", "04", "08", "16", "32", "64")
# empty 6-column accumulators, grown by rbind in the loop below
allCosts <- data.frame(matrix(ncol = 6, nrow = 0))
allCostsCI <- data.frame(matrix(ncol = 6, nrow = 0))
allDisc <- data.frame(matrix(ncol=6, nrow=0))
allReps <- data.frame(matrix(ncol=6, nrow=0))
allRepsCI <- data.frame(matrix(ncol=6, nrow=0))
for ( demand in demands ) {
# each .RData is assumed to define finalCosts, finalCostsCI, finalDisc,
# finalReps and finalRepsCI — TODO confirm against the experiment scripts
load(paste("../experiments/", demand, "/.RData", sep = ""))
allCosts <- rbind( allCosts, finalCosts)
allCostsCI <- rbind( allCostsCI, finalCostsCI )
allDisc <- rbind( allDisc, finalDisc )
allReps <- rbind( allReps, finalReps )
allRepsCI <- rbind( allRepsCI, finalRepsCI )
# drop everything loaded this iteration except the "all*" accumulators
# (also removes `demands`/`demand`; the for loop keeps its own copy)
rm( list = setdiff(ls(), ls(pattern = "all")) )
}
colnames(allCosts) <- c("demand", "provs", "turn", "opt", "first", "random")
colnames(allCostsCI) <- c("demand", "provs", "type", "upper", "mean", "lower")
colnames(allDisc) <- c("demand", "provs", "turn", "type", "target", "opt")
colnames(allReps) <- c("demand", "hosts", "provs", "turn", "used", "type")
colnames(allRepsCI) <- c("demand", "provs", "type", "upper", "mean", "lower")
# Confidence intervals of target/opt per (demand, provider, discount type).
allDiscCI <- data.frame(matrix(nrow = 0, ncol = 6))
discTypes <- c(4, 5, 6)
providers <- 5:20
demands <- unique(factor(allDisc$demand))
for ( demand in demands ) {
for ( provider in providers ) {
for ( discType in discTypes ) {
# NOTE(review): the colnames above name this column "type", but the filter
# uses allDisc$type — consistent; `target`/`opt` are coerced to numeric
targetAux <- as.numeric(allDisc[allDisc$demand == demand & allDisc$provs == provider & allDisc$type == discType,]$target)
optAux <- as.numeric(allDisc[allDisc$demand == demand & allDisc$provs == provider & allDisc$type == discType,]$opt)
# Rmisc::CI returns c(upper, mean, lower); requires the Rmisc package
allDiscCI <- rbind( allDiscCI, c(demand, provider, discType, Rmisc::CI(targetAux/optAux)) )
}
}
}
rm(demand, demands, provider, providers, discType, discTypes, optAux, targetAux)
colnames(allDiscCI) <- c("demand", "provs", "type", "upper", "mean", "lower")
|
029f16a07f222d5d73c6bd3679036cc1736513f2
|
3f6396b8716b02ae5b126dfee4c6bd03c81ccb21
|
/server.R
|
f703738d6d061b49e601eb3819d7cc17c0e51a66
|
[] |
no_license
|
qwang-big/visVCF
|
5a91f0b499bd930dd7895767faac47fdaa688eb7
|
6f4d022f2722df6939855d98b8e2daa9cb2b8a31
|
refs/heads/master
| 2020-04-11T05:04:49.574607
| 2018-12-12T20:06:44
| 2018-12-12T20:06:44
| 161,537,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 738
|
r
|
server.R
|
# Shiny server: heat-map views of codon / tRNA frequency data.
library(shiny)
library(ggplot2)
# res.rda is assumed to provide `df` (long-format per-codon frequencies with
# columns 1 = codon, plus Var1, f, Freq) and `tf` (overall tRNA frequencies)
# — TODO confirm; not visible from this file.
load('res.rda')
function(input, output) {
# Heat map for the codon currently selected in the tRNA input control.
output$plot <- renderPlot({
codon=input[['tRNA']]
p <- ggplot(df[df[,1]==codon,2:4], aes(Var1, f, fill = Freq)) + geom_tile(colour = "white") +
scale_x_continuous() + scale_fill_gradient2(low="white", high="red") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))+labs(x="Position",y="",title=codon)
print(p)
}, height=500)
# Overall tRNA-frequency heat map (static; does not depend on input).
output$plot2 <- renderPlot({
p <- ggplot(tf, aes(Var1, f, fill = Freq)) + geom_tile(colour = "white") +
scale_fill_gradient2(low="white", high="red") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))+labs(x="",y="",title="tRNA frequencies")
print(p)
}, height=500)
}
|
2f4df5e741003531f1c5ced05c0b8b6b978cdc98
|
39d7de14e99f6c30e3bb4b8f38859f52433f78c3
|
/Stepik/stepik.R
|
d3b4dad8175bf0db6ed23b49d00607ce0cc0da71
|
[] |
no_license
|
petr-konovalov/R
|
f8c6fe4abd7fa77ef92ef21c5c79743c6d4b8442
|
14e729968693f6b2f3513e0eb14772cb174e33f9
|
refs/heads/master
| 2021-06-24T09:53:09.510178
| 2021-05-10T15:47:20
| 2021-05-10T15:47:20
| 224,501,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,001
|
r
|
stepik.R
|
# Stepik course exercise snippets: vectors, aggregation, plotting and basic
# hypothesis tests on built-in data sets. Intended for interactive, line-by-
# line execution, not as a coherent program.
mtcars$new_var <- ifelse(mtcars$carb >= 4 | mtcars$cyl > 6, 1, 0)
ts (1:12, frequency = 12, start = 1990)
v <- c(1, 3, 2, 7, 11, 5)
v <-as.vector(AirPassengers)
# elements that are larger than their predecessor
v[2:length(v)][v[2:length(v)] > v[1:(length(v)-1)]]
# 10-point moving average via differences of the cumulative sum
v <- c(0, cumsum(as.vector(AirPassengers)))
moving_average <- (v[-(1:10)] - v[-((length(v) - 9): length(v))]) / 10
descriptions_stat <- aggregate(cbind(mtcars$hp, mtcars$disp) ~ mtcars$am, mtcars, sd)
descriptions_stat
aggregate(Ozone ~ Month, data = airquality[7 <= airquality$Month & airquality$Month <= 9, ], length)
# NOTE(review): describeBy() comes from the 'psych' package, which this
# script never loads — confirm library(psych) before running these lines.
describeBy(airquality, airquality$Month)
describeBy(iris[,-5], iris$Species)
my_vector <- rnorm(30)
my_vector[sample(1:30, 10)] <- NA # put NA at ten random positions
my_vector
fixed_vector <- replace(my_vector, is.na(my_vector), mean(my_vector, na.rm = T))
library(ggplot2)
data(airquality)
airquality$Month <- as.factor(airquality$Month)
ggplot(data = airquality, aes(x = Month, y = Ozone)) +
geom_boxplot()
plot1 <- ggplot(data = mtcars, aes(x = mpg, y = disp, col = hp)) +
geom_point()
ggplot(iris, aes(Sepal.Length)) + geom_histogram(aes(fill = Species))
# contingency-table exercises on HairEyeColor
red_men <- prop.table(HairEyeColor[,,'Male'], 2)['Red', 'Blue']
sum(HairEyeColor[,'Green','Female'])
library("ggplot2")
mydata <- as.data.frame(HairEyeColor[,,'Female'])
obj <- ggplot(data = mydata, aes(x = Hair, y = Freq, fill = Eye)) +
geom_bar(stat="identity", position = 'dodge') +
scale_fill_manual(values=c("Brown", "Blue", "Darkgrey", "Darkgreen"))
# categorical tests: binomial, chi-squared, Fisher
binom.test(x = 7, n = 20)
chisq.test(HairEyeColor['Brown',,'Female'])
chisq.test(diamonds$cut, diamonds$color)$statistic
diamonds$factor_price = diamonds$price >= mean(diamonds$price)
diamonds$factor_carat = diamonds$carat >= mean(diamonds$carat)
main_stat = chisq.test(diamonds$factor_price, diamonds$factor_carat)$statistic
fisher.test(mtcars$am, mtcars$vs)$p.value
# t-tests (some rely on local files present only on the author's machine)
df = ToothGrowth
t_stat = t.test(subset(df, supp == 'OJ' & dose == 0.5)$len, subset(df, supp == 'VC' & dose == 2)$len)$statistic
df = read.csv('/home/petr/Рабочий стол/R/Stepik/lekarstva.csv')
t.test(df$Pressure_before, df$Pressure_after, paired = T)
setwd('/home/petr/Рабочий стол/R/Stepik')
df = read.table('dataset_11504_15.txt')
bartlett.test(V1 ~ V2, df)
df = read.table('dataset_11504_16.txt')
t.test(df$V1, df$V2)
# ANOVA exercises
summary(aov(yield ~ N + P + K, npk))
fit <- aov(Sepal.Width ~ Species, data = iris)
summary(fit)
TukeyHSD(fit)
df <- read.csv(url("https://stepic.org/media/attachments/lesson/11505/Pillulkin.csv"))
View(df)
df$patient <- as.factor(df$patient)
# repeated-measures ANOVA with patient as the error stratum
summary(aov(temperature ~ pill + Error(patient/pill), data = df))
summary(aov(temperature ~ pill * doctor + Error(patient/(pill * doctor)), data = df))
#install.packages('devtools')
#require(devtools)
#install_version('Hmisc', version = "4.1-0")
#install.packages("Hmisc")
library(ggplot2)
#library(Hmisc)
# mean_cl_boot is re-exported by ggplot2 (originally from Hmisc)
obj <- ggplot(ToothGrowth, aes(x = as.factor(dose), y = len, col = supp))+
stat_summary(fun.data = mean_cl_boot, geom = 'errorbar', width = 0.1, position = position_dodge(0.2))+
stat_summary(fun.data = mean_cl_boot, geom = 'point', size = 3, position = position_dodge(0.2))+
stat_summary(fun.data = mean_cl_boot, geom = 'line', position = position_dodge(0.2))
# sample vector for the NA.position exercise below
vect = c(1, 3, NA, NA, 4, NA, 5, 6, NA)
# Count the number of missing (NA) values in a vector.
#
# x: an atomic vector (anything is.na() accepts).
# Returns a single integer: the number of NA entries (0 for empty input).
NA.position <- function(x){
  # is.na() yields a logical mask; summing counts the TRUEs directly,
  # replacing the original 1:length(x) indexing anti-pattern (which also
  # relies on a throwaway index vector).
  sum(is.na(x))
}
# Sum the strictly positive elements of a numeric vector.
# Returns 0 when no element is positive; NA in the input propagates
# (the logical mask keeps NA positions), matching the original behaviour.
filtered.sum <- function(x){
  positives <- x[x > 0]
  sum(positives)
}
# Remove Tukey outliers: keep only values within 1.5 * IQR of the quartiles.
# Returns the filtered vector (possibly unchanged when nothing is extreme).
outliers.rm <- function(x){
  quartiles <- quantile(x, probs = c(0.25, 0.75))
  whisker <- 1.5 * IQR(x)
  lower <- quartiles[1] - whisker
  upper <- quartiles[2] + whisker
  x[x >= lower & x <= upper]
}
# Return the signed pairwise Pearson correlation with the largest absolute
# value among all numeric columns of a data frame.
#
# x: a data frame; non-numeric columns are ignored.
# Returns 0 when fewer than two numeric columns are present.
filtered.cor <- function(x){
  best <- 0  # signed estimate with the largest |r| seen so far
  num_cols <- which(vapply(x, is.numeric, logical(1)))
  for (i in num_cols) {
    for (j in num_cols) {
      if (i != j) {
        est <- as.numeric(cor.test(x[, i], x[, j])$estimate)
        # BUG FIX: the original compared abs(est) against the *signed*
        # running value (maxAbs), so once a strong negative correlation was
        # stored, every later pair "won" the comparison and the final result
        # was whatever pair happened to be visited last — not the maximum.
        # Compare absolute values, keep the signed estimate.
        if (abs(est) > abs(best)) {
          best <- est
        }
      }
    }
  }
  best
}
# Alternative filtered.cor implementation (overwrites the loop version above).
# NOTE(review): corr.test() comes from the 'psych' package, which this script
# never loads — confirm library(psych) is attached before running this.
filtered.cor <- function(x){
# correlation matrix of the numeric columns only
tst <- corr.test(x[, sapply(x, is.numeric)])$r
# zero the diagonal so trivial self-correlations cannot win
diag(tst) <- 0
tst <- as.vector(tst)
# signed value with the largest absolute correlation
return(tst[which.max(abs(tst))])
}
test_data <- read.csv("https://stepik.org/media/attachments/course/129/test_data.csv")
str(test_data)
# hard-coded copy of the same two-column test data (works offline)
test_data <- as.data.frame(list(col1 = c(-0.12, 0.57, -1.91, -1.02, -0.93, -1.93, -1.37, -1.4, 1.08, 1.61, 0.4, -1.35, -0.88, -1.53, 0.99, -1.62, 1.59, -1.94, 0.6, 1.08, 0.09, -1.55, -0.65, 0.34, 1.38, -0.83, 1.41, -1.41, -0.42, -1.8), col2 = c(-0.17, 0.34, -2.43, 2.06, -0.5, 0.58, 0.05, 0.65, 0.93, 0.67, 0.2, 1.21, 0.07, -2.13, 0.77, -0.08, -1.51, 0.53, 1.41, 0.08, -0.12, -0.02, 0.33, 1.29, -0.39, 0.23, -0.33, 0.55, 0.45, 1.96)))
# Correlate the first two columns of x, choosing the method by normality:
# Spearman when either column fails the Shapiro-Wilk test (p < 0.05),
# Pearson otherwise. Returns the bare numeric estimate.
smart_cor <- function(x) {
  col1 <- x[, 1]
  col2 <- x[, 2]
  non_normal <- shapiro.test(col1)$p.value < 0.05 |
    shapiro.test(col2)$p.value < 0.05
  method <- if (non_normal) "spearman" else "pearson"
  as.numeric(cor.test(col1, col2, method = method)$estimate)
}
# Run smart_cor on the hard-coded test data defined above.
smart_cor(test_data)
# Graded task: simple regression on a downloaded data set (local file).
df <- read.table('dataset_11508_12.txt')
fff <- summary(lm(V1 ~ V2, df))
# Coefficients of price ~ depth on a filtered slice of the diamonds data.
fit_coef <- lm(price ~ depth, subset(ggplot2::diamonds, cut == 'Ideal' & carat == 0.46))$coefficients
# If the two columns of x are significantly correlated (Pearson p < 0.05),
# append a 'fit' column with fitted values from regressing column 1 on
# column 2 and return the data frame (original column names preserved).
# Otherwise return the string "There is no sense in prediction".
regr.calc <- function(x){
  original_names <- names(x)
  names(x) <- c("V1", "V2")
  significant <- cor.test(x$V1, x$V2)$p.value < 0.05
  if (!significant) {
    return("There is no sense in prediction")
  }
  model <- lm(V1 ~ V2, x)
  x$fit <- model$fitted.values
  names(x) <- c(original_names, 'fit')
  x
}
# Scatter of petal vs sepal width with per-species linear fits.
ggplot(iris, aes(x = Sepal.Width, y = Petal.Width, col = Species))+
geom_point(size = 2) +
geom_smooth(method = "lm")
# Test data for the fill_na exercise below (columns x_1, x_2, y).
test_data <- read.csv("https://stepic.org/media/attachments/course/129/fill_na_test.csv")
# Impute missing y values with predictions from lm(y ~ x_1 + x_2).
# Expects x_1 and x_2 to be the first two columns of x; observed y values
# are copied through unchanged into the new y_full column.
fill_na <- function(x){
  observed <- !is.na(x$y)
  model <- lm(y ~ x_1 + x_2, x)
  predictions <- predict(model, x[1:2])
  x$y_full <- predictions
  x$y_full[observed] <- x$y[observed]
  x
}
# Apply fill_na to the downloaded test data.
fill_na(test_data)
# Multiple regression and interaction-term exercises on mtcars.
model <- lm(wt ~ mpg + disp, mtcars)
df <- mtcars
df$am <- factor(df$am, labels = c('Automatic', 'Manual'))
summary(lm(mpg ~ wt * am, df))
library(ggplot2)
# first convert the am variable to a factor
mtcars$am <- factor(mtcars$am)
# now build the plot
my_plot <- ggplot(mtcars, aes(x = wt, y = mpg, col = am)) +
geom_smooth(method = 'lm')
# forward stepwise model selection on the attitude data set
model_full <- lm(rating ~ ., data = attitude)
model_null <- lm(rating ~ 1, data = attitude)
ideal_model <- step(object = model_null, scope = list(lower = model_null, upper = model_full), direction = 'forward')
anova(ideal_model, model_full)
# Summarise controller-quality metrics for one experiment data frame.
# Expects columns: Fail, avgErr, avgMaxErr, less{5,10,15,20}cmErrTime.
# Negative less*ErrTime values appear to mark runs that never reached that
# error bound — TODO confirm against the simulation output format.
# Returns an unnamed 11-element list:
#   [[1]]        number of failed runs
#   [[2]]-[[3]]  mean avgErr / avgMaxErr over non-failed runs
#   [[4]]-[[7]]  counts of non-failed runs with negative 5/10/15/20 cm times
#   [[8]]-[[11]] mean of the positive 5/10/15/20 cm times
calc_quality_stats <- function(df) {
  # normalise column types (inputs may arrive as factors/strings)
  df$Fail <- as.logical(df$Fail)
  numeric_cols <- c("avgErr", "avgMaxErr", "less5cmErrTime", "less10cmErrTime",
                    "less15cmErrTime", "less20cmErrTime")
  for (col in numeric_cols) {
    df[[col]] <- as.numeric(df[[col]])
  }
  ok <- subset(df, !Fail)
  list(
    nrow(subset(df, Fail)),
    mean(ok$avgErr),
    mean(ok$avgMaxErr),
    nrow(subset(ok, less5cmErrTime < 0)),
    nrow(subset(ok, less10cmErrTime < 0)),
    nrow(subset(ok, less15cmErrTime < 0)),
    nrow(subset(ok, less20cmErrTime < 0)),
    mean(subset(ok, less5cmErrTime > 0)$less5cmErrTime),
    mean(subset(ok, less10cmErrTime > 0)$less10cmErrTime),
    mean(subset(ok, less15cmErrTime > 0)$less15cmErrTime),
    mean(subset(ok, less20cmErrTime > 0)$less20cmErrTime)
  )
}
# Interactive exploration of one simulation CSV, followed by building a
# combined summary table across four scenarios via calc_quality_stats.
# The mean/length lines below mirror calc_quality_stats step by step.
df <- read.csv('/home/petr/Рабочий стол/python/Симуляция для IFAC/random_sinus_Phs006Amp13_walls_experiments.csv')
levels(df$Fail)
df$Fail <- as.logical(df$Fail)
length(subset(df, Fail)$Fail)
dfNFail <- subset(df, !Fail)
mean(dfNFail$avgErr)
mean(dfNFail$avgMaxErr)
length(subset(dfNFail, less5cmErrTime < 0)$Fail)
length(subset(dfNFail, less10cmErrTime < 0)$Fail)
length(subset(dfNFail, less15cmErrTime < 0)$Fail)
length(subset(dfNFail, less20cmErrTime < 0)$Fail)
mean(subset(dfNFail, less5cmErrTime > 0)$less5cmErrTime)
mean(subset(dfNFail, less10cmErrTime > 0)$less10cmErrTime)
mean(subset(dfNFail, less15cmErrTime > 0)$less15cmErrTime)
mean(subset(dfNFail, less20cmErrTime > 0)$less20cmErrTime)
# merge(list(label), stats-list) cross-joins the scenario label onto the
# 11 statistics, producing a one-row data frame per scenario
c1 <- merge(list("rndsinsum_phs006amp13"), calc_quality_stats(read.csv('/home/petr/Рабочий стол/python/Симуляция для IFAC/random_sinus_Phs006Amp13_walls_experiments.csv')))
c2 <- merge(list("rndsinsum_phs003amp13"), calc_quality_stats(read.csv('/home/petr/Рабочий стол/python/Симуляция для IFAC/random_sinus_Phs003Amp13_walls_experiments.csv')))
c3 <- merge(list("zigzag"), calc_quality_stats(read.csv('/home/petr/Рабочий стол/python/Симуляция для IFAC/zig_zag_experiments.csv')))
c4 <- merge(list("streight"), calc_quality_stats(read.csv('/home/petr/Рабочий стол/python/Симуляция для IFAC/streight_line_experiments.csv')))
# assign readable column names (positional, so all four stay in sync)
names(c1) <- c("scene_type", "fail_count", "avgErr", "avgMaxErr", "more5cmErrCnt", "more10cmErrCnt", "more15cmErrCnt", "more20cmErrCnt", "avg5cmErrTime", "avg10cmErrTime", "avg15cmErrTime", "avg20cmErrTime")
names(c2) <- names(c1)
names(c3) <- names(c2)
names(c4) <- names(c3)
#common_table <- data.frame("scene_type" = "undefined", "fail_count" = 0, "avgErr" = 0, "avgMaxErr" = 0, "more5cmErrCnt" = 0, "more10cmErrCnt" = 0, "more15cmErrCnt" = 0, "more20cmErrCnt" = 0, "avg5cmErrTime" = 0, "avg10cmErrTime" = 0, "avg15cmErrTime" = 0, "avg20cmErrTime" = 0)
#common_table <- data.frame("scene_type", "fail_count", "avgErr", "avgMaxErr", "more5cmErrCnt", "more10cmErrCnt", "more15cmErrCnt", "more20cmErrCnt", "avg5cmErrTime", "avg10cmErrTime", "avg15cmErrTime", "avg20cmErrTime")
common_table <- data.frame(c1)
common_table <- rbind(common_table, c2)
common_table <- rbind(common_table, c3)
common_table <- rbind(common_table, c4)
write.csv(common_table, "/home/petr/Рабочий стол/python/Симуляция для IFAC/common_table.csv")
|
55fccb1b76bd72fd475bfba5929b63417401b5ed
|
d5cb55bdfa329eae2da7c2c084fbda7580c3f0a0
|
/R/run_analyteHTO_nk1.R
|
1f450ffd585a89a885eb91df2fa42139774711d7
|
[] |
no_license
|
janihuuh/cml_stop_manu
|
bcc1a2a7b38810008e42dc3231e0fc20d064bba6
|
e11f4b677e44a9255652e143b8e76af7d6ff8bcb
|
refs/heads/master
| 2023-03-10T19:09:33.021383
| 2023-03-07T13:39:17
| 2023-03-07T13:39:17
| 282,205,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
run_analyteHTO_nk1.R
|
# Split a Seurat object into tumor and NK subsets and preprocess each.
# NOTE(review): cml_seurat_new_filt1 and preprocessSeuratCellCycle are not
# defined in this file — this script assumes an environment prepared by an
# earlier script; verify the run order.
cml1_seurat <- cml_seurat_new_filt1
# label cells as NK when they sit in cluster 2 and their demux tag mentions NK
cml1_seurat$is_nk <- ifelse(cml1_seurat$RNA_snn_res.0.1 == 2 & grepl("NK", cml1_seurat$my.demux), "NK", "CML")
## Select only tumor cells
# NOTE(review): cml1_seurat_tumor is read here before this script ever
# assigns it — presumably created by a previous script (or this line was
# meant to read cml1_seurat@meta.data); confirm before running standalone.
cells.to.keep <- cml1_seurat_tumor@meta.data %>% filter(is_nk != "NK" & !my.demux %in% c("E-NK-only", "NE-NK-only")) %>% pull(barcode)
cml1_seurat_tumor <- subset(cml1_seurat_tumor, cells = cells.to.keep)
cml1_seurat_tumor <- cml1_seurat_tumor %>% preprocessSeuratCellCycle(cells.to.use = colnames(cml1_seurat_tumor), nPCs = 14)
## Select only NK cells (original comment said "tumor" — copy-paste slip)
cells.to.keep <- cml1_seurat_tumor@meta.data %>% filter(is_nk == "NK") %>% pull(barcode)
cml1_seurat_nk <- subset(cml1_seurat, cells = cells.to.keep)
cml1_seurat_nk <- cml1_seurat_nk %>% preprocessSeuratCellCycle(cells.to.use = colnames(cml1_seurat_nk), nPCs = 12)
saveRDS(cml1_seurat_tumor, "results/functional/cml1_seurat_tumor.rds")
saveRDS(cml1_seurat_nk, "results/functional/cml1_seurat_nk.rds")
|
35ad32ca542d4d73d5ab0d49a526b81d5a8c44de
|
df903cf2bbd2119dc25417a24cbd3967952e6a8e
|
/L1/svc_L1_v1.R
|
38301bd04de2da248c39655b36bbffb66524cf84
|
[] |
no_license
|
bishwarup307/Santander_Customer_Satisfaction
|
7ea56b314a2265f15dca3c5e6bee4f6c89c054cf
|
566514267bd9692f6409c475d10dcb9cc22c8227
|
refs/heads/master
| 2016-09-12T12:54:51.559866
| 2016-05-18T05:37:19
| 2016-05-18T05:37:19
| 59,084,157
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,020
|
r
|
svc_L1_v1.R
|
# Santander Customer Satisfaction: L1 linear/SVC meta-feature script — setup.
# load required libraries
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(readr)
require(Hmisc)
require(dplyr)
require(caret)
require(LiblineaR)
require(Metrics)
require(MASS)
require(glmnet)
options(dplyr.print_max = Inf)
options(dplyr.width = Inf)
# set working directory and seed
# NOTE(review): hard-coded Windows path; this script only runs on the
# author's machine as-is.
dir <- 'F:/Kaggle/Santander/'
setwd(dir)
# checkLogTrafo / checkCorTwoWay used below come from this sourced file
source('./Scripts/linear_utils__.R')
set.seed(201)
# Rescale a numeric vector to [0, 1] via min-max normalisation.
# NA values are ignored when locating the range but propagate in the output;
# a constant vector yields NaN (zero denominator), as in the original.
minMaxScale <- function(x) {
  lo <- min(x, na.rm = TRUE)
  hi <- max(x, na.rm = TRUE)
  (x - lo) / (hi - lo)
}
# load the data files
train <- read.csv(paste0(dir, 'RawData/train.csv'))
test <- read.csv(paste0(dir, 'RawData/test.csv'))
fold_ids <- read.csv(paste0(dir, 'Fold5F.csv'))
# convert cv folds to data.frame
# ff <- read_csv('cvFolds.csv')
# fold_list<- list()
# num_folds <- length(unique(ff$foldIndex))
# for(ii in 1:num_folds){
# ids <- train[which(ff$foldIndex == ii),]$ID
# fold_list[[ii]] <- ids
# }
# fold_ids <- as.data.frame(fold_list)
# names(fold_ids) <- c('fold1', 'fold2', 'fold3', 'fold4', 'fold5')
# write_csv(fold_ids, paste(dir, 'fold5f.csv'))
# merge train & test (test TARGET set to NA so the split can be recovered)
test$TARGET <- NA
alldata <- rbind(train, test)
# remove constant features (columns 1/371/372 = ID/TARGET are excluded)
const.cols <- names(which(sapply(alldata[, -c(1, 371, 372)], function(x) length(unique(x))) == 1))
alldata <- alldata[, !names(alldata) %in% const.cols] # remove constant cols
cat('\nremoved ', length(const.cols), ' features ...')
# remove duplicate features (pairwise identical columns; keeps the first)
features_pair <- combn(names(alldata), 2, simplify = F)
toRemove <- c()
for(pair in features_pair) {
f1 <- pair[1]
f2 <- pair[2]
if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) {
if (all(alldata[[f1]] == alldata[[f2]])) {
cat(f1, "and", f2, "are equals.\n")
toRemove <- c(toRemove, f2)
}
}
}
alldata <- alldata[, !names(alldata) %in% toRemove]
# remove linearly correlated features
# NOTE(review): the hard-coded column indices 310/311 assume a specific
# column count after the removals above — fragile if inputs change.
lin.comb <- findLinearCombos(alldata[, -c(1, 310)])
lin.comb.remove <- names(alldata[, -c(1, 310, 311)])[lin.comb$remove]
alldata <- alldata[, !names(alldata) %in% lin.comb.remove]
#
# alldata$var3[alldata$var3 == -999999] <- -1
# use ridge regression to select only important features
tr <- alldata[which(!is.na(alldata$TARGET)),]
st <- tr[, -1]
rd <- lm.ridge(TARGET ~ ., data=st, lambda=0.5)
# keep coefficients above the median absolute coefficient
# (the "== TRUE" is redundant but harmless)
impo <- names(which((abs(rd$coef) > quantile(abs(rd$coef), 0.5)) == TRUE))
alldata <- alldata[, c('ID', 'TARGET', impo)]
# save(alldata, file = './cleanedLinearNN.RData')
# scale the numeric features
# NOTE(review): this load() replaces the alldata just computed with a saved
# snapshot — assumes the .RData matches the pipeline above; verify.
load('./cleanedLinearNN.RData')
bins <- grep('ind', names(alldata), value = TRUE)
binaryDF <- alldata[, bins]
alldata <- alldata[, !names(alldata) %in% bins] # drop binary cols
binaryDF[binaryDF == 0] <- -1
# unq.cnt <- sapply(alldata, function(x) length(unique(x)))
# unq.cnt <- sapply(alldata[, -c(1, 2)], function(x) length(unique(x)))
# discreteCols <- names(which(unq.cnt <= 30))
# discreteDF <- alldata[, discreteCols]
# for(f in names(discreteDF)){
# discreteDF[[f]] <- as.character(discreteDF[[f]])
# }
# dmy <- dummyVars('~.', data = discreteDF, fullRank = TRUE)
# discreteDF <- data.frame(predict(dmy, discreteDF))
# denseCols <- names(which(sapply(discreteDF, sum) > 20))
# discreteDF <- discreteDF[, denseCols]
# alldata <- alldata[, !names(alldata) %in% discreteCols]
# alldata$var15 <- log1p(alldata$var15)
# alldata$var38 <- log1p(alldata$var38)
# alldata$num_var4 <- log1p(alldata$num_var4)
# alldata$num_meses_var5_ult3 <- log1p()
useCols <- setdiff(names(alldata), c('ID', 'TARGET'))
#
# corUniv <- data.frame(Feature = character(), cor = numeric())
# for (f in useCols){
# pearson <- cor(tr$TARGET, tr[[f]])
# tmp <- data.frame(Feature = f, cor = abs(pearson))
# corUniv <- rbind(corUniv, tmp)
# rm(tmp)
# }
# corUniv <- corUniv[order(-corUniv$cor),]
#
# corBiv <- data.frame(Feature1 = character(), Feature2 = character(), oper = integer(), cor = numeric(), lift = numeric())
# twoWay <- combn(useCols, 2, simplify = FALSE)
# for(pair in twoWay){
# f1 <- pair[1]
# f2 <- pair[2]
# maxCor <- max(cor(tr$TARGET, tr[[f1]]), cor(tr$TARGET, tr[[f2]]))
# a <- tr[[f1]] + tr[[f2]]
# }
tr <- alldata[which(!is.na(alldata$TARGET)),]
useDF <- alldata[,useCols]
# manual cleanups: sentinel value and impossible negatives clipped to 0
useDF$var3[useDF$var3 == -999999] <- 2
useDF$saldo_var1[useDF$saldo_var1 < 0] <- 0
useDF$delta_imp_reemb_var17_1y3[useDF$delta_imp_reemb_var17_1y3 < 0]<- 0
useDF$saldo_medio_var17_hace2[useDF$saldo_medio_var17_hace2 < 0]<- 0
useDF$saldo_medio_var33_ult1[useDF$saldo_medio_var33_ult1 < 0] <- 0
# checkLogTrafo comes from the sourced linear_utils__.R
logVars <- checkLogTrafo(useDF[1:nrow(tr),], useCols, tr$TARGET)
for(f in logVars){
useDF[[f]] <- log1p(useDF[[f]])
}
# find the two way interactions that improves correlation with
# target
# twoWay <- combn(useCols, 2, simplify = F)
# interactions <- checkCorTwoWay(useDF[1:nrow(tr),], twoWay, tr$TARGET)
# interactions <- interactions[order(-interactions$improve),]
#write_csv(interactions, './twowayLinear.csv')
inters <- read_csv('./twowayLinear.csv')
# hand-picked two-way interaction features from the saved search results
useDF$var15_num_var4 <- useDF$var15 - useDF$num_var4
useDF$var15_num_var42 <- useDF$var15 - useDF$num_var42
useDF$var15_num_meses_var5_ult3 <- useDF$var15 - useDF$num_meses_var5_ult3
useDF$var15_num_var5 <- useDF$var15 - useDF$num_var5
useDF$num_var4_num_var8_0 <- useDF$num_var4 - useDF$num_var8_0
useDF$num_var22_ult1_num_op_var41_efect_ult3 <- useDF$num_var22_ult1 * useDF$num_op_var41_efect_ult3
useDF$num_var5_0_num_var39_0 <- useDF$num_var5_0 * useDF$num_var39_0
useDF$num_var5_0_num_var41_0 <- useDF$num_var5_0 * useDF$num_var41_0
useDF$num_var4_num_var5_0 <- useDF$num_var4 - useDF$num_var5_0
useDF$num_var4_num_meses_var8_ult3 <- useDF$num_var4 - useDF$num_meses_var8_ult3
useDF$num_var5_0_num_var12 <- useDF$num_var5_0 + useDF$num_var12
useDF$imp_op_var39_comer_ult3_num_med_var22_ult3 <- useDF$imp_op_var39_comer_ult3 * useDF$num_med_var22_ult3
useDF$var15_num_meses_var13_corto_ult3 <- useDF$var15 - useDF$num_meses_var13_corto_ult3
useDF$num_var8_0_var38 <- useDF$num_var8_0 - useDF$var38
useDF$num_var5_0_num_var12_0 <- useDF$num_var5_0 + useDF$num_var12_0
# second ridge pass to prune the expanded feature set
ap <- useDF[1:nrow(tr),]
ap <- cbind(tr$TARGET, ap)
names(ap)[1] <- 'TARGET'
rd <- lm.ridge(tr$TARGET ~ ., data=ap, lambda=0.4)
impo <- names(which((abs(rd$coef) > quantile(abs(rd$coef), 0.5)) == TRUE))
useDF <- useDF[, impo]
# excludeCols <- c('saldo_var5', 'delta_imp_aport_var13_1y3', 'saldo_medio_var8_ult1')
# logDF <- useDF[, !names(useDF) %in% excludeCols]
# logDF <- as.data.frame(log1p(logDF)^0.6)
# useDF <- useDF[, !names(useDF) %in% names(logDF)]
# useDF <- cbind(useDF, logDF)
useDF <- data.frame(scale(useDF))
#useDF<- data.frame(sapply(useDF, minMaxScale)) # apply minmax scalar
# rebuild alldata: untouched cols + recoded binaries + scaled numerics
alldata <- alldata[, !names(alldata) %in% useCols]
alldata <- cbind(alldata, binaryDF)
alldata <- cbind(alldata, useDF)
#
# useDF <- log1p(useDF)
# alldata <- alldata[, !names(alldata) %in% useCols]
# alldata <- cbind(alldata, useDF)
# split train & test
tr <- alldata[which(!is.na(alldata$TARGET)),]
te <- alldata[which(is.na(alldata$TARGET)),]
#
# NOTE(review): feature.names is used here but only assigned further down
# (and chosenVars is never defined in this file) — this script was evidently
# run interactively out of order; it will not run top-to-bottom as-is.
gcv <- cv.glmnet(x = as.matrix(tr[, feature.names]),
y = tr$TARGET,
type.measure = 'auc',
family = 'binomial',
nfolds = 5)
coeffs <- as.matrix(coef(gcv, s = 'lambda.1se'))
# drop the intercept ([-1]) and keep features with nonzero lasso coefficients
selected <- rownames(coeffs)[abs(coeffs[,1]) > 0][-1]
# train and test meta feature containers
evalMatrix <- data.frame(ID = numeric(), svc1_preds = numeric())
testMatrix <- data.frame(ID = te$ID)
# features to use in the model
# feature.names <- names(tr)[!names(tr) %in% c("ID", "TARGET")]
feature.names <- chosenVars
# Out-of-fold predictions: for each fold, fit three candidate models
# (logistic regression, L2-regularised linear SVM, elastic net) on the
# remaining folds and predict on the held-out fold.
# NOTE(review): only the glmnet predictions are stored in `evalMatrix`
# (under the column name svc1_preds); the glm and LiblineaR AUCs are
# computed but discarded, and `cname` is never used -- this reads like
# exploratory code. Confirm which model the meta-feature should come
# from.
for(i in 1:ncol(fold_ids)) {
  cat("\n---------------------------")
  cat("\n------- Fold: ", i, "----------")
  cat("\n---------------------------\n")
  cname <- paste("Fold_", i)
  # IDs of the rows held out in this fold (NA padding removed).
  idx <- fold_ids[[i]]
  idx <- idx[!is.na(idx)]
  trainingSet <- tr[!tr$ID %in% idx,]
  validationSet <- tr[tr$ID %in% idx,]
  cat("\nnrow train: ", nrow(trainingSet))
  cat("\nnrow eval: ", nrow(validationSet), "\n")
  # Candidate 1: plain logistic regression (AUC printed implicitly only
  # when run interactively; result not kept).
  frm <- as.formula(paste0('TARGET ~ ', paste(feature.names, collapse = ' + ')))
  lr <- glm(frm, family = 'binomial',
            data = trainingSet)
  p <- as.numeric(predict(lr, validationSet[, feature.names], type = 'response'))
  auc(validationSet$TARGET, p)
  #
  # Candidate 2: LiblineaR type 0 = L2-regularised logistic regression.
  svp <- LiblineaR(as.matrix(trainingSet[, feature.names]), trainingSet$TARGET, type = 0, cost = 3, epsilon = 1e-7, verbose = TRUE)
  preds <- as.numeric(predict(svp, newx = as.matrix(validationSet[, feature.names]), proba = TRUE)$probabilities[, 2])
  AUC <- auc(validationSet$TARGET, preds)
  cat('\nAUC: ', AUC)
  # Candidate 3: elastic net; its predictions overwrite `preds` and are
  # the ones that end up in evalMatrix below.
  enet <- glmnet(x = as.matrix(trainingSet[, feature.names]),
                 y = trainingSet$TARGET,
                 alpha = 0.8,
                 family = 'binomial',
                 lambda.min.ratio = 0.03,
                 standardize = FALSE)
  preds <- as.numeric(predict(enet, newx = as.matrix(validationSet[,feature.names]), type = "response", s = 1e-5))
  auc(validationSet$TARGET, preds)
  valid <- data.frame(ID = validationSet$ID, svc1_preds = preds)
  evalMatrix <- rbind(evalMatrix, valid)
  gc()
}
|
b08b1c48659d389f1ee716f199973b6fafe41d76
|
8c5db57fc672558a9f95a798f7a79daa3c58f0ad
|
/tewari/tewari_equimolar/r_code/03_other_tools.R
|
cba6348602bce9c5833a25b5799859bf7ec781b3
|
[] |
no_license
|
miRTop/incubator
|
7c24fd271913b07588c9accccfbcf1d70713b14a
|
916eb164554f8b287747ce3d593a3d58dc5ee2ee
|
refs/heads/master
| 2021-01-19T03:38:06.463354
| 2019-04-25T21:25:20
| 2019-04-25T21:25:20
| 45,271,759
| 6
| 4
| null | 2018-09-19T16:31:23
| 2015-10-30T19:17:11
|
R
|
UTF-8
|
R
| false
| false
| 1,920
|
r
|
03_other_tools.R
|
# Shared plotting setup for the figures below: light theme with a
# centered legend at the bottom of every plot.
library(tidyverse)
library(ggplot2)
library(pheatmap)
theme_set(
  theme_light(base_size = 14L))
theme_update(
  legend.justification = "center",
  legend.position = "bottom")
# Magrittr functional sequence shared by the three tool data sets
# below. For rows where the reference miRNA is detected (ref_is_1 == 1)
# it counts (protocol, sample, pct_cat, iso) combinations, builds a
# stable per-protocol sample label, converts counts to percentages
# within each (sample2, protocol) group, and collapses the mixed
# "snp ..."/"add3p ..." isomiR classes into single categories.
# NOTE(review): `seq_along(unique(sample))` (previously
# `1:length(unique(sample))`) has the number of *distinct* samples per
# protocol, not the group's row count, so mutate() relies on
# recycling -- confirm each sample contributes the same number of rows.
prepare <- . %>%
  filter(ref_is_1 == 1) %>%
  dplyr::count(protocol, sample, pct_cat, iso) %>%
  group_by(protocol) %>%
  mutate(sample2 = paste0(protocol, "_", seq_along(unique(sample)))) %>%
  group_by(sample2, protocol) %>%
  mutate(pct_total = n / sum(n) * 100,
         iso = ifelse(grepl("snp ", iso), "snp + other", iso),
         iso = ifelse(grepl("add3p ", iso), "add3p + other", iso))
# Compare isomiR percentage distributions across three quantification
# tools (razer3, mirGe, bcbio), restricted to isomiRs above 1% and to
# the shift5p / shift3p / snp classes. mirGe protocol names are first
# normalised to the short codes used by the other two data sets.
# NOTE(review): appending `+ ggsave(...)` to a ggplot chain only works
# as a side effect in some ggplot2 versions -- confirm, or call
# ggsave() separately after printing the plot.
bind_rows(
  equimolar_razer3 %>%
    filter(pct > 1) %>%
    prepare() %>%
    filter(iso %in% c("shift5p", "shift3p", "snp")) %>%
    mutate(tool = "razer3"),
  equimolar_mirge %>%
    mutate(protocol = ifelse(grepl("NEBN", protocol), "neb", protocol),
           protocol = ifelse(grepl("Tru", protocol), "tru", protocol),
           protocol = ifelse(grepl("4N", protocol), "x4n", protocol),
           protocol = ifelse(grepl("Clean", protocol), "clean", protocol)) %>%
    filter(pct > 1) %>%
    prepare() %>%
    filter(iso %in% c("shift5p", "shift3p", "snp")) %>%
    mutate(tool = "mirGe"),
  equimolar %>%
    filter(pct > 1) %>%
    prepare() %>%
    filter(iso %in% c("shift5p", "shift3p", "snp")) %>%
    mutate(tool = "bcbio")
) %>%
  ggplot(aes(x = protocol, y = pct_total, color = pct_cat)) +
  geom_boxplot(outlier.color = NA) +
  facet_grid(tool~iso) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
        strip.text = element_text(size = 14, color = "black")) +
  ylab("PCT") +
  scale_color_manual("IMPORTANCE",
                     values = RColorBrewer::brewer.pal(7, "Dark2")[3:7]) +
  ggsave("results/03_other_tools/03_other_tools_pct_g1.pdf", height = 9)
|
0f21c861da460f0abd36c09c7ab9cea29752f020
|
ceb95b1483ba7a41a73f1d04df8fdfd40d502d23
|
/test_for_nomality_and_transform.R
|
322ca3499b0109cc5a6b94b64a136911de9766f4
|
[] |
no_license
|
RustyBrain/phds_assignment
|
b5ea4c7cb91e39352f7abb9a2ba49ac10bc01402
|
104b5227d07def3de0ac2fa31087e447b0bb2a9c
|
refs/heads/master
| 2020-05-03T11:48:06.903818
| 2019-04-29T09:44:00
| 2019-04-29T09:44:00
| 178,609,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,211
|
r
|
test_for_nomality_and_transform.R
|
library(fingertipsR)
library(dplyr)
library(tibble)
library(tidyr)
library(mi)
library(car)
library(ggplot2)
library(ggcorrplot)
library(caret)
library(psycho)
# Get data
# NOTE(review): the fingertips_data() download is immediately
# overwritten by `social_determinants_df` (defined elsewhere) -- one of
# the two lines is presumably stale; confirm which data set is wanted.
df <- fingertips_data(ProfileID = 19, rank = TRUE)
df <- social_determinants_df
# Create a one-row data frame of indicator polarities, one column per
# IndicatorID (used later to flip "Low is good" indicators).
polarities <- df %>%
  select(IndicatorID, Polarity) %>%
  distinct() %>%
  spread(key = IndicatorID, value = Polarity)
# Keep only the most recent time period per indicator.
most_recent_data <- df %>%
  group_by(IndicatorID) %>%
  filter(TimeperiodSortable == max(TimeperiodSortable)) %>%
  ungroup()
# Keep County & Unitary Authority areas only.
most_recent_data <- most_recent_data[grep('County', most_recent_data$AreaType), ]
# Reshape to wide: one row per area, one column per indicator.
wide_df <- most_recent_data %>%
  select(AreaCode, IndicatorID, Value) %>%
  tibble::rowid_to_column() %>%
  group_by(IndicatorID, AreaCode) %>%
  spread(key = IndicatorID, value = Value) %>%
  summarise_all(funs(na.omit(.)[1]))
rownames(wide_df) <- wide_df$AreaCode
wide_df <- wide_df %>%
  select(-rowid) # Remove row id column
# Remove areas with over 50% missing data - areas with this are unlikely to add any information as their values are more imputed than not
wide_df$proportna <- apply(wide_df, 1, function(x) sum(is.na(x)) / dim(wide_df)[2])
wide_df <- wide_df[wide_df$proportna < 0.5,]
wide_df <- wide_df %>% select(-proportna)
# Remove indicators with over 20% missing data - rather arbitrary
# threshold at the moment (the original comment said 80%, but the code
# keeps columns with a missing fraction below 0.2).
wide_df <- wide_df[colSums(is.na(wide_df)) / dim(wide_df)[1] < 0.2]
# Impute missing data using a Bayesian framework (mi package) with 30
# iterations and 4 chains (package defaults). A fixed seed ensures
# reproducibility.
# http://www.stat.columbia.edu/~gelman/research/published/mipaper.pdf
to_impute <- wide_df
rownames(to_impute) <- to_impute$AreaCode
to_impute <- to_impute %>% select(-AreaCode)
imputed <- mi(as.data.frame(to_impute), seed = 225)
summary(imputed)
image(imputed) # Show heatmap of imputed values
imputed_df <- mi::complete(imputed, m = 1) # Retrieve the dataframe
imputed_df <- select(imputed_df, -contains("missing")) # Remove the boolean 'missing' columns.
# Test normality - using Shapiro-Wilk with an alpha of 0.001. Alpha chosen as the Central Limit Theorem would suggest that with n=150, data should tend towards being normally distributed.
# These tests are to catch extremes where normality is violated and to transform them to normalise using square root, log, and box-cox transformations in turn. The box-cox transformation
# used was the Yeo-Johnson variation (yjPower()) as some of the data contains 0 values.
# Yeo, In-Kwon and Johnson, Richard (2000) A new family of power transformations to improve normality or symmetry. Biometrika, 87, 954-959.
# Test a numeric column for normality (Shapiro-Wilk, alpha = 0.001)
# and, if it fails, try a ladder of transformations -- square root,
# log, then a Yeo-Johnson power transform -- returning the first
# transformed version that passes. Returns NA when no transformation
# normalises the data, otherwise a numeric vector the same length as
# `column_data` (exact zeros are nudged to 1e-5 before logging).
#
# NOTE(review): yjPower(column_data, lambda = 1) is the *identity*
# Yeo-Johnson transform, so the final fallback cannot change the data;
# a fitted lambda (e.g. car::powerTransform) was probably intended.
# Left as-is to preserve current behaviour.
test_normaility_and_transform <- function(column_data){
  norm_test <- shapiro.test(column_data)
  print(norm_test)
  if (norm_test$p.value < 0.001) {
    norm_test_sr <- shapiro.test(sqrt(column_data))
    if (norm_test_sr$p.value < 0.001) {
      # log() is undefined at 0, so nudge exact zeros first.
      column_data[column_data == 0] <- 0.00001
      # BUG FIX: this previously read `is.numeric(column_data == FALSE)`,
      # which tests a logical vector and is always FALSE, so the debug
      # print could never run.
      if (!is.numeric(column_data)) {
        print(column_data)
      }
      norm_test_lg <- shapiro.test(log(column_data))
      if (norm_test_lg$p.value < 0.001) {
        norm_test_bc <- shapiro.test(yjPower(column_data, lambda = 1))
        print(norm_test_bc)
        if (norm_test_bc$p.value < 0.001) {
          return(NA)
        } else {
          return(yjPower(column_data, lambda = 1))
        }
      } else {
        return(log(column_data))
      }
    } else {
      return(sqrt(column_data))
    }
  } else {
    return(column_data)
  }
}
# Shapiro-Wilk statistic and p-value for every imputed indicator.
# NOTE(review): funs() is deprecated in modern dplyr (use list(...));
# left as-is to match the package versions this script was written for.
normality <- imputed_df %>% summarise_all(.funs = funs(statistic = shapiro.test(.)$statistic, p.value = shapiro.test(.)$p.value))
# Normalise data -- the transformation step is currently disabled; the
# raw imputed data is carried forward unchanged (see norm_reduced below).
# normalised <- data.frame(imputed_df, lapply(imputed_df, test_normaility_and_transform))
#
# #Some transformed data contains NA values, and therefore these values were imputed using the same method as above.
# norm_reduced <- select(normalised, contains(".1"))
# norm_reduced <- norm_reduced[!sapply(norm_reduced, function(x) all(is.na(x)))]
# norm_reduced <- mi(norm_reduced, seed = 225)
# summary(norm_reduced)
# image(norm_reduced)
# norm_reduced <- mi::complete(norm_reduced, m = 1)
# norm_reduced <- select(norm_reduced, -contains("missing"))
norm_reduced <- imputed_df
# Test collinearity - needs a lot of work!
corr <- cor(norm_reduced)
ggcorrplot(corr, hc.order = TRUE, type = "lower",
           outline.col = "white",
           ggtheme = ggplot2::theme_gray,
           colors = c("#6D9EC1", "white", "#E46726"),
           insig = "blank")
# Drop highly correlated indicators (caret::findCorrelation default
# cutoff) and re-plot the reduced correlation matrix.
hc <- findCorrelation(corr)
hc <- sort(hc)
reduced_data <- norm_reduced[, -c(hc)]
corr_reduced <- cor(reduced_data)
ggcorrplot(corr_reduced, hc.order = TRUE, type = "lower",
           outline.col = "white",
           ggtheme = ggplot2::theme_gray,
           colors = c("#6D9EC1", "white", "#E46726"),
           insig = "blank")
# Polarity check and inversion. If an indicator's polarity is
# "Low is good", its scores are negated so that higher always means
# better performance across all indicators.
# NOTE(review): this function reads the globals `polarities` and
# `z_data` rather than taking them as arguments, and the grepl()
# arguments look reversed (pattern = polarities[[col_name]],
# x = the literal string) -- it only behaves as intended if the stored
# polarity text is a substring of "RAG - Low is good"; confirm.
# The assignments below modify a *local* copy of z_data; the function's
# value (an assignment evaluates to its right-hand side) is the
# possibly-negated column, which is what the data.frame(...) caller
# relies on.
polarity_check <- function(col_name){
  if (grepl(polarities[[col_name]], "RAG - Low is good")) {
    z_data[[col_name]] <- -z_data[[col_name]]
  } else {
    z_data[[col_name]] <- z_data[[col_name]]
  }
}
# Create Z scores using psycho package's standardize() function.
# Makowski, (2018). The psycho Package: an Efficient and Publishing-Oriented Workflow for Psychological Science. Journal of Open Source Software, 3(22), 470. https://doi.org/10.21105/joss.00470
z_data <- reduced_data %>% standardize()
# Strip the leading character standardize() prepends to column names,
# and any trailing ".1" suffix, so names line up with `polarities`.
names(z_data) <- substring(names(z_data), 2)
z_data <- z_data %>%
  rename_at(.vars = vars(ends_with(".1")),
            .funs = funs(sub("[.]1$", "", .)))
# Keep only polarity entries for the surviving indicators, in order.
polarities <- polarities[, colnames(z_data)]
# Apply the polarity flip column by column, drop the original (unflipped)
# columns, then add the mean z-score and area codes.
z_data <- data.frame(z_data, lapply(colnames(z_data), polarity_check))
z_data <- z_data[, -c(1:ncol(polarities))]
colnames(z_data) <- colnames(polarities)
z_data$mean <- apply(z_data, 1, mean)
z_data$AreaCode <- wide_df$AreaCode
z_mean <- z_data %>% select(mean, AreaCode)
|
c9941b4a84cd77ad0693280fd3273c2ba488e5cc
|
599f6b6abf1cab72648c6a0589a6d7057a701551
|
/Scripts/8_BSP_Plot.r
|
200031fab69c5ee88c536aa54bfeda12da018df5
|
[] |
no_license
|
StopTB/China_TB_Evolutionary_History
|
ebd3af2a515e8b396179ebc4ef8067dae8737eaf
|
f12be9221710c2ad7768ee55285728c400baf133
|
refs/heads/master
| 2021-09-05T00:41:21.138523
| 2018-01-23T05:44:29
| 2018-01-23T05:44:29
| 117,248,878
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
8_BSP_Plot.r
|
# Bayesian skyline plot (BSP) for five M. tuberculosis lineages.
# `data.txt` is expected to hold, per lineage i in 1..5, columns
# y<i> (calendar year), m<i> (median effective population size) and
# l<i>/u<i> (lower/upper interval bounds) -- presumably exported from a
# BEAST skyline analysis; TODO confirm the column semantics.
library(ggplot2)
MTBC <- read.table("data.txt", header = T, fill = T )
options(max.print=1000000)
# One solid median line plus a dotted envelope per lineage, each in a
# distinct colour; y-axis on a log10 scale.
ggplot(data=MTBC) +
  geom_line(aes(x=y5, y=l5),color="Magenta", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y5, y=u5),color="Magenta", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y5, y=m5), size=0.5, color="Magenta") +
  geom_line(aes(x=y4, y=l4),color="orange", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y4, y=u4),color="orange", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y4, y=m4), size=0.5, color="orange") +
  geom_line(aes(x=y3, y=l3),color="BlueViolet", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y3, y=u3),color="BlueViolet", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y3, y=m3), size=0.5, color="BlueViolet") +
  geom_line(aes(x=y1, y=l1),color="DodgerBlue", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y1, y=u1),color="DodgerBlue", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y1, y=m1), size=0.5, color="DodgerBlue") +
  geom_line(aes(x=y2, y=l2),color="Navy", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y2, y=u2),color="Navy", linetype="dotted", alpha=0.5) +
  geom_line(aes(x=y2, y=m2), size=0.5, color="Navy") +
  scale_x_continuous(name="Year",limits=c(800, 2010), breaks=c(seq(800,2010, by=200))) +
  scale_y_log10(name="Effective Population Size", limits=c(100, 1000000), breaks=c(1E+02, 1E+03, 1E+04, 1e+05, 1E+06, 1E+07) )+
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
|
1c3af1f162dea6cbcdc39a46646179e4ce758d84
|
f7408683a4b9f3ea36e6c56588f257eba9761e12
|
/R/PEER.Sim-data.R
|
5ed033280eb8b74ed53d015cb674c20c56d099d5
|
[] |
no_license
|
refunders/refund
|
a12ad139bc56f4c637ec142f07a78657727cc367
|
93cb2e44106f794491c7008970760efbfc8a744f
|
refs/heads/master
| 2023-07-21T21:00:06.028918
| 2023-07-17T20:52:08
| 2023-07-17T20:52:08
| 30,697,953
| 42
| 22
| null | 2023-06-27T15:17:47
| 2015-02-12T10:41:27
|
R
|
UTF-8
|
R
| false
| false
| 1,362
|
r
|
PEER.Sim-data.R
|
##' Simulated longitudinal data with functional predictor and scalar response,
##' and structural information associated with predictor function
##'
##' \code{PEER.Sim} contains simulated observations from 100 subjects, each
##' observed at 4 distinct timepoints. At each timepoint bumpy predictor
##' profile is generated randomly and the scalar response variable is generated
##' considering a time-varying regression function and subject intercept.
##' Accompanying the functional predictor and scalar response are the subject
##' ID numbers and time of measurements.
##'
##' \code{Q} represents the 7 x 100 matrix where each row provides structural
##' information about the functional predictor profile for data
##' \code{PEER.Sim}. For specific details about the simulation and Q matrix,
##' please refer to Kundu et al. (2012).
##'
##'
##' @name PEER.Sim
##' @aliases PEER.Sim Q
##' @docType data
##' @format The data frame \code{PEER.Sim} is made up of subject ID
##' number(\code{id}), subject-specific time of measurement (\code{t}),
##' functional predictor profile (\code{W.1-W.100}) and scalar response
##' (\code{Y})
##' @references Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012).
##' Longitudinal functional models with structured penalties. (please contact
##' J. Harezlak at \email{harezlak@@iupui.edu})
NULL
|
d86219aebd8d2566e69750ae2841ca223ffc4282
|
bae0c518f1e2c8cec2eff5b2d2c64e39427543b8
|
/kubobook_2012/binomial/COMMON.R
|
66a1b5100cf622caac090ed62a1bff969d6f5b8a
|
[] |
no_license
|
yoshiki146/Stat_Modelling_for_Data_Analysis
|
9844e4424731d5f7584b1d445f555b892df48e35
|
8e7383f42b0f0af03f781618501f144264a4929d
|
refs/heads/master
| 2020-03-19T09:25:30.982914
| 2018-06-12T11:46:46
| 2018-06-12T11:46:46
| 136,287,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
COMMON.R
|
# Chapter-level setup: pull in the book-wide helpers, load the example
# data set, and fix the figure dimensions used by this chapter's plots.
source("../COMMON.R")
d <- read.csv("data4a.csv")
width <- 3.5 # inch
height <- 2.8 # inch
|
b627e90e7f127c56a5fed6cad87c9ed092f33477
|
13633e7b14681e43366ca86b7b00e22ab6a72aac
|
/R/matrix.R
|
9dcc33ffe92cfdf9b9d5fdcfc5dddc874fea505b
|
[] |
no_license
|
thaisssimoes/cursoML
|
aca7543a57b9f3bfd97afc13e539ff3e8596bde7
|
0920fc8323b1772c5990af1996c3850306aa7ba6
|
refs/heads/master
| 2022-02-19T07:46:15.973519
| 2019-08-10T19:56:38
| 2019-08-10T19:56:38
| 197,964,338
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,037
|
r
|
matrix.R
|
# Matrices
# Create a matrix (filled column-wise by default)
m1 <- matrix(1:6, nc=3)
m1
m2 <- matrix( 10*m1, nc=3)
m2
# Element-wise operations between matrices
m1 + m2
m1 - m2
m1 / m2
m1 * m2
m1 ** m2  # `**` is parsed as `^` (element-wise exponentiation)
# Matrix transpose
t(m1)
# Create a matrix filled row-wise
m1 <- matrix(1:12, nc =3, byrow=T)
m1
# Size (total number of elements)
length(m1)
# Dimensions (rows, columns)
dim(m1)
# Number of rows
nrow(m1)
# Number of columns
ncol(m1)
# Element at row 1, column 2
m1[1,2]
# All elements of row 3
m1[3, ]
# Dimension names (row/column labels)
dimnames(m1)
dimnames(m1) <- list(c("l1", 'l2', 'l3', 'l4'),c('c1', 'c2', 'c3'))
m1
m1["l1",]
# Combining vectors as columns
# (cbind recycles the second argument when it is shorter)
m2 <- cbind(1:5, 6:10)
m2
m3 <- cbind (1:5, 3)
m3
# Table functions
# Sums over a margin: rows (margin = 1) or columns (margin = 2)
margin.table(m1, margin=1)
margin.table(m1, margin=2)
m1
# apply(): matrix, margin (1 = rows, 2 = columns), function to apply
apply(m1, 1, max)
apply(m1, 2, max)
# Column sums
colSums(m1)
|
d21adb1a5c5b609d136ce5d0624fb811673b315a
|
d6670a701d4dc07a921cff8c94331677fdde3804
|
/man/print.summary.dcalasso.Rd
|
468f43bc8e209e704f2e17f9afd4369888f54bd0
|
[] |
no_license
|
wujing121/dcalasso-1
|
b72a957aef80443d604face553dd5a9229991f2e
|
d41c991325b4f18857d3114660af18b78c436382
|
refs/heads/master
| 2023-03-18T13:58:11.859155
| 2021-02-23T07:21:26
| 2021-02-23T07:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 441
|
rd
|
print.summary.dcalasso.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarymethods.R
\name{print.summary.dcalasso}
\alias{print.summary.dcalasso}
\title{Print summary for dcalasso objects}
\usage{
\method{print}{summary.dcalasso}(x, ...)
}
\arguments{
\item{x}{a summary.dcalasso object}
\item{...}{...}
}
\description{
\code{print.summary.dcalasso} summarizes output of dcalasso fit
}
\author{
Yan Wang, Tianxi Cai, Chuan Hong
}
|
0867c624967e399ac8f8ed9456d0e370d998cff6
|
bf6e6d9a51776287e03c531d122e83287617cdfb
|
/labsimplex.Rcheck/00_pkg_src/labsimplex/R/print.smplx.R
|
760aab167f9c3b22f34b1014370c9304832d5997
|
[] |
no_license
|
Crparedes/labsimplex
|
2ec465947373e7fffabeaf469354e61eaef1e07c
|
8228dd1ac6adfe9a4da42c59bb3c2b9ba1c63046
|
refs/heads/master
| 2021-09-27T03:48:09.634111
| 2021-09-22T14:22:18
| 2021-09-22T14:22:18
| 179,175,356
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,407
|
r
|
print.smplx.R
|
#' Prints given simplex (a \code{smplx} class object)
#'
#' Prints given simplex information.
#'
#' @param x simplex object to be printed
#' @param extended logical, if \code{TRUE}, the object is printed as a
#' list containing all hiden elements
#' @param conventions logical, if \code{TRUE} (default), the conventions
#' used are printed
#' @param ... other arguments passed to print
#'
#' @method print smplx
#' @author Cristhian Paredes, \email{craparedesca@@unal.edu.co}
#' @author Jesús Ágreda, \email{jagreda@@unal.edu.co}
#' @export
# S3 method for smplx class object
print.smplx <- function(x, extended = FALSE, conventions = TRUE, ...){
if (extended) {
# Extended view: strip the class and show the raw list structure.
class(x) <- "list"
print(x)
} else {
if (is.null(x$coords)) {
x$coords <- matrix()
}
# A simplex in `dim` dimensions has dim + 1 vertices; everything
# stored before those is historical.
NvertexSim <- x$dim + 1
NvertexTot <- nrow(x$coords)
# FvertexAct = index of the first vertex that belongs to the current
# simplex. When the pending vertex is still to be evaluated (P.eval),
# the window shifts by one row.
if (x$P.eval) {
FvertexAct <- NvertexTot - NvertexSim
} else {
FvertexAct <- NvertexTot - NvertexSim + 1
}
# shape() is a package-internal helper; presumably it pads/aligns the
# per-vertex metadata to the coordinate rows -- TODO confirm.
lab <- shape(x = x$vertex.label, simplex = x)
QF <- shape(x = x$qual.fun, simplex = x)
nat <- shape(x = x$vertex.nat, simplex = x)
row.names(x$coords) <- paste0(row.names(x$coords), ":")
cat("\nCurrent simplex:\n")
# Current simplex rows, newest first, with response/label/nature.
print(data.frame(x$coords[NvertexTot:FvertexAct, ], . = "|",
Response = QF[NvertexTot:FvertexAct],
Label = lab[NvertexTot:FvertexAct],
Nature = nat[NvertexTot:FvertexAct]))
if (FvertexAct > 1) {
cat("\nHistorical Vertices:\n")
M <- x$coords[(FvertexAct - 1):1, ]
if (FvertexAct == 2) {
# Single historical row: subsetting dropped to a vector, so rebuild
# a one-row matrix and restore its row name.
M <- t(M)
rownames(M) <- rownames(x$coords)[1]
}
print(data.frame(M, . = "|",
Response = QF[(FvertexAct - 1):1],
Label = lab[(FvertexAct - 1):1],
Nature = nat[(FvertexAct - 1):1]))
}
if (conventions) {
# Legend for the Label/Nature codes used above.
cat("\nConventions:\n")
cat(" Labels: Nature:
W: Worst or Wastebasket S: Starting
N: Next to the worst R: Reflected
B: Best E: Expanded
Cr: Contraction on the reflection side
D: Disregarded Cw: Contraction on the worst side
Use print(..., conventions = FALSE) to disable conventions printing. \n")
}
}
}
|
04f3352e91dfc46e0dce4d06b33413aa2e776389
|
cf374f665ea9e683e441001c509a902ecf8399cc
|
/R/test.R
|
20a39cffe1a78931744a946681f8ae2b11ded647
|
[] |
no_license
|
kpagacz/superlintertest
|
a4cede995d85e0dd1a511a4f87191726b00c534a
|
ad14114a38c215d559fee6bdc0612857a418499a
|
refs/heads/main
| 2023-07-17T06:28:17.576184
| 2021-08-31T05:28:58
| 2021-08-31T05:28:58
| 401,570,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84
|
r
|
test.R
|
#' test
#' @import shiny
#'
test_function <- function() {
  # Build (and return) a shiny namespacing function for the "test" id.
  NS("test")
}
|
cd537a89f30a9acd7ae920cd35220a8122ff07c4
|
3bfcfbb1ce30e01e2bee4f9d611388f7860be455
|
/prediction_model.R
|
3fb1cf05d76a6ba45077c00f369148a693174c27
|
[] |
no_license
|
JaymonV/DC3-G7
|
9cfaa1b7d0d139f248aa026ec7143fb1d3e7c423
|
c5e5d1690c341b14b1efe8d04b1dc47d6b19232d
|
refs/heads/master
| 2020-07-24T19:13:46.951485
| 2019-11-08T19:55:39
| 2019-11-08T19:55:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,802
|
r
|
prediction_model.R
|
# Install-on-demand loading. NOTE(review): the if(!require(...)) pattern
# installs but does not load on first run; a second library() call per
# package would be safer.
if(!require(rstudioapi))install.packages("rstudioapi")
library(rstudioapi)
if(!require(forecast))install.packages("forecast")
library(forecast)
if(!require(lmtest))install.packages("lmtest")
library(lmtest)
library(ggplot2)
# Work relative to this script's location (RStudio only).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
###############################################################
############################# NEW MODEL #######################
###############################################################
# Number of half-hour slots held out at the end of the series.
timeslot = 48
df <- read.csv('flow_level_and_rain.csv')
unique(df[,'pump_station'])
# Select model: flow measurements for one pump station; missing rainfall
# is treated as "no rain".
df <- df[df['pump_station'] == 'helftheuvelweg' & df['measurement_type'] == 'flow',]
df[is.na(df[,'rainfall_volume']),'rainfall_volume'] <- 0
rainfall <- df[,'rainfall_volume']
flow_value <- df[,'mean_value']
df[,'datetime'] <- as.Date(df[,'datetime'])
df_ts <- ts(df[,'mean_value'],
start=min(df[,'datetime']))
# Train/test split on the flow series and the rainfall regressor.
# NOTE(review): the index constants (31930, 14398) are hard-coded to
# this data extract -- confirm against the file's row counts.
df_ts_train <- window(df_ts, end=(31930-timeslot))
df_ts_test <- window(df_ts, start=(31931-timeslot))
rainfall_train <- c(0, rainfall[1:(14398-timeslot)])
rainfall_test <- rainfall[(14399-timeslot):14398]
# NOTE(review): `lags` is built but never passed to Arima (fixed= was
# presumably intended) -- dead code as written.
lags <- rep(0, 175)
lags[167:175] <- NA
lags[24] <- NA
length(lags)
length(df_ts_train)
length(rainfall_train)
# ARIMA(169,1,1) with rainfall as an external regressor, CSS fitting.
model <- Arima(df_ts_train, order=c(169, 1, 1), xreg=rainfall_train, method='CSS')
# NOTE(review): forecast()'s second positional argument is the horizon
# `h`; passing the test series here is suspicious -- confirm whether
# h=length(df_ts_test) was intended.
df_forecast <- forecast(model, df_ts_test, xreg=rainfall_test)
summary(df_forecast)
# Significance tests for the fitted ARIMA coefficients.
coeftest(model)
# Exploratory decomposition/forecast plots on a daily (24h) cycle.
df[,'mean_value'] %>%
ts(frequency = 24)%>%
stlf() %>%
autoplot() +
xlim(595, 605)
df[,'mean_value'] %>%
ts(frequency = 24)%>%
mstl() %>%
autoplot() +
xlim(595, 605)
rain_df <- as.data.frame(list(rain=rainfall_test, Time=(31931-timeslot):31930))
# Autocorrelation diagnostics of the differenced series.
acf(diff(df_ts,1))
df_aacf <- pacf(df_ts, lag=169)
autoplot(df_forecast) +
xlim(32000-168, 31930)
plot(df_forecast)
|
d16bc2022ff0af01b270db680e17abdda261e323
|
08c566c7906434c99bb4f6bb6b649023807e7437
|
/sentiment_bar_graph.R
|
b92a07962bec29ae2938206b9461a41ef31b0ad8
|
[] |
no_license
|
shivi1019/NLP-Algorithms-Python
|
02c47a15ed16059db54124441aae91147e28487b
|
72eaf728f056551e348a1e3fd7c0d6b8dd752d4b
|
refs/heads/master
| 2023-06-07T14:42:33.757073
| 2021-06-28T10:18:39
| 2021-06-28T10:18:39
| 288,165,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,827
|
r
|
sentiment_bar_graph.R
|
#### Loading library ####
# NOTE(review): require() is used for loading here; library() would
# fail loudly on a missing package instead of returning FALSE.
require(tidyverse)
require(tidytext)
require(RColorBrewer)
require(gplots)
theme_set(theme_bw(12))
library(textdata)
library(reshape2)
# Reading dataset
# NOTE(review): setwd() with a hard-coded user path makes this script
# non-portable; consider project-relative paths (e.g. the here package).
setwd("~/Project1-WFM/Project5 - NLP/Emotion Tagging/")
data <- read.csv("emotion_data.csv", stringsAsFactors = F)
#### Step 1 - emotion tagging ####
#counting total number of words
# total_words_count <- data %>%
# unnest_tokens(word, email_new_1_POS_filt) %>%
# anti_join(stop_words, by = "word") %>%
# filter(!grepl('[0-9]', word)) %>%
# left_join(get_sentiments("nrc"), by = "word") %>%
# filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
# !(word %in% colors()) & !(word == "shopping" )) %>%
# group_by() %>%
# summarize(total= n()) %>%
# ungroup()
#
#
# #counting total number of words
# total_words_count2 <- data %>%
# unnest_tokens(word, email_new_2_POS_filt) %>%
# anti_join(stop_words, by = "word") %>%
# filter(!grepl('[0-9]', word)) %>%
# left_join(get_sentiments("nrc"), by = "word") %>%
# filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
# !(word %in% colors()) & !(word == "shopping" )) %>%
# group_by() %>%
# summarize(total= n()) %>%
# ungroup()
#
#
# #counting total number of words
# total_words_count3 <- data %>%
# unnest_tokens(word, email_new_3_POS_filt) %>%
# anti_join(stop_words, by = "word") %>%
# filter(!grepl('[0-9]', word)) %>%
# left_join(get_sentiments("nrc"), by = "word") %>%
# filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
# !(word %in% colors()) & !(word == "shopping" )) %>%
# group_by() %>%
# summarize(total= n()) %>%
# ungroup()
#
#
# #counting total number of words
# total_words_count4 <- data %>%
# unnest_tokens(word, email_new_4_POS_filt) %>%
# anti_join(stop_words, by = "word") %>%
# filter(!grepl('[0-9]', word)) %>%
# left_join(get_sentiments("nrc"), by = "word") %>%
# filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
# !(word %in% colors()) & !(word == "shopping" )) %>%
# group_by() %>%
# summarize(total= n()) %>%
# ungroup()
#
# #counting total number of words
# total_words_count5 <- data %>%
# unnest_tokens(word, email_new_5_POS_filt) %>%
# anti_join(stop_words, by = "word") %>%
# filter(!grepl('[0-9]', word)) %>%
# left_join(get_sentiments("nrc"), by = "word") %>%
# filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
# !(word %in% colors()) & !(word == "shopping" )) %>%
# group_by() %>%
# summarize(total= n()) %>%
# ungroup()
################## PROMOTER ###################################
# NOTE(review): the section is labelled PROMOTER but selects
# csat_flag == 0 and the output file below is named
# "Demoter_distribution.csv" -- confirm which group csat_flag == 0
# actually denotes.
promoter <- data %>% filter(csat_flag == 0)
# COVID-era boilerplate terms excluded from the emotion counts.
covid_terms <- c("serve","shortly","inform","assist","unable","assured","avoid","shopping","love")
# For each of the five email columns: tokenise, drop stop words and
# numeric tokens, tag words with NRC emotions, drop plain
# positive/negative polarity tags plus colour names and COVID terms,
# then count words per emotion. The five pipelines are identical except
# for the input column.
# email 1
emotion_words_count <- promoter %>%
  unnest_tokens( word,email_new_1_POS_filt, drop = FALSE, collapse = T) %>%
  anti_join(stop_words, by = "word") %>%
  filter(!grepl('[0-9]', word)) %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
           !(word %in% colors()) & !(word %in% covid_terms )) %>%
  group_by(sentiment) %>%
  summarize(total= n()) %>%
  ungroup()
# email 2
emotion_words_count2 <- promoter %>%
  unnest_tokens( word,email_new_2_POS_filt, drop = FALSE, collapse = T) %>%
  anti_join(stop_words, by = "word") %>%
  filter(!grepl('[0-9]', word)) %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
           !(word %in% colors()) & !(word %in% covid_terms )) %>%
  group_by(sentiment) %>%
  summarize(total= n()) %>%
  ungroup()
# email 3
emotion_words_count3 <- promoter %>%
  unnest_tokens( word,email_new_3_POS_filt, drop = FALSE, collapse = T) %>%
  anti_join(stop_words, by = "word") %>%
  filter(!grepl('[0-9]', word)) %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
           !(word %in% colors()) & !(word %in% covid_terms )) %>%
  group_by(sentiment) %>%
  summarize(total= n()) %>%
  ungroup()
# email 4
emotion_words_count4 <- promoter %>%
  unnest_tokens( word,email_new_4_POS_filt, drop = FALSE, collapse = T) %>%
  anti_join(stop_words, by = "word") %>%
  filter(!grepl('[0-9]', word)) %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
           !(word %in% colors()) & !(word %in% covid_terms )) %>%
  group_by(sentiment) %>%
  summarize(total= n()) %>%
  ungroup()
# email 5
emotion_words_count5 <- promoter %>%
  unnest_tokens( word,email_new_5_POS_filt, drop = FALSE, collapse = T) %>%
  anti_join(stop_words, by = "word") %>%
  filter(!grepl('[0-9]', word)) %>%
  left_join(get_sentiments("nrc"), by = "word") %>%
  filter(!(sentiment == "negative" | sentiment == "positive" | sentiment == "NA") &
           !(word %in% colors()) & !(word %in% covid_terms )) %>%
  group_by(sentiment) %>%
  summarize(total= n()) %>%
  ungroup()
# Rename each count column after its email, then merge all five on the
# emotion label into one wide table.
emotion_words_count <- emotion_words_count %>% rename( email_1 = total)
emotion_words_count2 <- emotion_words_count2 %>% rename( email_2 = total)
emotion_words_count3 <- emotion_words_count3 %>% rename( email_3 = total)
emotion_words_count4 <- emotion_words_count4 %>% rename( email_4 = total)
emotion_words_count5 <- emotion_words_count5 %>% rename( email_5 = total)
e12 <- merge(emotion_words_count, emotion_words_count2, by = "sentiment")
e123 <- merge(e12, emotion_words_count3, by = "sentiment")
e1234 <- merge(e123, emotion_words_count4, by = "sentiment")
e12345 <- merge(e1234, emotion_words_count5, by = "sentiment")
rm(e12,e123,e1234)
rm(emotion_words_count, emotion_words_count2, emotion_words_count3, emotion_words_count4, emotion_words_count5)
graph_promoter <- e12345
# Long format for plotting; only emails 4 and 5 are visualised/exported.
mydf.molten <- melt(graph_promoter[,c("sentiment","email_4","email_5")], value.name="Count", variable.name="Variable", na.rm=TRUE)
ggplot(mydf.molten, aes(x=Variable,y = Count)) +geom_bar(stat = "identity") + facet_wrap( "sentiment" )
# NOTE(review): output file says "Demoter" although this section is
# labelled PROMOTER -- confirm the intended group (see filter above).
write.csv(mydf.molten, file = "after removing covid words/Demoter_distribution.csv", row.names = F)
|
49be3f2f65adca38533de8a3cc7117b639a3aad1
|
66e04f24259a07363ad8da7cd47872f75abbaea0
|
/Correlation and Regression/Chapter 5-Model Fit/7.R
|
732ed520e4a3829cb944f38d556339fcce5593c1
|
[
"MIT"
] |
permissive
|
artileda/Datacamp-Data-Scientist-with-R-2019
|
19d64729a691880228f5a18994ad7b58d3e7b40e
|
a8b3f8f64cc5756add7ec5cae0e332101cb00bd9
|
refs/heads/master
| 2022-02-24T04:18:28.860980
| 2019-08-28T04:35:32
| 2019-08-28T04:35:32
| 325,043,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 841
|
r
|
7.R
|
# High leverage points
#
# Not every high-leverage observation is influential. The high-leverage
# observation for Bobby Scales in the previous exercise is influential,
# but the three players with OBP and SLG values of 0 are not: they
# already lie near the regression line, so their extremely low OBP
# (high leverage) is never converted into actual influence on the slope.
#
# Instructions
# 100 XP
# The linear model, mod, is available in your workspace. Find the top 6
# observations with the highest leverage but the lowest Cook's distance.

# Rank high leverage points: leverage descending, Cook's distance
# ascending, then keep the first six rows.
head(
  arrange(
    augment(mod),
    desc(.hat), .cooksd
  )
)
|
b8d1582f613a7698acbc2c4b9a7d8a9e0cad1828
|
47949eb43a023ad2f81dedf498d2bb5e50a66e73
|
/06-MSN.R
|
5d7d3a732847483c8f1a1db634a8579961c22cfd
|
[
"MIT"
] |
permissive
|
everhartlab/brazil-sclerotinia-2017
|
fd1eff318e7f7a51f4b20ebf317f2b67f9ddd20f
|
e3b472a3f204be5a9b37e013abd788ebfba3bc79
|
refs/heads/master
| 2021-03-16T10:20:53.039968
| 2020-09-25T21:22:10
| 2020-09-25T21:22:10
| 111,736,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,902
|
r
|
06-MSN.R
|
library("poppr")
library("igraph")
# Remember the current encoding and switch to latin-1 while reading the
# data. NOTE(review): `enc` is never restored in this chunk -- confirm
# options(encoding = enc) happens later in the file.
enc <- getOption("encoding")
# TRUE when run via Rscript; interactive sessions draw to screen.
in_script <- !interactive()
options(encoding = "iso-8859-1")
CD <- readRDS(here::here("data", "full-genclone-object.rds"))
# Minimum spanning network on Bruvo's distance (repeat lengths stored
# alongside the genclone object).
min_span_net <- bruvo.msn(CD, replen = other(CD)$REPLEN, showplot = FALSE, include.ties = TRUE)
# First (invisible) plot pass just to obtain the processed network with
# the chosen palette/greyscale settings.
set.seed(69)
min_span_net <- plot_poppr_msn(CD,
                               min_span_net,
                               inds = "NONE",
                               mlg = FALSE,
                               gadj = 3,
                               palette = other(CD)$palette,
                               cutoff = NULL,
                               quantiles = FALSE,
                               beforecut = TRUE,
                               pop.leg = FALSE,
                               layfun = igraph::layout_nicely)
opar <- par(no.readonly = TRUE)
# When scripted, draw into a CMYK PDF sized for publication.
if (in_script){
  pdf(here::here("figs/MSN.pdf"), width = 3.464565 * 1, height = 3.464565 * 1, pointsize = 5, colormodel = "cmyk")
  dev.control("enable")
}
par(mar = c(0.1, 0.1, 0.1, 0.1))
# code from plot_poppr_msn.
# Draw a horizontal grey-scale bar legend mapping edge grey values back
# to Bruvo's distance. `msn` is a minimum spanning network whose $graph
# carries edge weights; `glim`/`gadj` are the grey limits and adjustment
# fed to poppr's internal adjustcurve() -- they must match the values
# used when plotting for the legend to correspond to the edges.
make_scale_bar <- function(msn, glim = c(0, 0.8), gadj = 3){
  w <- edge_attr(msn$graph, "weight")
  wmin <- min(w)
  wmax <- max(w)
  # 1000 evenly spaced distances across the observed weight range,
  # mapped onto the same grey ramp as the plotted edges.
  scales <- seq(wmin, wmax, l = 1000)
  greyscales <- grDevices::gray(poppr:::adjustcurve(scales, show = FALSE, glim = glim, correction = gadj))
  legend_image <- grDevices::as.raster(matrix(greyscales, nrow = 1))
  graphics::par(mar = c(0, 1, 0, 1) + 0.5)
  graphics::plot.new()
  graphics::rasterImage(legend_image, 0, 0.5, 1, 1)
  # White triangular overlay gives the bar a tapered wedge look.
  graphics::polygon(c(0, 1, 1), c(0.5, 0.5, 0.8), col = "white",
                    border = "white", lwd = 2)
  # Axis on top with the distance quantiles; title underneath.
  graphics::axis(3, at = c(0, 0.25, 0.5, 0.75, 1),
                 labels = round(quantile(scales), 3))
  graphics::text(0.5, 0, labels = "Bruvo's distance", font = 2,
                 cex = 1.5, adj = c(0.5, 0))
}
# Two-row layout: network on top, scale bar in a short strip underneath.
graphics::layout(matrix(c(1,2), nrow = 2), heights = c(4.5, 0.5))
set.seed(124)
# Graph is plotted so the area is scaled by number of samples
vsizes <- vertex_attr(min_span_net$graph, "size")
# poppr changed vertex sizing at 2.5.0.99; older versions store raw counts,
# so take the square root first to keep plotted area proportional.
vsizes <- if (packageVersion("poppr") < package_version("2.5.0.99")) sqrt(vsizes) * 5 else vsizes * 5
lay <- igraph::layout_with_gem(min_span_net$graph)#[, 2:1]
plot.igraph(min_span_net$graph,
            margin = -0.025,
            vertex.size = vsizes,
            vertex.label = NA,
            layout = lay)
# Create population legend and save it into variable "a"
# (legend() returns box/text coordinates reused below to place the circle key)
sortpop <- names(other(CD)$palette)
a <- legend(x = -1.2,
            y = 0.2,
            legend = sortpop,
            fill = min_span_net$colors[sortpop])
# Create example circles for comparison
# (radii for 5..1 samples, matching the sqrt-area scaling above)
rads <- (sqrt(seq(5, 1))*5)/200
# Get the bottom of the pop legend
ybot <- a$rect$top - a$rect$h
# Get the space between legend elements
yspace <- min(abs(diff(a$text$y)))
# Create positions of circles vertically
circly <- rep(ybot - (2.5 * yspace), length(rads))
# Find the distance between two circles.
# https://stackoverflow.com/a/14830596/2752888
# Centre-to-centre distance of two externally tangent circles with radii
# c1 and c2; vectorised over both arguments.
cdist <- function(c1, c2){
  sqrt((c1 + c2)^2 - (c1 - c2)^2)
}
# spread the circles out
# Cumulative x-offsets that lay circles side by side, each tangent to its
# neighbour. `radii` is the vector of circle radii in drawing order;
# returns a numeric vector of the same length whose first element is 0.
make_adjacent_circles <- function(radii){
  # BUG FIX: was seq(radii), which for a length-1 input expands to 1:radii
  # (e.g. seq(5) == 1:5) instead of a single index; seq_along() is correct
  # for lengths 0 and 1 as well.
  offsets <- vapply(seq_along(radii), function(i){
    if (i == 1)
      0.0
    else
      # centre distance of two tangent circles (same algebra as cdist() above)
      sqrt((radii[i] + radii[i - 1])^2 - (radii[i] - radii[i - 1])^2)
  }, numeric(1))
  cumsum(offsets)
}
# shift the x position of the circles
circlx <- a$rect$left + a$rect$w/4
circlx <- make_adjacent_circles(rads) + circlx
# Create the circle legend
text(x = a$rect$left + a$rect$w/2, y = ybot - (yspace), label = "Samples per MLG")
symbols(x = circlx, y = circly, circles = rads, add = TRUE, inches = FALSE, asp = 1)
text(x = circlx, y = circly, labels = seq(5, 1), font = 2)
# Create the scale bar legend (drawn into the second layout panel)
make_scale_bar(min_span_net)
# reset the graphics layout to a single panel
graphics::layout(matrix(1, ncol = 1, byrow = TRUE))
# When scripted, copy the finished PDF page to a high-resolution TIFF.
if (in_script) dev.copy(device = tiff, here::here("figs/MSN.tiff"), width = 3.464565 * 1, height = 3.464565 * 1, pointsize = 5, units = "in", res = 1200)
# reset par
par(opar)
# Close both devices (the TIFF copy and the original PDF).
if (in_script) {
  dev.off()
  dev.off()
}
# Restore the encoding saved at the top of the script.
options(encoding = enc)
|
2cb17800a0eebde8035a6b19e74aad5152f40017
|
eece0098d8700a7a56809fba4f2e3be17d95c233
|
/spark/demo/ms_demo_1000_rmark.R
|
ddd01726b18519407db73cb439162cdad6eb08bd
|
[] |
no_license
|
sjbonner/spark
|
2ab969de7ffeb7bde3aa07905efc00f281747220
|
281e2dc961236eb526b874f72872329825066c9a
|
refs/heads/master
| 2023-07-10T05:44:29.856545
| 2023-06-29T16:04:45
| 2023-06-29T16:04:45
| 36,303,277
| 0
| 1
| null | 2016-09-20T02:27:45
| 2015-05-26T14:57:30
|
R
|
UTF-8
|
R
| false
| false
| 3,440
|
r
|
ms_demo_1000_rmark.R
|
## Multistate capture-recapture demo: fit the same Multistrata model to
## spark-truncated data and to the original data, then compare the
## survival, capture, and transition estimates graphically.
## Load packages
library(RMark)
library(ggplot2)
## Set path to data set
infile = system.file("extdata", "msdata1000.inp", package = "spark")
## Run spark (truncates the capture histories)
msdata = spark(infile = infile,
               informat = "mark",
               outformat = "mark")
## Fit model to truncated data
## 1) Process data
msprocessed.trunc =
  process.data(
    msdata,
    model = "Multistrata"
  )
## 2) Build design data
msddl.trunc = make.design.data(msprocessed.trunc,
                               parameters = list(
                                 Psi = list(pim.type = "constant"),
                                 S = list(pim.type = "time"),
                                 p = list(pim.type = "time")
                               ))
## 3) Fit model (time-varying S and p, stratum-to-stratum Psi)
model.parameters = list(
  S = list(formula = ~ time),
  p = list(formula = ~ time),
  Psi = list(formula = ~ stratum:tostratum - 1)
)
# Timing is kept so truncated vs. full run times can be compared.
time.trunc =
  system.time(
    msmodel.trunc <- mark(
      msprocessed.trunc,
      msddl.trunc,
      model.parameters = model.parameters,
      threads = 4,
      output = FALSE
    )
  )
## Fit model to full data
## 1) Process data
msprocessed.full =
  process.data(RMark::convert.inp(infile), model = "Multistrata")
## 2) Build design data
msddl.full = make.design.data(msprocessed.full,
                              parameters = list(
                                Psi = list(pim.type = "constant"),
                                S = list(pim.type = "time"),
                                p = list(pim.type = "time")
                              ))
## 3) Run model
time.full =
  system.time(
    msmodel.full <- mark(
      msprocessed.full,
      msddl.full,
      model.parameters = model.parameters,
      threads = 4,
      output = FALSE
    )
  )
## Compare parameter estimates
## NOTE(review): the row ranges 1:34 (S), 35:68 (p), 69:88 (Psi) into
## results$real assume this dataset's parameter layout -- confirm if the
## input data or model structure changes. The +/-0.2 x-offsets just dodge
## the two series in the plots.
# Survival
ms.survival <- rbind(data.frame(Data="Truncated",
                                x=1:34 -.2,
                                msmodel.trunc$results$real[1:34,c(1,3,4)]),
                     data.frame(Data="Original",
                                x=1:34 +.2,
                                msmodel.full$results$real[1:34,c(1,3,4)]))
ggplot(ms.survival,aes(x,estimate,group=Data,color=Data)) +
  geom_point() +
  geom_errorbar(aes(ymin=lcl,ymax=ucl)) +
  ylim(c(0,1)) +
  xlab("Occasion") + ylab("Survival Probability")
# Capture
ms.capture <- rbind(data.frame(Data="Truncated",
                               x=1:34 -.2,
                               msmodel.trunc$results$real[35:68,c(1,3,4)]),
                    data.frame(Data="Original",
                               x=1:34 +.2,
                               msmodel.full$results$real[35:68,c(1,3,4)]))
ggplot(ms.capture,aes(x,estimate,group=Data,color=Data)) +
  geom_point() +
  geom_errorbar(aes(ymin=lcl,ymax=ucl)) +
  ylim(c(0,1)) +
  xlab("Occasion") + ylab("Capture Probability")
# Transition
ms.transition <- rbind(data.frame(Data="Truncated",
                                  x=1:20 -.2,
                                  msmodel.trunc$results$real[69:88,c(1,3,4)]),
                       data.frame(Data="Original",
                                  x=1:20 +.2,
                                  msmodel.full$results$real[69:88,c(1,3,4)]))
ggplot(ms.transition,aes(x,estimate,group=Data,color=Data)) +
  geom_point() +
  geom_errorbar(aes(ymin=lcl,ymax=ucl)) +
  ylim(c(0,1)) +
  xlab("Parameter") + ylab("Transition Probability")
|
3786371db02e997e323adaf2db7ef1a250f31201
|
4c23c6f3a80bfb5a2ec611fc3534ae2a89a757ba
|
/R/gbt.fit.R
|
441000eb23b69adae16602bbc347d0a533d07f76
|
[] |
no_license
|
harrysouthworth/gbt
|
dd90dca3052d06a7486d9f0483ec279aba12aedb
|
ccda94b1c21951e20aaa09d04d2b27b3f494e361
|
refs/heads/master
| 2016-09-05T17:28:12.767656
| 2014-04-10T10:16:58
| 2014-04-10T10:16:58
| 18,555,593
| 3
| 5
| null | 2014-04-10T10:16:58
| 2014-04-08T11:23:01
|
C
|
UTF-8
|
R
| false
| false
| 7,084
|
r
|
gbt.fit.R
|
# Fit a gradient-boosted tree model. Builds the design matrix from
# `formula`/`data`, optionally runs `cv.folds`-fold cross-validation, and
# (when cross-validating) refits on the full data. The boosting itself is
# done by the compiled C routine invoked via .Call("gbt", ...).
#
# Arguments:
#   formula            model formula (response ~ predictors)
#   loss               "squaredLoss" (regression) or "binomialDeviance"
#                      (0/1 classification)
#   data               data frame containing the variables in `formula`
#   n.trees            number of boosting iterations
#   interaction.depth  depth of each tree (tree vectors are 6*2^depth long)
#   shrinkage          list with element `type` in "fixed"/"arithmetic"/
#                      "geometric"/"negative.exp" plus schedule values
#   bag.fraction       fraction of training rows subsampled per iteration
#   cv.folds           number of CV folds (<= 1 disables cross-validation)
#   conjugate.gradient flag forwarded to the C routine
#   store.results      keep the per-iteration diagnostics matrix?
#   verbose            print per-fold diagnostics?
# Returns: a list ("gbt" model object) holding the packed tree matrix, the
# per-iteration shrinkage `nu`, optional `results` diagnostics, and `ri`
# (zero-initialised relative-influence vector named like the columns of x).
gbt.fit <-
function( formula,
          loss="squaredLoss",
          data,
          n.trees,
          interaction.depth,
          shrinkage,
          bag.fraction,
          cv.folds,
          conjugate.gradient,
          store.results,
          verbose)
{
  # Template for the returned object; a fresh copy is made per fold.
  gbt.defaultObj <- list( formula = formula,
                          loss = loss,
                          data = data.frame(),
                          n.trees = n.trees,
                          interaction.depth = interaction.depth,
                          shrinkage = shrinkage,
                          bag.fraction = bag.fraction,
                          conjugate.gradient = conjugate.gradient,
                          initF = 0,
                          treematrix = numeric(1),
                          nu = numeric(n.trees),
                          results = numeric(1),
                          ri = numeric(1))
  # Model frame -> design matrix x (includes intercept column) and response y.
  mf <- model.frame(formula=formula, data=data)
  x <- model.matrix(attr(mf, "terms"), data=mf)
  y <- model.response(mf)
  # ri: placeholder for per-feature relative influence, named like a row of x.
  ri <- x[1,]
  ri[] <- 0
  pf <- process.features(x)
  data.size <- dim(x)[1]
  allowed.rows <- numeric(data.size)
  #if classification
  if(gbt.defaultObj$loss == 'binomialDeviance') {
    t<-table(y)
    if(length(t) != 2) {
      stop("The response must contain exactly 2 classes")
    }
    if(as.integer(names(t)[1]) != 0 || as.integer(names(t)[2]) != 1) {
      stop("The response must be either 0 or 1")
    }
    loss.integer <- 1
  }
  else {
    loss.integer <- 0
  }
  training.sets.indexes <- vector("list",cv.folds)
  indexes <- 1:data.size
  #if cross-validation
  if(cv.folds > 1) {
    #this cross-validation folds-creating code is adapted from the gbm package source code
    #if classification, create folds that preserve the proportion of positives and negatives examples
    if(gbt.defaultObj$loss == 'binomialDeviance') {
      uc <- names(t)
      if ( min( t ) < cv.folds ){
        stop( paste("The smallest class has only", min(t), "objects in the training set. Can't do", cv.folds, "fold cross-validation."))
      }
      cv.group <- vector( length = data.size )
      for ( i in 1:length( uc ) ){
        cv.group[ y == uc[i] ] <- sample( rep( 1:cv.folds , length = t[i] ) )
      }
    }
    else {
      cv.group <- sample(rep(1:cv.folds, length=data.size))
    }
    valid.size <- table(cv.group)
    #print(valid.size)
    # Each fold trains on every row outside that fold.
    for(f in 1:cv.folds) {
      training.sets.indexes[[f]] <- indexes[(cv.group!=f)]
    }
  }
  else { #no cross validation
    cv.folds <- 1
    valid.size <- c(0)
    training.sets.indexes[[1]] <- 1:data.size
  }
  # Diagnostics rows: one per iteration for the first min(n.trees, 10),
  # then one per 100 iterations; 5 columns per row (see dimnames below).
  if(store.results || verbose) {
    n.results.per.cv <- floor(n.trees/100)+min(n.trees, 10)
    gbt.defaultObj$results <- numeric(n.results.per.cv * 5)
  }
  if(store.results) {
    results <- numeric(cv.folds * n.results.per.cv * 5)
  }
  # Encode the shrinkage schedule type as an integer for the C routine.
  # NOTE(review): the first as.integer(shrinkage$type) coerces a character
  # string (yielding NA with a warning) and is immediately overwritten by
  # the if/else chain below -- looks redundant; confirm before removing.
  shrinkage.integer <- shrinkage
  shrinkage.integer$type <- as.integer(shrinkage$type)
  if(shrinkage$type == "fixed") {
    shrinkage.integer$type <- 0
  }
  else if(shrinkage$type == "arithmetic") {
    shrinkage.integer$type <- 1
  }
  else if(shrinkage$type == "geometric") {
    shrinkage.integer$type <- 2
  }
  else if(shrinkage$type == "negative.exp") {
    shrinkage.integer$type <- 3
  }
  else {
    stop("Unkown shrinkage type")
  }
  shrinkage.integer$type <- as.integer(shrinkage.integer$type)
  # Packed length of one tree vector in the flat treematrix.
  treevector.size <- 6*(2^interaction.depth)
  #per training set
  for(f in 1:cv.folds) {
    gbt.obj <- gbt.defaultObj
    #current training set size
    training.size <- data.size - valid.size[f]
    #subsample size
    sample.size <- floor(gbt.obj$bag.fraction * training.size)
    #start with constant: log-odds for classification, mean for regression
    if(gbt.defaultObj$loss == 'binomialDeviance') {
      gbt.obj$initF <- log(sum(y[training.sets.indexes[[f]]])/(training.size-sum(y[training.sets.indexes[[f]]])))
    }
    else {
      gbt.obj$initF <- mean(y[training.sets.indexes[[f]]])
    }
    if(gbt.obj$n.trees > 0) {
      # 0/1 masks telling the C code which rows belong to train/validation.
      allowed.rows.training <- allowed.rows
      allowed.rows.valid <- allowed.rows
      training.indexes <- training.sets.indexes[[f]]
      allowed.rows.training[training.indexes] <- 1
      if(valid.size[f] > 0) {
        valid.indexes <- indexes[(cv.group==f)]
        allowed.rows.valid[valid.indexes] <- 1
      }
      gbt.obj$treematrix <- numeric(n.trees*treevector.size)
      # The C routine fills treematrix, nu and (optionally) results in place.
      # Note training.indexes is passed 0-based.
      .Call("gbt", as.integer(loss.integer), as.integer(n.trees), as.integer(dim(x)[2]-1), as.integer(training.size), as.integer(valid.size[f]), as.numeric(x), as.numeric(y), as.integer(pf$ordered.indexes), as.integer(training.indexes-1), as.integer(allowed.rows.training), as.integer(allowed.rows.valid), as.character(pf$type), as.numeric(pf$val), as.integer(sample.size), shrinkage.integer, as.numeric(gbt.obj$initF), as.integer(conjugate.gradient), as.integer(interaction.depth), as.integer(treevector.size), as.integer(verbose || store.results), as.numeric(gbt.obj$treematrix), as.numeric(gbt.obj$nu), as.numeric(gbt.obj$results))
      if(verbose) {
        cat("CV: ", f, "\n")
        print(matrix(byrow=TRUE, data=gbt.obj$results, nrow=n.results.per.cv, ncol=5, dimnames=list(seq(1,n.results.per.cv,1), c("Iteration", "Train Error/Deviance", "Step", "Test Error/Deviance", "Improve"))))
        flush.console()
      }
      if(store.results) {
        # Append this fold's diagnostics into the flat results buffer.
        results[(5*(f-1)*n.results.per.cv+1):(5*f*n.results.per.cv)] <- gbt.obj$results
      }
    }
  }
  if(cv.folds > 1) { #train using the entire training set now
    # NOTE(review): sample.size is not recomputed here, so the final
    # full-data fit reuses the subsample size from the LAST CV fold
    # (bag.fraction * (data.size - last fold size)) -- confirm intended.
    gbt.obj <- gbt.defaultObj
    training.size <- data.size
    valid.size <- 0
    allowed.rows.training <- allowed.rows
    allowed.rows.valid <- allowed.rows
    training.indexes <- 1:training.size
    allowed.rows.training[] <- 1
    gbt.obj$treematrix <- numeric(n.trees*treevector.size)
    .Call("gbt", as.integer(loss.integer), as.integer(n.trees), as.integer(dim(x)[2]-1), as.integer(training.size), as.integer(valid.size), as.numeric(x), as.numeric(y), as.integer(pf$ordered.indexes), as.integer(training.indexes-1), as.integer(allowed.rows.training), as.integer(allowed.rows.valid), as.character(pf$type), as.numeric(pf$val), as.integer(sample.size), shrinkage.integer, as.numeric(gbt.obj$initF), as.integer(conjugate.gradient), as.integer(interaction.depth), as.integer(treevector.size), as.integer(verbose || store.results), as.numeric(gbt.obj$treematrix), as.numeric(gbt.obj$nu), as.numeric(gbt.obj$results))
  }
  # The final model is whichever gbt.obj was fitted last (full-data fit
  # when cross-validating, the single fit otherwise).
  gbt.model <- gbt.obj
  if(store.results) {
    gbt.model$results <- matrix(byrow=TRUE, data=results, nrow=(cv.folds * n.results.per.cv), ncol=5, dimnames=list(seq(1,cv.folds*n.results.per.cv,1), c("Iteration", "Train Error/Deviance", "Step", "Test Error/Deviance", "Improve")))
  }
  gbt.model$ri <- ri
  return(gbt.model)
}
|
a363ee42bc0edb545dfc1823d8ee3b3e702d8611
|
66f8711bc942a1bc635a6deea253e9a49c718094
|
/R/nnNetwork.R
|
bf250f3f8112ce863566cbc8b54cd66c69ffae8a
|
[
"MIT"
] |
permissive
|
seanrsilver/novnet
|
bd179476c48a8dd809757c60488dde7193a4145b
|
85107cfbbabc68c603134db5b5fc8bbf9219624b
|
refs/heads/master
| 2020-06-05T18:20:58.057024
| 2019-06-18T14:29:45
| 2019-06-18T14:29:45
| 192,495,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,976
|
r
|
nnNetwork.R
|
#' nnNetwork()
#'
#' A visualization function, which produces a network-style visualization from
#' a properly formed Novel Networks data set.
#' nnNetwork() chains
#'
#' @param filename <- "Frankenstein"
#' @param include.all <- TRUE
#' @param set.voice <- NA
#' @param local <- TRUE
#' @param filename Character string of text name, with associated Char.csv and .txt files.
#' @param include.all Logical vector or numerical range. If TRUE (default), processes whole text. If range (i.e. 1:18), processes only those chapters. If FALSE, will offer a prompt to determine a range.
#' @param set.voice NA, or character string. If NA, Char.csv pulls voice from associated *Char.csv file. If "third", will ignore first-person pronouns. If "first", will attempt to transform all non-dialogue "I" and "me" into narrator, signaled on Char.csv file as "TextEgo".
#' @param local Logical vector. If FALSE (default), looks in Google Drive for files. If TRUE, looks for filename in a folder with path data/filename/.
#' @param layout.with Character. Sets layout algorithm. Accepts "fr" (Fruchterman-Reingold, default), "gem" (GEM force-directed), "kk" (Kamada-Kawai), "tree", "circle", "sphere", "lgl" (Large Graph).
#' @param plot.community NA for no community detection, "spinglass" for simulated annealing, or "walktrap" for a random walk.
#' @param vertex.size Numeric (default = 1). Adjusts vertex size in plot. Coefficient applied to square root of vertex degree.
#' @param edge.width Numeric (default = .5). Adjusts edge width in plot. Coefficient applied to square root of total number of context coappearances.
#' @param context Counting number. How many words from each name should the function seek others? Default = 15.
#' @param multiplex Logical. Return a multiplex graph? Default = FALSE.
#' @param token.threshold Numeric. Removes characters with token count < x from graph. Default = 1.
#' @param metadata.off Default = NA. Expects a length-two vector of c("meta", "VALUE"), or matrix of vectors, to cull characters by metadata category. Useful for removing non-characters, generating subgraphs by gender, etc...
#' @param sample Default = FALSE. Logical, indicating whether the .txt should be randomized (i.e. words scrambled). Useful for checking whether modularity results are statistically meaningful.
#' @param vertex.label.thresh Numeric (Default = 1). Only vertices >= this value will be labeled.
#' @param bg.col Character. Sets background color for plot. Default = "white".
#'
#' @keywords NovNet Utilities
#'
#' @import igraph
#'
#' @export
#'
nnNetwork <- function(filename,
                      include.all = TRUE,
                      set.voice = NA,
                      local = FALSE,
                      context = 15,
                      layout.with = "fr",
                      plot.community = "spinglass",
                      vertex.size = 1,
                      edge.width = .5,
                      multiplex = FALSE,
                      token.threshold = 1,
                      metadata.off = NA,
                      sample = FALSE,
                      vertex.label.thresh = 1,
                      bg.col = "white"){
  # The roxygen docs allow plot.community = NA for "no community detection",
  # but the scalar comparisons below would error on NA (if (NA) ...).
  # Normalise NA to FALSE up front.
  if (length(plot.community) == 1 && is.na(plot.community)) {
    plot.community <- FALSE
  }
  ### 1) Scan in .txt and .csv files, return as data frame ----
  # BUG FIX: forward the caller's arguments. Previously include.all,
  # set.voice and local were hard-coded to their defaults here, silently
  # ignoring whatever the user passed.
  novel.dat <- Txt_to_df(filename = filename,
                         include.all = include.all,
                         set.voice = set.voice, # set to override Char.csv settings
                         local = local)
  ### 2) Generate edgelist from char.data.df and text.uniq.v ----
  ### a) Identify everywhere a character name appears within a certain range
  ### of another character name, and record in an edgelist: from, to, position.
  ## Position == word number of from character
  ## create a lookforward KWIC matrix
  # BUG FIX: context, multiplex, token.threshold, metadata.off and sample
  # were likewise hard-coded; they are now forwarded.
  novel.net <- charDf_to_edgelist(data = novel.dat,
                                  context = context,
                                  multiplex = multiplex,
                                  token.threshold = token.threshold, # Austen, 10
                                  metadata.off = metadata.off, # for Austen settings: c("character", "N")
                                  sample = sample)
  ## Extract a network object (net.igr)
  # BUG FIX: honour the layout.with argument (was hard-coded to "fr").
  net.igr <- nnLayout(novel.net = novel.net,
                      layout.with = layout.with,
                      remove.isolates = TRUE,
                      min.degree = 1)
  ## Create Colors
  # NOTE(review): the *.color.index argument names look swapped between the
  # two calls (vertex.color.index for the edge palette, edge.color.index for
  # the vertex palette); preserved as-is -- confirm against
  # createColorPalette()'s signature.
  E(net.igr)$edge.color <- createColorPalette(edgelist = net.igr,
                                              vertex.color.index = "weight",
                                              color.palette = c("gray80", "gray20"),
                                              alpha.range = c(.2, .8),
                                              vertex.edge = "edge")
  V(net.igr)$vertex.color <- createColorPalette(edgelist = net.igr,
                                                edge.color.index = "tokenCount",
                                                color.palette = c("yellow", "tomato"),
                                                alpha.range = c(.5, 1),
                                                vertex.edge = "vertex")
  # Cut off names of low-token-count characters
  V(net.igr)$vertex.label <- V(net.igr)$name
  V(net.igr)$vertex.label[which(V(net.igr)$tokenCount <= vertex.label.thresh)] <- NA
  ## Plot
  # Set plot parameters: background color and margins
  par(bg = bg.col, mar = c(1, 1, 1, 1))
  # plot title
  if(!identical(plot.community, FALSE)){title.start.v <- "The Communities of "} else {
    title.start.v <- "Character Network of "}
  main.title <- paste0(title.start.v, " ",
                       novel.dat$file.metadata.v["Title"],
                       " (",
                       novel.dat$file.metadata.v["Year"],
                       ")")
  # Plot without community detection:
  if(identical(plot.community, FALSE)){
    plot(net.igr,
         edge.arrow.size = 0, edge.curved = 0,
         edge.width = edge.width*E(net.igr)$weight^.4,
         edge.color = E(net.igr)$edge.color,
         layout = net.igr$layout,
         vertex.size = vertex.size*degree(net.igr)^.4,
         vertex.color = V(net.igr)$vertex.color,
         vertex.label = V(net.igr)$vertex.label,
         vertex.label.family = "Monaco", vertex.label.color = "black",
         vertex.label.cex = .5,
         label.degree = pi/2,
         label.dist = 1,
         main = main.title)
  }
  # Plot with community detection
  # generate community object (net.com) from walktrap or spinglass algorithms
  if(plot.community == "walktrap"){
    # walktrap
    net.com <- cluster_walktrap(net.igr)
    # BUG FIX: record memberships so the colour lookup below also works for
    # walktrap (previously only the spinglass branch set this attribute).
    V(net.igr)$community.from.subgraph <- membership(net.com)
  }
  if(plot.community == "spinglass"){
    # spinglass (involves finding the largest connected component)
    max.component.which.v <- which.max(clusters(net.igr)$csize)
    max.component.v <- which(clusters(net.igr)$membership == max.component.which.v)
    subgraph.igr <- induced.subgraph(net.igr, max.component.v)
    net.com <- cluster_spinglass(subgraph.igr)
    # map communities from subgraph to net.igr for color assignment
    V(net.igr)$community.from.subgraph <-
      membership(net.com)[V(net.igr)$name]
  }
  # BUG FIX: was if(!is.na(plot.community)), which also fired when
  # plot.community == FALSE and then failed on the undefined net.com.
  # (An unused colors.fun referencing RColorBrewer::brewer.pal -- a package
  # this function never attaches -- has been removed.)
  if(!identical(plot.community, FALSE)){
    default.colors <- categorical_pal(max(membership(net.com)))
    plot(net.com,
         net.igr,
         mark.col = NA,
         mark.border = "gray80",
         col = default.colors[V(net.igr)$community.from.subgraph],
         edge.arrow.size = 0, edge.curved = 0,
         edge.width = E(net.igr)$weight^.4,
         edge.color = E(net.igr)$edge.color,
         layout = net.igr$layout,
         vertex.size = vertex.size*degree(net.igr)^.4,
         vertex.label = V(net.igr)$vertex.label,
         vertex.label.family = "Monaco",
         vertex.label.color = "black",
         vertex.label.cex = .8,
         label.degree = pi/2,
         label.dist = 1,
         main = main.title)
  }
}
|
e9c0c1d039d059a6884a650daaa085fed6a0da9c
|
3723f4e4aa552d9e2dc5de24d158720492f5b711
|
/mu-gis-stats/load-packages.R
|
e000152742090cda570ad546ec08da5d2005baf9
|
[] |
no_license
|
dylanbeaudette/reports
|
46048145f67824a7def13247fddd6947b184a640
|
2c00783093b2d35f18b19c800f17c281dac76a5e
|
refs/heads/master
| 2020-05-21T04:28:06.474527
| 2016-10-11T17:25:04
| 2016-10-11T17:25:04
| 54,495,118
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
r
|
load-packages.R
|
## you need all of these packages to run the report
# packages + deps from CRAN
packages.to.get <- c('knitr', 'rmarkdown', 'rgdal', 'raster', 'plyr', 'reshape2', 'aqp', 'soilDB', 'sharpshootR', 'latticeExtra', 'clhs', 'devtools', 'rgeos')
# Install each package for its side effect. `dependencies` is spelled out
# (the original relied on partial matching via `dep=`), and a plain loop
# replaces sapply(), whose collected return value was never used.
for (pkg in packages.to.get) {
  install.packages(pkg, dependencies = TRUE)
}
# latest versions from GitHub
devtools::install_github("ncss-tech/aqp", dependencies=FALSE, upgrade_dependencies=FALSE)
devtools::install_github("ncss-tech/soilDB", dependencies=FALSE, upgrade_dependencies=FALSE)
devtools::install_github("ncss-tech/sharpshootR", dependencies=FALSE, upgrade_dependencies=FALSE)
|
963dfa6c0a9b903b81ea9962e688204aa4252c96
|
dc64458135fdfd0ce1885714c1d56e82c5e94fa6
|
/nevada_scripts/task_scheduler.R
|
77084ea2026ba71c02fb2f8208c03c3291438d5f
|
[] |
no_license
|
GOliveira10/Nevada-Caucus-Monitoring
|
0d2abeac0fa06c061658440230021baf8a631dba
|
138d0856ca92e05c10a521ace605cee1de57fcd0
|
refs/heads/master
| 2021-01-02T07:48:16.592730
| 2020-02-25T04:55:08
| 2020-02-25T04:55:08
| 239,553,489
| 3
| 0
| null | 2020-02-23T04:08:56
| 2020-02-10T16:07:48
|
R
|
UTF-8
|
R
| false
| false
| 1,437
|
r
|
task_scheduler.R
|
# remotes::install_github("r-lib/later")
library(later)
library(lubridate)
# NOTE(review): read_csv, %>%, group_by/summarize/filter, and GET below come
# from readr, dplyr/magrittr, and httr, none of which are attached in this
# file -- presumably they are loaded by the sourced analysis script or the
# calling session; confirm.
# Scheduled check times: every 5 minutes across caucus day (24 h x 12/hour).
run_times <- seq.POSIXt(from = as.POSIXct("2020-02-22 14:10:00"),
                        to = as.POSIXct("2020-02-23 14:05:00"),
                        length.out = 24*12)
# Returns TRUE when the NYT precinct feed reports more precincts with votes,
# or more total votes, than the most recent locally cleaned snapshot.
check_for_updates <- function(){
  # Most recently modified cleaned snapshot on disk.
  file_info <- file.info(list.files("nevada_data/cleaned_timestamped", full.names = TRUE))
  latest_file <- rownames(file_info)[which.max(file_info$mtime)]
  d <- suppressMessages(read_csv(latest_file, progress = FALSE))
  # Number of precincts with at least one vote in the local snapshot.
  last_precincts_reporting <- d %>%
    group_by(GEOID10) %>%
    summarize(votes = sum(votes, na.rm = TRUE)) %>%
    filter(votes > 0) %>% nrow()
  last_total_votes <- sum(d$votes)
  # Live counts from the NYT elections API.
  precincts <- GET('https://int.nyt.com/applications/elections/2020/data/api/2020-02-22/precincts/NevadaDemPrecinctsGcs-latest.json') %>%
    content()
  precincts_reporting <- precincts$meta$precincts_reporting
  total_votes <- precincts$meta$total_votes
  return(((last_precincts_reporting < precincts_reporting) | (last_total_votes < total_votes)))
}
# Poll loop: when the current minute matches a scheduled check time, compare
# local vs. live counts and re-run the full analysis pipeline on new data.
repeat{
  if(round_date(Sys.time(), "minutes") %in% run_times){
    new_data <- check_for_updates()
    if(new_data){
      message("New data found.")
      source("./nevada_scripts/nevada_full_analysis_script.R")
    } else {
      message("No updates.")
    }
    Sys.sleep(60)  # skip past the current minute so a check runs at most once
  }
}
|
c99a1e9835d4ef5505e2657189e093f29114e359
|
7e478f91bf78f5492cf33c0dc70ce0dd95b2ea1b
|
/studentship_project/examples/Non-additive interaction for diuretics.R
|
94a9d9bf8389db14068a02b1d6a855bd022a9551
|
[
"MIT"
] |
permissive
|
MerrimanLab/urate_variance
|
82858372b968c78b50df4e99fa9cf144123eec7d
|
f280997664093907f543fc34b8a79788cec6c388
|
refs/heads/master
| 2020-12-24T16:24:10.016574
| 2016-03-07T01:43:32
| 2016-03-07T01:43:32
| 31,228,927
| 1
| 1
| null | 2016-03-07T01:43:33
| 2015-02-23T20:47:10
|
R
|
UTF-8
|
R
| false
| false
| 4,020
|
r
|
Non-additive interaction for diuretics.R
|
# Non-additive (dominant-model) gene-by-diuretic interaction tests for three
# urate-associated SNPs, using cohort data pooled from the loaded .RData.
load("Non-additive interaction for diuretics.RData")
dataset <- rbind(ARIC_snps, CARDIA_snps, CHS, CHSDIU_snps, FHS_snps)
# Diuretic-exposure indicator derived from the cohort labels.
dataset$DIU <- NA
dataset$DIU[dataset$COHORT=="CHSDIU" | dataset$COHORT=="ARICDIU"] <- 1
dataset$DIU[dataset$COHORT=="CHS" | dataset$COHORT=="ARIC" | dataset$COHORT=="FHS" | dataset$COHORT=="CARDIA"] <- 0
##RS3775935 AA > CA > CC (AA urate raising)
##6449173 TT > TG > GG (TT urate raising)
##7657853 AA > AG > GG (AA urate raising)
## Non-additive interaction testing - interaction term is the coefficient extracted.
dataset$RS3775935 <- as.character(dataset$RS3775935)
dataset$RS3775935_code[dataset$RS3775935=="CC" & !is.na(dataset$RS3775935)] <- 0 ## Change to RS3775935=="AA" for alternative allelic grouping
# BUG FIX: parentheses added around the two genotype comparisons. `&` binds
# tighter than `|`, so the NA guard previously applied only to the second
# comparison, letting missing genotypes produce NA logical subscripts.
dataset$RS3775935_code[(dataset$RS3775935=="CA" | dataset$RS3775935=="AA") & !is.na(dataset$RS3775935)] <- 1 ## Change to RS3775935=="CC" for alternative allelic grouping (AA vs. CA/AA)
table(dataset$RS3775935_code)
summary(lm(URICACID~(RS3775935_code)*DIU+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
dataset$RS6449173 <- as.character(dataset$RS6449173)
dataset$RS6449173_code[dataset$RS6449173=="TT" & !is.na(dataset$RS6449173)] <- 0
# BUG FIX: same precedence correction as above.
dataset$RS6449173_code[(dataset$RS6449173=="TG" | dataset$RS6449173=="GG") & !is.na(dataset$RS6449173)] <- 1
table(dataset$RS6449173_code)
summary(lm(URICACID~(RS6449173_code)*DIU+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
dataset$RS7657853 <- as.character(dataset$RS7657853)
dataset$RS7657853_code[dataset$RS7657853=="AA" & !is.na(dataset$RS7657853)] <- 0 ## Change to RS7657853=="GG" for alternative
# BUG FIX: same precedence correction as above.
dataset$RS7657853_code[(dataset$RS7657853=="GA" | dataset$RS7657853=="GG") & !is.na(dataset$RS7657853)] <- 1 ## Change to RS7657853=="AA" for alternative
table(dataset$RS7657853_code)
summary(lm(URICACID~(RS7657853_code)*DIU+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
## GxE groups, with "group" as the variable regressed against.
## Codes: 0 = no variant/no diuretic, 1 = variant/no diuretic,
##        2 = no variant/diuretic, 3 = variant/diuretic.
dataset$RS6449173_group <- NA
dataset$RS6449173_group[dataset$RS6449173_code==0 & dataset$DIU==0 & !is.na(dataset$RS6449173_code) & !is.na(dataset$DIU)] <- 0
dataset$RS6449173_group[dataset$RS6449173_code==1 & dataset$DIU==0 & !is.na(dataset$RS6449173_code) & !is.na(dataset$DIU)] <- 1
dataset$RS6449173_group[dataset$RS6449173_code==0 & dataset$DIU==1 & !is.na(dataset$RS6449173_code) & !is.na(dataset$DIU)] <- 2
dataset$RS6449173_group[dataset$RS6449173_code==1 & dataset$DIU==1 & !is.na(dataset$RS6449173_code) & !is.na(dataset$DIU)] <- 3
dataset$RS6449173_group <- as.factor(dataset$RS6449173_group)
table(dataset$RS6449173_group)
summary(lm(URICACID~RS6449173_group+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
dataset$RS7657853_group <- NA
dataset$RS7657853_group[dataset$RS7657853_code==0 & dataset$DIU==0 & !is.na(dataset$RS7657853_code) & !is.na(dataset$DIU)] <- 0
dataset$RS7657853_group[dataset$RS7657853_code==1 & dataset$DIU==0 & !is.na(dataset$RS7657853_code) & !is.na(dataset$DIU)] <- 1
dataset$RS7657853_group[dataset$RS7657853_code==0 & dataset$DIU==1 & !is.na(dataset$RS7657853_code) & !is.na(dataset$DIU)] <- 2
dataset$RS7657853_group[dataset$RS7657853_code==1 & dataset$DIU==1 & !is.na(dataset$RS7657853_code) & !is.na(dataset$DIU)] <- 3
dataset$RS7657853_group <- as.factor(dataset$RS7657853_group)
table(dataset$RS7657853_group)
summary(lm(URICACID~RS7657853_group+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
dataset$RS3775935_group <- NA
dataset$RS3775935_group[dataset$RS3775935_code==0 & dataset$DIU==0 & !is.na(dataset$RS3775935_code) & !is.na(dataset$DIU)] <- 0
dataset$RS3775935_group[dataset$RS3775935_code==1 & dataset$DIU==0 & !is.na(dataset$RS3775935_code) & !is.na(dataset$DIU)] <- 1
dataset$RS3775935_group[dataset$RS3775935_code==0 & dataset$DIU==1 & !is.na(dataset$RS3775935_code) & !is.na(dataset$DIU)] <- 2
dataset$RS3775935_group[dataset$RS3775935_code==1 & dataset$DIU==1 & !is.na(dataset$RS3775935_code) & !is.na(dataset$DIU)] <- 3
dataset$RS3775935_group <- as.factor(dataset$RS3775935_group)
table(dataset$RS3775935_group)
summary(lm(URICACID~RS3775935_group+SEX+AGE+BMI+PCA1+PCA2, data=dataset))
|
3a003baa2e99bfc16f0776c58359ab19ac616fe3
|
61d28d0598efb9636bd8ca6c5055b3197429e71c
|
/2_Forcing_WIN.R
|
42eee7415c7486952cb7437d78467f4effb37fef
|
[] |
no_license
|
DaDaDaDaDaLi/VIC_data_process
|
bf3ae2ab53ffbd4aac90522f6060cb50856e1992
|
efaca690c5cfb6206feda5a9f2aa290753a9eb99
|
refs/heads/master
| 2020-04-23T07:05:20.144922
| 2019-02-16T11:32:29
| 2019-02-16T11:32:29
| 170,995,971
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 4,465
|
r
|
2_Forcing_WIN.R
|
# This script extracts meteorological (wind) station data for the study basin.
lati = read.table("D:\\data_xijiang\\data_result\\forcing\\win\\SURF_CLI_CHN_MUL_DAY-PRE-13011-201001.TXT")[,2]
lati = lati/100
long = read.table("D:\\data_xijiang\\data_result\\forcing\\win\\SURF_CLI_CHN_MUL_DAY-PRE-13011-201001.TXT")[,3]
long = long/100
S_code = read.table("D:\\data_xijiang\\data_result\\forcing\\win\\SURF_CLI_CHN_MUL_DAY-PRE-13011-201001.TXT")[,1]
# Column 1 holds the station codes.
fdata<-data.frame(lati,long,S_code)
write.table(fdata,"D:\\data_xijiang\\data_result\\forcing\\arcgis_input_win_long_lati.txt",row.names=F,col.names=F,quote=F)
# Write the lon/lat coordinates and station codes for all national stations;
# ArcGIS is then used to clip out the basin's stations.
S_station_code <- read.table("D:\\data_xijiang\\data_result\\forcing\\arcgis_output_station_long_lati_XJ.txt",header=T,sep=",")[,4]
f_station_code <- factor(S_station_code)
S_code = levels(f_station_code)
# Convert the factor levels back to numeric station codes.
S_code=as.numeric(S_code)
v3=length(S_code)
# v3 = number of stations needed for the study basin.
# Everything above derives the basin's station codes; besides precipitation,
# the same approach works for the other meteorological variables.
# (left off here last time)
path <- "D:\\data_xijiang\\data_result\\forcing\\win"
# Data folder path.
FileNames <- dir(path)
# File names.
FilePath <- paste(path,FileNames,sep="\\")
# Full file paths.
v1 = length(FilePath)
# v1 = number of monthly data files.
for(i in 1:v1){
  v4=1
  # v4 = running output row counter.
  V_S <- read.table(file=FilePath[i])[,1]
  V_lati <-read.table(file=FilePath[i])[,2]
  V_lati <- V_lati/100
  V_long <-read.table(file=FilePath[i])[,3]
  V_long <- V_long/100
  V_year <-read.table(file=FilePath[i])[,5]
  V_month <- read.table(file=FilePath[i])[,6]
  V_day <- read.table(file=FilePath[i])[,7]
  V_win <- read.table(file=FilePath[i])[,8]
  v2=length(V_win)
  # v2 is the largest count: daily records for ALL national stations this month.
  for(j in 1:v2){
    if(V_win[j]==32700)
      V_win[j]=0
  }
  for(j in 1:v2){
    if(V_win[j]==32766){
      if(j<=5){
        V_win[j]=15
      }
      if(j>5){
        V_sum=0
        V_sum=V_win[j-5]+V_win[j-4]+V_win[j-3]+V_win[j-2]+V_win[j-1]
        V_win[j]=V_sum/5
        V_win[j]=round(V_win[j])
      }
    }
  }
  # 32766 (missing value) can also appear here; it is replaced by the mean
  # of the previous five days' values.
  # 32700 (trace value) was set to 0 by the first loop above.
  f_data=data.frame(V_S,V_lati,V_long,V_year,V_month,V_day,V_win)
  m_f_data=as.matrix(f_data)
  if(V_month[1] %in% c(1,3,5,7,8,10,12)){
    m_data <- matrix(,nrow=v3*31,ncol=7,byrow=T)
    for(k in 1:v2){
      for(l in 1:v3){
        if(V_S[k]==S_code[l]){
          m_data[v4,]=m_f_data[k,]
          v4=v4+1
        }
      }
    }
    write.table(m_data,file=paste("D:\\data_xijiang\\data_result\\forcing\\XJ_forcing_data\\win\\XJ_win",V_year[1],V_month[1],".txt",sep="_"),row.names=F,col.names=F,quote=F)
  }
  # 31-day months.
  else if(V_month[1] %in% c(4,6,9,11)){
    m_data <- matrix(,nrow=v3*30,ncol=7,byrow=T)
    for(k in 1:v2){
      for(l in 1:v3){
        if(V_S[k]==S_code[l]){
          m_data[v4,]=m_f_data[k,]
          v4=v4+1
        }
      }
    }
    write.table(m_data,file=paste("D:\\data_xijiang\\data_result\\forcing\\XJ_forcing_data\\win\\XJ_win",V_year[1],V_month[1],".txt",sep="_"),row.names=F,col.names=F,quote=F)
  }
  # 30-day months.
  # NOTE(review): the following dead `if(F)` block is what the February
  # `else if` below syntactically attaches to (since F is FALSE, the else
  # branch always gets evaluated) -- fragile but behaviour-preserving.
  if(F){
    x<-c(1990:2010)
    for(i in 1:21){
      if(x[i]%%4==0&&x[i]%%100!=0||x[i]%%400==0){
        print(x[i])
      }
    }
  }
  # The block above checks which years in the period are leap years;
  # 2012 and 2016 are leap years, the rest are common years.
  else if(V_month[1]==2){
    if(V_year[1]%%4==0&&V_year[1]%%100!=0||V_year[1]%%400==0){
      m_data <- matrix(,nrow=v3*29,ncol=7,byrow=T)
      for(k in 1:v2){
        for(l in 1:v3){
          if(V_S[k]==S_code[l]){
            m_data[v4,]=m_f_data[k,]
            v4=v4+1
          }
        }
      }
      write.table(m_data,file=paste("D:\\data_xijiang\\data_result\\forcing\\XJ_forcing_data\\win\\XJ_win",V_year[1],V_month[1],".txt",sep="_"),row.names=F,col.names=F,quote=F)
    }
    # February, leap year.
    else{
      m_data <- matrix(,nrow=v3*28,ncol=7,byrow=T)
      for(k in 1:v2){
        for(l in 1:v3){
          if(V_S[k]==S_code[l]){
            m_data[v4,]=m_f_data[k,]
            v4=v4+1
          }
        }
      }
      write.table(m_data,file=paste("D:\\data_xijiang\\data_result\\forcing\\XJ_forcing_data\\win\\XJ_win",V_year[1],V_month[1],".txt",sep="_"),row.names=F,col.names=F,quote=F)
    }
    # February, common year.
  }
  v4=1
}
# End of the wind-data extraction; the same approach applies to the other
# meteorological variables.
|
8ff541144ce2a0ab8b46703517272564aa10e6a6
|
a8cd1d7b01e62053f2bab2fe9c0e1544fa558aa0
|
/cachematrix.R
|
11f30f1a501d039b2194cd71725d7e3c91212384
|
[] |
no_license
|
lobes/ProgrammingAssignment2
|
387dbce2672a156f6eef11ed6ff8f965a2be6ff8
|
48e03f80cf2614d6bbfc70500034505dfc8e92c5
|
refs/heads/master
| 2020-06-17T13:00:36.093363
| 2019-07-12T04:13:31
| 2019-07-12T04:13:31
| 195,932,138
| 0
| 0
| null | 2019-07-09T04:23:31
| 2019-07-09T04:23:30
| null |
UTF-8
|
R
| false
| false
| 1,742
|
r
|
cachematrix.R
|
# This pair of functions will check to see if the inverse is stored in cache,
# and if not will calculate the inverse and set it to the cache.
# Generate a matrix that contains the following functions as a list:
# 1. set the values of the matrix
# 2. get the values of the matrix
# 3. set the inverse of the matrix
# 4. get the inverse of the matrix
makeCacheMatrix <- function(orig_matrix = matrix()) {
  # Wrap a matrix together with a memoised slot for its inverse.
  # Returns a list of four accessor closures, all sharing this function's
  # environment: set/get the matrix, and set/get the cached inverse.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate the cached inverse.
    set_matrix = function(y) {
      orig_matrix <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get_matrix = function() orig_matrix,
    # Store a freshly computed inverse in the cache.
    set_inverse = function(solution) cached_inverse <<- solution,
    # Return the cached inverse, or NULL if none has been set.
    get_inverse = function() cached_inverse
  )
}
# Check to see if the inverse is in the cache. If TRUE, return from cache.
# If FALSE, calculate the inverse, put in cache, and return the solution.
cacheSolve <- function(invertible, ...) {
  # Return the inverse of the matrix wrapped by `invertible` (a list built
  # by makeCacheMatrix). On a cache hit, announce it and return the stored
  # inverse; on a miss, compute with solve(), memoise, and return.
  cached <- invertible$get_inverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, store it, and fall through to return.
    cached <- solve(invertible$get_matrix())
    invertible$set_inverse(cached)
  } else {
    message("Getting inverse from cache")
  }
  cached
}
|
8b316b24ff47a4d05ed9e0d8ae69993ed9587081
|
e784dc9d52588bc6c00fa18fab014f6cf3fe73b7
|
/R-Finance-Programming/ch03_graph/13_mfrow.R
|
5187b2ff376c22ee7cc3753581f95fbaef4c3028
|
[] |
no_license
|
Fintecuriosity11/Finance
|
3e073e4719d63f741e9b71d29a97598fa73d565d
|
b80879ece1408d239991d1bb13306cc91de53368
|
refs/heads/master
| 2021-02-20T12:59:51.559033
| 2020-08-08T16:21:49
| 2020-08-08T16:21:49
| 245,337,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,798
|
r
|
13_mfrow.R
|
##########################################################################################################################################
# (NOTE) -> it is recommended to run this code sequentially!
# If an error occurs, open an issue under the github Finance/R-Finance-Programming path and it will be checked.
##########################################################################################################################################
### The par function
# par() sets plotting parameters; the settings apply to every plot drawn
# after it is called.
# See par's arguments for axis ticks, axis labels, and related options.
##########################################################################################################################################
### The mfrow argument.
# mfrow is used to draw several plots in one figure.
# It takes a length-2 vector: rows first, then columns.
# Plots are laid out row by row:
# the first row is filled first, then the second row.
# So with mfrow=c(2,2) and four plots, they are placed in that order.
par(mfrow=c(2,2)) # arrange plots on a 2x2 grid
# NOTE(review): x1, y1, x2, y2 are not defined in this file -- they are
# assumed to exist in the session from an earlier script; confirm.
plot(x1, y1)
plot(x2, y2)
curve(dnorm(x))
curve(sin)
graphics.off() # close all open graphics devices
############################################################ Result(print) ################################################################
# Draw the plots.
##########################################################################################################################################
|
9e9718f74814c3581b7fab32966043d233f39b46
|
9ef0ad8f18f19009244bb6b1608f129eeb20878f
|
/bin/Validation.R
|
a0e2341ef0852e384eef36be4571a8bfbe89d2d4
|
[] |
no_license
|
e-marcelo/EbitSim_MANET
|
8db2a56e20d27a0c3215cdae5bbb540f9208675f
|
31af7b4c900aac20c263b690a333e1422a4fc5b9
|
refs/heads/master
| 2021-01-11T18:11:06.532054
| 2017-03-13T17:00:19
| 2017-03-13T17:00:19
| 79,503,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,774
|
r
|
Validation.R
|
#!/usr/bin/env Rscript
# load libraries
## Summarizes data.
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
## Summarize `measurevar` within the groups defined by `groupvars`.
## Returns one row per group with N, the mean (renamed after the measured
## variable), sd, standard error of the mean, and the half-width of the
## confidence interval (default 95%).
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  require(plyr)

  # Like length(), but optionally skips NA values.
  valid_length <- function(x, na.rm=FALSE) {
    if (na.rm) {
      sum(!is.na(x))
    } else {
      length(x)
    }
  }

  # Per-group count, mean and standard deviation of the measured column.
  summary_df <- ddply(data, groupvars, .drop=.drop,
                      .fun=function(xx, col, na.rm) {
                        c(N    = valid_length(xx[, col], na.rm=na.rm),
                          mean = mean(xx[, col], na.rm=na.rm),
                          sd   = sd(xx[, col], na.rm=na.rm))
                      },
                      measurevar,
                      na.rm)

  # Rename the generic "mean" column after the measured variable.
  summary_df <- rename(summary_df, c("mean"=measurevar))

  # Standard error of the mean.
  summary_df$se <- summary_df$sd / sqrt(summary_df$N)

  # Two-sided t multiplier for the requested confidence level (df = N - 1);
  # e.g. conf.interval = .95 uses the .975 quantile.
  ci_multiplier <- qt(conf.interval/2 + .5, summary_df$N - 1)
  summary_df$ci <- summary_df$se * ci_multiplier

  summary_df
}
library(plyr)
library(ggplot2)
# Read the csv directory from the trailing command-line argument.
args <- commandArgs(trailingOnly = TRUE)
csv.dir <- tail(args, 1)
# Read the szydlowsky (experiment) data.
filenames <- list.files(pattern="\\.txt", path=csv.dir, full.names=TRUE)
csv.data <- ldply(llply(filenames, read.csv))
# Read the simulation data, one size class per file pattern.
filenames <- list.files(pattern="small.*\\.csv$", path=csv.dir, full.names=TRUE)
csv.data.sim.small <- ldply(llply(filenames, read.csv))
filenames <- list.files(pattern="medium.*\\.csv$", path=csv.dir, full.names=TRUE)
csv.data.sim.medium <- ldply(llply(filenames, read.csv))
filenames <- list.files(pattern="large.*\\.csv$", path=csv.dir, full.names=TRUE)
csv.data.sim.large <- ldply(llply(filenames, read.csv))
#filenames <- list.files(pattern="sim-.*\\.csv$", path=csv.dir, full.names=TRUE)
#csv.data.sim.small.2 <- ldply(llply(filenames, read.csv))
filenames <- list.files(pattern="messages-.*\\.csv$", path=csv.dir, full.names=TRUE)
csv.data.sim.messages <- ldply(llply(filenames, read.csv))
# Merge all simulation data.
csv.data.sim <- rbind(csv.data.sim.small, csv.data.sim.medium,
                      csv.data.sim.large)#, csv.data.sim.small.2)
# Empirical CDF of download time per experiment -> column `Probability`.
csv.data.sim <- ddply(csv.data.sim,.(Experiment),transform,
                      Probability = ecdf(Download.Time)(Download.Time))
# Keep only the columns of interest, renamed to match the experiment data.
sim.cdf <- csv.data.sim[,c("Download.Time", "Probability", "Experiment", "Client")]
names(sim.cdf) <- c("Time", "Probability", "Experiment", "Client")
# Merge the simulation CDF with the experiment data.
csv.data.all <- rbind(csv.data,sim.cdf)
# Select palette.
# NOTE(review): brewer.pal() comes from RColorBrewer, which is never
# loaded in this script — confirm it is attached elsewhere or add
# library(RColorBrewer) above.
my.cols <- brewer.pal(4, "Set1")
my.cols[4] <- "#000000" # change the last color to black
ggplot(csv.data[csv.data$Experiment == "cl-35-5",],
       aes(Time, Probability, colour=Client, linetype=Experiment)) +
  geom_line() + # plot normal
  # plot ebitsim
  geom_line(data=sim.cdf[sim.cdf$Experiment=="es-35-5",], size=1) +
  scale_size_manual(values=c("solid", "dashed"))
# Cluster ("cl") experiments only, without the EbitSim2 client.
d <- csv.data.all[grep("cl", csv.data.all$Experiment),]
d <- d[d$Client != "EbitSim2",]
p <- ggplot(data=d)
# NOTE(review): opts() was removed from ggplot2 long ago; modern code
# uses labs(title = ...) / theme() — confirm the ggplot2 version pinned
# for this project.
p + geom_line(aes(x=Time,y=Probability,color=Client)) +
  facet_grid (Experiment ~ .) +
  opts(title="Comparação de desempenho\nentre os experimentos e a simulação") +
  xlab("Tempo de Download (s)") + ylab("Probabilidade") +
  scale_colour_hue(name="Cliente")
d <- csv.data.all[grep("cl", csv.data.all$Experiment),]
d <- d[d$Client == "EbitSim" | d$Client == "mainline", ]
d <- d[d$Experiment == "cl-92-5", ]
p <- ggplot(data=d)
p <- p + geom_line(aes(x=Time,y=Probability,color=Client)) + facet_grid (Experiment ~ .)
p + opts(title="Desempenho dos Clientes BitTorrent\n") +
  xlab("Tempo de Download (s)") + ylab("Probabilidade") +
  scale_colour_hue(name="Cliente")
ggsave("experimento_cdf.png", dpi=300)
seeders.70 <- ggplot(data=csv.data[csv.data$Run < 30,])
seeders.30 <- ggplot(data=csv.data[csv.data$Run >= 30,])
seeders.30 + geom_line(aes(x=Elapsed.Time, y=Events.per.Second)) + facet_grid( Run ~ .)
# NOTE(review): `||` is scalar in R; for a row-wise filter this should
# almost certainly be the vectorised `|` — confirm the intended subset.
qplot(Elapsed.Time, Events.per.Second,data=csv.data[csv.data$Run >9 || csv.data$Run<20,], color=Run, geom="line")
# Unload libraries.
detach("package:plyr")
|
25520bda577315aec18c75688fe3dae93a8a27d2
|
c29a2534fb4e5224d49d3fed5e23f6c86987d055
|
/man/recalculate_expression.Rd
|
a0c42ec099a593ee672762f1ee3f37c478cf4df6
|
[] |
no_license
|
ddeweerd/MODifieRDev
|
8c1ae2cd35c297a5394671e05d3198b6f3b6fcf8
|
5660de4df282b57cd2da20e8fe493e438019b759
|
refs/heads/Devel
| 2020-03-28T18:37:56.271549
| 2019-11-07T10:45:09
| 2019-11-07T10:45:09
| 148,896,901
| 4
| 0
| null | 2019-08-20T14:14:35
| 2018-09-15T11:42:06
|
R
|
UTF-8
|
R
| false
| true
| 1,596
|
rd
|
recalculate_expression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_input.R
\name{recalculate_expression}
\alias{recalculate_expression}
\title{Recalculate collapsing probes to genes}
\usage{
recalculate_expression(MODifieR_input, method)
}
\arguments{
\item{MODifieR_input}{A MODifieR input object produced by one of the \code{create_input} functions}
\item{method}{character string for determining which method is used to choose a probe among
exactly 2 corresponding rows or when connectivityBasedCollapsing=FALSE. These are the options:
"MaxMean" (default) or "MinMean" = choose the row with the highest or lowest mean value, respectively.
"maxRowVariance" = choose the row with the highest variance (across the columns of \code{datET}).
"absMaxMean" or "absMinMean" = choose the row with the highest or lowest mean absolute value.
"ME" = choose the eigenrow (first principal component of the rows in each group). Note that with this
method option, connectivityBasedCollapsing is automatically set to FALSE.
"Average" = for each column, take the average value of the rows in each group
"function" = use this method for a user-input function (see the description of the argument
"methodFunction").
Note: if method="ME", "Average" or "function", the output parameters "group2row" and "selectedRow" are not informative.
}
}
\value{
MODifieR_input object
}
\description{
Recalculate collapsing probes to genes
}
\details{
Recalculate the collapsing of probes to genes based on the \code{method} options
}
\seealso{
\code{\link{create_input}}
}
\author{
Dirk de Weerd
}
|
62b4458a5aff5347a6decf33d5d469f7bfe908c4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/splinetree/examples/projectedR2.Rd.R
|
2504e9ba1432d64e4b9de09622d65454a98daf6c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
projectedR2.Rd.R
|
library(splinetree)
### Name: projectedR2
### Title: Computes percent of variation in projected response explained by
### a splinetree.
### Aliases: projectedR2
### ** Examples

# NOTE(review): `tree` is assumed to be a splinetree model supplied by the
# surrounding (machine-extracted) example context — confirm before running.
r2 <- projectedR2(tree)
|
8d88cfa0d3c32d0534c0787d8693c1082d1f55f8
|
70ab7729db8c949aff5742e6e7fb88b99165db50
|
/server.R
|
512fe827b48a387e828bbb31896bf23e6f2aa46a
|
[] |
no_license
|
vincentgombe/Developing-Data-Products-Project
|
87fccb9be69642a35e154707c285f501e4ed0395
|
03146c6a8670813a54fe33056448c500145fc79c
|
refs/heads/master
| 2021-05-11T20:21:25.765681
| 2018-01-14T15:51:11
| 2018-01-14T15:51:11
| 117,442,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,154
|
r
|
server.R
|
library(shiny)

# Server logic for a Poisson probability explorer.
# Inputs (IDs contain spaces, hence the backticks):
#   `Sample Size`          - upper bound of the x range plotted
#   `Given Occurances`     - base rate lambda per unit interval
#   `Interested Occurance` - the count z whose probability is of interest
#   `Interested Interval`  - interval length t (effective rate = lambda * t)
# Outputs: a density plot plus exact / upper-tail / lower-tail probabilities.
shinyServer(function(input, output) {

  # Density of Poisson(lambda * t) over 0..n, with the point probability
  # of z marked by crossing reference lines and text annotations.
  output$plot <- renderPlot({
    n <- input$`Sample Size`
    lambda <- input$`Given Occurances`
    z <- input$`Interested Occurance`
    t <- input$`Interested Interval`
    lambda2 <- lambda * input$`Interested Interval`
    p <- dpois(x = z, lambda = lambda * t)
    p2 <- round((p * 100), 3)  # percentage label shown on the plot
    library(ggfortify)
    ggdistribution(dpois, seq(0, n, 1), lambda = lambda2, fill = "blue") +
      geom_hline(yintercept = p) +
      annotate("text", 0.1, p, vjust = -0.5, label = paste(p2, "%")) +
      geom_vline(xintercept = z) +
      # Fix: this annotation was accidentally duplicated in the original,
      # stacking two identical text layers on top of each other.
      annotate("text", z, 0.005, hjust = 0.005, label = z) +
      ggtitle(paste("lambda =", lambda2)) +
      xlab("x: no of events") +
      ylab("probability")
  })

  # P(X == z) for X ~ Poisson(lambda * t).
  probExact <- reactive({
    lambda <- input$`Given Occurances`
    z <- input$`Interested Occurance`
    t <- input$`Interested Interval`
    dpois(x = z, lambda = lambda * t)
  })

  # P(X > z): upper tail.
  probUpper <- reactive({
    lambda <- input$`Given Occurances`
    z <- input$`Interested Occurance`
    t <- input$`Interested Interval`
    ppois(q = z, lambda = lambda * t, lower.tail = FALSE)
  })

  # P(X <= z): lower tail.
  probLower <- reactive({
    lambda <- input$`Given Occurances`
    z <- input$`Interested Occurance`
    t <- input$`Interested Interval`
    ppois(q = z, lambda = lambda * t, lower.tail = TRUE)
  })

  # Text outputs mirroring the probabilities and the chosen inputs.
  output$Exact <- renderText({
    probExact()
  })
  output$Upper <- renderText({
    probUpper()
  })
  output$Lower <- renderText({
    probLower()
  })
  output$z <- renderText(input$`Interested Occurance`)
  output$t <- renderText(input$`Interested Interval`)
})
|
88772c682db9202bcf052d6ca7e482f2a7e3f624
|
e9dc98ef9ef633666be0a91e51c4857f2c3eef32
|
/api/man/get_model_id.Rd
|
457c7155157680d49c4682bb7b1a09b2e0865a63
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dlebauer/pecan
|
b0de087b2107460e481c96f1499601cbd73694bd
|
36cc58406190809c1cb916ee633ee4e9f93559c6
|
refs/heads/master
| 2022-06-08T13:02:54.809873
| 2020-05-06T00:44:23
| 2020-05-06T00:44:23
| 217,627,062
| 0
| 1
|
NOASSERTION
| 2020-09-08T01:53:06
| 2019-10-25T23:08:15
|
R
|
UTF-8
|
R
| false
| true
| 701
|
rd
|
get_model_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model_id.R
\name{get_model_id}
\alias{get_model_id}
\title{Retrieve database ID of a particular version of a model}
\usage{
get_model_id(con, name, revision, multi_action = "last")
}
\arguments{
\item{con}{Database connection object (PqConnection)}
\item{name}{Model name (character)}
\item{revision}{Model version/revision (character)}
\item{multi_action}{Action to take if multiple models found
(character). Must be one of "first", "last" (default), "all", or "error".}
}
\value{
Model ID, as \code{integer64}
}
\description{
Retrieve database ID of a particular version of a model
}
\author{
Alexey Shiklomanov
}
|
3663da275327d118fd7e3a52663fcbe69269fe3a
|
ca8e2e11ea54ba365aa5ab758a6c34c2f2411d9a
|
/06_既是世间法、自当有分别.R
|
017eade975ae876961a6644411c8eb2f46aee3e7
|
[
"MIT"
] |
permissive
|
byaxb/RDataAnalytics
|
0ecbf8b28918e6278aecc113d1856b2a670cedc0
|
7debb44a44ae1486e4b1555991dfa7db9ea84e38
|
refs/heads/master
| 2023-01-28T14:02:13.069370
| 2023-01-11T01:27:32
| 2023-01-11T01:27:32
| 96,006,564
| 169
| 91
|
MIT
| 2022-05-06T05:58:10
| 2017-07-02T06:29:54
|
R
|
UTF-8
|
R
| false
| false
| 36,188
|
r
|
06_既是世间法、自当有分别.R
|
# 06_既是世间法、自当有分别------------------------------------------------
#这里的分别,指的是分门别类
#更具体的讲,是根据特征做出判断、作出分类
#分类与回归,几乎是有监督学习的代名词
#也是机器学习/数据挖掘最核心的内容
#旨在揭示自变量与因变量之间的映射关系
#因变量为类别变量时,称之为分类
#因变量为连续变量时,称之为回归
#本实验以分类为主
#在R语言里,caret包提供了分类与回归的统一框架
#caret包也是R里边使用最广泛的包之一
#小伙伴们可以多加留意
# Data Exploration --------------------------------------------------------
# Clear the workspace.
# NOTE(review): rm(list = ls()) in a shared script is generally
# discouraged; kept here because this is a classroom walkthrough.
rm(list = ls())
library(tidyverse)
# Load the data (student scores, GBK/CP936-encoded CSV).
cjb_url <- "data/cjb.csv"
cjb <- read_csv(cjb_url,
                locale = locale(encoding = "CP936"))
# Total score = sum of the nine subject columns; convert sex / class /
# arts-vs-science division to factors; drop all-zero rows; keep the
# columns from xb through wlfk.
cjb %<>%
  mutate(zcj = rowSums(.[, 4:12])) %>%
  mutate_at(vars(xb, bj, wlfk), factor) %>%
  filter(zcj != 0) %>%
  select(xb:wlfk)
# The usual flow starts with exploratory analysis. For classification /
# regression problems we additionally inspect, per predictor, how its
# distribution differs across the target classes — i.e. its potential as
# a discriminating feature — using caret::featurePlot() and
# plotluck::plotluck().
library(caret)
featurePlot(
  x = cjb %>%
    select(yw:sw),
  y = cjb[, "wlfk"] %>%
    as_vector(),
  plot = "density",
  scales = list(
    x = list(relation = "free"),
    y = list(relation = "free")
  ),
  adjust = 1.5,
  pch = "|"
)
# library(devtools)
# devtools::install_github("stefan-schroedl/plotluck")
library(plotluck)
plotluck(cjb, wlfk ~ xb)
# Marginal distribution of every variable.
plotluck(cjb, . ~ 1)
# Distribution of each predictor grouped by the target.
plotluck(cjb, wlfk ~ .,
         opts = plotluck.options(verbose = TRUE))
# The plots above come out ordered by conditional entropy (ascending),
# not by column order; disable that ordering to keep the original order:
plotluck(cjb, wlfk ~ .,
         opts = plotluck.options(verbose = TRUE,
                                 multi.entropy.order = FALSE))
plotluck(cjb, wlfk ~ yw + sx)
# k-fold Cross Validation -------------------------------------------------
# Discussing evaluation before modeling may seem premature, but the two
# are equally important: "All models are wrong, but some are useful".
# A model is only ever an approximation; machine learning searches a
# known model family for the closest one. Unlike classical statistics
# (p-values, significance), ML/data mining judges a model by measured
# performance — e.g. accuracy — on held-out data.
#
# Three common splitting schemes:
# 1. Hold-out: split once, typically 70% train / 30% test.
# 2. k-fold cross-validation: each fold serves once as the test set while
#    the other k-1 folds train the model; the final score is the average
#    over the k test folds. More stable than hold-out, and the most
#    widely applicable of the three.
# 3. Out-of-bag: with bootstrap sampling (ensemble learning), roughly 36%
#    of the rows are never drawn and form a natural test set.
# Hold-out split, done by hand:
set.seed(2012)
train_set_idx <- sample(nrow(cjb), nrow(cjb) * 0.7)
str(train_set_idx)
#> int [1:541] 169 576 218 722 575 673 411 700 687 696 ...
length(train_set_idx) / nrow(cjb)
#> [1] 0.6989664
train_set <- cjb[train_set_idx, ]
# test_set <- ?
# Industrial-strength alternative (commented out):
# caret::createDataPartition() produces a split stratified on the target.
# train_set_idx <-
#   caret::createDataPartition(cjb$wlfk,
#                              p = 0.7, list = FALSE)
# str(train_set_idx)
# length(train_set_idx) / nrow(cjb)
# train_set <- cjb[train_set_idx, ]
# # test_set <- ?
# k-fold cross-validation splitter: partition the row indices of `data`
# into k random folds. Returns a list of k integer vectors; the split is
# reproducible via `seed`.
cv_kfold <- function(data, k = 10, seed = 2012) {
  total_rows <- nrow(data)
  # Cycle the fold labels 1..k across all rows, then shuffle reproducibly.
  fold_labels <- rep_len(1:k, total_rows)
  set.seed(seed)
  fold_labels <- sample(fold_labels)
  # Collect, for each fold, the row indices carrying that label.
  lapply(1:k, function(fold_id) {
    which(fold_labels == fold_id)
  })
}
cv_kfold(cjb)
#> (a list of 10 index vectors; the original output showed the first fold)
sapply(cv_kfold(cjb), length)
#> [1] 78 78 78 78 77 77 77 77 77 77
# There is no universal rule for choosing k; 5 or 10 are typical for the
# data sizes at hand. For very small data k can be pushed to its extreme,
# k = nrow(data) — leave-one-out cross-validation:
kfolds <- cv_kfold(cjb, nrow(cjb))
# Here we use k = 10.
kfolds <- cv_kfold(cjb)
sapply(kfolds, length)
# k-fold CV, being a classic, is implemented in many packages, e.g.
# caret or modelr — no need to reinvent it in real projects:
# library(caret)
# kfolds <- createFolds(cjb$wlfk, k = 10)
# sapply(kfolds, length)
# Global performance ------------------------------------------------------
# Before evaluating a classifier, check for class imbalance. With
# imbalanced classes, plain accuracy / error rate are misleading: if 10
# of 10,000 people have SARS, predicting "no SARS" for everyone scores
# (10000 - 10) / 10000 = 99.90% accuracy yet is useless. In that setting
# one needs recall, precision, and their harmonic mean F1.
# The earlier plotluck(cjb, . ~ 1) output showed our classes are roughly
# balanced, so this experiment uses only accuracy and error rate.
# Hand-rolled metric logger follows (global accumulator):
global_performance <- NULL
# Append one row of performance metrics to the global data frame
# `global_performance` (in .GlobalEnv).
#   method    - model name, e.g. "kknn"
#   type      - "Train" or "Test"
#   predicted - predicted class labels
#   actual    - ground-truth class labels
imetrics <- function(method, type, predicted, actual) {
  # Confusion matrix of predictions vs. ground truth; its diagonal holds
  # the correctly classified counts.
  confusion <- table(predicted, actual)
  hit_rate <- sum(diag(confusion)) / sum(confusion)
  record <- data.frame(
    method = method,
    type = type,
    accuracy = hit_rate,
    error_rate = 1 - hit_rate
  )
  # Append the record to the global performance log.
  assign("global_performance",
         rbind(get("global_performance", envir = .GlobalEnv), record),
         envir = .GlobalEnv)
}
# Many packages already implement these metrics (the original kept an
# "industrial" variant commented out here, using MLmetrics / Metrics /
# ModelMetrics for accuracy and classification error). Likewise,
# caret::modelLookup() lists 237 model types — counting variants,
# hundreds. This tutorial covers a handful of classics: decision trees,
# k-nearest neighbours, naive Bayes, neural networks, support vector
# machines, and random forests.
# kknn --------------------------------------------------------------------
load('data/cjb.rda')
# Keep the nine subject-score columns plus the label; make the label
# (arts vs. science) a factor.
cjb <- cjb %>%
  select(4:13) %>%
  mutate(wlfk = factor(wlfk))
# NOTE(review): no set.seed() before this sample(), so the 70/30 split is
# not reproducible — confirm whether that is intended.
train_set_idx <- sample(nrow(cjb), 0.7 * nrow(cjb))
test_set_idx <- (1:nrow(cjb))[-train_set_idx]
library(kknn)
set.seed(2012)
# Training error: kknn is a lazy learner, so "training" and prediction
# happen in the same call — here the test set is the training set itself.
imodel <- kknn(wlfk ~ .,
               train = cjb[train_set_idx, ],
               test = cjb[train_set_idx, ])
predicted_train <- imodel$fit
# ce: classification error
Metrics::ce(cjb$wlfk[train_set_idx], predicted_train)
#> [1] 0.1090573
# Test error: same call with the held-out rows as the test set.
imodel <- kknn(wlfk ~ .,
               train = cjb[train_set_idx, ],
               test = cjb[-train_set_idx, ])
predicted_test <- imodel$fit
Metrics::ce(cjb$wlfk[-train_set_idx], predicted_test)
#> [1] 0.1888412
# Select the best k and kernel (leave-one-out CV inside train.kknn).
train_kk <- train.kknn(
  wlfk ~ .,
  data = cjb,
  kmax = 100,
  kernel = c(
    "rectangular",
    "epanechnikov",
    "cos",
    "inv",
    "gaussian",
    "optimal"
  )
)
# Inspect the result.
train_kk
#> Minimal misclassification: 0.2105943; best kernel: gaussian; best k: 49
# Misclassification rate for every k (row index) and kernel (column).
train_kk$MISCLASS
# The best k value:
best_k <- train_kk$best.parameters$k
best_k
#> [1] 49
best_kernel <- train_kk$best.parameters$kernel
best_kernel
#> [1] "gaussian"
# The minimal misclassification rate:
min_ce <- train_kk$MISCLASS[best_k,
                            train_kk$best.parameters$kernel]
# Simpler:
min(train_kk$MISCLASS)
# Error rates for all k/kernel combinations.
ce_kk <- train_kk$MISCLASS
#View(ce_kk)
# Minimal error rate.
min_ce <- min(train_kk$MISCLASS)
str(ce_kk)
# Visualise error rate vs. k, one curve per kernel, with dashed lines at
# the optimum.
ce_kk %>%
  as.data.frame() %>%
  mutate(k = row_number()) %>%
  gather(key = "kernel", value = "ce", -k) %>%
  ggplot(aes(x = k, y = ce, colour = kernel)) +
  geom_vline(aes(xintercept = best_k), linetype = "dashed") +
  geom_hline(aes(yintercept = min_ce), linetype = "dashed") +
  geom_line() +
  geom_point(aes(shape = kernel)) +
  theme(legend.position = c(0.9, 0.8))
# k-fold cross-validation with the tuned k / kernel.
library(kknn)
sp <- Sys.time() # record the start time
cat("\n[Start at:", as.character(sp))
for (i in 1:length(kfolds)) {
  curr_fold <- kfolds[[i]] # the current fold
  train_set <- cjb[-curr_fold,] # training set: all other folds
  test_set <- cjb[curr_fold,] # test set: the held-out fold
  predicted_train <- kknn(
    wlfk ~ .,
    train = train_set,
    test = train_set,
    k = best_k,
    kernel = best_kernel
  )$fit
  imetrics("kknn", "Train", predicted_train, train_set$wlfk)
  predicted_test <- kknn(
    wlfk ~ .,
    train = train_set,
    test = test_set,
    k = best_k,
    kernel = best_kernel
  )$fit
  imetrics("kknn", "Test", predicted_test, test_set$wlfk)
}
ep <- Sys.time()
cat("\tFinised at:", as.character(ep), "]\n")
cat("[Time Ellapsed:\t",
    difftime(ep, sp, units = "secs"),
    " seconds]\n")
global_performance
#> (20 rows: per-fold Train/Test accuracy and error rate; Train accuracy
#> around 0.83, Test accuracy roughly 0.66-0.85 in the original run)
# Every learner below needs the same cross-validation loop, so (by the
# rule of three) the copy-pasted loop above is wrapped into a reusable
# function. `learner` is the name of a function taking (formula, train,
# test, ...) and returning list(predicted_train, predicted_test); extra
# arguments in ... are forwarded to it. Metrics for each fold are logged
# through imetrics(); wall-clock time is printed around the run.
kfold_cross_validation <-
  function(formula, data, kfolds, learner, ...) {
    start_time <- Sys.time()
    cat("\n[Start at:", as.character(start_time))
    extra_args <- list(...)
    lapply(kfolds, function(fold_idx) {
      train_set <- data[-fold_idx, ]  # training set: all other folds
      test_set <- data[fold_idx, ]    # test set: the held-out fold
      predictions <- do.call(learner,
                             args = c(list(formula = formula,
                                           train = train_set,
                                           test = test_set),
                                      extra_args))
      imetrics(learner,
               "Train",
               predictions$predicted_train,
               train_set$wlfk)
      imetrics(learner,
               "Test",
               predictions$predicted_test,
               test_set$wlfk)
    })
    end_time <- Sys.time()
    cat("\tFinised at:", as.character(end_time), "]\n")
    cat("[Time Ellapsed:\t",
        difftime(end_time, start_time, units = "secs"),
        " seconds]\n")
  }
# CV learner for kknn. As a lazy method, kknn fits and predicts in one
# call; `$fit` holds the predicted labels. Extra args (k, kernel, ...)
# are forwarded to kknn().
learn.kknn <- function(formula, train, test, ...) {
  list(
    predicted_train = kknn(formula, train = train, test = train, ...)$fit,
    predicted_test = kknn(formula, train = train, test = test, ...)$fit
  )
}
# Reset the metric log, then run 10-fold CV for kknn with the tuned
# hyper-parameters.
global_performance <- NULL
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.kknn",
  k = best_k,
  kernel = best_kernel
)
# CART --------------------------------------------------------------------
# Grow the decision tree (rpart.plot auto-loads rpart).
library(rpart.plot)
imodel <- rpart(wlfk ~ .,
                data = cjb[train_set_idx,])
imodel
#> (tree printout: n = 541; root split on wl >= 85.5, then sx, yw, xb, hx,
#> ls, sw; '*' marks terminal nodes)
# Training error of the unpruned tree.
predicted_train <-
  predict(imodel,
          newdata = cjb[train_set_idx,],
          type = "class")
Metrics::ce(cjb$wlfk[train_set_idx],
            predicted_train)
#> [1] 0.1959335
# The test error is what we actually care about.
predicted_test <-
  predict(imodel,
          newdata = cjb[-train_set_idx, ],
          type = "class")
Metrics::ce(cjb$wlfk[-train_set_idx],
            predicted_test)
#> [1] 0.2575107
# Pruning: inspect the complexity-parameter (CP) table and its plot.
printcp(imodel, digits = 2)
#> (CP table: 13 splits down to rel error 0.41, xerror bottoming at ~0.65)
plotcp(imodel)
imodel$cptable
#> (same table as a matrix: CP, nsplit, rel error, xerror, xstd)
imodel$cptable
# Standard pruning recipe: pick the CP with the smallest cross-validated
# error (xerror) and prune to it.
opt <- which.min(imodel$cptable[, "xerror"])
cp <- imodel$cptable[opt, "CP"]
#> [1] 0.01
imodel_pruned <- prune(imodel, cp = cp)
print(imodel_pruned)
# Compare performance before vs. after pruning.
predicted_train <- predict(imodel_pruned,
                           newdata = cjb[train_set_idx,],
                           type = "class")
Metrics::ce(cjb$wlfk[train_set_idx],
            predicted_train)
#> [1] 0.1959335
predicted_test <- predict(imodel_pruned,
                          newdata = cjb[-train_set_idx,],
                          type = "class")
Metrics::ce(cjb$wlfk[-train_set_idx],
            predicted_test)
#> 0.2575107
# The basic way to draw the tree (rarely satisfying):
plot(imodel)
text(imodel)
# A much nicer rendering with rpart.plot:
rpart.plot(
  imodel_pruned,
  type = 4,
  fallen = F,
  branch = 0.5,
  round = 0,
  leaf.round = 2,
  clip.right.labs = T,
  cex = 0.85,
  under.cex = 0.75,
  box.palette = "GnYlRd",
  branch.col = "gray",
  branch.lwd = 2,
  extra = 108,
  # mind the meaning of the `extra` argument
  under = T,
  split.cex = 1
)
# Besides visualising, export the tree as a set of rules.
library(rattle)
rules <- asRules(imodel_pruned, compact = TRUE)
#> (one rule per leaf, ordered by confidence, e.g.
#>  R 7 [22%,0.90] sx< 85.5 xb=female -> arts)
# CV learner for CART: grow a full tree, then prune it at the CP value
# whose cross-validated error (xerror) is smallest.
learn.rpart <- function(formula, train, test, ...) {
  fitted_tree <- rpart(formula, train)
  best_row <- which.min(fitted_tree$cptable[, "xerror"])
  best_cp <- fitted_tree$cptable[best_row, "CP"]
  pruned_tree <- prune(fitted_tree, cp = best_cp)
  list(
    predicted_train = predict(pruned_tree, train, type = "class"),
    predicted_test = predict(pruned_tree, test, type = "class")
  )
}
# 10-fold CV for the pruned CART model.
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.rpart"
)
# RandomForest ------------------------------------------------------------
library(randomForest)
set.seed(2012)
imodel <- randomForest(wlfk ~ .,
                       ntree = 25,
                       data = cjb[train_set_idx, ])
predicted_train <- predict(imodel,
                           newdata = cjb[train_set_idx,],
                           type = "response")
Metrics::ce(cjb$wlfk[train_set_idx],
            predicted_train)
#>[1] 0.001848429
predicted_test <- predict(imodel,
                          newdata = cjb[-train_set_idx,],
                          type = "response")
Metrics::ce(cjb$wlfk[-train_set_idx],
            predicted_test)
#> [1] 0.1845494
# Scan forest sizes ntree = 1..500, recording the test error of each.
# NOTE(review): this refits 500 forests (slow) and selects ntree on the
# test set, which leaks test information — acceptable only as a
# classroom illustration.
rf_ces <- sapply(1:500, function(x) {
  set.seed(2012)
  imodel <- randomForest(wlfk ~ .,
                         ntree = x,
                         data = cjb[train_set_idx, ])
  predicted_train <- predict(imodel,
                             newdata = cjb[train_set_idx,],
                             type = "response")
  Metrics::ce(cjb$wlfk[train_set_idx],
              predicted_train)
  #>[1] 0
  predicted_test <- predict(imodel,
                            newdata = cjb[-train_set_idx,],
                            type = "response")
  Metrics::ce(cjb$wlfk[-train_set_idx],
              predicted_test)
})
which.min(rf_ces)
plot(rf_ces, type = "o")
# Out-of-bag confusion matrix and per-class error.
imodel$confusion
#> (class.error roughly 0.27 / 0.20 in the original run)
# CV learner for random forests; extra args (e.g. ntree) are forwarded
# to randomForest().
learn.randomForest <- function(formula, train, test, ...) {
  forest <- randomForest(formula, train, ...)
  list(
    predicted_train = predict(forest, train, type = "response"),
    predicted_test = predict(forest, test, type = "response")
  )
}
# 10-fold CV for the random forest, using the ntree that minimised the
# test error in the scan above.
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.randomForest",
  ntree = which.min(rf_ces)
)
# NaiveBayes --------------------------------------------------------------
library(e1071)
imodel <- naiveBayes(wlfk ~ .,
                     data = cjb[train_set_idx, ])
# Training error.
predicted_train <- predict(imodel,
                           newdata = cjb[train_set_idx,],
                           type = "class")
Metrics::ce(cjb$wlfk[train_set_idx], predicted_train)
#> [1] 0.2920518
# Test error.
predicted_test <- predict(imodel,
                          newdata = cjb[-train_set_idx,],
                          type = "class")
Metrics::ce(cjb$wlfk[-train_set_idx], predicted_test)
#> [1] 0.27897
# CV learner for naive Bayes.
learn.naiveBayes <- function(formula, train, test, ...) {
  nb_model <- naiveBayes(formula, train)
  list(
    predicted_train = predict(nb_model, train, type = "class"),
    predicted_test = predict(nb_model, test, type = "class")
  )
}
# 10-fold CV for naive Bayes.
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.naiveBayes"
)
# Logistic Regression -----------------------------------------------------
library(ggplot2)
# 以下代码仅为复现课件中的动画,感兴趣的小伙伴可以了解一下
# library(animation)
# saveGIF(
# expr = {
# mov_frame <- 5 * (1:30)
# for (i in mov_frame) {
# x <- seq(-i, i, len = 1000)
# y <- 1 / (1 + exp(-x))
# p <- ggplot(data.frame(x, y), aes(x = x, y = y)) +
# geom_line()
# if (i == head(mov_frame, 1) ||
# i == tail(mov_frame, 1)) {
# #开始和结束时多停留一会儿
# lapply(1:5, function(x)
# plot(p))
# }
# plot(p)
# }
# },
# movie.name = "animation.gif",
# convert = "gm convert",
# interval = 0.2
# )
# dev.off()
#
imodel <- glm(wlfk ~ .,
data = cjb[train_set_idx,],
family = binomial(link = "logit"))
predicted_logit <- predict(imodel,
newdata = cjb[train_set_idx,],
type = "response")
predicted_train <-
rep(levels(cjb$wlfk)[2], length(train_set_idx))
predicted_train[predicted_logit < 0.5] <- levels(cjb$wlfk)[1]
Metrics::ce(cjb$wlfk[train_set_idx], predicted_train)
#> [1] 0.2181146
predicted_logit <- predict(imodel,
newdata = cjb[-train_set_idx, ],
type = "response")
predicted_test <-
rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))
predicted_test[predicted_logit < 0.5] <-
levels(cjb$wlfk)[1]
Metrics::ce(cjb$wlfk[-train_set_idx], predicted_test)
#> [1] 0.1888412
#找到最好的分隔阈值
best_threshold <- NA
min_err <- Inf
cur_threshold <- 0.1
for (cur_threshold in seq(0.1, 0.9, by = 0.001)) {
predicted_test <-
rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))
predicted_test[predicted_logit < cur_threshold] <-
levels(cjb$wlfk)[1]
cur_err <- Metrics::ce(cjb$wlfk[-train_set_idx],
predicted_test)
if (cur_err < min_err) {
best_threshold <- cur_threshold
min_err <- cur_err
}
}
best_threshold
#> [1] 0.592
# Alternatively, the same threshold search can be written with sapply().
threshold_range <- seq(0.1, 0.9, by = 0.001)
ce_set <- sapply(threshold_range, function(cur_threshold) {
  predicted_test <-
    rep(levels(cjb$wlfk)[2], nrow(cjb[-train_set_idx,]))
  predicted_test[predicted_logit < cur_threshold] <-
    levels(cjb$wlfk)[1]
  cur_err <- Metrics::ce(cjb$wlfk[-train_set_idx],
                         predicted_test)
})
# Best threshold
threshold_range[which.min(ce_set)]
# Corresponding classification error rate
min(ce_set)
# Perform k-fold cross validation
#' Logistic-regression learner for kfold_cross_validation().
#'
#' Fits a binomial GLM on `train`, then classifies both `train` and `test`
#' by thresholding the predicted probability of the second response level.
#'
#' @param formula Model formula; the response must be a two-level factor.
#' @param train Training-fold data frame.
#' @param test Test-fold data frame.
#' @param ... Must contain `best_threshold`, the probability cut-off below
#'   which the first response level is predicted.
#' @return A list with character vectors `predicted_train` and `predicted_test`.
learn.LogisticRegression <- function(formula, train, test, ...) {
  dot_args <- list(...)
  # FIX: derive the response levels from the training data instead of the
  # global `cjb`, so the learner is self-contained and reusable on other
  # data sets. (glm codes the first level as 0 and the second as 1, so a
  # high predicted probability means the second level.)
  response_var <- all.vars(formula)[1]
  response_levels <- levels(train[[response_var]])
  imodel_kfold <-
    glm(formula, train, family = binomial(link = "logit"))
  predicted_logit <-
    predict(imodel_kfold, train, type = "response")
  predicted_train <- rep(response_levels[2], nrow(train))
  predicted_train[predicted_logit < dot_args[["best_threshold"]]] <-
    response_levels[1]
  predicted_logit <-
    predict(imodel_kfold, test, type = "response")
  predicted_test <- rep(response_levels[2], nrow(test))
  predicted_test[predicted_logit < dot_args[["best_threshold"]]] <-
    response_levels[1]
  list(predicted_train = predicted_train,
       predicted_test = predicted_test)
}
# Cross-validate the logistic-regression learner; best_threshold is
# forwarded to learn.LogisticRegression() through `...`.
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.LogisticRegression",
  best_threshold = threshold_range[which.min(ce_set)]
)
# Artificial Neural Network -----------------------------------------------
library(nnet)
set.seed(2012)
# Single-hidden-layer network with 7 hidden units.
imodel <- nnet(wlfk ~ .,
               data = cjb[train_set_idx, ],
               size = 7)
# Components of the fitted nnet object.
names(imodel)
#> [1] "n" "nunits" "nconn"
#> [4] "conn" "nsunits" "decay"
#> [7] "entropy" "softmax" "censored"
#> [10] "value" "wts" "convergence"
#> [13] "fitted.values" "residuals" "lev"
#> [16] "call" "terms" "coefnames"
#> [19] "contrasts" "xlevels"
# Network architecture: inputs, hidden units, outputs.
imodel$n
#> [1] 10 7 1
# Fitted connection weights (output truncated).
imodel$wts
#> [1] -0.394367962 0.341672486 -0.305656476
#> [4] 0.609244299 0.344983392 0.524696717
#> [7] 0.049098761 0.577261671 0.553892391
#> [79] 0.851107738 0.275935098 -0.237562349
#> [82] 0.109386068 0.637609693 -2.774100396
#> [85] 0.019783268
# Fitted output probabilities per training sample (output truncated).
imodel$fitted.values
# [,1]
# 1 0.8048857
# 2 0.2047307
# 3 0.8048857
#
# 540 0.8048857
# 541 0.2047307
# Training-set classification error.
predicted_train <- predict(imodel,
                           newdata = cjb[train_set_idx,],
                           type = "class")
Metrics::ce(cjb$wlfk[train_set_idx], predicted_train)
#> [1] 0.1996303
# Test-set classification error.
predicted_test <- predict(imodel,
                          newdata = cjb[-train_set_idx,],
                          type = "class")
Metrics::ce(cjb$wlfk[-train_set_idx], predicted_test)
#> [1] 0.1759657
# Tuning neural-network hyper-parameters is relatively involved, and
# in general there is no absolute recipe to follow.
# We could of course write loops to search the parameter space by hand,
# but helpers such as e1071::tune.nnet() already do much of that work.
# Below, the caret package is used instead,
# selecting parameters through a grid search.
tune_results <- e1071::tune.nnet(
  wlfk ~ .,
  data = cjb,
  decay = c(0.01, 0.03, 0.1, 0.3, 0.6, 0.9),
  size = 1:7
)
library(caret)
set.seed(2012)
# Grid of candidate hidden-layer sizes and weight-decay values.
nn_grid <- expand.grid(size = c(1, 3, 7, 9),
                       decay = c(0.01, 0.03, 0.1, 0.3, 0.6, 0.9))
# nn_grid <- expand.grid(.decay = c(0.5, 0.1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7),
#                        .size = c(3, 5, 10, 20))
imodel <- train(
  wlfk ~ .,
  data = cjb,
  method = "nnet",
  maxit = 10000,
  tuneGrid = nn_grid
)
# Best (size, decay) combination found by the grid search.
imodel$bestTune
#> size decay
#> 9 1 0.6
# Inspect the tuning results.
plot(imodel)
predicted_train <- predict(imodel,
                           newdata = cjb[train_set_idx,],
                           type = "raw")
Metrics::ce(cjb$wlfk[train_set_idx],
            predicted_train)
#> [1] 0.1697417
predicted_test <- predict(imodel,
                          newdata = cjb[-train_set_idx,],
                          type = "raw")
Metrics::ce(cjb$wlfk[-train_set_idx],
            predicted_test)
#> [1] 0.1896552
# Plot the neural network.
library(NeuralNetTools)
# Refit with the best caret parameters so the network can be drawn.
# NOTE(review): `train_set` is not defined earlier in this script —
# presumably cjb[train_set_idx, ]; confirm.
imodel2 <- nnet(
  wlfk ~ .,
  data = train_set,
  decay = imodel$bestTune$decay,
  size = imodel$bestTune$size,
  maxit = 2000
)
imodel2$wts
str(imodel2)
library(NeuralNetTools)
plotnet(
  imodel2,
  rel_rsc = c(1.8, 3),
  circle_cex = 3,
  cex_val = 0.75,
  bord_col = "lightblue",
  max_sp = TRUE
)
# Perform k-fold cross validation.
# Neural-network learner for kfold_cross_validation(): fits nnet() on the
# training fold (extra arguments are forwarded to nnet) and returns class
# predictions for both the training and the test fold.
learn.nnet <- function(formula, train, test, ...) {
  fitted_net <- nnet(formula, train, ...)
  predictions <- lapply(list(train, test), function(fold) {
    predict(fitted_net, fold, type = "class")
  })
  list(predicted_train = predictions[[1]],
       predicted_test = predictions[[2]])
}
# Cross-validate the nnet learner with caret's best decay/size; the extra
# arguments are forwarded to nnet() by learn.nnet().
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.nnet",
  decay = imodel$bestTune$decay,
  size = imodel$bestTune$size,
  maxit = 2000
)
# Support Vector Machine --------------------------------------------------
library(kernlab)
set.seed(2012)
# Fit an SVM with kernlab's default RBF kernel.
imodel <- ksvm(wlfk ~ .,
               data = cjb[train_set_idx, ])
predicted_train <- predict(imodel,
                           newdata = cjb[train_set_idx,],
                           type = "response")
# Training-set classification error.
Metrics::ce(cjb$wlfk[train_set_idx], predicted_train)
#> [1] 0.1497227
predicted_test <- predict(imodel,
                          newdata = cjb[-train_set_idx,],
                          type = "response")
# Test-set classification error.
Metrics::ce(cjb$wlfk[-train_set_idx], predicted_test)
#> [1] 0.1759657
imodel
# Parameter tuning can also be done through caret.
library(caret)
# Grid over the RBF width (sigma) and the cost parameter C.
# NOTE(review): C = -5:20 includes non-positive cost values — confirm
# whether the intent was 2^(-5:20) or similar.
svm_grid <- expand.grid(sigma = 2 ^ (-10:4),
                        C = -5:20)
set.seed(2012)
imodel <- train(
  wlfk ~ .,
  data = cjb[train_set_idx, ],
  method = "svmRadial",
  preProc = c("center", "scale"),
  tuneGrid = svm_grid
)
imodel$bestTune
#> sigma C
#> 2 0.25 1
# The train() result can likewise be plotted.
plot(imodel)
# Perform k-fold cross validation.
# SVM learner for kfold_cross_validation(): fits kernlab::ksvm() on the
# training fold (extra arguments are forwarded to ksvm) and returns
# response-type predictions for both folds.
learn.svm <- function(formula, train, test, ...) {
  svm_fit <- ksvm(formula, train, ...)
  predict_fold <- function(fold) predict(svm_fit, fold, type = "response")
  list(predicted_train = predict_fold(train),
       predicted_test = predict_fold(test))
}
# Cross-validate the tuned SVM; extra arguments are forwarded to ksvm().
# NOTE(review): kernlab's ksvm() takes its RBF width via
# kpar = list(sigma = ...); a bare `gamma =` argument is e1071::svm()
# vocabulary — confirm that ksvm() actually honors `gamma` here.
kfold_cross_validation(
  formula = wlfk ~ .,
  data = cjb,
  kfolds = kfolds,
  learner = "learn.svm",
  C = imodel$bestTune$C,
  gamma = imodel$bestTune$sigma
)
# Variable Importance -----------------------------------------------------
#完成了模型训练、模型评估,故事基本告一段落
#再回顾一下本讲开始所讲的featurePlot
#进行完模型训练之后,咱们再通过变量重要性印证一下
#变量重要性,有很多评价方法
#既有 Model Specific Metrics,也有Model Independent Metrics
#如果是采用caret框架进行训练的话,多种指标可选
#具体请参阅
#http://topepo.github.io/caret/variable-importance.html
library(randomForest)
imodel <- randomForest(wlfk ~ ., data = cjb)
# Analyse variable importance: sort variables by mean decrease in Gini
# impurity and draw a labelled bar chart.
randomForest::importance(imodel) %>%
  as.data.frame() %>%
  rownames_to_column(var = "variables") %>%
  arrange(desc(MeanDecreaseGini)) %>%
  mutate(variables = factor(variables,
                            levels = variables)) %>%
  ggplot(aes(x = variables,
             y = MeanDecreaseGini,
             fill = variables)) +
  geom_bar(stat = "identity", width = 0.5) +
  geom_text(aes(
    y = MeanDecreaseGini * 1.02,
    label = format(MeanDecreaseGini, digits = 4)
  ))
# Model Comparison --------------------------------------------------------
# Compare models: average the per-fold error rates per method and plot
# train vs test error side by side.
# NOTE(review): `global_performance` is not created in the visible code —
# presumably populated by kfold_cross_validation(); confirm.
global_performance %>%
  group_by(method, type) %>%
  summarise(mean_error_rate = mean(error_rate)) %>%
  arrange(type, mean_error_rate) %>%
  ggplot(aes(
    x = fct_inorder(method),
    y = mean_error_rate,
    fill = type
  )) +
  geom_bar(stat = "identity", position = "dodge") +
  geom_text(aes(label = format(mean_error_rate, digits = 3)),
            position = position_dodge(width = 1)) +
  scale_fill_manual(values = c("orange", "darkgrey")) +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
#本实验中,为了减少小伙伴们熟悉问题背景本身的成本
#再次使用了学生成绩这份数据
#受数据本身的限制,也让我们错过了很多的精彩:
#比如:
#这份数据太干净,没有缺失值,也就不要通过mice::md.pattern()
#之类的函数来观察缺失模式,或是通过近邻法等方法来填补缺失值
#又如:我们面对的是类相对均衡的问题,文理科学生数大体相当
#而实际问题中,也会有很多类不平衡的问题,这个时候可能专门
#需要对数据、算法进行处理,评估指标也不能用简单的正确率来衡量
#再比如:我们的数据量相对较少,没有涉及到复杂数据处理
#分类与回归(实际上本讲只是涉及到分类,不过二者本质一致)到此结束
#代码中,算法原理等阐述较少,请小伙伴们参照PPT讲义,
#或是相应的机器学习/数据挖掘教材
#当然,几乎所有的包、函数的帮助文档中,都列举了相应的参考文献,
#小伙伴们可自行参考
#分类与回归算法,其体量应该是数以百计的,
#caret包中列举了百余种算法
#本讲中,只是列举了比较经典的集中。有很多算法并未考虑纳入,
#比如:
#MASS::lda()
#adabag::bagging()
#adabag::boosting()
#caretEnsemble::caretStack
#xgboost::xgboost
#即便是演示过的算法,参数调优过程也显得比较粗糙
#更多的精彩,由小伙伴们自行探索吧
#毕竟,这份代码只是一个引导性的参考,
#并不是可以简单套用的标准模板
# The End ^-^ -------------------------------------------------------------
|
4c3a2b94fbfa82549006a5f9beace81445f6b877
|
f645b968f7290bba933e7db487366135cde14b62
|
/R/combine_smartstorevisit.R
|
f6c2ea57ede9a3e498e24b175d7ffb37a6103a53
|
[] |
no_license
|
drtagkim/ProjTkJy
|
c5ea762ea6a1acaf9c42fd002d801565ba5cae28
|
8574d24cfe264fdc3245a0cba37316f817c92618
|
refs/heads/main
| 2023-04-15T18:48:27.914293
| 2021-04-17T06:31:45
| 2021-04-17T06:31:45
| 355,756,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 456
|
r
|
combine_smartstorevisit.R
|
# Combine per-chunk smart-store visit files (ssvisit_*) into one table and
# join them onto the main smart_store_data set.
f=list.files(pattern='^ssvisit_')
dsetssvisit=f %>% map(function(x){readRDS(x)})
length(dsetssvisit)
ss_visit <- bind_rows(dsetssvisit)
dim(ss_visit)
names(smart_store_data)
# NOTE(review): smart_store_data and ss_grade_zzim must already exist in
# the session — they are not created in this script.
smart_store_data <- smart_store_data %>% inner_join(ss_visit)
smart_store_data <- smart_store_data %>% inner_join(ss_grade_zzim)
dim(smart_store_data)
# Persist the merged table, show it, and copy it to the clipboard.
saveRDS(smart_store_data,file='smart_store_data.rds')
View(smart_store_data)
clipr::write_clip(smart_store_data)
|
a7bf420af934e3d3c673390e688a7958d4652fa3
|
e154cdb4788ff04f97af5c2b12504cdb879b8199
|
/kaggle/PortoSeguro/Numerical_Vars_FE.R
|
5c0c333524f2a8cb00439a9bbd9bef1b94395c46
|
[] |
no_license
|
melwinjose1991/LearningMachineLearning
|
970a3bcf21bb369c0afe335f34e4ec7a3f5830e4
|
d09a0fbea44cb8c53763446f32119b22bc8970c6
|
refs/heads/master
| 2021-01-17T05:02:41.193335
| 2018-05-17T01:35:23
| 2018-05-17T01:35:23
| 83,053,730
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,668
|
r
|
Numerical_Vars_FE.R
|
library(data.table)
###### Removing tables() ######
rm(list=tables()$NAME)
gc()
### Reading Data ###
fe_train = fread("data/train.csv")
fe_test = fread("data/test.csv")
cols = colnames(fe_train)
setkey(fe_train, id)
setkey(fe_test, id)
### Numerical Attributes ###
numeric_cols = cols[which(sapply(fe_train, class) == "numeric")]
numeric_cols
### Counting -1s (this data set's missing-value sentinel) per row ###
fe_train[, num_neg_one := sum(.SD==-1), by=id, .SDcols=numeric_cols]
fe_test[, num_neg_one := sum(.SD==-1), by=id, .SDcols=numeric_cols]
### Replacing -1 with the column mean of the non-missing values ###
cols_with_neg1 = sapply(numeric_cols, function(col){sum(fe_train[,col,with=FALSE]==-1)>1})
cols_with_neg1 = numeric_cols[cols_with_neg1]
cols_with_neg1
for(col in cols_with_neg1){
  mean_val = fe_train[fe_train[,get(col)]!=-1, mean(get(col))]
  fe_train[fe_train[,get(col)]==-1, eval(col) := mean_val]
  summary(fe_train[,col,with=FALSE])
}
# Same imputation for the test set (using test-set column means).
cols_with_neg1 = sapply(numeric_cols, function(col){sum(fe_test[,col,with=FALSE]==-1)>1})
cols_with_neg1 = numeric_cols[cols_with_neg1]
cols_with_neg1
for(col in cols_with_neg1){
  mean_val = fe_test[fe_test[,get(col)]!=-1, mean(get(col))]
  fe_test[fe_test[,get(col)]==-1, eval(col) := mean_val]
  summary(fe_test[,col,with=FALSE])
}
### Automated Feature Engineering after replacement ###
## List of pairwise feature-combination functions used to build
## automatically engineered features from pairs of numeric columns.

# Element-wise sum of two columns.
fXAddY <- function(x, y) {
  x + y
}
# Element-wise difference x - y.
fXMinusY <- function(x, y) {
  x - y
}
# Element-wise difference y - x.
fYMinusX <- function(x, y) {
  y - x
}
# Element-wise product of two columns.
fXMultY <- function(x, y) {
  x * y
}
# funcs <- list(XaddY = fXAddY, XminusY = fXMinusY, YminusX = fYMinusX, XmultiY = fXMultY)
# BUG FIX: "XmultiY" previously mapped to fXMinusY (subtraction), so the
# generated *_XmultiY_* columns were differences, not products.
funcs <- list(XmultiY = fXMultY)
auto_engineered_without_missing = vector('character')
dim(fe_train)
# For every unordered pair of numeric columns, apply each function in
# `funcs` and store the result as a new column named
# <left>_<operation>_<right> in both the train and test tables.
# NOTE(review): 1:(length(numeric_cols)-1) misbehaves when there are
# fewer than two numeric columns — seq_len() would be safer.
for(i in 1:(length(numeric_cols)-1) ){
  for(j in (i+1):length(numeric_cols)){
    left_col = numeric_cols[i]
    right_col = numeric_cols[j]
    for(operation in names(funcs)){
      new_col = paste0(left_col, "_", operation, "_", right_col)
      auto_engineered_without_missing = c(auto_engineered_without_missing, new_col)
      fe_train[, (new_col) := mapply(funcs[[operation]],
                  fe_train[,left_col,with=FALSE], fe_train[,right_col, with=FALSE]) ]
      fe_test[, (new_col) := mapply(funcs[[operation]],
                  fe_test[,left_col,with=FALSE], fe_test[,right_col, with=FALSE]) ]
    }
  }
}
dim(fe_train)
### Engineered Features ###
cols_to_write = c("id", numeric_cols, "num_neg_one",
                  auto_engineered_without_missing)
###### PCA ######
cols_for_pca = c(numeric_cols, auto_engineered_without_missing)
# Stack train and test so both are projected with the same rotation.
combined = rbind(fe_train[,cols_for_pca,with=FALSE],
                 fe_test[,cols_for_pca,with=FALSE])
train_n_rows = dim(fe_train)[1]
train_ids = fe_train$id
train_target = fe_train$target
rm(fe_train)
test_n_rows = dim(fe_test)[1]
test_ids = fe_test$id
rm(fe_test)
gc()
pca = prcomp(combined, scale.=TRUE)
# Project onto the first 10 principal components.
new_dim = as.matrix(combined) %*% as.matrix(pca$rotation[,1:10])
fe_train = as.data.frame(new_dim[1:train_n_rows,])
fe_train[,"id"] = train_ids
fe_train[,"target"] = train_target
fe_test = as.data.frame(new_dim[(train_n_rows+1):dim(new_dim)[1],])
fe_test[,"id"] = test_ids
fwrite(fe_train, "data/afe_train_pca_1.csv",
       quote=FALSE, sep=",", row.names=FALSE)
fwrite(fe_test, "data/afe_test_pca_1.csv",
       quote=FALSE, sep=",", row.names=FALSE)
### Writing ###
# NOTE(review): at this point fe_train/fe_test have been replaced by the
# 10-column PCA data frames, so selecting cols_to_write (and using
# with=FALSE on plain data.frames) will fail. This section probably needs
# to run before the PCA step — confirm the intended order.
tables()
fwrite(fe_train[,c(cols_to_write,"target"),with=FALSE],
       "data/afe_train_1.csv", quote=FALSE, sep=",", row.names=FALSE)
fwrite(fe_test[,cols_to_write, with=FALSE],
       "data/afe_test_1.csv", quote=FALSE, sep=",", row.names=FALSE)
###### Removing tables() ######
rm(list=tables()$NAME)
gc()
|
7b8f8da6545187ac97c0a6f3cba0b7be50a04b49
|
ef58ea5f8792a292262f7561c18d94fd37b6d2bf
|
/tests/testthat/test_grapes-land-grapes.R
|
e3bc66759dfd12b854464c49ffc81382ba80f0a8
|
[] |
no_license
|
kevinwkc/replyr
|
f1e32de4ce2b0ed753195deb91d13d35cb8ed0ea
|
ac898b408b8b8f42e499ebc1b32a390ae88c69a2
|
refs/heads/master
| 2021-01-22T08:52:53.421412
| 2017-05-27T23:54:28
| 2017-05-27T23:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
test_grapes-land-grapes.R
|
library('replyr')
context("test land")
# Exercise replyr's "land" operators, which appear to write a pipeline
# result into a variable: %->% with a bare name, %->_% with a name given
# as a string literal or held in a variable (see replyr docs to confirm).
test_that("test_grapes-land-grapes.R", {
  library("dplyr")
  7 %>% sin() %->% z1
  7 %>% sin() %->_% 'z2'
  varname <- 'z3'
  7 %>% sin() %->_% varname
})
|
6588af740be6e7828293ce023fdec215eacf6857
|
49645c4e57889635638399e88cb490a49b79607d
|
/R_scripts/Columbia_single_cell_bootcamp/tutorial_code.R
|
955403c286adfb3a82335aceb9c925d15b48c18e
|
[] |
no_license
|
pbpayal/Bioinformatics-Documents
|
f9440c5efb735c5f9ac705f15832d4eb163248cc
|
3c79fc8c9afc87b962c7297edde8cbf5dffe12b0
|
refs/heads/master
| 2023-05-21T02:04:12.057441
| 2021-06-14T19:55:09
| 2021-06-14T19:55:09
| 170,368,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,466
|
r
|
tutorial_code.R
|
#Session 4: Introduction of scRNA-seq Data Analysis (Hands-on Lab)
#load required packages
library(SingleR)
library(Seurat)
library(cluster)
library(umap)
library(pheatmap)
library(viper)
library(MAST)
library(ggplot2)
library(ggrepel)
library(infercnv)
library(MASS)
library(SeuratData)
library(Hmisc)
# Seurat walkthrough -- data loading and cleaning
AvailableData()
InstallData("ifnb")
LoadData("ifnb")
table(ifnb$stim)
# Downsample to 2000 cells for ease of analysis (seed fixed for
# reproducibility).
set.seed(1234)
ifnb=ifnb[,sample(1:ncol(ifnb),2000)]
table(ifnb$stim)
# Visualize data-quality distributions per stimulation condition.
VlnPlot(ifnb, features = c("nFeature_RNA", "nCount_RNA"), ncol = 2,pt.size=0,group.by="stim")
ncol(ifnb)
## Filter out low-quality cells and potential doublets by UMI count.
ifnb <- subset(ifnb, subset = nCount_RNA > 1000 & nCount_RNA < 25000)
VlnPlot(ifnb, features = c("nFeature_RNA", "nCount_RNA"), ncol = 2,pt.size=0,group.by="stim")
ncol(ifnb)
# Split data by experiment batch (stimulation condition).
ifnb.list <- SplitObject(ifnb, split.by = "stim")
# Normalize each batch with SCTransform.
ifnb.list <- lapply(X = ifnb.list, FUN = SCTransform)
# Integrate the batches to perform batch correction.
features <- SelectIntegrationFeatures(object.list = ifnb.list, nfeatures = 3000)
ifnb.list <- PrepSCTIntegration(object.list = ifnb.list, anchor.features = features)
immune.anchors <- FindIntegrationAnchors(object.list = ifnb.list, normalization.method = "SCT",
                                         anchor.features = features)
immune.combined.sct <- IntegrateData(anchorset = immune.anchors, normalization.method = "SCT")
##NOTE: To speed up this process for large datasets and maintain consistency as you collect new data, can set one high-quality sample as a reference
# PCA dimensionality reduction on the integrated data.
immune.combined.sct <- RunPCA(immune.combined.sct, features=VariableFeatures(object=immune.combined.sct))
ElbowPlot(immune.combined.sct)
# Supervised LDA using the published annotations as labels.
immune.combined.sct <- RunLDA(immune.combined.sct, features=VariableFeatures(object=immune.combined.sct),labels = immune.combined.sct$seurat_annotations)
# 2-D embeddings for visualization, each plotted overall and split by batch.
immune.combined.sct <- RunUMAP(immune.combined.sct, reduction = "pca", dims = 1:30)
immune.combined.sct <- RunTSNE(immune.combined.sct, reduction = "pca", dims = 1:30)
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="seurat_annotations") + NoLegend()
DimPlot(immune.combined.sct, reduction = "tsne",label = TRUE,repel=T,group.by="seurat_annotations") + NoLegend()
DimPlot(immune.combined.sct, reduction = "pca",label = TRUE,repel=T,group.by="seurat_annotations") + NoLegend()
DimPlot(immune.combined.sct, reduction = "lda",label = TRUE,repel=T,group.by="seurat_annotations") + NoLegend()
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="seurat_annotations",split.by="stim") + NoLegend()
DimPlot(immune.combined.sct, reduction = "tsne",label = TRUE,repel=T,group.by="seurat_annotations",split.by="stim") + NoLegend()
DimPlot(immune.combined.sct, reduction = "pca",label = TRUE,repel=T,group.by="seurat_annotations",split.by="stim") + NoLegend()
DimPlot(immune.combined.sct, reduction = "lda",label = TRUE,repel=T,group.by="seurat_annotations",split.by="stim") + NoLegend()
## Density contours over the UMAP embedding, colored by annotated cluster.
ggplot(data.frame(UMAP_1=immune.combined.sct@reductions$umap@cell.embeddings[,1],UMAP_2=immune.combined.sct@reductions$umap@cell.embeddings[,2],cluster=immune.combined.sct$seurat_annotations),aes(x=UMAP_1,y=UMAP_2,color=cluster))+geom_point(size=0.01)+geom_density_2d(color="black")+theme_bw()+theme(panel.border=element_blank(),panel.grid.major=element_blank(),panel.grid.minor=element_blank(),axis.line=element_line(colour="black"))+xlim(-15,15)+ylim(-15,20)
## Louvain clustering over a shared-nearest-neighbor graph.
immune.combined.sct <- FindNeighbors(immune.combined.sct, dims = 1:30, verbose = FALSE)
# Cluster at ten resolutions from 0.1 to 1 (algorithm 1 = Louvain).
immune.combined.sct <- FindClusters(immune.combined.sct, resolution=seq(0.1,1,by=0.1), verbose = FALSE,algorithm=1)
### Compare high-resolution vs low-resolution Louvain clustering.
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="integrated_snn_res.0.1") + NoLegend()
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="integrated_snn_res.1") + NoLegend()
# PAM (k-medoids) clustering in PCA space with k = 5 and k = 20.
nclust=5
clust5=pam(immune.combined.sct@reductions$pca@cell.embeddings,nclust)
immune.combined.sct$pam5=clust5$clustering
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="pam5") + NoLegend()
nclust=20
clust20=pam(immune.combined.sct@reductions$pca@cell.embeddings,nclust)
immune.combined.sct$pam20=clust20$clustering
DimPlot(immune.combined.sct, reduction = "umap",label = TRUE,repel=T,group.by="pam20") + NoLegend()
# Silhouette-score evaluation of the two PAM solutions.
plot(silhouette(clust5,dist(immune.combined.sct@reductions$pca@cell.embeddings)),col=1:5,border=NA)
plot(silhouette(clust20,dist(immune.combined.sct@reductions$pca@cell.embeddings)),col=1:20,border=NA)
summary(silhouette(clust5,dist(immune.combined.sct@reductions$pca@cell.embeddings)))$avg.width
summary(silhouette(clust20,dist(immune.combined.sct@reductions$pca@cell.embeddings)))$avg.width
# Mean silhouette width across k = 5..10 PAM solutions.
silhouette_scores=sapply(5:10,function(x){
  clust=pam(immune.combined.sct@reductions$pca@cell.embeddings,x)
  summary(silhouette(clust,dist(immune.combined.sct@reductions$pca@cell.embeddings)))$avg.width
})
plot(5:10,silhouette_scores,pch=18)
lines(5:10,silhouette_scores)
## Hybrid Louvain approach with silhouette scoring
#' Function to select optimal Louvain clustering of single-cell matrix from 10
#' alternative resolution values. Sub-samples 1000 cells 100 times at each resolution
#' value to compute mean and standard deviation of silhouette score.
#' @param mat: matrix with rows as principal component vectors and columns as samples
#' @param clust: matrix with rows as samples as each column as a clustering vector for a given resolution
#' outputs list of mean silhouette scores and standard deviations of silhouette scores for each clustering.
sil_subsample=function(mat,clust){
  # One row per random subsample, one column per candidate clustering.
  out=as.data.frame(matrix(rep(NA,100*ncol(clust)),nrow=100))
  for(x in 1:100){
    # Subsample up to 1000 cells; distance = 1 - Pearson correlation.
    i=sample(1:ncol(mat),min(1000,ncol(mat)))
    d=as.dist(1 - cor(mat[,i], method = "pearson"))
    for(j in 1:ncol(clust)){
      # A degenerate single-cluster solution is scored 0.
      if(length(table(clust[i,j]))==1){out[x,j]=0}
      if(length(table(clust[i,j]))>1){
        sil=silhouette(as.numeric(clust[i,j]),d)
        out[x,j]=mean(sil[, "sil_width"])}}
  }
  # Summarise across the 100 subsamples.
  means=apply(out,2,mean)
  sd=apply(out,2,sd)
  return(list(means,sd))
}
# Collect the ten Louvain clusterings and score them with sil_subsample().
clust=immune.combined.sct@meta.data[,which(grepl("integrated_snn_res.",colnames(immune.combined.sct@meta.data)))]
mat=as.data.frame(t(immune.combined.sct$pca@cell.embeddings))
out=sil_subsample(mat,clust)
means=out[[1]]
sd=out[[2]]
x=seq(0.1,1,by=0.1)
# Error-bar plot of mean silhouette score vs Louvain resolution.
errbar(x,means,means+sd,means-sd,ylab="mean silhouette score",xlab="resolution parameter")
lines(x,means)
# Pick the largest resolution achieving the highest mean silhouette score.
best=tail(x[which(means==max(means))],n=1)
legend("topright",paste("Best",best,sep = " = "))
# Adopt the winning resolution's labels as the working clustering.
immune.combined.sct$seurat_clusters=immune.combined.sct@meta.data[,which(colnames(immune.combined.sct@meta.data)==paste("integrated_snn_res.",best,sep=""))]
Idents(immune.combined.sct) <- "seurat_clusters"
plot(DimPlot(immune.combined.sct, reduction = "umap",label = TRUE) + NoLegend())
## Differential expression
# Marker tests used below: MAST, wilcox, roc.
#'Function to plot heatmap of custom gene list grouped by cluster
#'truncates color scale to >5th percentile and <95th percentile, shades by quantile.
#' @param dat: a matrix input with genes as rows and samples as columns
#' @param clust: a vector of cluster labels
#' @param genes: a vector of genes to plot
#' @param genes_by_cluster: a boolean indicator of whether genes are grouped by cluster identity (e.g. top5 genes in cluster 1 followed by top5 genes in cluster2, etc.)
#' @param n_top_genes_per_cluster: a number of top genes per cluster being plotted if genes_by_cluster=T. Otherwise ignored.
#' @param color_palette: A manual color palette for clusters. Defaults to hue_pal(). If provided, must be vector of colors same length as the number of unique clusters.
#' @param scaled: a boolean indicator of whether data are already scaled. Defaults to F, in which case row-wise z-score scaling is applied.
#' The function outputs a custom heatmap of manually specified genes, grouped by cluster, default
#' behavior is to apply row-wise z-score scaling and plot the top 5 genes per cluster.
geneHeatmap_plot=function(dat,clust,genes,genes_by_cluster=T,n_top_genes_per_cluster=5,color_palette=NA,scaled=F){
  identities <- levels(clust)
  # Default cluster colors from scales::hue_pal() unless a palette is supplied.
  if(is.na(color_palette)){my_color_palette <- hue_pal()(length(identities))}
  else{my_color_palette=color_palette}
  features=genes
  # Cap the plotted cells at 10,000 randomly chosen columns.
  i=sample(1:ncol(dat),min(10000,ncol(dat)),replace = F)
  x=dat[features,i]
  df <- data.frame(clust[i],clust[i])
  rownames(df)=colnames(x)
  colnames(df)=c("cluster","cluster2")
  anno_colors <- list(cluster = my_color_palette)
  names(anno_colors$cluster) <- levels(df$cluster)
  # Order cells by cluster so the heatmap shows contiguous cluster blocks.
  o=order(df$cluster)
  x=x[,o]
  df=df[o,]
  df=df[1]
  # Deduplicated quantile breaks for the color scale.
  quantile_breaks <- function(xs, n = 10) {
    breaks <- quantile(xs, probs = seq(0, 1, length.out = n))
    breaks[!duplicated(breaks)]
  }
  # Row-wise z-score unless the input is already scaled.
  if(scaled==F){t=as.matrix(apply(x,1,function(x){(x-mean(x))/sd(x)}))}
  if(scaled==T){t=as.matrix(x)}
  # Separate quantile breaks for negative and positive values around 0.
  mat_breaks <- c(quantile_breaks(t[which(t<0)], n = 10),0,quantile_breaks(t[which(t>0)], n = 10))
  mat_breaks=mat_breaks[2:(length(mat_breaks)-1)] #restrict range of data to quantiles 5%-95%, extreme values excluded
  if(genes_by_cluster){
    # Annotate gene rows with the cluster each block of genes belongs to.
    anno_colors$group=anno_colors$cluster
    anno_row=data.frame(group=unlist(lapply(unique(df$cluster),function(x){rep(x,n_top_genes_per_cluster)})))
    gene_names=rownames(x)
    rownames(x)=1:nrow(x)
    if(!scaled){return(pheatmap(x, cluster_rows=FALSE,show_rownames=T,cluster_cols=FALSE, annotation_row = anno_row,annotation_col=df,breaks=mat_breaks,color = colorRampPalette(colors = c('blue', 'white', 'red'))(length(mat_breaks)),fontsize_row = 10,show_colnames = F,annotation_colors = anno_colors,scale="row",gaps_row=(2:length(unique(clust))-1)*n_top_genes_per_cluster,annotation_names_row = F,labels_row=gene_names,row_annotation_legend=F))}
    if(scaled){return(pheatmap(x, cluster_rows=FALSE,show_rownames=T,cluster_cols=FALSE, annotation_row = anno_row,annotation_col=df,breaks=mat_breaks,color = colorRampPalette(colors = c('blue', 'white', 'red'))(length(mat_breaks)),fontsize_row = 10,show_colnames = F,annotation_colors = anno_colors,gaps_row=(2:length(unique(clust))-1)*n_top_genes_per_cluster,annotation_names_row = F,labels_row=gene_names,row_annotation_legend=F))}
  }
  else{
    if(!scaled){return(pheatmap(x, cluster_rows=FALSE,show_rownames=T,cluster_cols=FALSE, annotation_col=df,breaks=mat_breaks,color = colorRampPalette(colors = c('blue', 'white', 'red'))(length(mat_breaks)),fontsize_row = 8,show_colnames = F,annotation_colors = anno_colors,scale="row"))}
    if(scaled){return(pheatmap(x, cluster_rows=FALSE,show_rownames=T,cluster_cols=FALSE, annotation_col=df,breaks=mat_breaks,color = colorRampPalette(colors = c('blue', 'white', 'red'))(length(mat_breaks)),fontsize_row = 8,show_colnames = F,annotation_colors = anno_colors))}
  }
}
library(dplyr)
library(scales)
# Compare three marker-detection tests: Wilcoxon, ROC, and MAST.
markers <- FindAllMarkers(immune.combined.sct, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.5,test.use = "wilcox")
head(markers)
markers <- FindAllMarkers(immune.combined.sct, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.5,test.use = "roc")
head(markers)
markers <- FindAllMarkers(immune.combined.sct, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.5,test.use = "MAST")
head(markers)
# Top 5 markers per cluster (by average log2 fold-change), plotted as a heatmap.
top10 <- markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_log2FC)
geneHeatmap_plot(immune.combined.sct@assays$SCT@counts,as.factor(immune.combined.sct$seurat_clusters),top10$gene,n_top_genes_per_cluster = 5)
# Run SingleR cell-type inference against the HPCA and Blueprint references.
# FIX: `numCores = numCores` referenced a variable that is never defined in
# this script; default to a single core.
immune.combined.sct.singler=CreateSinglerObject(immune.combined.sct[["SCT"]]@counts, annot = NULL,
                               project.name = "ifnb", min.genes = 0,
                               technology = "10X", species = "Human", citation = "",
                               do.signatures = F, clusters = NULL, numCores = 1,
                               fine.tune=F,variable.genes = "de",reduce.file.size = T,do.main.types = T)
# Copy fine-grained and main-type labels plus their p-values for both
# references onto the Seurat object's metadata.
immune.combined.sct$hpca_labels=immune.combined.sct.singler$singler[[1]][[1]][[2]]
immune.combined.sct$hpca_main_labels=immune.combined.sct.singler$singler[[1]][[4]][[2]]
immune.combined.sct$blueprint_labels=immune.combined.sct.singler$singler[[2]][[1]][[2]]
immune.combined.sct$blueprint_main_labels=immune.combined.sct.singler$singler[[2]][[4]][[2]]
immune.combined.sct$hpca_pvals=immune.combined.sct.singler$singler[[1]][[1]][[3]]
immune.combined.sct$hpca_main_pvals=immune.combined.sct.singler$singler[[1]][[4]][[3]]
immune.combined.sct$blueprint_pvals=immune.combined.sct.singler$singler[[2]][[1]][[3]]
immune.combined.sct$blueprint_main_pvals=immune.combined.sct.singler$singler[[2]][[4]][[3]]
# Filter SingleR labels: drop labels with p-value > 0.05 and labels
# assigned to fewer than 50 cells.
l=immune.combined.sct$blueprint_main_labels
l[which(immune.combined.sct$blueprint_main_pvals>0.05)]=NA
table(l)
l[which(l %in% names(which(table(l)<50)))]=NA
immune.combined.sct$celltype=l
plot(DimPlot(immune.combined.sct, reduction = "umap",label=TRUE,group.by="celltype",repel=T))
#saveRDS(immune.combined.sct,"immune.combined.sct.rds")
# Cross-tabulate inferred cell types against the Louvain clusters.
table(immune.combined.sct$celltype,immune.combined.sct$seurat_clusters)
|
185cd824a3f28e9cbd1ee248d375334bb9f03e29
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/rstudioserver_analysis/spikeins/integrate_chic_spikeins_with_facs.R
|
0870b67247b7ece04f5a31e691ff9ec5e00a8d73
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
integrate_chic_spikeins_with_facs.R
|
# Jake Yeung
# Date of Creation: 2020-08-14
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/integrate_chic_spikeins_with_facs.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(scchicFuncs)
# Load ChIC --------------------------------------------------------------
|
b64f4cdf380e5c951e33bc0fc71e76d1c711fe62
|
bf6eeabe8154eb0c192c1f27603dbd53fca4bdec
|
/man/show.Rd
|
9d9ab56a207bc1ceca8a01b77195ff5f533d528c
|
[] |
no_license
|
gokmenzararsiz/MLSeq
|
a81484c77bc77cc43a9317a5cc71ec10eb751a63
|
f58cf5297d97b1f338d1748cc38df75f2e2accd3
|
refs/heads/master
| 2021-01-17T09:21:45.177052
| 2016-12-22T07:42:47
| 2016-12-22T07:42:47
| 18,428,996
| 1
| 1
| null | 2016-04-19T13:10:50
| 2014-04-04T05:58:29
|
R
|
UTF-8
|
R
| false
| true
| 532
|
rd
|
show.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/methods.R
\docType{methods}
\name{show-methods}
\alias{show}
\alias{show,MLSeq-method}
\alias{show-methods}
\title{Show method for MLSeq objects}
\usage{
\S4method{show}{MLSeq}(object)
}
\arguments{
\item{object}{an \code{MLSeq} object returned from \code{classify} function.}
}
\description{
Prints out the information from the trained model using \code{classify} function.
}
\author{
Gokmen Zararsiz
}
\seealso{
\code{\link{classify}}
}
|
075f52fb2bc65ba252123da38efb7726b21c078e
|
75284442478bb6d9bc3502d1893a09c08e67aadb
|
/man/to_percent_change.Rd
|
ff233372d9e9c754f06e264b1a19c26be8a738e8
|
[] |
no_license
|
tbadams45/wrviz
|
5f95649516850da54e126b9364bab7a243abc203
|
35da48b9f6998bec138b3a042a00744a34a0fb81
|
refs/heads/master
| 2020-04-06T06:56:49.537233
| 2017-06-14T21:02:36
| 2017-06-14T21:02:36
| 62,154,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,070
|
rd
|
to_percent_change.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{to_percent_change}
\alias{to_percent_change}
\title{Convert decimals difference to percent differences.}
\usage{
to_percent_change(decimal_list, baseline = 1)
}
\arguments{
\item{decimal_list}{The list to convert.}
\item{baseline}{The value that represents 0\% percent change.}
}
\value{
A list of the same length as decimal_list.
}
\description{
Converts a list of decimal numbers (e.g. 0.7, 1.0, 1.3) to their
corresponding percent difference representation. The representation will
depend on what value is selected as the baseline, i.e. 0\% percent change.
}
\details{
This function uses round() to avoid any weird floating point representation
errors. Because of this, the best precision you can obtain is integers for
your percent differences (e.g. can only get "52\%", not "52.3\%")
}
\examples{
\dontrun{to_percent_change(seq(0.5,1.5,by=0.1))
to_percent_change(seq(-0.3,0.7,by=0.1),baseline=0)
to_percent_change(seq(0.512,1.512,by=0.1)) # answer is rounded}
}
|
30972d3ff774253f94a963c34984677b04eab16b
|
05363a01f0583ea0977f21cd627d2284f59be457
|
/R/classify.R
|
847517bdb31369a25fdf739b8d9dcf5a8815ac9e
|
[] |
no_license
|
emilygoren/MixtClust
|
6fa523bf13b07953e45855edea46a56753a64010
|
0865a1b896196f9d88f83c2260f082628c138d68
|
refs/heads/master
| 2022-03-12T01:41:54.492726
| 2022-03-04T03:35:40
| 2022-03-04T03:35:40
| 262,897,695
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,490
|
r
|
classify.R
|
#' Classify New Observations
#'
#' @description Classify new, possibly incomplete, observations arising from a finite mixture of multivariate
#' t distributions.
#'
#' @param newobs A matrix with new observations (rows), \eqn{p} columns
#' (dimensions), and missing entries set to \code{NA}.
#' @param params A list of parmaters defining a finite mixture of
#' multivariate t distributions,
#' usually obtained from \code{\link{MixtClust}} (see details).
#'
#' @details Classify new observations according to the finite mixture of t distirbutions
#' specified by the parameter values in \eqn{params}, a named list with elements:
#' \itemize{
#' \item{"pi"}{Mixing proportions. A vector of length \eqn{K} that
#' sums to one.}
#' \item{"nu"}{Degrees of freedom. A vector of length \eqn{K} with
#' entries at least equal to three (thus requiring the existance of the first
#' two moments.)}
#' \item{"mu"}{Locations. A \eqn{K \times p} matrix, where the
#' \eqn{k}-th row is the location \eqn{\mu_k \in R^p} for cluster
#' \eqn{k}.}
#' \item{"Sigma"}{Dispersions. A \eqn{p \times p \times K} array, where
#' the \eqn{k}-th slice is the \eqn{p \times p} positive-definite disperion
#' matrix \eqn{\Sigma_k} for cluster \eqn{k}.} }
#'
#' @return A vector classify each observation to the cluster \eqn{1, \dots, K}
#' with the highest posterior probability.
#'
#' @author Emily Goren, \email{emily.goren@gmail.com} based on modifications of
#' code by Ranjan Maitra.
#'
#' @export
classify <- function(newobs, params) {
  # Accept a single observation supplied as a bare vector.
  if (is.null(dim(newobs))) {
    x <- t(as.matrix(newobs))
  } else {
    x <- as.matrix(newobs)
  }
  prior <- params$pi
  df <- params$nu
  K <- length(prior)
  # Validate that the parameter dimensions match the observations.
  if (ncol(params$mu) != ncol(x))
    stop("Dimension of newobs and mu do not match")
  if (any(dim(params$Sigma)[1:2] != ncol(x)))
    stop("Dimension of newobs and Sigma do not match")
  out <- apply(x, 1, function(xi) {
    # rmv flags the observed (non-missing) coordinates of this observation.
    rmv <- !is.na(xi)
    if (sum(!rmv) == length(xi)) {
      # Entirely missing observation: no classification possible.
      ans <- NA
    } else {
      # Restrict the parameters to the observed coordinates (marginal t).
      x.tr <- t(matrix(xi[rmv]))
      mean.tr <- params$mu[, rmv]
      sigma.tr <- params$Sigma[rmv, rmv, ]
      if (sum(rmv) == 1) {
        # One observed coordinate: mean.tr/sigma.tr collapsed to vectors.
        postr <- sapply(1:K, function(k) prior[k] * dMVT(x.tr, as.matrix(mean.tr[k]), as.matrix(sigma.tr[k]), df[k]))
      } else {
        postr <- sapply(1:K, function(k) prior[k] * dMVT(x.tr, mean.tr[k,], sigma.tr[,,k], df[k]))
      }
      # Normalize to posterior probabilities and pick the MAP cluster.
      postr <- postr/sum(postr)
      ans <- which.max(postr)
    }
    return(ans)
  })
  return(t(out))
}
|
37d8c4c62226442a15900795f4d0ab1499bdaf23
|
3bc5bb3dbb3b34961fdc7b4523965a92591d23e8
|
/R/RCPGenerator.R
|
be416d0c8c37beee46338c73a579cd834d7b6925
|
[] |
no_license
|
brandonsie/phipcc
|
510291462cc0c89764729bbe6d0010ad957d5e70
|
60d785ca8a1373d799cc9cc82a42532a30997a84
|
refs/heads/master
| 2021-07-23T17:33:59.722692
| 2020-05-31T01:35:41
| 2020-05-31T01:35:41
| 180,176,287
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,356
|
r
|
RCPGenerator.R
|
#' RCPGenerator calculates RCP, Relative Control Percentile, a case-control
#' statistic describing the percentile from 0.0 to 1.0 of one score against
#' that of a control population. For example, if a control population consisted
#' of four scores, 4.1, 5.0, 6.8, and 10.1, then a score of 5.0 would be
#' assigned an RCP of 0.25, because 5.0 is higher than 25% of the control
#' population. In this implementation, ties count as losses.
#'
#' @param case Data frame of cases. The first column is an identifier; the
#'   remaining columns hold scores, one row per feature.
#' @param ctrl Data frame of controls with the same number of rows as
#'   \code{case}, or the string \code{"self"} to rank cases against themselves.
#' @param min_hits Numeric, threshold for number of cases scoring above hit_thresh in
#'   original score required to be retained after filtering. By default, min_hits
#'   is set to 0 and thus RCP is calculated for all peptides. Setting a nonzero
#'   value for min_hits is only recommended if calculating RCP for all peptides
#'   is prohibitively slow.
#' @param hit_thresh Vector of numeric, threshold value to apply when considering min_hits.
#'   hit_thresh is only evaluated if min_hits > 0.
#' @param verbose Logical to print status updates to console.
#' @return Data frame with the same shape and names as \code{case}, scores
#'   replaced by their RCP against the matching control row.
RCPGenerator <- function(case, ctrl, min_hits = 0, hit_thresh = 0,
                         verbose = FALSE){
  # ----------------------------------------------------------------------------
  # Configure settings.
  # BUGFIX: the original `else` bound to the inner `if`, so the row-count
  # check never ran when `ctrl` was a data frame. "self" mode and the
  # dimension check are now mutually exclusive branches.
  self <- FALSE
  if (is.character(ctrl) && ctrl[1] == "self") {
    # Rank each case row against the distribution of its own scores.
    ctrl <- case
    self <- TRUE
  } else if (nrow(case) != nrow(ctrl)) {
    stop("Error: RCPGenerator: case control nrow mismatch.")
  }

  # Optional speed-only pre-filter: keep rows with at least `min_hits`
  # case scores above `hit_thresh`.
  if (min_hits > 0) {
    if (verbose) {
      print(dim(case))
      print(paste("Subsetting to ", min_hits, "above", hit_thresh))
    }
    num_hits <- apply(case[, -1], 1, function(x) sum(x > hit_thresh))
    case <- case[num_hits >= min_hits, ]
    ctrl <- ctrl[num_hits >= min_hits, ]
    if (verbose) print(dim(case))
  }
  if (verbose) print("continue")

  # ----------------------------------------------------------------------------
  # RCP calculation
  pb <- txtProgressBar(min = 0, max = nrow(case), initial = 0, char = "=",
                       width = 20, style = 3)
  # Output frame: same shape/names as `case`; carry the id column over.
  rcp.calc <- as.data.frame(matrix(NA, nrow(case), ncol(case)))
  rcp.calc[, 1] <- case[, 1]
  names(rcp.calc) <- names(case)

  if (self == FALSE) {
    if (verbose) print(Sys.time())
    for (i in seq_len(nrow(case))) {
      setTxtProgressBar(pb, i)
      # Empirical CDF of the control scores for this row. Subtracting a tiny
      # epsilon makes ties count as losses (strictly-greater percentile).
      quant <- stats::ecdf(as.numeric(ctrl[i, -1]))
      rcp.calc[i, -1] <- quant(as.numeric(case[i, -1]) - 1e-10)
    }
  } else {
    # Self vs. self runs faster via percent_rank applied in row chunks.
    i <- 1
    while (i <= nrow(case)) {
      setTxtProgressBar(pb, i)
      # Process rows i..j in chunks of 10,000 to bound memory use.
      inc <- 9999
      if (i + inc < nrow(case)) { j <- i + inc } else { j <- nrow(case) }
      rcp.calc[i:j, -1] <- t(apply(case[i:j, -1], 1, dplyr::percent_rank))
      i <- j + 1
    }
  }
  setTxtProgressBar(pb, nrow(case))
  close(pb)
  return(rcp.calc)
}
|
c48032b8a84e838e1e0f59872a20f3984a026a7d
|
eb6aeb235e9d476e8bc3dd1aefa52f04e086d4b5
|
/code/wgcna2igraph.R
|
dbc118625f0caca777ea082d56acb999cb7b5cb4
|
[] |
no_license
|
TheBeeGuy/WGCNA_Intragenomic_Conflict
|
8489713342c6c0a3bbef7c5540afe81ff0674a9c
|
3cad9d05b184273d716977d208dcb311f913107b
|
refs/heads/master
| 2021-07-13T14:18:24.113518
| 2020-07-27T01:27:44
| 2020-07-27T01:27:44
| 185,473,857
| 0
| 0
| null | 2019-05-31T16:05:44
| 2019-05-07T20:32:41
|
HTML
|
UTF-8
|
R
| false
| false
| 6,958
|
r
|
wgcna2igraph.R
|
#' @title Generate an igraph object from wgcna output
#'
#' @description
#' \code{wgcna2igraph} Function to cull and plot WGCNA networks. Requires
#' igraph and WGCNA to be installed.
#' @param net WGCNA generated network. Usually from WGCNA::blockwiseModules
#' @param datExpr The expression dataset, transposed so that genes are columns
#' and individuals are rows.
#' @param modules2plot The names (usually colors) of the WGCNA modules to plot.
#' All elements of modules2plot must be in the first element of net.
#' @param colors2plot The colors for modules2plot. Must match the length of
#' modules2plot.
#' @param top.n.edges If not NA, retain only the top n edges ranked by adjacency.
#' @param kME.threshold The kME threshold to retain a node
#' @param adjacency.threshold The adjacency threshold to retain an edge.
#' @param adj.power The power used to calculate the adjacency matrix
#' @param min.edge Minimum number of edges a node must have to be retained.
#' @param node.size If >0, plot the nodes with a given size.
#' @param frame.color If node.size > 0, can specify the color for node outlines
#' @param node.color If node.size > 0, can specify the color for nodes
#' @param edge.alpha Numeric [0-1] specifying the transparency of the edges
#' @param verbose Logical; print progress messages while building the network?
#' @param returnNet Should the network be returned? If FALSE, a list of the
#' genes and original module colors that were input is returned.
#' @param ... additional arguments passed on WGCNA::adjacency
#'
#' @details More here soon.
#' @return an igraph network
#' @examples
#' \dontrun{
#' library(WGCNA)
#' library(igraph)
#' data(kidney) #' from simseq
#' counts<-kidney$counts
#' counts<-counts[sample(1:nrow(counts),1000),]
#' info<-with(kidney,
#' data.frame(id = paste(replic, treatment, sep = "_"),
#' rep=replic,
#' Treatment=ifelse(treatment == "Tumor","tumor","cntr"),
#' stringsAsFactors=F))
#' colnames(counts)<-info$id
#' stats <- pipeLIMMA(counts = counts,
#' info = info,
#' block = NULL,
#' formula = "~ Treatment")
#'
#' datExpr.1=t(stats$voom$E)
#' pow=6
#' net.1 = blockwiseModules(datExpr.1, power = pow,
#' maxBlockSize = 10000, deepSplit = 2,
#' minModuleSize = 10,
#' saveTOMs = FALSE,
#' verbose = F)
#'
#' graph<-wgcna2igraph(net = net.1, datExpr = datExpr.1,
#' modules2plot = c("blue","green","turquoise","brown"),
#' colors2plot = c("orange","darkred","cyan","cornflowerblue"),
#' kME.threshold = 0.5, adjacency.threshold = 0.1,
#' adj.power = pow, verbose = T,
#' node.size = 0, frame.color = NA, node.color = NA,
#' edge.alpha = .5, edge.width =1)
#' plot(graph)
#' }
#'
#' @export
wgcna2igraph<-function(net, datExpr, top.n.edges = NA,
                       modules2plot = NULL, colors2plot = NULL,
                       kME.threshold = .75, adjacency.threshold = 0.1,
                       adj.power = 6, verbose = T,min.edge=2,
                       node.size = 0, frame.color = NA, node.color = NA,
                       edge.alpha = .5, edge.width =1, returnNet=TRUE,...){
  # When only the gene/module listing is wanted, default to all modules.
  if(!returnNet & is.null(modules2plot)){
    modules2plot = unique(net[[1]])
    colors2plot = unique(net[[1]])
  }
  # --- input validation -------------------------------------------------------
  if(returnNet & length(colors2plot) != length(modules2plot))
    stop("colors2plot and modules2plot must have the same number of elements\n")
  if(!any(sapply(modules2plot, function(x) x %in% unique(net[[1]]))))
    stop("all modules2plot must be found in the first element of net\n")
  if(ncol(datExpr)!=length(net[[1]]))
    stop("net and datExpr must contain the same number of genes\n")
  if(!requireNamespace("WGCNA", quietly = TRUE) |
     !requireNamespace("igraph", quietly = TRUE)){
    stop("install the WGCNA and igraph packages before running\n")
  }else{
    require("igraph", quietly = TRUE)
    require("WGCNA", quietly = TRUE)
  }
  gs<-colnames(datExpr)   # gene names
  cols<-net[[1]]          # module assignment per gene
  names(cols)<-gs
  # --- node culling by module membership (kME) --------------------------------
  if(kME.threshold>0){
    if(verbose) cat("using KME values to find genes with high module membership\n")
    kme<-signedKME(datExpr = datExpr, datME = net$MEs, outputColumnName = "")
    row.names(kme)<-gs
    # |kME| of each gene with respect to its *own* module
    kmes<-sapply(1:length(gs), function(x) abs(kme[gs[x],colnames(kme) == cols[x]]))
    kmes<-data.frame(genes = gs, cols = cols, kme = kmes)
    gs<-kmes$genes[kmes$kme>=kME.threshold]
    cols<-cols[kmes$kme>=kME.threshold]
    datExpr = datExpr[,gs]
  }
  if(verbose & returnNet) cat("subsetting to modules:", paste(modules2plot, collapse = ", "),"\n")
  # BUGFIX: compute the keep mask once. Previously `cols` was filtered before
  # being used to subset `gs`, so `gs` was left unfiltered.
  keep <- cols %in% modules2plot
  datExpr<-datExpr[,keep]
  gs<-gs[keep]
  cols<-cols[keep]
  if(verbose) cat("culling edges by adjacency\n")
  # BUGFIX: honor the documented adj.power argument; the exponent was
  # previously hard-coded to 6 regardless of what the caller passed.
  adj_mat<-adjacency(datExpr, power = adj.power, ...)
  if(!is.na(top.n.edges)){
    # Derive the cutoff that keeps exactly the top n edges; the matrix is
    # symmetric (each edge appears twice) and the diagonal is skipped.
    adjacency.threshold = sort(as.numeric(adj_mat), decreasing=T)[(top.n.edges*2)+nrow(adj_mat)]
  }
  # Binarize the adjacency matrix and drop self-edges.
  adj_mat[adj_mat > adjacency.threshold] <- 1
  adj_mat[adj_mat < adjacency.threshold] <- 0
  diag(adj_mat) <- 0
  rs<-rowSums(adj_mat)
  if(verbose) cat("removing unconnected nodes\n")
  # Keep only nodes with more than min.edge retained edges.
  adj_mat<-adj_mat[rs>min.edge,rs>min.edge]
  if(!returnNet){
    # Return the retained genes and their module colors instead of a graph.
    return(list(genes = colnames(adj_mat), cols = cols[colnames(adj_mat)]))
  }else{
    if(verbose) cat("coverting to igraph format\n")
    graph.colors = sapply(cols, function(x) colors2plot[modules2plot == x])
    net <- graph_from_adjacency_matrix(adj_mat, weighted=TRUE,
                                       mode="upper")
    net <- simplify(net, remove.multiple = T, remove.loops = T)
    # Color edges by their endpoints' modules; between-module edges get
    # translucent black.
    edge.start <- ends(net, es=E(net), names=T)[,1]
    edge.end <- ends(net, es=E(net), names=T)[,2]
    col.start <- graph.colors[edge.start]
    col.end <- graph.colors[edge.end]
    # Helper: apply an alpha channel to a vector of color names.
    add.alpha <- function(col, alpha=1){
      apply(sapply(col, col2rgb)/255, 2,
            function(x)
              rgb(x[1], x[2], x[3], alpha=alpha))
    }
    is.inMod<-col.start==col.end
    E(net)$color<-ifelse(is.inMod, add.alpha(col.start,edge.alpha),rgb(0,0,0,edge.alpha))
    E(net)$width<-edge.width
    # Dash between-module edges. NOTE(review): the literal "#00000080" only
    # matches rgb(0,0,0,edge.alpha) when edge.alpha is 0.5 — verify intent
    # for other alpha values.
    E(net)$lty<-ifelse(E(net)$color == "#00000080" | is.na(E(net)$color), 3,1)
    V(net)$size <- node.size
    V(net)$frame.color <- frame.color
    V(net)$label <- NA
    if(node.size == 0){
      node.color = NA
    }else{
      if(is.na(node.color)){
        node.color = graph.colors
      }
    }
    V(net)$color <- node.color
    E(net)$arrow.mode <- 0
    if(verbose) cat("returning a network with",length(V(net)$size),"nodes and",length(E(net)$color),"edges\n")
    return(net)
  }
}
|
e72b9923b53120d46a6b2b650ce6d98468d1debe
|
f1e5a836d5c7e95e7f4251613211b0887fb7d1f6
|
/R/ops_fulltext.R
|
0a86861204c612b35c58be8f693bf77d9520db5c
|
[] |
no_license
|
wipo-analytics/opsrdev
|
42236173216d570a694deba1f587a8af696a7a0e
|
b32ca743f48238c11e947a36de2215c88d4bf995
|
refs/heads/master
| 2021-01-12T15:53:11.559849
| 2016-10-24T10:54:52
| 2016-10-24T10:54:52
| 69,320,484
| 0
| 11
| null | 2016-10-24T10:54:52
| 2016-09-27T04:52:09
|
R
|
UTF-8
|
R
| false
| false
| 2,863
|
r
|
ops_fulltext.R
|
#' @title retrieve full text using patent numbers
#' @description For jurisdictions where full text is available. This is
#'   presently limited to Austria (AT), Canada (CA), the European Patent Office
#'   (EP), Great Britain (GB), the Patent Cooperation Treaty (WO), Spain (ES)
#'   and Switzerland (CH).
#' @param query A patent number or character vector containing patent numbers.
#' @param type Description, claims, or fulltext (availability). See details
#' @param timer Set the time delay between calls to OPS in seconds (applied
#'   between requests when \code{query} contains more than one number).
#' @return For a single patent number, the parsed response body; for multiple
#'   numbers, a list of parsed response bodies.
#' @details Setting type "fulltext" will simply retrieve information on the
#'   availability of fulltext elements for a given record. description will
#'   retrieve the available descriptions. claims will retrieve the available
#'   claims. Note that fulltext coverage is limited to the jurisdictions
#'   listed above (queries for other countries will fail). To identify
#'   documents with full text availability use the patent family service to
#'   link from numbers outside fulltext (e.g. US) to those inside fulltext
#'   (e.g. EP or WO). Be cautious when using the arguments (e.g. description)
#'   as they will pull back a lot of data affecting your quota.
#' @export
#' @examples \dontrun{ops_fulltext("WO0000034", type = "fulltext", timer = 20)}
#' @examples \dontrun{ops_fulltext("WO0000034", type = "description", timer = 20)}
#' @examples \dontrun{ops_fulltext("WO0000034", type = "claims", timer = 20)}
ops_fulltext <- function(query = "", type = "", timer = 30){
  # BUGFIX: validate `type` up front. Previously an unrecognized type left
  # `url` undefined and produced an obscure "object not found" error.
  if (!type %in% c("fulltext", "description", "claims")) {
    stop("`type` must be one of 'fulltext', 'description' or 'claims'",
         call. = FALSE)
  }
  baseurl <- "http://ops.epo.org/3.1/rest-services/published-data/publication/epodoc/"
  # One request URL per patent number.
  urls <- paste0(baseurl, query, "/", type)

  # Issue a single OPS request and return the parsed body.
  fetch_one <- function(u) {
    res <- httr::GET(u, httr::content_type("plain/text"),
                     httr::accept("application/json"))
    httr::content(res)
  }

  if (length(query) == 1) {
    # Single number: same return value as before (parsed response body).
    fetch_one(urls)
  } else {
    # BUGFIX: multiple numbers previously fell through to an undefined
    # `myquery`. Fetch sequentially, pausing `timer` seconds between calls
    # to respect the OPS fair-use policy.
    out <- vector("list", length(urls))
    for (i in seq_along(urls)) {
      out[[i]] <- fetch_one(urls[[i]])
      if (i < length(urls)) Sys.sleep(timer)
    }
    out
  }
}
|
d622f02f77ff909d4d09df7a48224282e8862051
|
246189c0e240e174b9ca74e2a42bfecee79cc9e1
|
/R/analyzeMatrixPopModels.R
|
bfc992d4506214800be0b212f980073de735813d
|
[] |
no_license
|
ksauby/GTMNERRproc
|
f3bcd140578d710c9b013da83d9ac8d08e781eee
|
fd5a073d5fd2690b6fde64a0313d1a3fdfe07645
|
refs/heads/master
| 2021-04-06T13:03:29.008590
| 2017-11-15T20:35:53
| 2017-11-15T20:35:53
| 83,352,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,775
|
r
|
analyzeMatrixPopModels.R
|
#' Analyze Matrix Population Models
#'
#' @description Analyze multiple matrix population models, allowing for a seed bank and clonal reproduction.
#' @return Returns a data frame (rows bound over all parameter combinations) including:
#' \itemize{
#' \item the growth/retrogression/survival/fecundity transition matrix and clonal reproduction transition matrix for each matrix population model analysis
#' \item the results of the matrix population model analysis, including repro. values for each stage, the stable stage distribution, sensitivities, elasticities, lambda, the damping ratio, the seed bank size, the number of seeds assumed per fruit, and the seed survival rate (the rate at which seeds stay in the seed stage from one year to the next).
#' }
#'
#' @param trans_data dataset from which to create growth/retrogression/survival/fecundity transition matrix; the clonal reproduction transition matrix is read from \code{trans_data$clone_transition_rates}
#' @param SizeClass A vector containing the size breaks with which to create stages based on size.
#' @param TransitionYear Year of the transition. NOTE(review): not referenced in the function body — confirm whether it should be used or removed.
#' @param SeedSurvival Rate at which seeds survive (i.e., rate at which seeds remain in seed stage). Can be a single value or vector of values.
#' @param SeedBankSize Number of seeds in the seed bank (can be a single value or vector of values).
#' @param SeedsPerFruit Average number of seeds produced per fruit (can be a single value or vector of values).
#' @param n.iter Number of iterations for pop.projection function. Default value is 1000.
#' @export
# Relies on createProjectionMatrix()/calculateNumberIndivperStage() from this
# package and on pop.projection()/eigen.analysis() (popbio) plus %<>%/%>%
# (magrittr) and mutate()/select() (dplyr) being attached by the caller.
analyzeMatrixPopModels <- function(
	trans_data,
	SizeClass,
	TransitionYear,
	SeedSurvival,
	SeedBankSize,
	SeedsPerFruit,
	n.iter=1000
) {
	# One result slot per seed-bank size; filled per seeds-per-fruit (j) and
	# per seed-survival (k) value, then rbind-ed back together at each level.
	A <- vector("list", length(SeedBankSize))
	for (i in 1:length(SeedBankSize)) {
		for (j in 1:length(SeedsPerFruit)) {
			for (k in 1:length(SeedSurvival)) {
				# ----------------------------------------------------------- #
				# Create growth/retrogression/survival/fecundity transition matrix
				# ----------------------------------------------------------- #
				proj_mat <- createProjectionMatrix(
					trans_data$trans01,
					SeedBankSize[i],
					SeedsPerFruit[j],
					SeedSurvival[k]
				)
				# ------------------------------------------------------- #
				# add transition matrices together (demographic + clonal)
				# ------------------------------------------------------- #
				all_proj_matrix <- proj_mat + trans_data$clone_transition_rates
				# ------------------------------------------------------- #
				# starting numbers: observed individuals per stage
				# ------------------------------------------------------- #
				n_per_stage <- NULL
				n_per_stage <- calculateNumberIndivperStage(
					trans_data$trans01,
					trans_data$stages
				)
				# ------------------------------------------------------- #
				# dynamics
				# ------------------------------------------------------- #
				# Overwrite the seed->seed element with this scenario's
				# seed survival rate and seed the "Seed" stage with the
				# assumed seed bank size.
				all_proj_matrix[1, 1] <- SeedSurvival[k]
				n_per_stage %<>%
					mutate(
						n = replace(
							n,
							which(stage=="Seed"),
							SeedBankSize[i]
						)
					)
				# NOTE(review): `pr` is computed but never used below —
				# confirm whether the projection output should be returned.
				pr <- pop.projection(
					A = all_proj_matrix,
					n = n_per_stage$n,
					iterations = n.iter
				)
				analysis.results <- eigen.analysis(all_proj_matrix)
				# create table of results
				# repro.values (one wide row, columns prefixed "repro.value.")
				repro.value.dat <- analysis.results$repro.value
				names(repro.value.dat) <-
					paste("repro.value",names(repro.value.dat),sep=".")
				repro.value.dat %<>% t %>% as.data.frame
				# stable stage distribution (columns prefixed "stable.stage.")
				stable.stage.dat <- analysis.results$stable.stage
				names(stable.stage.dat) <-
					paste("stable.stage",names(stable.stage.dat),sep=".")
				stable.stage.dat %<>% t %>% as.data.frame
				# elasticities, flattened to one wide row named by transition
				# NOTE(review): unlike sensitivities below, elasticities is
				# not passed through as.table first; if eigen.analysis
				# returns a plain matrix, Var1/Var2 will not exist — verify.
				elasticities <- analysis.results$elasticities %>%
					as.data.frame %>%
					mutate(
						Name = paste(
							"elasticities.",
							Var2, "-",
							Var1,
							sep=""
						)
					) %>%
					as.data.frame(row.names=.$Name) %>%
					dplyr::select(Freq) %>%
					t %>%
					as.data.frame()
				# sensitivities, flattened the same way
				sensitivities <- analysis.results$sensitivities %>%
					as.table %>% as.data.frame %>%
					mutate(
						Name = paste(
							"sensitivities.",
							Var2, "-",
							Var1,
							sep=""
						)
					) %>%
					as.data.frame(row.names=.$Name) %>%
					dplyr::select(Freq) %>%
					t %>%
					as.data.frame()
				# One wide row per (SeedBankSize, SeedsPerFruit, SeedSurvival)
				# scenario, tagged with the scenario's parameter values.
				A[[i]][[j]][[k]] <- data.frame(
					repro.value.dat,
					stable.stage.dat,
					sensitivities,
					elasticities,
					lambda1 = analysis.results$lambda1,
					damping.ratio = analysis.results$damping.ratio,
					SeedBankSize = SeedBankSize[i],
					SeedsPerFruit = SeedsPerFruit[j],
					SeedSurvival = SeedSurvival[k]
				)
			}
			A[[i]][[j]] <- do.call(rbind.data.frame, A[[i]][[j]])
		}
		A[[i]] <- do.call(rbind.data.frame, A[[i]])
	}
	A <- do.call(rbind.data.frame, A)
	return(A)
}
|
9bdb180b31b91ab5b492c9d72dd88c0970d13ed4
|
f7eda815d3ce720ec4621f2b579914f8e8f8993e
|
/futboldata/tests/testthat/test_player_urls.R
|
89606e41f98d3e2f96faa49d1772872f3f805829
|
[
"MIT"
] |
permissive
|
cfranklin11/futbolean
|
1513a4d1483f0ee1e9d27bbc5dbb004d79dcd170
|
9b919ae752ecba66932d1d869f4354f0837b1b16
|
refs/heads/master
| 2023-01-21T06:41:51.276207
| 2019-11-01T22:44:01
| 2019-11-01T22:44:01
| 207,424,103
| 0
| 0
|
MIT
| 2022-12-26T20:58:42
| 2019-09-09T23:36:57
|
Python
|
UTF-8
|
R
| false
| false
| 622
|
r
|
test_player_urls.R
|
describe("scrape_player_links()", {
  first_season <- "2016-2017"
  last_season <- "2017-2018"
  season_count <- 2

  # Fetching data takes awhile, so we do it once for all tests
  player_urls <- scrape_player_links(
    start_season = first_season,
    end_season = last_season
  )

  it("returns a vector of url characters", {
    urls <- player_urls$data

    expect_true("character" %in% class(urls))
    expect_gt(length(urls), season_count)

    # Every scraped link should point at an fbref player page
    url_is_player_page <- vapply(
      urls,
      function(u) grepl("https://fbref.com/en/players/", u),
      logical(1)
    )
    expect_true(all(url_is_player_page))
  })
})
|
e3fb9cc5fce11f232b0de3f85d84ef9922a36142
|
e3e94e53a7fd67f6b437e633e1eec4c266db5ae1
|
/Abby/RandomForest.R
|
33ff0daff4aabb333e59e85984e09279d1d196ba
|
[] |
no_license
|
HandsOnTheRope/Data-Jaguar
|
7c1f9465a606595b3c725d4e8cb79695c938acb1
|
8fc6830fe382abdf6ed358b60d2b06afba4055fe
|
refs/heads/master
| 2020-06-16T12:27:09.252542
| 2016-12-01T15:51:37
| 2016-12-01T15:51:37
| 75,102,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,134
|
r
|
RandomForest.R
|
# Titanic survival prediction via random forest: impute missing Age/Embarked,
# split into train/test halves, fit a forest, and evaluate with a confusion
# matrix and ROC curve.
#import data
library(readr)
mydata <- read_csv("~/train.csv")
# recode categorical predictors as factors
mydata$Sex <- factor(mydata$Sex)
mydata$Pclass <- factor(mydata$Pclass)
#make family size variable (siblings/spouses + parents/children + self)
mydata$famsize <- mydata$SibSp + mydata$Parch + 1
#check for blanks or NAs in Age (interactive inspection only)
which(is.na(mydata$Age))
which(mydata$Age == '')
#fix NAs in age: impute with a regression tree fit on the complete rows
library(rpart)
Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + famsize,
data=mydata[!is.na(mydata$Age),],
method="anova")
mydata$Age[is.na(mydata$Age)] <- predict(Agefit, mydata[is.na(mydata$Age),])
summary(Agefit)
#check that NAs are gone
which(is.na(mydata$Age))
#check Embarked for blanks, NAs, convert to factor
which(mydata$Embarked == '')
which(is.na(mydata$Embarked))
# NOTE(review): rows 62 and 830 are hard-coded as the blank-Embarked rows;
# this assumes the standard Kaggle Titanic train.csv row order — verify
# against the which() output above before reusing on other data.
mydata$Embarked[c(62,830)] = "S"
mydata$Embarked <- factor(mydata$Embarked)
summary(mydata$Embarked)
#check Fare for NAs or blanks
which(is.na(mydata$Fare))
which(mydata$Fare == '')
#split data into testing and training dataset (random 50/50 row split)
dt = sort(sample(nrow(mydata), nrow(mydata)*.5))
train <- mydata[dt,]
test <- mydata[-dt,]
#install random forest
library(randomForest)
#set random seed so that results are reproducible
# NOTE(review): the seed is set *after* the sample() call above, so the
# train/test split itself is not reproducible — only the forest fit is.
set.seed(415)
#make random forest (classification: Survived coerced to factor)
forest <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare +
Embarked + famsize,
data=train,
importance=TRUE,
ntree=2000)
plot(forest)
#look at what variables are important
varImpPlot(forest)
#predict test data based on forest (hard class labels)
prediction <- data.frame(predict(forest, test, type = "class"))
#compare predictions to actual test data
library(caret)
confusionmat <- confusionMatrix(prediction[[1]], test$Survived)
print(confusionmat)
#make ROC curve from class probabilities (column 2 = P(Survived = 1))
library(ROCR)
prediction2 <- data.frame(predict(forest, test, type = "prob"))
pred <- prediction(prediction2[2], test$Survived)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(perf)
#area under curve
auc <- performance(pred, measure = "auc")
auc <- auc@y.values[[1]]
auc
|
51ab2ce3bd20f5ea36a6bc7c85e36a11208daa9f
|
9a0e90c86f0e62f36368760d30ee82cbfa902cff
|
/R/read_li6400.R
|
95f84266db36d56fa274855b578c1fe300c104e3
|
[] |
no_license
|
bvsonawane/photosynthesis
|
1856fd74bfe671198950e012c20e70b181f914c0
|
5d29cf5843141e4f11e2c1d77fe149665a35f6e2
|
refs/heads/master
| 2020-04-04T17:39:53.678405
| 2019-09-09T23:24:49
| 2019-09-09T23:24:49
| 156,129,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,188
|
r
|
read_li6400.R
|
#' @title Read and clean LI6400 file from csv format.
#'
#' @description Reads and cleans a Li6400 file in csv format, removes
#' unnecessary columns, and adds the Li6400 name, measurement date and
#' remarks as separate columns.
#' @param file_path path of Li6400 csv file.
#' @return A cleaned data frame built from the csv output of a Li6400
#' gas-exchange system, with the Li6400 name, measurement date and Remarks
#' (matched to their respective observation log) added as extra columns.
#' @export
#' @importFrom lubridate mdy_hms
#' @importFrom utils read.csv
read.li6400<- function (file_path){
  # Helper: pull "Remark=" rows out of the raw data and pair each remark
  # with the observation number found on the immediately following row.
  read_remark<- function(read2){
    rmk_df<- read2[read2$Obs == "Remark=", ]## rows that carry remark text
    # The Obs value of the row *after* each remark row identifies the
    # observation the remark belongs to.
    temp_obs<- as.numeric(as.character(droplevels
                         (read2[as.numeric(rownames(rmk_df))+1, "Obs"])))
    remark<- cbind(temp_obs, rmk_df)[ , c("temp_obs", "HHMMSS")]
    names(remark)<- c("Obs","Remark" )
    return(remark)
  }
  # First pass: read the whole file to locate where the data table begins.
  read1 <- read.csv(file_path)
  where_do_i_start <- which(read1[,1]=="in") +1
  # The number of lines before the data varies with each licor.
  the_end <- nrow(read1) ## where I should end
  open_ver<- colnames(read1)[1]
  # First cell holds the file's date/time stamp.
  aa<-data.frame(droplevels(read1[[open_ver]][1]))
  # Adding a timezone (the computer's, not the Li6400 measurement location) for clarity
  datetime<- mdy_hms(aa[1,],tz=Sys.timezone())
  licor.date<- as.Date(datetime)
  start_time<- format(datetime, format= "%H:%M:%S")
  #as.POSIXct(paste(licor.date, start_time), format="%Y-%m-%d %H:%M:%S")
  ## Licor instrument name (second column, second row of the header block)
  licor_name<- levels(droplevels(read1[[2]][2]))
  # Second pass: re-read starting at the data table proper.
  read2 <- read.csv(file_path, skip = (where_do_i_start - 2), header = T)
  ## extract remarks before stripping their rows
  remark<- read_remark(read2)
  #read2<- merge(read2, remark, by="Obs", all.x=T)
  read2 <- read2[!read2$Obs == "Remark=", ]## remove remarks
  read2 = read2 [-1,]## bad row in data
  # Columns 5+ are numeric measurements; the two-line Licor header forced
  # them to be read as factors, so convert back via character -> numeric.
  for(i in c(5:ncol(read2))) {
    read2[,i] <- as.numeric(as.character(read2[,i]))} ## remove the two-line header from Licor output, re-define columns as numeric
  read2 <- droplevels(read2)
  # Full timestamp per observation plus instrument/date metadata columns.
  read2$datetime <- as.POSIXct(paste(licor.date,read2$HHMMSS, Sys.timezone()))
  read2$licor<- licor_name
  read2$Date<- licor.date
  read2$Start_time<- start_time
  ## attach remarks to their observations and sort chronologically
  test_r<- merge(read2, remark, by="Obs", all.x=T)
  out<- test_r[order(test_r$datetime),]
  return(out)
}
|
e0897f03768d50344ac06603d55744c9dafc197b
|
57eb613a446a89e08918c18e4a2ef5b7904754a3
|
/man/mothers-ProgenyArray-method.Rd
|
b743afac1f94dc7b952b5c2bc2e16c2dd51e0f3f
|
[] |
no_license
|
kate-crosby/ProgenyArray
|
c0fec380460e1d21d16477a05d05361e88e59f70
|
6fde9526f0bcb953251a28473a7c042b40254211
|
refs/heads/master
| 2020-12-28T23:49:59.252767
| 2015-05-15T23:44:22
| 2015-05-15T23:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
rd
|
mothers-ProgenyArray-method.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/progenyarray-methods.R
\docType{methods}
\name{mothers,ProgenyArray-method}
\alias{mothers,ProgenyArray-method}
\title{Accessor for mothers in a ProgenyArray object}
\usage{
\S4method{mothers}{ProgenyArray}(x)
}
\arguments{
\item{x}{a ProgenyArray object}
}
\description{
Internally, this references the \code{parents} slot.
}
|
f8a11dd3873b3a79219a144e28189f974b364abb
|
b2eeb5e69ce34680a1aeb6259a549cf0d8153978
|
/man/GIMAP5.Rd
|
9fbba13716faf2366208fd568043bb45e8fa2123
|
[] |
no_license
|
SFUStatgen/LDheatmap
|
abb3444304c6185287436fc482ae5b45c06f7cfc
|
510c60056c371127c4a2fc63c0d1b032717bde62
|
refs/heads/master
| 2023-03-09T17:57:04.521184
| 2023-02-24T01:15:32
| 2023-02-24T01:15:32
| 131,069,485
| 11
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,602
|
rd
|
GIMAP5.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GIMAP5Rd.R
\docType{data}
\name{GIMAP5}
\alias{GIMAP5}
\title{Example data set for LDHeatmap}
\format{
GIMAP5 is a list with three elements: snp.data, snp.support and
subject.support. snp.data is a \code{SnpMatrix}
object containing the SNP genotypes. Rows correspond to
subjects and columns correspond to SNPs.
snp.support is a data frame with the following columns:
\tabular{rlll}{
[,1] \tab dbSNPalleles \tab character \tab alleles at each SNP\cr
[,2] \tab Assignment \tab character \tab same as dbSNPalleles\cr
[,3] \tab Chromosome \tab character \tab chromosome (chr7 for all)\cr
[,4] \tab Position \tab numeric \tab physical position\cr
[,5] \tab Strand \tab character \tab strand (all "+")\cr
}
subject.support is a one-column data frame with:
\tabular{rlll}{
[,1] \tab pop \tab character \tab HapMap population of each subject \cr
}
}
\source{
International HapMap Project \url{ftp://ftp.ncbi.nlm.nih.gov/hapmap/}
}
\usage{
data(GIMAP5)
}
\description{
SNP genotypes on HapMap founders for SNPs spanning the GIMAP5 gene.
}
\details{
SNP genotypes from HapMap release 27
for SNPs in a 10KB region spanning
the GIMAP5 gene. Data are on founders from each of the 11 HapMap
phase III populations:
\tabular{ll}{
ASW \tab African ancestry in Southwest USA \cr
CEU \tab Utah residents with Northern and Western European ancestry from the CEPH collection \cr
CHB \tab Han Chinese in Beijing, China \cr
CHD \tab Chinese in Metropolitan Denver, Colorado \cr
GIH \tab Gujarati Indians in Houston, Texas \cr
JPT \tab Japanese in Tokyo, Japan \cr
LWK \tab Luhya in Webuye, Kenya \cr
MEX \tab Mexican ancestry in Los Angeles, California \cr
MKK \tab Maasai in Kinyawa, Kenya \cr
TSI \tab Toscani in Italia \cr
YRI \tab Yoruba in Ibadan, Nigeria \cr
}
Only those SNPs with minor allele frequency greater
than 5\% in all populations were retained.
The base positions are from NCBI build 36
(UCSC genome hg18).
}
\examples{
data(GIMAP5)
#Now do a lattice plot with LDheatmaps in the panels
library(lattice)
pop<-GIMAP5$subject.support$pop
n<-nrow(GIMAP5$snp.data)
xyplot(1:n ~ 1:n | pop, type="n", scales=list(draw=FALSE), xlab="", ylab="",
panel=function(x, y, subscripts,...) {
LDheatmap(GIMAP5$snp.data[subscripts,],GIMAP5$snp.support$Position,
newpage=FALSE)})
rm(pop,n)
}
\references{
The International HapMap Consortium. A haplotype map of
the human genome. Nature 437, 1299-1320. 2005.
}
\seealso{
\code{\link{GIMAP5.CEU}}
}
\keyword{datasets}
|
3e2aa45dee8112af7add2eaf063048db4364f7ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clv/examples/SD_SDbw.Rd.R
|
e554867682794dec00264e04cf193db4a60e44db
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
SD_SDbw.Rd.R
|
# Extracted example: computing the SD and S_Dbw internal cluster-validity
# indices from the clv package on hierarchically clustered iris data.
library(clv)
### Name: clv.SD, clv.SDbw
### Title: SD, SDbw - Internal Measures
### Aliases: clv.SD clv.SDbw
### Keywords: cluster
### ** Examples
# load and prepare: keep the 4 numeric measurements, drop the Species label
library(clv)
data(iris)
iris.data <- iris[,1:4]
# cluster data
agnes.mod <- agnes(iris.data) # create cluster tree (agglomerative hierarchical)
v.pred <- as.integer(cutree(agnes.mod,5)) # "cut" the tree into 5 clusters
# prepare proper input data for SD and S_Dbw indices
scatt <- clv.Scatt(iris.data, v.pred)           # average cluster scattering
dis <- clv.Dis(scatt$cluster.center)            # total separation of centers
dens.bw <- clv.DensBw(iris.data, v.pred, scatt) # inter-cluster density
# compute SD and S_Dbw indices (lower values indicate better clusterings)
SD <- clv.SD(scatt$Scatt, dis, alfa=5) # alfa is equal to number of clusters
SDbw <- clv.SDbw(scatt$Scatt, dens.bw)
|
816c4dbd3c3019bb633e3475e68353a7881e03ae
|
3f87e835a8a7c2e755e139c7ff5596d549a37d90
|
/replace_and_convert.R
|
bb8d6202a0b63a6700f885e36a1f542c50c83782
|
[] |
no_license
|
Paularb3ar/data-analytics
|
584185780554c43ced5c3d2d358b7fc30c0d3cc9
|
4c4a4a3e342ccbc0106f3d8bf1efae1863159040
|
refs/heads/master
| 2021-04-25T14:45:42.147314
| 2018-07-17T08:59:35
| 2018-07-17T08:59:35
| 124,045,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 640
|
r
|
replace_and_convert.R
|
# Strip currency and formatting text from the imported character columns,
# then coerce the cleaned values to numeric.

# Expenses: remove the trailing " Dollars" label, then thousands separators
fin$Expenses <- gsub(",", "", gsub(" Dollars", "", fin$Expenses))

# Revenue: "$" is a regex metacharacter in R, so it must be escaped as "\\$"
fin$Revenue <- gsub(",", "", gsub("\\$", "", fin$Revenue))

# Growth: drop the percent sign (value is still expressed as a percentage)
fin$Growth <- gsub("%", "", fin$Growth)

# gsub() returns character vectors, so convert each cleaned column to numeric
for (cleaned_col in c("Expenses", "Revenue", "Growth")) {
  fin[[cleaned_col]] <- as.numeric(fin[[cleaned_col]])
}
|
bdea9b9d0a629e2a36df23a2cf3d224f19bf3f6c
|
1eb6d4302fd74f783018524ba23cc46bd40c9fcf
|
/otto3tsne.R
|
ecc1dfecbd89bf9a19119538925fa740d2cb9f1d
|
[] |
no_license
|
LiYan1988/otto
|
03f3c4fd7c24efd0396f54839ce6f54a2b9bdbdb
|
dfab4a066c59420557d9ca312a82ca7487ea79ef
|
refs/heads/master
| 2021-01-11T01:09:36.243654
| 2016-11-05T17:55:43
| 2016-11-05T17:55:43
| 71,056,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
otto3tsne.R
|
# t-SNE feature extraction for the Otto product-classification data:
# embeds the combined, log1p-transformed train+test feature matrix into
# 3 dimensions and writes the embedding coordinates to tsne3all.csv.
library(methods)
library(data.table)
library(Rtsne)
setwd("C:\\Users\\lyaa\\Documents\\otto")
# fread with data.table = F returns plain data frames;
# [,-1] drops the first column (presumably a row id — verify against the CSVs)
train = fread("train.csv", header = T, data.table = F)[,-1]
test = fread("test.csv", header = T, data.table = F)[,-1]
# target is the last column of train, encoded "Class_1".."Class_N";
# convert to 0-based integers (y is computed but not used further below)
y = train[,ncol(train)]
y = as.integer(gsub('Class_', '', y))-1
train = train[,-ncol(train)]
# stack train and test so both are embedded in the same t-SNE space
x = rbind(train, test)
x = as.matrix(x)
# force a plain numeric matrix (drops any residual attributes/types)
x = matrix(as.numeric(x), nrow(x), ncol(x))
# log(1 + x) transform to tame skewed count features
x = log(x+1)
x_tsne = Rtsne(x, check_duplicates = T, pca = TRUE,
perplexity=30, theta=0.5, dims=3, verbose = TRUE)
# $Y holds the n x 3 embedding coordinates
write.csv(x_tsne$Y, file='tsne3all.csv', quote=FALSE, row.names=FALSE)
|
220442775364a55d3eae2e1b7f9a2350af61c005
|
92ace6ca147e2ecd00e4ddfc77e8a519f9364d1e
|
/helpers/sample_data.r
|
927ef4969797b217c2c787ff1ef594d3789c9704
|
[] |
no_license
|
drscghosh/spatial-analysis
|
6c646f1af1a1f35468958097cc6db3520494f727
|
41a33d70d80122cc47488edd3720bab9121c774a
|
refs/heads/master
| 2020-04-12T22:31:50.117213
| 2018-12-22T08:01:01
| 2018-12-22T08:01:01
| 162,791,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 205
|
r
|
sample_data.r
|
# Ten sample point locations in the western United States, collected into
# a two-column matrix of (longitude, latitude) pairs.
longitude <- c(-116.7, -120.4, -116.7, -113.5, -115.5,
               -120.8, -119.5, -113.7, -113.7, -110.7)
latitude <- c(45.3, 42.6, 38.9, 42.1, 35.7,
              38.9, 36.2, 39, 41.6, 36.9)
# cbind() names the columns after the input vectors
lonlat <- cbind(longitude, latitude)
|
60a2ed0b1248b2597f3c551ffda466d2c5e481a6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aprean3/examples/dse15k.Rd.R
|
fbf02a813f82dcd525d502824d08fe4149139bdf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 153
|
r
|
dse15k.Rd.R
|
library(aprean3)
### Name: dse15k
### Title: Dataset for Exercise K, Chapter 15
### Aliases: dse15k
### Keywords: datasets
### ** Examples
dse15k
|
a8ee38ad17393f5f3724373f0d043b994e6a7523
|
0293e3aada71aa5c22969b1fb42bb646b9e8d6cc
|
/Repeat Article/Semi_XGBoost.R
|
9839cee4528f14c20f0202c0b383a142982d8768
|
[] |
no_license
|
jim19930609/Superconductivity-Prediction
|
44aad24778cf1552403d9439d5b1666bb10deaf6
|
bfff7292220a82ba737fec9c42c121d30e8dbf68
|
refs/heads/master
| 2020-04-09T20:35:23.806383
| 2018-12-12T17:46:56
| 2018-12-12T17:46:56
| 160,578,379
| 0
| 3
| null | 2018-12-18T03:08:15
| 2018-12-05T21:04:12
|
R
|
UTF-8
|
R
| false
| false
| 2,576
|
r
|
Semi_XGBoost.R
|
library("xgboost")

# R^2 between predicted values x and observed values y
rsq <- function(x, y) cor(x, y) ^ 2

# Read in the superconductivity data (column 82 is the target, critical_temp)
sem <- read.csv('C:\\Users\\jim19\\Desktop\\Statistics_Project\\ML Models\\train.csv')

# Average RMSE / R^2 over 25 random 67/33 train-test splits.
# Result vectors are preallocated instead of being grown with c() inside
# the loop (growing a vector copies it on every iteration).
n_iter <- 25
TrainRMSE_list <- numeric(n_iter)
TrainRSQ_list <- numeric(n_iter)
TestRMSE_list <- numeric(n_iter)
TestRSQ_list <- numeric(n_iter)
for (iter in seq_len(n_iter)) {
  # Split into training & test sets
  smp_size <- floor(0.67 * nrow(sem))
  train_ind <- sample(seq_len(nrow(sem)), size = smp_size)
  sem_train <- sem[train_ind, ]
  sem_test <- sem[-train_ind, ]

  X_train <- as.matrix(sem_train[-82])
  Y_train <- as.matrix(sem_train["critical_temp"])
  X_test <- as.matrix(sem_test[-82])
  Y_test <- as.matrix(sem_test["critical_temp"])

  # Fit an XGBoost regression model (the original comment said
  # "Naive Bayes", which was wrong)
  model_xg <- xgboost(data = X_train,
                      label = Y_train,
                      eta = 0.02,
                      max_depth = 16,
                      min_child_weight = 1,
                      colsample_bytree = 0.5,
                      subsample = 0.5,
                      nrounds = 750,
                      objective = "reg:linear")

  # Training RMSE / R^2
  pred_train <- predict(model_xg, X_train)
  TrainRMSE_list[iter] <- sqrt(sum((pred_train - Y_train)^2) / length(pred_train))
  TrainRSQ_list[iter] <- rsq(pred_train, Y_train)

  # Test RMSE / R^2
  pred_test <- predict(model_xg, X_test)
  TestRMSE_list[iter] <- sqrt(sum((pred_test - Y_test)^2) / length(pred_test))
  TestRSQ_list[iter] <- rsq(pred_test, Y_test)
}

# Time a single fit (uses the last iteration's training split)
ptm <- proc.time()
model_xg <- xgboost(data = X_train,
                    label = Y_train,
                    eta = 0.02,
                    max_depth = 16,
                    min_child_weight = 1,
                    colsample_bytree = 0.5,
                    subsample = 0.5,
                    nrounds = 750,
                    objective = "reg:linear")
proc.time() - ptm

# Average RMSE and R^2 across the 25 splits
AveTrainRMSE <- mean(TrainRMSE_list)
AveTrainRSQ <- mean(TrainRSQ_list)
AveTestRMSE <- mean(TestRMSE_list)
AveTestRSQ <- mean(TestRSQ_list)
AveTrainRMSE
AveTrainRSQ
AveTestRMSE
AveTestRSQ

# Plot true Tc vs predicted Tc for the test batch of the last iteration
plot(pred_test, Y_test)
lines(seq(1, 200), seq(1, 200), col = "red")

# Feature-importance table from the last fitted model
importance_table <- xgb.importance(model = model_xg,
                                   data = X_train,
                                   label = Y_train)
importance_table
|
caef542215c2a2471abe684075ed969a2f9f250c
|
010b1c2fd855c025ea623c1f55dfca8d2723b24f
|
/inst/scripts/Estimation/biasvarcov.R
|
007135d8b0075293ccdaa2164f521890b5d23020
|
[] |
no_license
|
gbonte/gbcode
|
cd9cbdc5ef40b53ed82d79c5620c97357cdbcd0d
|
aa912f71a766b29fe4f4c2aa23bcf49751f5ea67
|
refs/heads/master
| 2023-03-22T03:17:26.278341
| 2023-03-17T15:52:09
| 2023-03-17T15:52:09
| 52,086,706
| 11
| 11
| null | 2022-06-03T11:42:52
| 2016-02-19T12:37:02
|
HTML
|
UTF-8
|
R
| false
| false
| 681
|
r
|
biasvarcov.R
|
# Monte-Carlo illustration of the decomposition of mean squared error into
# noise + squared bias + variance (+ a cross-covariance term CC), for the
# estimator "sample mean of Ntr training points" of a target theta.
rm(list=ls())
R=100     # number of replications
Ntr=1     # training-set size (the estimator averages Ntr points)
Nts=100   # test-set size
sdw=0.1   # noise standard deviation
E=NULL    # per-dataset test MSE values
CC=NULL   # cross terms (DNts - theta)*(theta - muhat)
V=NULL    # squared deviations of muhat around its mean (variance part)
B2=NULL   # squared bias terms
N=NULL    # squared noise terms (DNts - theta)^2
for (r in 1:(R/2)){ ## over functions
# NOTE(review): runif(1, 1, 1) has a degenerate range, so theta is always 1
theta=runif(1,1,1)
that=NULL   # estimates of theta accumulated across datasets
for (rr in 1:(2*R)){## over datasets
DNtr=rnorm(Ntr,theta,sdw)   # training sample
DNts=rnorm(Nts,theta,sdw)   # test sample
muhat=mean(DNtr)            # the estimator: training-sample mean
that=c(that,muhat)
E=c(E,mean((DNts-muhat)^2))
CC=c(CC,(DNts-theta)*(theta-muhat))
}
V=c(V,(that-mean(that))^2)
B2=c(B2,(theta-mean(that))^2)
N=c(N,(DNts-theta)^2)
}
# theoretical values: MSE = sdw^2 (noise) + sdw^2/Ntr (variance); bias is 0
cat("MSEth=", sdw^2+sdw^2/Ntr,"Vth=",sdw^2/Ntr,"\n")
# empirical decomposition; the two trailing sums should both approximate mean(E)
cat("MSE=",mean(E), "Noise=", mean(N), "B2=",mean(B2), "V=",mean(V), "CC=", mean(CC),
":",mean(N)+mean(B2)+mean(V), ":", mean(N)+mean(B2)+mean(V)+2*mean(CC)," \n")
|
11acf3b2142b5c7c438ffb5d21bb502ca7739eb8
|
a0919984c9cebe5d34f0e6a5eeb3e87ba9ec98e9
|
/Factor script.R
|
56fc2608b09d11f03548c399cc58d7f4070e9560
|
[] |
no_license
|
pbrehill/IDMSolsCoding
|
8fcf0a424fb9de02701bf8f6375ffd2ed01ef7d6
|
90db6f5609489b728b2b62ee28223948811677e1
|
refs/heads/master
| 2022-12-08T23:44:17.892049
| 2020-08-13T08:37:20
| 2020-08-13T08:37:20
| 267,760,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,295
|
r
|
Factor script.R
|
# Load tidyverse and load data
library(tidyverse)
library(car)
source('~/Documents/IWDA Consultancy 2/data/Dimension construction/Sols_dimensions.R')
setwd("~/Documents/IWDA Consultancy 2/Report chapters")
source('summaryfuncs.R')
# Unlist columns that have somehow become lists
data <- map_df(data, function (x) {
if(is.list(x)) return(unlist(x))
return(x)
})
# Read in data
dictionary <- read.csv('dictionary.csv') %>%
select(Newer.var.name, Label, Options)
questions <- read.csv('Questions.csv')
# # Get a question key
# questions$key <- questions %>%
# transmute(key = ifelse(is.na(.$Variable.name), .$Text, .$Variable.name))
#
# # Join labels to dictionary
# Define factor function
# Convert one survey column into an ordered factor using the label
# dictionary in `var_list`.
#   data     - the column's contents (numeric codes or already-labelled strings)
#   series   - the column's name, looked up in var_list$Newer.var.name
#   var_list - data frame with columns Newer.var.name and Options, where
#              Options is a "|"-separated string of level labels
# Numeric codes 94/97/98 are survey missing-data markers and are replaced by
# their text labels; 99 becomes NA.  Columns not found in var_list, or with
# no factor options, are returned unaltered.  Any unexpected error is caught
# and the original content returned with a diagnostic message.
factorise <- function(data, series, var_list) {
  tryCatch({
    # Initialise useful vars
    series_name <- series
    series_content <- data
    # If not in var_list send a warning and return unaltered content
    if (!(series_name %in% var_list$Newer.var.name)) {
      warning(paste0("Could not find ", series_name, " in var_list, did not transform the variable."))
      return(series_content)
    }
    # Return unaltered if variable doesn't have factor options.
    # FIX: this previously consulted the global `dictionary` instead of the
    # var_list argument, which silently tied the function to one particular
    # dictionary and broke reuse with any other var_list.
    if (is.na(var_list[var_list$Newer.var.name == series_name, 'Options']) |
        var_list[var_list$Newer.var.name == series_name, 'Options'] == "") {
      return(series_content)
    }
    # Get the variable's level labels by splitting the "|"-separated Options
    var_labels <- var_list %>%
      filter(Newer.var.name == series_name) %>%
      select(Options) %>%
      str_split('\\|') %>%
      unlist()
    # Numeric columns hold 1-based indices into var_labels plus the special
    # missing-data codes; translate each code to its text label.
    # seq_along() (not 1:length()) so a zero-length column loops zero times.
    if (is.numeric(series_content)) {
      for (i in seq_along(series_content)) {
        if (is.na(series_content[i])) {
          series_content[i] <- NA
        } else if (series_content[i] == 97) {
          series_content[i] <- "Refused to answer"
        } else if (series_content[i] == 94) {
          series_content[i] <- "Don't know"
        } else if (series_content[i] == 98) {
          series_content[i] <- "Privacy interrupted"
        } else if (series_content[i] == 99) {
          series_content[i] <- NA
        } else {
          series_content[i] <- var_labels[as.integer(series_content[i])]
        }
      }
    }
    # Change to an ordered factor with levels in dictionary order
    factor_series <- factor(series_content,
                            levels = var_labels,
                            ordered = TRUE)
    # If length of series is 1, unlist
    if (length(factor_series) == 1) factor_series <- unlist(factor_series)
    # Return the new factorised variable
    return(factor_series)
  }, error = function(err) {
    # error handler picks up where error was generated; series_name and
    # series_content are assigned before any fallible code, so both exist here
    print(paste("MY_ERROR: Error found at", series_name, "returning unaltered content."))
    return(series_content)
  })
}
# Apply this function
data_factored <- map2(.x = data, .y = names(data), ~factorise(.x, .y, dictionary))
lengths <- map_int(data_factored, length)
name_wrong_length <- names(lengths[lengths != 1874])
data_fact_df <- map_df(data_factored, function (x) {
if (length(x) == 1) {
return(unlist(x))
} else {
return(x)
}
})
write_csv(data_fact_df, 'PR_final_data.csv')
saveRDS(data_fact_df, 'PR_final_data.rds')
|
b244ae73b187158e7e6baba99e0118feb3146c72
|
3980ac1a2de7fe2c607d3b7b38d93a8dd0855337
|
/example_scripts/beating_data_into_shape_part3_transcript.R
|
e79fdbdfb4f7a3ffbb71ae405981790b012e8cbe
|
[] |
no_license
|
ea-guerette/ACC-From_thin_R
|
5bda9da9c08c992be9403e33a4466f1447fad39e
|
71aa7a79fd75d0839ca00d8d769b47e720a4344c
|
refs/heads/master
| 2022-11-24T21:32:34.727001
| 2020-07-27T02:32:07
| 2020-07-27T02:32:07
| 258,040,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,807
|
r
|
beating_data_into_shape_part3_transcript.R
|
#let's start with a cleaned up version of what we did last time - I removed all the 'non-essential' bits of code
#reminder that we are looking at 30 min data from BOM AWS at Gunn Point
#load packages
library(openair)
library(dplyr) #this is part of the tidyverse suite of packages
library(lubridate) #this package makes date manipulation easy
library(tidyr)
#in this script, I will try to explicitly state which package a function comes from by using the following syntax:
#package::function
#define variable holding the path to the folder containing the data files we want to read in
dir <- "C:/Users/gue02h/cloudstor/thinR/data/GPA/" #you need to change this to match the path on YOUR computer
gpmet_lapply <- lapply(list.files(path = dir, pattern = "014023", full.names = TRUE),
function(x) read.table(x, sep = ",", header = TRUE, comment.char = "", na.strings = -999))
#combine dataframes into one using bind_rows()
gpmet_combined <- dplyr::bind_rows(gpmet_lapply, .id = "id")
#we added the column 'id' to help with quality checks on the data
#note that we can combine the two steps above to bypass creating the list in our environment:
#gpmet_combined <- bind_rows(lapply(list.files(path = dir, pattern = "014023"),
# function(x) read.table(paste0(dir, x), sep = ",", header = T, comment.char = "", na.strings = -999)), .id = "id")
#let's look at our dataframe
#str(gpmet_combined)
#then we create a date column in a format that R understands, using as.POSIXct()
gpmet_combined$date <- as.POSIXct(gpmet_combined$Day.Month.Year.Hour24.Minutes.in.DD.MM.YYYY.HH24.MI.format.in.Local.standard.time,
format = "%d/%m/%Y %H:%M", tz = "Australia/Darwin")
#as.POSIXct is very versatile, and lets you turn almost anything string into a date - see last week for a fiendish example
#to fix the overlapping data, we use distinct() from the dplyr package - thanks Chris!
gpmet <- dplyr::distinct(gpmet_combined, date, .keep_all = TRUE)
#distinct() keeps the first instance of each date present in gpmet_combined. .keep_all = TRUE means we keep all columns
#To 'fix' the wind data:
#first, we make daily averages of the data using the timeAverage function from openair:
daily_means <- openair::timeAverage(gpmet, avg.time = "1 day", statistic = "mean")
#then, we can look for days with a mean wind speed of zero:
daily_ids <- which(daily_means$Wind.speed.in.km.h == 0 )
#daily_ids contains the row numbers associated with bad wind data in our daily_means dataframe
#daily_means only has 2141 rows, vs. >98k for our original dataframe... so daily_ids does not get us very far.
#we want to get the row numbers associated with bad wind data in our original dataframe!
daily_means$date[daily_ids] #this prints out all the dates associated with bad wind data
#this is a good start
#now we want to find these dates in our original dataframe
bad_wind_data_rows <- which(lubridate::date(gpmet$date) %in% lubridate::date(daily_means$date[daily_ids]))
#this is very similar to what we had above, but we reformat our dates so the string matching can work
#we can now assign a new value:
gpmet$Wind.speed.in.km.h[bad_wind_data_rows] <- NA
#we do the same for wind direction:
gpmet$Wind.direction.in.degrees.true[bad_wind_data_rows] <- NA
#we do the same for windgust:
gpmet$Speed.of.maximum.windgust.in.last.10.minutes.in..km.h[bad_wind_data_rows] <- NA
#let's have a quick look at the data to make sure all went according to plan:
openair::timePlot(gpmet, pollutant = "Wind.speed.in.km.h")
# Next, we want to recalculate precipitation to get 'Precipitation in the last 30 minutes'
#prcp2 <- c(NA, prcp[-length(prcp)])
#int_prcp <- prcp - prcp2
#ids <- grep("09:30:00", date)
#int_prcp[ids] <- prcp[ids]
#We need to fill in gaps in dates
#need to get rid of times that aren't :00 or :30
#let's try timeAverage using minimum as the statistic
#gpmet_min <- openair::timeAverage(gpmet, avg.time = "30 min", statistic = "min")
#summary(gpmet_min$Precipitation.since.9am.local.time.in.mm)
#timeAverage is slow, we lose the Quality columns, and some Inf values appear
#another approach is to create a sequence containing the dates we want to include:
#remember this:
#seq(1,12, by =2)
#it works for dates to:
date_seq_df <- data.frame(date = seq(min(gpmet$date), max(gpmet$date), by = "30 min"))
#we now have a dataframe containing column called 'date', that starts at
#the first date/time of our gpmet dataframe and ends at the last date/time of our gpmet dataframe
test <- merge(date_seq_df, gpmet, by = "date", all = TRUE) #full_join() - we have filled the gaps, but we get more rows than expected, due to the 12:25 etc. in the gpmet dataset
gpmet_complete <- merge(date_seq_df, test, by = "date", all = FALSE) #inner_join() - we now keep only the dates that are in date_seq
#we keep our Quality columns (although introducing NAs), fast, no Inf
summary(gpmet$Station.Number)
summary(gpmet_complete$Station.Number)
#we how we have introduced NAs in our Station.Number column. Not ideal - see at the end for a tweaked solution
#Now that we have a dataframe containing all and only the date_seq dates, we can perform our precipitation calculation
#prcp2 <- c(NA, prcp[-length(prcp)])
#int_prcp <- prcp - prcp2
#ids <- grep("09:30:00", date)
#int_prcp[ids] <- prcp[ids]
#we do this by replacing the place holders in the above with our real variables:
prcp_i_minus_1 <- c(NA, gpmet_complete$Precipitation.since.9am.local.time.in.mm[-length(gpmet_complete$Precipitation.since.9am.local.time.in.mm)])
gpmet_complete$Precipitation.in.last.30.minutes.in.mm <- gpmet_complete$Precipitation.since.9am.local.time.in.mm - prcp_i_minus_1
ids <- grep("09:30:00", gpmet_complete$date)
gpmet_complete$Precipitation.in.last.30.minutes.in.mm[ids] <- gpmet_complete$Precipitation.since.9am.local.time.in.mm[ids]
summary(gpmet_complete$Precipitation.in.last.30.minutes.in.mm)
#to make a function, we go back to our generalised code
#prcp2 <- c(NA, prcp[-length(prcp)])
#int_prcp <- prcp - prcp2
#ids <- grep("09:30:00", date)
#int_prcp[ids] <- prcp[ids]
#a function needs a name, some arguments, and a body
# Convert a cumulative "precipitation since 9am" series into the amount that
# fell in each interval.
#   prcp - cumulative precipitation readings (numeric)
#   date - matching date/time stamps as strings; readings stamped "09:30:00"
#          are the first after the 9am reset, so their interval amount equals
#          the cumulative value itself
recalculate_precip <- function(prcp, date) {
  # value of the previous observation (NA for the very first reading)
  previous <- c(NA, head(prcp, -1))
  interval <- prcp - previous
  # overwrite the post-reset readings with their raw cumulative values
  reset_ids <- grep("09:30:00", date)
  interval[reset_ids] <- prcp[reset_ids]
  return(interval)
}
#we can now test our function:
test_fun <- recalculate_precip(prcp = gpmet_complete$Precipitation.since.9am.local.time.in.mm, date = gpmet_complete$date)
gpmet_complete$Precipitation.in.last.30.minutes.in.mm <- recalculate_precip(prcp = gpmet_complete$Precipitation.since.9am.local.time.in.mm,
date = gpmet_complete$date)
#our function does not have any vetting built in -
test_incomplete <- recalculate_precip(prcp = gpmet$Precipitation.since.9am.local.time.in.mm, date = gpmet$date)
#this also works, but the result is gibberish because we have gaps and shorter intervals in gpmet
#we could expand the function so that it:
# gives a warning if the dataframe has gaps AND/OR
# automatically fill in dates
# we could also add an extra argument for the time interval (to allow e.g. hourly data)
# etc.
#Ian mentioned tidyr:complete() as an alternative to
#date_seq_df <- data.frame(date = seq(min(gpmet$date), max(gpmet$date), by = "30 min"))
#test <- merge(date_seq_df, gpmet, by = "date", all = TRUE) #full_join() - we have filled the gaps, but we get more rows than expected, due to the 12:25 etc. in the gpmet dataset
#gpmet_complete <- merge(date_seq_df, test, by = "date", all = FALSE) #inner_join() - we now keep only the dates that are in date_seq
#tidyr:complete() lets you 'fill in' several variable, so we won't be introducing NAs in e.g. Station.Number
#let's try it:
gpmet_tidyr_complete_test <- tidyr::complete(gpmet)
#nothing happens, we need to specify which column(s) to fill
gpmet_tidyr_complete_test <- tidyr::complete(gp_tib, date = date_seq_df) #POSIXct error
#the fill in feature requires a vector, not a dataframe
#lets create a date_seq vector:
date_seq <- seq(min(gpmet$date), max(gpmet$date), by = "30 min") #this is a vector containing the dates we want to include
gpmet_tidyr_complete_test <- tidyr::complete(gpmet, date = date_seq)
#this worked - we have all the dates including 12:25 etc. equivalent to test <- merge(date_seq_df, gpmet, by = "date", all = TRUE)
summary(gpmet_tidyr_complete_test$Station.Number) # this has introduced NAs
#now let's specify values for more columns (to replace the default NAs)
gpmet_tidyr_complete_test2 <- tidyr::complete(gpmet, date = date_seq , Station.Number = 14023, AWS.Flag = 1,
Latitude.to.four.decimal.places.in.degrees = -12.249,
Longitude.to.four.decimal.places.in.degrees = 131.0449)
summary(gpmet_tidyr_complete_test2$Station.Number) #tada! no NAs
#HOWEVER, we still have the 12:25, 12:26 time stamps in there, so we need to do something like:
#gpmet_complete <- merge(date_seq_df, test, by = "date", all = FALSE) #inner_join() - we now keep only the dates that are in date_seq
#using a dplyr function instead of merge()
gpmet_tidyr_test_cont <- dplyr::inner_join(date_seq, gpmet_tidyr_complete_test2)
#notice how inner_join accepts a vector as one of its arguments, whereas merge() would not.
#so, using the tidyverse, we still have to use two steps,
#but it gives us an easy way to fill in other columns as well as the date column
#Thanks Ian!
#We will be exploring the tidyverse more in the next session
|
11b1dea4373d116c87ee098f3c46a3549badab75
|
f088bde14cc6b950474e57e89b297b482ccb18f3
|
/R/package_setup.R
|
7f35e2aa296bef2638d086b3e5c3c76278ef0939
|
[
"MIT"
] |
permissive
|
matiasandina/nobrainr
|
36c86d940f325d465d3b944d276d8dd6e3592d75
|
dceb61b7a1a608d537694e939d7424a24b223b1a
|
refs/heads/main
| 2023-08-22T02:26:21.328292
| 2021-10-12T22:05:02
| 2021-10-12T22:05:02
| 411,441,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
package_setup.R
|
# library(usethis)
# use_mit_license("Matias Andina")
# use_package("dplyr", type = "Imports")
# use_readme_md()
# use_news_md()
# library(wholebrain)
# library(SMART)
# data("EPSatlas", package = "wholebrain")
#
# use_package("ggplot2")
#
# trial_df <- readr::read_csv("D:/SWAP/ChoiLab/YS/MG776/001/results/MG776_counts.csv")
#
# trial_df <-
# trial_df %>%
# select(parent, side, value, mm_from_bregma = mm.from.bregma)
# use_data(trial_df)
#
# use_r("plot_rois_in_data.R")
|
466a048c42083fc265035543f0900665f2598e47
|
09fede3ddb1fe90d486ead390e557275514ebcd7
|
/R/aaa.R
|
1be7f8ab3c44d111e60f46f2ea3b4f50c08cd949
|
[] |
no_license
|
Mrlirhan/mlr3extralearners
|
05723212d650c623a8bcf5ba4eb809eb7cc6707e
|
3655ccecb43f18837bc6a2078672636a399bab5d
|
refs/heads/main
| 2023-06-29T09:31:58.113359
| 2021-08-04T17:42:26
| 2021-08-04T17:42:26
| 392,993,757
| 1
| 0
| null | 2021-08-05T10:05:51
| 2021-08-05T10:05:50
| null |
UTF-8
|
R
| false
| false
| 495
|
r
|
aaa.R
|
# clearly this should be a Dictionary but there's a weird bug that leads to objects
# being handled wrong and not cloned properly when loaded from dictionary in `mlr3::lrn`
# Singleton registry of extra learner generators: maps a learner key to its
# R6 class generator.  `$new()` is called immediately so the package shares
# one instance.
.extralrns_dict = R6Class(".extralrns_dict",
  public = list(
    # named list of R6ClassGenerator objects, keyed by learner id
    lrns = list(),
    # Register generator `learn` under `key` (a single string).
    # Presumably insert_named() replaces an existing entry with the same
    # name — TODO confirm against mlr3misc.
    add = function(key, learn) {
      assert_character(key, len = 1)
      assert_class(learn, "R6ClassGenerator")
      # build a one-element list, then rename its (literal) "key" element
      # to the actual key string
      lst = list(key = learn)
      names(lst) = key
      self$lrns = mlr3misc::insert_named(self$lrns, lst)
    })
)$new()
|
c0648fe32af207324ae0d01ae004feb6f731e2e3
|
b2eb1052fe4e9a8ce458f3c786c8449330e0c09f
|
/R/fixSVGDim.R
|
af6d68c236580be49d0a4f44716683c93fd3575a
|
[] |
no_license
|
duncantl/XDynDocs
|
9245e9c25ef5cc4b575c4a11e85552c09c1bcd43
|
6612b35323a195c2b9646a2591163092935f328f
|
refs/heads/master
| 2023-01-09T02:51:24.797126
| 2022-12-22T17:01:08
| 2022-12-22T17:01:08
| 40,777,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,297
|
r
|
fixSVGDim.R
|
checkImages =
function(doc)
{
if(is.character(doc))
doc = htmlParse(doc)
src = xpathSApply(doc, "//iframe[@src]|//img[@src]", xmlGetAttr, "src")
src[!file.exists(src)]
}
renameReferences =
function(doc, to = ".", base = NA,
xpath = "//iframe[@src]|//img[@src]|//a[@href]|//link[@href]|//script[@src]")
{
if(is.character(doc))
doc = htmlParse(doc)
xpathSApply(doc, xpath, # Need to handle the <a href> carefully so as not to damage references to external things
function(x)
renameReference(x, to, base, if(xmlName(x) %in% c("iframe", "img", "script")) "src" else "href"))
doc
}
# Rewrite one src/href attribute on `node` so it points into directory `to`.
#   node - an XML node (img/iframe/script/a/link)
#   to   - target directory prefix for the rewritten reference
#   base - prefix to strip from the existing reference; when NA only the
#          basename of the reference is kept
#   at   - name of the attribute to rewrite ("src" or "href")
# Nodes without the attribute and external (http/ftp) references are
# returned unchanged.
renameReference =
function(node, to, base, at = "src")
{
  # (a leftover browser() debugging call was removed here)
  id = xmlGetAttr(node, at, NA)
  # No such attribute: nothing to rewrite.  FIX: previously the is.na()
  # test had no body — the comment below swallowed it, making the grepl()
  # check its body — so grepl(NA) ran and if(NA) errored for nodes that
  # lack the attribute.
  if(is.na(id))
     return(node)
  # external references are left as is.
  if(grepl("^(http|ftp)", id))
     return(node)
  id = if(is.na(base))
          basename(id)
       else
          gsub(sprintf("^%s", base), "", id)
  # strip any trailing path separator from both parts, then join them with
  # exactly one separator
  tmp = gsub(sprintf("%s$", .Platform$file.sep), "", c(to, id))
  xmlAttrs(node) = structure(sprintf("%s%s%s", tmp[1], .Platform$file.sep, tmp[2]), names = at)
  node
}
getAllReferences =
function(doc, xpath = c("//img/@src", "//a/@href", "//script/@src", "//iframe/@src", "//link/@href"),
omitExternal = TRUE)
{
if(is.character(doc))
doc = htmlParse(doc)
xp = paste(xpath, collapse = "|")
ans = xpathSApply(doc, xp, as, "character")
if(omitExternal)
ans = ans[ ! grepl( "^(http|ftp)", ans) ]
ans
}
fixSVGDimensions =
function(doc, all = FALSE)
{
if(is.character(doc))
doc = htmlParse(doc)
xp = if(all) "//iframe[@src]" else "//iframe[@src and @width='NaN']"
xpathApply(doc, xp, fixSVGDim)
doc
}
fixSVGDim =
function(node, ptToPixelFactor = 1.34)
{
ats = xmlAttrs(node)
if(!file.exists(ats["src"])) {
warning("cannot find ", ats["src"])
return(node)
}
if(FALSE)
svg = xmlParse(ats["src"])
else {
svg = xmlParse(paste(c(readLines(ats["src"], 2), "</svg>"), collapse = ""), asText = TRUE)
}
svg.ats = xmlAttrs(xmlRoot(svg))
dims = as.numeric(gsub("pt", "", svg.ats[c("width", "height")])) * ptToPixelFactor
xmlAttrs(node) = structure(dims, names = c("width", "height"))
node
}
|
0a40ce7574dc9f2495a9e3533ae0b7ae154fb665
|
241cf24d4b7226631c161d37eb85580d603cc3e3
|
/Plot3.R
|
07e1f987bbf59644d9244c7cc7638eae51abee6f
|
[] |
no_license
|
ashokjain001/Exploratory_Data_Analysis
|
9dcd26fd7603739488c2abce0adfc73a98312b2d
|
caac4bab7958d9a33558ec948c0e045ae27b7953
|
refs/heads/master
| 2020-04-06T04:22:26.041539
| 2014-08-09T04:53:22
| 2014-08-09T04:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
Plot3.R
|
# Plot PM2.5 emissions in Baltimore City (fips 24510) by source type and
# year, saving the figure to plot3.png in the working directory.
plot3 <- function(){
  library(ggplot2)  # ggplot()/facet_grid() below need ggplot2 attached
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  # don't error/warn when the directory already exists (e.g. on re-run)
  dir.create("EPA", showWarnings = FALSE)
  # download and extract only when needed; the readRDS() calls below
  # require the extracted files on disk (the original downloaded the zip
  # but never unzipped it)
  if (!file.exists("./EPA/summarySCC_PM25.rds")) {
    download.file(url, destfile = "./EPA/data.zip", method = "curl")
    unzip("./EPA/data.zip", exdir = "EPA")
  }
  NEI <- readRDS("./EPA/summarySCC_PM25.rds")
  SCC <- readRDS("./EPA/Source_Classification_Code.rds")
  baltimore <- subset(NEI, NEI$fips == "24510", )
  p <- ggplot(baltimore, aes(x = factor(year), y = Emissions)) +
    geom_bar(stat = "identity")
  # inside a function, ggplot objects are not auto-printed, so an explicit
  # print() is required for anything to be drawn before dev.copy()
  print(p + facet_grid(~type) +
          labs(title = "Emissions by type") +
          labs(x = "Year") +
          labs(y = expression(PM[2.5]*" Emissions")) +
          theme_bw(base_family = "Helvetica", base_size = 11))
  dev.copy(png, file = "plot3.png")
  dev.off()
}
|
cf19c62d36010987973db37ca0659c084b389160
|
f62736da11b1818af73866a6c5da7c5b8b75b980
|
/2015/05_megafon.R
|
3b6a800cfe6268a14dcbec045a3ef439a0c0ca83
|
[] |
no_license
|
erikgahner/posts
|
95b108dccea199a81656fd207857ba7afc7cf92a
|
38293e4f7d5a02ef87f9ae4cf36af0fefa209b86
|
refs/heads/master
| 2023-08-30T17:36:37.503975
| 2023-08-27T08:33:32
| 2023-08-27T08:33:32
| 25,849,217
| 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 1,785
|
r
|
05_megafon.R
|
# R script for the figure in "Why is the sample size smaller in the most
# recent Megafon poll?" (comments translated from Danish; all code and
# runtime strings unchanged)
# Link: http://erikgahner.dk/2015/05/27/hvorfor-er-stikproevestoerrelsen-mindre-i-den-seneste-megafon-maaling/
# Load packages. Can be installed with install.packages("")
library(downloader)
library(ggplot2)
# Download the polls file from GitHub
download("https://raw.githubusercontent.com/erikgahner/polls/master/polls.csv", "polls.csv", mode = "wb")
# Read polls.csv
polls <- read.csv("polls.csv")
# Create a date variable from the year/month/day columns
polls$date <- format(as.Date(c(paste(polls$year, polls$month, polls$day, sep="-")), by = "days"))
# Keep only polls from the current electoral term
polls <- polls[polls$date > as.Date("2011-09-15") & polls$date < as.Date("2015-05-26"),]
# Number of Megafon polls in this electoral term
NROW(polls$n[polls$pollingfirm == "Megafon"])
# Inspect the Megafon polls with fewer than 1000 respondents
polls[polls$n < 1000 & polls$pollingfirm == "Megafon",]
# Flag the most recent Megafon poll with its own Periode value
# (the Periode strings themselves stay in Danish — they appear in the plot legend)
polls$Periode <- "15. september 2011 til 30. april 2015"
polls$Periode[polls$day == 25 & polls$month == 5 & polls$year == 2015] <- "25. maj 2015"
png('megafon.png', height=4, width=6, units='in', res=200)
ggplot(polls[polls$pollingfirm == "Megafon",], aes(x=n, fill=Periode)) +
  geom_histogram(binwidth = 10) +
  ylab("") +
  ggtitle("Stikprøvestørrelsen i Megafon-målingerne, 2011-2015") +
  xlab("Stikprøvestørrelse") +
  theme_minimal() +
  theme(legend.direction = "horizontal", legend.position = "bottom")
dev.off()
# Compute the margin of error at a 95% confidence level
## N = 870
1.96 * sqrt( (48 * (100-48) ) / 870)
## N = the average Megafon sample size for this electoral term
1.96 * sqrt( (48 * (100-48) ) / mean(polls$n[polls$pollingfirm == "Megafon"]))
|
a23b9593b512690b498569a9a609698372570a61
|
1f3ae09ccb392ad94f0fb2a29c0ed20921505f58
|
/R/debugVariable.R
|
6bde443b3da8edbe7dea074004accad5efec9709
|
[] |
no_license
|
End-to-end-provenance/provDebugR
|
fdf0e387a1ade48d7c308e00d2c9167d3d82ac30
|
6c0819dfb9c6208dab3a4f274861d87d83d0df4e
|
refs/heads/master
| 2023-05-27T15:10:03.609547
| 2021-04-26T20:24:17
| 2021-04-26T20:24:17
| 141,144,094
| 1
| 1
| null | 2023-05-16T18:40:58
| 2018-07-16T13:42:02
|
R
|
UTF-8
|
R
| false
| false
| 16,295
|
r
|
debugVariable.R
|
# Copyright (C) President and Fellows of Harvard College and
# Trustees of Mount Holyoke College, 2020, 2021.
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
###############################################################################
# === VARIABLE =============================================================== #
#' debug.variable
#'
#' debug.variable shows all values that a particular variable has during
#' execution of a script.
#'
#' For each variable queried, debug.variable returns a data frame of all
#' instances (data nodes) of that variable.
#' Each data frame contains the following columns:
#' \itemize{
#' \item value: The value of the variable.
#' \item container: The type of the container of the variable.
#' \item dimension: The size of the container.
#' \item type: The data type(s) contained within the container.
#' \item scriptNum: The script number the variable is associated with.
#' \item scriptName: The name of the script the variable or file is associated with.
#' \item startLine: The line number the variable is associated with.
#' \item code: The code this variable is associated with.
#' }
#'
#' @param ... The variable names to be queried.
#' @param val.type If not "all", this filters the results to contain
#' only instances where the valType (container or type) has the
#' queried type. Only one type may be queried per function call.
#' @param script.num The script number of the queried variables. Defaults to "all".
#' @param all If TRUE, results for all variables of the specified script will be
#' returned.
#' @param showType If TRUE, variable container, dimension, and type are displayed.
#'
#'
#' @return debug.variable returns a list of data frames showing all instances of each variable queried.
#'
#' @examples
#' \dontrun{
#' prov.debug.run("test.R")
#' debug.variable(x)
#' debug.variable(all = TRUE)
#' debug.variable("a", b, "x", val.type = "logical")
#' debug.variable("a", "b", x, script.num = 3)
#' }
#'
#' @export
#' @rdname debug.line
debug.variable <- function(..., val.type = "all", script.num = "all",
all = FALSE, showType = FALSE)
{
# CASE: no provenance
if(!.debug.env$has.graph)
stop("There is no provenance.")
# STEP: get all possible variables
# data nodes must have type = "Data" or "Snapshot" to be considered a variable
# columns: d.id, p.id, name, valType, startLine, scriptNum
data.nodes <- .extract.vars(.debug.env$data.nodes)
pos.vars <- .get.pos.var(data.nodes)
# CASE: no variables
if(is.null(pos.vars)) {
cat("There are no variables.\n")
return(invisible(NULL))
}
# STEP: get user's query
# columns: name, valType, startLine, scriptNum
if(all)
query.vars <- unique(pos.vars$name)
else
query.vars <- .flatten.args(...)
query <- .get.query.var(query.vars, val.type = val.type, script.num = script.num)
# STEP: get valid queries
valid.queries <- .get.valid.query.var(pos.vars, query, forward = FALSE)
# CASE: no valid queries
if(is.null(valid.queries)) {
cat("No valid queries.\n\n")
.print.pos.options(pos.vars[ , c("name", "startLine", "scriptNum", "scriptName")])
return(invisible(NULL))
}
# STEP: extract name and valType columns
valid.queries <- valid.queries[ , c("name", "valType")]
valid.queries <- unique(valid.queries)
# STEP: for each valid query, form table for user output
output <- lapply(c(1:nrow(valid.queries)), function(i) {
return(.get.output.var(valid.queries[i, ]))
})
names(output) <- valid.queries$name
.print.variable(output, showType)
return(invisible(output))
}
#' For each possible data node find its corresponding procedure node.
#' Function shared with debug.lineage
#' columns: d.id, p.id, name, valType, startLine, scriptNum, scriptName
#'
#' @param data.nodes A table of all possible data nodes
#'
#' @return The table of all possible data nodes, with the necessary fields found,
#' or NULL when there are no data nodes.
#' columns: d.id, p.id, name, value, valType, startLine, scriptNum, scriptName
#'
#' @noRd
.get.pos.var <- function(data.nodes)
{
  # CASE: no data nodes/variables
  if (nrow(data.nodes) == 0)
    return(NULL)

  # From the data nodes, keep columns: id, name, value, valType,
  # renaming id to d.id to distinguish it from procedure node ids.
  data.nodes <- data.nodes[ , c("id", "name", "value", "valType")]
  colnames(data.nodes) <- c("d.id", "name", "value", "valType")

  # For each data node, get the corresponding procedure node(s).
  # Some nodes may have multiple proc nodes associated with them
  # (files, url, fromEnv nodes, etc.)
  rows <- lapply(seq_len(nrow(data.nodes)), function(i)
  {
    # get id and row from the data nodes table
    d.fields <- data.nodes[i, ]
    d.id <- d.fields$`d.id`

    # the procedure node(s) that produced/used this data node
    p.id <- .get.p.id(d.id)

    # For each proc node, pull id, startLine, scriptNum, scriptName
    # and cbind with the row from the data nodes table.
    row <- lapply(p.id, function(id) {
      p.fields <- .debug.env$proc.nodes[.debug.env$proc.nodes$id == id,
                                        c("id", "startLine", "scriptNum", "scriptName")]
      cbind(d.fields, p.fields, stringsAsFactors = FALSE)
    })

    # collapse multiple rows into a single data frame
    if (length(row) == 1)
      row[[1]]
    else
      .form.df(row)
  })

  # bind all per-node results into a single data frame
  rows <- .form.df(rows)

  # rename and rearrange columns
  colnames(rows) <- c("d.id", "name", "value", "valType", "p.id", "startLine", "scriptNum", "scriptName")
  rows <- rows[ , c("d.id", "p.id", "name", "value", "valType", "startLine", "scriptNum", "scriptName")]
  return(rows)
}
#' Get the user's queries, bound into a data frame.
#' Function shared with debug.lineage and debug.view
#' columns: name, valType, startLine, scriptNum
#'
#' @param query.vars The queried variables/data node names.
#' @param val.type valType queries, if any ("all" matches every type).
#' @param start.line Line number queries, if any ("all" expands to every
#' start line on which the queried node appears; NA if none are found).
#' @param script.num The script number query ("all" expands to every
#' script in the provenance).
#'
#' @return A data frame of the user's queries, one row per combination of
#' (name, valType, startLine, scriptNum), or NULL if nothing was queried.
#' columns: name, valType, startLine, scriptNum
#'
#' @noRd
.get.query.var <- function(query.vars, val.type = "all", start.line = "all", script.num = "all")
{
  # CASE: no queried variables
  if(is.null(query.vars))
    return(NULL)

  # script.num == "all": expand to every script number in the provenance
  if(tolower(script.num[1]) == "all")
    script.num <- c(1:nrow(provParseR::get.scripts(.debug.env$prov)))

  # get all queries for each queried node & combine
  queries <- lapply(query.vars, function(var)
  {
    q.lines <- start.line

    # start.line = "all"
    # get all start lines for the node queried. leave NA if none found
    if(!is.na(q.lines[1]) && tolower(q.lines[1]) == "all")
    {
      # get the data node ids recorded under this variable name
      d.id <- .debug.env$data.nodes$id[.debug.env$data.nodes$name == var]

      if(length(d.id) == 0) {
        q.lines <- NA
      }
      else {
        # get corresponding proc node ids & their start lines
        q.lines <- lapply(d.id, function(id) {
          p.id <- .get.p.id(id)
          lines <- sapply(p.id, function(id) {
            return(.debug.env$proc.nodes$startLine[.debug.env$proc.nodes$id == id])
          })
          return(lines)
        })
        q.lines <- unique(unlist(q.lines))
      }
    }

    # Cross start lines with script numbers: every queried line is
    # paired with every queried script number.
    query.lines <- rep(q.lines, length(script.num))
    query.scripts <- rep(script.num, each = length(q.lines))

    # Cross the valType query with the (line, script) pairs so every
    # combination of the three dimensions appears exactly once.
    length.scripts <- length(query.scripts)
    length.types <- length(val.type)
    query.lines <- rep(query.lines, each = length.types)
    query.scripts <- rep(query.scripts, each = length.types)
    query.types <- rep(val.type, length.scripts)

    # replicate var query to match length of other columns
    vars <- rep(var, length(query.lines))

    # combine each column into a table
    query.table <- data.frame(vars, query.types, query.lines, query.scripts, stringsAsFactors = FALSE)
    names(query.table) <- c("name", "valType", "startLine", "scriptNum")
    return(query.table)
  })
  queries <- .form.df(queries)

  # return unique rows (queries)
  return(unique(queries))
}
#' Get valid queries.
#' Function shared with debug.lineage and debug.view
#'
#' @param pos.nodes Table of possible data nodes.
#' columns: d.id, p.id, name, valType, startLine, scriptNum
#' @param query The user's queries.
#' columns: name, valType, startLine, scriptNum
#' @param forward For lineage queries. This determines which d.id is returned
#' when no start line was queried: the first matching node for forward
#' lineage, the last for backward lineage.
#'
#' @return The valid queries, or NULL if none are valid.
#' columns: d.id, name, valType, startLine, scriptNum, scriptName
#'
#' @noRd
.get.valid.query.var <- function(pos.nodes, query, forward = FALSE)
{
  # CASE: no queries
  if(is.null(query))
    return(NULL)

  # STEP: check validity of each query (row)
  query.indices <- c(1:nrow(query))

  # (for forward/backward lineage queries)
  # store id of valid data nodes when found
  # this is so that in cases where no start.line is searched for,
  # we will know which node id to return
  valid.d.id <- c()

  # This is akin to a loop where, for every query (row), TRUE or FALSE
  # is returned; TRUE corresponds to a valid query. The logical vector is
  # used later to extract the valid rows from the table of queries.
  # NOTE: this relies on sapply evaluating queries in order, because a
  # matching d.id is appended to valid.d.id via <<- as each query passes,
  # and the two are later recombined positionally.
  valid.indices <- sapply(query.indices, function(i)
  {
    # extract individual components of the query
    query.var <- query$name[i]
    query.valType <- query$valType[i]
    query.line <- .to.int(query$startLine[i])
    query.script <- .to.int(query$scriptNum[i])

    # CASE: line or script number is not an int
    if(is.null(query.line) || is.null(query.script))
      return(FALSE)

    # QUERY: filter by node name and script num
    subset <- pos.nodes[pos.nodes$name == query.var &
                        pos.nodes$scriptNum == query.script, ]
    subset <- .remove.na.rows(subset)

    # CASE: no row with queried node name found - return false
    if(nrow(subset) == 0)
      return(FALSE)

    # QUERY: filter by valType, if queried valType is not "all"
    if(tolower(query.valType) != "all")
    {
      # get the regex form for the valType query (substring match)
      query.valType <- paste("*", query.valType, "*", sep="")

      # extract the cells where the queried valType can be found
      subset <- subset[grep(query.valType, subset$valType), ]
      subset <- .remove.na.rows(subset)

      # CASE: no nodes with queried valType found - return false
      if(nrow(subset) == 0)
        return(FALSE)
    }

    # (for lineage queries)
    # QUERY: start line queried is "all" or NA,
    # find the id of the node to be used
    query.line.int <- .to.int(query.line)
    if(is.null(query.line.int))
      return(FALSE)
    query.line <- query.line.int

    if(is.na(query.line.int))
    {
      # extract data node id
      d.id <- subset$`d.id`

      # find the id of the node to be used
      # forward lineage - get first node
      # backwards lineage - get last node
      if(nrow(subset) == 1)
        valid.d.id <<- append(valid.d.id, d.id)
      else if(forward)
        valid.d.id <<- append(valid.d.id, d.id[1])
      else
        valid.d.id <<- append(valid.d.id, d.id[length(d.id)])

      # node is found - return true
      return(TRUE)
    }

    # QUERY: search for queried start line
    subset <- subset[subset$startLine == query.line, ]
    subset <- .remove.na.rows(subset)

    # CASE: start line not found
    if(nrow(subset) == 0)
      return(FALSE)

    # node found: record data node id
    valid.d.id <<- append(valid.d.id, subset$`d.id`)
    return(TRUE)
  })

  # STEP: extract valid queries
  valid.queries <- query[valid.indices, ]
  valid.queries <- .remove.na.rows(valid.queries)

  # CASE: no valid queries
  if(nrow(valid.queries) == 0)
    return(NULL)

  # STEP: bind valid data node id column to valid queries
  # (valid.d.id is in the same order as the TRUE entries of valid.indices)
  valid.queries <- cbind("d.id" = valid.d.id,
                         valid.queries,
                         stringsAsFactors = FALSE)
  return(valid.queries)
}
#' Get each instance of the queried data node/variable name, bound into a data frame.
#'
#' @param query A query. Must be valid (so at least one matching node exists).
#' columns: name, valType
#'
#' @return A data frame of all instances of the queried data node.
#' columns: value, container, dimension, type, scriptNum, scriptName, startLine, code
#'
#' @noRd
.get.output.var <- function(query)
{
  pos.data <- .debug.env$data.nodes
  pos.proc <- .debug.env$proc.nodes

  # STEP: from all data nodes, get nodes with the queried name
  # extract columns: id, value, valType
  data.nodes <- pos.data[pos.data$name == query$name,
                         c("id", "value", "valType")]

  # STEP: extract nodes with the queried valType, if not "all".
  # The valType is matched as a substring (regex wildcards on both sides).
  if (tolower(query$valType) != "all") {
    query.valType <- paste("*", query$valType, "*", sep="")
    data.nodes <- data.nodes[grep(query.valType, data.nodes$valType), ]
  }

  # STEP: for each data node, assemble the val type columns
  # and the fields from the corresponding procedure node(s)
  rows <- lapply(seq_len(nrow(data.nodes)), function(i)
  {
    # STEP: get row from data nodes
    # columns: id, value
    data.fields <- data.nodes[i, c("id", "value")]

    # STEP: get val type columns from provParseR
    # columns: container, dimension, type
    valType.fields <- provParseR::get.val.type(.debug.env$prov, data.fields$id)
    valType.fields <- valType.fields[ , c("container", "dimension", "type")]

    # STEP: get corresponding procedure node id (could have multiple)
    p.id <- .get.p.id(data.fields$id)

    # STEP: get fields from proc nodes
    # columns: scriptNum, scriptName, startLine, code
    # cbind with data.fields and valType.fields
    row <- lapply(p.id, function(id)
    {
      # get and rename proc.fields (the proc node's "name" is the code)
      proc.fields <- pos.proc[pos.proc$id == id, c("scriptNum", "scriptName",
                                                   "startLine", "name")]
      colnames(proc.fields) <- c("scriptNum","scriptName","startLine","code")

      # cbind with data.fields and valType.fields, then drop the id (first) column
      fields <- cbind(data.fields, valType.fields, proc.fields, stringsAsFactors = FALSE)
      fields[ ,-1]
    })

    # if there are multiple rows, combine into a data frame
    if (length(row) == 1)
      row[[1]]
    else
      .form.df(row)
  })

  # STEP: bind rows into data frame, return
  return(.form.df(rows))
}
#' Prints the instances of each variable queried.
#'
#' @param output list of data frames, one per valid variable queried
#' (columns include value, startLine, scriptNum, code and the valType
#' columns container/dimension/type).
#' @param showType if TRUE, container, dimension, and type information for each
#' variable is shown.
#'
#' @noRd
.print.variable <- function(output, showType) {
  # print script numbers, if there are multiple scripts
  num.scripts <- .print.script.nums()

  # maximum number of code characters printed before truncating with "..."
  max.code.chars <- 50

  # print details for each query
  for (i in seq_along(output)) {
    # print variable name
    cat(paste("Var:", names(output[i]), "\n"))

    # one line per recorded instance of the variable
    for (j in seq_len(nrow(output[[i]]))) {
      # if only one script, print just the line number;
      # otherwise prefix it with the script number
      if (num.scripts == 1) {
        cat(paste("\t", output[[i]]$startLine[j], ": ", sep=""))
      }
      else {
        cat(paste("\t", output[[i]]$scriptNum[j], ", ",
                  output[[i]]$startLine[j], ": ", sep=""))
      }

      # only the first line of a multi-line statement is shown
      tempCode <- strsplit(output[[i]]$code[j], "\n")

      # print value and line of code, shortening the code if too long
      if (nchar(tempCode[[1]][1]) > max.code.chars)
        cat(paste("\t", output[[i]]$value[j], "\t",
                  substring(tempCode[[1]][1], 1, max.code.chars - 3), "...\n"))
      else
        cat(paste("\t", output[[i]]$value[j], "\t", tempCode[[1]][1], "\n"))

      # print container/dimension/type info, if requested
      # (columns 2:4 of the output table)
      if (showType) {
        print(output[[i]][j, c(2:4)], right = FALSE)
      }
    }
  }

  if (!showType)
    cat("\nRerun with showType = TRUE to see more detailed variable information.\n")
}
|
121b2dc21d108d97e2bb65eaf0b6a9468abd4fe0
|
d2f39a2258dbe6253bc28fd00717a67b131751f4
|
/man/Obs.Rd
|
ba75600458d61d7ce09919cbe1b5ef821a32b6c9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
andrewzm/MVST
|
6e5d9d5c84ba0d28e38fdb69b12cfa8ba1bcc45f
|
2bf0835e66e04e120f78fe8673afe3dd9d6f42c0
|
refs/heads/master
| 2022-09-29T23:40:39.048820
| 2022-09-15T21:37:50
| 2022-09-15T21:37:50
| 20,478,703
| 10
| 9
| null | 2018-10-18T14:50:36
| 2014-06-04T10:13:03
|
R
|
UTF-8
|
R
| false
| true
| 1,350
|
rd
|
Obs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllConstructor.R
\name{Obs}
\alias{Obs}
\title{Observation block}
\usage{
Obs(df, name = "Obs", remove_cross_ins = 0, ...)
}
\arguments{
\item{df}{a data frame which should contain at least 5 entries, \code{x,y,t,z} and \code{std} which denote the horizontal, vertical and temporal indices of the observations, the value and error respectively. Alternatively this could be a path name.}
\item{name}{the name of the observation process}
\item{remove_cross_ins}{removes data outside a circle centred at zero with specified radius. Convenient when working with satellite data in polar stereographic projection when some cross-ins are detected.}
\item{...}{other arguments passed on to \code{preprocess_obs}}
}
\value{
Object of class \code{Obs} (which inherits from class \code{block} and is thus also a block)
}
\description{
This function initialises an object of class \code{Obs} which defines an observation data set. By default, this is for observations with negligible spatial footprint. For larger supports, use \code{Obs_poly}.
}
\examples{
O <- Obs(df=data.frame(x=runif(5),y=runif(5),t=c(1,1,1,2,2),z=runif(5),std=runif(5)))
print(O)
plot(subset(O,t==1),"z",pt_size=4)
}
\keyword{Observations,}
\keyword{block}
\keyword{change}
\keyword{of}
\keyword{support,}
|
77763f72dd440a6b8007d2971ed21ca0d6ba06df
|
29a1809b5307273dd75adab4d9214b8ba7a191d1
|
/R/rlapjv.R
|
7e57c52f24f28254e7353190e9d16782762f431f
|
[
"BSD-2-Clause"
] |
permissive
|
bkj/rlapjv
|
ff28796904b9332bb29bd6dee8b2b7f3e2398cd0
|
e01f65db4cca0898967ac87d9f8ccfcbf3a149c5
|
refs/heads/master
| 2020-03-30T20:41:23.912803
| 2018-10-01T20:49:14
| 2018-10-01T20:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,212
|
r
|
rlapjv.R
|
#' @title Solves the linear assignment problem using the Jonker-Volgenant algorithm
#'
#' @description Solves a dense linear assignment problem: given a
#' non-negative cost matrix, find the one-to-one assignment of rows to
#' columns that minimizes (or, with \code{maximize = TRUE}, maximizes)
#' the total cost.
#'
#' @param cost A non-negative matrix-like object that can be coerced to a matrix
#' @param maximize If FALSE (default) then costs are minimized and if TRUE the
#'                 costs are maximized
#'
#' @return The assignment of rows to columns as an integer vector
#'
#' @export
lapjv <- function(cost, maximize = FALSE) {
  dense_cost <- as.matrix(cost)
  cpp_lapjv(dense_cost, maximize)
}
#' @title Solves the linear assignment problem using the LAPMOD algorithm
#'
#' @description Solves a sparse linear assignment problem whose cost matrix
#' is supplied in compressed form: only the finite entries are passed,
#' together with the row-start indices and the column index of each entry.
#'
#' @param n number of rows in the cost matrix
#' @param cc vector of all finite elements of the assignment cost matrix
#' @param ii vector of indices of the zero indexed row starts in cc. The following must hold
#'           ii[1] = 0 and ii[n+2] = length(cc).
#' @param kk 0-based column numbers for each finite cost in the matrix,
#'           i.e., kk must be in 0:(nrow(.)-1).
#' @param maximize If FALSE (default) then costs are minimized and if TRUE the
#'                 costs are maximized
#'
#' @return The assignment of rows to columns as an integer vector
#'
#' @export
lapmod_index <- function(n, cc, ii, kk, maximize = FALSE) {
  # NOTE(review): a previously disabled warning stated "Currently does not
  # produce expected answers for all matrices" - verify results on known cases.
  cpp_lapmod(n, cc, ii, kk, maximize)
}
#' @title Solves the linear assignment problem using the LAPMOD algorithm
#'
#' @description Solves a sparse linear assignment problem for a
#' column-compressed sparse matrix, reading the CsparseMatrix slots
#' directly (\code{x} = values, \code{p} = pointers, \code{i} = 0-based
#' indices) and forwarding them to the LAPMOD implementation.
#'
#' @param sparse_matrix A non-negative CsparseMatrix object from the Matrix package
#' @param maximize If FALSE (default) then costs are minimized and if TRUE the
#'                 costs are maximized
#'
#' @return The assignment of rows to columns as an integer vector
#'
#' @export
lapmod <- function(sparse_matrix, maximize = FALSE){
  # NOTE(review): a previously disabled warning stated "Currently does not
  # produce expected answers for all matrices" - verify results on known cases.
  n <- nrow(sparse_matrix)
  cpp_lapmod(n, sparse_matrix@x,
             sparse_matrix@p, sparse_matrix@i, maximize)
}
|
1764160e0b5339780fc3d45575b8c1a5be8615a6
|
09a76c99e0bfd15cdd2740e2471aceab344d4e60
|
/statistikdeskriptif.R
|
177d79e27768fc128591ab1326ac3b60c87d0a0d
|
[] |
no_license
|
dedylesmana/stock_analysis
|
208f8981e64e7c45fa60b033b931c5095e94cadc
|
58a19dd63883ebc4dc0723c00a5b8e77b21c1717
|
refs/heads/main
| 2023-08-22T00:06:38.692799
| 2021-10-13T20:57:09
| 2021-10-13T20:57:09
| 416,857,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
statistikdeskriptif.R
|
# Panel data regression
# Load packages and read the input data
# NOTE(review): installing packages from inside an analysis script re-runs
# the installation on every execution; consider installing once, interactively.
install.packages("devtools")
library(devtools)
install_github("Displayr/flipPlots")
devtools::install_github("Displayr/flipTransformations")
# NOTE(review): devtools is already attached above and flipPlots was already
# installed from GitHub - the repeated install/require calls below look redundant.
require(devtools)
install_github("Displayr/flipPlots")
install.packages("flipPlots")
library(flipPlots)
library(dplyr)
library(data.table)
library(dtplyr)
# Read the semicolon-separated panel data set, chosen interactively by the user
otomotifdatapanel=read.csv(file.choose(),sep=";")
View(otomotifdatapanel)
str(otomotifdatapanel)
# Treat the year column (Tahun) as a categorical variable
otomotifdatapanel$Tahun=as.factor(otomotifdatapanel$Tahun)
names(otomotifdatapanel)
head(otomotifdatapanel,48)
summary(otomotifdatapanel)
# Install and load the modelling packages
install.packages('plm')
library(lmtest)
install.packages('randtests')
library(randtests)
library(ggplot2)
library(plm)
library(readxl)
# PLOT DATA
# NOTE(review): the named argument `HargaSaham = 30` in the two qplot() calls
# does not match any qplot() parameter - possibly `bins = 30` was intended; confirm.
qplot(data = otomotifdatapanel,HargaSaham ,fill = KodeEmiten,HargaSaham = 30)
qplot(data = otomotifdatapanel,HargaSaham ,fill = Tahun,HargaSaham = 30)
ggplot (otomotifdatapanel, aes (x=ROA, y=HargaSaham ) ) + # build the ggplot2 scatter plot
geom_point (aes(size=KodeEmiten, col=Tahun))
# Stock price (HargaSaham) against interest rate (SukuBunga)
ggplot(otomotifdatapanel,aes(x=SukuBunga,y=HargaSaham)) +
geom_point(aes(size=KodeEmiten,col=Tahun))
|
98371e3ec8c12af02e635d0e3111ad76c941eecf
|
4b48647555feaac4cbb9bb4864db20e6e40a8980
|
/man/ggside-ggproto-geoms.Rd
|
f0d4cb267ab8d6795febc2cfeb7f6e63bbd31b43
|
[
"MIT"
] |
permissive
|
seifudd/ggside
|
8d9fdca5b042f9528c5dc4ef5ce0d7f64537f730
|
442c83db4cca57bc9cc962be563fbd7df0463d86
|
refs/heads/master
| 2023-07-12T20:29:20.936007
| 2021-08-16T19:30:55
| 2021-08-16T19:30:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,538
|
rd
|
ggside-ggproto-geoms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aab-other_utils.r, R/geom-sidebar.r,
% R/geom-sideboxplot.r, R/geom-sidecol.r, R/geom-sidedensity.r,
% R/geom-sideline.r, R/geom-sidepath.r, R/geom-sidepoint.r,
% R/geom-sidesegment.r, R/geom-sidetext.r, R/geom-sidetile.r,
% R/geom-sideviolin.r, R/ggside-ggproto.r
\docType{data}
\name{use_xside_aes}
\alias{use_xside_aes}
\alias{use_yside_aes}
\alias{parse_side_aes}
\alias{GeomXsidebar}
\alias{GeomYsidebar}
\alias{GeomXsideboxplot}
\alias{GeomYsideboxplot}
\alias{GeomXsidecol}
\alias{GeomYsidecol}
\alias{GeomXsidedensity}
\alias{GeomYsidedensity}
\alias{GeomXsideline}
\alias{GeomYsideline}
\alias{GeomXsidepath}
\alias{GeomYsidepath}
\alias{GeomXsidepoint}
\alias{GeomYsidepoint}
\alias{GeomXsidesegment}
\alias{GeomYsidesegment}
\alias{GeomXsidetext}
\alias{GeomYsidetext}
\alias{GeomXsidetile}
\alias{GeomYsidetile}
\alias{GeomXsideviolin}
\alias{GeomYsideviolin}
\alias{ggside-ggproto-geoms}
\title{Extending base ggproto classes for ggside}
\usage{
use_xside_aes(data)
use_yside_aes(data)
parse_side_aes(data, params)
}
\arguments{
\item{data}{data passed internally}
\item{params}{params available to ggproto object}
}
\value{
ggproto object that is usually passed to \link[ggplot2]{layer}
}
\description{
These ggproto classes are slightly modified from their
respective inherited \link[ggplot2]{ggproto} class. The
biggest difference is exposing 'x/yfill', 'x/ycolour', and
'x/ycolor' as viable aesthetic mappings.
}
\keyword{datasets}
|
7ddfe1c52c936a6c411c4c149d9034e2f056a35d
|
c280e616fb69fef82ca78a2a9642232e98026277
|
/man/FEM-package.Rd
|
8aeceb370e0f64ad26f46e35a9f02d96f76a0f32
|
[] |
no_license
|
zyangx/FEM
|
535f730381722111fc24471ca6ded09515971406
|
7e5dfccd696c3b1db01f24cb5157a651e1f2f49c
|
refs/heads/master
| 2022-09-11T11:21:59.971018
| 2020-06-04T12:28:56
| 2020-06-04T12:28:56
| 267,861,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,738
|
rd
|
FEM-package.Rd
|
\name{FEM-package}
\alias{FEM-package}
\alias{FEM}
\docType{package}
\title{
Identification of interactome hotspots of differential promoter DNA methylation
and differential expression
}
\description{
The FEM package performs a systems-level integrative analysis of DNA
methylation and gene expression. It seeks modules of functionally
related genes which exhibit differential promoter DNA methylation and
differential expression, where an inverse association between promoter
DNA methylation and gene expression is assumed. For full details, see
Jiao et al Bioinformatics 2014.
}
\details{
\tabular{ll}{
Package: \tab FEM\cr
Type: \tab Package\cr
Version: \tab 2.1.4\cr
Date: \tab 2015-03-27\cr
License: \tab GPL >2 \cr
}
}
\author{
"Yinming Jiao"<20907099@zju.edu.cn>, "Andrew E Teschendorff"<andrew@picb.ac.cn>
}
\references{
1) Jiao Y, Widschwendter M, Teschendorff AE. A systems-level integrative
framework for genome-wide DNA methylation and gene expression data
identifies differential gene expression modules under epigenetic
control. Bioinformatics 2014 Aug 15;30(16):2360-2366.
2) Jones A, Teschendorff AE, Li Q, Hayward JD, Kannan A, et al. (2013) Role of dna methylation and epigenetic silencing of hand2 in endometrial cancer development. PLoS Med 10:e1001551.
3) Reichardt J, Bornholdt S (2006) Statistical mechanics of community detection. Phys Rev E 74:016110. doi:10.1103/PhysRevE.74.016110. URL http://link.aps.org/doi/10.1103/PhysRevE.74.016110.
4) West J, Beck S, Wang X, Teschendorff AE (2013) An integrative network algorithm identifies age-associated differential methylation interactome
hotspots targeting stem-cell differentiation pathways. Sci Rep 3:1630.
}
\keyword{ package }
\seealso{
}
\examples{
}
|
42a2cd8f4c26971550f6750f73f062cf1aa77346
|
c51347680754745733293e00aacf7b633334c1fc
|
/R/zenaz.R
|
33425825675c4f28bde59d7d12c7989383d2ff69
|
[] |
no_license
|
cran/YplantQMC
|
771c341d00e410a0e61dbdadc02af8866d5cd198
|
dc62bfc247ba9d6dd92498e8afa00d511a36e00e
|
refs/heads/master
| 2021-01-21T21:47:33.241377
| 2016-05-23T06:34:50
| 2016-05-23T06:34:50
| 17,694,152
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,439
|
r
|
zenaz.R
|
#'Calculates position of the sun
#'
#'@description Calculates the zenith and azimuth angle of the position of the sun, based
#'mostly on routines from Iqbal (1983).
#'
#'
#'By default, it is assumed that the time of day is not given in local apparent
#'time (LAT, also known as 'solar time'). To convert the standard time to LAT,
#'the longitude of the location, and the longitude of the nearest time zone
#'border must be given.
#'
#'Alternatively, use \code{LAT=TRUE} to specify that the time of day is in LAT
#'(that is, solar maximum occurs exactly at noon).
#'
#'The user can specify a number of timesteps (\code{KHRS}), so that the solar
#'positions are calculated for the midpoint of each timestep (this is used
#'within YplantQMC). Alternatively, specify \code{timeofday} directly.
#'
#'@param year YYYY - to account for eccentricity (small effect).
#'@param month Month number
#'@param day Day of month number
#'@param lat Latitude, degrees.
#'@param long Longitude, degrees. Optional (only used for local apparent time
#'correction.)
#'@param tzlong Longitude of the nearest timezone border
#'@param KHRS Number of timesteps in a day (optional). See Details.
#'@param timeofday Optional, time of day (in hours) (a vector of any length) to
#'calculate the position of the sun
#'@param LAT Logical (default=FALSE). Are the times of day given in 'local
#'apparent time'?
#'@return A list with the following components: \describe{
#'\item{list("hour")}{Time in decimal hours} \item{list("altitude")}{Solar
#'altitude (degrees)} \item{list("azimuth")}{Solar azimuth (degrees. N=0,
#'E=90)} \item{list("daylength")}{Day length in hours}
#'\item{list("sunset")}{Time of sunset (hours)} \item{list("zenrad")}{Solar
#'zenith position (radians)} }
#'@note This routine is no doubt less accurate that the NOAA routines provided
#'by the \code{solarpos} function in the \code{maptools} package. It is easier
#'to use, though.
#'@author Remko Duursma, based mostly on original FORTRAN code by Belinda
#'Medlyn.
#'@seealso \code{\link{setHemi}}
#'@references Iqbal, B., 1983. An Introduction to Solar Radiation. Academic
#'Press, New York, 386 pp
#'@keywords misc
#'@examples
#'
#'
#'# Simple use
#'zenaz(month=8, day=16, timeofday=12, lat=-33)
#'
#'# Get half-hourly solar positions
#'hourpos <- zenaz(month=2, day=16, KHRS=48, lat=-33, long=155, tzlong=150)
#' with(hourpos, plot(hour, altitude, type='o',ylab=expression(Altitude~(degree))))
#'
#'@export
zenaz <- function(year=2012, month=4, day=1,
                  lat= -33.6, long=150.7,
                  tzlong=long, KHRS=24, timeofday=NA, LAT=FALSE){

  # Private function SUN: computes solar declination, equation of time,
  # day length and sunset time for the given day number (since 1900),
  # latitude (radians) and time-zone correction (hours).
  # Based mostly on routines from Iqbal (1983).
  SUN <- function(DOY, ALAT, TTIMD){

    # Compatability issue: day length is always computed on a 24-hour
    # basis, regardless of the number of timesteps requested by the caller.
    KHRS <- 24
    HHRS <- 12

    # Private helpers (T = Julian centuries since 1900, D = days since 1900):
    # eccentricity of the Earth's orbit
    ECCENT <- function(T) 0.01675104 - (4.08E-5 + 1.26E-7*T)*T
    # mean anomaly (radians)
    ANOM <- function(T,D){
      anom <- -1.52417 + (1.5E-4 + 3.0E-6*T)*T^2
      anom <- anom + 0.98560*D
      if(anom > 360)
        anom <- anom - 360.0*floor(anom/360.0)
      return(anom * pi/180)
    }
    # obliquity of the ecliptic (radians)
    EPSIL <- function(T)(23.452294- (1.30125E-2+ (1.64E-6-5.03E-7*T)*T)*T)*pi/180
    # longitude of perihelion (radians)
    OMEGA <- function(T,D)(281.22083+ (4.53E-4+3.0E-6*T)*T*T+4.70684E-5*D)*pi/180
    # lunar longitude term (radians), used for the nutation correction
    ALUNAR <- function(T,D) (259.1833+ (2.078E-3+2.0E-6*T)*T*T-0.05295*D)*pi/180
    # End private helpers.

    T <- DOY/36525

    # COMPUTE SOLAR ORBIT
    ECC <- ECCENT(T)
    RM <- ANOM(T,DOY)

    # solve Kepler's equation for the eccentric anomaly with three
    # Newton-Raphson iterations
    E <- RM
    for(IM in 1:3){
      E <- E + (RM- (E-ECC*sin(E)))/ (1-ECC*cos(E))
    }
    V <- 2.0*atan(sqrt((1+ECC)/ (1-ECC))*tan(0.5*E))
    if(V < 0) V <- V + 2*pi
    R <- 1 - ECC*cos(E)   # radius vector (not used further)
    EPS <- EPSIL(T)
    OMEG <- OMEGA(T,DOY)

    # COMPUTE NUTATION TERMS
    LUNLON <- ALUNAR(T,DOY)
    NUTOBL <- (2.5583E-3+2.5E-7*T)*cos(LUNLON)*pi/180
    EPS <- EPS + NUTOBL
    NUTLON <- - (4.7872E-3+4.7222E-6*T)*sin(LUNLON)*pi/180

    # COMPUTE SOLAR DECLINATION
    DEC <- asin(sin(EPS)*sin(V+OMEG))

    # COMPUTE EQN OF TIME (in radians here; converted to hours below)
    MLON <- OMEG + RM
    if(MLON < 0)MLON <- MLON + 2*pi
    if(MLON > 2*pi)MLON <- MLON - 2*pi*floor(MLON/(2*pi))
    Y <- (tan(EPS/2))^2
    Y <- (1-Y)/ (1+Y)
    SL <- OMEG + NUTLON + V
    if(SL < 0) SL <- SL + 2*pi
    if(SL > 2*pi)SL <- SL - 2*pi*floor(SL/(2*pi))
    AO <- atan(Y*tan(SL))
    EQNTIM <- AO - MLON
    EQNTIM <- EQNTIM - pi*floor(EQNTIM/pi)
    if(abs(EQNTIM) > 0.9*pi) EQNTIM <- EQNTIM - pi*EQNTIM/abs(EQNTIM)
    AO <- EQNTIM + MLON
    if(AO > 2*pi) AO <- AO - 2*pi*floor(AO/(2*pi))

    # DAY LENGTH
    # Default to NA for polar day/night, when there is no sunrise/sunset.
    # (Previously DAYL and SUNSET were left undefined in that case, making
    # the return() below fail with an "object not found" error.)
    SUNSET <- NA
    DAYL <- NA
    MUM <- cos(ALAT-DEC)
    MUN <- -cos(ALAT+DEC)
    MUA <- 0.0
    REFAC <- 0.0
    UMN <- -MUM*MUN
    if(UMN > 0) REFAC <- 0.05556/sqrt(UMN)   # refraction correction
    if(MUN > MUA) MUA <- MUN
    if(MUM > MUA){
      FRACSU <- sqrt((MUA-MUN)/ (MUM-MUA))
      FRACSU <- 1.0 - 2.0*atan(FRACSU)/pi
      SUNSET <- HHRS*FRACSU
      SUNRIS <- SUNSET
      SUNSET <- SUNSET + REFAC + EQNTIM*HHRS/pi
      SUNRIS <- SUNRIS + REFAC - EQNTIM*HHRS/pi
      SUNSET <- SUNSET + HHRS + TTIMD
      SUNRIS <- HHRS - SUNRIS + TTIMD
      EQNTIM <- EQNTIM*HHRS/pi   # convert equation of time to hours
      DAYL <- SUNSET - SUNRIS
    }
    # NOTE(review): when the branch above is skipped (polar day/night),
    # EQNTIM is returned in radians rather than hours - confirm whether
    # downstream use matters in that case.
    return(list(DEC=DEC,EQNTIM=EQNTIM,DAYL=DAYL,SUNSET=SUNSET))
  }

  # continue zenaz.
  # Day number since 1900-01-01 (+1), the day input expected by SUN().
  DATE <- as.Date(ISOdate(year,month,day))
  DJUL <- as.vector(DATE - as.Date("1900-1-1") + 1)

  k <- pi/180

  # latitude in radians
  ALAT <- lat * k

  # western longitudes given as negative degrees are mapped to 0-360
  if(long < 0){
    long <- 360.0 - long
    tzlong <- 360.0 - tzlong
  }
  ALONG <- long * k
  tzlong <- tzlong * k

  # time difference (hours) between standard time and local apparent time;
  # zero when the times of day are already local apparent (solar) time
  if(!LAT){
    TTIMD <- (24/ (2*pi))*(ALONG - tzlong)
  } else {
    TTIMD <- 0
  }

  # Maestra evaluates solar position mid-timestep (see zenaz subroutine),
  # unless explicit times of day were supplied.
  if(all(is.na(timeofday))){
    HOURS <- seq(from=24/KHRS/2, by=24/KHRS, length=KHRS)
  } else {
    HOURS <- timeofday
    KHRS <- length(timeofday)
  }

  SUNcall <- SUN(DJUL, ALAT, TTIMD)
  DEC <- SUNcall$DEC
  EQNTIM <- SUNcall$EQNTIM
  DAYL <- SUNcall$DAYL
  SUNSET <- SUNcall$SUNSET

  # get solar zenith and azimuth angles for each timestep
  ZEN <- numeric(KHRS)
  AZ <- numeric(KHRS)
  SolarTime <- HOURS - TTIMD - EQNTIM
  for(i in seq_len(KHRS)){
    # hour angle (radians; 15 degrees per hour from solar noon)
    HI <- (pi/180) * (12 - SolarTime[i])*15

    # zenith angle
    ZEN[i] <- acos(sin(ALAT)*sin(DEC) +
                   cos(ALAT)*cos(DEC)*cos(HI))

    # Cosine of the azimuth angle (Iqbal), clamped to [-1, 1] to guard
    # against floating-point overshoot before acos()
    H <- pi/2 - ZEN[i]
    COSAZ <- (sin(H)*sin(ALAT)- sin(DEC)) /
      (cos(H)*cos(ALAT))
    if (COSAZ > 1.0)
      COSAZ <- 1.0
    if (COSAZ < -1.0)
      COSAZ <- -1.0
    AZ[i] <- acos(COSAZ)
    if(SolarTime[i] > 12)AZ[i] <- -AZ[i]
  }

  # timesteps with the sun below the horizon (zenith > 90 deg) become NA
  dfr <- data.frame(zen=ZEN, az= pi - AZ)
  dfr[dfr$zen > pi/2,] <- NA
  dfr <- dfr / k

  # Solar altitude.
  dfr$alt <- 90 - dfr$zen

  return(list(hour=HOURS, altitude=dfr$alt, azimuth=dfr$az,
              daylength=DAYL, sunset = SUNSET, zenrad=k*dfr$zen, LAT=SolarTime))
}
|
1bbb17709d950d4d58d585fbca5b260d4e21fdde
|
79b52d45fc4fa5d55f6564ed0d4921ef6a092dae
|
/man/plotScaleFreeTopology.Rd
|
a1b9b3f0e5ef46a67bb374af5549a5a045c510df
|
[
"MIT"
] |
permissive
|
avcarr2/MetaNetwork_R_package
|
174e5019c9eb4c6672b63c2f6dd77628621faab6
|
b4aa5dbb96d183f169c14a50a603a448a302689b
|
refs/heads/master
| 2023-03-08T00:29:33.846065
| 2021-02-19T17:28:06
| 2021-02-19T17:28:06
| 340,441,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 976
|
rd
|
plotScaleFreeTopology.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WGCNAFunctions.R
\name{plotScaleFreeTopology}
\alias{plotScaleFreeTopology}
\title{Plots scale free topology diagnostic plots.}
\usage{
plotScaleFreeTopology(
fileName = "Results/ScaleFreeTopology.pdf",
powersOutput,
powers,
cex1 = 0.9,
scaleFreeThreshold,
widthInches = 10
)
}
\arguments{
\item{fileName}{preferred file name.}
\item{powersOutput}{Result of \code{WGCNA::pickSoftThreshold}.}
\item{powers}{Vector of powers from 1:user input value}
\item{cex1}{Resizes the final graphics device}
\item{scaleFreeThreshold}{User input value corresponding to the R^2 topology threshold}
\item{widthInches}{Default size of the graphical output}
}
\value{
Creates the diagnostic plot showing the satisfaction of the scale-free
topology and the effect of power on the mean connectivity of the proteins.
#' @importFrom grDevices pdf
}
\description{
Plots scale free topology diagnostic plots.
}
|
bdfdf59715a3683be520f9c76803fbbfe037b264
|
92af37e3c14034ce9b69442317a8e91cacd7584e
|
/man/bc_decomp.Rd
|
05cf1aeffae827f71613422d34f597fb95ac5012
|
[
"MIT"
] |
permissive
|
MarcioFCMartins/blue_carbon
|
948cc59a4651f982fd4f7ed5b1d1db73ed6e3317
|
42d499122e66b19c8f0d04d9cb015bde36fd5c46
|
refs/heads/main
| 2023-08-27T20:13:59.523062
| 2021-10-07T14:30:33
| 2021-10-07T14:30:33
| 339,830,063
| 0
| 0
|
MIT
| 2021-02-17T19:15:08
| 2021-02-17T19:15:07
| null |
UTF-8
|
R
| false
| true
| 945
|
rd
|
bc_decomp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bc_decomp.R
\name{bc_decomp}
\alias{bc_decomp}
\title{bc_decomp}
\usage{
bc_decomp(
data,
sampler_length,
internal_distance,
external_distance,
sampler_diameter,
method = "linear"
)
}
\arguments{
\item{data}{data.frame with the following columns "ID" "cm" "weight" "LOI" "c_org".}
\item{sampler_length}{name of the column with the total length of the sampler tube}
\item{internal_distance}{The length in cm of the part of the sampler left outside of the sediment (from the inside of the sampler).}
\item{external_distance}{The length in cm of the part of the sampler left outside of the sediment (from the outside of the sampler).}
\item{sampler_diameter}{diameter in cm of the sampler}
\item{method}{used to estimate the decompressed depth of each section, "linear" or "exp". Default is "linear".}
}
\description{
Estimates the decompressed (original) depth of each sediment core section from the sampler geometry and observed compaction, using either a linear or an exponential correction.
}
|
2dd897374383003ed717261da1dc0bd62e7bfc71
|
4970b6d208ce9eb89503a88d3957d6e4acbb35c5
|
/Fig1b.R
|
1581e299b7f256143707631709b3d06e159ec2a7
|
[
"MIT"
] |
permissive
|
BICC-UNIL-EPFL/CatalyticHostParasiteCycles
|
908fc18191eb30770a7bb52525c7bd70c6fd5e60
|
35e214ffdbc56bac60db98acabfc6452dc9930dd
|
refs/heads/main
| 2023-04-09T15:37:01.440447
| 2022-10-28T12:04:16
| 2022-10-28T12:04:16
| 558,826,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,245
|
r
|
Fig1b.R
|
# R code to generate the figures of the manuscript
# Energy-harnessing problem solving of primordial life:
# modeling the emergence of cata-lytic host-nested parasite life cycles
# Copyright (c) 2022, Christian Iseli, Bernard Conrad and Magnus Pirovino
#
### setup
dt = 0.01            # integration time step
t = 0                # simulation clock
m = 5997             # number of energy samples processed below
E_lower = 0.5        # lower energy threshold of the virulence switch
E_upper = 0.9        # upper energy threshold of the virulence switch
### Take values of E from Excel file "_1 Figs 1a-b Virulence step & smoothstep 050521",
### Sheet "Calculations", Column E, Row 9:6006
data <- read.table('_1_E_values.txt',header=TRUE,sep='\t',skip=7)
E <- data$E..lhs.    # energy trajectory read from column "E (lhs)"
step_time = 1.5      # duration of the smooth-step transition window
from_below = FALSE   # hysteresis state carried across loop iterations
from_below_integer = 0
t_last_switch = -10  ### initialization of smooth_step_function
smooth_step = 1      # NOTE(review): never read below; the loop assigns `smoothstep` (no underscore)
Step_virulence <- rep(0, m)
Smooth_step_virulence <- rep(0, m)
for (k in 1:m) {
  t <- t + dt
  ### calculate step_virulence:
  below = E[k] < E_lower
  in_between = (E[k] > E_lower) && (E[k] < E_upper)
  # Virulence stays 1 while the state was entered from below and the
  # energy has not yet exceeded E_upper (uses last iteration's from_below).
  if (from_below && (below || in_between)) {
    Step_virulence[k] = 1
  } else {
    # already set by default
    Step_virulence[k] = 0
  }
  ### calculate smooth-step_virulence:
  # Restart the transition clock whenever a threshold crossing flips the state.
  if (E[k] < E_lower && !from_below) {
    t_last_switch = t
  } else if ((E[k] > E_upper) && from_below) {
    t_last_switch = t
  } # else {
  #   t_last_switch = t_last_switch
  # }
  # Cubic smoothstep 3u^2 - 2u^3 over [t_last_switch, t_last_switch + step_time].
  if (t <= t_last_switch) {
    smoothstep = 0
  } else if (t > t_last_switch && (t-t_last_switch) < step_time) {
    smoothstep = 3*((t-t_last_switch)/step_time)^2-2*((t-t_last_switch)/step_time)^3
  } else {
    smoothstep = 1
  }
  # Update the hysteresis state for the next iteration.
  if (from_below && E[k] < E_upper) {
    from_below = TRUE
    from_below_integer = 1
  } else if (E[k] < E_lower) {
    from_below = TRUE
    from_below_integer = 1
  } else {
    from_below = FALSE
    from_below_integer = 0
  }
  # Ramp toward 1 in the virulent state, toward 0 otherwise.
  Smooth_step_virulence[k] = from_below_integer*smoothstep+(1-from_below_integer)*(1-smoothstep)
}
# Axis limits for the two overlaid plots (energy on the left, virulence on the right).
y1_min = -.2
y1_max = 2.3
y2_min = -.25
y2_max = 1.15
pdf(file='_1_Fig1b.pdf',width=12,height=5)
## add extra space to right margin of plot within frame
par(mar=c(5, 4, 4, 6) + 0.1)
## Plot first set of data and draw its axis
plot(E, axes=FALSE, ylim=c(y1_min,y1_max), xlab="", ylab="", type="b", col="green", main="Figure 1b")
box()
abline(h=E_lower, col="darkgreen", lty=3, lwd=2)
abline(h=E_upper, col="darkgreen", lty=2, lwd=2)
axis(2, ylim=c(y1_min,y1_max),col="green",col.axis="green",las=1) ## las=1 makes horizontal labels
mtext("Energy",side=2,line=2.5,col="green")
## Allow a second plot on the same graph
par(new=TRUE)
## Plot the second plot and put axis scale on right
plot(Step_virulence, pch=15, xlab="", ylab="", ylim=c(y2_min,y2_max), axes=FALSE, type="b", col="red")
points(Smooth_step_virulence, type="l", lwd=2, col="orange")
## a little farther out (line=4) to make room for labels
mtext("Step-Virulence",side=4,col="red",line=4)
axis(4, at=c(0,1), ylim=c(y2_min,y2_max), col="red",col.axis="red",las=1)
## Draw the time axis
axis(1,pretty(c(1,length(E)),10))
mtext("Time (Steps)",side=1,col="black",line=2.5)
## Add Legend
legend("bottomleft",horiz=T,legend=c("Energy","Virulence","Smoothed virulence","E_lower","E_upper"),
       text.col=c("green","red","orange","darkgreen","darkgreen"),
       col=c("green","red","orange","darkgreen","darkgreen"),lty=c(1,1,1,3,2),lwd=c(4,4,2,2,2))
dev.off()
###
|
6392eac36ffe35dd425579b69383988fe767ecfb
|
c6ccaabb627f8b29a7cb32c5b3fe19b72d07e188
|
/man/Particle.Rd
|
f72d1b1d8a85ed17ad2529bd1ebeab61841a6963
|
[] |
no_license
|
cran/particle.swarm.optimisation
|
5f5a14944f95bfa095c6088f9fbbbe971095ad42
|
e8d7e4b31817a8be4c2e2a52437397c445ee01a8
|
refs/heads/master
| 2023-04-30T00:25:00.363880
| 2021-05-21T07:00:02
| 2021-05-21T07:00:02
| 369,583,860
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,753
|
rd
|
Particle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/particule.R
\name{Particle}
\alias{Particle}
\title{Particle}
\description{
Class for the Particles used in the Particle Swarm Optimisation, It is call by the Particle Swarm object to make the population.
}
\examples{
# If you use the Particle Swarm Object there is no need to manually create the Particle
# But if you want to use the Particle for another project:
# In this example we use the PSO to solve the following equation:
# a * 5 + b * 25 + 10 = 15
fitness_function <- function(values){
a <- values[1]
b <- values[2]
particule_result <- a*5 + b*25 + 10
difference <- 15 - particule_result
fitness <- 1 - abs(difference)
return(fitness)
}
values_ranges <- list(c(-10^3,10^3),c(-10^3,10^3))
particle_example <- Particle$new(values_ranges = values_ranges,
values = c(0,0),
fitness_function = fitness_function,
acceleration_coefficient = c(0.5,0.5),
inertia = 0.4)
print(particle_example)
particle_example$get_fitness()
print(particle_example)
particle_example$update(c(10,25))
print(particle_example)
}
\section{Active bindings}{
\if{html}{\out{<div class="r6-active-bindings">}}
\describe{
\item{\code{values_ranges}}{(list) max and min for each value of the particle}
\item{\code{values}}{(numeric) values of the particle (his position in space)}
\item{\code{fitness}}{(numeric) fitness of the particle (his score)}
\item{\code{fitness_function}}{(function) function used to find the fitness}
\item{\code{personal_best_values}}{(numeric) Best values of the particle}
\item{\code{personal_best_fitness}}{(numeric) Fitness of the best values}
\item{\code{velocity}}{(numeric) Velocity of the particle (one velocity for each values)}
\item{\code{acceleration_coefficient}}{(numeric) coefficient c1 and c2 (for personal and global best)}
\item{\code{inertia}}{(numeric) inertia of the particle}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{Particle$new()}}
\item \href{#method-get_fitness}{\code{Particle$get_fitness()}}
\item \href{#method-update}{\code{Particle$update()}}
\item \href{#method-update_personal_best_fitness}{\code{Particle$update_personal_best_fitness()}}
\item \href{#method-print}{\code{Particle$print()}}
\item \href{#method-clone}{\code{Particle$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new Particle object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$new(
values_ranges,
values,
fitness_function,
acceleration_coefficient,
inertia
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{values_ranges}}{range for each value of the particle (min and max), his size need to be the same as values. (List)}
\item{\code{values}}{values of the particles. (numeric)}
\item{\code{fitness_function}}{function used to test the Particle and find his fitness. (function)}
\item{\code{acceleration_coefficient}}{a vector of two values, one for c1 (the personal coefficient), and one for c2 (the global coefficient). (numeric)}
\item{\code{inertia}}{The inertia of the particle (the influence of the previous velocity on the next velocity). (numeric)}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{Particle} object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_fitness"></a>}}
\if{latex}{\out{\hypertarget{method-get_fitness}{}}}
\subsection{Method \code{get_fitness()}}{
Calculate the fitness of the particle with the fitness function and save it in self$fitness
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$get_fitness()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
self
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-update"></a>}}
\if{latex}{\out{\hypertarget{method-update}{}}}
\subsection{Method \code{update()}}{
Update Particle's position and velocity.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$update(swarm_best)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{swarm_best}}{the best values of the swarm used to update the velocity}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
self
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-update_personal_best_fitness"></a>}}
\if{latex}{\out{\hypertarget{method-update_personal_best_fitness}{}}}
\subsection{Method \code{update_personal_best_fitness()}}{
Update the Particle's best values and fitness.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$update_personal_best_fitness()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
self
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-print"></a>}}
\if{latex}{\out{\hypertarget{method-print}{}}}
\subsection{Method \code{print()}}{
print the current values of the particle and his fitness
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$print()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Particle$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
b26680ce67b852d69420b2940231e00465c8dc9b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mcmcse/examples/mcmcse-package.Rd.R
|
c371dd78d3e67f36c49947aa8e3824ad48aca23b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
r
|
mcmcse-package.Rd.R
|
library(mcmcse)
### Name: mcmcse-package
### Title: Monte Carlo Standard Errors for MCMC
### Aliases: mcmcse-package mcmcse
### ** Examples
library(mAr)
p <- 3               # dimension of the simulated process
n <- 1e3             # chain length
omega <- 5*diag(1,p) # innovation covariance matrix
## Making correlation matrix var(1) model
set.seed(100)
foo <- matrix(rnorm(p^2), nrow = p)
foo <- foo %*% t(foo)                       # symmetric positive semi-definite
phi <- foo / (max(eigen(foo)$values) + 1)   # scaled so all eigenvalues are < 1
out <- as.matrix(mAr.sim(rep(0,p), phi, omega, N = n))
mcse(out[,1], method = "bart")              # univariate MCSE, method "bart"
mcse.bm <- mcse.multi(x = out)              # multivariate MCSE, default method
mcse.tuk <- mcse.multi(x = out, method = "tukey")  # multivariate MCSE, "tukey"
|
04b39313f3ba94f0ba6c3503dd61d1e268984449
|
77da83540db02cfa5c0caf0e470c2d50e9201e63
|
/DomacaNaloga2/arhiv/tretjaMartin.r
|
4e74521a9fa994200b2ceb1134e821a77291bb7d
|
[] |
no_license
|
tinarazic/machine_learning
|
1d42a8ee5d209a988407e73385a03f4ddd898379
|
9e8f9c73079ae8f8b2fd8e2b4ef35ab0737b2bf2
|
refs/heads/master
| 2021-01-26T03:41:57.676366
| 2020-06-16T21:11:04
| 2020-06-16T21:11:04
| 243,295,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,348
|
r
|
tretjaMartin.r
|
#115 120
# Exercise 3.1.
library(readr)
# Data frame with columns y, x1, x2, x3 used by razvrstitev() below.
podatki31 <- read_csv("podatki31.csv")
# Compute classification thresholds from the global data frame `podatki31`.
#
# For each observation this evaluates -(w0 + w1*x1 + w2*x2) / x3 and splits
# the resulting values by the class label y (-1 / +1).
#
# Bug fix: the original loop referenced a non-existent object `podatki`
# (`dim(podatki)[1]`, `podatki$x1[i]`, ...) although the data frame read
# above is `podatki31`; all accesses now use `podatki31` consistently.
#
# Returns a list of four numbers:
#   max of the negative class, min of the negative class,
#   min of the positive class, max of the positive class.
# (On the quiz the answer is maxzgoraj, i.e. the last element.)
razvrstitev <- function(w0, w1, w2) {
  # Vectorized over all rows; no explicit loop needed.
  vrednosti <- -(w0 + w1 * podatki31$x1 + w2 * podatki31$x2) / podatki31$x3
  negativni <- vrednosti[podatki31$y == -1]  # values of the y == -1 class
  pozitivni <- vrednosti[podatki31$y == 1]   # values of the y == +1 class
  list(max(negativni), min(negativni), min(pozitivni), max(pozitivni))
}
#3.2.
#korak pri številu uteži nima vpliva!
#vir https://www.learnopencv.com/number-of-parameters-and-tensor-sizes-in-convolutional-neural-network/
# https://stackoverflow.com/questions/28232235/how-to-calculate-the-number-of-parameters-of-convolutional-neural-networks
#na stack drugi odgovor!
#nas primer
#input 224,224,1
#conv3-64 [222x222X1]
#conv3-64 [220x220X64]
#pool2-64 110x110x64
#conv3-128 [108x108x64]
#conv3-128 [106x106X128]
#pool2-128 53x53x128
#conv3-256 [51x51x128]
#conv3-256 [49x49x256]
#conv3-256 [47x47x256]
#pool2-256 23.5x23.5x256
#conv3-512 [21.5x21.5x256]
#conv3-512 [19.5x19.5x512]
#conv3-512 [17.5x17.5x512]
#pool2-512 8.75x8.75x512
#conv3-512 [6.75x6.75x512]
#conv3-512 [4.75x4.75x512]
#conv3-512 [2.75x2.75x512]
#pool2-512 1.375x1.375x512
#fc4096
#fc4096
#fc2
# Number of weights of a convolutional layer.
#   k: kernel width (kernels are k x k)
#   n: number of kernels
#   c: number of input channels
stevilo_konvolucija <- function(k, n, c) {
  k * k * n * c
}
# Number of weights of a pooling layer: pooling adds no trainable weights.
stevilo_zbiranje <- function() {
  0
}
# Number of weights between the last conv layer and a fully connected layer.
#   o: width of the previous conv layer's output (o x o)
#   n: number of kernels in the previous conv layer
#   f: number of neurons in the FC layer
stevilo_polnopovezan_konvolucija <- function(o, n, f) {
  f * n * o^2
}
# Number of weights between two fully connected layers.
#   f:  number of neurons in this FC layer
#   f_: number of neurons in the previous FC layer
stevilo_polnopovezan_polnopovezan <- function(f, f_) {
  f_ * f
}
# The input layer carries no trainable weights.
stevilo_input <- function() {
  0
}
# The output layer carries no additional trainable weights.
stevilo_output <- function() {
  0
}
# Output width of a convolutional layer.
#   h1: input width
#   f:  filter width
#   p:  padding (0 in this exercise)
#   s:  stride
hidden_conv <- function(h1, f, p, s) {
  (h1 - f + 2 * p) / s + 1
}
# Output width of a pooling layer.
#   h1: input width
#   f:  pooling window width
#   s:  stride
hidden_pool <- function(h1, f, s) {
  (h1 - f) / s + 1
}
# In our case the padding is 0!
# https://cs231n.github.io/convolutional-networks/
# Total number of trainable weights of the VGG-like network sketched in the
# comments above (sum of the per-layer weight counts; the formulas contain
# no bias terms).
nas_primer <- stevilo_input()+stevilo_konvolucija(3,64,1)+stevilo_konvolucija(3,64,64)+
  stevilo_zbiranje()+
  stevilo_konvolucija(3,128,64)+stevilo_konvolucija(3,128,128)+
  stevilo_zbiranje()+
  stevilo_konvolucija(3,256,128)+
  stevilo_konvolucija(3,256,256)+
  stevilo_konvolucija(3,256,256)+
  stevilo_zbiranje()+
  stevilo_konvolucija(3,512,256)+
  stevilo_zbiranje()+
  stevilo_konvolucija(3,512,512)+
  stevilo_konvolucija(3,512,512)+
  stevilo_konvolucija(3,512,512)+
  stevilo_zbiranje()+
  stevilo_konvolucija(3,512,512)+
  stevilo_konvolucija(3,512,512)+
  stevilo_konvolucija(3,512,512)+
  stevilo_zbiranje()+
  # here the steps (strides) are tied to the pooling!
  stevilo_polnopovezan_konvolucija(1,512,4096)+
  stevilo_polnopovezan_polnopovezan(4096,4096)+
  stevilo_polnopovezan_polnopovezan(4096,2)+
  stevilo_output()
#3.3
library(png)
# Load the quiz image from disk and keep only the RGB channels. (loadImage)
naloziSliko <- function() {
  img <- readPNG("slikaKviz.png")
  img[, , 1:3]
}
# Load the image into the RGB tensor X (X[i, j, 1:3]).
X <- naloziSliko()
# Since the squares have size 5, we search the image for the top corner and then add the offset.
# Find the first pure-blue pixel of an RGB image tensor.
#
# X is a three-dimensional array: X[i, j, k] for pixel (i, j) with channel
# k in 1:3 (R, G, B). The image is scanned row by row and the first pixel
# with R == 0, G == 0 and B > 0 is taken as the corner of the blue square;
# +2 is added to each coordinate to reach its center (squares have size 5).
#
# Returns list(levo, zgoraj) for the first blue pixel, or NULL when the
# image contains no blue pixel. (The original ended with `return(vektor)`,
# an undefined object, which raised an error in the no-match case.)
odkrijModro <- function(X) {
  velikost <- dim(X)
  for (i in seq_len(velikost[1])) {
    for (j in seq_len(velikost[2])) {
      if (X[i, j, 1] == 0 && X[i, j, 2] == 0 && X[i, j, 3] > 0) {
        levo <- i + 2
        zgoraj <- j + 2
        return(list(levo, zgoraj))
      }
    }
  }
  NULL  # no blue pixel found
}
|
cabbf1b9448ae5e48926620f4e521e6944c5c6d5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/modi/examples/TRC.Rd.R
|
bec30ee138bcd8e7b51b90cc2e168019edea58d6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
TRC.Rd.R
|
library(modi)
### Name: TRC
### Title: Transformed rank correlations for multivariate outlier detection
### Aliases: TRC
### ** Examples
# Example data set and sampling weights shipped with the modi package.
data(bushfirem, bushfire.weights)
# Run the TRC outlier-detection procedure with the sampling weights.
det.res <- TRC(bushfirem, weights = bushfire.weights)
# Plot the per-observation distances returned in det.res$dist.
PlotMD(det.res$dist, ncol(bushfirem))
print(det.res)
|
56a1875a84574eafe90e5e90c1efcdf4b85c65d3
|
10558c2626f3e1fc1f19e0a0e5ebd23258e3ec16
|
/man/is_tsdrtools_install.Rd
|
863a1ca0e153bb712632c2f79e68ff9f2e7c60de
|
[
"CC-BY-4.0"
] |
permissive
|
nejerovbo/r-tsdrtools
|
f8589415fea6b9c283114c524003a5570efac63d
|
f0bb10c6b6a9e5a286f8c0da77a1b756026bbf55
|
refs/heads/master
| 2022-12-05T16:54:08.426684
| 2020-08-31T18:21:32
| 2020-08-31T18:21:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 301
|
rd
|
is_tsdrtools_install.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsdrtools-classes.R
\name{is_tsdrtools_install}
\alias{is_tsdrtools_install}
\title{Validate tsdrtools_install}
\usage{
is_tsdrtools_install(x)
}
\arguments{
\item{x}{an object}
}
\description{
Validate tsdrtools_install
}
|
0aafc08550637b7f06050e02c00200fd43d64451
|
d52c1d77744387b94e2266ff2758881547351bfe
|
/worker_dashboard/server.R
|
b9c2063d999ef40e978a283f86504a97939d44a1
|
[
"MIT"
] |
permissive
|
okassi/onlinelabourindex
|
f6727c4d2397ce98876f1d2fdc295d9a300d499a
|
fc11573e619dff456052303f467e37bda92bb7b9
|
refs/heads/master
| 2022-01-26T08:56:34.108692
| 2022-01-17T13:40:47
| 2022-01-17T13:40:47
| 96,996,459
| 0
| 0
| null | 2018-08-09T10:52:18
| 2017-07-12T10:40:32
|
R
|
UTF-8
|
R
| false
| false
| 1,672
|
r
|
server.R
|
## =============================== License ========================================
## ================================================================================
## This work is distributed under the MIT license, included in the parent directory
## Copyright Owner: University of Oxford
## Date of Authorship: 2016
## Author: Martin John Hadley (orcid.org/0000-0002-3039-6849)
## Academic Contact: otto.kassi@oii.ox.ac.uk
## Data Source: https://dx.doi.org/10.6084/m9.figshare.3761562
## ================================================================================
# Package dependencies for the worker-dashboard Shiny server.
library("shiny")
library("rfigshare")
library("lubridate")
library("highcharter")
library("dygraphs")
library("htmltools")
library("tidyverse")
library("shinyBS")
library("shinyjs")
library("leaflet")
library("sf")
library("viridis")
library("rlang")
library("forcats")
library("leaflet.extras") ## Needed for background color of leaflet map
## otto edits
library('XML')
library('RCurl')
library('markdown')
# Shared data preparation and chart helpers, sourced into this environment.
source("data-processing.R", local = T)
source("hc_subcategorised_employment.R", local = T)
# Append the Online Labour Index source credit to a highcharter object `x`.
iLabour_branding <- function(x) {
  hc_credits(
    hc = x,
    text = 'Source: Online Labour Index',
    enabled = TRUE,
    href = 'http://ilabour.oii.ox.ac.uk/online-labour-index/',
    position = list(align = "right")
  )
}
# Server function: each sourced file defines the outputs for one dashboard tab.
shinyServer(function(input, output, session) {
  source("dominant-occupation-worldmap.R", local = TRUE)$value
  source(file = "tab_occupation-x-worker-country.R", local = TRUE)$value
  source(file = "tab_worldmap.R", local = TRUE)$value
  # Commented out as no longer required.
  # source(file = "tab_stacked-area-chart.R", local = TRUE)$value
})
|
897b34fc8216b73a5cb3121ff283bc0e5e5ac472
|
2099a2b0f63f250e09f7cd7350ca45d212e2d364
|
/DUC-Dataset/Summary_p100_R/D096.AP900130-0113.html.R
|
b05e43bb5baa342a24e37702b33bdf83842bb5ce
|
[] |
no_license
|
Angela7126/SLNSumEval
|
3548301645264f9656b67dc807aec93b636778ef
|
b9e7157a735555861d2baf6c182e807e732a9dd6
|
refs/heads/master
| 2023-04-20T06:41:01.728968
| 2021-05-12T03:40:11
| 2021-05-12T03:40:11
| 366,429,744
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
r
|
D096.AP900130-0113.html.R
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:4, WORD_NUM:91">
</head>
<body bgcolor="white">
<a href="#0" id="0">This year's game was seen in about 35.9 million homes.</a>
<a href="#1" id="1">The higher viewership was made possible by the annual increase in the number of homes with television.</a>
<a href="#2" id="2">``Given the expected blowout, the numbers are completely understandable and we're happy to have a 39,'' said Susan Kerr, director of programming for CBS Sports.</a>
<a href="#3" id="3">The game on CBS averaged a 39.0 rating and a 63 share, the lowest Super Bowl rating since 1969, when the New York Jets' 16-7 victory over Baltimore got a 36.0 on NBC for the worst rating ever, A.C.</a>
</body>
</html>
|
a5684f1343e6b1843feac575b9e4cbce4f5e3596
|
6058ae780cde6ec6117a3fc86a4c1c26b578650e
|
/R/seq_pigeon.R
|
06103f6b4f11d9d9d00f7b19858488d6ce6c455b
|
[] |
no_license
|
mjg211/xover
|
88243eb2f99a76502057f67924725fda1796dd39
|
9e8bb48dcc735d78aa9a8d14f236a8ba13609866
|
refs/heads/master
| 2020-03-26T14:55:03.702584
| 2019-10-15T10:40:29
| 2019-10-15T10:40:29
| 145,011,976
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,377
|
r
|
seq_pigeon.R
|
#' Pigeon (1984) cross-over design specification
#'
#' Specifies cross-over designs from Pigeon (1984).
#'
#' \code{seq_pigeon()} supports the specification of designs from
#' Pigeon (1984). Designs for three through seven treatments (see
#' \code{D}) are supported, for any chosen treatment labels (see \code{labels}).
#' In addition, the designs can be returned in \code{\link[base]{matrix}} or
#' \code{\link[tibble]{tibble}} form (see \code{as_matrix}).
#'
#' Precisely, for \code{D} equal to three through seven, there are one, four,
#' six, seven, and two designs available respectively (accessible by setting
#' \code{selection} equal to one through seven as appropriate). Ultimately, the
#' \ifelse{html}{\out{(<i>k</i>,<i>j</i>)}}{\eqn{(k,j)}}th element of the
#' cross-over design matrix corresponds to the treatment a subject on the
#' \ifelse{html}{\out{<i>k</i>}}{\eqn{k}}th sequence would receive in the
#' \ifelse{html}{\out{<i>j</i>}}{\eqn{j}}th period.
#'
#' @param D The number of treatments. Must be a single
#' \code{\link[base]{numeric}} integer between three and seven inclusive.
#' Defaults to \code{3}.
#' @param selection A single \code{\link[base]{numeric}} integer indicating
#' which design to return, for the chosen value of \code{D}. See
#' \strong{Details} for information on supported values.
#' @param labels A \code{\link[base]{vector}} of labels for the treatments.
#' Should be of \code{\link[base]{length}} \code{D}, containing unique elements.
#' Defaults to \code{0:(D - 1)}.
#' @param as_matrix A \code{\link[base]{logical}} variable indicating whether
#' the design should be returned as a \code{\link[base]{matrix}}, or a
#' \code{\link[tibble]{tibble}}. Defaults to \code{T}.
#' @param summary A \code{\link[base]{logical}} variable indicating whether a
#' summary of the function's progress should be printed to the console. Defaults
#' to \code{T}.
#' @return Either a \code{\link[base]{matrix}} if \code{as_matrix = T} (with
#' rows corresponding to sequences and columns to periods), or a
#' \code{\link[tibble]{tibble}} if \code{as_matrix = F} (with rows corresponding
#' to a particular period on a particular sequence). In either case, the
#' returned object will have class \code{xover_seq}.
#' @examples
#' # Pigeon (1984) designs for four treatments
#' pigeon1 <- seq_pigeon(D = 4)
#' pigeon2 <- seq_pigeon(D = 4, selection = 2)
#' pigeon3 <- seq_pigeon(D = 4, selection = 3)
#' pigeon4 <- seq_pigeon(D = 4, selection = 4)
#' # Using different labels
#' pigeon1_ABCD <- seq_pigeon(D = 4, labels = LETTERS[1:4])
#' pigeon2_ABCD <- seq_pigeon(D = 4, selection = 2, labels = LETTERS[1:4])
#' pigeon3_ABCD <- seq_pigeon(D = 4, selection = 3, labels = LETTERS[1:4])
#' pigeon4_ABCD <- seq_pigeon(D = 4, selection = 4, labels = LETTERS[1:4])
#' # Returning in tibble form
#' pigeon1_tibble <- seq_pigeon(D = 4, as_matrix = F)
#' pigeon2_tibble <- seq_pigeon(D = 4, selection = 2, as_matrix = F)
#' pigeon3_tibble <- seq_pigeon(D = 4, selection = 3, as_matrix = F)
#' pigeon4_tibble <- seq_pigeon(D = 4, selection = 4, as_matrix = F)
#' @references Pigeon JG (1985) Residual effects designs for comparing
#' treatments with a control. \emph{PhD thesis, Temple University}.
#' @author Based on data from the \code{\link[Crossover]{Crossover}} package by
#' Kornelius Rohmeyer.
#' @export
seq_pigeon <- function(D = 3, selection = 1, labels = 0:(D - 1),
                       as_matrix = T, summary = T) {
  ##### Input checking #########################################################
  # Validate every argument up front; the check_* helpers are package-internal.
  # The vector passed to check_selection gives the number of supported designs
  # for D = 3,...,7 (1, 4, 6, 7, 2), with NA for the unsupported D = 1, 2.
  check_integer_range(D, "D", c(2, 8), 1)
  check_selection(selection, c(rep(NA, 2), 1, 4, 6, 7, 2), D)
  check_labels(labels, D)
  check_logical(as_matrix, "as_matrix")
  check_logical(summary, "summary")
  ##### Main computations ######################################################
  if (summary) {
    message(" Beginning the design specification...")
  }
  # Hard-coded sequence matrices from Pigeon (1984): rows are sequences,
  # columns are periods; treatments are coded 1..D and relabelled below.
  if (all(D == 3, selection == 1)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 2, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 4, 1, 3, 2, 1, 4, 3, 1), 9, 3, byrow = T)
  } else if (all(D == 4, selection == 1)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 2, 1, 2, 3, 1, 3, 4, 1, 4, 2,
                          2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1),
                        18, 3, byrow = T)
  } else if (all(D == 4, selection == 2)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 2, 1, 2, 3, 1, 3, 4, 1, 4, 2,
                          2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 3, 1, 3, 4, 1, 4, 2, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1,
                          2, 4, 3, 3, 2, 4, 4, 3, 2), 21, 3, byrow = T)
  } else if (all(D == 4, selection == 3)) {
    sequences <- matrix(c(1, 2, 2, 1, 3, 3, 1, 4, 4, 1, 2, 2, 1, 3, 3, 1, 4, 4,
                          1, 2, 2, 1, 3, 3, 1, 4, 4, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1,
                          2, 4, 1, 3, 2, 1, 4, 3, 1), 27, 3, byrow = T)
  } else if (all(D == 4, selection == 4)) {
    sequences <- matrix(c(1, 2, 2, 1, 3, 3, 1, 4, 4, 1, 2, 2, 1, 3, 3, 1, 4, 4,
                          1, 2, 2, 1, 3, 3, 1, 4, 4, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 1, 4, 3, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 1, 3,
                          2, 3, 1, 3, 4, 1, 4, 2, 1, 2, 4, 1, 3, 2, 1, 4, 3, 1,
                          2, 4, 1, 3, 2, 1, 4, 3, 1, 2, 4, 3, 3, 2, 4, 4, 3, 2),
                        30, 3, byrow = T)
  } else if (all(D == 5, selection == 1)) {
    sequences <- matrix(c(1, 3, 5, 4, 1, 4, 2, 5, 1, 5, 3, 2, 1, 2, 4, 3, 2, 1,
                          5, 4, 3, 1, 2, 5, 4, 1, 3, 2, 5, 1, 4, 3, 2, 3, 1, 4,
                          3, 4, 1, 5, 4, 5, 1, 2, 5, 2, 1, 3, 2, 3, 5, 1, 3, 4,
                          2, 1, 4, 5, 3, 1, 5, 2, 4, 1), 16, 4, byrow = T)
  } else if (all(D == 5, selection == 2)) {
    sequences <- matrix(c(1, 3, 5, 4, 1, 4, 2, 5, 1, 5, 3, 2, 1, 2, 4, 3, 1, 3,
                          5, 4, 1, 4, 2, 5, 1, 5, 3, 2, 1, 2, 4, 3, 2, 1, 5, 4,
                          3, 1, 2, 5, 4, 1, 3, 2, 5, 1, 4, 3, 2, 1, 5, 4, 3, 1,
                          2, 5, 4, 1, 3, 2, 5, 1, 4, 3, 2, 3, 1, 4, 3, 4, 1, 5,
                          4, 5, 1, 2, 5, 2, 1, 3, 2, 3, 1, 4, 3, 4, 1, 5, 4, 5,
                          1, 2, 5, 2, 1, 3, 2, 3, 5, 1, 3, 4, 2, 1, 4, 5, 3, 1,
                          5, 2, 4, 1, 2, 3, 5, 1, 3, 4, 2, 1, 4, 5, 3, 1, 5, 2,
                          4, 1), 32, 4, byrow = T)
  } else if (all(D == 5, selection == 3)) {
    sequences <- matrix(c(1, 3, 5, 4, 1, 4, 2, 5, 1, 5, 3, 2, 1, 2, 4, 3, 1, 3,
                          5, 4, 1, 4, 2, 5, 1, 5, 3, 2, 1, 2, 4, 3, 2, 1, 5, 4,
                          3, 1, 2, 5, 4, 1, 3, 2, 5, 1, 4, 3, 2, 1, 5, 4, 3, 1,
                          2, 5, 4, 1, 3, 2, 5, 1, 4, 3, 2, 3, 1, 4, 3, 4, 1, 5,
                          4, 5, 1, 2, 5, 2, 1, 3, 2, 3, 1, 4, 3, 4, 1, 5, 4, 5,
                          1, 2, 5, 2, 1, 3, 2, 3, 5, 1, 3, 4, 2, 1, 4, 5, 3, 1,
                          5, 2, 4, 1, 2, 3, 5, 1, 3, 4, 2, 1, 4, 5, 3, 1, 5, 2,
                          4, 1, 2, 3, 5, 4, 3, 4, 2, 5, 4, 5, 3, 2, 5, 2, 4, 3),
                        36, 4, byrow = T)
  } else if (all(D == 5, selection == 4)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 2, 1, 2, 4, 1, 3, 5,
                          1, 4, 2, 1, 5, 3, 2, 1, 3, 3, 1, 4, 4, 1, 5, 5, 1, 2,
                          2, 1, 5, 3, 1, 2, 4, 1, 3, 5, 1, 4, 2, 3, 1, 3, 4, 1,
                          4, 5, 1, 5, 2, 1, 2, 4, 1, 3, 5, 1, 4, 2, 1, 5, 3, 1,
                          2, 5, 4, 3, 2, 5, 4, 3, 2, 5, 4, 3), 28, 3, byrow = T)
  } else if (all(D == 5, selection == 5)) {
    sequences <- matrix(c(1, 2, 4, 1, 2, 5, 1, 3, 4, 1, 3, 2, 1, 4, 5, 1, 4, 3,
                          1, 4, 5, 1, 5, 2, 1, 5, 3, 1, 5, 4, 2, 1, 3, 2, 1, 3,
                          2, 1, 4, 3, 1, 2, 3, 1, 5, 3, 1, 4, 4, 1, 2, 4, 1, 5,
                          5, 1, 2, 5, 1, 3, 4, 2, 1, 5, 2, 1, 2, 3, 1, 4, 3, 1,
                          2, 4, 1, 3, 4, 1, 5, 4, 1, 2, 5, 1, 3, 5, 1, 3, 5, 1,
                          4, 2, 3, 5, 2, 4, 3, 2, 5, 4, 3, 2, 5, 3, 4, 2, 3, 5,
                          3, 4, 2, 4, 5, 3), 38, 3, byrow = T)
  } else if (all(D == 5, selection == 6)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 2, 1, 2, 4, 1, 3, 5,
                          1, 4, 2, 1, 5, 3, 1, 2, 5, 1, 3, 2, 1, 4, 3, 1, 5, 4,
                          2, 1, 3, 3, 1, 4, 4, 1, 5, 5, 1, 2, 2, 1, 4, 3, 1, 5,
                          4, 1, 2, 5, 1, 3, 2, 1, 5, 3, 1, 2, 4, 1, 3, 5, 1, 4,
                          2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 2, 1, 2, 4, 1, 3, 5, 1,
                          4, 2, 1, 5, 3, 1, 2, 5, 1, 3, 2, 1, 4, 3, 1, 5, 4, 1),
                        36, 3, byrow = T)
  } else if (all(D == 6, selection == 1)) {
    sequences <- matrix(c(1, 3, 6, 4, 5, 1, 4, 2, 5, 6, 1, 5, 3, 6, 2, 1, 6, 4,
                          2, 3, 1, 2, 5, 3, 4, 2, 1, 6, 4, 5, 3, 1, 2, 5, 6, 4,
                          1, 3, 6, 2, 5, 1, 4, 2, 3, 6, 1, 5, 3, 4, 3, 2, 1, 4,
                          5, 4, 3, 1, 5, 6, 5, 4, 1, 6, 2, 6, 5, 1, 2, 3, 2, 6,
                          1, 3, 4, 6, 3, 2, 1, 5, 2, 4, 3, 1, 6, 3, 5, 4, 1, 2,
                          4, 6, 5, 1, 3, 5, 2, 6, 1, 4, 4, 6, 3, 2, 1, 5, 2, 4,
                          3, 1, 6, 3, 5, 4, 1, 2, 4, 6, 5, 1, 3, 5, 2, 6, 1),
                        25, 5, byrow = T)
  } else if (all(D == 6, selection == 2)) {
    sequences <- matrix(c(1, 3, 6, 4, 5, 1, 4, 2, 5, 6, 1, 5, 3, 6, 2, 1, 6, 4,
                          2, 3, 1, 2, 5, 3, 4, 1, 3, 6, 4, 5, 1, 4, 2, 5, 6, 1,
                          5, 3, 6, 2, 1, 6, 4, 2, 3, 1, 2, 5, 3, 4, 2, 1, 6, 4,
                          5, 3, 1, 2, 5, 6, 4, 1, 3, 6, 2, 5, 1, 4, 2, 3, 6, 1,
                          5, 3, 4, 2, 1, 6, 4, 5, 3, 1, 2, 5, 6, 4, 1, 3, 6, 2,
                          5, 1, 4, 2, 3, 6, 1, 5, 3, 4, 3, 2, 1, 4, 5, 4, 3, 1,
                          5, 6, 5, 4, 1, 6, 2, 6, 5, 1, 2, 3, 2, 6, 1, 3, 4, 3,
                          2, 1, 4, 5, 4, 3, 1, 5, 6, 5, 4, 1, 6, 2, 6, 5, 1, 2,
                          3, 2, 6, 1, 3, 4, 6, 3, 2, 1, 5, 2, 4, 3, 1, 6, 3, 5,
                          4, 1, 2, 4, 6, 5, 1, 3, 5, 2, 6, 1, 4, 6, 3, 2, 1, 5,
                          2, 4, 3, 1, 6, 3, 5, 4, 1, 2, 4, 6, 5, 1, 3, 5, 2, 6,
                          1, 4, 4, 6, 3, 2, 1, 5, 2, 4, 3, 1, 6, 3, 5, 4, 1, 2,
                          4, 6, 5, 1, 3, 5, 2, 6, 1, 4, 6, 3, 2, 1, 5, 2, 4, 3,
                          1, 6, 3, 5, 4, 1, 2, 4, 6, 5, 1, 3, 5, 2, 6, 1), 50,
                        5, byrow = T)
  } else if (all(D == 6, selection == 3)) {
    sequences <- matrix(c(1, 2, 3, 6, 1, 3, 4, 2, 1, 4, 5, 3, 1, 5, 6, 4, 1, 6,
                          2, 5, 1, 2, 4, 5, 1, 3, 5, 6, 1, 4, 6, 2, 1, 5, 2, 3,
                          1, 6, 3, 4, 2, 1, 5, 4, 3, 1, 6, 5, 4, 1, 2, 6, 5, 1,
                          3, 2, 6, 1, 4, 3, 2, 1, 6, 3, 3, 1, 2, 4, 4, 1, 3, 5,
                          5, 1, 4, 6, 6, 1, 5, 2, 3, 6, 1, 2, 4, 2, 1, 3, 5, 3,
                          1, 4, 6, 4, 1, 5, 2, 5, 1, 6, 4, 5, 1, 2, 5, 6, 1, 3,
                          6, 2, 1, 4, 2, 3, 1, 5, 3, 4, 1, 6, 5, 4, 2, 1, 6, 5,
                          3, 1, 2, 6, 4, 1, 3, 2, 5, 1, 4, 3, 6, 1, 6, 3, 2, 1,
                          2, 4, 3, 1, 3, 5, 4, 1, 4, 6, 5, 1, 5, 2, 6, 1), 40,
                        4, byrow = T)
  } else if (all(D == 6, selection == 4)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 2, 1, 6,
                          3, 1, 2, 4, 1, 3, 5, 1, 4, 6, 1, 5, 2, 4, 1, 3, 5, 1,
                          4, 6, 1, 5, 2, 1, 6, 3, 1, 6, 5, 3, 2, 6, 4, 3, 2, 5,
                          4, 3, 6, 5, 4, 2), 20, 3, byrow = T)
  } else if (all(D == 6, selection == 5)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 1, 2, 4,
                          1, 3, 5, 1, 4, 6, 1, 5, 2, 1, 6, 3, 2, 1, 5, 3, 1, 6,
                          4, 1, 2, 5, 1, 3, 6, 1, 4, 2, 1, 6, 3, 1, 2, 4, 1, 3,
                          5, 1, 4, 6, 1, 5, 2, 5, 1, 3, 6, 1, 4, 2, 1, 5, 3, 1,
                          6, 4, 1, 2, 6, 1, 3, 2, 1, 4, 3, 1, 5, 4, 1, 6, 5, 1),
                        30, 3, byrow = T)
  } else if (all(D == 6, selection == 6)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 1, 2, 3,
                          1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 2, 1, 6, 3, 1, 2,
                          4, 1, 3, 5, 1, 4, 6, 1, 5, 2, 1, 6, 3, 1, 2, 4, 1, 3,
                          5, 1, 4, 6, 1, 5, 2, 4, 1, 3, 5, 1, 4, 6, 1, 5, 2, 1,
                          6, 3, 1, 2, 4, 1, 3, 5, 1, 4, 6, 1, 5, 2, 1, 6, 3, 1,
                          6, 5, 3, 2, 6, 4, 3, 2, 5, 4, 3, 6, 5, 4, 2, 6, 5, 3,
                          2, 6, 4, 3, 2, 5, 4, 3, 6, 5, 4, 2), 40, 3, byrow = T)
  } else if (all(D == 6, selection == 7)) {
    sequences <- matrix(c(1, 2, 3, 1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 1, 2, 3,
                          1, 3, 4, 1, 4, 5, 1, 5, 6, 1, 6, 2, 1, 2, 4, 1, 3, 5,
                          1, 4, 6, 1, 5, 2, 1, 6, 3, 2, 1, 6, 3, 1, 2, 4, 1, 3,
                          5, 1, 4, 6, 1, 5, 2, 1, 5, 3, 1, 6, 4, 1, 2, 5, 1, 3,
                          6, 1, 4, 2, 1, 6, 3, 1, 2, 4, 1, 3, 5, 1, 4, 6, 1, 5,
                          2, 4, 1, 3, 5, 1, 4, 6, 1, 5, 2, 1, 6, 3, 1, 2, 5, 1,
                          3, 6, 1, 4, 2, 1, 5, 3, 1, 6, 4, 1, 2, 6, 1, 3, 2, 1,
                          4, 3, 1, 5, 4, 1, 6, 5, 1, 6, 5, 3, 2, 6, 4, 3, 2, 5,
                          4, 3, 6, 5, 4, 2), 50, 3, byrow = T)
  } else if (all(D == 7, selection == 1)) {
    sequences <- matrix(c(1, 3, 7, 4, 6, 5, 1, 4, 2, 5, 7, 6, 1, 5, 3, 6, 2, 7,
                          1, 6, 4, 7, 3, 2, 1, 7, 5, 2, 4, 3, 1, 2, 6, 3, 5, 4,
                          2, 1, 7, 4, 6, 5, 3, 1, 2, 5, 7, 6, 4, 1, 3, 6, 2, 7,
                          5, 1, 4, 7, 3, 2, 6, 1, 5, 2, 4, 3, 7, 1, 6, 3, 5, 4,
                          2, 3, 1, 4, 6, 5, 3, 4, 1, 5, 7, 6, 4, 5, 1, 6, 2, 7,
                          5, 6, 1, 7, 3, 2, 6, 7, 1, 2, 4, 3, 7, 2, 1, 3, 5, 4,
                          2, 3, 7, 1, 6, 5, 3, 4, 2, 1, 7, 6, 4, 5, 3, 1, 2, 7,
                          5, 6, 4, 1, 3, 2, 6, 7, 5, 1, 4, 3, 7, 2, 6, 1, 5, 4,
                          2, 3, 7, 4, 1, 5, 3, 4, 2, 5, 1, 6, 4, 5, 3, 6, 1, 7,
                          5, 6, 4, 7, 1, 2, 6, 7, 5, 2, 1, 3, 7, 2, 6, 3, 1, 4,
                          2, 3, 7, 4, 6, 1, 3, 4, 2, 5, 7, 1, 4, 5, 3, 6, 2, 1,
                          5, 6, 4, 7, 3, 1, 6, 7, 5, 2, 4, 1, 7, 2, 6, 3, 5, 1),
                        36, 6, byrow = T)
  } else if (all(D == 7, selection == 2)) {
    sequences <- matrix(c(2, 3, 1, 6, 3, 6, 2, 1, 6, 1, 3, 2, 1, 2, 6, 3, 2, 3,
                          1, 7, 3, 7, 2, 1, 7, 1, 3, 2, 1, 2, 7, 3, 2, 4, 1, 5,
                          4, 5, 2, 1, 5, 1, 4, 2, 1, 2, 5, 4, 2, 4, 1, 7, 4, 7,
                          2, 1, 7, 1, 4, 2, 1, 2, 7, 4, 2, 5, 1, 6, 5, 6, 2, 1,
                          6, 1, 5, 2, 1, 2, 6, 5, 3, 4, 1, 5, 4, 5, 3, 1, 5, 1,
                          4, 3, 1, 3, 5, 4, 3, 4, 1, 6, 4, 6, 3, 1, 6, 1, 4, 3,
                          1, 3, 6, 4, 3, 5, 1, 7, 5, 7, 3, 1, 7, 1, 5, 3, 1, 3,
                          7, 5, 4, 6, 1, 7, 6, 7, 4, 1, 7, 1, 6, 4, 1, 4, 7, 6,
                          5, 6, 1, 7, 6, 7, 5, 1, 7, 1, 6, 5, 1, 5, 7, 6, 2, 3,
                          5, 1, 3, 4, 6, 2, 4, 5, 7, 3, 5, 6, 1, 4, 6, 7, 2, 5,
                          7, 1, 3, 6, 1, 2, 4, 7, 2, 1, 6, 3, 3, 2, 7, 4, 4, 3,
                          1, 5, 5, 4, 2, 6, 6, 5, 3, 7, 7, 6, 4, 1, 1, 7, 5, 2),
                        54, 4, byrow = T)
  }
  if (summary) {
    message("...completed the design specification. Preparing outputs...")
  }
  # Map the internal treatment codes 1:D onto the user-supplied labels, then
  # convert to the requested output representation (matrix or tibble).
  sequences <- convert_labels(sequences, D, labels, 1:D)
  sequences <- transform_to_xover(sequences, labels, as_matrix)
  ##### Outputting #############################################################
  if (summary) {
    message("...outputting.")
  }
  return(sequences)
}
|
191beb16a4fc5dd5da247e3b5899b61b03c82368
|
182257a7b18220970988c68cdd0d815e12ab4c85
|
/scratch/Modeling.R
|
1cad2cc3cde39234a3f91d2d2af6cb464f1f41e5
|
[] |
no_license
|
tylerandrewscott/elwha
|
3b81c495f96a8e14819621a1b70556f7eca29d06
|
b2c3f0b3b9cafc3382eaa57acf87ebc2c47b1cfc
|
refs/heads/master
| 2022-05-12T16:03:49.773027
| 2016-05-11T14:32:48
| 2016-05-11T14:32:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,500
|
r
|
Modeling.R
|
# Modeling.R — build ERGM-ready statnet network objects from survey edgelists.
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are fragile in shared
# scripts; consider relative paths / an RStudio project instead.
rm(list=ls())
setwd("//Users/TScott/Google Drive/elwha")
library(statnet)
#dat_imp=read.csv(file="edgelist_implement.csv",row.names=1)
#dat_plan=read.csv(file="edgelist_plan.csv",row.names=1)
#dat_cons=read.csv(file="edgelist_consult.csv",row.names=1)
# Single combined edgelist; TType codes the tie type (WT/PT/CT).
dat_all=read.csv(file="edgelist_all.csv",row.names=1)
# "combi" is a directed edge key: "ORG Contact".
dat_all$combi = paste(dat_all$ORG,dat_all$Contact)
dat_imp = dat_all[dat_all$TType=='WT',]
dat_plan = dat_all[dat_all$TType=='PT',]
dat_cons = dat_all[dat_all$TType=='CT',]
#sum(dat_imp$combi %in% dat_plan$combi)
#sum(dat_imp$combi %in% dat_cons$combi)
#sum(dat_cons$combi %in% dat_plan$combi)
# Survey responses keyed by ORG; used below to build vertex attributes.
resp.dat=read.csv(file="Response.Used.csv",row.names=1)
# Group co-membership / exchange matrices (square, ORG x ORG).
psp_group = read.csv(file="Group.Overlap.Matrix.PSP.csv",row.names=1)
all_group = read.csv(file="Group.Overlap.Matrix.csv",row.names=1)
npsp_group = read.csv(file="Group.Overlap.Matrix.NPSP.csv",row.names=1)
fina_up_group=read.csv(file="Group.Fina.Up.Matrix.csv",row.names=1)
fina_down_group=read.csv(file="Group.Fina.Down.Matrix.csv",row.names=1)
huma_up_group=read.csv(file="Group.Huma.Up.Matrix.csv",row.names=1)
huma_down_group=read.csv(file="Group.Huma.Down.Matrix.csv",row.names=1)
valu_up_group=read.csv(file="Group.Valu.Up.Matrix.csv",row.names=1)
valu_down_group=read.csv(file="Group.Value.Down.Matrix.csv",row.names=1)
lang_up_group=read.csv(file="Group.Lang.Up.Matrix.csv",row.names=1)
lang_down_group=read.csv(file="Group.Lang.Down.Matrix.csv",row.names=1)
scie_up_group=read.csv(file="Group.Scie.Up.Matrix.csv",row.names=1)
scie_down_group=read.csv(file="Group.Scie.Down.Matrix.csv",row.names=1)
face_up_group=read.csv(file="Group.Face.Up.Matrix.csv",row.names=1)
face_down_group=read.csv(file="Group.Face.Down.Matrix.csv",row.names=1)
# NOTE(review): 'test' duplicates psp_group and appears unused — confirm.
test<-(read.csv(file="Group.Overlap.Matrix.PSP.csv",row.names=1))
#sort out only those who responded to survey
uq = resp.dat[unique(resp.dat$ORG),]
# Respondents that never appear as a sender in the edgelist are isolates.
isolate_vertices = unique(resp.dat$ORG)[unique(resp.dat$ORG) %in% unique(dat_all$ORG)==FALSE]
nonisolate_vertices = sort(unique(dat_all$ORG))
all_vertices = c(as.character(isolate_vertices),as.character(nonisolate_vertices))
all_vertices = sort(as.factor(all_vertices))
# Keep only edges whose receiver is a known vertex (survey respondent).
respondent_edges = dat_all[dat_all$Contact %in% all_vertices == TRUE,]
dat_imp = dat_imp[dat_imp$Contact %in% all_vertices==TRUE,]
dat_plan = dat_plan[dat_plan$Contact %in% all_vertices==TRUE,]
dat_cons = dat_cons[dat_cons$Contact %in% all_vertices==TRUE,]
# Remove self-loops and duplicate directed edges from an edgelist.
# Replaces four copy-pasted blocks (combined, imp, cons, plan) with one helper.
dedup_edges <- function(d) {
  # Drop self-loops (an ORG naming itself as a contact).
  d <- d[as.character(d$ORG) != as.character(d$Contact), ]
  # Keep only the first occurrence of each directed ORG -> Contact pair.
  d[!duplicated(paste(d$ORG, "@@@", d$Contact)), ]
}
# Deduplicated union of all three tie types (computed before the per-type
# data frames are themselves deduplicated, as in the original).
unique_dat <- dedup_edges(rbind(dat_imp, dat_plan, dat_cons))
dat_imp <- dedup_edges(dat_imp)
dat_cons <- dedup_edges(dat_cons)
dat_plan <- dedup_edges(dat_plan)
#combine all three types of ties into one tagged data frame
dat_imp$TYPE = "IMP"
dat_plan$TYPE = "PLAN"
dat_cons$TYPE = "CONS"
temp = rbind(dat_imp,dat_plan,dat_cons)
dat_combined = temp[temp$Contact %in% all_vertices==TRUE,]
# Group matrices are square; mirror row names onto columns.
colnames(npsp_group)<-rownames(npsp_group)
colnames(psp_group)<-rownames(psp_group)
colnames(all_group)<-rownames(all_group)
# Empty directed network with one node per vertex; edges added later.
net_temp = network.initialize(length(all_vertices),directed=TRUE,loops=FALSE)
# One row per vertex; attribute columns are appended below.
vertex_attributes = data.frame(sort(all_vertices))
colnames(vertex_attributes) = "NAME"
# ---- Vertex attribute assembly ----------------------------------------------
# For each vertex name, look up the matching attribute in the survey data
# (resp.dat).  resp.dat can hold several rows per ORG: single-valued fields
# take the first matching row ([1]); numeric fields are averaged across rows.
# Loops use seq_along() instead of 1:length() so zero-length vectors are safe.
TOTALYEARS <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(TOTALYEARS)) {
  TOTALYEARS[i] <- resp.dat$total.years[which(resp.dat$ORG == vertex_attributes$NAME[i])[1]]
}
vertex_attributes$TOTALYEARS <- TOTALYEARS
NUMGROUPS <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(NUMGROUPS)) {
  NUMGROUPS[i] <- mean(resp.dat$NumGroups[which(resp.dat$ORG == vertex_attributes$NAME[i])])
}
vertex_attributes$NUMGROUPS <- NUMGROUPS
NUMRESP <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(NUMRESP)) {
  NUMRESP[i] <- mean(resp.dat$Numres[which(resp.dat$ORG == vertex_attributes$NAME[i])])
}
vertex_attributes$NUMRESP <- NUMRESP
MEANYEARS <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(MEANYEARS)) {
  MEANYEARS[i] <- mean(resp.dat$Years[which(resp.dat$ORG == vertex_attributes$NAME[i])])
}
vertex_attributes$MEANYEARS <- MEANYEARS
ORGTYPE <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(ORGTYPE)) {
  ORGTYPE[i] <- as.character(resp.dat$ORGType[which(resp.dat$ORG == vertex_attributes$NAME[i])[1]])
}
vertex_attributes$ORGTYPE <- ORGTYPE
USEPLAN <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(USEPLAN)) {
  USEPLAN[i] <- as.character(resp.dat$useful_plan[which(resp.dat$ORG == vertex_attributes$NAME[i])[1]])
}
vertex_attributes$USEPLAN <- USEPLAN
USEWORK <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(USEWORK)) {
  USEWORK[i] <- as.character(resp.dat$useful_work[which(resp.dat$ORG == vertex_attributes$NAME[i])[1]])
}
vertex_attributes$USEWORK <- USEWORK
USECONS <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(USECONS)) {
  USECONS[i] <- as.character(resp.dat$useful_cons[which(resp.dat$ORG == vertex_attributes$NAME[i])[1]])
}
vertex_attributes$USECONS <- USECONS
# Overwrite each respondent's npsp score with its ORG-level mean.
# NOTE(review): tapply() is recomputed inside the loop on the partially
# updated column, so results are order-dependent; kept identical to preserve
# the original behavior — confirm whether a one-shot ave() was intended.
for (i in seq_len(nrow(resp.dat)))
{resp.dat$npsp[i]<-tapply(resp.dat$npsp,resp.dat$ORG,mean)[which(sort(unique(resp.dat$ORG))==resp.dat$ORG[i])]}
# ORG-level mean psp / npsp scores looked up by name (the original wrapped
# this first loop body in a redundant extra pair of braces).
PSP_N <- rep(0, length(vertex_attributes$NAME))
for (i in seq_along(PSP_N)) {
  PSP_N[i] <- tapply(resp.dat$psp, resp.dat$ORG, mean)[which(rownames(tapply(resp.dat$psp, resp.dat$ORG, mean)) == vertex_attributes$NAME[i])[1]]
}
vertex_attributes$PSP_N <- PSP_N
NPSP_N <- rep(0, length(vertex_attributes$NAME))
# Fix: the original iterated over 1:length(PSP_N); harmless only because the
# two vectors happen to have the same length.
for (i in seq_along(NPSP_N)) {
  NPSP_N[i] <- tapply(resp.dat$npsp, resp.dat$ORG, mean)[which(rownames(tapply(resp.dat$npsp, resp.dat$ORG, mean)) == vertex_attributes$NAME[i])[1]]
}
vertex_attributes$NPSP_N <- NPSP_N
# Attach the assembled survey attributes to the empty template network.
# USEWORK/USEPLAN/USECONS: non-numeric survey answers become NA under
# as.numeric() and are recoded to 0 here.
network.vertex.names(net_temp) = as.character(vertex_attributes$NAME)
set.vertex.attribute(net_temp,"ORGTYPE",value=vertex_attributes$ORGTYPE)
set.vertex.attribute(net_temp,"TOTALYEARS",value=vertex_attributes$TOTALYEARS)
set.vertex.attribute(net_temp,"NUMGROUPS",value=vertex_attributes$NUMGROUPS)
set.vertex.attribute(net_temp,"NUMRESP",value=vertex_attributes$NUMRESP)
set.vertex.attribute(net_temp,"MEANYEARS",value=vertex_attributes$MEANYEARS)
set.vertex.attribute(net_temp,"PSP_N",value=vertex_attributes$PSP_N)
set.vertex.attribute(net_temp,"NPSP_N",value=vertex_attributes$NPSP_N)
set.vertex.attribute(net_temp,"USEWORK",value=ifelse(is.na(as.numeric(vertex_attributes$USEWORK)),0,as.numeric(vertex_attributes$USEWORK)))
set.vertex.attribute(net_temp,"USEPLAN",value=ifelse(is.na(as.numeric(vertex_attributes$USEPLAN)),0,as.numeric(vertex_attributes$USEPLAN)))
set.vertex.attribute(net_temp,"USECONS",value=ifelse(is.na(as.numeric(vertex_attributes$USECONS)),0,as.numeric(vertex_attributes$USECONS)))
# Build one network per tie type by copying the attributed template and
# writing adjacency entries with the `[<-` replacement operator.
# NOTE(review): TAIL_ID/HEAD_ID below are initialized but never used.
# NOTE(review): statnet network objects can carry reference semantics for
# some operations — confirm that the copies here do not alias net_temp.
TAIL_ID = rep(0,nrow(respondent_edges))
HEAD_ID = rep(0,nrow(respondent_edges))
IMP_TAIL_ID = rep(0,nrow(dat_imp))
IMP_HEAD_ID = rep(0,nrow(dat_imp))
net_imp = net_temp
# Map each implementation edge's ORG/Contact names to vertex indices.
for (i in 1:nrow(dat_imp))
{
IMP_TAIL_ID[i] = (which(network.vertex.names(net_imp)==dat_imp$ORG[i]))
IMP_HEAD_ID[i] = (which(network.vertex.names(net_imp)==dat_imp$Contact[i]))
}
for (i in 1:length(IMP_TAIL_ID))
{
net_imp[IMP_TAIL_ID[i],IMP_HEAD_ID[i]]<-1
}
# Consultation-tie network.
CONS_TAIL_ID = rep(0,nrow(dat_cons))
CONS_HEAD_ID = rep(0,nrow(dat_cons))
net_cons = net_temp
for (i in 1:nrow(dat_cons))
{
CONS_TAIL_ID[i] = (which(network.vertex.names(net_cons)==dat_cons$ORG[i]))
CONS_HEAD_ID[i] = (which(network.vertex.names(net_cons)==dat_cons$Contact[i]))
}
for (i in 1:length(CONS_TAIL_ID))
{
net_cons[CONS_TAIL_ID[i],CONS_HEAD_ID[i]]<-1
}
# Planning-tie network.
PLAN_TAIL_ID = rep(0,nrow(dat_plan))
PLAN_HEAD_ID = rep(0,nrow(dat_plan))
net_plan = net_temp
for (i in 1:nrow(dat_plan))
{
PLAN_TAIL_ID[i] = (which(network.vertex.names(net_plan)==dat_plan$ORG[i]))
PLAN_HEAD_ID[i] = (which(network.vertex.names(net_plan)==dat_plan$Contact[i]))
}
for (i in 1:length(PLAN_TAIL_ID))
{
net_plan[PLAN_TAIL_ID[i],PLAN_HEAD_ID[i]]<-1
}
# Network of all unique (deduplicated) ties; PLAN_* index vectors are reused
# here for the unique edgelist.
PLAN_TAIL_ID = rep(0,nrow(unique_dat))
PLAN_HEAD_ID = rep(0,nrow(unique_dat))
net_uq = net_temp
for (i in 1:nrow(unique_dat))
{
PLAN_TAIL_ID[i] = (which(network.vertex.names(net_uq)==unique_dat$ORG[i]))
PLAN_HEAD_ID[i] = (which(network.vertex.names(net_uq)==unique_dat$Contact[i]))
}
for (i in 1:length(PLAN_TAIL_ID))
{
net_uq[PLAN_TAIL_ID[i],PLAN_HEAD_ID[i]]<-1
}
# Tag each per-type network with a numeric T_VALUE edge attribute
# (consult = 1, plan = 2, implement = 3) and merge into one multiplex
# network starting from the consult ties.
net_temp_p <- net_plan
net_temp_i <- net_imp
net_temp_c <- net_cons
# NOTE(review): flips the 6th graph-level attribute in gal (presumably
# "multiple", allowing parallel edges for the merge) — confirm the index.
net_temp_c$gal[6]=TRUE
set.edge.value(net_temp_p,"T_VALUE",value=2)
set.edge.value(net_temp_i,"T_VALUE",value=3)
set.edge.value(net_temp_c,"T_VALUE",value=1)
net_temp_all <-net_temp_c
add.edges(net_temp_all,tail=as.edgelist(net_temp_i)[,1],head=as.edgelist(net_temp_i)[,2],names.eval="T_VALUE",vals.eval=3)
add.edges(net_temp_all,tail=as.edgelist(net_temp_p)[,1],head=as.edgelist(net_temp_p)[,2],names.eval="T_VALUE",vals.eval=2)
#set.edge.attribute(net_temp_all,"WIN5_dk1",value=)
#set.edge.attribute(net_temp_all,"WIN5_dk0",value=)
# Attach WIN5-derived dummies as edge attributes.
# NOTE(review): each loop iteration assigns the FULL WIN5 vector (t and h
# are computed but unused except as a guard), so the loop is redundant —
# one set.edge.attribute() call outside the loop would do the same; confirm
# the intended per-edge alignment before simplifying.
for (i in 1:nrow(dat_imp))
{
t<-which(network.vertex.names(net_temp_i)==dat_imp$ORG[i])
h<-which(network.vertex.names(net_temp_i)==dat_imp$Contact[i])
if (h>0)
{
set.edge.attribute(net_temp_i,"WIN5_dk1",value=ifelse(dat_imp$WIN5>0,1,dat_imp$WIN5))
set.edge.attribute(net_temp_i,"WIN5_dk0",value=ifelse(dat_imp$WIN5==1,1,0))
}
}
for (i in 1:nrow(dat_cons))
{
t<-which(network.vertex.names(net_temp_c)==dat_cons$ORG[i])
h<-which(network.vertex.names(net_temp_c)==dat_cons$Contact[i])
if (h>0)
{
set.edge.attribute(net_temp_c,"WIN5_dk1",value=ifelse(dat_cons$WIN5>0,1,dat_cons$WIN5))
set.edge.attribute(net_temp_c,"WIN5_dk0",value=ifelse(dat_cons$WIN5==1,1,0))
}
}
for (i in 1:nrow(dat_plan))
{
t<-which(network.vertex.names(net_temp_p)==dat_plan$ORG[i])
h<-which(network.vertex.names(net_temp_p)==dat_plan$Contact[i])
if (h>0)
{
set.edge.attribute(net_temp_p,"WIN5_dk1",value=ifelse(dat_plan$WIN5>0,1,dat_plan$WIN5))
set.edge.attribute(net_temp_p,"WIN5_dk0",value=ifelse(dat_plan$WIN5==1,1,0))
}
}
#for (i in 1:nrow(dat_all))
#{
# t<-which(network.vertex.names(net_temp_all)==dat_all$ORG[i])
# h<-which(network.vertex.names(net_temp_all)==dat_all$Contact[i])
# if (h>0)
# {
# set.edge.attribute(net_temp_all,"WIN5_dk1",value=ifelse(dat_all$WIN5>0,1,dat_all$WIN5))
# set.edge.attribute(net_temp_all,"WIN5_dk0",value=ifelse(dat_all$WIN5==1,1,0))
# }
#}
# Union of ties with each edge counted once, preferring IMP over PLAN over
# CONS when the same ORG->Contact pair appears in several tie types.
dat_plan2 = dat_plan[dat_plan$combi %in% dat_imp$combi == FALSE,]
# Bug fix: `==` binds tighter than `|`, so the original condition parsed as
# A | (B == FALSE), keeping consult ties that WERE already implementation
# ties.  The intent (mirroring dat_plan2) is "in neither imp nor plan".
dat_cons2 = dat_cons[!(dat_cons$combi %in% dat_imp$combi | dat_cons$combi %in% dat_plan$combi),]
full_temp = rbind(dat_imp,dat_plan2,dat_cons2)
# Rebuild net_temp as the combined unique-tie network, report degree
# summaries, and snapshot the whole workspace for the ERGM stage.
net_temp = network.initialize(length(all_vertices),directed=TRUE,loops=FALSE)
network.vertex.names(net_temp) = as.character(all_vertices)
TEMP_TAIL_ID = rep(0,nrow(full_temp))
TEMP_HEAD_ID = rep(0,nrow(full_temp))
for (i in 1:nrow(full_temp))
{
TEMP_TAIL_ID[i] = (which(network.vertex.names(net_temp)==full_temp$ORG[i]))
TEMP_HEAD_ID[i] = (which(network.vertex.names(net_temp)==full_temp$Contact[i]))
}
add.edges(net_temp,tail=TEMP_TAIL_ID,head=TEMP_HEAD_ID)
# Descriptive degree statistics for the combined network.
summary(degree(net_temp,gmode='digraph',cmode='indegree'))
sd(degree(net_temp,gmode='digraph',cmode='indegree'))
summary(degree(net_temp,gmode='digraph',cmode='outdegree'))
sd(degree(net_temp,gmode='digraph',cmode='outdegree'))
save.image(file="Ready_to_ERGM.RData")
|
4614f7d47ac48c660a9453c5bc91e2f50ae2b4cb
|
7dfd2b5a31ac723907f4888c98782084eac920fc
|
/man/has_transcription_factors.Rd
|
d5e23eef8691e8c03000f6f095535ea45205871d
|
[] |
no_license
|
ehenrion/ChIAnalysis
|
c9ac7ec9412d841dc7c1ac21706c055800b5600c
|
e53c25d142d294e26989e2e801d2b8633cf94b2f
|
refs/heads/master
| 2020-03-31T13:30:14.430350
| 2017-01-18T19:51:49
| 2017-01-18T19:51:49
| 152,258,410
| 0
| 0
| null | 2018-10-09T13:44:07
| 2018-10-09T13:44:23
| null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
has_transcription_factors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccessorsChIA.R
\name{has_transcription_factors}
\alias{has_transcription_factors}
\title{Determines if the given chia.obj has TF binding information.}
\usage{
has_transcription_factors(chia.obj)
}
\arguments{
\item{chia.obj}{A list containing the ChIA-PET data, as returned by \code{\link{load_chia}}.}
}
\value{
True if the object has TF binding information.
}
\description{
Determines if the given chia.obj has TF binding information.
}
|
d883bc10d16266470ea28ac3dcde09b686a3fd85
|
4f44c325c990789f75a20f49c28343b436147a89
|
/tests/testthat.R
|
edad9cfdfdfd37537939c98f4c9faa149251e62b
|
[
"MIT"
] |
permissive
|
towitter/gramr
|
80f332eca58bef50e5a75b72ff4b8bad02efcb6f
|
6902273a158b1cb033b79009ac1d8f1809e81a31
|
refs/heads/master
| 2023-03-16T08:40:55.947874
| 2017-05-27T16:29:39
| 2017-05-27T16:29:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# against the installed gramr package.
library(testthat)
library(gramr)
test_check("gramr")
|
e3f93c09120e59cb55f1ad19c618ccbd02a2bf59
|
d80dbe0e2c8ca1b15cb7ef1f91eeaf0575c34a94
|
/analysis/R/alternative.R
|
d786943990607853507f259360233d62bcac4852
|
[
"Apache-2.0"
] |
permissive
|
silky/rappor
|
0e282c914f8fd71a5febb7506e2db0dc2b2b3cdb
|
b9b413c3057e5f840f81d7016de0e9f5332659ee
|
refs/heads/master
| 2021-01-18T17:44:36.192757
| 2015-03-18T04:27:03
| 2015-03-18T04:27:03
| 32,444,169
| 1
| 0
| null | 2015-03-18T07:26:58
| 2015-03-18T07:26:58
| null |
UTF-8
|
R
| false
| false
| 1,496
|
r
|
alternative.R
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# alternative.R
#
# This is some messy code to test out alternative regression using pcls().
library(mgcv)
# Uniform weight vector of length n with each entry equal to 1 / (n + 1).
makep <- function(n) {
  rep(1 / (n + 1), n)
}
# Inequality-constraint matrix: the n x n identity stacked on top of a
# final row of -1s (each coefficient >= bound, and their sum bounded).
makeAin <- function(n) {
  eye <- diag(nrow = n)
  last <- rep(-1, n)
  rbind(eye, last)
}
# Constraint right-hand side: n zeros followed by -ratio.
# The ratio was a hard-coded hack (see the commented alternatives); it is
# now a parameter defaulting to the previous value 70000 / 64, so existing
# makebin(n) calls behave exactly as before.
makebin <- function(n, ratio = 70000 / 64) {
  #ratio = 172318 / 128
  #ratio = 490000 / 64
  print("RATIO")
  print(ratio)
  c(rep(0, n), -ratio)
}
# Assemble the argument list expected by mgcv::pcls(): design matrix X,
# response Y, uniform starting point p, identity/sum constraints (Ain, bin),
# no penalties (S, sp, off empty), no equality constraints (C), unit weights.
makeM <- function(X, Y) {
  n <- dim(X)[2]
  list(
    X = as.matrix(X),
    p = makep(n),
    off = array(0, 0),
    S = list(),
    Ain = makeAin(n),
    bin = makebin(n),
    C = matrix(0, 0, 0),
    sp = array(0, 0),
    y = Y,
    w = rep(1, length(Y))
  )
}
# CustomLM(X, Y): fit a constrained least-squares model via mgcv::pcls()
# using the constraint setup from makeM().  Prints the coefficient sum for
# debugging and returns the coefficient vector.
newLM = function(X,Y) {
M = makeM(X,Y)
coefs = pcls(M)
print("SUM(coefs)")
print(sum(coefs))
return(coefs)
}
|
f00edfac9bf9347989f0c7cdaef5ee2916d18491
|
5929b731242c09b2feef736af70b48f1aedb24c3
|
/server.R
|
5e33515eb9623c979e44ef2b6211a18ac670c904
|
[] |
no_license
|
maddipatikiran/placement-data-visulization
|
e3e9c27e7218998896f3dde1b77c752b5ca07627
|
7e47d654ea123fcb7f7a88138bbcfb5899f970dd
|
refs/heads/master
| 2021-01-19T17:03:26.612037
| 2017-08-22T09:44:29
| 2017-08-22T09:44:29
| 101,041,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,058
|
r
|
server.R
|
# This is the server logic for a Shiny web application visualizing campus
# placement data (file/1617.csv).  More on Shiny: http://shiny.rstudio.com
#
# Outputs:
#   value1 - word cloud of hire counts per company
#   value2 - pie chart of AEC placements by branch
#   value3 - bar chart of CGI hires at AEC by branch
#   value4 - donut chart of Capgemini hires at AEC by branch
library(shiny)
library(ggplot2)
library(plotly)
library(wordcloud2)
library(rsconnect)
data1<-read.csv("file/1617.csv")
shinyServer(function(input, output) {
output$value1 <- wordcloud2::renderWordcloud2({
# Number of students selected per company, rendered as a word cloud.
company<-aggregate(cbind(no_persons=COMPANY)~COMPANY ,data=data1,FUN=function(x){NROW(x)})
wordcloud2(company, size = 3.3, minRotation = -pi/6, maxRotation = -pi/6, rotateRatio = 1)
})
output$value2 <- renderPlotly({
# Placements at AEC broken down by branch.
sb<- subset(data1,COLLEGE == 'AEC' )
branch<-aggregate(cbind(no_persons=Branch)~Branch ,data=sb,FUN=function(x){NROW(x)})
k<- plot_ly(branch, labels = ~Branch, values = ~no_persons, type = 'pie') %>%
layout(
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
k
})
output$value3 <-renderPlotly({
# CGI hires at AEC by branch (horizontal bar chart).
cgi<- subset(data1, COMPANY == "CGI" & COLLEGE == 'AEC' )
cgi_b<-aggregate(cbind(no_persons=Branch)~Branch ,data=cgi,FUN=function(x){NROW(x)})
graph<-ggplot(cgi_b, aes(no_persons, Branch,fill=Branch)) + geom_col()+ ggtitle("")
ggplotly(graph)
})
output$value4 <-renderPlotly({
# Capgemini hires at AEC by branch (donut chart).
capgemini<- subset(data1, COMPANY == "CAPGEMINI" & COLLEGE == 'AEC' )
cgi_b<-aggregate(cbind(no_persons=Branch)~Branch ,data=capgemini,FUN=function(x){NROW(x)})
p <- cgi_b %>%
plot_ly(labels = ~Branch, values = ~no_persons) %>%
add_pie(hole = 0.6) %>%
layout(title = "", showlegend = F,
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
p
}
)
})
|
fae3e93e96bfc7321d86b32cb992d71ca7d516db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/patternplot/examples/imagepie.Rd.R
|
ac22193ed89aaaf3c9118e56e0fc0f8b8858d315
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 770
|
r
|
imagepie.Rd.R
|
# Auto-extracted runnable example for patternplot::imagepie(): a pie chart
# whose slices are filled with JPEG images bundled with the package.
library(patternplot)
### Name: imagepie
### Title: Plot a pie chart with slices filled with png and jpeg images.
### Aliases: imagepie
### ** Examples
library(patternplot)
library(jpeg)
Tomatoes <- readJPEG(system.file("img", "tomatoes.jpg", package="patternplot"))
Peas <- readJPEG(system.file("img", "peas.jpg", package="patternplot"))
Potatoes <- readJPEG(system.file("img", "potatoes.jpg", package="patternplot"))
#Example 1
data <- read.csv(system.file("extdata", "vegetables.csv", package="patternplot"))
# One image per group, in the same order as the groups in the data.
pattern.type<-list(Tomatoes,Peas,Potatoes)
imagepie(group=data$group,pct=data$pct,label=data$label,pattern.type=pattern.type,
label.distance=1.25,frame.color='burlywood4', frame.size=0.8, label.size=6,
label.color='forestgreen')
|
84ee676bb8e8a2b74bdd31513aafe4116b5ab481
|
04f349102910e5052ea34d3e7744e4d79a2fbb4f
|
/tests/testthat/test-n_cof_excl_ehv_132kv_tf.R
|
c7fc5a67ae41f07f475bcf3c9a003c32ebb4d236
|
[
"MIT"
] |
permissive
|
scoultersdcoe/CNAIM
|
f0728b00f0d0628e554975c78d767ee2c472fb3b
|
5c77ce4c50ef92fd05b9bb44b33fdca18302d020
|
refs/heads/master
| 2023-08-23T22:54:59.450292
| 2021-03-12T15:52:54
| 2021-03-12T15:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
test-n_cof_excl_ehv_132kv_tf.R
|
# Regression test pinning the network cost-of-failure for a 6.6/11kV
# transformer against a previously observed value (not independently derived
# — see the TODO below).
library(testthat)
library(CNAIM)
context("Network cost of Failure")
test_that("n_cof_excl_ehv_132kv_tf", {
# TODO: verify correctness
expect_equal(n_cof_excl_ehv_132kv_tf(asset_type_ncf = "6.6/11kV Transformer (GM)",
no_customers = 750,
kva_per_customer = 51), 455812.5)
})
|
f8173504dd019da065fcae2ae6a68a053584d4ef
|
0b695af895164180fab9361ae38c1af96a7c9222
|
/PA1_template.R
|
809353664c5f33b72d454b5b14a6aabafcae50d1
|
[] |
no_license
|
Rick2015/RepData_PeerAssessment1
|
f713106aa6a931377c3ed167ed4a1aa737dfdf3e
|
5cca8a0f37c19941a1455cf293d3c066316e56cd
|
refs/heads/master
| 2021-01-22T06:59:04.254393
| 2015-07-19T20:42:21
| 2015-07-19T20:42:21
| 39,195,157
| 0
| 0
| null | 2015-07-16T12:18:27
| 2015-07-16T12:18:27
| null |
UTF-8
|
R
| false
| false
| 2,450
|
r
|
PA1_template.R
|
#Reproducible Research: Peer Assessment 1
## Loading and preprocessing the data
adata=read.csv(unzip("./activity.zip"),header=TRUE, sep=",")
adata$date=as.Date(adata$date, "%Y-%m-%d")
## What is mean total number of steps taken per day?
library(dplyr)
day_group<-group_by(adata, date)
step_totals<-summarize(day_group, total=sum(steps))
step_totals
library(ggplot2)
ggplot(step_totals, aes(x=total)) + geom_histogram(binwidth = 3000)
mean_totalsteps<-mean(step_totals$total,na.rm=TRUE)
mean_totalsteps
median_totalsteps<-median(step_totals$total,na.rm=TRUE)
median_totalsteps
## Average daily activity pattern: mean steps per 5-minute interval.
# NOTE(review): rownames(mean_steps) is character; confirm plot() handles
# it here or wrap in as.numeric() as done for abline() below.
mean_steps<-tapply(adata$steps, adata$interval, mean, na.rm = TRUE)
plot(rownames(mean_steps), mean_steps, type="l", xlab="5-min. intervals")
max_steps<-names(which(mean_steps==max(mean_steps)))
plot(rownames(mean_steps), mean_steps, type="l", xlab="5-min. intervals")
# Vertical line at the interval with the highest average step count.
abline(v=as.numeric(max_steps<-names(which(mean_steps==max(mean_steps)))), col="red")
## Imputing missing values: replace NA steps with the interval mean.
rows_na<-sum(is.na(adata$steps))
newdata<-adata
newdata$steps[is.na(newdata$steps)]<-mean_steps[as.character(newdata$interval)]
new_day_group<-group_by(newdata, date)
new_step_totals<-summarize(new_day_group, total=sum(steps))
new_step_totals
ggplot(new_step_totals, aes(x=total)) + geom_histogram(binwidth = 3000)
new_mean_totalsteps<-mean(new_step_totals$total,na.rm=TRUE)
new_mean_totalsteps
new_median_totalsteps<-median(new_step_totals$total,na.rm=TRUE)
new_median_totalsteps
summary(median_totalsteps)
summary(new_median_totalsteps)
summary(mean_totalsteps)
summary(new_mean_totalsteps)
## Weekday vs weekend activity patterns.
library(lubridate)
# wday() returns 1 (Sunday) and 7 (Saturday) for weekend days.
newdata$day<-as.factor(ifelse(match(wday(newdata$date),c(1,7), nomatch=0), "weekend", "weekday"))
library(lattice)
ndata.weekday<-subset(newdata, day=="weekday")
ndata.weekend<-subset(newdata, day=="weekend")
wkday_mean_steps<-tapply(ndata.weekday$steps, ndata.weekday$interval, mean, na.rm = TRUE)
wkend_mean_steps<-tapply(ndata.weekend$steps, ndata.weekend$interval, mean, na.rm = TRUE)
df1 <- data.frame(matrix(unlist(wkend_mean_steps)),interval=names(wkend_mean_steps),day="weekend")
names(df1)[1]<-"numberOfsteps"
df2 <- data.frame(matrix(unlist(wkday_mean_steps)),interval=names(wkday_mean_steps),day="weekday")
names(df2)[1]<-"numberOfsteps"
df <- rbind(df1,df2) #combine new data frames
df <- df[with(df, order(interval)), ] #order data frames
rownames(df) <- NULL #reindex data frames
xyplot(numberOfsteps ~ interval | day, data=df, type="l", layout= c(1,2))
# NOTE(review): dev.off() errors if no graphics device is open — confirm a
# device (e.g. png()) is expected to be active when this script runs.
dev.off()
|
6e619d87d69835f6a065c2bf53b4e5cb5b755b03
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968463-test.R
|
49699edcb8c4d2550778828be81c1b3947d56dba
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 218
|
r
|
1612968463-test.R
|
# Auto-generated libFuzzer regression input for palm:::euc_distances:
# replays a crashing/valgrind-flagged argument list (note x1 is empty while
# the other coordinate vectors have length 2) and prints the result.
testlist <- list(x1 = numeric(0), x2 = c(8.19687411242632e+107, 8.19687411242632e+107 ), y1 = c(8.19687411242632e+107, 8.19687411242632e+107), y2 = c(0, 0))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
227d0db822b53951c19f455298e243a0447cd283
|
3be933f8f60175a492b29debce223354ce7deca3
|
/tests/testthat/test-deltaV.R
|
437870152e406ae657a9405f9f5e457a057a9021
|
[
"MIT"
] |
permissive
|
PabRod/waydown
|
ca2a73129b1f92ecc79602fbb805a4b4c7abc347
|
bb963c50f6fa37f5f8d0f189acea4be8c4c8e828
|
refs/heads/master
| 2023-06-24T22:16:55.002698
| 2021-07-19T10:00:21
| 2021-07-19T10:00:21
| 323,318,893
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,722
|
r
|
test-deltaV.R
|
# Tests for deltaV(): approximates the potential difference between two
# points of a flow field and reports a non-gradient error term `err`
# (0 for exact gradient flows, 1 for pure-curl flows).
context("Potential difference approximation")
test_that("1D exact", {
# Flow
f <- function(x) { sin(x) }
# Evaluation points
x0 <- 1
x1 <- 1.01
results <- deltaV(f, x1, x0)
# NOTE(review): dV below is assigned but unused; results$dV is checked.
dV <- results$dV
# Compare with expected result
V_expected <- function(x) { cos(x) }
dV_expected <- V_expected(x1) - V_expected(x0)
expect_equal(results$dV, dV_expected, tolerance = 1e-4)
expect_equal(results$err, 0, tolerance = 1e-6)
})
test_that("2D exact", {
# Flow (gradient of -(x^2*y + y))
f <- function(x) { c(
-2*x[1]*x[2],
-x[1]^2 - 1
)}
# Evaluation points
x0 <- matrix(c(1,2), ncol = 1)
x1 <- matrix(c(0.98,2.01), ncol = 1)
results <- deltaV(f, x1, x0)
# Compare with expected result
V_expected <- function(x) {x[1]^2*x[2] + x[2]}
dV_expected <- V_expected(x1) - V_expected(x0)
expect_equal(results$dV, dV_expected, tolerance = 1e-4)
expect_equal(results$err, 0, tolerance = 1e-6)
})
test_that("2D exact loose input", {
# Same flow as above, but points given as plain vectors rather than
# column matrices — deltaV should accept both.
f <- function(x) { c(
-2*x[1]*x[2],
-x[1]^2 - 1
)}
# Evaluation points (not as explicit column vectors)
x0 <- c(1,2)
x1 <- c(0.98,2.01)
results <- deltaV(f, x1, x0)
# Compare with expected result
V_expected <- function(x) {x[1]^2*x[2] + x[2]}
dV_expected <- V_expected(x1) - V_expected(x0)
expect_equal(results$dV, dV_expected, tolerance = 1e-4)
expect_equal(results$err, 0, tolerance = 1e-6)
})
test_that("2D curl", {
# Pure rotational flow has no potential: err should be 1.
f <- function(x) { c(
-x[2],
x[1]
)}
# Evaluation points (not as explicit column vectors)
x0 <- c(1,2)
x1 <- c(0.98,2.01)
results <- deltaV(f, x1, x0)
# Compare with expected result
expect_equal(results$err, 1, tolerance = 1e-6)
})
|
1781592c933a1643157f35bbdc47e84f3cc23a2d
|
d792c728a6b9d6b8312399224241d78dea1440d0
|
/EXPERIMENTS/8_ChIPseq_2_Clstr/Scripts/prop_plotting_functions.R
|
3b9c6f54e75eb7bb7bfa674d2669ed7e0a4ce54a
|
[] |
no_license
|
shamoni/cen-evo-devo
|
2b9ce0c03711fa047a272880fde634f7ea5225f1
|
81d1326e2800ceb0c25159b2f73b38900dc041d9
|
refs/heads/master
| 2021-01-17T15:47:46.683406
| 2016-05-04T05:50:13
| 2016-05-04T05:50:13
| 51,276,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,672
|
r
|
prop_plotting_functions.R
|
require(reshape)
require(ggplot2)
require(plyr)
require(RColorBrewer)
# Extract the cluster identifier: the third "-"-separated field of x.
# Fix: the original assigned to a throwaway variable and relied on R
# returning the assignment's value *invisibly*; return the value directly.
parse_clstr <- function(x){
  strsplit(x, "-")[[1]][3]
}
# Chromosome label: the last four characters of the text preceding the
# first ":" in x (e.g. "FooChr1:100-200" -> "Chr1").
parse_Chr <- function(x){
  chrom <- strsplit(x, ":")[[1]][1]
  len <- nchar(chrom)
  substr(chrom, len - 3, len)
}
# Per-library fraction of reads mapping to CEN180, averaged by Sample and
# Antibody with standard errors.
#   df   - per-library read counts including Unique_CEN and Multi_CEN columns
#   meta - path to a CSV whose columns 2:3 are Sample and Antibody, keyed
#          by LibraryID
# Returns a data frame with mean, sem, and lower/upper error-bar bounds,
# with Sample and Antibody as ordered factors for plotting.
cen.fraction <- function(df,meta){
# Total CEN reads per row (unique + multi-mapping).
NChIP.summary <-adply(df, 1, transform, CEN = sum(Unique_CEN, Multi_CEN))
metadata <- read.csv(meta, header=T)
fileorder <- match(NChIP.summary$filename,metadata$LibraryID)
ID.results <- metadata[fileorder,c(2,3)]
# Row-wise proportions; column 2 of the matrix is the CEN fraction.
mat <- data.matrix(NChIP.summary[,-c(1,3,4)])
mat <- prop.table(mat,1)
summary.prop <-cbind(Library=NChIP.summary$filename,ID.results,CEN=mat[,2])
summary.means <- ddply(summary.prop,.(Sample,Antibody),summarise,mean=mean(CEN),sem=sd(CEN)/sqrt(length(CEN)))
summary.means <- transform(summary.means, lower=mean-sem, upper=mean+sem)
# Fix factor level order for consistent panel/legend ordering in plots.
summary.means$Sample <- factor(summary.means$Sample,levels=c("WT (+/+)","LoCENH3 (-/-)","ZmCENH3 (-/-)"))
ab <-c("INPUT","AtCENH3","LoCENH3","ZmCENH3")
summary.means$Antibody <- factor(summary.means$Antibody,levels=ab)
return(summary.means)
}
# Bar chart of mean CEN180 read fraction with SEM error bars, one panel per
# antibody, bars colored by sample.  Expects the output of cen.fraction().
plot.cen.fraction <- function(df){
p <-ggplot(data=df, aes(x=Sample,y=mean, fill=Sample)) + theme_bw()
p <- p + theme(axis.text.x=element_blank(), axis.title.x=element_blank(), axis.ticks.x=element_blank(),panel.grid=element_blank())
p <- p + guides(fill=guide_legend(title="ChIP Sample"))
p <- p + theme(strip.text.x = element_text(size = 8))
p <- p + labs(y="CEN180 Reads/Total reads",x="")
p <- p + theme(axis.title.y=element_text(size=10))
p <- p + geom_bar(stat="identity") + geom_errorbar(aes(ymax=upper,ymin=lower), width=0.25)
# Three shades of blue matching the sample factor levels.
p <- p + scale_fill_manual(values=brewer.pal(9,"Blues")[c(5,7,9)])
p <- p + facet_wrap(~ Antibody,nrow=1)
return(p)
}
# Per-library proportions of reads in each of six CEN180 clusters, returned
# in long format and restricted to one sequence class.
#   df    - counts: filename, Class, then one column per cluster
#   meta  - path to a CSV whose columns 2:3 are Sample and Antibody, keyed
#           by LibraryID
#   class - sequence class to keep (matched against df$Class)
clstr.distribution <- function(df,meta,class){
colnames(df) <- c("filename","Class","Cluster1","Cluster2","Cluster3","Cluster4","Cluster5","Cluster6")
metadata <- read.csv(meta, header=T)
fileorder <- match(df$filename,metadata$LibraryID)
ID.results <- metadata[fileorder,c(2,3)]
# Row-wise proportions across the six cluster columns.
mat <- data.matrix(df[,-c(1,2)])
mat <- prop.table(mat,1)
results.prop <-cbind(Library=df$filename,ID.results,Class=df$Class,as.data.frame(mat))
results.melted <-melt(results.prop)
# Libraries excluded from the analysis (hard-coded).
to.omit <- c("SML_19_merged.fa","SML_38_merged.fa","SML_43_merged.fa")
# Bug fix: the original used x[-which(...), ]; when NO library matched,
# which() returned integer(0) and x[-integer(0), ] dropped every row.
results.melted <- results.melted[!(results.melted$Library %in% to.omit),]
results <-results.melted[results.melted$Class == class,]
# Grouping label mirrors the antibody; entries outside these four stay NA.
results$group[which(results$Antibody == "INPUT")] <- "INPUT"
results$group[which(results$Antibody == "AtCENH3")] <- "AtCENH3"
results$group[which(results$Antibody == "LoCENH3")] <- "LoCENH3"
results$group[which(results$Antibody == "ZmCENH3")] <- "ZmCENH3"
results$tocolor <- results$group
# Fix factor level order for consistent axis/legend ordering in plots.
results$group <- factor(results$group,levels=c("INPUT","AtCENH3","LoCENH3","ZmCENH3"))
results$tocolor <- factor(results$tocolor,levels=c("INPUT","AtCENH3","LoCENH3","ZmCENH3"))
return(results)
}
# Dot plot of cluster-specific read proportions, one panel per cluster,
# points colored by antibody group.  Expects the output of
# clstr.distribution(); styled to mirror plot.cen.fraction().
plot.clstr.distribution <-function(df){
# Black for INPUT plus three blues for the CENH3 antibodies.
custom <-c("black",brewer.pal(9,"Blues")[c(5,7,9)])
p <- ggplot(data=df, aes(x=group,y=value)) + theme_bw()
p <- p + theme(axis.text.x=element_blank(), axis.title.x=element_blank(), axis.ticks.x=element_blank(),panel.grid=element_blank())
p <- p + labs(y="Cluster-specific reads/All CEN180 reads")
p <- p + theme(axis.title.y=element_text(size=10))
p <- p + theme(legend.key=element_blank(), legend.title=element_blank())
p <- p + geom_point(aes(fill=tocolor),colour="black",pch=21, size=5)
p <- p + facet_wrap(~ variable,nrow=1) + scale_fill_manual(values=custom)
# Bug fix: the original ended on an assignment, so the plot object was only
# returned invisibly; return it explicitly like plot.cen.fraction() does.
return(p)
}
|
c9bf6cb744ec0a3bab3c0d27ece821d11e3a2b71
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkCellRendererGetSensitive.Rd
|
02a6b1c69451be044c802d9619f11e1fe63eaa3f
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 428
|
rd
|
gtkCellRendererGetSensitive.Rd
|
\alias{gtkCellRendererGetSensitive}
\name{gtkCellRendererGetSensitive}
\title{gtkCellRendererGetSensitive}
\description{Returns the cell renderer's sensitivity.}
\usage{gtkCellRendererGetSensitive(object)}
\arguments{\item{\verb{object}}{A \code{\link{GtkCellRenderer}}}}
\details{Available since GTK+ 2.18.}
\value{[logical] \code{TRUE} if the cell renderer is sensitive}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
ee7a8d2188799cb338024209eb5e6b84972f8bfb
|
d6b5cbc975c4f99bc1b8902e5949688fc464957c
|
/man/FastSampEn.Rd
|
adb1d0bc2b066a6a8deef13ee20a5d89523e94df
|
[] |
no_license
|
cran/TSEntropies
|
b0d7900de9d29724253706e8fc2bad2dcd41c7aa
|
3ef8888faa4c3bda6a403ce022eb210a85cce82c
|
refs/heads/master
| 2020-03-31T08:38:08.419940
| 2018-10-08T10:00:07
| 2018-10-08T10:00:07
| 152,065,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 732
|
rd
|
FastSampEn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FastSampEn_C.R
\name{FastSampEn}
\alias{FastSampEn}
\title{FastSampEn}
\usage{
FastSampEn(TS, dim = 2, lag = 1, r = 0.15 * sd(TS))
}
\arguments{
\item{TS}{- given time series}
\item{dim}{- dimension of given time series, default value is 2}
\item{lag}{- downsampling, default value is 1}
\item{r}{- radius of searched areas, default value is 0.15*sd(TS)}
}
\description{
This function computes the fast sample entropy of a given time series.
}
\examples{
timser <- rnorm(2000)
FastSampEn(timser)
FastSampEn(timser, r = 0.1*sd(timser))
FastSampEn(timser, dim = 3, r = 0.1*sd(timser))
}
\keyword{FastSampEn}
\keyword{entropy}
\keyword{fast}
\keyword{sample}
|
efc7a767c0dc0df329ebca35de219ec9626e0e54
|
c66690df66cd458ca1bd9352bd0c4e04ff70089a
|
/Code/loadDataCarvalhoEtAl.R
|
902d358cd0ad4f3736eb5afde02ea93a3795a9fe
|
[
"Apache-2.0"
] |
permissive
|
biobibibi/classifier_selection_code
|
6ef8a706c808de1a8b911df61e7fbd10e571551b
|
040a2e1c8504e5f0e3d2877fdbaa326cc9c0708d
|
refs/heads/master
| 2021-10-01T00:52:17.720895
| 2018-11-26T12:34:22
| 2018-11-26T12:34:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,361
|
r
|
loadDataCarvalhoEtAl.R
|
# Load the Carvalho et al. survival dataset from CSV and prepare it for
# classification.
#   pathFromDataFolderToCsv - CSV path relative to the data folder
#   pathToDataFolder        - absolute/relative path to the data folder
# Returns list(data, data_class): 'data' holds the predictors, 'data_class'
# a factor with levels "event" (Survival >= 2) and "nonEvent".
loadDataCarvalhoEtAl = function(pathFromDataFolderToCsv,pathToDataFolder)
{
library(plyr) # used for revalue
pathToFile = file.path(pathToDataFolder,pathFromDataFolderToCsv) # construct full file to csv
data = read.csv(pathToFile,sep = ",", dec = ".", strip.white = TRUE, na.strings = '') # read data from .csv
data_class = data$Survival # place outcome in separate variable
# Dichotomize survival at >= 2 (units per the source dataset).
data_class = factor(data_class>= 2)
data$Survival = NULL #remove outcome from data
data$ID = NULL #remove from data
data$Status = NULL #remove from data
# Detection-limit values are recoded to the limit itself before conversion.
data$IL.6 = revalue(data$IL.6, c('<2' = 2)) # recode <2 as 2 in IL.6
data$IL.8 = revalue(data$IL.8, c('<5' = 5)) # recode <5 as 5 in IL.8
data$Cyfra.21.1 = revalue(data$Cyfra.21.1, c('<0,1' = 0.1)) # recode <0,1 as 0.1 in Cyfra.21.1
# convert columns from factor to numeric (via levels to preserve values)
data$IL.6 = as.numeric(levels(data$IL.6))[data$IL.6]
data$IL.8 = as.numeric(levels(data$IL.8))[data$IL.8]
data$Cyfra.21.1 = as.numeric(levels(data$Cyfra.21.1))[data$Cyfra.21.1]
# convert to factors
factorCols = c('WHO.PS') # list all columns that should be factors
data[factorCols] = lapply(data[factorCols], factor) # convert columns into factor variables
data_class = revalue(data_class, c('TRUE' = 'event','FALSE' = 'nonEvent')) # relabel outcome as event and nonEvent
return(list(data,data_class))
}
|
c868ff893fcf4b640cd56f3c46978fa457567301
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTpintUSdryTOcubMeter.Rd.R
|
17e9758ba7338cf9ae1ff95adce3c49179558e38
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
NISTpintUSdryTOcubMeter.Rd.R
|
# Auto-extracted runnable example for NISTunits::NISTpintUSdryTOcubMeter():
# converts 10 US dry pints to cubic meters.
library(NISTunits)
### Name: NISTpintUSdryTOcubMeter
### Title: Convert pint to cubic meter
### Aliases: NISTpintUSdryTOcubMeter
### Keywords: programming
### ** Examples
NISTpintUSdryTOcubMeter(10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.