blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
935b8eee42f8d872bde7b0e8fff77e1305aa2a28 | ab69ee1913706f8f4b627fb4ab7f85f746eeba25 | /Feature_001.R | 84cbd595d6c0008ecff6b529804966b84e43f54c | [] | no_license | tap222/SMART_CUBE | c70b3cf987669a935686c14a543cf7534a23972f | f6a7e951f77e76c49cad95599dbbabf0f3e864d1 | refs/heads/master | 2021-09-07T10:47:57.215740 | 2018-02-21T20:07:55 | 2018-02-21T20:07:55 | 102,978,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 466 | r | Feature_001.R | setwd('/home/tapas/')
## Feature engineering: replace MonthlyIncome and DebtRatio with their
## product (the monthly Debt payment) in both the training and test sets.

# Append the Debt feature and drop the two source columns
# (positions 6 = MonthlyIncome, 7 = DebtRatio in these files).
add_debt_feature <- function(df) {
  df$Debt <- df$MonthlyIncome * df$DebtRatio
  df[, -c(6, 7)]
}

# NOTE(review): the original read 'cs-trainig-outlier.csv' and then
# immediately overwrote it with 'cs-test-outlier.csv'; only the training
# read is kept here -- confirm which outlier file was intended.
data <- read.csv('cs-trainig-outlier.csv', header = TRUE)
data <- add_debt_feature(data)

test <- read.csv('cs-test.csv', header = TRUE)
test <- add_debt_feature(test)

# Bug fix: the original wrote `data` to BOTH files, so the test features
# were never saved; the test frame now goes to the test output file.
write.csv(test, 'cs-test-feature-01.csv')
write.csv(data, 'cs-training-feature-01.csv')
|
66ba450bc592ddcad65ce105c0c29e22e267052c | e4c2b000268ef0f3dc2b8d79dd7ffefb1741e382 | /bikelines.R | 788783dd1499dbc75d48b1123c1e0c9f42212a7c | [] | no_license | faiazrmn/austin_bike_routes | d1bb9f355ab1a9be8c1068e4563ff65a64809463 | 2cc617798fcd736e3e7abfc09f876fe78e4d2e9f | refs/heads/main | 2023-07-02T08:48:57.220024 | 2021-08-04T08:23:09 | 2021-08-04T08:23:09 | 392,607,826 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,229 | r | bikelines.R | library(plotly)
library(dplyr)
# Visualise Austin bike-share rides: one translucent segment per trip from
# start station to end station, with station locations overlaid as markers.
# Data available on kaggle
bike <- read.csv("austin_bikeshare_trips.csv")
station <- read.csv("austin_bikeshare_stations.csv")
glimpse(bike)
# Attach coordinates to each trip: join the station table twice, once on the
# start station and once on the end station, renaming longitude/latitude to
# start_*/end_* in between so the second join's columns do not collide.
bike <- bike %>%
left_join(station, by = c("start_station_id" = "station_id")) %>%
mutate(start_long = longitude,
start_lat = latitude) %>%
select(-longitude, -latitude) %>%
left_join(station, by = c("end_station_id" = "station_id")) %>%
mutate(end_long = longitude,
end_lat = latitude,
# Human-readable route label, used as hover text below.
route = paste(start_station_name, end_station_name, sep = "-"))
# NOTE(review): sample(20000) is a permutation of 1:20000, so this keeps a
# shuffled copy of the *first* 20000 rows only; if a random sample of all
# trips was intended, use sample(nrow(bike), 20000) -- confirm intent.
bike <- bike[sample(20000), ]
# Build the plot: orange trip segments underneath, green station markers on top.
fig <- plot_ly() %>%
add_segments(
data = bike,
x = ~start_long,
xend = ~end_long,
y = ~start_lat,
yend = ~end_lat,
alpha = 0.08,
size = I(0.8),
text = ~route,
# NOTE(review): "route" is not a standard plotly hoverinfo flag ("text" is);
# confirm the intended hover behaviour.
hoverinfo = "route",
color = I("orange")
) %>%
add_markers(
data = station,
x = ~longitude,
y = ~latitude,
text = ~name,
hoverinfo = "text",
alpha = 0.8,
color = I("green"),
size = I(15)
) %>%
layout(
title = 'Most Used Routes in Austin Bike Share Rides',
showlegend = FALSE,
xaxis = list(title = "Longitude"),
yaxis = list(title = "Latitude")
)
fig
|
c78bf380f26cd3a26b04a05963c474188c4c4cef | 73b219e1f517a434dcba3f640e1020661529118d | /man/PlotR2OneFact.Rd | d93e83bd69dd2fe1eb806b19e3d91f0719bf76b1 | [] | no_license | jordanaron22/ImputingMetabolites | 77e85ac2c7f70425405a8b6372e3cc8f8a87651c | 333b91f2a63dbb980dd72e34b662b42c8562e38b | refs/heads/main | 2023-05-03T00:34:47.366095 | 2021-05-26T16:53:53 | 2021-05-26T16:53:53 | 370,790,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 472 | rd | PlotR2OneFact.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComparingImpMethods.R
\name{PlotR2OneFact}
\alias{PlotR2OneFact}
\title{Plots R2 of one factor imputation}
\usage{
PlotR2OneFact(r2.df)
}
\arguments{
\item{r2.df}{R2 dataframe, output from CalculateR2()}
}
\value{
Line chart of R2 values by imputation type
}
\description{
Plots R2 in line chart of different imputation methods.
Useful when underlying data is generated from a single factor.
}
|
7d51feb9dd69662b68e4d019d890334ede6df98e | 0884a38ae83dd78e92ec696a634430a8b48a830b | /run_analysis.R | 61e9c63807d4c0357d1103939240c1b5efdea23f | [] | no_license | patriciacrain/Samsung-Assignment | 9f9b9af0f0014b8375cbfdb9170ee566ad0b5eb6 | fc98530d84fb595e9012303d57cfd403b8e657f4 | refs/heads/master | 2020-05-18T18:43:21.508320 | 2014-05-25T17:19:58 | 2014-05-25T17:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,484 | r | run_analysis.R |
# Build a tidy version of the UCI HAR (Samsung smartphone) data set.
#
# Expects the raw HAR files (features.txt, activity_labels.txt, test/...,
# train/...) in the working directory.  Keeps only the mean()/std()
# features, merges test and train, and writes per-subject, per-activity
# averages.  Side effects: writes "test.txt" and "train.txt" (whitespace-
# normalised intermediates) and the final "HARdata_tidy.txt".
Analysis <- function(){
## Read in the features, select the ones we want and reformat them a bit
features <- read.table("features.txt", header=F)
# Keep only the -mean() / -std() measurements; V1 = column index, V2 = name.
features <- features[grepl("-mean\\(\\)|-std\\(\\)", features$V2),]
# Strip "()" and turn "-" into "." so the names are syntactic column names.
features$V2 <- gsub("\\(\\)","",features$V2)
features$V2 <- gsub("\\-",".",features$V2)
## Read in the activity labels to be used later
labels <- read.table("activity_labels.txt", header=F, col.names=c("Activity","Activity Label"))
## Cleaning the test data
message("cleaning the test data...")
test <- readLines("test/X_test.txt")
# NOTE(review): as written this gsub replaces a space with a space (a no-op);
# it was presumably meant to collapse double spaces in the fixed-width file --
# confirm against the original source.
test <- gsub(" ", " ", test)
# Drop a leading space so fread sees the first field correctly.
test <- gsub("^ ", "", test)
writeLines(test,"test.txt")
## Read the test data back in and convert to data frame with meaningful feature labels
require(data.table)
# select = features$V1 pulls only the mean/std columns chosen above.
test <- fread("test.txt", header=F, select=features$V1)
test <- data.frame(test)
names(test) <- as.character(features$V2)
## Cleaning the train data
message("cleaning the train data...")
train <- readLines("train/X_train.txt")
# NOTE(review): same probable no-op as the test branch above.
train <- gsub(" ", " ", train)
train <- gsub("^ ", "", train)
writeLines(train,"train.txt")
## Read the train data back in and convert to data frame with meaningful feature labels
train <- fread("train.txt", header=F, select=features$V1)
train <- data.frame(train)
names(train) <- as.character(features$V2)
message("adding subject and activity features to test and train...")
## Add subject and activity columns to test data
require(plyr)
test$Subject <- readLines("test/subject_test.txt")
test$Activity <- readLines("test/y_test.txt")
# Map numeric activity codes to their text labels.
test <- join(test,labels, by= "Activity")
## Add subject and activity columns to train data
train$Subject <- readLines("train/subject_train.txt")
train$Activity <- readLines("train/y_train.txt")
train <- join(train,labels, by= "Activity")
message("combining the test and train data together...")
## Rbind test and train data sets together
all <- rbind(test,train)
## Find the average of each variable for each activity and each subject, remove NA columns
message("computing the average for each variable by activity and subject...")
# NOTE(review): length.warning is not an argument of aggregate() or mean();
# it is swallowed by '...' -- it can likely be removed (warnings are already
# suppressed by suppressWarnings).
final <- suppressWarnings(aggregate(all, by=list(Activity=all$Activity.Label, Subject=all$Subject), mean, length.warning=F))
# Drop the trailing duplicated grouping columns produced by aggregate().
final <- final[,-c(69:71)]
names(final)[3:68] <- paste0(names(final)[3:68],".Avg")
message("writing out the tidy data set to 'HARdata_tidy.txt'")
# NOTE(review): writes CSV content to a .txt extension -- intentional per the
# course assignment, but confirm downstream readers expect csv format.
write.csv(final, "HARdata_tidy.txt")
}
|
5e335ea7c5a76a45d6669b73dbb7d9cf8a9836a7 | e004f559d717386b20aa38b0a10ab41a83c08ba7 | /Initiation/TP6/tp6.R | 5ff7845409021b65020fc3870f4f4ac8a0c0d090 | [] | no_license | Timsctt/R | 39566785563a50f48bdc569edbc7ae38855cd8ba | a86764b07605d404bdd4d69b75d7691d2023f5f6 | refs/heads/master | 2023-03-10T07:24:00.780945 | 2019-07-27T06:55:55 | 2019-07-27T06:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,553 | r | tp6.R | #TP6
# TP6 -- statistics exercises on the CollegeDistance and Lun_Mar data sets.
# Side effects: prints summaries and draws base-graphics plots.

##############
# Exercice 1
##############
# 1 -- read the ';'-separated data and peek at it
df <- read.table("CollegeDistance.csv", sep = ";", header = TRUE)
head(df)
# 2 -- mean years of education, overall and for men (female == 0)
mean(df$ed)
mean(df$ed[df$female == 0])
# 3 -- standard deviation of the base-year test score for hispanic students
sd(df$bytest[df$hispanic == 1])
# 4 -- how many (and which) students are recorded as both hispanic and black
sum(df$hispanic == 1 & df$black == 1)
which(df$hispanic == 1 & df$black == 1)
# 5 -- distribution of the base-year test score
hist(df$bytest, main="Répartition des étudiants en fonction du bytest", xlab= "bytest",
     ylab = "Effectifs", col="orange")
# 6 -- ethnicity factor: "W" by default, overridden for black/hispanic students
n <- nrow(df)
commu <- rep("W", n)
commu[df$black == 1] <- "B"
commu[df$hispanic == 1] <- "H"
# 7 -- education summaries per group
summary(df$ed[commu == "B"])
summary(df$ed[commu == "H"])
summary(df$ed[commu == "W"])
# 8 -- indicator: 1 when at least one parent went to college
par_sup <- rep(1, n)
par_sup[df$dadcoll == 0 & df$momcoll == 0] <- 0
df$par_sup <- par_sup
# 9 -- keep students with a base-year score of at least 50
df50 <- df[df$bytest >= 50, ]
# 10 -- drop the black/hispanic/par_sup columns from that subset
colnames(df)
filtre <- which(colnames(df) == "black" | colnames(df) == "hispanic" |
                colnames(df) == "par_sup")
df50b <- df50[, -filtre]
head(df50b)

#############
# Exercice 2
#############
# 1 -- read the tab-separated data
dat <- read.table("Lun_Mar.txt", sep = "\t", header = TRUE)
head(dat)
# 2 -- number of Martians
sum(dat$Origine == "M")
# 3 -- male Lunians: mean height, height histogram, count with colour 1
Luh <- dat[dat$Sexe == 1 & dat$Origine == "L", ]
Luh
mean(Luh$Taille)
# (bug fix: removed the stray call hist(Luh$Observation, Luh$Taille) -- the
# data has no Observation column and it duplicated the histogram below)
hist(Luh$Taille, main="Répartition des luniens hommesen fonction de leur taille",
     ylab="Effectifs", xlab="Tailles")
sum(Luh$Couleur == 1)
# 4 -- height of red (Couleur == 0) extraterrestrials
boxplot(dat$Taille[dat$Couleur == 0], horizontal = TRUE,
        main="Taille des extra-terrestres rouge", xlab="Effectif")
# 5 -- proportion taller than 155
sum(dat$Taille > 155) / nrow(dat)
# 6 -- first and third quartiles of height
Q1 <- quantile(dat$Taille, 0.25)
# Bug fix: was assigned to Q2 but used below as Q3 (undefined variable).
Q3 <- quantile(dat$Taille, 0.75)
# 7 -- qualitative size classes from the quartiles
qual <- rep("moyen", nrow(dat))
qual[dat$Taille < Q1] <- "petit"
qual[dat$Taille >= Q3] <- "grand"
dat$qual <- qual
head(dat)
View(dat)
|
adcb5505d78d20eb3222ad3a4ce3da48fd0b10ea | 37f6751867e86d659950e1d3d5dd2ef9f3d5d0a4 | /test1.R | ab04402e93ffc18e60cf10cc09fb1d63f006a2cd | [] | no_license | dvicencio/Airpollution | a8063df61f80bcf7a8141ac197f247bbffd73258 | e90f6894714e199db7fbaab5f7eb3299fdf9a4e8 | refs/heads/main | 2023-07-04T22:31:27.907127 | 2021-08-25T19:52:21 | 2021-08-25T19:52:21 | 399,628,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 329 | r | test1.R | corr1 <- function(directory, threshold = 0) {
files_full <- list.files(directory, full.names = TRUE)
dat <- data.frame()
for (i in 1:332){
A <- read.csv(files_full[i])
B <- complete.cases(A)
C <- sum(as.numeric(B))
if(C > threshold){
dat <- rbind(dat, C)
}
}
print(dat)
}
|
87d109ebc004dc63a3f5259b305b79447944ee1c | 2ab3480f71a9858a42bd06e175fb6b4a3e9063ad | /R/ggrepel.R | da4d4140488858185fb05dfad662dbea5a3d6cd9 | [] | no_license | ProjectMOSAIC/ggformulaExtra | 3f5a4ddeff552eef65d0ba9d68858288f271453c | 85743e150dcf44ac69c44ff9bd64066164f78e0c | refs/heads/master | 2020-03-19T03:23:30.858407 | 2018-06-01T21:41:10 | 2018-06-01T21:41:10 | 135,723,726 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,722 | r | ggrepel.R | #' Repulsive textual annotations
#'
#' `gf_text_repel()` adds a layer of text; `gf_label_repel()` additionally
#' draws a rectangle beneath the text, making it easier to read. In both
#' cases the labels repel away form each otehr and the data points.
#'
#' @inherit ggformula::gf_text
#' @inherit ggrepel::geom_text_repel
#' @importFrom ggrepel geom_text_repel geom_label_repel
#' @import ggplot2
#' @import ggformula
#'
#' @export
gf_text_repel <-
ggformula::layer_factory(
layer_fun = ggrepel::geom_text_repel,
geom = "text_repel",
stat = "identity",
position = "identity",
aes_form = y ~ x,
extras = alist(
label = , alpha = , angle = , color = , family = , fontface = ,
group = , hjust = , lineheight = , size = , vjust = ,
parse = FALSE,
box.padding = 0.25, point.padding = 1e-06,
segment.colour = NULL, segment.color = NULL,
segment.size = 0.5, segment.alpha = NULL,
min.segment.length = 0.5, arrow = NULL, force = 1,
max.iter = 2000, nudge_x = 0, nudge_y = 0,
xlim = c(NA, NA), ylim = c(NA, NA),
direction = c("both", "y", "x"),
seed = NA
)
)
#' @examples
#' mtcars$model <- rownames(mtcars)
#' mtcars$cylinders <- factor(mtcars$cyl)
#'
#' p <-
#' gf_point(mpg ~ wt, data = mtcars, color = ~ cylinders)
#'
#' # Avoid overlaps by repelling text labels
#' p %>% gf_text_repel(label = ~ model)
#'
#' # Labels with background
#' p %>% gf_label_repel(label = ~ model)
#'
#' # Add aesthetic mappings
#' p %>% gf_text_repel(alpha = ~ wt, size = ~ mpg, label = ~ model)
#' p %>% gf_label_repel(label = ~ model,
#' fill = ~ factor(cyl), color = "white", segment.color = "black")
#'
#' # Draw all line segments
#' p %>% gf_text_repel(label = ~ model, min.segment.length = 0)
#'
#' # Omit short line segments (default behavior)
#' p %>% gf_text_repel(label = ~ model, min.segment.length = 0.5)
#'
#' # Omit all line segments
#' p %>% gf_text_repel(label = ~ model, segment.colour = NA)
#' p %>% gf_text_repel(label = ~ model, min.segment.length = Inf)
#'
#' # Repel just the labels and totally ignore the data points
#' p %>% gf_text_repel(label = ~ model, point.padding = NA)
#'
#' # Hide some of the labels, but repel from all data points
#' mtcars$label <- rownames(mtcars)
#' mtcars$label[1:15] <- ""
#' p %>% gf_text_repel(data = mtcars, label = ~ label)
#'
#' # Nudge the starting positions
#' p %>% gf_text_repel(
#' label = ~ model,
#' nudge_x = ifelse(mtcars$cyl == 6, 2, 0),
#' nudge_y = ifelse(mtcars$cyl == 6, 7, 0))
#'
#' # Change the text size
#' p %>% gf_text_repel(label = ~ model, size = ~ wt)
#'
#' # Scale height of text, rather than sqrt(height)
#' p %>%
#' gf_text_repel(label = ~ model, size = ~ wt) %>%
#' gf_refine(scale_radius(range = c(3, 6)))
#'
#' # You can display expressions by setting parse = TRUE. The
#' # details of the display are described in `?plotmath`, but note that
#' # `gf_text_repel()` uses strings, not expressions.
#' p %>%
#' gf_text_repel(label = ~ paste(wt, "^(", cyl, ")", sep = ""),
#' parse = TRUE)
#'
#' # Add arrows
#' p %>%
#' gf_point(colour = "black") %>%
#' gf_text_repel(
#' label = ~ model,
#' arrow = arrow(length = unit(0.02, "npc")),
#' box.padding = 1
#' )
#'
#' gf_point( 1 ~ wt, data = mtcars, color = "red") %>%
#' gf_text_repel(label = ~ model,
#' nudge_y = 0.04,
#' direction = "x",
#' angle = 90,
#' vjust = 0,
#' segment.size = 0.2) %>%
#' gf_lims(x = c(1, 6), y = c(1, 0.9)) %>%
#' gf_theme(
#' axis.line.y = element_blank(),
#' axis.ticks.y = element_blank(),
#' axis.text.y = element_blank(),
#' axis.title.y = element_blank()
#' ) %>%
#' gf_theme(theme_bw())
#' @rdname gf_text_repel
#'
#' @export
# Formula-interface wrapper around ggrepel::geom_label_repel -- the same
# pattern as gf_text_repel() but with the extra label-box parameters
# (label.padding, label.r, label.size).  The `pre` block converts any
# nudge_x/nudge_y into a position_nudge() before the layer is built.
# NOTE(review): `geom` is set to "text_repel" even though `layer_fun` is
# geom_label_repel -- confirm whether "label_repel" was intended here.
gf_label_repel <-
ggformula::layer_factory(
layer_fun = ggrepel::geom_label_repel,
geom = "text_repel",
stat = "identity",
position = "identity",
pre = {
if (nudge_x != 0 || nudge_y != 0)
position <- position_nudge(nudge_x, nudge_y)
},
aes_form = y ~ x,
extras = alist(
label = , alpha = , angle = , color = , family = , fontface = ,
group = , hjust = , lineheight = , size = , vjust = ,
parse = FALSE,
box.padding = 0.25, label.padding = 0.25, point.padding = 1e-06,
label.r = 0.15, label.size = 0.25,
segment.colour = NULL, segment.color = NULL,
segment.size = 0.5, segment.alpha = NULL,
min.segment.length = 0.5, arrow = NULL, force = 1,
max.iter = 2000, nudge_x = 0, nudge_y = 0,
xlim = c(NA, NA), ylim = c(NA, NA),
direction = c("both", "y", "x"),
seed = NA
)
)
|
bbaa6e30c22e3b11da4a23857356f519bc92bafe | 2536276335fe9690785cdce37a074fde17165dd3 | /lawlargenumbers.R | 1ea1860d736bb2737faffc11efeca6b3fd146056 | [] | no_license | sdunbarne/Math-Modeling-Econ-Finance-R | a609e776da056bded1d1b0d4226a9e0ed538b7ed | ae021e5c1ef871dfd3267cc490e46464988d2c89 | refs/heads/master | 2020-06-03T02:44:03.862654 | 2019-06-11T15:40:41 | 2019-06-11T15:40:41 | 191,401,843 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,172 | r | lawlargenumbers.R | p <- 0.5
n <- 10000   # coin flips per experiment
k <- 1000    # independent repetitions of the experiment

# One column per experiment, one logical flip (heads = TRUE) per row.
coinFlips <- array( (runif(n*k) <= p), dim=c(n,k))

# Column sums give k head counts, i.e. a size-k sample from Binomial(n, p).
headsTotal <- colSums(coinFlips)

epsilon <- 0.01
mu <- p

# Fraction of experiments whose empirical head frequency deviates from the
# true mean by more than epsilon -- the probability the weak law of large
# numbers says goes to zero as n grows.
deviations <- abs(headsTotal / n - mu)
prob <- mean(deviations > epsilon)

cat(sprintf("Empirical probability: %f \n", prob))

## NAME: lawlargenumbers.R
## USAGE: within R, at the interactive prompt: source("lawlargenumbers.R")
## REQUIRED ARGUMENTS: none
## OPTIONS: none
## DESCRIPTION: Flip a coin n times, repeat the experiment k times, and
##   estimate the probability that the sample proportion of heads
##   deviates from the mean by more than epsilon.
## DIAGNOSTICS: none
## DEPENDENCIES: none beyond base R; relies on `p` defined earlier in the file.
## BUGS AND LIMITATIONS: none known
## Note: profiling shows the majority of time is spent generating runif(n*k).
## AUTHOR: Steve Dunbar
## VERSION: Version 1.0 Tue Dec 4, 2012 5:18 AM
## KEYWORDS: Coin flips, binomial random variable.
|
a198469f36d14fe06ca5e8eb047b8b9857302217 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/gethr/examples/eth_submitHashrate.Rd.R | bd290093dd10fd2199e43e3be94c7aee669b0349 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | eth_submitHashrate.Rd.R | library(gethr)
### Name: eth_submitHashrate
### Title: Mining hashrate submission.
### Aliases: eth_submitHashrate
### ** Examples
## No test:
# Extracted package example: report a mining hashrate (first argument) and a
# 32-byte client identifier to an Ethereum node via the gethr package.
# Marked "No test" because it needs a running node; note the hex id string
# literal deliberately spans two lines below.
eth_submitHashrate(5200050, '0x1234567890abcdef1234567890abcdef12345678
90abcdef1234567890abcdef')
## End(No test)
|
9e44727ddf1706261a447be23e682544e21ca38f | cd2f27faac9571f15afaf4c63e90d001b7ed33de | /man/calc_U_feed_U_eiou_r_eiou.Rd | 554bf4726937ec6d3f439ffed94201935c96fd90 | [
"MIT"
] | permissive | EnergyEconomyDecoupling/MWTools | 2430ad483b9bd759088e0a79572ca691ce05e9e4 | a3488a24a850d7e2338307446b66961ec3feb68a | refs/heads/master | 2023-09-04T13:03:10.451579 | 2023-08-20T09:30:56 | 2023-08-20T09:30:56 | 308,628,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,432 | rd | calc_U_feed_U_eiou_r_eiou.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psut.R
\name{calc_U_feed_U_eiou_r_eiou}
\alias{calc_U_feed_U_eiou_r_eiou}
\title{Calculate U_feed, U_eiou, and r_eiou columns from a U matrix}
\usage{
calc_U_feed_U_eiou_r_eiou(
.df = NULL,
U = MWTools::psut_cols$U,
U_feed = MWTools::psut_cols$U_feed,
U_eiou = MWTools::psut_cols$U_eiou,
r_eiou = MWTools::psut_cols$r_eiou
)
}
\arguments{
\item{.df}{A PSUT data frame containing a column of \code{U} matrices.
Default is \code{NULL}, allowing a single matrix for the \code{U} argument.}
\item{U}{The name of the incoming \code{U} matrix. See \code{MWTools::psut_cols}.}
\item{U_feed, U_eiou, r_eiou}{Names for outgoing matrices. See \code{MWTools::psut_cols}.}
}
\value{
\code{.df} with new columns for \code{U_feed}, \code{U_eiou}, and \code{r_eiou} matrices.
}
\description{
\code{U_feed}, \code{U_eiou}, and \code{r_eiou} matrices are calculated from \code{U}.
All three matrices (\code{U_feed}, \code{U_eiou}, and \code{r_eiou})
have the same structure (row names, column names, row types, and column types)
as \code{U}.
For \code{MWTools}, there is no energy industry own use (EIOU),
so \code{U_feed} is simply a copy of \code{U}, and \code{U_eiou} and \code{r_eiou} are full of \code{0}s.
}
\details{
This function employs \code{matsindf::matsindf_apply()} internally, so
\code{U} can be either a single matrix or the name of the \code{U} column in \code{.df}.
}
\examples{
ilo_working_hours_data <- read.csv(file = MWTools::ilo_working_hours_test_data_path())
ilo_employment_data <- read.csv(file = MWTools::ilo_employment_test_data_path())
hmw_data <- prepareRawILOData(ilo_working_hours_data = ilo_working_hours_data,
ilo_employment_data = ilo_employment_data)
hmw_df <- hmw_data \%>\%
calc_hmw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
amw_df <- amw_test_data_path() \%>\%
read.csv() \%>\%
calc_amw_pfu() \%>\%
# Keep only a few years for speed.
dplyr::filter(Year \%in\% 2000:2002)
specify_energy_type_method(hmw_df, amw_df) \%>\%
specify_product() \%>\%
specify_TJ() \%>\%
MWTools::specify_primary_production() \%>\%
specify_useful_products() \%>\%
specify_fu_machines() \%>\%
specify_last_stages() \%>\%
MWTools::add_row_col_meta() \%>\%
MWTools::collapse_to_psut() \%>\%
calc_S_units() \%>\%
calc_U_feed_U_eiou_r_eiou()
}
|
0bf3b515527d1d4410ec251f04a62ea50bc22ffe | 40bab98c8af2e2c7950c81932d7b4517ac8deecb | /question15.R | 1b55eb93ca6c6ee4fb3a6cbf819b5acbd5e2fc5d | [] | no_license | brittbarreto/es2019HW7 | 6349e18086d094aa23575b7c6360e37d8d83e77d | 1bdcf1216d0f9faeb2723f51953ceeb4006b7c8c | refs/heads/master | 2020-04-30T04:49:04.162842 | 2019-03-20T06:14:00 | 2019-03-20T06:14:00 | 176,617,352 | 0 | 0 | null | 2019-03-19T23:59:36 | 2019-03-19T23:59:35 | null | UTF-8 | R | false | false | 2,029 | r | question15.R | library(data.table)
library(stringr)
library(readxl)
library(ggforce)
# California ozone homework: build daily/annual ozone summaries per site and
# county, then plot annual daily means for a county matched by regex.
# when you do HW7, skip data preparation and go to the function I made.
## import the data
#data downloading and preparation
o3.filenames <- list.files("data/ca_ozone", pattern = ".txt")
#change the working directory
# NOTE(review): setwd() inside a script makes it non-re-runnable from the
# project root; consider full paths instead.
setwd("data/ca_ozone")
#Load the data
o3.filelist <- lapply(o3.filenames, read_delim, delim = "|")
#set the name for each element in the list
names(o3.filelist) <- gsub(".txt", "", o3.filenames)
# Daily mean ozone per site: stack all files, then average obs by site/date.
daily <- o3.filelist %>%
rbindlist() %>%
group_by(site = as.factor(site), date) %>%
summarize(o3 = mean(obs, na.rm = TRUE))
# NOTE(review): `loc` (the site-location table) is used here but never
# created in this file -- confirm where it is loaded.
colnames(loc)[1] <- "site"
daily.site <- daily %>%
left_join(loc, by = "site")
# Before making the function, data preparation is needed.
#change the column name "Site Name" because this includes a space
colnames(daily.site)[5] <- "SiteName"
colnames(daily.site)[11] <- "CountyName"
#add new columns for annual mean, median, max, and min for each site.
by_sitename_year <- daily.site %>%
mutate(obs_year = year(date)) %>%
group_by(SiteName, obs_year) %>%
summarize(mean = mean(o3), median = median(o3), max = max(o3), min = min(o3))
#Before making the function, data preparation is needed.
# NOTE(review): the line below is a stray R Markdown chunk fence; it is a
# syntax error if this file is source()d -- it should be removed when the
# script is run outside a .Rmd document.
```{r}
# County-level daily means, then annual averages of those daily means.
by_county_day <- daily.site %>%
mutate(year = year(date), month = month(date),day = mday(date)) %>%
group_by(CountyName, year, month, day) %>%
summarize(daily_mean = mean(o3))
#calculate annural daily mean ozone concentration
daily_mean_by_county <- by_county_day %>%
group_by(CountyName, year) %>%
summarize(annual_daily_mean = mean(daily_mean))
# Print and plot the annual daily-mean ozone series for every county whose
# name matches the regular expression `county`.
annual_daily_mean_county <- function(county){
#detect county by a regular expression assigned in county parameter
selected_data <- filter(daily_mean_by_county, str_detect(CountyName, pattern = county))
#results in quantitive foramt
print(selected_data)
#visual format
plot <- ggplot(data = selected_data)+
geom_point(mapping = aes(x = year, y = annual_daily_mean))
print(plot)
}
annual_daily_mean_county("Merced")
|
0db5c85c88c630242b7f7b128e5b2101be00894f | d4d4f909ecaff0c21d83443a8e0eebbd44291965 | /dataAnalysis.R | 2d704941d5844e14f20ba1357f6fa88da272b2a2 | [] | no_license | ysamwang/graphConfSet | 4861f46f4b1b128ef0de0b659dbb50911fdd6a09 | ecb3ff18d4b326558c99f5d93b9e3f5d80a0f8f6 | refs/heads/master | 2021-01-02T11:08:50.056227 | 2020-02-10T19:49:05 | 2020-02-10T19:49:05 | 239,594,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 550 | r | dataAnalysis.R | Y <- as.matrix(read.csv("data/Bollentestdata.txt", sep = "\t", header = T))
# Standardise each column, eyeball the marginals, then run the project's
# exhaustive branch-and-bound search and a few ordered p-value checks.
Y <- scale(Y)
par(mfrow = c(2,3))
for(i in 1:6){
hist(Y[,i])
}
# exhaustiveBB / getPvalSpeed / minOfUnif are project functions (defined
# elsewhere in this repository); K = number of candidate orderings kept,
# alpha = confidence level -- see their definitions for exact semantics.
outFisher <- exhaustiveBB(Y, verbose = T, aggMethod = "fisher", K = 3, alpha = .1)
outOther <- exhaustiveBB(Y, verbose = T, aggMethod = "other", K = 3, alpha = .1)
getPvalSpeed(X = Y[, c(1,3,6)], Y = Y[, 5])
getPvalSpeed(X = Y[, c(1,3,6, 5)], Y = Y[, 4])
getPvalSpeed(X = Y[, c(1,3,6, 5, 4)], Y = Y[, 2])
# Fisher-style combination of three p-values.
# NOTE(review): Fisher's method for m = 3 p-values uses df = 2m = 6, but
# df = 4 is used here -- confirm whether that is intentional.
pchisq(-2 * (log(.023) + log(.751) + log(.098)), df = 4, lower = F)
# 10% quantile of the minimum of 3 uniforms (project helper minOfUnif).
quantile(minOfUnif(3), .1)
|
73a13c0577b322e2dfbb91bc4d064f8d1bef8e03 | 30d21f7163d8a0382e05566a48c0b042dc96ced6 | /Raven Modelling Library/Spatial Processing/Landuse_Changes.R | 206424a87806988879e1eb2cd6134089eca3c9ac | [] | no_license | mchernos/R-Hydro-Library | 123ad54592aa5e9b1fbb17d5dbe9a5e609c6b3d7 | 402c333918fb71736fc269667f4094f24d5881c2 | refs/heads/master | 2020-05-21T13:35:17.496809 | 2018-02-20T16:32:59 | 2018-02-20T16:32:59 | 45,942,745 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,924 | r | Landuse_Changes.R | # Read in all HRUs and get Differencing Sets
# M Chernos - January 25, 2017
#
# Reads the decade-by-decade HRU cover forecasts and joins them into one
# wide table `x` (one row per HRU, one cover column per forecast year).
# NOTE(review): rm(list = ls()) wipes the user's workspace when sourced;
# generally discouraged in shared scripts.
rm(list = ls())
library('tidyverse')
# Function to read in HRU files
# Reads AO_HRUs/<file>, keeps hru + COVER, and renames the COVER column to
# the forecast year embedded in the file name (e.g. "Forecast_2020_583").
read.hrus = function(file){
temp = read_csv(paste0('AO_HRUs/',file)) %>%
select(hru, COVER)
colnames(temp) = c('hru', gsub('adams_hrus_', '', gsub('.csv', '',file)))
temp
}
# Read in all HRU files
files = c('adams_hrus_Forecast_2010.csv',
paste0('adams_hrus_Forecast_', seq(2020, 2060, 10), '_583.csv') )
# Full-join all decades on hru: x has columns (hru, 2010, 2020, ..., 2060).
x = Reduce(function(x,y){full_join(x,y)}, lapply(files, read.hrus))
# All cover classes observed in the 2010 baseline.
types = unique(x$Forecast_2010)
# ALTERNATE VERSION
# Read in all HRU files
# files = c('adams_hrus_Forecast_wcc_2010.csv',
# paste0('adams_hrus_Forecast_wcc_', seq(2020, 2060, 10), '_687.csv') )
# x = Reduce(function(x,y){full_join(x,y)}, lapply(files, read.hrus))
# types = unique(x$Forecast_wcc_2010)
# Work-horse function: return the HRU ids whose cover class changes from
# `old` (previous decade) to `new` (in `year`).
#
# Relies on the global data frame `x`, whose columns are
# (hru, <2010 cover>, <2020 cover>, ..., <2060 cover>), so the cover for
# decade i sits in column i + 1.
#
# @param old  cover class in the preceding decade (e.g. 'FOREST').
# @param new  cover class in `year` (e.g. 'CUT').
# @param year one of '2020'..'2060', as a character string.
# @return vector of hru ids showing that transition.
find.missing = function(old, new, year){
  # Map the decade to its column pair arithmetically ('2020' -> cols 2/3,
  # '2030' -> 3/4, ..., '2060' -> 6/7), replacing the original hard-coded
  # if-chain; unknown years now fail loudly instead of with a cryptic
  # "object 'xi' not found".
  idx <- match(year, c('2020', '2030', '2040', '2050', '2060'))
  if (is.na(idx)) stop("year must be one of '2020'..'2060'")
  xi  <- idx + 1
  xi1 <- idx + 2
  x$hru[x[,xi] == old & x[,xi1] == new]
}
# find.missing('FOREST', 'CUT', '2020')
# Get Changes in HRU Groups and write to File
# For a destination class `old` and a decade `year`, find every transition
# <other type> -> `old`, and append Raven :HRUGroup blocks plus matching
# :LandUseChange/:HRUTypeChange commands to files under HRUGroups/.
# NOTE(review): despite its name, the `old` parameter here is the
# *destination* class -- lapply below calls find.missing(type, old, year),
# i.e. each other type is the "from" class.
# Uses the globals `x` (via find.missing) and `types`.
get.changes = function(old, year){
# old = 'FOREST'
# X[[i]] = HRUs changing from types[i] to `old` in `year`.
X = lapply(types[types != old], find.missing, old, year)
Xnames = paste(types[types != old], 'TO', old, year, sep = '_')
# Threshold to use change (must have at least 50 HRUs changing)
xnum = which(lengths(X) > 49)
# Make HRU Groups
Tx = c()
for(i in xnum){
Tx = c(Tx,
paste0(':HRUGroup ',Xnames[i] ),
paste(X[[i]],collapse = ','),
paste0(':EndHRUGroup'),
'#')
}
# NOTE(review): append = T means re-running the script duplicates entries in
# these files; delete HRUGroups/*.txt before a fresh run.
write(Tx, paste0('HRUGroups/',year,'.txt'), append = T)
# Make HRU Lists
# Simulation-calendar year for the change: decades >= 2040 map back by 59
# years, earlier ones by 30.
yr1 = ifelse(as.numeric(year)> 2039, as.numeric(year)-59, as.numeric(year)-30)
# :LandUseChange GLACIER_2040 ALPINE 1986-01-02
# :HRUTypeChange GLACIER_2040 STANDARD 1986-01-02
Lx = c()
for(i in xnum){
Lx = c(Lx,
paste(':LandUseChange',Xnames[i],
gsub(paste0('_',year), '', unlist(strsplit(Xnames[i], 'TO_'))[2]), paste0(yr1, '-01-01')),
paste(':HRUTypeChange ', Xnames[i],
gsub(paste0('_',year), '', 'STANDARD'), paste0(yr1, '-01-01')),
'#')
}
write(Lx, paste0('HRUGroups/RVPlist',year,'.txt'), append = T)
# Define HRUs
if(length(xnum)>0){
Hx = paste(c(':DefineHRUGroups ',
paste(Xnames[xnum], collapse = ', ')),
collapse = '')
write(Hx, paste0('HRUGroups/HRUlist.txt'), append = T)
}
}
# Generate HRUGroups and write to file for each year
# Each call appends blocks for every destination class in `types`.
lapply(types, get.changes, '2020')
lapply(types, get.changes, '2030')
lapply(types, get.changes, '2040')
lapply(types, get.changes, '2050')
lapply(types, get.changes, '2060')
|
95c19810b484c5048d111cf4eb352a93ab01fde8 | c0fdc13358da83ec9dc7a12bb18b886c3d09f570 | /Harden in the playoffs.R | 8ec65e59cd627cde0f0a30f7c7cd09ed883b77e3 | [] | no_license | lbt3/DSCI304FinalProjectScripts | f10c4da6558546216ad4c39e36fc31d17fadb5a0 | 72e55aacf024221ea40423878a2272e2e9982729 | refs/heads/main | 2023-04-23T02:39:24.648682 | 2021-05-11T08:50:00 | 2021-05-11T08:50:00 | 366,314,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,464 | r | Harden in the playoffs.R | setwd("~/Documents/Rice_Junior_Year/Second_Semester/DSCI_304")
library(readxl)
library(ggplot2)
library(dplyr)
library(plotly)
# Compare James Harden's playoff production (2014-2020) against other
# players: total VORP bar chart and a PPG vs true-shooting scatter.
playoffstats <- read_excel("Playoff Stats.xlsx")
#again filtering out small sample sizes
playoffstats <- subset(playoffstats, playoffstats$MP > 5 & playoffstats$MP_total > 48)
playoffstatsgrouped <- group_by(playoffstats, Player)
# Career playoff games per player, joined back so each season row knows it.
totalgames <- summarise(playoffstatsgrouped,
totalgames = sum(G))
playoffstatsgrouped <- left_join(playoffstatsgrouped, totalgames, by = "Player")
#Weight each season of playoffs correctly
# Games-played weights so that summing the weighted per-season PPG/TS% gives
# a games-weighted career average.
playoffstatsgrouped$sznweight <- (playoffstatsgrouped$G)/(playoffstatsgrouped$totalgames)
playoffstatsgrouped$PTSweightedavg <- (playoffstatsgrouped$PTS)*(playoffstatsgrouped$sznweight)
playoffstatsgrouped$TSPweightedavg <- (playoffstatsgrouped$TSP)*(playoffstatsgrouped$sznweight)
playoffimpstats <- summarise(playoffstatsgrouped,
totalVORP = sum(VORP),
avgPPG = sum(PTSweightedavg),
avgTSP = sum(TSPweightedavg))
playoffimpstats$avgPPG <- round(playoffimpstats$avgPPG, 1)
playoffimpstats$avgTSP <- round(playoffimpstats$avgTSP, 3)
# Factor flag used to colour Harden red vs everyone else black.
playoffimpstats$isithim <- ifelse(playoffimpstats$Player == "James Harden", 1, 0)
playoffimpstats$isithim <- as.factor(playoffimpstats$isithim)
# Top 15 by total playoff VORP, highest first.
playoffimpstatsVORPordered <- playoffimpstats[order(-playoffimpstats$totalVORP),]
playofftop15VORP <- playoffimpstatsVORPordered[1:15,]
VORPplayoffs <- ggplot(playofftop15VORP, mapping = aes(y = reorder(Player,totalVORP),
x = totalVORP, fill = isithim)) +
geom_bar(stat = "identity") +
ggtitle("Total VORP in Playoffs from 2014-2020") +
labs(x = "Total VORP") +
theme(axis.title.y = element_blank(), plot.title = element_text(hjust = 0.5),
legend.position = "none") +
scale_fill_manual(values = c("black", "red")) +
geom_text(mapping = aes(label = totalVORP), colour = "white", hjust = 1.25)
VORPplayoffs
# Interactive scatter: hover shows player name, PPG and TS%.
PointsvEffPlayoffs <- ggplot(playoffimpstats, mapping = aes(y = avgTSP, x = avgPPG, text = Player, color = isithim)) +
geom_point() + theme(legend.position = "none", plot.title = element_text(size = 10, hjust = 0.5)) +
ggtitle("Points Per Game vs. True Shooting Percentage \n in the Playoffs 2014-2020") +
labs(x = "PPG", y = "TS%") + scale_color_manual(values = c("black", "red"))
ggplotly(PointsvEffPlayoffs, tooltip = c("text", "x", "y"))
|
4cd1789b1191b67e234cd2a239e457e62497d52a | 31c2c16323ca5935c2381ef0d379c5b8a09df3ff | /man/rq.fit.sfn_start_val.Rd | 7103d542b9b110d7eefbedd4f582e047c06d15c7 | [] | no_license | mazikazemifinance/quantspace | 61c2ac5fd6fae1794236d8f067e04e4570d1bff0 | ba13260f0e653cc3b0c541a990dab94f5a1edfc1 | refs/heads/master | 2022-12-05T02:21:44.475650 | 2020-09-04T15:56:44 | 2020-09-04T15:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,062 | rd | rq.fit.sfn_start_val.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quant_regress.R
\name{rq.fit.sfn_start_val}
\alias{rq.fit.sfn_start_val}
\title{Sparse Regression Quantile Fitting with Weights}
\usage{
rq.fit.sfn_start_val(
a,
y,
tau = 0.5,
rhs = (1 - tau) * c(t(a) \%*\% rep(1, length(y))),
control,
sv,
weight_vec = NULL
)
}
\arguments{
\item{a}{structure of the design matrix X stored in csr format}
\item{y}{outcome vector}
\item{tau}{desired quantile}
\item{rhs}{the right-hand-side of the dual problem; regular users shouldn't need to specify this,
but in special cases can be quite usefully altered to meet special needs.
See e.g. Section 6.8 of Koenker (2005).}
\item{control}{control parameters for fitting routines: see [quantreg::sfn.control()]}
\item{weight_vec}{Optional vector of weights for regression}
}
\description{
Sparse Regression Quantile Fitting with Weights
}
\details{
A wrapper around the rq.fit.sfn function from the quantreg package,
extended to allow for a user-supplied starting value and weights
}
|
0ec5b10e3718c5bfbb97108d1d50cd51e061a056 | 0ee50dd399127ebe38bc8f5197114d46d717ccd7 | /R/print.combat.R | 39d4e813a9aca223f0c6eeaf3da6c8db3059aacf | [] | no_license | cran/bapred | 0051d57056f886e6df028255f0c85339c70d66f1 | e24720be3c6f82c2d5422ae97a8f12f5edc15adc | refs/heads/master | 2022-07-07T03:06:58.840921 | 2022-06-22T07:20:13 | 2022-06-22T07:20:13 | 48,076,867 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 404 | r | print.combat.R | print.combat <-
function(x, ...) {
  # S3 print method for "combat" objects: one line each for the batch count,
  # the per-batch observation counts, and the number of adjusted variables.
  #
  # x   : "combat" object with components nbatches, batch and xadj.
  # ... : unused; kept for compatibility with the print() generic.
  cat("'ComBat'-adjusted training data with information for addon batch effect adjustment.", "\n")
  cat(paste("Number of batches: ", x$nbatches, sep=""), "\n")
  cat(paste("Number(s) of observations (within each batch): ",
            paste(as.vector(table(x$batch)), collapse=", "), sep=""), "\n")
  cat(paste("Number of variables: ", ncol(x$xadj), sep=""), "\n")
  # Bug fix: print methods conventionally return their argument invisibly;
  # the original implicitly returned cat()'s NULL.
  invisible(x)
}
|
a10e99147af18f617d4af17d965700b5b78f5072 | 21308f47abf9b54dba794d1c8df2f2c49b5c1137 | /man/metric.cluster.global.Rd | 2e54b1278009abf729a0451892bf668d25672136 | [] | no_license | cran/fastnet | e8eef85c2bf4027cd552bd3c54bed7fca07ecc0c | f22b02e806abcbda84fde09eebacfe3ba8b2069b | refs/heads/master | 2021-01-12T04:34:36.671878 | 2020-12-01T06:40:02 | 2020-12-01T06:40:02 | 77,675,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 921 | rd | metric.cluster.global.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.cluster.global.R
\name{metric.cluster.global}
\alias{metric.cluster.global}
\title{Global Clustering Coefficient}
\usage{
metric.cluster.global(g)
}
\arguments{
\item{g}{The input network.}
}
\value{
A real constant.
}
\description{
Calculate the global clustering coefficient of a graph.
}
\details{
The global clustering coefficient measures the ratio of (closed) triples versus the total number of all possible triples in network \emph{g}. \code{metric.cluster.global()} calculates the global clustering coefficient of \emph{g}.
}
\examples{
\dontrun{
x <- net.erdos.renyi.gnp(1000, 0.01)
metric.cluster.global(x)}
}
\references{
Wasserman, Stanley, and Katherine Faust. Social network analysis: Methods and applications. Vol. 8. Cambridge university press, 1994.
}
\author{
Xu Dong, Nazrul Shaikh.
}
|
4bc14c763bb92e8d81ac6b4f05b9c03d20ba2986 | 8922b91ab54d00b9a7b239c6847b3b5ee65868ca | /project/knn.R | 0bccd17d07dcf0859ed17a363f81fe1843b8f955 | [] | no_license | sanjaysajjan/dataminingnjit | ade516b84ab9ac527155fa09a46d38d17857fd90 | 4fd13c534387cecba2614503667b12c24f0c561b | refs/heads/master | 2022-12-10T14:43:04.989889 | 2020-09-11T23:55:59 | 2020-09-11T23:55:59 | 294,834,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,579 | r | knn.R | #KNN
#mydata<- mydata[-c(4)]
data(training_set)
str(training_set)
summary(training_set)
knntraindata <- mydata[,-c(2,4,9,10)]
summary(oct2015)
accuracy_test <- oct2015[,8]
str(accuracy_test)
summary(accuracy_test)
#data(training_set)
str(knntraindata)
#summary(knntraindata$TRUE.date_block_num)
#summary(training_set$TRUE.date_block_num)
summary(mydata)
summary(knntraindata)
#summary(mydata$month_number)
#mydata_q1 <-
#summary(mydata_q1)
normalize <-function(x) { return((x-min(x))/(max(x) - min(x))) }
#normalize(c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33))
norm_data <- as.data.frame(lapply(knntraindata[,], normalize))
summary(norm_data)
#summary(knntraindata)
#str(norm_data)
#summary(norm_data)
#ran <- sample(1:nrow(mydata), 0.9 * nrow(mydata))
#require(class)
#k = 2
#training data = norm_data
# test data =
knn_split<- split(norm_data, norm_data$date_block_num !=1 )
norm_train_knn <- data.frame(knn_split["TRUE"])
norm_test_knn <- data.frame(knn_split["FALSE"])
#str(wo_norm_train_knn)
#str(wo_norm_test_knn)
#normtestknn <- norm_data[]
summary(norm_train_knn)
summary(norm_test_knn)
#knn_split_q1 <- split(norm_data,norm_data$date_block_num < 0.12121212)
knn_split_m1<- split(norm_data, norm_data$date_block_num == 0)
train_m1<- data.frame(knn_split_m1["TRUE"])
summary(train_m1)
#train_m1 <- train_m1[-c(4)]
summary(train_m1)
train_q1<- data.frame(knn_split_q1["TRUE"])
#split_data <- split(mydata, mydata$date_block_num != 33)
str(knn_split)
norm_train_knn <- data.frame(knn_split["TRUE"])
norm_test_knn <- data.frame(knn_split["FALSE"])
summary(norm_test_knn)
summary(norm_train_knn)
str(norm_test_knn)
str(norm_train_knn)
names(norm_test_knn)[1] <- "TRUE.item_id"
names(norm_test_knn)[2] <- "TRUE.item_category_id"
names(norm_test_knn)[3] <- "TRUE.shop_id"
#names(norm_test_knn)[4] <- "TRUE.date"
names(norm_test_knn)[4] <- "TRUE.date_block_num"
names(norm_test_knn)[5] <- "TRUE.item_price"
names(norm_test_knn)[6] <- "TRUE.item_cnt_day"
names(norm_test_knn)[7] <- "TRUE.daily_sales_value"
#train_target <- train_m1[,7]
summary(training_set)
train_target <- training_set[,8]
#test_target <- train_m1[,7]
str(train_target)
summary(train_target)
#str(test_target)
#summary(test_target)
#summary(knn_train)
#str(knn_train)
require(class)
m1 <- knn(train = norm_train_knn, test = norm_test_knn, cl = train_target, k= 341)
knn_pred <- knn(norm_train_knn, norm_test_knn,training_set[,8],k= 341)
summary(m1)
str(m1)
#plot(m1)
library(gmodels)
CrossTable(x = accuracy_test, y = m1, prop.chisq = FALSE)
tab <- table(m1,train_target)
summary(tab)
#predict(m1,oct2015)
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
accuracy(tab)
#rmse
sqrt(mean(m1- norm_test_knn$FALSE.daily_sales_value)^2)
library(KODAMA)
library(caTools)
prediction_knn<- predict(train_m1, norm_test_knn, train_target, k=341, agg.meth="majority")
k <- sqrt(115690)
print(k)
summary(norm_train_knn)
summary(train_target)
str(norm_train_knn)
str(train_target)
str(training_set)
summary(merged_all)
library(ISLR)
library(caret)
trControl = trainControl(method = "repeatedcv", number = 10, repeats = 3)
set.seed(222)
fit <- train(train_m1$TRUE.daily_sales_value ~ .,
data = train_m1,
method= 'knn', tuneLength = 20,
trControl = trControl,
preProc = c("center","scale"))
#head(train_m1)
#fit
|
b6f4b3d29febf937aae1e9ad815322d1475cc4f6 | 81d5295337b437004070aa0bdb74e9884f705509 | /plot3.R | a12fa62d686159cfdd9bf888a6c4f1f369793e85 | [] | no_license | kanwarujjaval/ExData_Plotting1 | 02ee0f12365b86b01b435f55b9b6c04c4d37e541 | f2ab1243bc936929f357b5b70d2d0100c3d1ee22 | refs/heads/master | 2021-01-18T05:59:30.791285 | 2014-07-13T20:54:32 | 2014-07-13T20:54:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 831 | r | plot3.R | #read file
data <- read.table("household_power_consumption.txt",
header=T,
sep=";",
na.strings="?")
#extract subset data
usable <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
#convert date and time from string to Date
usable$datetime <- paste(usable$Date, usable$Time)
usable$datetime <- strptime(usable$datetime, "%d/%m/%Y %R")
#open png device
png("plot3.png")
plot(usable$datetime,
usable$Sub_metering_1,
type="l",
ylab="Energy sub metering",
xlab="")
#add metering2 data
lines(usable$datetime,
usable$Sub_metering_2,
type="l",
col="red")
#add metering3 data
lines(usable$datetime,
usable$Sub_metering_3,
type="l",
col="blue")
#add legend
legend("topright",
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"),
lwd=1)
#put to png and close the png dev
dev.off() |
8a0b3ad0aa4d2bd76679720852c6e0105f105e4e | c477784547e1b392fae24aa47438c7eeb006c390 | /mrsat/mrsat-master/mrsat-master/man/summary.SATcurve.Rd | 773d313a966ac859652a44fa689c3ad238c40080 | [
"MIT"
] | permissive | ilte3/satf_project | 58d0cd78552dd0a0f7b678a32f795370f29ec29a | c3adac9b531c71fee73e967eb42e139c79f029cc | refs/heads/master | 2021-06-30T04:32:19.427026 | 2020-10-13T21:05:13 | 2020-10-13T21:05:13 | 179,873,157 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,472 | rd | summary.SATcurve.Rd | \name{summary.SATcurve}
\alias{summary.SATcurve}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summarize SATcurve object
}
\description{
Summary method for an "SATcurve" object
}
\usage{
\method{summary}{SATcurve}(object, ...)
}
\arguments{
\item{object}{
object of class \code{SATcurve}
}
\item{...}{other arguemnts to be passed. Not currently used.}
}
\value{
THe value returned is essentially the same as that returned by \code{\link{fit.SATcurve}}, but is rearrenged into a data farme with following columns:
\item{asym}{asymptote of the fitted curves. If there is more than one conditions, more than one columns of asym are returned, named as asym1, asym2, ... asymN, where N = number of conditions. }
\item{rate}{rate of the fitted curves. If there is more than one conditions, more than one columns of rate are returned, named as rate1, rate2, ... rateN, where N = number of conditions. }
\item{incp}{intercept of the fitted curves. If there is more than one conditions, more than one columns of incp are returned, named as incp1, incp2, ... incpN, where N = number of conditions. }
\item{R2}{R-squared of the model}
\item{adjR2}{Adjucted R-squared of the model}
\item{RMSEfit}{Root Mean Sqaured Error of the model}
\item{method}{The optimazation method used}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{fit.SATcurve}}, \code{\link{SATsummary.list}}
}
|
3bac8b66c2f9760dee7d8b311ca0246762399e8a | 537d337b9dd654bc117675f0e1ffffa0add75fd7 | /Compare list and vector.R | 32333006575f123978580b9f1fe04b40fbbe503d | [] | no_license | DenzMaou/Compare-List-and-Vector | acd83757eda9beb6d5bdd95538db4bcef82d9dc7 | e9998d20c6e7b63a1d061ae7080d7e1796f87a68 | refs/heads/master | 2022-11-10T12:19:40.629150 | 2020-06-12T04:12:22 | 2020-06-12T04:12:22 | 271,705,657 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,762 | r | Compare list and vector.R | cat("\104")
rm(list = ls())
setwd("C:/Users/USER/DESKTOP/Rscript(Deniel)")
getwd()
dir()
#Compare List and Vector3
Vec1 <-c(44,25,64,96,30)
Vec2 <-c(1,FALSE,9.8,"hello world")
typeof(Vec1)
typeof(Vec2)
x<-c("Jan","Feb","March","Apr","May","June","July")
y<-x[c(3,2,7)]
print(y)
#List
x<-c("Jan","Feb","March","Apr","June","July")
y<-x[c(TRUE,FALSE,TRUE,FALSE,FALSE,TRUE,TRUE)]
z<-x[c(-3,-7)]
c<-x[c(0,0,0,1,0,0,1)]
print(y)
print(z)
print(c)
#Vector
v1<-c(4,6,7,31,45)
v2<-c(54,1,10,86,14,57)
add.v<-v1+v2
print(add.v)
sub.v<-v1-v2
print(sub.v)
multi.v<-v1*v2
print(multi.v)
v1<-c(8,7,6,5,0,1)
v2<-c(7,15)
add.v<-v1+v2
#v2 becomes c(7,15,7,15,7,15))
print(add.v)
sub.v<-v1-v2
print(sub.v)
#Sort Vector
v<-c(4,78,-45,6,89,678)
sort.v<-sort(v)
print(sort.v)
#Sort the elements in the reverse order
revsort.v<-sort(v, decreasing = TRUE)
print(revsort.v)
#Sorting character vectors
v<-c("Jan","Feb","March","April")
sort.v<-sort(v)
print(sort.v)
#Sort the elements in the reverse order
revsort.v<-sort(v, decreasing = TRUE)
print(revsort.v)
#Matrix
M1<- matrix(c(1:9), nrow=3, ncol=3, byrow = TRUE)
print(M1)
rownames= c("row1", "row2" , "row3")
colnames= c("col1", "col2" , "col3")
M3<-matrix(c(1:9), nrow = 3, byrow = TRUE, dimnames = list(rownames, colnames))
print(M3)
#Data Frame
empid<-c(1:4)
empname<-c("Sam","Rob","Max","John")
empdept<-c("Sales", "Marketing", "HR", "R & D")
emp.data<-data.frame(empid,empname,empdept)
print(emp.data)
result<-data.frame(emp.data$empname,emp.data$empdept)
print(result)
result<-emp.data[1:2,]
print(result)
result<-emp.data[c(1,2), c(2,3)]
print(result)
emp.data$salary<-c(20000,30000,40000,27000)
n<-emp.data
print(n) |
8d0e8c874f78c536165892a33acba0dba3fee134 | 491bc7af32e2f56725bbbd194c6ba871d2095bf6 | /ExploratoryAnalysis/HCluster-Kmeans-Week3-ExploratoryDataAnalysis.R | cb264afece4b286f38299d2e31fb787d601f6d48 | [] | no_license | rkmalaiya/DataMiningSpecialization | 913bd8dc88b9ebe9cf37192aadf9fa8a9579f47a | ac8218fd8bf38e0a83cb560ce256cfe72cbcf204 | refs/heads/master | 2021-01-19T09:41:55.675637 | 2015-09-18T20:49:15 | 2015-09-18T20:49:15 | 32,335,642 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 840 | r | HCluster-Kmeans-Week3-ExploratoryDataAnalysis.R | set.seed(1234)
par(mar=c(0,0,0,0))
x <- rnorm(12, mean = rep(1:3, each = 4), sd = 0.2)
y <- rnorm(12, mean = rep(c(1,2,1), each = 4), sd = 0.2)
plot(x,y, col = "blue", pch = 19, cex = 2)
text(x + 0.05, y + 0.05, labels = as.character(1:12))
mydf <- data.frame(x = x, y = y)
distxy <- dist(mydf)
## Hierarchical Cluster
hcluster <- hclust(distxy)
plot(hcluster)
## drawing heat map
set.seed(143)
mydm <- as.matrix(mydf)[sample(1:12),]
heatmap(mydm)
#KMeans cluster
kmeansobj <- kmeans(mydf,centers = 3)
names(kmeansobj)
str(kmeansobj)
par(mar = repo(0.2,4))
plot(x,y,col = kmeansobj$cluster, pch = 19, cex = 2 )
points(kmeansobj$centers, col =1:3, pch = 3, cex = 3, lwd = 3)
## heatmap for kmeans
par(mfrow = c(1,2), mar = c(2,4,0.1,0.1))
image(t(mydf)[, nrow(mydf):1], yaxt = "n")
image(t(mydf)[, order(kmeansobj$cluster):1], yaxt = "n")
|
5366c32075504c3e92f9b17c38b6a9515b3a6a09 | 2dc78a3377c57e0e5fbe8ee41e85942946666a36 | /man/nlaDrawdownIndicator.Rd | cf93987054528997ba19ee8ff0357549374eee84 | [] | no_license | jasonelaw/aquamet | e5acce53127de4505486669597aed3dd74564282 | 3464af6dbd1acc7b163dc726f86811249293dd92 | refs/heads/master | 2020-03-25T16:19:46.810382 | 2018-08-07T20:51:01 | 2018-08-07T20:51:01 | 143,925,702 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,375 | rd | nlaDrawdownIndicator.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NLAphab_indicators.r
\name{nlaDrawdownIndicator}
\alias{nlaDrawdownIndicator}
\title{Calculate NLA Drawdown Indicator}
\usage{
nlaDrawdownIndicator(x, sampID, bfxVertDD, bfxHorizDD, ecoreg, lake_origin)
}
\arguments{
\item{x}{Input data frame containing other variables as specified in
arguments}
\item{sampID}{Vector of variables necessary to identify a unique sample
in \emph{x}.}
\item{bfxVertDD}{Vertical height to highwater mark in meters}
\item{bfxHorizDD}{Horizontal distance to highwater mark in meters}
\item{ecoreg}{Lake ecoregion, based on aggregated Omernick ecoregions,
with valid values of CPL, NAP, NPL, SAP, SPL, TPL, UMW, WMT, XER.}
\item{lake_origin}{Lake origin, with valid values of 'NATURAL'
or 'MAN_MADE'}
}
\value{
A data frame containing:
\itemize{
\item{sampID}{The variables in the argument \emph{sampID}}
\item{DRAWDOWN_COND}{Riparian anthropogenic disturbance condition class
(Small/Medium/Large/Not Assessed)}
}
}
\description{
Using metric values as inputs, calculate
indicator score for lake drawdown, based on both horizontal
and vertical drawdown. Each is treated separately, and the
two condition classes are then assessed to assign the
larger of the two condition classes.
}
\author{
Karen Blocksom \email{Blocksom.Karen@epa.gov}
}
\keyword{survey}
|
df8f34bb48be45bc98f13bee2fd89fdc5871e67b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/uwIntroStats/examples/lspline.Rd.R | 09383030403a076f7edf86b9ffaf4ccd6ed08af3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 413 | r | lspline.Rd.R | library(uwIntroStats)
### Name: lspline
### Title: Create Linear Splines
### Aliases: lspline lsplineD
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Reading in a dataset
mri <- read.table("http://www.emersonstatistics.com/datasets/mri.txt", header=TRUE)
attach(mri)
# Create a spline based on absolute
lspline(ldl, c(70, 100, 130, 160))
# Create a spline based on change
lsplineD(ldl, c(70, 100, 130, 160))
|
cc2edfed99193a6a208531cc8e8ced7e31dc9427 | 2d367cd526ec7482f129a4660a0dab5fa7ae5df1 | /scripts/scripts_20180521/generate_manuscript_figures/aDNAStatsFigures/aDNAStatsFiguresForSI.R | 3e71b79ad59d26b92ed485ec95207b356b4ef834 | [] | no_license | ab08028/OtterExomeProject | 1bad439e2c42162e854eab995c45e6c8391b2640 | 96c7758d424c44ae4d52dc951d89a59761b9a2c7 | refs/heads/master | 2021-06-01T20:23:02.554308 | 2020-08-13T17:01:22 | 2020-08-13T17:01:22 | 134,327,795 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,897 | r | aDNAStatsFiguresForSI.R | require(ggplot2)
require(scales)
######### aDNA plotting stats for supplement: A13, A29, A30 only:
# do we need to plot all the failed ones as well? Not sure how that all goes. # can add if asked?
plmxStats <- read.table("/Users/annabelbeichman/Documents/UCLA/Otters/OtterExomeProject/results/QC_Reports/plmx_mappingStats/20190926_fornewplots_aDNA.plusA29.A30/allaDNAPlmxStatsTogether.plusA29.A30.txt",header=T,stringsAsFactors = F)
plot.dir="/Users/annabelbeichman/Documents/UCLA/Otters/OtterExomeProject/results/QC_Reports/A13_A29_A30_StatReportForManuscript/"
head(plmxStats)
plmxStats$filter <- "not-used"
# the ones chosen to go forward are A13,A29 and A30
plmxStats[plmxStats$sampleShortName %in% c("A13","A29","A30"),]$filter <- "used"
plmxStats$sampleShortName <- unlist(lapply(strsplit(plmxStats$sample,"_"),"[",1))
plmxStats$descriptive <- unlist(lapply(strsplit(plmxStats$sample,"_Elut_"),"[",2))
# got to add lib blank:
plmxStats[grepl("Blank",plmxStats$sample),]$descriptive <- "libraryBlank"
######### NOTE there are multiple measures PER sample because of different references
# just use ferret reference? or sea otter?
####### first just want to show why I chose A13,A29 and A30: #######
p1 <- ggplot(plmxStats[plmxStats$statistic=="hits_unique" & plmxStats$reference=="sea_otter_23May2016_bS9RH.deduped.99",],aes(x=sample,y=value,fill=filter))+
geom_bar(stat="identity",position="dodge")+
theme_bw()+
xlab("")+
ylab("Number of reads that map uniquely to sea otter genome")+
coord_flip()+
ggtitle("The three ancient samples chosen for futher analysis (blue)\nhave the most reads mapping to the sea otter genome")+
theme(legend.position = "none")+
scale_y_continuous(labels=comma)
p1
ggsave(paste(plot.dir,"uniqueReadCount.allaDNASamples.pdf",sep=""),height=5,width=9)
######
######## show more properties of those good 3: ########
# note A29 was first screened as A19
# but I don't have canfam or human info for A29 (was too slow), but showed initial low levels of contam. But maybe I don't need to do show this. (just have a backup)
statsToPlot1 <- c("hits_unique_frac","hits_unique")
p2Temp <- ggplot(plmxStats[plmxStats$statistic %in% statsToPlot1 & plmxStats$filter=="used",],aes(x=sampleShortName,y=value,fill=reference))+
geom_bar(stat="identity",position="dodge")+
facet_wrap(~statistic,scales="free")+
coord_flip()+
theme_bw()
p2Temp
ggsave(paste(plot.dir,"notNeeded.MappingToDogHuman.A30.A13.MissingA29ButCouldUseA19instead.pdf",sep=""),p2Temp,height=5,width=9)
########## Show read distribution for A13,A29,A30 #########
statsToPlot2 <- c("hits_unique","hits_clonality","hits_length")
refs=c("sea_otter_23May2016_bS9RH.deduped.99","Mustela_putorius_furo.MusPutFur1.0.dna.toplevel")
### only plot top 3:
p3a <- ggplot(plmxStats[plmxStats$reference %in% refs & plmxStats$statistic %in% statsToPlot2 & plmxStats$filter=="used",],aes(x=sampleShortName,y=value,fill=reference))+
geom_bar(stat="identity",position="dodge")+
facet_wrap(~statistic,scales="free")+
theme_bw()+
xlab("")+
ylab("")
p3a
ggsave(paste(plot.dir,"A13.A29.A30.clonality.hitsLengths.TotalLength.ElutVsMfur.pdf",sep=""),p3a,height=5,width=9)
# just showing diff in mapping between ancient/modern: (but note in a plot made elsewhere when you add in modern samples they DON'T show the differnce-- PHEW PHEW PHEW.)
statsToPlot3 <- c("hits_unique")
refs=c("sea_otter_23May2016_bS9RH.deduped.99","Mustela_putorius_furo.MusPutFur1.0.dna.toplevel")
p3b <- ggplot(plmxStats[plmxStats$reference %in% refs & plmxStats$statistic %in% statsToPlot3 & plmxStats$filter=="used",],aes(x=sampleShortName,y=value,fill=reference))+
geom_bar(stat="identity",position="dodge")+
facet_wrap(~statistic,scales="free")+
theme_bw()+
xlab("")+
ylab("reads")+
scale_y_continuous(labels=comma)
p3b
ggsave(paste(plot.dir,"A13.A29.A30.ElutVsMfurHits.pdf",sep=""),p3b,height=4,width=7)
|
9d340325296b021a1b80a32b929d769d365aa8a0 | 673e813b89de8f8ccffe671c6b6070026abbc53d | /R/EventPointer_RNASeq.R | 64684bb3db1d45e13a2d1d1120ab34eec13ea593 | [] | no_license | jpromeror/EventPointer | 4eaa1f3a6bc653e72afef317517eec42dff41627 | aa24e3a15c6bdbd7b6c950b962b3d24c3eb80950 | refs/heads/master | 2023-05-25T16:48:24.853661 | 2023-05-15T11:14:22 | 2023-05-15T11:14:22 | 80,099,260 | 4 | 0 | null | 2022-11-28T11:24:50 | 2017-01-26T09:01:24 | R | UTF-8 | R | false | false | 8,016 | r | EventPointer_RNASeq.R | #' Statistical analysis of alternative splcing events for RNASeq data
#'
#' Statistical analysis of all the alternative splicing events found in the given bam files.
#'
#' @param Events Output from EventDetection function
#' @param Design The design matrix for the experiment.
#' @param Contrast The contrast matrix for the experiment.
#' @param Statistic Statistical test to identify differential splicing events, must be one of : LogFC, Dif_LogFC and DRS.
#' @param PSI Boolean variable to indicate if PSI should be calculated for every splicing event.
#'
#'
#' @return Data.frame ordered by the splicing p.value . The object contains the different information for each splicing event
#' such as Gene name, event type, genomic position, p.value, z.value and delta PSI.
#'
#' @examples
#'
#' data(AllEvents_RNASeq)
#' Dmatrix<-matrix(c(1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1),ncol=2,byrow=FALSE)
#' Cmatrix<-t(t(c(0,1)))
#' Events <- EventPointer_RNASeq(AllEvents_RNASeq,Dmatrix,Cmatrix,Statistic='LogFC',PSI=TRUE)
#' @export
EventPointer_RNASeq <- function(Events, Design,
Contrast, Statistic = "LogFC", PSI = FALSE) {
### Screen output for users new output
# stopifnot(Statistic == 'LogFC' |
# Statistic == 'Dif_LogFC' | Statistic ==
# 'DRS')
if (is.null(Events)) {
stop("Missing alternative splicing events")
}
options(warn = -1)
if (Statistic == "LogFC") {
stt <- "Logarithm of the fold change of both isoforms"
} else if (Statistic == "Dif_LogFC") {
stt <- "Relative concentrations of both isoforms"
} else if (Statistic == "DRS") {
stt <- "Difference of the logarithm of the fold change of both isoforms"
} else {
stop("Wrong statistical test provided")
}
if (PSI) {
psi_m <- " Delta PSI will be calculated"
}
# if (classss(Design) != "matrix" |
# classss(Contrast) != "matrix") {
if (!is(Design,"matrix") |
!is(Contrast,"matrix")) {
stop("Wrong Design and/or Contrast matrices")
}
TimeS <- paste(format(Sys.time(), "%X"),
sep = "")
cat(paste(TimeS, " Running EventPointer: ",
sep = ""), "\n")
cat(paste("\t** Statistical Analysis: ",
stt, sep = ""), "\n")
if (PSI) {
MPSI <- paste("\t**", psi_m, sep = "")
cat(paste(MPSI, sep = ""), "\n")
}
cat(paste(paste(rep(" ", length(unlist(strsplit(TimeS,
"")))), sep = "", collapse = ""),
" ----------------------------------------------------------------",
sep = ""), "\n")
##########################################################################################
if (PSI == TRUE) {
Msg <- paste("\t** Calculating PSI",
sep = "")
cat(paste(Msg, "...", sep = ""))
PSIss <- getPSI_RNASeq(Events)
PSIs <- PSIss$PSI
DPSIs <- vector("list", length = ncol(Contrast))
fit <- lmFit(PSIs, design = Design)
fit2 <- contrasts.fit(fit, Contrast)
fit2 <- eBayes(fit2)
for (jj in seq_len(ncol(Contrast))) {
TopPSI <- topTable(fit2, coef = jj,
number = Inf)[, 1, drop = FALSE]
DPSIs[[jj]] <- TopPSI
}
}
Count_Matrix <- PrepareCountData(Events)
# Auxiliary matrix for Kronecker Product
if (Statistic == "LogFC" | Statistic ==
"Dif_LogFC" | Statistic == "DRS") {
AuxM <- matrix(c(1, 0, 0, 1, 1, 0,
1, 1, 1), nrow = 3, byrow = TRUE)
D <- kronecker(Design, AuxM)
# Limma Pipeline
NormCounts <- voom(t(Count_Matrix),
D)
fit <- lmFit(object = NormCounts,
design = D)
FinalResult <- vector("list", length = ncol(Contrast))
for (mm in seq_len(ncol(Contrast))) {
Cused <- Contrast[, mm, drop = FALSE]
# The contrasts we are interested in are
# the ones related with each Path, and we
# apply a kronecker product of the
# contrast matrix with the corresponding
# vector for each Path (P1 = 1 1 0 ; P2 =
# 1 1 1)
if (Statistic == "LogFC" | Statistic ==
"Dif_LogFC") {
if (Statistic == "LogFC") {
P1 <- kronecker(Cused,
matrix(c(1, 1, 0), nrow = 3))
P2 <- kronecker(Cused,
matrix(c(1, 1, 1), nrow = 3))
} else if (Statistic == "Dif_LogFC") {
P1 <- kronecker(Cused,
matrix(c(0, 1, 0), nrow = 3))
P2 <- kronecker(Cused,
matrix(c(0, 1, 1), nrow = 3))
}
C <- cbind(P1, P2)
fit2 <- contrasts.fit(fit,
C)
fit2 <- eBayes(fit2)
# Merge the results from both contrasts
# in one table
T2 <- topTable(fit2, coef = 1,
number = Inf)
T3 <- topTable(fit2, coef = 2,
number = Inf)
EvsIds <- rownames(T2)
ii3 <- match(EvsIds, rownames(T3))
T3 <- T3[ii3, ]
colnames(T3) <- letters[seq_len(ncol(T3))]
T34_345 <- cbind(T2, T3)
# Irwin Hall Pvalue Summarization
Values1 <- IHsummarization(T34_345[,
4], T34_345[, 3], T34_345[,
10], T34_345[, 9])
Final <- data.frame(Gen = rownames(T34_345),
Pvalue = Values1$Pvalues,
ZValue = Values1$Tstats,
stringsAsFactors = FALSE)
EventsN <- PrepareOutput(Events,
Final)
} else if (Statistic == "DRS") {
DRS <- kronecker(Cused, matrix(c(0,
0, 1), nrow = 3))
# Compute estimated coefficients and
# standard errors for the given contrasts
fit2 <- contrasts.fit(fit,
DRS)
# Empirical Bayesian Statistics
fit2 <- eBayes(fit2)
# Obtain the ranking of events for each
# of the contrasts
T2 <- topTable(fit2, number = Inf)
Final <- data.frame(rownames(T2),
T2[, 4], T2[, 3], stringsAsFactors = FALSE)
colnames(Final) <- c("Gene",
"Pvalue", "Zvalue")
EventsN <- PrepareOutput(Events,
Final)
}
# Add extra information (Gene Name and
# Event Classification) and Sort
# data.frame by pvalue
if (PSI) {
IIx <- match(rownames(EventsN),
rownames(DPSIs[[mm]]))
EventsN <- cbind(EventsN,
DPSIs[[mm]][IIx, ])
colnames(EventsN)[6] <- "Delta PSI"
}
FinalResult[[mm]] <- EventsN
}
if (ncol(Contrast) == 1) {
FinalResult <- FinalResult[[1]]
}
cat("Done")
cat("\n Analysis Finished")
cat(paste("\n Done \n", sep = ""))
# Return the Result to the user
cat("\n", format(Sys.time(), "%X"),
" Analysis Completed \n")
return(FinalResult)
} else {
stop("Wrong Statistical Analysis Given")
}
}
|
3559be9b932f5c9f3eb82a398ee30b9ee355a2d1 | 69e1103d3779a2a7dc076072ada2a002c36ef96c | /treePlots.R | ffba817470db62a77ef905dac5d9b6e26a68fbe1 | [] | no_license | harryrackmil/manhattan-trees | 7a13380707959c7410aab346199da025803a1e4e | c5a0c86c3e827faa5b3f14a683b9aaa4120609fb | refs/heads/master | 2016-09-06T20:06:46.533108 | 2015-07-27T06:18:15 | 2015-07-27T06:18:15 | 39,752,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 452 | r | treePlots.R | par(mfrow = c(2,1))
stNum = read.csv("data/stNum.csv", header = FALSE)
names(stNum) = c("street", "count")
aggSt = aggregate(stNum, by = list(stNum$street), FUN = sum)
plot(aggSt$Group.1, aggSt$count, type = 'l')
axis(1, , at = seq(20,220, by = 20))
aveNum = read.csv("data/aveNum.csv", header = FALSE)
names(aveNum) = c("street", "count")
aggAve = aggregate(aveNum, by = list(aveNum$street), FUN = sum)
plot(aggAve$Group.1, aggAve$count, type = 'l')
|
ef22c89bd766f75302a0cf9a15e51182b52a3353 | 5c542ee6a12a4637236ee876da7eb6c42667b211 | /R/test-spline.R | 473b79b4eae02503f27a7cc1a0bd5114a65279f3 | [] | no_license | jeffeaton/epp-spectrum | cecacd42c0f926d681bf3f3d9d9e6e21dd28e9a3 | b525b52e05ddbd914ec6d9c7095a52a512419929 | refs/heads/master | 2021-01-10T21:13:09.377733 | 2015-05-27T09:20:12 | 2015-05-27T09:20:12 | 19,249,007 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 208 | r | test-spline.R | source("spectrum.R")
source("../analysis-functions.R")
theta <- c(-27.2776051, 0.2286067, 1.5936094, 2.0196487, -0.4601538, -1.6417664, 1.4846658, 0.1725820, 0.3623600)
t(matrix(fnBSpline(theta[3:9]), 10))
|
26f0e767f950d2b33be043dc999450edb4f63f91 | a5d6294f73856de226cb0ba23e4ef2b95e343d33 | /man/make_all_regex_greedy.Rd | c4f113cdd840f72977c159f7dea8abb6eb761945 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jogrue/regexhelpeR | e3f074c46eb7d902e9dd38298ab1bfab32a58a49 | afe8e0ec678eb752455f826fd3916d3a1196dca2 | refs/heads/master | 2023-02-05T10:39:57.603414 | 2020-12-19T23:38:18 | 2020-12-19T23:38:18 | 221,893,898 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 474 | rd | make_all_regex_greedy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/greedy-lazy-regex.R
\name{make_all_regex_greedy}
\alias{make_all_regex_greedy}
\title{Make regex patterns greedy}
\usage{
make_all_regex_greedy(pattern)
}
\arguments{
\item{pattern}{A regex pattern in lazy format}
}
\value{
The provided pattern in greedy format.
}
\description{
Returns the provided pattern in greedy format. The ? is removed
in all instances of ??, +?, *?, \{n,\}?, \{n,m\}?.
}
|
bf89813be3adcf7d55e106aedae9afb623ff15f9 | 4c1f46b66916e0b151b4f5e4de8882f6c8f744fc | /man/getInitBoard.othello.Rd | 75868082c20c29576d05518bad6e0962c1f546d3 | [] | no_license | Atan1988/rothello | 37911c3e236a06b1718271639f7fbe0fd08c973d | 79d08b9d1a96547ea1670a4e52535d91d284c08f | refs/heads/master | 2020-04-18T16:32:23.483414 | 2019-04-08T00:26:18 | 2019-04-08T00:26:18 | 167,636,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 321 | rd | getInitBoard.othello.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/othello_class.R
\name{getInitBoard.othello}
\alias{getInitBoard.othello}
\title{othello get initial board}
\usage{
\method{getInitBoard}{othello}(game)
}
\arguments{
\item{game}{othello game object}
}
\description{
othello get initial board
}
|
2e79a9707189047212164d291e6d65171e9cdad8 | bb2cbdc84da81abc980026f7b4ecaaa09139861e | /readBlogData.R | 5ae7cb2e10c4367912c7faab4416ddd5c56db71c | [] | no_license | marichards/data_science_capstone | 4ac40886cdbed5e75b26ed204c6885533836adde | 5a220c1dc584c89fbc42ca9f24d81948686b6665 | refs/heads/master | 2021-01-22T05:10:30.916998 | 2017-03-29T23:12:29 | 2017-03-29T23:12:29 | 81,627,372 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 506 | r | readBlogData.R | #------------------------
# Load and explore the blogs data sets
# First load the tm package and create a corpus
library(tm)
en.docs <- Corpus(DirSource("./Coursera-SwiftKey/final/en_US"))
en.blog <- en.docs[["en_US.blogs.txt"]]
#de.docs <- Corpus(DirSource("./Coursera-SwiftKey/final/de_DE"))
#de.blog <- de.docs[[1]]
#fi.docs <- Corpus(DirSource("./Coursera-SwiftKey/final/fi_FI"))
#fi.blog <- fi.docs[[1]]
#ru.docs <- Corpus(DirSource("./Coursera-SwiftKey/final/ru_RU"))
#ru.blog <- ru.docs[[1]]
|
b60f068089e62b77b59e4c259362251d1a930c1e | 5a08e607367a964680b4740a6f64587eb7c7020a | /IlluminaEPICtools/man/manhattan.Rd | 9b404e299f7eadd5573e4e3334243cdb402cab41 | [] | no_license | qiongmeng-m/EpiPsychosis_IGF2 | 1b409ca334de0bab68f3f7b6822d289a1c647744 | 82fc709aa8e9406ae138aafe2fb13f79c658d54a | refs/heads/master | 2021-10-18T22:00:57.756578 | 2019-02-14T16:58:56 | 2019-02-14T16:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 497 | rd | manhattan.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manhattan.R
\name{manhattan}
\alias{manhattan}
\title{manhattan function, adapted from qqman}
\usage{
manhattan(x, chr = "CHR", bp = "BP", p = "P", snp = "SNP",
col = c("gray10", "gray60"), chrlabs = NULL,
suggestiveline = -log10(1e-05), genomewideline = -log10(5e-08),
highlight = NULL, logp = TRUE, cex_hilite = 2, col_hilite = "red",
cex = 0.7, ...)
}
\description{
manhattan function, adapted from qqman
}
|
f432e08567311e1fc328c2838cf8450d85fc19d3 | c2149e9f34d0f01142aa01ddb386768e063c0538 | /R/publish_deploy.R | 1a4289e1d7cd97ed91b4bb8217425e2d55826c6a | [] | no_license | charlotte-ngs/LBGFS2018 | cf5a1f9a49caedee0c149a3d02db92d440727193 | 90887584b9d5b8c2454be475f7509edf7eb534cb | refs/heads/master | 2020-03-23T17:43:55.334964 | 2019-01-14T14:18:55 | 2019-01-14T14:18:55 | 141,872,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,226 | r | publish_deploy.R | ###
###
###
### Purpose: Deployment And Publication Of Course Material
### started: 2018-10-31 (pvr)
###
### ############################################################# ###
#' @title Deployment Of Course Material
#'
#' @description
#' The term deployment stands for copying the content of a directory
#' given by the parameter ps_deploy_path to a certain target directory
#'
#' @param ps_deploy_path path to file or directory to be deployed
#' @param ps_deploy_target deployment target directory
deploy_course_material <- function(ps_deploy_path, ps_deploy_target){
### # check whether ps_deploy_path exists, stop if not
if ( !file.exists(ps_deploy_path) )
stop("[ERROR: deploy_course_material] Cannot find ", ps_deploy_path)
### # if deploy target does not exist, create it
if ( !dir.exists(ps_deploy_target) ) {
cat("[INFO -- deploy_course_material] Create deployment target", ps_deploy_target, " ...\n")
dir.create(path = ps_deploy_target)
}
### # copy the content
s_deploy_path <- ps_deploy_path
### # if s_deploy_path is a file, the switch to its direname
if ( !file.info(s_deploy_path)$isdir )
s_deploy_path <- dirname(s_deploy_path)
### # go over directory content and deploy via copying
vec_deploy_content <- list.files(s_deploy_path, full.names = TRUE)
file.copy(from = vec_deploy_content, to = ps_deploy_target, recursive = TRUE)
return(invisible(TRUE))
}
#' @title Publish a Document To a Given Target Directory
#'
#' @description
#' A document specified by the parameter ps_publish_path is
#' moved to a given target directory.
#'
#' @param ps_publish_path path to file to be published
#' @param ps_publish_target publication target directory
publish_course_material <- function(ps_publish_path, ps_publish_target){
### # check whether ps_publish_path exists, stop if not
if ( !file.exists(ps_publish_path) )
stop("[ERROR -- publish_course_material] Cannot find ", ps_publish_path)
### # if deploy target does not exist, create it
if ( !dir.exists(ps_publish_target) ) {
cat("[INFO -- publish_course_material] Create deployment target: ", ps_publish_target, " ...\n")
dir.create(path = ps_publish_target)
}
### # move the file to be published
file_move(ps_source = ps_publish_path, ps_target = ps_publish_target)
### # return invisible
return(invisible(TRUE))
}
#' @title Move a File Into a Target Directory
#'
#' @description
#' This is a helper function that moves a given file into a target
#' directory. It first tries \code{file.rename}; if that fails (for
#' example when source and target are on different filesystems, where
#' a rename is not possible), it falls back to copying the file and
#' removing the source.
#'
#' @param ps_source source file to be moved
#' @param ps_target target directory
#' @return invisibly \code{TRUE}
file_move <- function(ps_source, ps_target){
  ### # check whether ps_source exists
  if( !file.exists(ps_source) )
    stop("[ERROR -- file_move] Cannot find: ", ps_source)
  ### # check whether target exists
  if ( !dir.exists(ps_target) ){
    cat("[INFO --file_move] Create target directory: ", ps_target, " ...\n")
    dir.create(path = ps_target)
  }
  ### # moving via renaming; file.rename() returns FALSE on failure
  ### # (e.g. across filesystems), in which case fall back to copy + delete
  s_source_bname <- basename(ps_source)
  s_dest <- file.path(ps_target, s_source_bname)
  if ( !file.rename(from = ps_source, to = s_dest) ) {
    if ( !file.copy(from = ps_source, to = s_dest, overwrite = TRUE) )
      stop("[ERROR -- file_move] Cannot move: ", ps_source, " to ", ps_target)
    file.remove(ps_source)
  }
  ### # return invisible
  return(invisible(TRUE))
}
c02d3379e756881e7bea0160e284812f6104e1c5 | 64b0d18eb0e78a963ef19599c2dec448da6603d3 | /tests/testthat/projects/mounted_engine_with_utilities/mounted/config/engines.R | c480b04a285c04e29742632d02a7e2c90b5b258b | [
"MIT"
] | permissive | Chicago-R-User-Group/2017-n4-Meetup-Syberia | 0bb8cf04112ba236e373e89b01db8f92b857b000 | dc248c8702fc851ae50335ad6406f14e414c0744 | refs/heads/master | 2021-01-01T04:22:57.806786 | 2017-07-14T04:19:50 | 2017-07-14T04:19:50 | 97,166,590 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 367 | r | engines.R | engine("base", type = "local",
path = system.file(file.path("engines", "base.sy"), package = "syberia"),
mount = TRUE)
# Register two sibling utility engines from the local filesystem; unlike
# the base engine above they are registered but not mounted (mount = FALSE).
# NOTE(review): engine() and root() are presumably supplied by the Syberia
# framework -- confirm their semantics against its documentation.
engine("utility1", type = "local",
       path = normalizePath(file.path(root(), "..", "utility1")), mount = FALSE)
engine("utility2", type = "local",
       path = normalizePath(file.path(root(), "..", "utility2")), mount = FALSE)
|
781e2f13875c67c79ff48646e28c575a7966c3e4 | a893f063e4fb685c6d959882b74f35cfc17686ee | /create_zip.R | 2f9325deb833566b2752a93fad70610b252aa3f5 | [] | no_license | jmarshallnz/intro_to_r | e8ebe29fe4df3d32f9848e79eb75ead3a46bce4c | 35065a9255915f5f9eec4248972560fcbe7ff991 | refs/heads/main | 2023-06-16T10:09:57.485226 | 2021-06-21T04:13:12 | 2021-06-27T23:29:42 | 332,910,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 533 | r | create_zip.R | # create zip file for the course
# Bundle the student materials: exercise scripts, everything under data/
# and the RStudio project file.
# NOTE(review): list.files(pattern = ...) takes a regular expression, not a
# glob -- "\\.R$" is probably what is intended by "*.R"; confirm the current
# pattern matches the expected files.
exercise_files <- list.files("exercises", pattern = "*.R", full.names = TRUE)
data_files <- list.files("data", recursive = TRUE, full.names = TRUE)
all_files <- c("intro_to_r.Rproj", exercise_files, data_files)
zip('intro_to_r.zip', all_files)
# Bundle the instructor materials: worked solutions plus the same data.
solution_files <- list.files("solutions", pattern = "*.R", full.names = TRUE)
data_files <- list.files("data", recursive = TRUE, full.names = TRUE)
all_files <- c("intro_to_r.Rproj", solution_files, data_files)
zip('intro_to_r_solutions.zip', all_files)
|
56250f120e31789ae271ffb5dac8742d8cdf62ce | 10c2cbecbfaa6622e4ad02388213562cda47b1c9 | /scripts/grit_graphs.R | 710b67782206f7a15139af5d0a40abcc68635757 | [] | no_license | DLBPointon/grit-realtime | 929b3313a24f082c0915669151b79c0b8bdbb203 | 7d58352493765dc3dc8cf94a68d37fd6fd5e66b8 | refs/heads/main | 2023-05-23T23:15:31.031338 | 2021-06-19T14:44:02 | 2021-06-19T14:44:02 | 331,304,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,848 | r | grit_graphs.R | # An R script to take jira_dump data and produce
# descriptive statistics and graphs
# This script is second in line for the grit-realtime project
# preceded by a python script.
# Written by dp24
# Updated January 2020
# Modules Required
# NOTE(review): require() only returns FALSE when a package is missing;
# library(), which raises an error, is the safer choice for hard
# dependencies like these.
require("ggplot2")
require("stringr")
require('dplyr')
require('gridExtra')
library("tidyr")
require("plotly")
library(tidyverse)
library(htmlwidgets)
################################################################################
# print the working directory (informational only; value is discarded)
getwd()
# today's date in ddmmyy form selects which jira dump file is read below
date <- format(Sys.Date(), "%d%m%y")
jira_data_file <- sprintf("../output/jira_dump_%s.tsv.sorted", date) # jira_dump_%s.tsv.sorted
# NOTE(review): header=T uses the reassignable alias T; header=TRUE is safer
jira_data <- read.csv(jira_data_file, sep='\t', header=T, row.names=NULL)
# NOTE(review): attach() puts the columns of jira_data on the search path;
# many functions below reference bare column names and therefore depend on
# this attach -- removing it would require touching every plot function.
attach(jira_data)
# These should be moved to the python script
jira_data$manual_interventions_normalised <- (manual_interventions/length.after) * 1000000000 # mi / length = mi per base * 1 *10e9 (million) for per Gb
jira_data$length_in_mb <- length.after/1000000 # Equivalent to length in Gb * 1000 for length in Mb
jira_data$date_in_YMD <- as.Date(jira_data$date_in_YMD, "%Y-%m-%d")
# re-attach so the three freshly added columns are also on the search path
detach(jira_data)
attach(jira_data)
# every html widget produced below is written under this directory
default_save_loc <- '../grit-boot/assets/img/'
################################################################################
# Date Graphs and their outliers- general function
# Plot a y variable (manual interventions per Gb) against ticket-creation
# date with a plotly range selector/slider, then save the figure as a
# standalone html widget under default_save_loc.
#
# dataframe  : jira ticket data, one row per sample; must hold the columns
#              date_in_YMD (Date) and X.sample_id (sample label)
# yaxis      : numeric vector for the y axis (same length/order as dataframe)
# colour_by  : vector used to colour the points; the maximum string length
#              of its values also selects the output file-name suffix
#              (1 char -> "prefix", 2 -> "prefix_v", >2 -> "prefix_full")
# outlier_no : the outlier_no-th largest yaxis value (plus a margin of 50)
#              becomes the upper y-axis limit, clipping the top outliers
date_graphs <- function(dataframe, yaxis, colour_by, outlier_no) {
  # axis limit: n-th largest value, so the most extreme points are cut off
  # (they are shown separately by scatter_of_outliers / table_of_outliers).
  # NOTE: was sort(yaxis, T) -- positional T for the `decreasing` argument.
  outlier_lim <- sort(yaxis, decreasing = TRUE)[outlier_no]
  # columns are taken from the dataframe argument rather than relying on
  # the attach()ed global jira_data, as the original did
  fig <- plot_ly(dataframe,
                x=dataframe$date_in_YMD,
                y=yaxis,
                type = 'scatter',
                text = dataframe$X.sample_id,
                mode='markers',
                color=colour_by,
                colors="Set1",
                hovertemplate = paste('Date: %{x}\n',
                                      'Interventions: %{y}\n',
                                      'Sample: %{text}\n'))
  fig <- fig %>% layout(
    xaxis = list(
      rangeselector = list(
        buttons = list(
          list(
            count = 1,
            label = "1 mo",
            step = "month",
            stepmode = "backward"),
          list(
            count = 3,
            label = "3 mo",
            step = "month",
            stepmode = "backward"),
          list(
            count = 6,
            label = "6 mo",
            step = "month",
            stepmode = "backward"),
          list(
            count = 1,
            label = "1 yr",
            step = "year",
            stepmode = "backward"),
          list(step = "all"))),
      rangeslider = list(type = "date")),
    yaxis = list(title = "Manual Interventions per 1000Mb",
                 range = c(0,outlier_lim + 50)))
  # file-name suffix chosen from the longest label in colour_by
  naming_no <- max(str_length(colour_by))
  if (naming_no == 1) {
    name_as <- 'prefix'
  } else if (naming_no == 2) {
    name_as <- 'prefix_v'
  } else if (naming_no > 2){
    name_as <- 'prefix_full'
  } else {
    name_as <- 'UNKNOWN'
  }
  file_name <- sprintf("date_project_manual_interventions_%s.html", name_as)
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(fig), full_loc, selfcontained = TRUE)
}
# Scatter plot of only the outlier_no samples with the highest normalised
# manual-intervention counts, coloured by full prefix, saved as a
# standalone html widget.
#
# dataframe  : jira ticket data (one row per sample)
# outlier_no : number of highest-intervention rows to display
scatter_of_outliers <- function(dataframe, outlier_no) {
  # NOTE: the original indexed the global jira_data instead of the
  # dataframe argument, and shadowed base::table with a local `table`.
  top_rows <- dataframe[order(dataframe$manual_interventions_normalised), ]
  top_rows <- tail(top_rows, outlier_no)
  fig <- plot_ly(top_rows,
                x=top_rows$date_in_YMD,
                y=top_rows$manual_interventions_normalised,
                type = 'scatter',
                text = top_rows$X.sample_id,
                mode='markers',
                color=top_rows$prefix_full,
                colors="Set1",
                hovertemplate = paste('Date: %{x}\n',
                                      'Interventions: %{y}\n',
                                      'Sample: %{text}\n'))
  fig <- fig %>% layout(
    xaxis = list(
      title = 'Date of Ticket Creation',
      rangeselector = list(
        buttons = list(
          list(
            count = 1,
            label = "1 mo",
            step = "month",
            stepmode = "backward"),
          list(
            count = 3,
            label = "3 mo",
            step = "month",
            stepmode = "backward"))),
      rangeslider = list(type = "date")),
    yaxis = list(title = "Manual Interventions Per 1000Mb"))
  file_name <-'scatter_outlier_manual.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(fig), full_loc, selfcontained = TRUE)
}
# Plotly table listing the outlier_no samples with the most manual
# interventions per Gb, saved as a standalone html widget.
#
# dataframe  : jira ticket data (one row per sample)
# yaxis      : kept for signature compatibility; the ranking is always by
#              manual_interventions_normalised (this argument was unused
#              in the original as well)
# outlier_no : number of rows to show
table_of_outliers <- function(dataframe, yaxis, outlier_no) {
  # NOTE: the original read the global jira_data; it now uses the
  # dataframe argument, and no longer shadows base::table.
  top_rows <- dataframe[order(dataframe$manual_interventions_normalised), ]
  top_rows <- tail(top_rows, outlier_no)
  columns <- c("X.sample_id", "latin_name", "length.after", "manual_interventions", "manual_interventions_normalised")
  top_rows <- top_rows[columns]
  fig <- plot_ly(
    type = 'table',
    columnwidth = 200,
    header = list(
      values = c("<b>Outlier row no.</b>", names(top_rows)),
      align = c('left', rep('center', ncol(top_rows))),
      line = list(width = 1, color = 'black'),
      fill = list(color = 'rgb(235, 100, 230)'),
      font = list(family = "Arial", size = 14, color = "white")
    ),
    cells = list(
      values = rbind(
        rownames(top_rows),
        t(as.matrix(unname(top_rows)))
      ),
      align = c('left', rep('center', ncol(top_rows))),
      line = list(color = "black", width = 1),
      fill = list(color = c('rgb(235, 193, 238)', 'rgba(228, 222, 249, 0.65)')),
      font = list(family = "Arial", size = 12, color = c("black")),
      height = 25
    ))
  file_name <-'outlier_table_manual.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(fig), full_loc, selfcontained = TRUE)
}
# Supplementary box plot for index.html fig1
# Box plot of the percentage genome-length change, one box per short
# prefix, saved as a standalone html widget.
# NOTE: the original plotted the attach()ed global columns regardless of
# the dataframe argument; columns now come from the argument.
box_plot <- function(dataframe) {
  fig <- plot_ly(data=dataframe, y=dataframe$length.change, color = dataframe$prefix, type="box")
  file_name <- 'boxplot_all_length_change.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(fig), full_loc, selfcontained = TRUE)
}
# Generalized graphs for all data
# Bar chart of percentage genome-length change per sample, coloured by
# full prefix, saved as a standalone html widget.
# NOTE: the original immediately overwrote the dataframe argument with the
# global jira_data (`dataframe <- jira_data`) and used attach()ed column
# names; both removed so the argument is actually honoured.
change_by_length_all_bar <- function(dataframe) {
  bar <- plot_ly(data=dataframe,
                 x=dataframe$X.sample_id,
                 y=dataframe$length.change,
                 color=dataframe$prefix_full,
                 type="bar",
                 text = dataframe$X.sample_id,
                 hovertemplate = paste('Sample: %{x}\n',
                                       'Length Change: %{y}%\n'),
                 showlegend=TRUE)
  bar <- bar %>% layout(xaxis = list(title = 'Sample ID'),
                        yaxis = list(title = "Change in Genome length (%)"))
  file_name <- 'change_by_length_all_bar.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(bar), full_loc, selfcontained = TRUE)
}
# Scatter of percentage genome-length change per sample, coloured by full
# prefix, y axis clipped to [-20, 3], saved as a standalone html widget.
# NOTE: the original referenced attach()ed column names; they now come
# from the dataframe argument.
change_by_length_all_scatter <- function(dataframe) {
  scatter <- plot_ly(dataframe,
                     x=dataframe$X.sample_id,
                     y=dataframe$length.change,
                     type = "scatter",
                     mode = "markers",
                     color=dataframe$prefix_full,
                     text = dataframe$X.sample_id,
                     hovertemplate = paste('Sample: %{x}\n',
                                           'Length Change: %{y}%\n'),
                     showlegend=TRUE)
  scatter <- scatter %>% layout(xaxis = list(title = 'Sample ID'),
                                yaxis = list(title = "Change in Genome length (%)", range = c(-20, 3)))
  file_name <- 'change_by_length_all_scatter.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(scatter), full_loc, selfcontained = TRUE)
}
# Scatter of manual interventions per Gb against assembly length, one
# trace per taxonomic group (ToLID prefix_v code), with samples above the
# cutoff pinned at y = 1450 as "Outlier".  Returns the plotly figure;
# nothing is saved.  (Kept for reference -- flagged "Not in USE".)
#
# dataframe : jira ticket data; must contain prefix_v, length_in_mb and
#             manual_interventions_normalised
fig3a <- function(dataframe) {
  # NOTE: the original repeated an identical subset/add_trace pair 29
  # times against the global jira_data and computed an unused `inlier`
  # subset; the taxa are now driven from a lookup table and the dataframe
  # argument, preserving the original trace order, names and colours.
  cutoff <- 1500
  taxa <- data.frame(
    prefix = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
               "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
               "u", "v", "w", "x", "y", "z", "il", "id", "iy"),
    label = c('Amphibia', 'Bird', 'Non-Vascular Plants', 'Dicotyledons',
              'Echinoderms', 'Fish', 'Fungi', 'Platyhelminths', 'Insect',
              'Jellyfish and Cnidaria', 'Other Chordates', 'Monocotyledons',
              'Mammals', 'Nematodes', 'Sponges', 'Protists',
              'Other Arthropods', 'Reptiles', 'Sharks', 'Other Animal Phyla',
              'Algae', 'Other Vascular Plants', 'Annelids', 'Molluscs',
              'Bacteria', 'Archae', 'Lepidoptera', 'Diptera', 'Hymenoptera'),
    colour = c('#FFC000', '#FFFC00', '#FF0000', 'red',
               '#28b077', 'blue', '#e642f5', '#0032e5', '#b700ff',
               '#e500d0', '#0687ba', '#e57f00', '#ff00ea', '#00e5e0',
               '#e500dc', '#f54296', '#58027a', '#08a826', '#2ae8e8',
               '#ffd24a', '#98d437', '#92ed77', '#b59d24', '#a0a197',
               '#ff007b', '#2bff92', '#4a98ff', '#fcff4a', '#adb000'),
    stringsAsFactors = FALSE)
  # samples above the cutoff are drawn first, pinned just below the top
  outlier <- subset(dataframe, manual_interventions_normalised > cutoff)
  fig1 <- plot_ly() %>%
    add_trace(data = outlier, x = outlier$length_in_mb, y = 1450,
              name = 'Outlier', type="scatter", mode="markers",
              marker = list(color = 'black', size = 6), showlegend=TRUE)
  # one trace per taxonomic group, in the fixed order of the lookup table
  for (k in seq_len(nrow(taxa))) {
    grp <- dataframe[ which(dataframe$prefix_v == taxa$prefix[k] &
                            dataframe$manual_interventions_normalised < cutoff), ]
    fig1 <- fig1 %>%
      add_trace(data = grp, x = grp$length_in_mb,
                y = grp$manual_interventions_normalised,
                name = taxa$label[k], type="scatter", mode="markers",
                marker = list(color = taxa$colour[k], size = 6),
                showlegend=TRUE)
  }
  fig1 <- fig1 %>% layout(xaxis = list(title = 'Assembly Length (Mb)'),
                          yaxis = list(title = "Manual Interventions per 1000Mb", range = c(0,1500)))
}
# Main graphs for display
# Three stacked panels sharing the assembly-length x axis:
#   1. manual interventions per 1000Mb
#   2. scaffold N50 % change
#   3. scaffold count % change
# Returns the combined subplot; the saveWidget calls are commented out, so
# this function currently produces no files.
# NOTE(review): the dataframe argument is ignored -- every panel plots the
# attach()ed global jira_data directly.
fig3_mimic <- function(dataframe) {
  # palette for the full prefix categories; local to this function
  # (NOTE(review): assigned_to_chromo() originally referenced this name
  # from outside, where it is not in scope)
  full_pre_colour <- c('#ce782f', '#019053', '#0a75ad', '#553f69',
                       '#e50000', '#e3e500', '#2ae500', '#00e5c8',
                       '#0032e5', '#b500e5', '#e500d0', '#00a6e5',
                       '#e57f00', '#aae500', '#00e5e0', '#e500dc')
  # panel 1: interventions vs length (only panel with a visible legend)
  fig1 <- plot_ly(data = jira_data,
                  x=length_in_mb,
                  y=manual_interventions_normalised,
                  color = prefix_full,
                  colors = full_pre_colour,
                  type="scatter",
                  mode="markers",
                  legendgroup = prefix_full,
                  showlegend=TRUE)
  fig1 <- fig1 %>% layout(xaxis = list(title = 'Assembly Length (Mb)'),
                          yaxis = list(title = "Manual Interventions per 1000Mb",
                                       range = c(0,1500)))
  # panel 2: scaffold N50 change; legend suppressed (shared with panel 1
  # via legendgroup)
  fig2 <- plot_ly(data = jira_data,
                  x=length_in_mb,
                  y=scaff.n50.change,
                  color = prefix_full,
                  colors = full_pre_colour,
                  type="scatter",
                  mode="markers",
                  legendgroup = prefix_full,
                  showlegend=FALSE)
  fig2 <- fig2 %>% layout(xaxis = list(title = 'Assembly Length (Mb)'),
                          yaxis = list(title = "Scaffold N50 % Change", range = c(-100,400)))
  # panel 3: scaffold count change
  fig3 <- plot_ly(data = jira_data,
                  x=length_in_mb,
                  y=scaff_count_per,
                  color = prefix_full,
                  colors = full_pre_colour,
                  type="scatter",
                  mode="markers",
                  legendgroup = prefix_full,
                  showlegend=FALSE)
  fig3 <- fig3 %>% layout(xaxis = list(title = 'Assembly Length (Mb)'),
                          yaxis = list(title = "Scaffold Number Change (%)", range = c(-100, 20)))
  # NOTE(review): the next three bare expressions are no-ops inside a
  # function (their values are discarded) -- probably left over from
  # interactive use
  fig1
  fig2
  fig3
  figure <- subplot(fig1, fig2, fig3, nrows=3, shareX=TRUE, titleY = TRUE, titleX = TRUE)
  # return value of the function
  figure
  # saving disabled; re-enable the block below to write the widgets
  #file_name <- 'fig3_mimic.html'
  #fn1 <- 'fig1s_mimic.html'
  #fn2 <- 'fig2s_mimic.html'
  #fn3 <- 'fig3s_mimic.html'
  #full_loc <- paste(default_save_loc, file_name, sep="")
  #fl1<- paste(default_save_loc, fn1, sep="")
  #fl2<- paste(default_save_loc, fn2, sep="")
  #fl3<- paste(default_save_loc, fn3, sep="")
  #htmlwidgets::saveWidget(as_widget(figure), full_loc, selfcontained = TRUE)
  #htmlwidgets::saveWidget(as_widget(fig1), fl1, selfcontained = TRUE)
  #htmlwidgets::saveWidget(as_widget(fig2), fl2, selfcontained = TRUE)
  #htmlwidgets::saveWidget(as_widget(fig3), fl3, selfcontained = TRUE)
}
# Scatter of the percentage of sequence assigned to chromosomes against
# assembly length, coloured by full prefix, saved as a standalone widget.
# NOTE: the original referenced `full_pre_colour`, which is defined only
# inside fig3_mimic() and is therefore not in scope here (the call would
# fail); the palette is duplicated locally so this function runs on its
# own.  It also plotted the global jira_data instead of the argument.
assigned_to_chromo <- function(dataframe) {
  full_pre_colour <- c('#ce782f', '#019053', '#0a75ad', '#553f69',
                       '#e50000', '#e3e500', '#2ae500', '#00e5c8',
                       '#0032e5', '#b500e5', '#e500d0', '#00a6e5',
                       '#e57f00', '#aae500', '#00e5e0', '#e500dc')
  chromo <- plot_ly(data=dataframe,
                    x=dataframe$length_in_mb,
                    y=dataframe$assignment,
                    color = dataframe$prefix_full,
                    colors = full_pre_colour,
                    type="scatter",
                    mode="markers",
                    legendgroup = dataframe$prefix_full,
                    showlegend=TRUE)
  chromo <- chromo %>% layout(xaxis = list(title = 'Assembly Length (Mb)'),
                              yaxis = list(title = "Sequence Assigned to Chromosome (%)", range = c(85, 101)))
  file_name <- 'assigned_to_chromosome.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(chromo), full_loc, selfcontained = TRUE)
}
# Pie chart of ticket counts per project type, saved as a standalone
# html widget.
project_pie <- function(dataframe) {
  colors <- c('rgb(211,94,96)', 'rgb(128,133,133)', 'rgb(144,103,167)', 'rgb(171,104,87)', 'rgb(114,147,203)')
  # one slice per project type, sized by the number of tickets of that
  # type.  NOTE: the original passed values = nrow(project_type); nrow()
  # of a vector is NULL, so the slice sizes were undefined.  Counts are
  # now computed explicitly and the dataframe argument is used instead of
  # the global jira_data.
  type_counts <- as.data.frame(table(dataframe$project_type))
  pie <- plot_ly(type_counts, labels=type_counts$Var1,
                 values = type_counts$Freq,
                 type = 'pie',
                 textinfo='label+percent',
                 marker = list(colors = colors,
                               line = list(color = '#FFFFFF', width = 1)),
                 showlegend=FALSE)
  file_name <- 'project_pie.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(pie), full_loc, selfcontained = TRUE)
}
# Scatter of normalised manual interventions per sample, coloured by
# project type, saved as a standalone html widget.
# NOTE: the original referenced attach()ed column names; they now come
# from the dataframe argument.
project_graph <- function(dataframe) {
  proj <- plot_ly(dataframe, x = dataframe$X.sample_id,
                  y = dataframe$manual_interventions_normalised,
                  color = dataframe$project_type,
                  type="scatter",
                  mode="markers"
                  )
  file_name <- 'plot_by_project.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(proj), full_loc, selfcontained = TRUE)
}
# Render the entire data frame as an interactive plotly table and write it
# out as a standalone html widget.
#
# dataframe : any data frame; every row and column is shown verbatim
data_table_int <- function(dataframe) {
  # header row: a title cell followed by the column names
  header_spec <- list(
    values = c("<b>Jira Dump</b>", names(dataframe)),
    align = c('left', rep('center', ncol(dataframe))),
    line = list(width = 1, color = 'black'),
    fill = list(color = 'rgb(235, 100, 230)'),
    font = list(family = "Arial", size = 14, color = "white")
  )
  # body: row names in the first column, then the transposed data
  cell_spec <- list(
    values = rbind(
      rownames(dataframe),
      t(as.matrix(unname(dataframe)))
    ),
    align = c('left', rep('center', ncol(dataframe))),
    line = list(color = "black", width = 1),
    fill = list(color = c('rgb(235, 193, 238)', 'rgba(228, 222, 249, 0.65)')),
    font = list(family = "Arial", size = 12, color = c("black")),
    height = 10
  )
  tbl <- plot_ly(
    type = 'table',
    columnwidth = 200,
    header = header_spec,
    cells = cell_spec)
  file_name <- 'data_table_full.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(tbl), full_loc, selfcontained = TRUE)
}
# Scatter of percentage length change against genome length, plus a second
# graph showing only the `outliers` most extreme (most negative) samples.
# Both figures are saved as standalone html widgets.
#
# dataframe : jira ticket data (one row per sample)
# yaxis     : numeric vector of percentage length changes
# colour_by : vector used to colour the points
# outliers  : how many extreme samples set the axis limit / outlier graph
change_by_length_normalised <- function(dataframe, yaxis, colour_by, outliers) {
  # NOTE: the original referenced `outlier_no`, which does not exist in
  # this function (the parameter is `outliers`), overwrote the dataframe
  # argument with the global jira_data, and saved the undefined object
  # `fig` instead of the figures built here.  All three are fixed.
  # axis limit: n-th smallest value -- length-change outliers are the
  # large negative values, and the y axis is drawn reversed below
  outlier_lim <- sort(yaxis)[outliers]
  normed <- plot_ly(data = dataframe,
                    x = dataframe$length.after / 1000000,
                    y = yaxis,
                    text = dataframe$X.sample_id,
                    hovertemplate = paste('Xaxis: %{x}\n',
                                          'Yaxis: %{y}\n',
                                          'Sample: %{text}\n'),
                    type = 'scatter',
                    mode = 'markers',
                    color = colour_by,
                    colors = 'Set1')
  normed <- normed %>% layout(title = 'Percent Length Change by Length of Genome in 1000MB',
                              xaxis = list(title = 'Length of Genome in 1000Mb'),
                              yaxis = list(title = 'Percent change in Genome Length',
                                           range = c(max(yaxis) + 0.5, outlier_lim + 0.2)))
  file_name <-'scatter_lengthchange_length.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(normed), full_loc, selfcontained = TRUE)
  # companion graph restricted to the `outliers` most negative samples
  extreme <- dataframe[order(dataframe$length.change),]
  extreme <- head(extreme, outliers)
  outlier <- plot_ly(data=extreme,
                     x=extreme$length.after/1000000,
                     y=extreme$length.change,
                     type='scatter',
                     mode='markers',
                     color = extreme$prefix_full,
                     colors = 'Set1')
  outlier <- outlier %>% layout(title = 'Outlier Graph',
                                xaxis = list(title = 'Length of Genome in 1000Mb'),
                                yaxis = list(title = 'Percent change in Genome Length',
                                             range = c(max(extreme$length.change)+1 , min(extreme$length.change)-1)))
  file_name <-'scatter_lengthchange_length_outlier.html'
  full_loc <- paste(default_save_loc, file_name, sep="")
  htmlwidgets::saveWidget(as_widget(outlier), full_loc, selfcontained = TRUE)
}
################################################################################
# Entry point: regenerate every report widget from the freshly loaded
# jira_data.  Each helper writes its own html file under default_save_loc.
# NOTE(review): the bare column arguments (manual_interventions_normalised,
# prefix, prefix_full, length.change) resolve via the attach(jira_data)
# call in the preamble.
main <- function() {
  # Date graphs
  date_graphs(jira_data, manual_interventions_normalised, prefix, 3)
  date_graphs(jira_data, manual_interventions_normalised, prefix_full, 3)
  scatter_of_outliers(jira_data, 3)
  table_of_outliers(jira_data, manual_interventions_normalised, 3)
  # Boxplot length all
  box_plot(jira_data)
  # Length change
  change_by_length_all_bar(jira_data)
  change_by_length_all_scatter(jira_data)
  # Main graphs
  fig3_mimic(jira_data)
  assigned_to_chromo(jira_data)
  # Project graph and supplement
  project_graph(jira_data)
  project_pie(jira_data)
  # Full data table <- very innefficient
  data_table_int(jira_data)
  #Scatter length change by genome length
  change_by_length_normalised(jira_data, length.change, prefix_full, 6) # Outlier graph is hard coded here
}
################################################################################
main()
|
8dcdb53d0d45fd2fb7622c65a795d192665926a5 | 497a6fa06fb167f53e531ff546f75cea2ff5ca72 | /R/krige.R | 17b8fc8282a7f66a4ba92c75c5f473dc21b11eda | [] | no_license | cran/geostatsp | 8ffd90b15240476ec6e12ecc2f3fe629040178d0 | 8a707e53004f5e587df3c7f5813fdd954306781d | refs/heads/master | 2021-10-14T07:08:10.607607 | 2021-10-05T07:10:08 | 2021-10-05T07:10:08 | 17,696,363 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,307 | r | krige.R |
# Expected value of a Box-Cox-transformed Gaussian.
# y is gaussian, w is positive
# y = (w^lambda - 1)/lambda
# lambda * y + 1 = w^lambda = exp(lambda * log(w))
# Given the mean (pred) and standard deviation (sd) of y, compute E[w]
# by numerical integration over a standard-normal grid.
#
# pred   : vector of predicted means on the transformed scale
# sd     : vector of prediction standard deviations (same length as pred
#          -- TODO confirm; the outer()/recycling below assumes it)
# boxcox : the Box-Cox parameter lambda
# Nbc    : number of base quadrature points
# Returns a vector of back-transformed means; when boxcox < 0 (or some
# grid values go negative) a two-column matrix with the probability mass
# on the complex/invalid region is returned instead.
meanBoxCox=function(pred, sd, boxcox, Nbc = 100) {
  # if boxcox is negative, don't let values get closer
  # than this to zero
  epsX = exp(12*boxcox)
  epsBC = 0.001 # check box-cox within this distance
  # of zero or one
  # lambda ~ 0: w is lognormal, so E[w] has the closed form exp(mu+s^2/2)
  if(abs(boxcox)<epsBC){
    return(
        exp(pred + sd^2/2)
    )
  } else if (abs(boxcox-1)<epsBC){
    # lambda ~ 1: the transform is a shift, so the mean is unchanged
    return(pred)
  }
  # normal probabilities for numerical integration:
  # base grid on [-7, 7] plus its reciprocal-scaled points, deduplicated
  SXboxcox = seq(-7,7,len=Nbc)
  SXboxcox = unique(signif(
      c(
          SXboxcox,
          SXboxcox/SXboxcox[1]
      ),6)
  )
  SXboxcox = sort(SXboxcox)
  PXboxcox = pnorm(SXboxcox)#, log=TRUE)
  # probability of normal being in a bin
  # (half-bin weights re-accumulated so each grid point gets the mass of
  # the two half-intervals around it)
  DXboxcox = diff(PXboxcox)/2
  NDX = length(DXboxcox)
  DXboxcox = c(
      DXboxcox[1],
      DXboxcox[-1] + DXboxcox[-NDX],
      DXboxcox[NDX]
  )
  IXboxcox = log(DXboxcox)
  # lambda*y + 1 evaluated on the grid: rows index observations, columns
  # index quadrature points.  NOTE(review): `+ pred` relies on column-wise
  # recycling, i.e. pred must have the same length as sd -- confirm.
  x = boxcox * (outer(sd, SXboxcox) + pred)+1
  # negatives to zero
  # (grid points where lambda*y + 1 < 0, i.e. w^lambda would be negative)
  xneg= which( as.vector(x < 0))
  if(boxcox<0){
    # get rid of values very close to zero
    xneg = c(xneg,
        which(abs(as.vector(x)) < epsX)
    )
  }
  x[xneg] = NA
  # w = (lambda*y + 1)^(1/lambda) computed on the log scale
  logx = log(x)/boxcox
  IXmat = matrix(IXboxcox, nrow(x), ncol(x), byrow=TRUE)
  # quadrature: sum over grid points of w * P(bin), skipping invalid cells
  result = rowSums(exp(logx + IXmat),na.rm=TRUE)
  # rows where every grid point was invalid have no defined mean
  allNA = rowSums(!is.na(x))==0
  if(length(xneg)) {
    IXmat[xneg] = NA
    # also report how much probability mass fell on the invalid region
    result = cbind(
        predict=result,
        probComplex.boxcox =
            1-rowSums(exp(IXmat),na.rm=TRUE)
    )
    result[allNA,] = NA
  } else {
    result[allNA] = NA
  }
  result
}
krigeLgm = function(
formula, data,
grid,
covariates=NULL,
param,
expPred=FALSE,
nuggetInPrediction=TRUE,
mc.cores=getOption("mc.cores", 1L)) {
# this function really needs some tidying!
trend = formula
locations = grid
coordinates=data
theVars = NULL
haveBoxCox = any(names(param)=="boxcox")
NsimBoxCox=50
if(haveBoxCox) {
haveBoxCox = abs(param["boxcox"]-1) > 0.001
if(param['boxcox']<0) NsimBoxCox=100
if(param['boxcox']< -0.2) NsimBoxCox=200
if(param['boxcox']< -0.5) NsimBoxCox=400
}
haveNugget = any(names(param)=="nugget")
if(haveNugget) {
haveNugget = param["nugget"] > 0
}
if(!haveNugget) {
nuggetInPrediction=FALSE
}
if(is.numeric(locations)){
locations = squareRaster(data, locations)
}
if(nrow(locations) * ncol(locations) > 10^7) warning("there are lots of cells in the prediction raster,\n this might take a very long time")
observations = meanRaster = NULL
if(!length(names(covariates))) {
# no coariates, mean is intercept
if(any(names(param)=='(Intercept)')) {
meanForRaster = param['(Intercept)']
} else {
meanForRaster = 0
}
meanFixedEffects =
rep(meanForRaster, ncell(locations))
meanRaster = locations
values(meanRaster) = meanFixedEffects
}
if( is.data.frame(covariates) & any(class(formula)=="formula")) {
if(nrow(covariates)){
if(nrow(covariates) != ncell(locations))
warning("covariates and grid aren't compatible")
# put zeros for covariates not included in the data frame
notInCov = setdiff(all.vars(formula), names(covariates))
for(D in notInCov)
covariates[[D]] = 0
modelMatrixForRaster = model.matrix(formula, covariates)
theParams = intersect(colnames(modelMatrixForRaster), names(param))
meanForRaster = drop(
tcrossprod( param[theParams], modelMatrixForRaster[,theParams] )
)
meanFixedEffects = rep(NA, ncell(locations))
meanFixedEffects[as.integer(names(meanForRaster))] = meanForRaster
meanRaster = locations
values(meanRaster) = meanFixedEffects
}
} # end covariates is DF
if(any(class(data)=="SpatialPointsDataFrame")&
any(class(formula)=="formula")) {
if(all(names(covariates)%in% names(data))) {
modelMatrixForData = model.matrix(formula, data@data)
theParams = intersect(colnames(modelMatrixForData), names(param))
meanForData = as.vector(tcrossprod(
param[theParams],
modelMatrixForData[,theParams])
)
names(meanForData) = rownames(modelMatrixForData)
haveData = match(names(meanForData),
rownames(data@data))
data = data[haveData,]
coordinates=data
observations = drop(data@data[,
all.vars(formula)[1] ] )
if(haveBoxCox) {
if(abs(param["boxcox"]) < 0.001) {
observations = log(observations)
expPred = TRUE
haveBoxCox = FALSE
} else {
observations = ((observations^param["boxcox"]) - 1)/
param["boxcox"]
}
}
observations = observations - meanForData
} # end all covariates in data
} # end data is spdf
if(!length(observations) | is.null(meanRaster)) {
# the above didn't create observations and meanRaster
# use the old code, probably not being called from lgm
# find factors, so we reproject rasters using
# the correct method.
# search for factors in the data supplied
# look for factors in the model formula
if(any(class(trend)=="formula")){
trendFormula = update.formula(trend, junk ~ . )
covariatesForData = data@data
if(is.vector(data)) {
observations = data
} else {
observations = all.vars(trend)[1]
observations = covariatesForData[,observations]
}
theVars = all.vars(trendFormula)[-1]
if(length(theVars)) {
factorsInData = unlist(lapply(
covariatesForData[,theVars,drop=FALSE],
is.factor))
factorsInData = names(factorsInData)[factorsInData]
} else {
factorsInData=NULL
}
allterms = rownames(attributes(terms(trend))$factors)
factorsInFormula = grep("^factor", allterms, value=TRUE)
factorsInFormula = gsub("^factor\\(", "", factorsInFormula)
factorsInFormula = gsub("\\)$", "", factorsInFormula)
factorsInTrend=NULL
allterms = gsub("^[[:alnum:]]+\\(", "", allterms)
allterms = gsub("\\)$", "", allterms)
if(!all(allterms %in% names(data)))
warning("some covariates don't appear in data")
} else { # trend not formula
# trend is a data frame of covariates
# look for factors in it
covariatesForData = as.data.frame(trend)
observations = as.data.frame(data)[,1]
factorsInTrend = unlist(lapply(
covariatesForData, is.factor
))
factorsInTrend = names(factorsInTrend)[factorsInTrend]
factorsInFormula = factorsInData = NULL
# guess at the formula
trendFormula = as.formula(paste(
"junk ~ ",
paste(c('1', names(covariatesForData)), collapse="+")
)
)
} # end trend not a formula
# we know which variables factors
theFactors = unique(c(factorsInFormula, factorsInData, factorsInTrend))
theFactors = theFactors[theFactors %in% names(covariates) ]
if(length(grep("^Raster|^list", class(covariates)))) {
# if there's only variable in the model assign it's name to covariates
covariateNames = all.vars(
update.formula(trendFormula, junk~ . )
)[-1]
if(length(covariateNames)==1){
# so far only one variable
names(covariates)= covariateNames
}
# loop through factors
# and make sure integer values in rasters get converted
# to things with parameter values!
for(D in theFactors) {
# is this variable in param with a factor around it?
# for instance factor(x)1 and factor(x)2 ?
paramWithFactor = grep(
paste("factor\\(", D, "\\)[[:digit:]]+$", sep=""),
names(param), value=TRUE)
paramStartWithD = grep(
paste("^", D, ".+$", sep=""),
names(param), value=TRUE)
paramFactorCharacter = grep(
paste("factor\\(", D, "\\).+$", sep=""),
names(param), value=TRUE)
if(length(paramWithFactor)) {
# formula will convert to factor, don't
# create factor beforehand
theLevels = gsub(
paste("^factor\\(",D,"\\)",sep=""),
"",paramWithFactor)
theLevels = as.integer(theLevels)
allValues = raster::unique(covariates[[D]])
dontHave = allValues[!allValues %in% theLevels]
# make values with no data all equal to the lowest value
# so it's the baseline when turning into a factor.
forRecla = cbind(dontHave, min(allValues)-1)
covariates[[D]] =
raster::reclassify(covariates[[D]], forRecla)
} else if( length(paramStartWithD) ) {
# not a bunch of digits,
# stuff like xTrees and xGrassland for covariate x and levels Trees and Grassland
# see if these line up with
theLevels = gsub(paste("^", D, sep=""),"",paramStartWithD)
levelsTable = covariates[[D]]@data@attributes[[1]]
inId = theLevels %in% as.character(levelsTable[,1])
inLabel = theLevels %in% levelsTable[,2]
if(mean(inId) > mean(inLabel)){
levelsTable$levelsInParams =
as.character(levelsTable[,1])
labelCol = ncol(levelsTable)
levelsInTable = levelsTable[,1] %in%
theLevels
} else {
levelsInTable = levelsTable[,2]%in% theLevels
labelCol=2
}
if(mean(theLevels %in% levelsTable[,labelCol]) < 0.4)
warning("many levels appear missing in covariate", D)
valuesInParams = levelsTable[levelsInTable,1]
allValues = raster::unique(covariates[[D]])
dontHave = allValues[!allValues %in% valuesInParams]
forRecla = cbind(dontHave, min(allValues)-1)
covariates[[D]] =
raster::reclassify(covariates[[D]], forRecla)
levelsTable =
levelsTable[c(1, 1:nrow(levelsTable)),c(1,labelCol)]
levelsTable[1,1]= min(allValues)-1
levelsTable[1,2] = ''
colnames(levelsTable)[2] = "levels"
levels(covariates[[D]]) = levelsTable
covariates[[D]]@data@isfactor = TRUE
} else if (length(paramFactorCharacter)) {
# stuff like factor(x)Trees and factor(x)Grassland for covariate x and levels Trees and Grassland
theLevels = gsub(paste("^factor\\(", D,"\\)", sep=""),"",
paramFactorCharacter)
levelsTable = covariates[[D]]@data@attributes[[1]]
levelsInTable = levelsTable[,2]%in% theLevels
if(mean(theLevels %in% levelsTable[,2]) < 0.4)
warning("many levels appear missing in covariate", D)
valuesInParams = as.numeric(levelsTable[levelsInTable,1])
allValues = raster::unique(covariates[[D]])
dontHave = allValues[!allValues %in% valuesInParams]
forRecla = cbind(dontHave, min(allValues)-1)
covariates[[D]] =
raster::reclassify(covariates[[D]], forRecla)
levelsTable =
levelsTable[c(1, 1:nrow(levelsTable)),]
levelsTable[1,1]= min(allValues)-1
levelsTable[1,2] = "0"
colnames(levelsTable)[2]="levels"
levels(covariates[[D]])[[1]] = levelsTable
} else {
warning("don't know what to do with covariate", D,
"\n can't assign parameters to levels of this factor")
}
} # end loop through factors
if(length(grep("^Raster|^list", class(covariates))) & length(theVars)) {
# method for resampling covariate rasters
method = resampleMethods(formula, covariates)
covariates = stackRasterList(covariates,template=locations, method=method)
theVars = do.call('intersect',
dimnames(attributes(terms(trendFormula))$factors))
if(nlayers(covariates)==1 & length(theVars)==1) {
names(covariates) = theVars
}
# construct the fixed effects component
covariatesDF = raster::as.data.frame(covariates, xy=TRUE)
# get rid of trailing _ created by as.data.frame
names(covariatesDF) = gsub("_levels$", "", names(covariatesDF))
} else {
covariatesDF = as.data.frame(matrix(NA, ncol=0, nrow=ncell(locations)))
}
} else {# end covariates is raster, assume it's a data frame
covariatesDF=covariates
}
# get rid of response variable in trend formula
meanRaster = raster(locations)
names(meanRaster) = "fixed"
if(length(all.vars(trendFormula))>1){ # if have covariates
missingVars = all.vars(trendFormula)[-1] %in% names(covariatesDF)
missingVars = all.vars(trendFormula)[-1][!missingVars]
# check if all variables are in covariates
if(length(missingVars)) {
cat("cant find covariates ",
paste(missingVars, collapse=","),
" for prediction, imputing zeros\n")
covariatesDF[,missingVars]=0
}
modelMatrixForRaster = model.matrix(trendFormula, cbind(covariatesDF,junk=0))
if(!all(colnames(modelMatrixForRaster)%in% names(param))){
warning("cant find coefficients",
paste(names(modelMatrixForRaster)[
!names(modelMatrixForRaster)%in% names(param)
], collapse=","),
"in param\n")
}
meanFixedEffects =
modelMatrixForRaster %*% param[colnames(modelMatrixForRaster)]
anyNA = apply(covariatesDF, 1, function(qq) any(is.na(qq)))
if(any(anyNA)) {
oldmm = rep(NA, ncell(meanRaster))
oldmm[!anyNA] = meanFixedEffects
values(meanRaster) = oldmm
} else {
values(meanRaster) = meanFixedEffects
}
modelMatrixForData = model.matrix(trendFormula,
cbind(covariatesForData,junk=0))
haveData = match(rownames(modelMatrixForData),
rownames(covariatesForData))
observations = observations[haveData]
coordinates = coordinates[haveData,]
meanForData =
modelMatrixForData %*% param[colnames(modelMatrixForData)]
} else { #no covariates
if(any(names(param)=='(Intercept)')) {
values(meanRaster) = param['(Intercept)']
} else {
values(meanRaster) = 0
}
meanForData = rep(values(meanRaster)[1], length(observations))
}
# subtract mean from data
theNAdata =
is.na(observations)
if(all(theNAdata)) {
warning(
'it appears there are no observations without at least one covariate missing')
}
if(any(theNAdata)) {
noNAdata = !theNAdata
if(length(grep("^SpatialPoints", class(coordinates)))) {
coordinates = coordinates[noNAdata,]
} else if(any(class(coordinates)=="dist")){
coordinates = as.matrix(coordinates)
coordinates = coordinates[noNAdata,noNAdata]
coordinates = as.dist(coordinates)
} else {
warning("missing vlaues in data but unclear how to remove them from coordinates")
}
observations = observations[noNAdata]
}
if(haveBoxCox) {
if(abs(param["boxcox"]) < 0.001) {
observations = log(observations)
expPred = TRUE
haveBoxCox = FALSE
} else {
observations = ((observations^param["boxcox"]) - 1)/
param["boxcox"]
}
} # end have box cox
observations = observations - meanForData
} # end old code not called from LGM
cholVarData = geostatsp::matern(coordinates, param=param, type='cholesky')
# if(haveNugget) Matrix::diag(varData) = Matrix::diag(varData) + param["nugget"]
# cholVarData = Matrix::chol(varData)
observations = as.matrix(observations)
cholVarDatInvData = Matrix::solve(cholVarData, observations)
Ny = length(observations)
param = fillParam(param)
krigeOneRowPar = function(Drow, yFromRowDrow,
locations,
param,coordinates,Ny,cholVarData,
cholVarDatInvData,
xminl,xresl,ncoll,
lengthc){
# covariance of cells in row Drow with data points
resC = .C(C_maternArasterBpoints,
as.double(xminl),
as.double(xresl),
as.integer(ncoll),
as.double(yFromRowDrow),
as.double(0), as.integer(1),
as.double(coordinates@coords[,1]),
as.double(coordinates@coords[,2]),
N=as.integer(Ny),
result=as.double(matrix(0, ncoll,
lengthc)),
as.double(param["range"]),
as.double(param["shape"]),
as.double(param["variance"]),
as.double(param["anisoRatio"]),
as.double(param["anisoAngleRadians"])
)
covDataPred = matrix(resC$result, nrow=ncoll, ncol=Ny)
cholVarDataInvCovDataPred = Matrix::solve(cholVarData,
t(covDataPred))
x= cbind( # the conditional expectation
forExp=as.vector(Matrix::crossprod(cholVarDataInvCovDataPred,
cholVarDatInvData)),
# part of the conditional variance
forVar=apply(cholVarDataInvCovDataPred^2, 2, sum)
)
x
}
datForK = list(
locations=locations,param=param,
coordinates=coordinates,Ny=Ny,
cholVarData=cholVarData,
cholVarDatInvData = cholVarDatInvData,
xminl=xmin(locations),
xresl = xres(locations),
ncoll=ncol(locations),
lengthc=length(coordinates)
)
Srow = 1:nrow(locations)
if(mc.cores ==1 ) {
sums=mapply(krigeOneRowPar, Srow,
yFromRow(locations,Srow),
MoreArgs=datForK,SIMPLIFY=FALSE)
} else {
sums=parallel::mcmapply(krigeOneRowPar, Srow,
yFromRow(locations,Srow),
MoreArgs=datForK,SIMPLIFY=FALSE,mc.cores=mc.cores)
}
sums <- simplify2array(sums)
# row sums of cholVarDataInvCovDataPred
forExpected = sums[,'forExp',]
# row sums of squares
forVar = sums[,'forVar',]
randomRaster = raster(meanRaster)
names(randomRaster) = "random"
values(randomRaster) = as.vector(forExpected)
predRaster = meanRaster + randomRaster
names(predRaster) = "predict"
if(any(forVar > param["variance"])){
themax = max(forVar - param["variance"],na.rm=TRUE)
if(themax > 1e-6)
warning("converted variances of ", themax, " to zero")
# forVar = pmin(forVar, param["variance"])
}
krigeSd = raster(meanRaster)
names(krigeSd) = "krigeSd"
if(nuggetInPrediction) {
values(krigeSd) = sqrt(sum(param[c("nugget","variance")]) -
as.vector(forVar))
} else {
values(krigeSd) = sqrt(param["variance"] - as.vector(forVar))
}
names(meanRaster) = "fixed"
result = stack(meanRaster, randomRaster, predRaster,
krigeSd)
# box-cox
if(haveBoxCox){
names(result)[names(result)=="predict"] = "predict.boxcox"
bcpred = meanBoxCox(
pred=values(result[['predict.boxcox']]),
sd=values(result[['krigeSd']]),
boxcox=param['boxcox']
)
newraster=raster(result[["predict.boxcox"]])
names(newraster) = "predict"
if(is.matrix(bcpred)){
values(newraster) = bcpred[,'predict']
result = addLayer(result,
newraster)
# names(newraster) = 'probComplex.boxcox'
# values(newraster) = bcpred[,'probComplex.boxcox']
# result = addLayer(result,
# newraster)
} else {
values(newraster) = bcpred
result = addLayer(result,
newraster)
}
} # end have box cox
if(expPred){
names(result)[names(result)=="predict"] = "predict.log"
newLayer = exp(result[["predict.log"]]+ 0.5*result[["krigeSd"]]^2 )
names(newLayer) = "predict"
result = addLayer(result, newLayer)
} # end expPred
result
}
|
b6cee4f87b36194485d8eb3d0f0d2bc4d58dc5ea | 397182bd2f6ed4e39dcc3f3d62e89807af03ac5a | /man/count.graphs.Rd | 39b28f56116412091cd014d499f7e566ca4dec18 | [] | no_license | cran/bnlearn | e04a869228c237067fc09d64b8c65a360dada76a | dda6458daf2e0b25d3e87313f35f23d8a1c440c1 | refs/heads/master | 2023-05-14T14:21:07.744313 | 2023-04-29T09:30:02 | 2023-04-29T09:30:02 | 17,694,839 | 58 | 42 | null | 2020-01-03T23:49:19 | 2014-03-13T04:09:21 | C | UTF-8 | R | false | false | 1,830 | rd | count.graphs.Rd | \name{graph enumeration}
\alias{count.graphs}
\alias{graph enumeration}
\title{Count graphs with specific characteristics}
\description{
Count directed acyclic graphs of various sizes with specific characteristics.
}
\usage{
count.graphs(type = "all.dags", nodes, ..., debug = FALSE)
}
\arguments{
\item{type}{a character string, the label describing the types of graphs to
be counted (see below).}
\item{nodes}{a vector of positive integers, the graph sizes as given by the
numbers of nodes.}
\item{\dots}{additional parameters (see below).}
\item{debug}{a boolean value. If \code{TRUE} a lot of debugging output is
printed; otherwise the function is completely silent. Ignored in some
generation methods.}
}
\details{
The types of graphs, and the associated additional parameters, are:
\itemize{
\item \code{all-dags}: all directed acyclic graphs.
\item \code{dags-given-ordering}: all directed acyclic graphs
with a specific topological ordering.
\item \code{dags-with-k-roots}: all directed acyclic graphs with \code{k}
root nodes.
\item \code{dags-with-r-arcs}: all directed acyclic graphs with \code{r}
arcs.
}
}
\value{
  \code{count.graphs()} returns an object of class \code{bigz} from the
\pkg{gmp} package, a vector with the graph counts.
}
\references{
Harary F, Palmer EM (1973). "Graphical Enumeration". Academic Press.
Rodionov VI (1992). "On the Number of Labeled Acyclic Digraphs".
\emph{Discrete Mathematics}, \strong{105}:319--321.
Liskovets VA (1976). "On the Number of Maximal Vertices of a Random Acyclic
Digraph". \emph{Theory of Probability and its Applications},
\strong{20}(2):401--409.
}
\examples{
\dontrun{
count.graphs("dags.with.r.arcs", nodes = 3:6, r = 2)
}}
\author{Marco Scutari}
\keyword{graphs}
|
62716b7eba4a6368f73519564d31df08f828134c | 7120666c09987d6615978029b8c3d24b60cec065 | /KobusTutorial/server.R | 3e7094e9e50f2adc1ca302ed3bfd75a7301a4bf1 | [] | no_license | kobusbosman/shiny.rstudio.com-tutorial | 4db429f120dc787dcc725c6a6005e0f06ce5605b | d49216bdb38daa323c6f73bdaa69b6f18436bfbe | refs/heads/master | 2023-03-02T20:08:37.981027 | 2021-02-15T16:28:07 | 2021-02-15T16:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,039 | r | server.R | library(shiny)
library(tidyverse)
# Data
# Second column of `faithful` (eruption waiting times), sorted ascending.
data <- faithful[,2][order(faithful[,2])];
# NOTE(review): `rv` is created at file scope, outside server(), so it is
# shared by every session of the app -- confirm that is intended.
rv <- reactiveValues(data = rnorm(100));
server <- function(input, output) {
  # Observations
  # input$go has no side effect of its own; it only drives eventReactive below.
  observeEvent(input$go, {});
  # Swap the reactive sample between a normal and a uniform draw.
  observeEvent(input$norm, { rv$data <- rnorm(100) });
  observeEvent(input$unif, { rv$data <- runif(100) });
  # Reactives
  # Bin breaks are recomputed only when the "go" button fires;
  # ignoreNULL = FALSE makes them available before the first click.
  bins <- eventReactive(input$go,
                        {seq(min(data), max(data), length.out = input$bins + 1)},
                        ignoreNULL = FALSE
  )
  # Base-graphics histogram; the title is read with isolate() so changing the
  # title alone does not trigger a redraw.
  histdata <- reactive({
    hist(x = data, breaks = bins(), col = 'darkgray', border = 'white', main = isolate(input$title))
  })
  # Plotting
  # Echo the current value of input$go.
  output$clicks <- renderPrint({
    input$go[1];
  })
  # The same histogram rendered with ggplot2, using the same break points.
  output$ggplothist <- renderPlot({
    ggplot(tibble(data), aes(data)) +
      geom_histogram(breaks=bins()) +
      ggtitle(isolate(input$title));
  })
  output$distPlot <- renderPlot({
    histdata()
  })
  # Bin counts of the most recently drawn histogram.
  output$stats <- renderPrint({
    histdata()$counts;
  })
  # Histogram of whichever sample (normal/uniform) was toggled last.
  output$toggledist <- renderPlot({
    hist(rv$data)
  })
}
|
c869b0eae75f8652b4fc3da9f949636326e66c23 | 9c41132ee872256ff609882f2ea54cd5cb3ac567 | /Codes/Functions/BS_Indicator_Function.R | aea51b5b449a859fc5610572dd77250f6a0ec7c6 | [] | no_license | jfontestad/Stock-Strategy-Exploration | 33384a1bea0e88da393940e71ce59047c65290ab | 0baa29b392081ced6c78eed92cf371dcb8cdb1de | refs/heads/master | 2023-01-23T12:49:51.937786 | 2020-10-20T14:51:10 | 2020-10-20T14:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,427 | r | BS_Indicator_Function.R | ## Labels all Valleys and Peaks
## Back fills all Buy / Sells and calculates % return from current point to the next peak / valley
## Calculates holding time and days to the next peak / valley
## Returns Original DF appended with...
# PR (% Return to next peak or valley)
# Max (Total Holding Time in Days for Buy or Sell Period)
# Days (Days to the next valley or peak)
BS_Indicator_Function = function(DF,Column = NULL){
  ########################## Sample Data #######################
  # DF = Combined_Results %>%
  #   group_by(Stock) %>%
  #   filter(Stock == "AMZN")
  # # load(file = "//climsidfs07/RefEng/1 Ref. Engineering (SH, Scroll & IPD)/13) Analytics/Small Projects/Stocks/Data/NASDAQ Historical.RDATA")
  # Column = "Adjusted"
  ##############################################################
  require(tidyverse)
  require(quantmod)
  ## Defining Buy Sell Indicators (Local Mins/Maxs)
  # Seed Buy/Sell columns, then mark local maxima (sell points) and minima
  # (buy points) found by quantmod in the chosen price column. The -1 shifts
  # the reported index back one bar (findPeaks/findValleys flag the bar after
  # the turn -- confirm against the quantmod docs).
  DF = DF %>%
    mutate(Buy = NA,
           Sell = NA)
  DF = as.data.frame(DF)
  Peaks = findPeaks(DF[,Column])-1
  Valleys = findValleys(DF[,Column])-1
  DF[Peaks,"Sell"] = 1
  DF[Peaks,"Buy"] = 0
  DF[Valleys,"Buy"] = 1
  DF[Valleys,"Sell"] = 0
  # Forward-fill so every bar between a valley and the next peak carries the
  # same Buy/Sell flags.
  DF = na.locf(DF)
  ## Solving Issue When No Valleys or Peaks
  # Degenerate series with no detected turns: label everything a Buy with a
  # fixed holding window.
  # NOTE(review): Max is hard-coded to 6 in this fallback -- confirm what the
  # window is meant to represent.
  if(is_empty(Peaks) | is_empty(Valleys)){
    DF2 = DF %>%
      mutate(Buy = 1,
             Max = 6,
             PR = NA) %>%
      select(-Sell)
    return(DF2)
  }
  ## Calculating Price Ratio (Percentage Return / Number of Days Invested)
  # Indicator numbers the bars inside each Buy/Sell run; Max is the run
  # length (stamped on the last bar of the run, then back-filled); Days is
  # the remaining bars until the next turn; PR is the per-bar return to the
  # price at the upcoming peak/valley.
  # NOTE(review): the return calculation hard-codes the `Adjusted` column
  # even though peaks/valleys came from `Column` -- confirm this is intended.
  DF2 = DF %>%
    mutate(Indicator = sequence(rle(Buy)$lengths),
           Max = ifelse(lead(Indicator) == 1,Indicator + 1,NA)) %>%
    na.locf(fromLast = T,na.rm = F) %>%
    mutate(Days = Max - Indicator,
           End_Price_Buy = ifelse(Sell == 1 & Indicator == 1,
                                  Adjusted,NA),
           End_Price_Sell = ifelse(Buy == 1 & Indicator == 1,
                                   Adjusted,NA)) %>%
    na.locf(fromLast = T,na.rm = F) %>%
    mutate(PR = ifelse(Buy == 1,
                       (End_Price_Buy-Adjusted)/(Days*Adjusted),
                       (End_Price_Sell-Adjusted)/(Days*Adjusted))) %>%
    select(-c(Indicator,End_Price_Buy,End_Price_Sell,Sell))
  # Remaining NAs (e.g. the trailing, unfinished run) become 0.
  DF2[is.na(DF2)] = 0
  # ggplot(DF2[1:100,],aes(Date,Adjusted,color = factor(Buy))) +
  #   geom_point()
  return(DF2)
}
|
419766eb7decdca78ed77a467d45976db359c9ae | 5372991444cf4ea29225ceae591c53bb1ed1685f | /plot4.R | 802ca4b7f3dfff407072321be12cf27fa1f31208 | [] | no_license | Scavetta/ExData_Plotting1 | 82cf8a51cb6445a9568b8e8eaebe354429c75cd8 | b4efc411bde1112a463e3e314973fd833f14cff9 | refs/heads/master | 2021-01-18T06:03:35.269995 | 2015-03-08T18:00:48 | 2015-03-08T18:00:48 | 31,856,207 | 0 | 0 | null | 2015-03-08T16:13:50 | 2015-03-08T16:13:49 | null | UTF-8 | R | false | false | 2,294 | r | plot4.R | # Project 1:
# Plot 4:
# Draws a 2x2 panel of base-graphics time series of household power data
# for 2007-02-01/02 and writes it to plot4.png.
# Set your own working directory:
# setwd("~/Documents/Coursera/Data Science/ExplorData/Proj1/")
# Read in data:
df <- read.csv("household_power_consumption.txt",
               stringsAsFactors=FALSE,
               check.names=FALSE ,
               comment.char="",
               sep = ";")
# Set time and date correctly:
df$Time <- strptime(df$Time, format = "%H:%M:%S")
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
# Limit to specific range-of-interest
df1 <- df[df$Date >= "2007-02-01" & df$Date <= "2007-02-02",]
# Take one time point from next day for labelling x-axis:
df <- rbind(df1, df[df$Date == "2007-02-03",][1:2,])
rm(df1) # remove temp df.
# open graphics device
png("./ExData/ExData_Plotting1/plot4.png", height = 480, width = 480, units = "px", bg = "transparent")
# Set up plotting space.
# 2x2 grid of panels; the previous par() settings are restored at the end.
opar <- par(mfrow = c(2,2))
# Make plot4a, time-series:
plot(as.numeric(df$Global_active_power),
     type = "l",
     xaxt = "n",
     xlab = "",
     ylab = "Global Active Power")
# Label the x-axis with weekday abbreviations at the start, middle and end.
axis(1, c(1, nrow(df)/2, nrow(df)), format(df$Date[c(1, nrow(df)/2, nrow(df))], "%a"))
# Make plot4b, time-series:
plot(as.numeric(df$Voltage),
     type = "l",
     xaxt = "n",
     xlab = "datetime",
     ylab = "Voltage")
axis(1, c(1, nrow(df)/2, nrow(df)), format(df$Date[c(1, nrow(df)/2, nrow(df))], "%a"))
# Make plot4c, time-series:
plot(as.numeric(df$Sub_metering_1),
     type = "l",
     ylab = "Energy sub metering",
     xaxt = "n",
     xlab = "")
lines(as.numeric(df$Sub_metering_2),
      col = 2)
lines(as.numeric(df$Sub_metering_3),
      col = 4)
axis(1, c(1, nrow(df)/2, nrow(df)), format(df$Date[c(1, nrow(df)/2, nrow(df))], "%a"))
# Column names of the three sub-metering variables, used as legend labels.
legend.text <- names(df[7:9])
legend(900,40, # places a legend at the appropriate place
       legend.text, # puts text in the legend
       bty = "n",
       lty=c(1,1,1), # gives the legend appropriate symbols (lines)
       col=c(1, 2, 4)) # gives the legend lines the correct color and width
# Make plot4d, time-series:
plot(as.numeric(df$Global_reactive_power),
     type = "l",
     xaxt = "n",
     xlab = "datetime",
     ylab = "Global Reactive Power")
axis(1, c(1, nrow(df)/2, nrow(df)), format(df$Date[c(1, nrow(df)/2, nrow(df))], "%a"))
par(opar)
# Close device:
dev.off()
|
b7ef150be668759c601e4583a909bf09f608f8a8 | 7de34974ddc7bb2f12246a0372f2cf8c6f7fa0c9 | /man/print.vbscript_lines.Rd | 188cce67a3485f4ce221179b51dea34ed1621351 | [
"MIT"
] | permissive | FinnishCancerRegistry/vbscript | 94add9cedf26ae2062ed99d5299100b2d4757c88 | cb35e40d3c4be48fb23636bd9c4a38ee66743460 | refs/heads/master | 2023-04-11T03:06:47.232942 | 2023-04-02T09:21:37 | 2023-04-02T09:21:37 | 218,963,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 678 | rd | print.vbscript_lines.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/S3.R
\name{print.vbscript_lines}
\alias{print.vbscript_lines}
\title{Print \code{vbscript_lines}}
\usage{
\method{print}{vbscript_lines}(x, max.print = 50, ...)
}
\arguments{
\item{x}{\code{[vbscript_lines]} (mandatory, no default)
a \code{vbscript_lines} object}
\item{max.print}{\code{[integer, numeric]} (mandatory, default 50)
maximum number of lines allowed to be printed; if \code{x} has more elements
  than this, only the first 10 and last 10 elements are shown in print}
\item{...}{added for compatibility with \link{print}}
}
\description{
Print method for \code{vbscript_lines} objects
}
|
09f310570674f8b4b780e5df3b20cf2b77c3e1d7 | 10cc53a735b78779c67bac5ca1c5791549eaf129 | /explore/cluster.R | bf8736b6b753dc329c752fbbafebc092fe03a3e1 | [] | no_license | nsolcampbell/tasks | 74e6e355e5803f5f5986c5ac1834126bbe1b768e | 273bbcce02ad922bed1e5c30f77b410634c966f5 | refs/heads/master | 2020-12-26T04:16:57.327358 | 2013-11-18T01:35:56 | 2013-11-18T01:35:56 | 16,030,043 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 766 | r | cluster.R | # Hierarchical clustering of worker activities
# For each O*NET measure and each table, draw average-linkage dendrograms of
# two random 30-occupation samples into one PDF per measure.
for (measure in c("importance","level")) {
  pdf(paste("dendro_",measure,".pdf",sep=""), height=12, width=8)
  for (file in c("skill", "ability", "knowledge", "work_activity")) {
    ability <- read.csv(paste("../data/onet/tables/",file,"_",measure,".csv", sep=""))
    # Use the occupation titles as row labels and drop the first two columns
    # before computing distances.
    rownames(ability) <- ability[,"Title"]
    ability <- ability[,c(-1,-2)]
    for (seed in 1:2) {
      set.seed(seed)
      # NOTE: `subset` shadows base::subset here (harmless in this script).
      subset <- sample(1:nrow(ability), size=30)
      ability_sub <- ability[subset,]
      # Pairwise distances between occupations, clustered with average linkage.
      distxy <- dist(ability_sub)
      hClustering <- hclust(distxy, "average")
      plot(hClustering, hang=0.1,
           main=paste("30 O*NET occupations clustered by", file,measure),
           cex.lab=0.5, cex=0.5)
      mtext(paste("Randomly sampled with seed =",seed))
    }
  }
  dev.off()
}
|
1d1feb7c04e1eb303fd0fcc60752f49ffa925840 | 0de9b10fdb64c746bbe9fc145a3885308fa5fb91 | /list10.R | ec148ff58da8d212f1e9d499d638cafddbcedec8 | [] | no_license | LuizaPinto/Progest | d2b36a24e17b2c3be40ab689d6b72f25aa302dae | d1b188997fb5264214e56501d38053c9e4e7e89e | refs/heads/master | 2020-07-11T11:40:06.064736 | 2019-11-27T18:52:48 | 2019-11-27T18:52:48 | 204,530,026 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,491 | r | list10.R | pol_exp <- function(x,n){
fat <- 1
som <- 1
for (i in 1:n){
fat <- fat*i
som <- som + (x**i)/fat
}
return(som)
}
# Quick check of the degree-100 polynomial at x = 2 (should be close to exp(2)).
pol_exp(2,100)
# Plot exp() and overlay its Taylor polynomials of increasing degree.
plot(exp,-4,4)
grid()
# Vertical reference line at x = 0.
segments(x0=0,y0=0,x1=0,y1=150,lty=2)
curve(pol_exp(x,n=2),add=T,col="violet")
curve(pol_exp(x,n=3),add=T,col="red")
curve(pol_exp(x,n=4),add=T,col="blue")
curve(pol_exp(x,n=5),add=T,col="green")
#2
# Approximate exp(x) by its Taylor series. The first ten terms are always
# accumulated; after that, terms keep being added until one drops below the
# tolerance `incremento`.
# Returns list(approximation, index of the last term used).
aproex <- function(x, incremento) {
  total <- 1
  fact_acc <- 1
  for (k in 1:10) {
    fact_acc <- fact_acc * k
    total <- total + (x^k) / fact_acc
  }
  # `k` leaves the loop holding 10; keep counting terms from there.
  repeat {
    k <- k + 1
    fact_acc <- fact_acc * k
    term <- x^k / fact_acc
    if (abs(term) < incremento) {
      return(list(total + term, k))
    }
    total <- total + term
  }
}
# exp(1) with a per-term tolerance of 1e-12.
aproex(1, 0.000000000001)
######################################################################################
## Instructor's reference solutions below (original comment: "PROFESSORA QUE FEZ")
#3
# Degree-n Taylor-polynomial approximation of exp(x).
#
# The running product `fat_i` holds i!, so every term costs one extra
# multiplication. Vectorised over `x` (each term is added elementwise).
#
# Args:
#   x: numeric scalar or vector, point(s) at which to approximate exp.
#   n: non-negative integer, the polynomial degree.
# Returns: numeric of the same length as x.
fexp = function(x,n){
  exp_x = 1
  fat_i = 1
  # seq_len() instead of 1:n so that n = 0 correctly yields the constant
  # term alone (1:0 would iterate over c(1, 0) and divide by zero).
  for(i in seq_len(n)){
    fat_i = fat_i*i
    parcela = (x^i)/fat_i
    exp_x = exp_x + parcela
  }
  return(exp_x)
}
# Degree-n Taylor-polynomial approximation of exp(x), recomputing factorial(i)
# for every term (simpler but slower than fexp's running product).
#
# NOTE: factorial(i) overflows to Inf (with a warning) for i > 170, which
# drives later terms to zero.
#
# Args:
#   x: numeric scalar or vector.
#   n: non-negative integer, the polynomial degree.
# Returns: numeric of the same length as x.
fexp1 = function(x,n){
  exp_x = 1
  # seq_len() instead of 1:n so that n = 0 correctly yields 1
  # (1:0 would iterate over c(1, 0) and evaluate factorial(0) twice over).
  for(i in seq_len(n)){
    parcela = (x^i)/factorial(i)
    exp_x = exp_x + parcela
  }
  return(exp_x)
}
# Compare the running-factorial version with the factorial()-based one.
fexp(1,10)
fexp1(1,10)
# Large-n stress test: the factorials overflow to Inf, so late terms add 0.
fexp(1,100000)
fexp1(1,100000)
# Adaptive Taylor approximation of exp(x): keep adding terms x^k / k! until
# one falls below the tolerance `inc`.
# Returns list(approximation, index of the last term used).
f10_3_c = function(x, inc = 0.0001) {
  approx_val <- 1
  fact_acc <- 1
  k <- 1
  repeat {
    fact_acc <- fact_acc * k
    term <- x^k / fact_acc
    if (abs(term) < inc) {
      # The final (sub-tolerance) term is still included in the result.
      return(list(approx_val + term, k))
    }
    approx_val <- approx_val + term
    k <- k + 1
  }
}
# Spot checks; expected values (exp(1), exp(5), exp(10)) noted below each call.
f10_3_c(1,inc=0.0001)
#2.718282
f10_3_c(5,inc=0.0001)
#148.4132
f10_3_c(10,inc=0.0001)
#22026.47
#10.6
#a)
# Natural logarithm via the Mercator series ln(x) = sum (-1)^(k+1) (x-1)^k / k,
# which converges for 0 < x < 2. For x >= 2, use ln(x) = -ln(1/x) instead.
# Terms are accumulated until one falls below the tolerance `inc`.
f10_6_a = function(x, inc = 0.00001) {
  if (x >= 2) {
    return(-f10_6_a(1 / x, inc))
  }
  total <- 0
  k <- 1
  repeat {
    term <- (-1)^(k + 1) * (x - 1)^k / k
    total <- total + term
    if (abs(term) < inc) {
      return(total)
    }
    k <- k + 1
  }
}
# Should be close to log(3) = 1.098612.
f10_6_a(3)
#c
# Logarithm of x in base b via the change-of-base identity
# log_b(x) = ln(x) / ln(b), both logs computed with f10_6_a().
f10_6_c = function(x, b, inc = 0.0001) {
  f10_6_a(x, inc) / f10_6_a(b, inc)
}
# Compare the series-based log base 4 with R's built-in log().
f10_6_c(3,4)
log(3,base=4)
#10.7
# sin(x) from its Maclaurin series. The argument is first folded into
# [-pi, pi] (recursively, one period of 2*pi at a time); then ten terms are
# always added, and further terms until one drops below the tolerance `inc`.
f10_7_a = function(x, inc = 0.0001) {
  if (x > pi) {
    return(f10_7_a(x - 2 * pi, inc))
  }
  if (x < -pi) {
    return(f10_7_a(x + 2 * pi, inc))
  }
  total <- x
  for (k in 1:10) {
    total <- total + ((-1)^k) * (x^(2 * k + 1)) / factorial(2 * k + 1)
  }
  k <- 11
  repeat {
    term <- ((-1)^k) * (x^(2 * k + 1)) / factorial(2 * k + 1)
    if (abs(term) < inc) {
      return(total)
    }
    total <- total + term
    k <- k + 1
  }
}
|
9c64252a7ae2970ff66d78b606f8ad5cbe4d7c00 | 0f39db29ed2a6971d14958680344b80eaf97fef9 | /plot-iris.R | feac127ada62f7c63a7ec32d66e0fb37babfb987 | [] | no_license | luboOliko/renv-test | c3aa15d384c4a25174887906b9009ce90a68b3a7 | f912f365b3bf7f18b74725f8665cb1fd8763b247 | refs/heads/master | 2023-07-09T14:26:16.886793 | 2019-11-07T22:10:19 | 2019-11-07T22:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 365 | r | plot-iris.R | # Load packages
library(dplyr)
library(ggplot2)
# Filter for particular iris species
# Keep only the two species being compared.
iris_filter <- iris %>%
  filter(Species %in% c("setosa", "versicolor"))
# Plot petal by sepal width and colour by species
iris_plot <- ggplot(
  iris_filter,
  aes(
    x = Sepal.Width,
    y = Petal.Width,
    colour = Species
  )
) +
  geom_point()
# Print plot
# Evaluating the ggplot object at top level renders it.
iris_plot
|
96574f4748761e4810419c01a5204f3aa5a78baa | c3dc08fe8319c9d71f10473d80b055ac8132530e | /challenge-136/abigail/r/ch-1.r | 851c753f4e4078a8b8083719961fd7694aceb278 | [] | no_license | southpawgeek/perlweeklychallenge-club | d4b70d9d8e4314c4dfc4cf7a60ddf457bcaa7a1e | 63fb76188e132564e50feefd2d9d5b8491568948 | refs/heads/master | 2023-01-08T19:43:56.982828 | 2022-12-26T07:13:05 | 2022-12-26T07:13:05 | 241,471,631 | 1 | 0 | null | 2020-02-18T21:30:34 | 2020-02-18T21:30:33 | null | UTF-8 | R | false | false | 1,301 | r | ch-1.r | #
# See ../README.md
#
#
# Run as: Rscript ch-1.r < input-file
#
# Binary (Stein) GCD of two non-negative integers.
#
# Rules: gcd(u, u) = gcd(u, 0) = u; a shared factor of 2 is pulled out with
# shifts; otherwise the even argument is halved or, with both odd, the
# smaller is subtracted from the larger.
#
# Note: inputs are expected to fit in 32 bits, since bitwShiftL/bitwShiftR
# coerce their arguments to integer.
gcd <- function (u, v) {
    u_odd <- u %% 2 != 0
    v_odd <- v %% 2 != 0
    # Scalar conditions, so use the short-circuit `&&`/`||` rather than the
    # vectorised `&`/`|`.
    if (u == v || v == 0) {u}
    else if (u == 0) {v}
    else if (!u_odd && !v_odd) {
        # Both even: factor out a 2 and recurse on the halves.
        bitwShiftL (gcd (bitwShiftR (u, 1), bitwShiftR (v, 1)), 1)}
    else if (!u_odd && v_odd) {gcd (bitwShiftR (u, 1), v)}
    else if (u_odd && !v_odd) {gcd (u, bitwShiftR (v, 1))}
    else if (u > v) {gcd (u - v, v)}
    else {gcd (v - u, u)}
}
# TRUE iff `number` equals n^k for some integer k >= 0.
# Iteratively divide by n; `number` was a pure power of n exactly when the
# divisions land on 1 (anything below 1, including 0, fails).
is_power_of_n <- function (number, n) {
    while (number > 1) {
        if (number %% n != 0) {
            return (FALSE)
        }
        number <- number / n
    }
    number == 1
}
# Convenience wrapper: power-of-two test via the general power-of-n check.
is_power_of_2 <- function (number) {
    is_power_of_n (number, n = 2)
}
# Read pairs "m n" from standard input; for each pair print 1 when their gcd
# is a power of two greater than 1, otherwise print 0.
stdin <- file ('stdin', 'r')
repeat {
    # readLines() returns a zero-length vector at end of input.
    line <- readLines (stdin, n = 1)
    if (length (line) == 0) {
        break
    }
    parts <- strsplit (line, " ")
    m <- as.numeric (parts [[1]] [[1]])
    n <- as.numeric (parts [[1]] [[2]])
    # An odd number has no even divisor at all, so the answer is 0.
    if (n %% 2 == 1 | m %% 2 == 1) {
        cat ("0\n")
    }
    else {
        # Both even: the pair qualifies exactly when the gcd itself is a
        # power of two (> 1).
        r <- gcd (n, m)
        if (r > 1 & is_power_of_2 (r)) {
            cat ("1\n")
        }
        else {
            cat ("0\n")
        }
    }
}
|
305b912116c5ed56f1861621d96652b47b68326e | ccd971226aca65ae79fcba77b84493ce6a35ada9 | /scripts/wb_info/wb_pc1_sector_aid_pie.R | 1dc1c5cc3fc7c643a335c0b92e30d4485b4f111b | [] | no_license | DanRunfola/WBVFMNCC | 6697e8054e84465386d49535c762b155348c23eb | b6bf204d0268806e5d635fe8013209dabfb7afd8 | refs/heads/master | 2021-01-17T06:15:17.162684 | 2016-12-13T16:08:48 | 2016-12-13T16:08:48 | 50,121,752 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 857 | r | wb_pc1_sector_aid_pie.R |
library("rgdal")
shpfile = "/home/userz/projects/wb_vfm_work/wb_pc1/wb_pc1.geojson"
dta_Shp = readOGR(shpfile, "OGRGeoJSON")
# ad_sector_names holds "|"-separated sector labels per feature; split them
# into one flat vector of sector names.
combined_sectors <- c()
for (i in 1:length(dta_Shp@data$ad_sector_names)) {
  x = unlist(strsplit(as.character(dta_Shp@data$ad_sector_names[i]), "|", fixed=TRUE))
  combined_sectors <-c (combined_sectors, x)
}
unique_sectors <- unique(combined_sectors)
# Sum total commitments over every feature mentioning each sector.
# NOTE(review): grepl() treats the sector name as a regular expression;
# names containing regex metacharacters could mis-match -- consider fixed = TRUE.
dollar_list <- c()
for (i in 1:length(unique_sectors)) {
  sector <- unique_sectors[i]
  filtered <- subset(dta_Shp@data, grepl(sector, ad_sector_names))
  dollars <- sum(filtered$total_commitments)
  dollar_list[sector] <- dollars
}
# Keep the ten largest sectors; fold the rest into the "Other" slice.
dlist <- dollar_list[ dollar_list >= sort(dollar_list, decreasing=TRUE)[10] ]
dlist['Other'] <- dlist['Other'] + sum(dollar_list[ dollar_list < sort(dollar_list, decreasing=TRUE)[10] ])
pie(dlist, cex=1.7)
|
85120d84bfd4ae490939da19d0a6e3c29ca4188b | bf6c5a246ee69975c2e0c46524bd18cb9f40bad8 | /R/data.R | 9e4fd109df5609923d2ed5eb8dc866797ac5d3fa | [] | no_license | hrmJ/stockholm | 39905cafc12565267ccba6ecc4283217d1c148c3 | aea4697f136162d3a8ead384acd943646f44cdd0 | refs/heads/master | 2021-06-04T21:22:35.548234 | 2020-03-05T14:11:48 | 2020-03-05T14:11:48 | 114,048,420 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | data.R | #' Data extracted from the word files (dorogoj.docx, milyj.docx etc.)
#'
#' This data frame includes all the contexts with the adjectives and information about the contexts, e.g. the participants of the speech situation etc.
#'
#' @format A data frame with the following structure:
#' \describe{
#' \item{adj}{the actual adjective in question: milyj, dorogoj, ljubimyj or rodnoj}
#' \item{participants}{participants of the speech situation, e.g. brat i sestra}
#' \item{year}{The publication year of the original context, NOTE: this is a bit inaccurate}
#' \item{tonaltonst}{currently just "other"}
#' ...
#' }
"bolshaja"
|
d1ba71a3f8d271c28a1cb0f41536d85ef4b64dbf | bab5400b4d93195ebca8f3162abad69cb2fdc679 | /src/scIB_stats.R | 143f07a5aea8f4289f60e90e95577b5572c5eaf4 | [] | no_license | whtns/scEiaD | c645fec7eb63b380b071a7d1a3468e257b97f73f | 2f0705236b7af9024ab881bfc3cd28bbfc29a28b | refs/heads/master | 2023-01-20T00:27:51.111694 | 2020-11-02T16:53:20 | 2020-11-02T16:53:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 850 | r | scIB_stats.R | # wrapper for python script to load conda
# yes this is crazy
# Conda installation and project checkout locations come from the environment.
conda_dir = Sys.getenv('SCIAD_CONDA_DIR')
git_dir = Sys.getenv('SCIAD_GIT_DIR')
library(tidyverse)
library(glue)
args <- commandArgs(trailingOnly = TRUE)
# Point reticulate at the scIB conda environment's python before loading it.
Sys.setenv(RETICULATE_PYTHON = glue('{conda_dir}/envs/scIB/bin/python') )
library(reticulate)
# First CLI argument is the integration method; map it to the name of the
# dimensionality reduction it stores.
method = args[1]
if (method == 'CCA'){
  reduction <- 'pca'
} else if (method == 'fastMNN'){
  reduction <- 'mnn'
} else if (method == 'none'){
  reduction <- 'pca'
} else if (method == 'combat'){
  reduction <- 'pca'
} else if (method == 'liger'){
  reduction <- 'iNMF'
} else if (method == 'scVI'){
  reduction <- 'scVI'
} else {
  # Unknown method: assume the reduction is simply named after it.
  print("GUESSING!")
  reduction <- method
}
# Rewrite the first argument as 'X_' plus the lower-cased reduction name and
# forward everything to the python script.
args[1] <- paste0('X_', tolower(reduction))
system(paste(glue('{conda_dir}/envs/scIB/bin/python {git_dir}/src/scIB_stats.py '), paste(args, collapse = ' ')))
|
f14b57a5566f79c009c506cd3c3b664e7586980d | b76f45eddafec3879a1b5668fa8fce0b82d1f702 | /IHM_2_PreAvlLink4b_Journey_Merge_and_Index.R | 9191bf3ef491f6257509e8d93e59c56cb32c5760 | [] | no_license | jktulu/ihm_transit_data_linkage | 6e3ef1fce89bac577939ab53385a47df17ed3353 | 21f1840204f74b56388a9bfdafb74ceec9818b81 | refs/heads/master | 2020-03-26T07:36:12.107894 | 2018-08-14T07:09:52 | 2018-08-14T07:09:52 | 144,662,003 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,339 | r | IHM_2_PreAvlLink4b_Journey_Merge_and_Index.R |
require(RPostgreSQL)
require(dplyr)
require(lubridate)
require(stringr)
# Establish Database Connection ------------------------------------------------
# Increasing the effective cache size may improve performance of the SQL
# interprater
dbGetQuery(con, "set effective_cache_size = '256GB'")
# query all table names from data base and subset those associated with
# pre_avl_journeys.
tabs <- dbListTables(con) %>% .[str_detect(., pattern = "pre_avl_journeys")]
# Based on the first table, create a new table to contain all pre-avl journey
# data.
dbGetQuery(con,
paste0("create table pre_avl_journeys_all as select *
from pre_avl_afc_journeys.",tabs[1]))
# Create a progress bar to monitor progress.
p <- progress_estimated(n = length(tabs))
# loop through remaing tables and insert the data into the new output table.
for (i in 2:length(tabs)) {
p$tick()$print()
dbGetQuery(con, paste0("insert into pre_avl_journeys_all select *
from pre_avl_afc_journeys.",tabs[i]))
}
# Create key indexes such that the data may be efficiently queried.
dbGetQuery(con, "create index on pre_avl_journeys_all (record_id)")
dbGetQuery(con, "create index on pre_avl_journeys_all (transaction_datetime)")
dbGetQuery(con, "create index on pre_avl_journeys_all (account)")
|
936ffe2187059b4902e6bce32b9d98110621e81d | ada1437970128526daf432e5bcdc14c3dd4435d8 | /R/infinity_constraint.R | 108b2e348acd38052056e093412a10e328b3a046 | [
"MIT",
"BSD-3-Clause"
] | permissive | thomasp85/particles | 71c7a09cc636000a188a1ae0d9b841e6d6ec37ad | b194ad3c5c4017320ae0c51103401c274397cbf3 | refs/heads/main | 2022-09-04T17:54:53.130881 | 2022-08-19T12:16:38 | 2022-08-19T12:16:38 | 92,283,359 | 127 | 9 | NOASSERTION | 2022-08-19T06:29:35 | 2017-05-24T11:07:43 | R | UTF-8 | R | false | false | 2,788 | r | infinity_constraint.R | #' Reposition particles outside a canvas so they wrap around
#'
#' This constraint keeps particles inside of a defined area by positioning
#' exiting particles on the other side of the area. In effect this makes
#' particles that moves outside the upper bound reenter at the lower bound and
#' vice versa.
#'
#' @section Training parameters:
#' The following parameters defines the training of the constraint and can be
#' passed along a call to [impose()]
#'
#' - `xlim` : The left and right bound of the area
#' - `ylim` : The upper and lower bound of the area
#'
#' @family constraints
#' @usage NULL
#' @format NULL
#' @export
infinity_constraint <- structure(
  # Bounds of the wrap-around area; all start unset and are filled in by
  # train_constraint() from the xlim/ylim training parameters.
  list(
    xmin = NULL,
    xmax = NULL,
    ymin = NULL,
    ymax = NULL
  ),
  class = c('infinity_constraint', 'constraint')
)
#' @export
print.infinity_constraint <- function(x, ...) {
  # Fixed two-line description; the object's bounds are not shown.
  cat(
    'Infinity Constraint:\n',
    '* A constraint forces particles to be inside a canvas by wrapping them around it.\n',
    sep = ''
  )
}
#' @export
train_constraint.infinity_constraint <- function(constraint, particles, xlim = c(-5, 5), ylim = xlim, ...) {
  # Run the shared default training first via NextMethod() (presumably the
  # generic `constraint` method handles common fields such as the particle
  # hash and `include`), then record the bounds of the wrap-around area.
  constraint <- NextMethod()
  constraint$xmin <- xlim[1]
  constraint$xmax <- xlim[2]
  constraint$ymin <- ylim[1]
  constraint$ymax <- ylim[2]
  # Return the trained constraint (last expression, no explicit return()).
  constraint
}
#' @importFrom rlang quos
#' @importFrom digest digest
#' @export
retrain_constraint.infinity_constraint <- function(constraint, particles, ...) {
  # Capture the retraining arguments unevaluated (tidy-eval quosures) so they
  # are only evaluated when actually supplied.
  dots <- quos(...)
  # Detect whether the particle graph changed since the last (re)training;
  # quoted fields such as `include` only need re-evaluation in that case.
  particle_hash <- digest(particles)
  new_particles <- particle_hash != constraint$particle_hash
  constraint$particle_hash <- particle_hash
  nodes <- as_tibble(particles, active = 'nodes')
  constraint <- update_quo(constraint, 'include', dots, nodes, new_particles, TRUE)
  # Bounds may be overridden at retrain time; only touch the ones supplied.
  if ('xlim' %in% names(dots)) {
    xlim <- eval_tidy(dots$xlim)
    constraint$xmin <- xlim[1]
    constraint$xmax <- xlim[2]
  }
  if ('ylim' %in% names(dots)) {
    ylim <- eval_tidy(dots$ylim)
    constraint$ymin <- ylim[1]
    constraint$ymax <- ylim[2]
  }
  constraint
}
#' @export
apply_constraint.infinity_constraint <- function(constraint, particles, pos, vel, alpha, ...) {
  # Position each particle would reach after this integration step.
  projected <- pos + vel
  x_span <- constraint$xmax - constraint$xmin
  y_span <- constraint$ymax - constraint$ymin
  # Wrap x coordinates that would leave the area on either side. The modulo
  # handles particles that overshoot by more than one full span.
  left  <- projected[, 1] < constraint$xmin
  right <- projected[, 1] > constraint$xmax
  projected[left, 1]  <- constraint$xmax - (abs(constraint$xmin - projected[left, 1])  %% x_span)
  projected[right, 1] <- constraint$xmin + (abs(constraint$xmax - projected[right, 1]) %% x_span)
  # Same treatment for y coordinates.
  below <- projected[, 2] < constraint$ymin
  above <- projected[, 2] > constraint$ymax
  projected[below, 2] <- constraint$ymax - (abs(constraint$ymin - projected[below, 2]) %% y_span)
  projected[above, 2] <- constraint$ymin + (abs(constraint$ymax - projected[above, 2]) %% y_span)
  # Back out the (unchanged) velocity so that pos + vel lands on the wrapped
  # position after integration.
  list(position = projected - vel, velocity = vel)
}
|
fb40a0ec90fec331933af60d7c6f0e0c9c6cb30c | 34a2cf96cb59b70ab17619fd36890d0e4aebd6bf | /Rexercise3.R | d55de5418acb4222011cb0b36c2f043f2d2f15c6 | [] | no_license | as410tro/masters-epidemiology-r-course | df0ba4c8e5dc9bc1dad6345920028ed0fd6ed3c3 | 171f3860fd42f9bbf9b785b9a79856f5d440a4e2 | refs/heads/master | 2023-01-21T09:26:20.880056 | 2020-12-04T20:26:19 | 2020-12-04T20:26:19 | 318,618,673 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,081 | r | Rexercise3.R | load(file = "dat.Rdata")
attach(dat)  # NOTE(review): attach() is discouraged; columns used below (height, male, hg, hdl, ...) come from `dat`
# Q1: Use a statistical test to see if there is a significant difference in height between men and women.
# Before doing a t-test, check if the variables are normally distributed.
qqnorm(height[male==F])
qqline(height[male==F], col="red", lwd=2)
qqnorm(height[male==T])
qqline(height[male==T], col="red", lwd=2) ## Seems normally distributed
# H0: no difference between men & women; H1: there is a difference between men & women
t.test(height~male) ## Welch two-sample t-test for both genders
t.test(height[male==T]) ## Or do a one-sample t-test for each gender
t.test(height[male==F]) ## Both CIs do not overlap; therefore, the height of men and women is 'significantly' different.
# Q2: Test if the variance in height is different between men and women.
var.test(height~male) ## CI includes 1, so the variance is not significantly different between the groups
# Q3: A small difference in blood mercury levels between men & women. Is the difference statistically significant?
qqnorm(hg)
qqline(hg, col="pink", lwd=2) ## hg distribution is right-skewed
qqnorm(log(hg))
qqline(log(hg), col="pink", lwd=2, lty=2) ## log(hg) seems almost normally distributed following the normal quantiles
# except for the values on the left that look problematic. We will use the t-test.
log_hg <- log(hg)
t.test(log_hg~male) ## CI of difference=[-0.08, 0.04] contains 0. Hg level is not significantly different between genders.
# Q4: Is HDL significantly different between people who have been diagnosed with & without overweight?
# If so, what is the difference you expect? Test your hypothesis.
qqnorm(hdl)
qqline(hdl, col="yellow", lwd=2) ## HDL is not normally distributed
qqnorm(logHDL)
qqline(logHDL, col="yellow", lwd=2) ## log(HDL) is normally distributed
t.test(logHDL~ovrwght_ever) ## CI of difference=[0.09,0.12] does not contain 0 & p<0.05: significant difference
### NOTE(review): when reporting, consider exponentiating the CI back to the original HDL scale.
# If your hypothesis is that HDL is lower in overweight people, do a one-sided t-test:
t.test(logHDL[ovrwght_ever==T], alternative="greater") ## H0: no difference between overweight/not
## H1: true mean difference greater than 0
## CI=[0.25, Inf]
t.test(logHDL~ovrwght_ever, alternative="g")
# Q5: Is there a significant difference in HDL between diabetics (diab_lft==3) vs non-diabetics (diab_lft==1)?
t.test(log(hdl)[diab_lft==1], log(hdl)[diab_lft==3]) ## non-diabetics have higher HDL; CI=[0.09,0.13] does not contain 0: significant
# Q6: Consider HDL of diabetics vs non-diabetics among people who have NEVER been diagnosed with overweight.
t.test(log(hdl)[diab_lft==1 & ovrwght_ever==F],
       log(hdl)[diab_lft==3 & ovrwght_ever==F]) ## CI=[0.04,0.11] does not contain 0: still a significant difference
# Does the result change? Interpret the results from both tests.
## No: the difference remains significant after restricting to never-overweight participants.
# Do you expect other numerical variables in your data set to be different between certain groups?
# Test your hypotheses and interpret your findings.
1d5bf2b2de9992b5fa08ced317b6034986a347b2 | ba7560654f73b5aceffccb90651e66d75f933e8d | /plot3.R | 3d0a0e89444e81d24b34181b9726681816496095 | [] | no_license | ZhaoZ-2020/ExData_Plotting1 | 286defd800686e3fe3e541796c341928816d3456 | 36b03b2a574e33d1a1f828dba4cb709febec4abf | refs/heads/master | 2022-11-12T21:20:38.489620 | 2020-07-02T03:10:46 | 2020-07-02T03:10:46 | 276,325,014 | 0 | 0 | null | 2020-07-01T08:44:57 | 2020-07-01T08:44:56 | null | UTF-8 | R | false | false | 1,164 | r | plot3.R |
# plot3.R: plot the three sub-metering series for 2007-02-01/02 as a PNG.
## Read in the data (with the txt file in the working directory)
## for Dates 1/2/2007 and 2/2/2007 only
# Read a 5-row sample first, only to learn the column names and classes; this
# lets the full read skip header parsing and type guessing.
tempdata <- read.table("household_power_consumption.txt", header=T, sep=";", nrows = 5)
colna<-colnames(tempdata)
classes <- sapply(tempdata, class)
# skip/nrows select exactly the rows for 1-2 Feb 2007 (2880 = 2 days of
# minute-level observations). NOTE(review): hard-coded offsets assume the
# file's row order never changes.
data<-read.table("household_power_consumption.txt", sep=";", col.names=colna,
                 colClasses=classes, skip=66637, nrows=2880)
## Create a new variable containing the date and time,
## and change its class to a date-time (strptime returns POSIXlt).
data$DT <- paste(data$Date, data$Time)
data$DT[1:5]
data$DT <- strptime(data$DT, "%d/%m/%Y %H:%M:%S")
data$DT[1:5]
class(data$DT)
## Construct the 3rd plot and
## save it to a PNG file with a width of 480 pixels and a height of 480 pixels.
png(file="plot3.png", width=480, height=480)
# Empty frame first (type="n"), then overlay one line per sub-meter.
with(data, plot(DT, Sub_metering_1, type="n",
                xlab="", ylab="Energy sub metering"))
with(data, lines(DT, Sub_metering_1, col="black"))
with(data, lines(DT, Sub_metering_2, col="red"))
with(data, lines(DT, Sub_metering_3, col="blue"))
legend("topright",lty=1,col=c("black","red","blue"),
       legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off()
3b9542e294beb9215cec246e3c8b48983440d6b4 | 227e3fb88aebf0d26545caed0c7145b34d6414dc | /3_visualize.R | db2d6f210f49146731881faa66b8bdf975f51ced | [] | no_license | SimonTopp/ds-pipelines-targets-2 | f098f54cee0f094d44bbd13f51c4e1a4f749e1a1 | f7629c903e702dfa1b6e88dbfd595896dad25080 | refs/heads/main | 2023-05-08T01:37:13.425981 | 2021-05-24T16:36:17 | 2021-05-24T16:36:17 | 369,594,754 | 0 | 0 | null | 2021-05-24T16:36:18 | 2021-05-21T16:30:50 | R | UTF-8 | R | false | false | 235 | r | 3_visualize.R | source("3_visualize/src/plot_timeseries.R")
p3_targets_list <- list(
tar_target(
p3_figure_1_temp_ts_png,
plot_nwis_timeseries(fileout = "3_visualize/out/figure_1_temp_ts.png", p2_site_data_munged),
format = "file"
)
) |
21df267fd9e134c436ff61022db72ffa2c1d8722 | 1558b17d843ee5421f91069d7d5bb1a8d8852950 | /code_Base/models/reuse_functions.R | e7c1071c94c2551730252f3d979c04890f996bd2 | [] | no_license | anish-singh-07/DataScienceR | 1a229d6d31e8454d8301eafaa52a2e12eed7b673 | 7b76b20d5ab6d5b3b359007e9ef4cb8896517a22 | refs/heads/main | 2023-06-16T12:55:19.338903 | 2021-07-13T09:15:18 | 2021-07-13T09:15:18 | 375,276,755 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,250 | r | reuse_functions.R | #a function that will return the evaluation metric
# Compute binary-classification metrics (accuracy, precision, recall, F1)
# from actual vs. predicted labels and return them as a one-row data frame.
#
# target:    vector of true labels (binary, e.g. 0/1)
# predicted: vector of predicted labels on the same label set
# modelName: label stored in the "Model" column of the result
#
# Returns a 1-row data.frame with character columns
# Model / Accuracy / Precision / Recall / F1_Score (values rounded to 2 dp).
# Precision/recall are NaN when their denominator is 0, as before.
evaluation_Metric <- function(target, predicted, modelName) {
  # Force both vectors onto the same level set so the confusion matrix is
  # always square (2x2 for binary labels), even when one class is absent
  # from target or predicted. The original table() call produced a 1x2 or
  # 2x1 table in that case, making the linear indices below wrong/NA.
  lvls <- sort(unique(c(as.character(target), as.character(predicted))))
  conf_matrix <- as.matrix(table(Actual = factor(as.character(target), levels = lvls),
                                 Predicted = factor(as.character(predicted), levels = lvls)))
  # Column-major linear indices of the 2x2 table: [1]=TN, [2]=FN, [3]=FP,
  # [4]=TP (assumes the negative level sorts first, e.g. "0" before "1").
  TP = conf_matrix[4]
  TN = conf_matrix[1]
  FP = conf_matrix[3]
  FN = conf_matrix[2]
  accuracy = (TP + TN)/(TP + TN + FP +FN)
  precision = TP/(TP + FP)   # NaN when there are no positive predictions
  recall = TP/(TP + FN)      # NaN when there are no positive actuals
  f1 = 2 * ((precision * recall) / (precision + recall))
  model_eval_df <- data.frame(matrix(ncol=5,nrow=0, dimnames=list(NULL, c("Model", "Accuracy", "Precision", "Recall", "F1_Score"))))
  # c() coerces the whole row to character, so all columns end up character --
  # kept for backward compatibility with existing callers.
  model_eval_df[nrow(model_eval_df) + 1,] = c(modelName, round(accuracy, 2), round(precision, 2), round(recall, 2), round(f1, 2))
  return(model_eval_df)
}
# Aggregate row-level predictions into one prediction per author (majority
# vote over each author's rows, ties going to 1) and return the evaluation
# metrics for `modelName` computed on the per-author labels.
# NOTE(review): relies on the global `data_test` being the data the
# predictions were made on, in the same row order; column 2 is presumably the
# author id and column 37 the true target -- verify against the caller.
return_accuracy <- function(predicted_data, modelName) {
  predicted_Target_df <- data.frame(matrix(ncol=1,nrow=0, dimnames=list(NULL, c("Predicted_Target"))))
  # Copy the predicted targets into a one-column data frame (one row per input).
  for ( currVal in predicted_data){
    predicted_Target_df[nrow(predicted_Target_df) + 1,] = c(currVal)
  }
  model_results_df <- data.frame(matrix(ncol=3,nrow=0, dimnames=list(NULL, c("Author_Id", "Original", "Predicted"))))
  countZeros = 0
  countOnes = 0
  # Walk the predictions row by row, tallying 0/1 votes until the author id
  # changes, then record the majority vote for the finished author.
  for (row in 1:nrow(predicted_Target_df)) {
    if(predicted_Target_df[row, c(1)] == 0){
      countZeros = countZeros + 1
    } else {
      countOnes = countOnes + 1
    }
    if(row+1 == nrow(data_test)){
      # NOTE(review): deliberately empty? This suppresses the author-boundary
      # check on the second-to-last row -- confirm the end-of-data handling.
    } else if(row == nrow(data_test)){
      # Last row: flush the final author's vote.
      model_results_df[nrow(model_results_df) + 1,] = c(nrow(model_results_df) + 1, data_test[row, c(37)], if(countOnes >= countZeros) 1 else 0)
      countZeros = 0
      countOnes = 0
    } else {
      # Author boundary: the next row belongs to a different author.
      if(data_test[row, c(2)] != data_test[row+1, c(2)]){
        model_results_df[nrow(model_results_df) + 1,] = c(nrow(model_results_df) + 1, data_test[row, c(37)], if(countOnes >= countZeros) 1 else 0)
        countZeros = 0
        countOnes = 0
      }
    }
  }
  #print(paste('Confusion Matrix based on authors for ', modelName, " Model: "))
  # Score the per-author predictions against the per-author truth.
  eval_df <- evaluation_Metric(model_results_df$Original, model_results_df$Predicted, modelName)
  return(eval_df)
}
ff585eb3c2f507ff3054ea75b6021ff17fc5b860 | fb3f24a4af741e73144fc473c3dc317a0eb87cf0 | /code/analysis/run_analysis.R | 313c31f884732b55c02af42ea5bdffac8963de23 | [] | no_license | littlecanargie/CtTrajectories | 26893d318dccba3e32190aa01c0933850e939f75 | 7c359a8a175022544124a504d7e899cbf28e67f0 | refs/heads/main | 2023-02-07T19:38:54.423836 | 2020-12-31T10:10:47 | 2020-12-31T10:10:47 | 325,772,241 | 0 | 0 | null | 2020-12-31T10:10:30 | 2020-12-31T10:10:30 | null | UTF-8 | R | false | false | 1,476 | r | run_analysis.R | # source('code/analysis/run_analysis.R')
# Import:
library(tidyverse)
source('code/utilities/utils_analysis.R')
# Generate list of parameter sets for sensitivity:
source('code/analysis/make_masterpars.R')
final_fitlist <- list()
# Read data:
source("code/data_parsing/parse_Ct_data.R")
# Generate essential values:
source("code/analysis/set_global_pars.R")
# Generate and save the posteriors:
for(parset in 1:length(masterpars)){ #length(masterpars)
print(paste0("STARTING PARSET ",parset," OF ",length(masterpars)))
# set parameters:
current_pars <- masterpars[[parset]]
# Refine data:
source('code/analysis/refine_data.R')
# Fit posteriors:
if(current_pars[["parametrization"]]=="waittime"){
source('code/analysis/fit_posteriors_preamble.R')
source('code/analysis/fit_posteriors.R')
source('code/analysis/make_figures.R')
source('code/analysis/save_figures.R')
source('code/analysis/save_figures_png.R')
} else if(current_pars[["parametrization"]]=="slope"){
source('code/analysis/fit_posteriors_preamble_slope.R')
source('code/analysis/fit_posteriors_slope.R')
source('code/analysis/make_figures_slope.R')
source('code/analysis/save_figures_slope.R')
source('code/analysis/save_figures_png_slope.R')
}
# # Save the figures
final_fitlist[[parset]] <- ct_fit
}
# save(final_fitlist, file="output/final_fitlist.RData")
source('code/analysis/report_results.R')
# launch_shinystan_nonblocking(final_fitlist[[1]]) |
4b89c5f49714bdfc494b9308449845ef6c0d110c | b36107d33c5adfe529f84b4ae060df1f7cd38217 | /cachematrix.R | e2a01254b340c4e2978bba2ae8c8e13a344a7c0c | [] | no_license | rkrishnan53/ProgrammingAssignment2 | 1080f3268cef0d6c2c4ff4ab82c4f98edf6531a1 | e8284fe6d2c33c82607388d2916a556037260b4d | refs/heads/master | 2021-01-16T21:01:49.727606 | 2015-03-19T00:31:51 | 2015-03-19T00:31:51 | 32,426,302 | 0 | 0 | null | 2015-03-17T23:25:44 | 2015-03-17T23:25:44 | null | UTF-8 | R | false | false | 1,269 | r | cachematrix.R | ##makeCacheMatrix: Create a cache space in the global environment to hold a matrix and it's inverse. Preset both with NULL.makeCacheMatrix returns a list which includes two intermediate objects set and get but also the original matrix and it's inverse
##cacheSolve: Checks if the inverse exists (i.e. non-NULL).
##If NULL, it computes the inverse using solve and caches it for future iterations.
##If non-NULL, cacheSolve retrieves the inverse from the cache
##
# Build a caching wrapper around a matrix: returns a list of closures that
# share `x` (the matrix) and `m` (its cached inverse, NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; invalidated whenever the matrix changes
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      m <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setmatrix = function(solve) m <<- solve,
    # Return the cached inverse (NULL if not yet computed).
    getmatrix = function() m
  )
}
# Return the inverse of the matrix held by a makeCacheMatrix() wrapper `x`,
# computing it with solve() on first use and serving it from the cache
# (with a message) on subsequent calls. Extra arguments go to solve().
cacheSolve <- function(x = matrix(), ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    # Cache hit: skip the computation entirely.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute, store for next time, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
}
0f5fd48deb94045faccff6dec6593f5422713e6f | df6dc09aca37d6cd616436fe4ee1021677cebd37 | /R/addGlcTrns.R | e6249a98300137151783732606b2b963ec561055 | [] | no_license | cran/sybilccFBA | 9eadbbcc6bf0cd550dd4cc003a106b742b9764cd | 4c7c379f9407323f9d6963b3315487146cb5321e | refs/heads/master | 2020-05-17T11:07:00.514539 | 2019-12-15T14:20:05 | 2019-12-15T14:20:05 | 18,368,868 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,667 | r | addGlcTrns.R | addGlcTrns<-function(prob,mod2){
Si=0.2# units?
Hxt1 =41*Si/(Si+107)#Kcat*Si/(Si+Km)
Hxt2 = 16.1*Si/(Si+2.9)
Hxt3 = 18.5*Si/(Si+29)
Hxt4 =12*Si/(Si+6.2)
Hxt5 = 14*Si/(Si+10)
Hxt6 =11.4*Si/(Si+1.5)
Hxt7 =11.7*Si/(Si+1.3)
Gal2 =17.5*Si/(Si+1.5)
#Gal2 in existence of Galactose only
colid=getNumCols(lp = problem(prob))+1
trnsCol=NULL
# for(i in (1:7)){
# trnsCol=rbind(trnsCol,cbind(trns=i,Col=colid))
# addCols(lp = problem(prob),1)
# changeColsBnds(lp = problem(prob),colid,lb=0,ub=1000)
# colid=colid+1;
# }
#Hxt4 YHR092C
#Hxt1 YHR094C
#Hxt2 YMR011W
#( YHR092C or YLR081W or YOL156W or YDR536W or YHR094C or YEL069C or YDL245C or YJR158W or YFL011W or YNR072W or YMR011W or YDR345C or YHR096C or YDR343C or YDR342C or YJL214W or YJL219W )
#( Hxt4 ) or ( Gal2 ) or ( Hxt11 ) or ( Stl1 ) or ( Hxt1 ) or ( Hxt13 ) or ( Hxt15 ) or ( Hxt16 ) or ( Hxt10 ) or ( Hxt17 ) or ( Hxt2 ) or ( Hxt3 ) or ( Hxt5 ) or ( Hxt6 ) or ( Hxt7 ) or ( Hxt8 ) or ( Hxt9 )"
#Add constraint
rowind=getNumRows(lp = problem(prob))+1
#glcRxn=which(react_id(mod2)=='R_EX_glc_e__b')
glcRxn=which(react_id(mod2)=="R_GLCt1")
addRowsToProb(lp = problem(prob),
i = rowind , type = "U",
lb = 0, ub = 0,
cind = list(c(trnsCol[1,"Col"],trnsCol[2,"Col"],trnsCol[3,"Col"],trnsCol[4,"Col"],trnsCol[5,"Col"],trnsCol[6,"Col"],
trnsCol[7,"Col"],glcRxn)),
nzval = list(c(-Hxt1,-Hxt2,-Hxt3,-Hxt4,-Hxt5,-Hxt6,-Hxt7,1))
,rnames = "glcTrns"
)
#Add to crowding constraint (same budget), Molecular weights required?
return(prob)
}
|
91451adb3ab96b49760e5deb720c444279a53d63 | 7917fc0a7108a994bf39359385fb5728d189c182 | /paws/R/fms_interfaces.R | dd009cf7f92d46ce818dcc2b32569db78683ea15 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | false | 33,198 | r | fms_interfaces.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include fms_service.R
NULL
.fms$associate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$associate_admin_account_output <- function(...) {
list()
}
.fms$delete_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_apps_list_output <- function(...) {
list()
}
.fms$delete_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_notification_channel_output <- function(...) {
list()
}
.fms$delete_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), DeleteAllPolicyResources = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_policy_output <- function(...) {
list()
}
.fms$delete_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_protocols_list_output <- function(...) {
list()
}
.fms$disassociate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$disassociate_admin_account_output <- function(...) {
list()
}
.fms$get_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_account_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), RoleStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string")), DefaultList = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_apps_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_compliance_detail_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_compliance_detail_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyComplianceDetail = structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), Violators = structure(list(structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), ViolationReason = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean")), ExpiredAt = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_notification_channel_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SnsTopicArn = structure(logical(0), tags = list(type = "string")), SnsRoleName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_protection_status_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccountId = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp")), EndTime = structure(logical(0), tags = list(type = "timestamp")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_protection_status_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccountId = structure(logical(0), tags = list(type = "string")), ServiceType = structure(logical(0), tags = list(type = "string")), Data = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string")), DefaultList = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_protocols_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_violation_details_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_violation_details_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ViolationDetail = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceViolations = structure(list(structure(list(AwsVPCSecurityGroupViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), PartialMatches = structure(list(structure(list(Reference = structure(logical(0), tags = list(type = "string")), TargetViolationReasons = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), PossibleSecurityGroupRemediationActions = structure(list(structure(list(RemediationActionType = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RemediationResult = structure(list(IPV4Range = structure(logical(0), tags = list(type = "string")), IPV6Range = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), FromPort = structure(logical(0), tags = list(type = "long")), ToPort = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), IsDefaultAction = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), AwsEc2NetworkInterfaceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), 
AwsEc2InstanceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), AwsEc2NetworkInterfaceViolations = structure(list(structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), NetworkFirewallMissingFirewallViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingSubnetViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingExpectedRTViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), CurrentRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedRouteTable = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallPolicyModifiedViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), CurrentPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = 
"string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), ExpectedPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = 
structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceDescription = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_apps_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_apps_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_compliance_status_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_compliance_status_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyComplianceStatusList = structure(list(structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), EvaluationResults = structure(list(structure(list(ComplianceStatus = structure(logical(0), tags = list(type = "string")), ViolatorCount = structure(logical(0), tags = list(type = "long")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_member_accounts_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_member_accounts_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MemberAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_policies_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_policies_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyList = structure(list(structure(list(PolicyArn = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), SecurityServiceType = structure(logical(0), tags = list(type = "string")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_protocols_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_protocols_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_apps_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SnsTopicArn = structure(logical(0), tags = list(type = "string")), SnsRoleName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_notification_channel_output <- function(...) {
list()
}
.fms$put_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Builds the request "shape" for the AWS FMS PutProtocolsList operation and
# fills it with the caller-supplied arguments via populate() (defined
# elsewhere in the package).
# NOTE(review): these shape builders appear machine-generated (paws-style
# AWS API bindings) - regenerate rather than hand-edit the shape literals.
.fms$put_protocols_list_input <- function(...) {
  # collect named arguments from the call environment plus `...`
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Builds the response "shape" for PutProtocolsList (protocols list plus its ARN).
.fms$put_protocols_list_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request "shape" for the FMS TagResource operation: the resource ARN and a
# list of Key/Value tag pairs. (Machine-generated paws-style binding.)
.fms$tag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response "shape" for TagResource: the API returns an empty structure.
.fms$tag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request "shape" for the FMS UntagResource operation: the resource ARN and
# the list of tag keys to remove. (Machine-generated paws-style binding.)
.fms$untag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response "shape" for UntagResource: the API returns an empty structure.
.fms$untag_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(), tags = list(type = "structure"))
  return(populate(args, shape))
}
|
8c3301e41f5c67eb73624695ebec04bb7586e0fa | 42b048705fa35c38a4e70b8b63bc9586bedd42ca | /Assignment 5/assignment5.R | f53ee0fec3afb2be934d781277736c4299635741 | [] | no_license | mikhailgaerlan/Computational-Statistics-Assignments | 77618850fcebf174baad715c83121c24990d6217 | 2c9a98f2013de8ce29f3cfa559a37ee31f3389a7 | refs/heads/master | 2020-04-26T03:50:38.242247 | 2019-03-01T10:16:02 | 2019-03-01T10:16:02 | 173,281,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,808 | r | assignment5.R | par(family = 'serif')
# Session setup for this standalone assignment script.
# NOTE(review): setwd() + rm(list=ls()) clobber the caller's working
# directory and workspace - acceptable only because this is a homework
# script meant to be run top-to-bottom in a fresh session.
setwd("/Users/mikhailgaerlan/Box Sync/Education/UC Davis/2016-2017 Spring/STA 243 Computational Statistics/Assignments/Assignment 4")
rm(list=ls())
# Fix the RNG seed so the simulated data below is reproducible.
set.seed(0517)
# Standard normal density, evaluated elementwise.
phi <- function(u) {
  exp(-0.5 * u * u) / sqrt(2 * pi)
}
# True regression function for the simulation: a broad Gaussian bump
# centered at 0.35 minus a narrow one centered at 0.8.
f <- function(x) {
  1.5 * phi((x - 0.35) / 0.15) - phi((x - 0.8) / 0.04)
}
# Stub: presumably intended to return the smoother ("hat") matrix for the
# data (x, y), but it was never implemented and is never called; the hat
# matrix is built inline in aicc() and in the script below instead.
smoothingmatrix = function(x,y){
}
# AICc-style model-selection criterion for a penalized (ridge) spline fit
# with penalty weight `lambda` on the truncated-power-basis coefficients.
# NOTE(review): relies on globals p (degree), k (number of knots), knots,
# and - for the design-matrix dimensions - the *global* n, because the
# local `n = length(x)` is only assigned after X is built. This works here
# because the script sets n = 200 before calling; confirm before reuse.
aicc = function(lambda,x,y){
  # design matrix: polynomial columns 1..(p+1), then k truncated-power
  # basis columns (x - knot)_+^p
  X = array(0,c(n,p+1+k))
  for(i in 1:(p+1)){
    for(j in 1:n){
      X[j,i] = x[j]^(i-1)
    }
  }
  for(i in 1:k){
    for(j in 1:n){
      X[j,p+1+i] = (max(c((x[j]-knots[i]),0)))^p
    }
  }
  # penalty applies only to the k knot coefficients, not the polynomial part
  D = diag(c(0*(1:(p+1)),0*(1:k)+1))
  # smoother ("hat") matrix and fitted values for this lambda
  hlambda = X %*% (solve((t(X) %*% X)+lambda*D) %*% t(X))
  fhatlambda = hlambda %*% y
  n = length(x)
  # effective degrees of freedom = trace of the hat matrix
  tr = sum(diag(hlambda))
  # residual sum of squares
  norm = sum((y-fhatlambda)^2)
  return(log(norm)+2*(tr+1)/(n-tr-2))
}
# Parameter Values
# p: spline degree, n: sample size, j: noise-level index, k: number of knots
p = 3
n = 200
j = 1
k = 30
# Data generation: noisy observations of f() on an equispaced grid in (0, 1)
x = (1:200-0.5)/n
knots = min(x) + 1:k*(max(x)-min(x))/(k+1)
sigma = 0.02 + 0.04*(j-1)^2
epsilon = rnorm(n,0,1)
y = f(x) + sigma*epsilon
plot(x,y,pch=".")
# Spline Regression (unpenalized least squares on the truncated power basis).
# NOTE: the inner loops reuse `j`, clobbering the noise-level index above.
X = array(0,c(n,p+1+k))
for(i in 1:(p+1)){
  for(j in 1:n){
    X[j,i] = x[j]^(i-1)
  }
}
for(i in 1:k){
  for(j in 1:n){
    X[j,p+1+i] = (max(c((x[j]-knots[i]),0)))^p
  }
}
fhat = (X %*% (solve(t(X) %*% X) %*% t(X))) %*% y
print(aicc(0,x,y))
lines(x,fhat,col="blue")
# Penalized Spline Regression: pick lambda by minimizing AICc.
# Bug fix: nlm() requires a *function* as its first argument; the original
# passed the already-evaluated value aicc(lambda, x, y), which made nlm()
# error out. Wrap the criterion in an anonymous function of lambda instead.
lambda = 0
minlambda = nlm(function(l) aicc(l, x, y), c(lambda))
lambda = minlambda$estimate
# Refit with the selected lambda and overlay the penalized fit in red.
D = diag(c(0*(1:(p+1)),0*(1:k)+1))
hlambda = X %*% (solve((t(X) %*% X)+lambda*D) %*% t(X))
fhatlambda = hlambda %*% y
print(aicc(lambda,x,y))
lines(x,fhatlambda,col="red")
# Near-zero penalty for comparison (should track the unpenalized fit).
D = diag(c(0*(1:(p+1)),0*(1:k)+1))
hlambda = X %*% (solve((t(X) %*% X)+0.000000000001*D) %*% t(X))
fhatlambda = hlambda %*% y
print(aicc(0.000000000001,x,y))
lines(x,fhatlambda,col="green")
|
53e5d50f3558c0f3c7a7f46a3b5f6b13cc42b88b | 2e4540191590b6991bf887ad80f2bd46d6fbe1ae | /parameters.R | 1207f6248f8e2a1eab9c6188178d27c38aafb16b | [] | no_license | HonigsEnvato/customer-value | b526e46e8e583f696ab50263a08a3c596d745d66 | 439c805ff0d0602d23d877eabe015d4506a24dcb | refs/heads/master | 2022-09-12T08:34:31.665243 | 2020-02-18T02:38:11 | 2020-02-18T02:38:11 | 241,255,701 | 0 | 0 | null | 2022-08-23T18:03:24 | 2020-02-18T02:36:40 | R | UTF-8 | R | false | false | 383 | r | parameters.R | sandbox_configuration = config::get("postgres_sandbox", config = "dev", file = "enva_config.yml")
# Quantile cut points used to split markets into bottom-half, third-quartile
# and top-quartile buckets.
market_quantile_breaks = c(0, 0.5, 0.75, 1)
## Default plan price
plan_price = 33
# Annual plan price (presumably billed yearly, vs the default price above -
# TODO confirm).
plan_price_annual = 198
# NOTE(review): presumably the net share of revenue retained after costs -
# confirm against the LTV model that consumes it.
net_percentage = 0.485
# Placeholder lifetime value used for paid search when no modeled value exists.
paid_search_ltv_placeholder = 150
# See eda/10_24_months_ltv_with_annual/24_months_ltv.html for more information
paid_search_30_months_adjustment = 1.12
|
62730a58c37e7e52748ebcfad1e8be2a16e904f5 | 5f571c61465e3a743a576a96e7f6b92feaacf9f7 | /inst/examples/example1.R | 8df22ccb227f414f04dd6f66e81da35dac8f676e | [
"MIT"
] | permissive | timelyportfolio/reactspringwidget | 19c7117b3c3be0db876683761bea433f746ff33d | 0e01d9e029d8ac54fb7c629b61f04546b86518b7 | refs/heads/master | 2020-04-12T08:18:15.873869 | 2018-12-19T03:53:16 | 2018-12-19T03:53:16 | 162,379,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 167 | r | example1.R | library(reactR)
library(htmltools)
library(springwidget)
# Demo: render a minimal springwidget in the RStudio viewer / browser.
# jQuery is attached as an explicit HTML dependency, and the widget is
# mounted with no data on an empty div.
browsable(
  tagList(
    rmarkdown::html_dependency_jquery(),
    springwidget(data=NULL, tags$div())
  )
)
|
2152cba682b8508a47ba3378e816c90bc3e965e5 | 429f8f159ece1946d26feec2934b022ed34be214 | /fpem01/R/access_results.R | a60ac0cfdaeb9832b38b6d6c7dd56be5fe100fe1 | [] | no_license | an-h-tran/testrepo | a38da0ee9a9fff0168c74abf354fb0cfa3a146df | 76c19988e96f5bbbcc750339e936e0643e1e5d6c | refs/heads/master | 2020-04-06T11:04:33.075047 | 2018-11-18T23:41:18 | 2018-11-18T23:41:18 | 157,402,771 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,198 | r | access_results.R |
#' Access FP results
#'
#' Subsets a family-planning results table to the requested regions,
#' erroring (via check_names()) when any requested region is unknown.
#'
#' @param fp_results tibble with an `iso` column (to filter on)
#' @param regions subset of fp_results$iso (an error is raised when a
#'   region is not included)
#'
#' @return fp_results restricted to the selected regions
#'
#' NOTE(review): `%>%` and filter() presumably come from magrittr/dplyr via
#' the package imports, and there is no @export tag (unlike check_names
#' below) - confirm both are intentional.
access_results <- function(fp_results, regions) {
  check_names(regions, fp_results$iso)
  fp_results %>%
    filter(iso %in% regions)
}
#' Check if regions are included in a larger list
#'
#' @param regions vector of strings to validate
#' @param all_regions vector of strings defining the allowed values
#'
#' @return invisibly NULL; raises an error when at least one element of
#'   `regions` is not contained in `all_regions`
#' @export
#'
#' @examples
#' check_names(letters[1:10], letters[1:20]) # no error
#' check_names(letters[1:21], letters[1:20]) # error produced
check_names <- function(regions, all_regions) {
  # Collect the offenders first so the error message can list them.
  unknown <- regions[!(regions %in% all_regions)]
  if (length(unknown) > 0) {
    stop(
      paste0(
        "All elements in 'regions' must be in 'all_regions'. \n",
        "These regions are NOT in all regions: ",
        paste0(unknown, collapse = ", ")
      ),
      call. = FALSE
    )
  }
}
|
fa2e7f4959a197a74ea916bb73e262bd3c3450fe | 1554efd5de247b393039460b2905d1dbb00136bb | /TimeSeries8.R | b3d5ee780f2491140c6efb3c3e5528edfabe2725 | [] | no_license | daoleen/RAir | b0b71884f5470be7e83c0cf574e4ac30c929e461 | 323e70cc772dcaaa4fc1fa67db75aeeb924f6b8f | refs/heads/master | 2021-01-10T20:35:35.185053 | 2014-06-09T11:54:26 | 2014-06-09T11:54:26 | 20,642,702 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,848 | r | TimeSeries8.R | library(forecast)
library(astsa)
source('helper.R')
source('test.R')
require(randomForest)
# Load the Greenwich (London) air-quality measurements: a long table with a
# Species column and (per the subsetting below) a Value column per reading.
data <- read.csv(file="GreenwichLondonAirQuality.csv", header=TRUE)
# Split the long table into one frame per measured species
# (gases, particulates, wind direction and wind speed).
NO <- (data[data$Species == "NO",])
NO2 <- (data[data$Species == "NO2",])
NOX <- (data[data$Species == "NOX",])
PM10 <- (data[data$Species == "PM10",])
PM2.5 <- (data[data$Species == "PM2.5",])
WDIR <- (data[data$Species == "WDIR",])
WSPD <- (data[data$Species == "WSPD",])
# Rebuild one wide frame of the series; get.FillValueForTimeSeries comes
# from the sourced helper files (presumably fills missing values - confirm).
data = data.frame(
  "NO"=(get.FillValueForTimeSeries(NO))$Value,
  "NO2"=(get.FillValueForTimeSeries(NO2))$Value,
  "NOX"=(get.FillValueForTimeSeries(NOX))$Value,
  "PM10"=(get.FillValueForTimeSeries(PM10))$Value,
  "PM2.5"=(get.FillValueForTimeSeries(PM2.5))$Value,
  "WSPD"=(get.FillValueForTimeSeries(WSPD))$Value,
  "WDIR"=(get.FillValueForTimeSeries(WDIR))$Value
);
rm(NO, NO2, NOX, PM10, PM2.5, WDIR, WSPD)
# Attach cluster labels (e.g. NOCluster) - helper from the sourced files.
data <- get.ClusterDataFrame(data)
# Plotting dataTs
# N = 168 hourly observations = one week at frequency 24.
N <- 168
plot(data[1:N,])
plot.ts(
  ts(data[1:N, "NO"], start=1, frequency=24),
  col=data[1:N, "NOCluster"],
)
# random forest
# Fit clusters as a function of NO on the first week, then overlay the
# in-sample predictions on the scatter of observed clusters.
plot(data$NO[1:N], data$NOCluster[1:N], col=data$NOCluster[1:N])
forest <- randomForest(NOCluster ~ NO, data=data[1:N,], importance=T, do.trace=100)
forestPredicts <- predict(forest, newdata=data[1:N,])
points(data$NO[1:N], forestPredicts)
# forecasting by SARIMA
# Forecast the next 48 hours of NO with a seasonal ARIMA
# (2,1,1)x(0,1,3)[24] fitted on the first N hours.
hoursToPredict <- 48
predictStart <- N+1
predictEnd <- predictStart+hoursToPredict-1
model <- sarima.for(
  ts(data$NO[1:N], start=1, frequency=24),
  hoursToPredict, 2, 1, 1, 0, 1, 3, 24
)
# Overlay the actually-observed values for the forecast window.
lines(ts(data$NO[predictStart:predictEnd], start=N/24+1, frequency=24), type="o", col="blue")
# classification by random forest
# Reuse the forest to classify the forecast values; sarima.for() returns
# $pred, copied to a column named NO to match the model formula.
model$NO <- model$pred
classes <- predict(forest, newdata=model)
plot(data$NO[predictStart:predictEnd], data$NOCluster[predictStart:predictEnd], col="green")
points(data$NO[predictStart:predictEnd], classes, col="red")
|
fccaf2340516a38fba56006202e8287edd4037de | 757c1d308a85dcbcc46956852830cac543dcdf85 | /Temporal_Analysis/Relative_motif_enrichment_score/with_expression_data/motifEnrich_barplot_E95E135CrossStages_byExpression.R | 9a7c53a4d43f2b62c7a273320f5da272e259345b | [] | no_license | hongpan-uva/Single-cell-chromatin-profiling-of-the-primitive-gut-tube | 3e780f1ffe60b247839900847833dc7483d5db4e | d96636f3b62c47eecd8df0f617dfe6b6716e6dab | refs/heads/main | 2023-04-12T01:05:52.014882 | 2022-03-29T18:50:06 | 2022-03-29T18:50:06 | 353,776,159 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 7,947 | r | motifEnrich_barplot_E95E135CrossStages_byExpression.R | motifoverlaps=paste0("/scratch/hz9fq/toronto_project/E95E135_CrossStages/signal_mergedPeaks_1129/motif_overlap/",c("intestine","pancreas","stomach","lung"),"_peak_motifoverlap.txt")
# Input/output locations: per-organ correlation ("relative motif
# enrichment") tables, the plot output directory, and the merged peak sets
# for the four organs (same order everywhere: intestine, pancreas,
# stomach, lung).
cor_dir="/scratch/hz9fq/toronto_project/E95E135_CrossStages/signal_mergedPeaks_1129/cor/"
cor_files=c("E95intestine_E135intestine_cor.txt","E95pancreas_E135pancreas_cor.txt","E95stomach_E135stomach_cor.txt","E95lung_E135lung_cor.txt")
plot_dir="/scratch/hz9fq/toronto_project/E95E135_CrossStages/signal_mergedPeaks_1129/cor/plots/"
peaksets=paste0("/scratch/hz9fq/toronto_project/E95E135_CrossStages/mergedPeaks_1129/",c("intestine","pancreas","stomach","lung"),"_mergedPeak.bed")
#prepare FPKM tables
# E9.5: per-gene FPKM for the four organs; gene symbols are upper-cased so
# they match the upper-cased TF names used later.
FPKM_E95 = read.table("/scratch/hz9fq/toronto_project/E95_scRNA/E95_FPKM_matrix_4Organs.txt",header=T)
rownames(FPKM_E95) <- toupper(rownames(FPKM_E95))
Exp_E95 <- log2(FPKM_E95+1)
# E13.5: columns 7-14 of the collection are averaged in consecutive pairs
# (presumably replicate pairs - confirm) into one column per organ.
FPKM_col = read.table("/scratch/hz9fq/toronto_project/ALL_RNA_collection/mod_exp/FPKM_collection.txt",header=T)
rownames(FPKM_col) = FPKM_col$Gene.ID
FPKM_E135 <- FPKM_col[,7:14]
FPKM_E135 <- cbind(apply(FPKM_E135[,1:2],1,mean),
                   apply(FPKM_E135[,3:4],1,mean),
                   apply(FPKM_E135[,5:6],1,mean),
                   apply(FPKM_E135[,7:8],1,mean))
colnames(FPKM_E135) <- c("intestine","pancreas","stomach","lung")
rownames(FPKM_E135) <- toupper(rownames(FPKM_E135))
#Exp_E135 <- log2(FPKM_E135+1)
# Per-organ analysis: index `set` selects the organ in motifoverlaps /
# peaksets / cor_files and the matching FPKM organ column.
for(set in 1:4){
  motifoverlap <- motifoverlaps[set]
  peakset <- peaksets[set]
  file <- cor_files[set]
  peak.tab <- read.table(peakset,header=F,stringsAsFactors=F)
  motif.tab <- read.table(motifoverlap,header=T,stringsAsFactors=F)
  # Number of peaks overlapping each motif; strip the "_map..." suffix so
  # the names match the TF names in the correlation table.
  peakNumber <- apply(motif.tab,2,sum)
  names(peakNumber) <- sapply(names(peakNumber),function(x){
    tmp <- strsplit(x,"_map")
    return(tmp[[1]][1])
  })
  print(file)
  t2 <- read.table(paste0(cor_dir,file),header=T,stringsAsFactors=F)
  rownames(t2) <- t2[,1]
  #set -300 as lower boundary
  # (the score is in fact clamped on BOTH sides, to [-300, 300])
  t2$wilcox.new[which(t2$wilcox.new<(-300))] <- -300
  t2$wilcox.new[which(t2$wilcox.new>(300))] <- 300
  #wilcox.new
  datatype = "_wilcox.new"
  # Column 8 holds the clamped wilcox.new score; rename it for plotting.
  t2_use <- t2[,c(1,8)]
  colnames(t2_use)[2] <- "Enrichment.Score"
  uplimit = max(abs(t2_use[,2]))
  lowlimit = -uplimit
  # Keep only TFs that have expression values at both E9.5 and E13.5.
  overlap1 <- intersect(rownames(t2_use),rownames(FPKM_E95))
  overlap2 <- intersect(overlap1,rownames(FPKM_E135))
  t2_use <- t2_use[overlap2,]
  t2_use$peaks<-as.numeric(peakNumber[rownames(t2_use)])
  # Bin the peak counts into 100 equal-width blocks; V4 = block index,
  # later used to pick a color from the 100-color palette below.
  blocks <- seq(min(t2_use$peaks),max(t2_use$peaks),length.out=101)
  blockctrs <- c()
  for(i in 1:(length(blocks)-1)){
    blockctrs <- c(blockctrs,mean(c(blocks[i],blocks[i+1])))
  }
  names(blockctrs) <- 1:100
  # Map a peak count to the index of the nearest block center.
  getblockctr <- function(x){
    return(names(blockctrs)[which.min(abs(blockctrs-x))])
  }
  t2_use$V4 <- sapply(t2_use$peaks,getblockctr)
  filename <- strsplit(file,"_cor")[[1]][1]
  #write.table(t2_use,file=file,col.names=F,row.names=F,sep='\t',quote=F)
  horizon_pal=c("#000075","#2E00FF","#9408F7","#C729D6","#FA4AB5","#FF6A95","#FF8B74","#FFAC53","#FFCD32","#FFFF60")
  colors <- colorRampPalette(horizon_pal)(100)
  color.V <- colors[as.numeric(t2_use$V4)]
  # Attach E9.5 expression for the current organ; -1 marks "no expression
  # data" (redundant after the intersect() filtering above, kept as-is).
  t2_use$E95exp <- -1
  count <- 0
  for(i in 1:nrow(t2_use)){
    if(rownames(t2_use)[i]%in%rownames(FPKM_E95)){
      t2_use$E95exp[i] <- FPKM_E95[rownames(t2_use)[i],set]
      count <- count+1
    }
  }
  print(paste0("Number of TF with expression: ",count))
  #t2_use$E95exp <- log2(t2_use$E95exp+1)
  print(summary(t2_use$E95exp))
  # Same for E13.5 expression.
  t2_use$E135exp <- -1
  count <- 0
  for(i in 1:nrow(t2_use)){
    if(rownames(t2_use)[i]%in%rownames(FPKM_E135)){
      t2_use$E135exp[i] <- FPKM_E135[rownames(t2_use)[i],set]
      count <- count+1
    }
  }
  print(paste0("Number of TF with expression: ",count))
  #t2_use$E135exp <- log2(t2_use$E135exp+1)
  print(summary(t2_use$E135exp))
  #t2_use$rank <- rank(t2_use$wilcox.new,ties.method="first")
  print(max(t2_use[,5]))
  print(max(t2_use[,6]))
  #markers=unique(c(rownames(t2_use)[order(t2_use[,2],decreasing=T)][1:20],rownames(t2_use)[order(t2_use[,2],decreasing=F)][1:20],rownames(t2_use)[order(t2_use[,5],decreasing=T)][1:10],rownames(t2_use)[order(t2_use[,6],decreasing=T)][1:10]))
  #pdf(paste0(plot_dir,filename,datatype,"_byE9Exp.pdf"))
  #par(mar=c(6,6,6,6))
  #plot(t2_use[,2],t2_use[,5],xlab="wilcox_newValue",ylab="E9.5 log2(FPKM+1)",type="p",pch=19,cex=.7,col=color.V,main=paste0(filename,datatype),xlim=c(lowlimit-10,uplimit+10),ylim=c(0,9))
  #labels <- c(min(t2_use$peaks),round((min(t2_use$peaks)+max(t2_use$peaks))/2),max(t2_use$peaks))
  #plotrix::color.legend(360,100,400,250,legend=labels,rect.col=colors,align="rb",gradient="y",cex=0.7)
  #for(i in markers){
  #  text(t2_use[i,2],(t2_use[i,5]+0.2),i,col="black",cex=0.5)
  #}
  #dev.off()
  #pdf(paste0(plot_dir,filename,datatype,"_byE13Exp.pdf"))
  #par(mar=c(6,6,6,6))
  #plot(t2_use[,2],t2_use[,6],xlab="wilcox_newValue",ylab="E13.5 log2(FPKM+1)",type="p",pch=19,cex=.7,col=color.V,main=paste0(filename,datatype),xlim=c(lowlimit-10,uplimit+10),ylim=c(0,9))
  #labels <- c(min(t2_use$peaks),round((min(t2_use$peaks)+max(t2_use$peaks))/2),max(t2_use$peaks))
  #plotrix::color.legend(360,100,400,250,legend=labels,rect.col=colors,align="rb",gradient="y",cex=0.7)
  #for(i in markers){
  #  text(t2_use[i,2],(t2_use[i,6]+0.2),i,col="black",cex=0.5)
  #}
  #dev.off()
  #barplot
  # Top 20 TFs by enrichment score: dump the table and draw side-by-side
  # horizontal barplots (score on the left, E9.5/E13.5 expression right).
  t2_use.order <- t2_use[order(t2_use$Enrichment.Score,decreasing=T),]
  bardf <- t2_use.order[1:20,c(1,2,5,6)]
  write.table(bardf,paste0(plot_dir,filename,datatype,"_top_table.txt"),row.names=F,col.names=T,sep="\t",quote=F)
  pdf(paste0(plot_dir,filename,datatype,"_exp_top_barplot.pdf"))
  par(mfrow = c(1:2))
  scoreVec <- bardf[,2]
  names(scoreVec) <- rownames(bardf)
  scoreVec <- rev(-scoreVec)
  par(mar=c(5.1,4.1,4.1,1.1))
  barplot(scoreVec,names.arg=rep("",20),horiz=T,las=1,col="#512D6D",xlim=c(-300,0),axes=FALSE)
  axis(1, at=seq(-0,-300,by=-50), labels=seq(0,300,by=50),cex.axis=0.6)
  legend(-200,26,legend=c("relative motif enrichment score"),fill=c("#512D6D"),bty="n",cex=0.6,xpd=T)
  expmtx <- t(as.matrix(bardf[,c(3:4)]))
  expmtx <- expmtx[2:1,20:1]
  par(mar=c(5.1,2.1,4.1,2.1))
  barplot(expmtx,beside=T,horiz=T,las=1,col=c("#F8485E","#00C1D4"),xlim=c(0,9),cex.names=0.6,axes=FALSE)
  axis(1, at=seq(0,9,by=1), labels=seq(0,9,by=1),cex.axis=0.6)
  legend(3,65,legend=c("E9.5 Expression","E13.5 Expression"),fill=c("#F8485E","#00C1D4"),bty="n",cex=0.6,xpd=T)
  dev.off()
  # Bottom 20 TFs by enrichment score: same table + plot layout.
  bardf <- t2_use.order[(nrow(t2_use.order)-19):nrow(t2_use.order),c(1,2,5,6)]
  write.table(bardf,paste0(plot_dir,filename,datatype,"_bottom_table.txt"),row.names=F,col.names=T,sep="\t",quote=F)
  pdf(paste0(plot_dir,filename,datatype,"_exp_bottom_barplot.pdf"))
  par(mfrow = c(1:2))
  scoreVec <- bardf[,2]
  names(scoreVec) <- rownames(bardf)
  scoreVec <- rev(scoreVec)
  par(mar=c(5.1,4.1,4.1,1.1))
  barplot(scoreVec,names.arg=rep("",20),horiz=T,las=1,col="#512D6D",xlim=c(-300,0),axes=FALSE)
  axis(1, at=seq(-0,-300,by=-50), labels=seq(0,-300,by=-50),cex.axis=0.6)
  legend(-200,26,legend=c("relative motif enrichment score"),fill=c("#512D6D"),bty="n",cex=0.6,xpd=T)
  expmtx <- t(as.matrix(bardf[,c(3:4)]))
  expmtx <- expmtx[2:1,20:1]
  par(mar=c(5.1,2.1,4.1,2.1))
  barplot(expmtx,beside=T,horiz=T,las=1,col=c("#F8485E","#00C1D4"),xlim=c(0,9),cex.names=0.6,axes=FALSE)
  axis(1, at=seq(0,9,by=1), labels=seq(0,9,by=1),cex.axis=0.6)
  legend(3,65,legend=c("E9.5 Expression","E13.5 Expression"),fill=c("#F8485E","#00C1D4"),bty="n",cex=0.6,xpd=T)
  dev.off()
}
|
0803a9b4385cc6af1b55cabc9cfd31f759687db6 | 4d7c1f3a432363bc5393aee823ef8fb2ffb4d937 | /src/models/GenerateSequenceEvaluations.R | 79083cfc8822be439e014012935da970de50179f | [] | no_license | lprisan/paper-JCAL-multimodal-teaching-analytics | f7deb882d21468656fe430a0740373ff005e3c78 | b9b74ec871748b2df413c76404b75b5f0f01c73a | refs/heads/master | 2021-05-12T19:53:10.511469 | 2018-01-11T22:31:32 | 2018-01-11T22:31:32 | 117,107,438 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,431 | r | GenerateSequenceEvaluations.R | # This helper script generates the sequence of commands needed to run
# the whole evaluation harness of the paper, for a certain training/testing model
# It outputs a txt file with the series of commands to run, named <command>.inputs.txt
# It assumes the R script is runnable directly (e.g., in Linux, starts with #!/usr/bin/env Rscript)
##############################################
# This prefix serves to distinguish the labels of, say, the kind of model we train/test here, e.g., Random Forest, LSTMs5layers, etc.
# Change it each time you generate a new file like this, to keep things findable!
# And do not put "_" in the middle of the prefix (we use _ as separators in the visualization scripts)
command <- "Train_Evaluate_RF_MC.R"
# LABELPREFIX <- "SVMBestNoCorr_"
LABELPREFIX <- "RF_MC_"
###############################################
# Output file + connection; `lines` accumulates one shell command per entry.
outfile <- paste(command,".inputs.txt",sep="")
fileConn<-file(outfile)
lines <- character()
# List of sessions
sessions <- c("case1-day1-session1-teacher1","case1-day1-session2-teacher1",
              "case1-day1-session3-teacher1","case1-day1-session4-teacher1",
              "case2-day1-session1-teacher2","case2-day1-session2-teacher2",
              "case2-day2-session1-teacher2","case2-day2-session2-teacher2",
              "case2-day3-session1-teacher2","case2-day3-session2-teacher2",
              "case2-day4-session1-teacher2","case2-day4-session2-teacher2")
# Wrap a string in single quotes (shell-style quoting for a command-line
# argument).
wrap <- function(string) {
  paste0("'", string, "'")
}
# For each target variable
targets <- c("Activity", "Social")
for(target in targets){
  # For each combination of data sources...
  #sources <- c("et","acc","vid","aud","acc,aud","aud,vid","acc,aud,vid","all")
  sources <- c("all")
  for(source in sources){
    # General models (multi-teacher)
    ## Leave one session out
    # One command per held-out session: train on the other 11, test on it.
    for(i in 1:length(sessions)){
      s <- sessions[i]
      trainsessions <- sessions[!(sessions %in% s)]
      trainstr <- paste(trainsessions, collapse=",")
      testsessions <- sessions[sessions %in% s]
      teststr <- paste(testsessions, collapse=",")
      label <- paste(LABELPREFIX,gsub(pattern = ",", replacement = "", x = source),
                     "_GM_LOSO_",target,"_",i,sep="")
      # We have to build commands of the form: <command> <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>
      cmdLine <- paste(command,wrap(label),wrap(target),wrap(source),wrap(trainstr),wrap(teststr))
      lines <- c(lines, cmdLine)
    }
    ## Leave one teacher out (train on one, test on the other)
    teachers <- c("teacher1","teacher2")
    # for(i in 1:length(teachers)){
    #   t <- teachers[i]
    #
    #   trainsessions <- sessions[!grepl(t,sessions,fixed=T)]
    #   trainstr <- paste(trainsessions, collapse=",")
    #   testsessions <- sessions[grepl(t,sessions,fixed=T)]
    #   teststr <- paste(testsessions, collapse=",")
    #
    #   label <- paste(LABELPREFIX,gsub(pattern = ",", replacement = "", x = source),
    #                  "_GM_LOTO_",target,"_",i,sep="")
    #
    #   # We have to build commands of the form: <command> <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>
    #   cmdLine <- paste(command,wrap(label),wrap(target),wrap(source),wrap(trainstr),wrap(teststr))
    #
    #   lines <- c(lines, cmdLine)
    # }
    # Personalized models (trained and tested with data from ONE teacher)
    ## Leave one session out
    for(j in 1:length(teachers)){
      t <- teachers[j]
      partsessions <- sessions[grepl(t,sessions,fixed=T)] # Sessions of this teacher
      # We train/test on the data for one teacher only
      for(i in 1:length(partsessions)){
        s <- partsessions[i]
        trainsessions <- partsessions[!(partsessions %in% s)]
        trainstr <- paste(trainsessions, collapse=",")
        testsessions <- partsessions[partsessions %in% s]
        teststr <- paste(testsessions, collapse=",")
        label <- paste(LABELPREFIX,gsub(pattern = ",", replacement = "", x = source),
                       "_PM_LOSO_",target,"_t",j,"s",i,sep="")
        # We have to build commands of the form: <command> <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>
        cmdLine <- paste(command,wrap(label),wrap(target),wrap(source),wrap(trainstr),wrap(teststr))
        lines <- c(lines, cmdLine)
      }
    }
    ## Leave one situation out (only for teacher 2, teacher 1 has only 1 situation)
    # partsessions <- sessions[grepl("teacher2",sessions,fixed=T)]
    # situations <- c("day1","day2","day3","day4")
    # for(i in 1:length(situations)){
    #   s <- situations[i]
    #   trainsessions <- partsessions[!grepl(s,partsessions,fixed=T)]
    #   trainstr <- paste(trainsessions, collapse=",")
    #   testsessions <- partsessions[grepl(s,partsessions,fixed=T)]
    #   teststr <- paste(testsessions, collapse=",")
    #
    #   label <- paste(LABELPREFIX,gsub(pattern = ",", replacement = "", x = source),
    #                  "_PM_LOSitO_",target,"_",i,sep="")
    #
    #   # We have to build commands of the form: <command> <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>
    #   cmdLine <- paste(command,wrap(label),wrap(target),wrap(source),wrap(trainstr),wrap(teststr))
    #
    #   lines <- c(lines, cmdLine)
    #
    # }
  }
}
# Flush all generated commands to <command>.inputs.txt and close the file.
writeLines(lines, fileConn)
close(fileConn)
|
af12cbefc99a1b4c2863fc50e836c9694f985e4e | cab93f2149fe6eb7601438532734a9b06f6e0b7d | /demo/smallAnimalsDemo.R | 0d232b5c7b6a1a7f8236584bea31d0b4e3ec2e2c | [] | no_license | xvallspl/ReRCHIC | 9a0776b7f6c326a4c6e8255555066edb902bb0d4 | 063e87a9ae48d989ad553c30d623a1f77dc8088f | refs/heads/master | 2021-01-01T06:44:40.379419 | 2014-10-01T14:19:56 | 2014-10-01T14:19:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | smallAnimalsDemo.R | #' Performs the similarity analysis on the animalesSmall dataset, plotting the hierarchical tree as a phylogenetic tree.
#'
#' Similarity analysis on the animalesSmall dataset, a (cropped) dataset describing children perception of animals characteristics.
#'
#' @author Xavier Valls \email{xaviervallspla@@gmail.com}
# Load the demo dataset and 'ape', used below for phylogenetic-tree plotting.
data(animalesSmall)
require(ape)
# NOTE(review): the data file loaded is animalesSmall but the object passed
# to the algorithm is `animales` - presumably the object stored inside the
# data file; confirm the names match.
ASI<-callASIAlgorithm(animales)
# The ASI result stores Newick tree strings; parse the last (final) one...
phylo<-read.tree(text=ASI$tree[length(ASI$tree)])
# ...and draw it growing upwards, ignoring branch lengths.
plot(phylo, use.edge.length=FALSE, direction="upwards")
|
a26292d5e92137741e47ea3138a08b4fec23e3fa | 769898772e7225264fd942b2e5a666af3105d3a1 | /R/quadrats.R | 2c5954d2205afad34b8cff5505e8b8cdb71cc3bb | [] | no_license | cran/spatialEco | 3fa4393496453b091c547cc7601a984e54bf2be6 | 22944d790b25451c848d420b61d386471073b1ee | refs/heads/master | 2023-07-08T05:04:12.117110 | 2023-06-30T07:40:02 | 2023-06-30T07:40:02 | 30,218,937 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,736 | r | quadrats.R | #' @title Quadrats
#' @description Creates quadrat polygons for sampling or analysis
#'
#' @param x A sp or sf polygon object defining extent
#' @param s Radius defining single or range of sizes of quadrats
#' @param n Number of quadrats
#' @param r A rotation factor for random rotation, default is NULL
#' @param sp (FALSE | TRUE) Output sp class object
#'
#' @return an sf or sp polygon object with rotated polygon(s)
#'
#' @note
#' The radius (s) parameter can be a single value or a range of values,
#' representing a randomization range of resulting quadrat sizes. The
#' rotation (r) parameter can also be used to defined a fixed rotation or
#' random range of quadrat rotations. You can specify each of these parameters
#' using an explicit vector that will be sampled eg., seq(100,300,0.5)
#'
#' @examples
#' library(sf)
#' library(terra)
#'
#' # read meuse data and create convex hull
#' data(meuse, package = "sp")
#' meuse <- st_as_sf(meuse, coords = c("x", "y"),
#'                   crs = 28992, agr = "constant")
#' e <- st_convex_hull(st_union(meuse))
#'
#' # Fixed size 250 and no rotation
#' s <- quadrats(e, s = 250, n = 10)
#' plot(st_geometry(s))
#'
#' \donttest{
#' # Variable sizes 100-300 and rotation of 0-45 degrees
#' s <- quadrats(e, s = c(100,300), n = 10, r = c(0,45))
#' plot(st_geometry(s))
#'
#' # Variable sizes 100-300 and no rotation
#' s <- quadrats(e, s = c(100,300), n = 10)
#' plot(st_geometry(s))
#' }
#'
#' @export quadrats
quadrats <- function(x, s = 250, n = 100, r = NULL, sp = FALSE) {
  if(!is.null(r)){
    rrange = range(r)
    if(min(rrange) < 0 | max(rrange) > 360)
      stop("rotation parameter is out of 0-360 range")
  }
  quadrat <- list()
  for(i in seq_len(n)) {
    # Draw this quadrat's radius: fixed value, uniform draw over a
    # two-value range, or a sample from an explicit vector of sizes.
    if(length(s) == 1) {
      ss = s
    } else if(length(s) == 2) {
      ss = sample(s[1]:s[2],1)
    } else {
      ss = sample(s,1)
    }
    if(!is.null(r)){
      # Draw the rotation angle the same way.
      # Bug fix: the original tested/sampled the not-yet-defined `rr`
      # (length(rr) > 2 / sample(rr,1)), which raised "object 'rr' not
      # found" whenever `r` was a vector of more than two candidate angles.
      if(length(r) == 1) {
        rr = r
      } else if(length(r) == 2) {
        rr = sample(r[1]:r[2],1)
      } else {
        rr = sample(r,1)
      }
    }
    # Random point in x, buffered to radius ss, then squared off via its
    # bounding box to form the quadrat polygon.
    p <- sf::st_buffer(sf::st_sample(x, size=1, type="random"), ss)
    p <- sf::st_as_sf(sf::st_as_sfc(sf::st_bbox(p)))
    p$dist <- ss
    if(!is.null(r)) {
      # rotate.polygon is a package-internal helper
      p <- rotate.polygon(p, angle = rr)
      p$angle <- rr
      quadrat[[i]] <- p
    } else {
      quadrat[[i]] <- p
    }
  }
  quadrat <- do.call("rbind", quadrat)
  sf::st_crs(quadrat) <- sf::st_crs(x)
  # Bug fix: the original converted the loop-local `p` (last quadrat only)
  # and then discarded the result; convert the full result set instead so
  # sp = TRUE actually returns an sp-class object as documented.
  if(sp) quadrat <- sf::as_Spatial(quadrat)
  return(quadrat)
}
|
2f89933510295f57a91771a0bd4af4411eae64db | 9862557e96982fa324f80991529990acb933db91 | /R/data.R | 6937a8043a21146dbd8b2c98ff90f2b04bbf4350 | [] | no_license | emilyrosecamp/bulhydrorap | 5530b7eb8b74f18739a2483b0708a4030db2f86a | f7646656fac482ce2284b06313730427b0933034 | refs/heads/master | 2020-05-21T00:28:57.428156 | 2019-05-21T09:38:53 | 2019-05-21T09:38:53 | 185,829,056 | 0 | 0 | null | 2019-05-21T09:38:55 | 2019-05-09T15:50:07 | R | UTF-8 | R | false | false | 383 | r | data.R | #' Donnees de base de la banque Hydro pour la Loire
#'
#' Monthly discharge of the Loire river over more than 100 years, measured
#' at Saumur (station code L8000020), from the Banque Hydro database.
#'
#' @format Data frame with 103 observations and 27 variables
#'
#' \describe{
#'
#'
#' \item{Mois.}{Discharge for the given month, in m3/s}
#' \item{Qualité.}{Validation level of the measurement}
#'
#' }
#' @source \url{http://banquehydro.fr}
"bulhydro"
|
99b4aaa45d6c4821b1a2fd3982a49173e91666f3 | 98ef022da9ba11f4ce9fcd9de5a10ee503e9bbf0 | /gene_means.R | 96079175a588244c215827d6f7b0ed66bbdb22a3 | [] | no_license | VAI3064/PhD-Code | 11f290985852a86e306ebe6dbe6b496bf2e91f59 | 6ead5c4b67009b8062d2db3a05998641d2b4c532 | refs/heads/master | 2016-09-06T15:36:01.958772 | 2015-08-13T10:44:09 | 2015-08-13T10:44:09 | 39,823,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | gene_means.R | gene_means<- function (x)
{
  # Row-wise means of the expression matrix x (one value per gene/row).
  # Replaces the original grow-in-a-loop mean(x[i,]) with vectorized
  # rowMeans(); unname() preserves the original's unnamed result, and the
  # value is returned invisibly, matching the original function whose last
  # statement was an assignment (t_means <- t(means)). The result is a
  # 1 x nrow(x) matrix, as t(means) was.
  # NOTE(review): assumes x is a numeric matrix; with a data.frame the
  # original mean(x[i, ]) call is unreliable in modern R - confirm callers.
  invisible(t(unname(rowMeans(x))))
}
|
b50f1f54429498cb9b7ee4992880f38514aafbc0 | 58dcdd81e76e3fa8b6a14f83462a824b274eea14 | /jehan_scrapy/jehan_scrapy/jehan_lda (1).R | 9215a1c7c28feaefd50fecd48868e965b1020423 | [] | no_license | reflective99/Ind-Research | 40bd756b7be455f327c2c95944487f957b3ab73e | bb91fba80de66398e04d06309a9bb1353f5c0e4a | refs/heads/master | 2021-01-10T05:06:49.383422 | 2016-03-01T23:18:02 | 2016-03-01T23:18:02 | 52,917,371 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,706 | r | jehan_lda (1).R | library(snowfall)
library(jsonlite)
# Load the scraped articles (one row per article, with a `text` column).
articles <- fromJSON("articles.json", flatten=TRUE)
# Get all the text bodies as vec from the articles dataframe
texts.vec = articles$text #run on 1000 entries first
# Name the documents 1..964 (964 = corpus size here - confirm if it changes).
i <- seq(1,964, 1)
names(texts.vec) = i[1:964]
texts.vec[1]
texts.vec[964]
# Get source functions
source("MakeDtmFunctions.R")
source("TopicModelingFunctions.R")
# Build the document-term matrix and merge singular/plural term forms
# (Vec2Dtm / DepluralizeDtm come from the sourced helper files).
dtm = Vec2Dtm(vec = texts.vec)
dtm = DepluralizeDtm(dtm = dtm)
dim(dtm)
# Per-term corpus frequency (tf) and document frequency (df).
tf.mat = data.frame(term = colnames(dtm), tf = colSums(dtm), df = colSums(dtm>0), stringsAsFactors = F)
#^ could add idf column
# Keep mid-frequency terms: present in >= 5 docs but < half of all docs.
keep.terms = tf.mat$term[tf.mat$df>=5 & tf.mat$df < nrow(dtm)/2]
# Func to get rid of words less than length 4
library(stringr)
# Drop short terms from a character vector.
# Bug fix: the original if/else appended `i` to the result in BOTH
# branches, so no term was ever removed. Terms of length <= 4 are now
# actually dropped.
# NOTE(review): the surrounding comment says "less than length 4" but the
# original test was str_length(i) > 4 (i.e. it drops length <= 4); the
# written cutoff is kept here - confirm the intended threshold.
getRidOfLessThan4 = function(terms){
  # vectorized filter (base nchar() matches stringr::str_length() on plain
  # character vectors); also avoids growing a vector with c() in a loop
  terms[nchar(terms) > 4]
}
keep.terms = getRidOfLessThan4(keep.terms)
# Restrict the DTM to the kept vocabulary and drop near-empty documents
# (fewer than 20 tokens) so Gibbs sampling has enough data per document.
dtm = dtm[,keep.terms]
doc.length = rowSums(dtm)
which(doc.length==0)
dropped.docs=rownames(dtm)[doc.length <20]
dtm = dtm[!rownames(dtm) %in% dropped.docs,]
dim(dtm)
str(dtm)
# Convert back to plain-text documents and lexicalize for the lda package.
new.docs = Dtm2Docs(dtm.sparse = dtm, parallel = F, cpus = 8)
lex = lexicalize(doclines = new.docs, sep = " ",vocab = keep.terms )
rm(new.docs);gc()
head(lex)
#
# Candidate topic counts: 10, 25, 50..250; fit one collapsed-Gibbs LDA
# model per k on 8 parallel workers (snowfall).
k.list = seq(50, 250, by = 50)
k.list = c(10,25, k.list)
sfInit(parallel = T, cpus = 8)
sfExport(list=c("lex","keep.terms"))
sfLibrary(lda)
models = sfLapply(k.list,function(k){
  result = lda.collapsed.gibbs.sampler(documents = lex, K = k, vocab = keep.terms, num.iterations = 5,
                                       alpha = 5/k,eta = .05, compute.log.likelihood = T)
})
#test = lda.collapsed.gibbs.sampler(documents = lex, K = 10, vocab = keep.terms, num.iterations = 5, alpha = .01,eta = .05, compute.log.likelihood = T)
##
sfStop()
names(models) = paste("k.", k.list, sep="")
plot(models$k.250$log.likelihoods[2,],type="l")
##models obtained
View(models$k.250$topics)
model.output= lapply(models, function(x){ ExtractLdaResults_lda(lda.result = x, docnames = rownames(dtm))})
str(model.output)
#choose k
# Model-selection metrics per k: R-squared, probabilistic coherence and
# final log-likelihood (helpers come from TopicModelingFunctions.R).
r2 = lapply(model.output, function(x){TopicModelR2(dtm.sparse = dtm, topic.terms = x$topic.terms, doc.topics = x$doc.topics, normalize = F, parallel = T,cpus = 8)})
str(r2[[2]])
sfInit(parallel=T,cpus = 8)
sfExport("dtm")
sfSource("TopicModelingFunctions.R")
pcoh = sfLapply(model.output, function(x){
  result = apply(x$topic.terms, 1, function(y){
    ProbCoherence(topic = y,M = 5,dtm.sparse = dtm)
  })
  result
})
sfStop()
metrics.mat = data.frame(k = k.list, ll=sapply(model.output, function(x){
  x$likelihood[2,ncol(x$likelihood)]}),
  r2 = sapply(r2, function(x) x$r2),
  pcoh = sapply(pcoh,median),
  stringsAsFactors = F)
plot(metrics.mat$k, metrics.mat$ll, type="o")
plot(metrics.mat$k, metrics.mat$r2, type="o")
plot(metrics.mat$k, metrics.mat$pcoh, type="o")
# Inspect the k = 10 model: top 3 and top 10 terms per topic.
top.terms3 = GetTopTerms(topic.terms = model.output$k.10$topic.terms, M = 3)
View(top.terms3)
top.terms10 = GetTopTerms(topic.terms = model.output$k.10$topic.terms, M = 10)
View(top.terms10)
#summary and visualization
topic.summary = data.frame(topic = colnames(top.terms10),
  top.terms = apply(top.terms10, 2, function(x){paste(x, collapse = " | ")}),
  pcoh = pcoh$k.10,
  prevalence = colSums(model.output$k.10$doc.topics)/sum(colSums(model.output$k.10$doc.topics))*100,
  stringsAsFactors = F)
View(topic.summary)
# One word cloud per topic for the k = 100 model.
# NOTE(review): outfilepath = "/" writes to the filesystem root - confirm.
for(j in 1:nrow(model.output$k.100$topic.terms)){
  TopicWordCloud(term.freq.vec = model.output$k.100$topic.terms[j,],
                 title = rownames(model.output$k.100$topic.terms)[j],
                 outfilepath = "/" )
}
|
5748f0227975bf78eddf8f3305aac1e20e5e2365 | 92db238599d9d7c9777bd99a80285963ee7e0d94 | /man/dg.BlockEdge-class.Rd | c31d54e6bb20399a6751182fa08a9de12d27d2b1 | [] | no_license | cran/dynamicGraph | 2225028c6396c8478ab23d5dabbc41c81b0f2de1 | 4b21ec7d229979dd8fd32e1ef32f5cd9952f7e77 | refs/heads/master | 2021-01-02T22:50:36.479678 | 2010-01-30T00:00:00 | 2010-01-30T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,049 | rd | dg.BlockEdge-class.Rd | \name{dg.BlockEdge-class}
\docType{class}
\alias{dg.BlockEdge-class}
\alias{nodeTypesOfEdge,dg.BlockEdge-method}
\alias{oriented<-,dg.BlockEdge-method}
\alias{oriented,dg.BlockEdge-method}
\alias{newBlockEdge}
\alias{newBlockEdge}
\alias{dg.BlockEdge}
\title{Class dg.BlockEdge}
\description{ The class for edges between blocks and for
edges between vertices and blocks. }
% \section{Objects from the Class}{
% Objects can be created by calls of the form \code{new("dg.BlockEdge", ...)}.
% }
% \usage{
% newBlockEdge(node.indices, nodes = NULL, width = 2,
% color = "DarkOliveGreen", oriented = NA, type = NULL,
% label = ifelse(is.null(nodes), "",
% paste(Labels(nodes), collapse = "~")),
% dash = "")
% }
% \arguments{
% \item{node.indices}{ Vector with \code{abs(node.indices)}
% the indices of the nodes of the block edge.
% If the index is positiv then the node is a vertex,
% else it is a block. }
% \item{nodes}{ List with the \code{nodes} of the block edge. Used for
% setting the default label. }
% \item{width}{ Numeric for the \code{width} of the block edge. }
% \item{color}{ Text string with the \code{color} of the block edge. }
% \item{oriented}{ Logical. % If TRUE then the block edge is \code{oriented}.
% }
% \item{type}{ Text string with the \code{type} of the block edge. }
% \item{label}{ Text string with the \code{label} of the block edge. }
% \item{dash}{ Text string for the \code{color} pattern of the edge. }
% }
\details{
The function is used in \code{\link{returnBlockEdgeList}}.
\code{\link{dynamicGraphMain}} will automatic update block edges
when vertices are moved between blocks.
}
\section{Slots}{
\describe{
\item{\code{oriented}:}{Object of class \code{"logical"},
see \code{"dg.VertexEdge"}. }
\item{\code{vertex.indices}:}{Object of class \code{"numeric"},
see also \code{"dg.Edge"}.
Vector with \code{abs(vertex.indices)}
the indices of the nodes of the block edge.
If the index is positive then the node is a vertex,
else it is a block. }
\item{\code{width}:}{Object of class \code{"numeric"},
see \code{"dg.Edge"}. }
\item{\code{dash}:}{Object of class \code{"character"},
see \code{"dg.Edge"}. }
\item{\code{color}:}{Object of class \code{"character"},
see \code{"dg.Edge"}. }
\item{\code{label}:}{Object of class \code{"character"},
see \code{"dg.Edge"}. }
\item{\code{label.position}:}{Object of class \code{"numeric"},
see \code{"dg.Edge"}. }
}
}
\section{Extends}{
% Class \code{"dg.Edge"}, directly.
% Class \code{"dg.Node"}, by class \code{"dg.Edge"}.
Class \code{"dg.Edge"}, directly.
Class \code{"dg.Node"}, directly.
}
\section{Methods}{
\describe{
\item{nodeTypesOfEdge}{\code{signature(object = "dg.BlockEdge")}:
Extract the types ("super classes": \code{"Vertex"} or \code{"Block"})
of the vertices (nodes) of the edge. }
\item{oriented<-}{\code{signature(x = "dg.BlockEdge")}: ... }
\item{oriented}{\code{signature(object = "dg.BlockEdge")}: ... }
% \item{propertyDialog}{\code{signature(object = "dg.BlockEdge")}: ... }
% \item{propertyDialog}{\code{signature(object = "dg.BlockEdge")}: ... }
}
}
\author{Jens Henrik Badsberg}
\seealso{
% \code{\link{newBlockEdge}},
\code{\link{returnBlockEdgeList}},
\code{\link{dg.Edge-class}}.
}
\note{
The methods of \code{\link{dg.Edge}} also applies for \code{dg.BlockEdge}.
The method \code{new} also accepts the argument \code{vertices} or
\code{vertexList}. The \code{label} is then extracted from these vertices.
The length of \code{vertices} should match \code{vertex.indices}, where
\code{vertex.indices} is used to select vertices from \code{vertexList}.
}
% \seealso{\code{\link{dg.VertexEdge-class}}}
% # "newBlock" <-
% # function (stratum = 0, index = 0, position = matrix(rep(0, 6),
% # ncol = 3), closed = FALSE, visible = TRUE, color = "Grey",
% # label = "Label", label.position = c(0, 0, 0), ancestors = NULL,
% # parent = NULL)
% # {
% # result <- new("dg.Block", stratum = stratum, index = index,
% # position = position, closed = closed, visible = visible,
% # color = color, label = label, label.position = label.position,
% # ancestors = if (is.null(ancestors))
% # 0
% # else ancestors, parent = if (is.null(parent))
% # if (is.null(ancestors))
% # 0
% # else ancestors[length(ancestors)]
% # else parent)
% # return(result)
% # }
% # "newBlockEdge" <-
% # function (node.indices, nodes = NULL, width = 2, color = "DarkOliveGreen",
% # oriented = NA, type = NULL, label = ifelse(is.null(nodes),
% # "", paste(Labels(nodes), collapse = "~")), dash = "")
% # {
% # result <- new("dg.BlockEdge", vertex.indices = node.indices,
% # width = width, color = color, oriented = oriented, label = label,
% # dash = dash)
% # return(result)
% # }
% vertices <- returnVertexList(paste("V", 1:4, sep = ""))
% # block <- newBlock(stratum = 1)
% block <- new("dg.Block", stratum = 1)
% # Blockedge <- newBlockEdge(c(4, -1),
% # new("dg.VertexList", list(vertices[[1]], block)))
\examples{
vertices <- returnVertexList(paste("V", 1:4, sep = ""))
block <- new("dg.Block", stratum = 1)
blockedge <- new("dg.BlockEdge", vertex.indices = c(4, -1),
vertices = new("dg.VertexList", list(vertices[[1]], block)))
str(blockedge)
color(blockedge)
label(blockedge)
labelPosition(blockedge)
width(blockedge)
nodeIndicesOfEdge(blockedge)
nodeTypesOfEdge(blockedge)
color(blockedge) <- "Black"
label(blockedge) <- "V1~1"
labelPosition(blockedge) <- c(0, 1, 2)
width(blockedge) <- 1
nodeIndicesOfEdge(blockedge) <- c(1, -1)
str(blockedge)
}
\keyword{classes}
|
5795ce361d691441abaa4865725bbaa45a945699 | f27a544ae69a515f59459d34fdafad6056d3b90e | /racecar/man/oilpressure.Rd | 3ecbd0ed170f796dfab35089afcb3f1d008a5c36 | [] | no_license | PHP2560-Statistical-Programming-R/r-package-racecar | 82ecc554cb228c4ab49b404fd1432574a6f64bf8 | b5375272144a85953bd81a11123344a7dcdf19a9 | refs/heads/master | 2021-08-31T16:13:35.546194 | 2017-12-22T01:38:13 | 2017-12-22T01:38:13 | 111,433,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,355 | rd | oilpressure.Rd | \name{oilpressure}
\alias{oilpressure}
\title{
oilpressure
}
\description{
Creates a plot of the oil pressure around the track
}
\usage{
oilpressure(data, laps = 1, startdist = min(data$Distance) , enddist = max(data$Distance))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{a dataframe that has been output from cleanSingleLap or cleanMultiLap}
\item{laps}{the number of laps that you would like to look at}
\item{startdist}{the initial position of the track that you would like to look at plots for, the default is the beginning of the track}
\item{enddist}{the ending position of the track that you would like to look at plots for, the default is the ending of the track}
}
\value{
Returns a plot of the oil pressure along the track
}
\author{
Blain Morin
}
\examples{
oilpressure <- function(data, laps = 1, startdist = min(data$Distance) , enddist = max(data$Distance)){
data %>%
filter(Lap == laps) %>%
filter(Distance >= startdist) %>%
filter(Distance <= enddist) %>%
rename(oilpress = "Oil Pressure_Cal") %>%
ggplot(aes( x = GPS_Latitude, y = GPS_Longitude)) + geom_point(aes(color = oilpress))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
e46608d0bae9fcaeab4f3630e83ecbff2e4f47b2 | d8e775429487eefa8b4e5731b4d50d4a95d20cd8 | /R/MeasureSurvMSE.R | ca53831c20fda882318cde023cc4447c5eb8b716 | [
"MIT"
] | permissive | sands58/mlr3proba | 766dd7d977badcd81f6efdeab1e51a2d661a0859 | 901e3bc04d4eaacc2ce28126c97df16df736b5b6 | refs/heads/master | 2021-05-17T00:48:58.863868 | 2020-03-25T22:09:24 | 2020-03-25T22:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 936 | r | MeasureSurvMSE.R | #' @template surv_measure
#' @templateVar title Mean Squared Error
#' @templateVar fullname MeasureSurvMSE
#'
#' @description
#' Calculates the mean squared error (MSE).
#'
#' The MSE is defined by
#' \deqn{\frac{1}{n} \sum ((t - \hat{t})^2)}{1/n \sum ((t - t*)^2)}
#' where \eqn{t} is the true value and \eqn{\hat{t}}{t*} is the prediction.
#'
#' Censored observations in the test set are ignored.
#'
#' @family response survival measures
#' @export
# R6 class implementing the survival MSE measure described in the roxygen
# block above; registered with mlr3 via its MeasureSurv parent.
MeasureSurvMSE = R6::R6Class("MeasureSurvMSE",
  inherit = MeasureSurv,
  public = list(
    #' @description Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      super$initialize(
        id = "surv.mse",
        # A sum of squared errors is bounded below by 0 and unbounded above.
        range = c(0, Inf),
        # Lower MSE indicates a better model.
        minimize = TRUE,
        # Requires a deterministic survival-time prediction ("response"),
        # not a distribution.
        predict_type = "response"
      )
    }
  ),
  private = list(
    # Score a prediction object: mean of the per-observation squared errors
    # computed by surv_mse() (per the docs above, censored observations in
    # the test set are ignored).
    .score = function(prediction, ...) {
      mean(surv_mse(prediction$truth, prediction$response)$mse)
    }
  )
)
|
bf4eea91f24e9769e2655e026ea766c7a175ef5a | d49a3d00c8e54a0bc581b262757e0d9625b3dba9 | /plot4.R | f8eba9e88c3275e499eacb0aac9fb3edf521e962 | [] | no_license | mleski/ExData_Plotting1 | 7c3c14f844610d1da0102eea01d59912e003ad9c | 1dc0048bf1f9d05be1eba0813d31e5f139999623 | refs/heads/master | 2021-07-14T02:36:14.647712 | 2017-10-16T00:48:38 | 2017-10-16T00:48:38 | 106,341,059 | 0 | 0 | null | 2017-10-09T22:18:27 | 2017-10-09T22:18:26 | null | UTF-8 | R | false | false | 1,434 | r | plot4.R | #Calculate memory requirements in GB
mem_need <- 2075259 * 9 * 8 / 2^20 / 1000; mem_need
#Read in data
library(dplyr)
library(lubridate)
getwd()
setwd("/Users/EZ/Documents/Data/Exercises/Coursera/ExploratoryDataAnalysis/assign1")
electric <- read.delim("household_power_consumption.txt", sep=";", na.strings="?")
#Convert date to date/time class and get only data for Feb 1-Feb 2, 2007
electric$Date1 <- strptime(paste(electric$Date, electric$Time, sep=" "), format="%d/%m/%Y %H:%M:%S", tz="")
feb<-interval(ymd_hms("2007-02-01 00:00:00", tz=""), ymd_hms("2007-02-02 23:59:59", tz=""))
electric<-subset(electric, electric$Date1 %within% feb) %>%
select(Date1, Global_active_power:Sub_metering_3)
#Create Plot 4 and Export to PNG
png(filename="plot4.png")
par(mfrow=c(2,2), xpd=FALSE)
plot(electric$Date1, electric$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
plot(electric$Date1, electric$Voltage, type="l", ylab="Voltage", xlab="datetime")
plot(electric$Date1, electric$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(electric$Date1, electric$Sub_metering_2, col="red")
lines(electric$Date1, electric$Sub_metering_3, col="blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1)
plot(electric$Date1, electric$Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="datetime")
dev.off() |
d4f174ee77d3fe973f09e66d2f6ba46b44402b9b | ba8c93066b190808f70d54386359ee015639ca33 | /cpfs/man.r | cd2a417db5a51095aa19fdc9fad1c1152ceb770b | [] | no_license | unix-history/tropix-cmd | 0c0b54ae35b9e48c63aca8a7ac06e7935dd5b819 | 244f84697354b1c0e495a77cdff98549875d1b9f | refs/heads/master | 2021-05-27T22:06:51.390547 | 2014-11-06T17:41:20 | 2014-11-06T17:41:20 | 26,281,551 | 1 | 2 | null | null | null | null | ISO-8859-1 | R | false | false | 1,240 | r | man.r | .bp
.he 'CPFS (cmd)'TROPIX: Manual de Referência'CPFS (cmd)'
.fo 'Atualizado em 15.01.99'Versão 3.1.0'Pag. %'
.b NOME
.in 5
.wo "cpfs -"
copia sistemas de arquivos
.br
.in
.sp
.b SINTAXE
.in 5
.(l
cpfs <fonte> <destino>
.)l
.in
.sp
.b DESCRIÇÃO
.in 5
O comando "cpfs" copia sistemas de arquivos.
A cópia é feita "imagem", isto é, não é feita nenhuma
alteração no conteúdo copiado.
.sp
Tanto o sistema de arquivos <fonte> como o <destino> devem
ser dispositivos não estruturados (isto é, de caracteres).
Se os sistemas de arquivos não tiverem o mesmo tamanho,
o número de blocos copiados é o do menor sistema de arquivos.
.sp
O sistema de arquivo destino não pode estar montado.
É preferível que o sistema de arquivo fonte também esteja não montado,
no entanto a cópia é permitida, após uma advertência.
.in
.sp
.b OBSERVAÇÃO
.in 5
Nesta versão do comando, o conteúdo é copiado em unidades de 64 KB.
.sp
Como a tabela de partições não é atualizada automaticamente,
pode ocorrer (após uma cópia) que o tipo do sistema de arquivos
não corresponda mais ao tipo indicado na tabela de partições.
.in
.sp
.b
VEJA TAMBÉM
.r
.in 5
.wo "(cmd): "
mkfs, fsck
.br
.in
.sp
.b ESTADO
.in 5
Efetivo.
.in
|
223a720327a19a35c5db6ed03a7e162719c89601 | 2da2406aff1f6318cba7453db555c7ed4d2ea0d3 | /inst/snippet/concrete-rand03-fig.R | e1016f446f8365e14bc25b029e42c39a55c1c00c | [] | no_license | rpruim/fastR2 | 4efe9742f56fe7fcee0ede1c1ec1203abb312f34 | d0fe0464ea6a6258b2414e4fcd59166eaf3103f8 | refs/heads/main | 2022-05-05T23:24:55.024994 | 2022-03-15T23:06:08 | 2022-03-15T23:06:08 | 3,821,177 | 11 | 8 | null | null | null | null | UTF-8 | R | false | false | 153 | r | concrete-rand03-fig.R | SSplot(
lm(strength ~ limestone + water, data = Concrete),
lm(strength ~ limestone + rand(7), data = Concrete), n = 1000)
last_plot() + xlim(0, 2)
|
5eee4f1284e31a7872d80be78a482e44f52f8d2f | aff059f343d2b1c9d61b5c1091d0945b6a5cd04e | /tests/testthat/setup.R | 68253ce060b31e0b92f81c29620a6a5511fb38a3 | [
"MIT"
] | permissive | ropensci/rnassqs | 96dc104a525998072ba36422b0df85d3abe75569 | 287724e86208fd031acbed9e8a1dd41901ae1ab4 | refs/heads/master | 2023-08-31T20:42:48.578667 | 2023-08-29T15:20:02 | 2023-08-29T15:20:02 | 37,335,585 | 23 | 10 | NOASSERTION | 2023-09-01T13:25:26 | 2015-06-12T17:37:25 | R | UTF-8 | R | false | false | 1,823 | r | setup.R | library(httptest)
library(here)
# Load the Quick Stats API key: prefer the NASSQS_TOKEN environment
# variable, and fall back to a local api-key.txt file (not checked in)
# when the variable is absent or malformed. A valid key is 36 characters.
api_key <- Sys.getenv("NASSQS_TOKEN")
api_file <- here::here("tests/testthat/api-key.txt")
# Reuse `api_key` instead of re-querying the environment, and use the
# scalar short-circuit operator `&&` for the scalar `if` condition.
if (nchar(api_key) != 36 && file.exists(api_file)) {
  Sys.setenv(NASSQS_TOKEN = readLines(api_file))
}

# Canonical query parameters shared across the test suite.
params <- list(
  agg_level_desc = "STATE",
  commodity_desc = "CORN",
  domaincat_desc = "NOT SPECIFIED",
  state_alpha = "VA",
  statisticcat_desc = "AREA HARVESTED",
  year = "2012"
)
### Generate error response data objects if needed
#testparams <- params
## 400 error
# testparams$year <- 2102
# query <- list(key = api_key)
# query <- append(query, flatten(testparams))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
#saveRDS(r, file = test_path("testdata", "qsresponse_400.rds"))
# r <- httr::GET("http://httpbin.org/status/400")
# saveRDS(r, file = test_path("testdata", "response_400.rds"))
# # 413 error
# query <- list(key = api_key)
# query <- append(query, flatten(list(year__GET = 2000)))
# r <- httr::GET("https://quickstats.nass.usda.gov/api/api_GET", query = query)
# saveRDS(r, file = test_path("testdata", "qsresponse_413.rds"))
#r <- httr::GET("http://httpbin.org/status/413")
#saveRDS(r, file = test_path("testdata", "response_413.rds"))
# 429 error
# r <- httr::GET("http://httpbin.org/status/429")
# saveRDS(r, file = test_path("testdata", "response_429.rds"))
# Run `expr` against httptest's recorded fixtures with a fixed fake API
# key, so mocked request URLs are reproducible and no real credential can
# leak into recordings. The caller's token is restored -- or removed, if
# it was not set at all -- when the call exits, even on error.
with_mock_api <- function(expr) {
  old_token <- Sys.getenv("NASSQS_TOKEN", unset = NA)
  Sys.setenv(NASSQS_TOKEN = "API_KEY")
  on.exit(
    if (is.na(old_token)) {
      # The variable did not exist before this call: unset it instead of
      # leaving an empty-string value behind (the previous restore logic
      # left NASSQS_TOKEN set to "").
      Sys.unsetenv("NASSQS_TOKEN")
    } else {
      Sys.setenv(NASSQS_TOKEN = old_token)
    },
    add = TRUE
  )
  httptest::with_mock_api(expr)
}
# Evaluate `expr` only when a plausible (36-character) Quick Stats API key
# is present in the environment; otherwise do nothing and return NULL.
# `expr` is lazy, so it is never evaluated when the key is missing.
with_authentication <- function(expr) {
  token <- Sys.getenv("NASSQS_TOKEN")
  if (nchar(token) != 36) {
    return(invisible(NULL))
  }
  expr
}
|
b5de4b72d1cb8c9448383386976ce3a73e146c2a | 43c9d57c872d49c3fe823429bd51e7d7124bf35f | /R/hcluster.R | 2ff745b15c8b91f2ee53b232e6118cd62d19d4cd | [] | no_license | cran/amap | 71134bfaf0b75ba396676d6e1781febc5b5d6098 | 5fc3186facf5afc29d0e28b4d93dae5123407720 | refs/heads/master | 2022-11-10T07:43:34.782272 | 2022-10-28T05:55:22 | 2022-10-28T05:55:22 | 17,694,352 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,637 | r | hcluster.R | ## Hierarchical clustering
##
## Created : 18/11/02
## Last Modified : Time-stamp: <2018-05-12 16:48:19 (antoine)>
##
## This function is a "mix" of function dist and function hclust.
##
## Author : Antoine Lucas
##
# Hierarchical clustering of the rows of `x`, with both the distance
# computation and the agglomeration performed in compiled C code
# (C_hcluster) on `nbproc` processes. Exposed under two names:
# hcluster() and hclusterpar().
#
# Args:
#   x               numeric matrix (or Biobase exprSet) of observations.
#   method          distance measure; partially matched against METHODS.
#   diag, upper     accepted for dist() compatibility; `diag` is not
#                   forwarded (the .C call hard-codes FALSE) and `upper`
#                   is unused in this function.
#   link            agglomeration method; partially matched.
#   members         optional per-observation weights (default: all 1).
#   nbproc          number of parallel processes for the C routine.
#   doubleprecision use double (TRUE) or single (FALSE) precision in C.
#
# Returns: an object of class "hclust" (merge/height/order/labels/...).
hclusterpar <- hcluster <- function (x, method = "euclidean", diag = FALSE, upper = FALSE, link = "complete", members = NULL, nbproc = 2, doubleprecision = TRUE)
{
  # Bioconductor expression sets are reduced to their expression matrix.
  if(inherits(x, "exprSet"))
    x <- Biobase::exprs(x)
  ## take from dist
  # Accept the common misspelling "euclidian".
  if (!is.na(pmatch(method, "euclidian")))
    method <- "euclidean"
  METHODS <- c("euclidean", "maximum", "manhattan", "canberra",
               "binary","pearson","correlation","spearman","kendall",
               "abspearson","abscorrelation")
  # Partial-match the distance name to its 1-based index for the C code.
  method <- pmatch(method, METHODS)
  if (is.na(method))
    stop("invalid distance method")
  if (method == -1)
    stop("ambiguous distance method")
  N <- nrow(x <- as.matrix(x))
  #take from hclust
  METHODSLINKS <- c("ward", "single", "complete", "average", "mcquitty",
                    "median", "centroid","centroid2")
  link <- pmatch(link, METHODSLINKS)
  if (is.na(link))
    stop("invalid clustering method")
  if (link == -1)
    stop("ambiguous clustering method")
  if (N < 2)
    stop("Must have n >= 2 objects to cluster")
  if (is.null(members))
    members <- rep(1, N)
  if (length(members) != N)
    stop("Invalid length of members")
  n <- N
  # Precision flag for the C routine: 1 = single, 2 = double.
  precision <- 1
  if(doubleprecision)
    precision <- 2
  # NAOK=TRUE lets missing values through to C; problems are reported
  # back through the `res` status code checked below.
  hcl <- .C(C_hcluster,
            x = as.double(x),
            nr = as.integer(n),
            nc = as.integer(ncol(x)),
            diag = as.integer(FALSE),
            method = as.integer(method),
            iopt = as.integer(link),
            ia = integer(n),
            ib = integer(n),
            order = integer(n),
            crit = double(n),
            members = as.double(members),
            nbprocess = as.integer(nbproc),
            precision = as.integer(precision),
            res = as.integer (1),
            NAOK=TRUE,
            PACKAGE= "amap")
  # Translate the C status code into an R error.
  if(hcl$res == 2)
    stop("Cannot allocate memory")
  if(hcl$res == 3)
    stop("Missing values in distance Matrix")
  if(hcl$res == 1)
    stop("Error")
  # Assemble the standard "hclust" structure from the C outputs.
  tree <- list(merge = cbind(hcl$ia[1:(N - 1)],
                             hcl$ib[1:(N - 1)]),
               height = hcl$crit[1:(N - 1)],
               order = hcl$order,
               labels = dimnames(x)[[1]],
               method = METHODSLINKS[link],
               call = match.call(),
               dist.method = METHODS[method]
               )
  class(tree) <- "hclust"
  tree
}
|
9fd15a4393f5e525eb991b96aa11a9d9f6152c7b | 7356d18cf9ab2b385fa02af33a27d6ea6a387b3c | /ROS-Examples-master/SampleSize/simulation.R | b76c589ca39d99b010d334d93e9f3d16de7923d2 | [
"CC0-1.0"
] | permissive | cleeway/Working-through-Regression-and-other-stories | 1032658cfed6a80ad4fd81ce5a0b62ab550dabdb | f3d2a98eb540b12e19cb4e3732c96d988b4e1eba | refs/heads/main | 2023-04-15T01:45:33.212759 | 2021-04-28T19:09:11 | 2021-04-28T19:09:11 | 362,594,431 | 1 | 0 | CC0-1.0 | 2021-04-28T20:11:34 | 2021-04-28T20:11:33 | null | UTF-8 | R | false | false | 1,604 | r | simulation.R | #' ---
#' title: "Regression and Other Stories: Sample size simulation"
#' author: "Andrew Gelman, Jennifer Hill, Aki Vehtari"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' toc: true
#' toc_depth: 2
#' toc_float: true
#' code_download: true
#' ---
#' Sample size simulation. See Chapter 16 in Regression and Other Stories.
#'
#' -------------
#'
#+ setup, include=FALSE
knitr::opts_chunk$set(message=FALSE, error=FALSE, warning=FALSE, comment=NA)
#' #### Load packages
library("rprojroot")
root<-has_file(".ROS-Examples-root")$make_fix_file()
library("rstanarm")
#' #### Simulated data 1: predictor range (-0.5, 0.5)
N <- 1000
sigma <- 10
y <- rnorm(N, 0, sigma)
x1 <- sample(c(-0.5,0.5), N, replace=TRUE)
x2 <- sample(c(-0.5,0.5), N, replace=TRUE)
# Bug fix: data.frame(c(y, x1, x2)) stacked all three vectors into a single
# unnamed column, so stan_glm() silently resolved y/x1/x2 from the global
# environment instead of from `fake`. Build the data frame with one named
# column per variable.
fake <- data.frame(y, x1, x2)
#' #### Fit models
fit_1a <- stan_glm(y ~ x1, data = fake, refresh = 0)
fit_1b <- stan_glm(y ~ x1 + x2 + x1:x2, data = fake, refresh = 0)
print(fit_1a)
print(fit_1b)
#' #### Simulated data 2: predictor range (0, 1)
x1 <- sample(c(0,1), N, replace=TRUE)
x2 <- sample(c(0,1), N, replace=TRUE)
# Rebuild `fake` so the refitted models actually see the new predictors.
fake <- data.frame(y, x1, x2)
#' #### Fit models
fit_2a <- stan_glm(y ~ x1, data = fake, refresh = 0)
fit_2b <- stan_glm(y ~ x1 + x2 + x1:x2, data = fake, refresh = 0)
print(fit_2a)
print(fit_2b)
#' #### Simulated data 3: predictor range (-1, 1)
x1 <- sample(c(-1,1), N, replace=TRUE)
x2 <- sample(c(-1,1), N, replace=TRUE)
fake <- data.frame(y, x1, x2)
#' #### Fit models
fit_3a <- stan_glm(y ~ x1, data = fake, refresh = 0)
fit_3b <- stan_glm(y ~ x1 + x2 + x1:x2, data = fake, refresh = 0)
print(fit_3a)
print(fit_3b)
|
c3310b5569c30a5c0c423f42ba4686e725bdc7c8 | 6cd98f2264f075565a3cdeffd1569675aa51cbe4 | /man/cqrIS_.Rd | 662dc8dbe3d643ae6beac2efcf5c749583c773f4 | [] | no_license | ZexiCAI/cqrIS | 2cc6c6b414b2dd2f6f2c2984b25b8f5fb0a2334d | 76a9dccf88617ecd86464422ff4fc1ff46775dc3 | refs/heads/master | 2022-12-09T00:43:43.138400 | 2020-06-25T11:15:44 | 2020-06-25T11:15:44 | 274,821,221 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,502 | rd | cqrIS_.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cqrIS_main.R
\name{cqrIS_}
\alias{cqrIS_}
\title{Censored Quantile Regression with Induced Smoothing (The Interface)}
\usage{
cqrIS_(Z, X, cen, xi = rep(1, length(X)), init, tau.max, grid, tol,
diverge, maxiter)
}
\arguments{
\item{Z}{Covariates}
\item{X}{Survival time, not log-transformed}
\item{cen}{Censoring indicator, 1 for failure, 0 for censored}
\item{xi}{Weights for samples, default: a 1-vector}
\item{init}{Whether Peng and Huang (2008) estimates are used as initial guess}
\item{tau.max}{Upper bound of estimated quantile}
\item{grid}{The grid size}
\item{tol}{Norm of the estimate for the update to be considered convergent}
\item{diverge}{Norm of the estimate for the update to be considered divergent}
\item{maxiter}{Maximum number of iteration}
}
\value{
This function returns a list of lists with each list containing two elements:
\itemize{
\item beta.ISest, the estimates for the parameters
\item idenLim, the identifiability limit for the data
}
}
\description{
This function fits a censored quantile regression model to the provided survival dataset.
The parameters are obtained by solving an induced-smoothed version of the estimating
equation used in Peng and Huang (2008). It also provides an interface for variance estimation
via a multiplier bootstrap approach.
}
\references{
Cai, Z. and Sit, T. (2020+),
"Censored Quantile Regression with Induced Smoothing,"
\emph{Working Paper}.
}
|
0f945d795fb7349e90f932fe3e4d9eae1aa98b49 | 1cbb1d2395055800942011b81a4629a59a342176 | /CODE_dizzys/refreshDIZZYS_2015_10_23/dizzyslan4/dizzysNewInfec.Rcheck/dizzysNewInfec-Ex.R | e494a088fd605cb745fa86778a07184ee4db8450 | [] | no_license | ttcgiang/THESE_GitHub | e783f4de184b8abc350c740e9dab83c2c493c6b4 | c13af9dfc6bfcb6270ec2fa1230dca5893ade185 | refs/heads/master | 2020-04-06T06:58:35.019224 | 2017-06-01T11:05:36 | 2017-06-01T11:05:36 | 38,418,770 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,664 | r | dizzysNewInfec-Ex.R | pkgname <- "dizzysNewInfec"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
library('dizzysNewInfec')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
cleanEx()
nameEx("coef.seirNewInfec")
### * coef.seirNewInfec
flush(stderr()); flush(stdout())
### Name: coef.seirNewInfec
### Title: Coefficient of seirNewInfec Object
### Aliases: coef
### Keywords: coefficients seirNewInfec object
### ** Examples
seirobj1<-globSEIRSimulNewInfec(N=1e7)
coef(seirobj1)
seirobj2<-globSEIRSimulNewInfec(nbVilles=3, N=c(1e7,1e6))
coef(seirobj2)
cleanEx()
nameEx("detSEIRNewInfec")
### * detSEIRNewInfec
flush(stderr()); flush(stdout())
### Name: detSEIRNewInfec
### Title: Creat a seir Object
### Aliases: detSEIRNewInfec
### Keywords: deterministic model
### ** Examples
obj<-globSEIRNewInfec(typeSIMU="deter",nbVilles=3,N=c(1e7,1e6))
plot(obj)
cleanEx()
nameEx("equiNewInfec")
### * equiNewInfec
flush(stderr()); flush(stdout())
### Name: equiNewInfec
### Title: Finding Endemic Equilibrium of a seasonally-forced SEIR/SIR
### model
### Aliases: equiNewInfec
### Keywords: SEIR/SIR model limit cycle equilibrium
### ** Examples
## The point on the limit cycle depends on the input phase value 'phi':
res<-equiNewInfec(duration=100*365,unitTIME=1,N=10e6,mu=1/(70*365),
nbCONTACT0=300,nbCONTACT1=.1,probINFECTER=0.1,sigma=1/7,gamma=1/7,phiPHASE=c(0),periDISE=365)
print(res)
cleanEx()
nameEx("globSEIRNewInfec")
### * globSEIRNewInfec
flush(stderr()); flush(stdout())
### Name: globSEIRNewInfec
### Title: Creat a seir Object
### Aliases: globSEIRNewInfec
### Keywords: deterministic model stochastic model
### ** Examples
obj<-globSEIRNewInfec()
plot(obj)
obj<-globSEIRNewInfec(typeSIMU="sto",nbVilles=3,N=c(1e7,1e6))
plot(obj)
obj<-globSEIRNewInfec(typeSIMU="deter",nbVilles=3,N=c(1e7,1e6))
plot(obj)
cleanEx()
nameEx("globSEIRSimulNewInfec")
### * globSEIRSimulNewInfec
flush(stderr()); flush(stdout())
### Name: globSEIRSimulNewInfec
### Title: Redoing or Continuing a Simulation.
### Aliases: globSEIRSimulNewInfec
### Keywords: seir model R package
### ** Examples
#STO, STO
sto<-globSEIRNewInfec(N=1e6,typeSIMU="stoch",duration=5*365,nbVilles=2)
plot(globSEIRSimulNewInfec(sto,typeSIMU="stoch",continue=TRUE,duration=5*365,nbCONTACT1=0,phiPHASE=c(pi/2,0)),col=c(1,2))
#DET, DET
det<-globSEIRNewInfec(N=10e4,typeSIMU="deter",duration=50*365)
plot(globSEIRSimulNewInfec(det,typeSIMU="deter",continue=TRUE,duration=5*365,nbCONTACT1=0.1,phiPHASE=pi))
cleanEx()
nameEx("lines.seirNewInfec")
### * lines.seirNewInfec
flush(stderr()); flush(stdout())
### Name: lines.seirNewInfec
### Title: Add Connected Line Segments of an seir object to a Plot 2D/3D
### Aliases: lines
### Keywords: lines projection on plane
### ** Examples
#creating a plot
#adding a line to the plot
globSEIRSimulNewInfec(nbVilles=2)->obj
globSEIRSimulNewInfec(nbVilles=1)->obj1
#2D
plot(obj,col="red")
lines(obj1,col="blue",lwd=2)
#3D
plot(obj,z="S",col="red",proj=list(c("time","P")))
lines(obj1,z="S",col="blue",proj=list(c("time","P")))
cleanEx()
nameEx("persNewInfec")
### * persNewInfec
flush(stderr()); flush(stdout())
### Name: persNewInfec
### Title: persNewInfec in a Metapopulation
### Aliases: persNewInfec
### Keywords: metapopulation persNewInfec
### ** Examples
obj1<-obj1<-globSEIRSimulNewInfec(nbVilles=5,N=1e5,nbCONTACT0=100,duration=365*30)
objper<-persNewInfec(obj1)
objper@persistence
cleanEx()
nameEx("plot.persNewInfec")
### * plot.persNewInfec
flush(stderr()); flush(stdout())
### Name: plot.persNewInfec
### Title: Plotting Kaplan Meier Survival Curve
### Aliases: plot
### Keywords: Kaplan–Meier curve Kaplan–Meier estimator
### ** Examples
p<-persNewInfec(globSEIRSimulNewInfec(nbVilles=5,N=1e5,nbCONTACT0=100,duration=365*30))
plot.persNewInfec(p)
x11()
plot.persNewInfec(p,curvetype="pop",col=c("green","blue"),vilabline=c(1,3))
cleanEx()
nameEx("plot.seirNewInfec")
### * plot.seirNewInfec
flush(stderr()); flush(stdout())
### Name: plot.seirNewInfec
### Title: Plotting 2D/3D a seir Object
### Aliases: plot
### Keywords: seir model
### ** Examples
obj<-globSEIRSimulNewInfec(nbVilles=3, N=1e6, nbCONTACT0=100)
plot(obj,col=c("red","blue"),lwd=2,xlab="time (day)", ylab="number of infectives")
pause()
plot(obj,z="S",col=c("red","blue"),lwd=2,xlab="time (day)", ylab="number of infectives",zlab="number of susceptible")
pause()
#plot(obj,z="S",col=c("red","blue"),lwd=2,proj=list(c("time","P"),c("time","S")),box=F,xlab="time (day)", ylab="number of infectives",zlab="number of susceptible")
cleanEx()
nameEx("pop.seirNewInfec")
### * pop.seirNewInfec
flush(stderr()); flush(stdout())
### Name: pop.seirNewInfec
### Title: Extract Values of State Variables of each City according to
### Time.
### Aliases: pop.seirNewInfec
### ** Examples
obj<-globSEIRSimulNewInfec(nbVilles=3, N=1e6)
tpobj<-pop(obj)
class(tpobj)
tpobj<-pop(obj,fct="sum")
class(tpobj)
tpobj<-pop(obj,subset=c(1,2),fct="sum")
class(tpobj)
cleanEx()
nameEx("stoSEIRNewInfec")
### * stoSEIRNewInfec
flush(stderr()); flush(stdout())
### Name: stoSEIRNewInfec
### Title: Creat a seir stochastic Object
### Aliases: stoSEIRNewInfec
### Keywords: stochastic model
### ** Examples
obj<-globSEIRNewInfec(typeSIMU="sto",nbVilles=3,N=c(1e7,1e6))
plot(obj)
### * <FOOTER>
###
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
|
feea5f6b51ba9d306f0753e3ec061f914bbc882e | a593d96a7f0912d8dca587d7fd54ad96764ca058 | /R/tidiers_ml_unsupervised_models.R | f0d1bf55cfe2ea3628ccf6abf1c9aebfe5d62866 | [
"Apache-2.0"
] | permissive | sparklyr/sparklyr | 98f3da2c0dae2a82768e321c9af4224355af8a15 | 501d5cac9c067c22ad7a9857e7411707f7ea64ba | refs/heads/main | 2023-08-30T23:22:38.912488 | 2023-08-30T15:59:51 | 2023-08-30T15:59:51 | 59,305,491 | 257 | 68 | Apache-2.0 | 2023-09-11T15:02:52 | 2016-05-20T15:28:53 | R | UTF-8 | R | false | false | 4,269 | r | tidiers_ml_unsupervised_models.R | #' Tidying methods for Spark ML unsupervised models
#'
#' These methods summarize the results of Spark ML models into tidy forms.
#'
#' @param x a Spark ML model.
#' @param ... extra arguments (not used.)
#' @name ml_unsupervised_tidiers
NULL
#' @rdname ml_unsupervised_tidiers
#' @export
tidy.ml_model_kmeans <- function(x,
                                 ...) {
  # One row per cluster: the cluster-center coordinates plus the cluster
  # size and its zero-based id (matching Spark's cluster labelling).
  n_clusters <- x$summary$k
  summary_mat <- cbind(
    x$centers,
    size = x$summary$cluster_sizes(),
    cluster = 0:(n_clusters - 1)
  )
  dplyr::as_tibble(summary_mat)
}
#' @rdname ml_unsupervised_tidiers
#' @param newdata a tbl_spark of new data to use for prediction.
#'
#' @importFrom rlang syms
#'
#' @export
augment.ml_model_kmeans <- function(x, newdata = NULL,
                                    ...) {
  # Score `newdata` -- or, by default, the training set -- and append the
  # assigned cluster as a `.cluster` column.
  if (is.null(newdata)) {
    newdata <- x$dataset
  }
  keep_cols <- c(dplyr::tbl_vars(newdata), "prediction")
  scored <- ml_predict(x, newdata)
  scored %>%
    dplyr::select(!!!syms(keep_cols)) %>%
    dplyr::rename(.cluster = !!"prediction")
}
#' @rdname ml_unsupervised_tidiers
#' @export
glance.ml_model_kmeans <- function(x,
                                   ...) {
  # Single-row model summary: number of clusters and within-cluster SSE,
  # plus the silhouette metric when the Spark version supports it.
  metrics <- dplyr::tibble(
    k = x$summary$k,
    wssse = compute_wssse(x)
  )
  add_silhouette(x, metrics)
}
#' @rdname ml_unsupervised_tidiers
#' @export
tidy.ml_model_bisecting_kmeans <- function(x,
                                           ...) {
  # One row per cluster: center coordinates, cluster size, and the
  # zero-based cluster id used by Spark.
  k <- x$summary$k
  tidy_mat <- cbind(
    x$centers,
    size = x$summary$cluster_sizes(),
    cluster = 0:(k - 1)
  )
  dplyr::as_tibble(tidy_mat)
}
#' @rdname ml_unsupervised_tidiers
#'
#' @importFrom rlang syms
#'
#' @export
augment.ml_model_bisecting_kmeans <- function(x, newdata = NULL,
                                              ...) {
  # Score `newdata` (defaulting to the training data) and attach the
  # predicted cluster as `.cluster`.
  data_tbl <- if (is.null(newdata)) x$dataset else newdata
  wanted <- c(dplyr::tbl_vars(data_tbl), "prediction")
  ml_predict(x, data_tbl) %>%
    dplyr::select(!!!syms(wanted)) %>%
    dplyr::rename(.cluster = !!"prediction")
}
#' @rdname ml_unsupervised_tidiers
#' @export
glance.ml_model_bisecting_kmeans <- function(x,
                                             ...) {
  # One-row summary of the bisecting k-means fit: cluster count and
  # within-cluster SSE, with silhouette appended when available.
  add_silhouette(
    x,
    dplyr::tibble(k = x$summary$k, wssse = compute_wssse(x))
  )
}
#' @rdname ml_unsupervised_tidiers
#' @importFrom dplyr .data
#' @export
tidy.ml_model_gaussian_mixture <- function(x, ...) {
  # Component means: transpose the `mean` entries of gaussians_df() so
  # each row is one mixture component and each column one feature, then
  # drop the `term` column introduced during the data-frame fix-up.
  center <- x$gaussians_df()$mean %>%
    as.data.frame() %>%
    t() %>%
    fix_data_frame() %>%
    dplyr::select(-"term")
  # Feature columns take their names from the model's feature list.
  names(center) <- x$feature_names
  weight <- x$weights
  size <- x$summary$cluster_sizes()
  k <- x$summary$k
  # One row per component: feature-wise mean, mixing weight, size, and
  # zero-based cluster id.
  cbind(center,
    weight = weight,
    size = size,
    cluster = 0:(k - 1)
  ) %>%
    dplyr::as_tibble()
}
#' @rdname ml_unsupervised_tidiers
#'
#' @importFrom rlang syms
#'
#' @export
augment.ml_model_gaussian_mixture <- function(x, newdata = NULL,
                                              ...) {
  # Score `newdata` (or the training set when none is given) and append
  # the predicted mixture component as `.cluster`.
  if (is.null(newdata)) {
    newdata <- x$dataset
  }
  selected <- c(dplyr::tbl_vars(newdata), "prediction")
  predictions <- ml_predict(x, newdata)
  predictions %>%
    dplyr::select(!!!syms(selected)) %>%
    dplyr::rename(.cluster = !!"prediction")
}
#' @rdname ml_unsupervised_tidiers
#' @export
glance.ml_model_gaussian_mixture <- function(x,
                                             ...) {
  # Single-row summary: number of mixture components, plus a silhouette
  # column when the Spark version provides the clustering evaluator.
  add_silhouette(x, dplyr::tibble(k = x$summary$k))
}
# Append a "silhouette" column to a glance tibble when the connected
# Spark version is equal to or greater than 2.3.0 (which introduced the
# clustering evaluator); otherwise return the tibble unchanged.
add_silhouette <- function(x, glance_tbl) {
  spark_ver <- spark_version(spark_connection(x$dataset))
  if (spark_ver < "2.3.0") {
    return(glance_tbl)
  }
  dplyr::bind_cols(
    glance_tbl,
    silhouette = ml_clustering_evaluator(x$summary$predictions)
  )
}
# Within-set sum of squared errors. Spark 3.0.0 moved this metric onto the
# training summary (training_cost); older versions expose it as the
# model's cost field.
compute_wssse <- function(x) {
  if (is_required_spark(spark_connection(x$dataset), "3.0.0")) {
    x$summary$training_cost
  } else {
    x$cost
  }
}
|
bc829e820349d2b0949586b40db6aca2870305a4 | 7cf60676fcf01e1500b43dce908dfdd09a0033d0 | /queryExperiment1.R | 922b2d2963ff456bec55f8e703ec33b297ae2d28 | [] | no_license | kristalcurtis/r-scripts | d99f619c72807c3a05bc8f9e7d009027f22bb806 | 4ae5e7db56ca3f50331fb72c431e1d09fd4475b5 | refs/heads/master | 2021-01-20T13:47:55.519433 | 2010-12-17T00:10:31 | 2010-12-17T00:10:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,963 | r | queryExperiment1.R | # Goal: use a query's own op models to predict its latency for a certain quantile.
# Arg passed to function: basePath
# Log files should already be deinterleaved & parsed.
# ie, Expects to find the following files in "basePath":
# training-logs/Thread-*.csv
# validation+-logs/Thread-*.csv
# paramFile.RData
# *: startingThread to endingThread
# +: 1 to numValidationRuns
# Values that should appear in paramFile.RData:
# startingThread
# endingThread
# numValidationRuns
# latencyQuantile: \in (0,1)
# queryType: string rep of query (eg, "thoughtstream")
# numSampleSets
# Train per-operator models for one query type and predict its latency at
# the configured quantile.  basePath must contain the parsed log files and
# paramFile.RData described in the header comments above; loading that file
# brings startingThread, endingThread, numValidationRuns, latencyQuantile,
# queryType, and numSampleSets into scope.
queryExperiment1 <- function(basePath) {
  source("experiment-functions.R")

  print("Loading params...")
  load(file = paste0(basePath, "/paramFile.RData"))

  ## Training Phase
  print("TRAINING PHASE:")
  print("Loading training data...")
  trainingData <- getTrainingData(startingThread, endingThread, basePath)

  print("Creating & saving histograms...")
  # Dispatch on the query type; bail out early on anything unknown.
  histogramBuilder <- switch(queryType,
    thoughtstream = createAndSaveThoughtstreamOpHistograms,
    userByEmail = createAndSaveUserByEmailOpHistograms,
    userByName = createAndSaveUserByNameOpHistograms,
    thoughtsByHashTag = createAndSaveThoughtsByHashTagOpHistograms
  )
  if (is.null(histogramBuilder)) {
    return("Unrecognized query type.")
  }
  histogramBuilder(basePath)

  # Sanity Check
  print("Sanity check the training data's dim:")
  print(dim(trainingData))
  print("Sanity check the # queries in the training data:")
  print(length(which(trainingData$opLevel == 3)))

  ## Validation Phase
  print("VALIDATION PHASE:")
  print("Getting validation stats...")
  getValidationStats(startingThread, endingThread, basePath, numValidationRuns, latencyQuantile)

  print("Getting predicted latency...")
  getPredictedQueryLatencyQuantiles(queryType, numSampleSets, basePath, latencyQuantile)

  error <- getPredictionError(basePath)
  print(paste("Error:", error))
  return(error)
}
|
e9a4222705080849a7af91f6ee371efc0d4ca123 | a145f59a9ddbc01ac7a6d5c1120f535c10308051 | /IO_project.R | 6f2ad4cf2dab9852c2f87b7c0207284c93a8e6c7 | [] | no_license | NBar05/IO_project | 290ec638d730b5975645474e67e6433ff87b0849 | 961423a35bb98678013481ade85f856cbb9aa930 | refs/heads/master | 2022-09-21T09:35:54.134206 | 2020-05-30T20:03:06 | 2020-05-30T20:03:06 | 266,527,595 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,934 | r | IO_project.R | library(zoo)
library(dplyr)
library(tidyr)
library(psych)
library(readxl)
library(ggplot2)
library(stargazer)
# Infix sprintf helper: the left operand is the format string and the
# right operand is a list of arguments, e.g.
#   "mh-%s.xlsx" %--% list(1:3)  ->  "mh-1.xlsx" "mh-2.xlsx" "mh-3.xlsx"
`%--%` <- function(x, y) {
  call_args <- c(list(x), y)
  do.call(sprintf, call_args)
}
# Define the time window of the scraped data, as yearmon values.
dates <- as.yearmon(2018 + 6/12 + seq(1, 21) / 12) # August 2018 -- April 2020
# Read the monthly media-holding workbooks and stack them into one table.
naming <- "mediaholdings/mh-%s.xlsx" %--% list(c(1:21))
media_holdings <- data.frame()
for (i in 1:length(naming)) {
  data <- read_xlsx(naming[i], skip = 3)
  data$Период <- dates[i]
  media_holdings <- rbind(media_holdings, data)
}
# Same for the monthly media-news CSV exports.
naming <- "media_news/news-%s.csv" %--% list(c(1:21))
media_news <- data.frame()
for (i in 1:length(naming)) {
  data <- read.csv(naming[i], header = TRUE, na.strings = "", stringsAsFactors = FALSE)
  data$Период <- dates[i]
  media_news <- rbind(media_news, data)
}
media_news$Период <- as.yearmon(media_news$Период)
# Drop resources whose themes are not general news (auto, science, tech).
media_news_c <- media_news %>%
  filter(Тематики.ресурса !=
           "Авто и мото->Издания об автомобилях, Авто и мото, Новости и СМИ->Издания об автомобилях, Новости и СМИ") %>%
  filter(Тематики.ресурса !=
           "Наука, Техника и технологии, Наука->Научные журналы и публикации, Новости и СМИ->Научные журналы и публикации, Техника и технологии->Новости Hi-Tech, Новости и СМИ") %>%
  filter(Тематики.ресурса !=
           "Техника и технологии, Новости и СМИ->Деловые новости, Техника и технологии->Новости Hi-Tech, Бизнес, Новости и СМИ, Бизнес->Деловые новости")
# Keep only newspapers and magazines (alternative filter, disabled):
# media_only_news <- media_news %>% filter(Тематики.ресурса == "Новости и СМИ, Новости и СМИ->Газеты, журналы")
#
# Add year/month columns and give the holdings table English column names.
media_holdings <- media_holdings %>% mutate(year = as.integer(format(as.Date(Период), "%Y")),
                                            month = as.integer(format(as.Date(Период), "%m")))
names(media_holdings)[1:6] <- c("name", "cross_device_visitors", "users", "daily_audience", "avg_time", "date")
# Yearly 2019 audience per holding, with a hand-coded gov flag
# (1/0/NA; presumably state ownership -- confirm against the source data).
summary_holdings <- media_holdings %>% group_by(name, year) %>% summarise(yearly_audience = sum(cross_device_visitors),
                                                                         counts = n()) %>% filter(year == 2019)
summary_holdings$gov <- c(1, 0, 1, 1, 0, 0, 1, NA, 1, NA, 0, 0, 1, 1, 0, 0, 0, 0, 0, NA, 1)
# Placeholder exclusion list: `c()` is empty, so no rows are dropped here.
summary_holdings_fin <- summary_holdings %>% arrange(gov, desc(yearly_audience)) %>%
  filter(!(name %in% c()))
# stargazer(summary_holdings, type = "latex", summary = FALSE)
names(media_news_c) <- c("name", "url", "theme", "type", "holding", "metrics", "cross_device_visitors",
                         "users", "avg_time", "mobile_audience", "daily_audience", "date")
# Herfindahl-Hirschman index (x 10000) over 2019 cross-device audience
# shares, restricted to outlets observed in all 12 months of the year.
summary_media_news <- media_news_c %>% mutate(year = as.integer(format(as.Date(date), "%Y")),
                                              month = as.integer(format(as.Date(date), "%m"))) %>%
  group_by(year, name) %>% summarise(yearly_audience = sum(cross_device_visitors),
                                     counts = n()) %>% filter(year == 2019) %>%
  filter(counts == 12) %>% mutate(share = yearly_audience / sum(yearly_audience),
                                  share_2 = share^2) %>% summarise(H = sum(share_2)*10000)
|
9d4b83fb73bf329145502a54a48625086a2c5fb6 | 731e347d0751a5607593a7064989ba94ffd9ceb7 | /R/palette_hms.R | b580b2947c0e64ee661a95efe502c83974aca72a | [] | no_license | karsfri/HMSr | 71b726e7a8eb120b2cf6165d7f3ce2b7c4ffe6ab | 9516843a99c911bd14ee51758a559428bedcb71b | refs/heads/master | 2020-12-07T21:03:39.657117 | 2020-02-20T14:24:45 | 2020-02-20T14:24:45 | 232,801,472 | 0 | 2 | null | 2020-02-19T10:07:09 | 2020-01-09T12:15:34 | R | UTF-8 | R | false | false | 1,337 | r | palette_hms.R | library(tidyverse)
#' @import tidyverse
#' @import ggplot2
#' @import lubridate
#' @import magrittr
#' @export blue
#' @export red
#' @export color_main
#' @export color_extra
#' @export palette_light
#' @export palette_medium
#' @export palette_dark
#' @export palette_hms
#' @export palette_hms_darker
# Colors for monthly report -----------------------------------------------
# Base colours (dark navy plus an accent red) and generic main/extra tones.
blue <- "#11223a"
red <- "#ff5028"
color_main <- "#11223a"
color_extra <- "#4e5a28"
# we really need more colors. This is a mixture of the new and the old palette.
palette_light <- c(
  "#11223a",
  "#8cc0c6",
  "#B44C3D",
  "#E4CD63",
  "#57967C",
  "#6D7988",
  "#C3C5B5",
  "#b39271",
  "#E5B860",
  "#325C7E",
  "#B64B6A"
)
# Derived variants: darkened, and darkened plus slightly desaturated.
# NOTE(review): the library() call and the show_col()/swatchplot() previews
# below execute when this file is sourced -- presumably intended for
# interactive use; confirm they belong in package code.
library(magrittr)
palette_medium <- palette_light %>%
  colorspace::darken()
palette_dark <- palette_light %>%
  colorspace::darken(amount = 0.2) %>%
  colorspace::desaturate(amount = 0.1)
scales::show_col(palette_light)
scales::show_col(palette_medium)
scales::show_col(palette_dark)
# Palette for the monthly reports - use for areas and columns
palette_hms <- c(
  palette_light,
  palette_dark,
  palette_medium
)
# Palette for the monthly reports - use for lines and dots
palette_hms_darker <- c(
  palette_dark,
  palette_medium,
  palette_light
)
colorspace::swatchplot(palette_light, palette_medium, palette_dark)
|
68a8963b109c008c201a1f52d1c41c563d775e04 | 3ab657f287cdfeff9c6bb0076d1bdf3696973f80 | /src/ldatest.R | bf20bffcd5e39b79b6614dfd48c3d91dd54b8419 | [] | no_license | cwf2/dh2016 | 7a4c5a5d12ab9d00be297435a0b4ba6bbc7d811f | b07acab8df3a1817f56ae0f401654c408dc6e533 | refs/heads/master | 2018-07-18T23:34:11.099376 | 2018-06-01T21:57:31 | 2018-06-01T21:57:31 | 48,430,253 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,000 | r | ldatest.R | #######
# topics test
# Fit one LDA model per repetition on the document-term matrix `dtm.tf`
# (read from the enclosing environment) and k-means-cluster its
# document-topic matrix; returns a matrix with one column of cluster
# assignments per repetition.  ncores = NA runs serially, otherwise the
# repetitions run in parallel via mclapply.
one.lda.per.kmeans <- function(ntopics, nclusters, nreps=10, ncores=NA) {
  run_rep <- function(rep_idx) {
    started_at <- Sys.time()
    # Report elapsed time for this repetition even if the fit fails.
    on.exit(
      cat(
        paste(" - [", rep_idx, "/", nreps, "]"),
        "...",
        difftime(Sys.time(), started_at, units = "min"),
        "minutes\n"
      )
    )
    topic_weights <- slot(LDA(dtm.tf, k = ntopics), "gamma")
    kmeans(topic_weights, nclusters)$cluster
  }
  cat("Generating", nreps, "reps with", ntopics, "topics and", nclusters, "classes\n")
  rep_list <- if (is.na(ncores)) {
    lapply(1:nreps, run_rep)
  } else {
    mclapply(1:nreps, run_rep, mc.cores = ncores)
  }
  do.call(cbind, rep_list)
}
output.dir <- "~/ldatest"
# Create the output directory (and parents) on first run.
if(! dir.exists(output.dir)) {
  dir.create(output.dir, recursive=T)
}
set.seed(11011981)
# Sweep the number of clusters; one result file per (ntopics, nclusters).
# NOTE(review): `ntopics` and `ncores` are not defined anywhere in this
# file -- they must exist in the calling environment or this lapply will
# fail with "object not found". TODO: confirm where they are set.
lapply(2:20, function(nclusters) {
  output.file <- file.path(output.dir, paste(ntopics, "-", nclusters, ".txt", sep=""))
  write.table(file=output.file, one.lda.per.kmeans(ntopics, nclusters, nreps = 15, ncores))
})
|
05c7f0c744a588c2f56a981a170a15871fdd39eb | 09af67cbd6897732da9ed5c52d31c63f83a48b96 | /Src/R_With_Cpp/Functions.R | 88014ce58b76c36266c7bddc3c5fe5127888c31a | [
"MIT"
] | permissive | arekbee/R_with_cpp | 9673ab49202d91ba63dabc824788b4d12836c8c5 | 6e1328bdbb3728da675c6b106e516df6889bd8f5 | refs/heads/master | 2021-01-10T13:56:48.791328 | 2015-12-28T18:37:25 | 2015-12-28T18:37:25 | 48,703,059 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 154 | r | Functions.R | mov.avg <- function(x, n=20) {
total <- numeric(length(x) - n + 1)
for (i in 1:n) {
total <- total + x[i:(length(x) - n + i)]
}
total / n
}
|
c58870084112cbdd18e563c3a7c0ce2fec86ec31 | 35e649b4ff8492fdc4b4b5428be5e996cd813239 | /ch_7_1_basic_graphics_5_salary.R | d234a08c0286c0ce4d647bf43ff5416fc235b4ab | [] | no_license | tsaomao/edX--MSDS-Curriculum--DAT204x | 55a67aad2dcf548f5ca6e1526cd51633ff07eb8e | 18b2e297575c6e3000bbaba9a609dbf41aee6bda | refs/heads/master | 2020-06-14T09:35:57.451090 | 2016-12-03T01:57:13 | 2016-12-03T01:57:13 | 75,204,647 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 203 | r | ch_7_1_basic_graphics_5_salary.R | # salaries already provided
# 65 rows
# Subset salaries: salaries_educ
# Keep only rows whose degree code equals 3; all columns are retained.
salaries_educ <- salaries[salaries$degree == 3,]
# Create a histogram of the salary column
hist(salaries_educ$salary, breaks = 10) |
93bafe7f3d490d97d3d88278c5624f389ce9d872 | faf1f580595ad6912c1184858792870d88b965ff | /img_ejemplos/porcentajes14_meta_parche.R | ade6bf7fee80d2525e9943d612f75c0ab171d03a | [] | no_license | EncisoAlvaJC/TESIS | 732cb07f5488a388ad4b6f2417717a6262817c0d | c2bad0f255e7d5795d2ac63c80e65e3d752ea5f8 | refs/heads/master | 2021-01-09T05:34:36.738141 | 2018-08-08T20:46:52 | 2018-08-08T20:46:52 | 80,755,604 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,895 | r | porcentajes14_meta_parche.R | ###############################################################################
# central folder
data_dir    = '~/TESIS/graf_datos/estacionariedad_filtro/'
central_dir = '~/TESIS/TESIS/img_art_dfa'
info_dir    = '~/TESIS/TESIS/articulo_dfa'
g_dir       = '~/TESIS/TESIS/img_art_dfa'
setwd(central_dir)
#################################################
# script parameters
dur_chunk = 30                # epoch length (seconds; used in file names/labels)
p.val     = .01
grabar.gral  = F              # save the general figure?
graf.indv    = F              # per-participant flags (presumably consumed by the sourced script)
grabar.indv  = F
grabar.ast = T                # save the significance-asterisk table?
p.ast = c(.05,.01,.005,.001)  # p-value cut-offs for the asterisk labels
ast   = c(' ','*','**','***','****')
guardar_archivo = F
nombre_archivo  = paste0('asteriscos_',toString(dur_chunk),'.csv')
orden_stam = T                # use the alternative channel ordering?
quienes    = 1:5              # participants to include
###############################################################################
# libraries
require('readxl')
#require('xlsx')
require('ggplot2')
require('ggpubr')
#require('colo')
require('Rmisc')
require('reshape')
require('scales')
require('hms')
#################################################
# general constants
info    = read_excel(paste0(info_dir,'/info_tecnico.xlsx'))
orden_k = read_excel(paste0(info_dir,'/info_canales.xlsx'))
kanales = read_excel(paste0(info_dir,'/info_canales.xlsx'))
if(orden_stam){
  kanales = read_excel(paste0(info_dir,'/info_canales_alterno.xlsx'))
}
n.canales    = length(kanales$Etiqueta)
canales.arch = kanales$Nombre_archivo
#n.participantes = length(info$Nombre)
n.participantes = length(quienes)
#################################################
# data containers: one row per channel, one column per participant
dif_significativas = matrix(nrow=n.canales,
                            ncol=n.participantes)
colnames(dif_significativas)  = info$Nombre[quienes]
row.names(dif_significativas) = kanales$Etiqueta
matriz_mor  = matrix(nrow=n.canales,ncol=n.participantes+2)
matriz_nmor = matrix(nrow=n.canales,ncol=n.participantes+2)
matriz_tot  = matrix(nrow=n.canales,ncol=n.participantes+2)
colnames(matriz_mor)  = c(info$Nombre[quienes],'Canal_var','Etapa')
colnames(matriz_nmor) = c(info$Nombre[quienes],'Canal_var','Etapa')
colnames(matriz_tot)  = c(info$Nombre[quienes],'Canal_var','Etapa')
matriz_mor[,'Canal_var']  = 1:n.canales
matriz_nmor[,'Canal_var'] = 1:n.canales
matriz_tot[,'Canal_var']  = 1:n.canales
matriz_mor[,'Etapa']  = rep('MOR',n.canales)
matriz_nmor[,'Etapa'] = rep('NMOR',n.canales)
matriz_tot[,'Etapa']  = rep('Total',n.canales)
#################################################
# load the data: the sourced patch script fills the containers per subject
for(sujeto in 1:n.participantes){
  setwd(central_dir)
  source('~/TESIS/TESIS/img_ejemplos/porcentajes14_parche.R')
}
#################################################
# significant differences MOR vs NMOR: save the asterisk table
if(grabar.ast){
  setwd(g_dir)
  write.csv(dif_significativas,file=nombre_archivo)
}
#################################################
# split the groups for comparison (long format, one row per channel/stage)
matriz = rbind(matriz_mor,matriz_nmor)
matriz = as.data.frame(matriz)
matriz2 = melt(matriz,id=c('Canal_var','Etapa'))
matriz2$value = as.numeric(as.character(matriz2$value))
# patch: single-group dummy columns
# NOTE(review): 44*5 is hard-coded -- presumably channels x participants;
# consider deriving it from n.canales * n.participantes.
grupos = as.data.frame(c(rep(0,44*5)))
matriz2 = cbind(matriz2,grupos)
matriz2 = cbind(matriz2,grupos)
colnames(matriz2) = c('Canal_var','Etapa','Participante',
                      'Proporcion','Grupo','GrupoEtapa')
#droplevels(matriz2$Proporcion)
#matriz2$Proporcion = droplevels(matriz2$Proporcion)
matriz2$GrupoEtapa = 2*matriz2$Grupo + 1*(matriz2$Etapa=='NMOR')
matriz2$Canal_var = factor(matriz2$Canal_var,
                           labels=kanales$Etiqueta)
#labels=orden_k$Etiqueta)
matriz2$Grupo = factor(matriz2$Grupo,
                       labels=c('CTL'))
matriz2$GrupoEtapa = factor(matriz2$GrupoEtapa,
                            labels=c('CTL NMOR','CTL MOR'))
# Boxplot of the percentage of stationary epochs per channel, MOR vs NMOR,
# with one-sided Wilcoxon significance stars per channel.
ggplot(matriz2,aes(x=Canal_var,y=100*Proporcion,fill=Etapa))+
  geom_boxplot() +
  xlab(NULL) + ylab('Épocas estacionarias [%]') +
  theme_bw() +
  #scale_y_continuous(labels=percent) +
  #scale_fill_brewer(palette='Paired') +
  scale_fill_manual(values=c('#40ff40','#808080'))+
  #theme(legend.position='bottom') +
  theme(legend.position = c(0.05,0.82)) +
  labs(fill=NULL) +
  #labs(title=paste('Época =',toString(dur_chunk),'s')) +
  #facet_grid(Grupo~.) +
  stat_compare_means(label = 'p.signif',method='wilcox.test',
                     hide.ns = T,paired = F,
                     method.args = list(alternative = "greater"))+
  #stat_compare_means(label = 'p.format',method='wilcox.test',
  #                   hide.ns = T)+
  rotate_x_text(angle = 45)
if(grabar.gral){
  ggsave(filename=paste0('PARCHE.pdf'),
         path=g_dir,device='pdf',
         width=9.843,height=3.515,unit='in',scale=.8)
}
#################################################
# graphics parameters (unused colour definitions kept for reference)
#rojito    = rgb(255, 64, 64,maxColorValue=255)
#verdecito = rgb( 64,255, 64,maxColorValue=255)
#azulito   = rgb( 64, 64,255,maxColorValue=255)
#gricesito = rgb(128,128,128,maxColorValue=255)
4e4405f3aa2765335c8a52b298719d82d14966f0 | ff8d54c963f04cfcd7782214b450c26726366532 | /R/SICR_simulateState.R | 308e8b903d779e583973464cd443958cca07a16e | [] | no_license | TommiKarki/NpiEurope | 26e81f54084c2b5a3add1f0284e0e2291fedaefc | 406abb7c815552239b24fe3ae7de574414795b92 | refs/heads/master | 2022-12-13T23:22:57.724027 | 2020-08-22T13:44:17 | 2020-08-22T13:51:29 | 290,290,879 | 0 | 0 | null | 2020-08-25T18:19:59 | 2020-08-25T18:19:59 | null | UTF-8 | R | false | false | 2,101 | r | SICR_simulateState.R | #' @importFrom stats rpois
SICR_simulateState <- function(t0, t1,
                               P,
                               pIsol, timeIsol, matrixCtctAge, propAge, transmRate, popSize, lethalityAge, severityAge, confinementBegin, confinementDuration, confinementEfficiency, confinementTarget, epsilon, sigma, propAsympto,
                               Np) {
  # Stochastic compartmental stepper: advances the state P from t0 to t1 in
  # half-unit time steps, drawing Poisson-distributed transition counts per
  # age class.  Columns of P read/written here: S, E, A, I, U, R, D, M
  # (one row per age class); a NbCases column holding the cumulative count
  # of new non-asymptomatic cases is added before returning.
  # NOTE(review): popSize, confinementTarget, pIsol, and Np are accepted
  # but never used in this body -- presumably kept for interface
  # compatibility; confirm against callers.
  dt <- 1 / 2               # time step (half a time unit)
  n <- length(propAge)      # number of age classes
  nbCases <- rep_len(0L, n) # new cases accumulated per age class
  while (t0 + dt <= t1) {
    # Main body of equations
    P[P < 0] <- 0           # clamp negatives left by the previous draws
    # Calculating force of direct transmission
    lambda <- crossprod(matrixCtctAge, P$I + P$A) * transmRate * P$S
    if (t0 >= timeIsol) {
      # lambda <- lambda * (1-pIsol)
    }
    # Scale transmission down while any confinement window is active.
    if (any(t0 >= confinementBegin & t0 < confinementBegin + confinementDuration)) {
      lambda <- lambda * (1 - confinementEfficiency[t0 >= confinementBegin & t0 < (confinementBegin + confinementDuration)])
    }
    # Rates calculation
    # FIXME: it would be faster to not create this object and then subset it
    # in each rpois() call but rather use the rate directly in the rpois() call
    rates <- cbind(
      lambda, # S-1,E+1
      propAsympto * epsilon * P$E, # E-1,A+1
      (1 - propAsympto) * epsilon * P$E, # E-1,I+1
      sigma * P$A, # A-1,U+1
      sigma * (1 - lethalityAge - severityAge) * P$I, # I-1,R+1
      sigma * lethalityAge * P$I, # I-1,D+1
      sigma * severityAge * P$I # I-1,M+1
    )
    # Rates application
    # Each transition moves a Poisson-drawn number of individuals between
    # the two compartments annotated on the corresponding rate above.
    Num <- rpois(n, rates[, 1] * dt)
    P$S <- P$S - Num
    P$E <- P$E + Num
    Num <- rpois(n, rates[, 2] * dt)
    P$E <- P$E - Num
    P$A <- P$A + Num
    Num <- rpois(n, rates[, 3] * dt)
    P$E <- P$E - Num
    P$I <- P$I + Num
    nbCases <- nbCases + Num # only E -> I transitions count as new cases
    Num <- rpois(n, rates[, 4] * dt)
    P$A <- P$A - Num
    P$U <- P$U + Num
    Num <- rpois(n, rates[, 5] * dt)
    P$I <- P$I - Num
    P$R <- P$R + Num
    Num <- rpois(n, rates[, 6] * dt)
    P$I <- P$I - Num
    P$D <- P$D + Num
    Num <- rpois(n, rates[, 7] * dt)
    P$I <- P$I - Num
    P$M <- P$M + Num
    t0 <- t0 + dt
    P[P < 0] <- 0           # clamp again after the draws
  }
  P$NbCases <- nbCases
  return(P)
}
|
f18159d07bf155837df175a1fca35d5a1fc8880a | 1099e609e85486372d56c5407280c9576094c9c2 | /R/mipplot.R | 1359a958431b49a49eeab8ca5de8594bb01b574d | [
"MIT"
] | permissive | UTokyo-mip/mipplot | e342703e6f70880631efb2ef6f0c93211c37856c | b0e3ecb4c6de622ceb8fe5acd22b6aaff00b6144 | refs/heads/master | 2022-06-16T09:57:47.277800 | 2021-07-27T15:03:28 | 2021-07-27T15:03:28 | 154,427,152 | 8 | 4 | null | null | null | null | UTF-8 | R | false | false | 219 | r | mipplot.R | #' mipplot
#'
#' This package contains generic functions to produce area/bar/box/line plots of data following the IAMC submission format.
#'
#' @name mipplot
#' @docType package
#' @import ggplot2 stringr reshape2 tidyr
#'
NULL
|
5f602728711aeda7bd69c43beb6b17003840bfdf | 3a500b954d00b3f43f68d75a59508a35643f3729 | /RandomForest.R | c6686698a735827026c13bd15f0093c930e5fc13 | [] | no_license | bgautijonsson/groceries | cf4a6be86079b898acdf79e8f73b01ceeac35357 | f79ac95b1c61ca9767f4e7d379bc2229fd8938fc | refs/heads/master | 2021-03-30T18:25:50.236009 | 2017-11-01T23:57:26 | 2017-11-01T23:57:26 | 108,177,039 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,067 | r | RandomForest.R | # Wrangling
library(dplyr)
library(tidyr)
library(data.table)
library(tibble)
library(lubridate)
library(reshape2)
# Machine Learning
library(caret)
library(forecast)
library(foreach)
library(forecastHybrid)
source('cost.R')
# Parallel Processing
library(parallel)
library(doParallel)
# Seed
set.seed(1337)
# Reading the data
training <- fread('train_set_2017.csv')
dev <- fread('dev_set.csv')
training$date <- as.Date(training$date, format = "%Y-%m-%d")
dev$date <- as.Date(dev$date, format = "%Y-%m-%d")
# Transform to log1p
# Negative sales (returns) are clamped to zero before the log transform.
training$unit_sales <- as.numeric(training$unit_sales)
training[training$unit_sales < 0, "unit_sales"] <- 0
training$unit_sales <- log1p(training$unit_sales)
# Variables as factors
training$store_nbr <- as.character(training$store_nbr)
training$item_nbr <- as.character(training$item_nbr)
# Preprocess
# Aggregate mean log-sales per (day/4 bucket, weekday, store, perishable).
training_small <- training %>%
  mutate(week = ceiling(day(date)/4), weekday = weekdays(date)) %>%
  mutate(perishable = as.factor(perishable)) %>%
  group_by(week, weekday, store_nbr, perishable) %>%
  summarise(unit_sales = mean(unit_sales))
# Remap store 32 onto store 26 -- TODO confirm why (dev-set mismatch?).
training_small[training_small$store_nbr == 32,]$store_nbr = '26'
training_small$weekday <- as.factor(training_small$weekday)
# Build the prediction frame with the same derived features.
# NOTE(review): perishable is inverted here (1 -> 0, otherwise 1) while the
# training data keeps the raw coding -- verify this flip is intentional.
dev_pred <- dev %>%
  mutate(week = ceiling(day(date)/4), weekday = weekdays(date)) %>%
  mutate(perishable = if_else(perishable == 1, 0, 1)) %>%
  select(week, weekday, store_nbr, perishable)
dev_pred$weekday <- as.factor(dev_pred$weekday)
# Training
# 5-fold CV random forest, trained in parallel on all but one core.
X_train <- training_small[,-5]
y_train <- training_small$unit_sales
trControl <- trainControl(method = 'cv', number = 5,
                          verboseIter = TRUE, allowParallel = TRUE)
cluster <- makeCluster(detectCores() - 1)
registerDoParallel(cluster)
fit <- train(x = X_train, y = y_train, method = 'rf',
             trControl = trControl)
stopCluster(cluster)
registerDoSEQ()
# Predict on the dev set and undo the log1p transform.
pred <- predict(fit, dev_pred)
pred <- expm1(pred)
source('cost.R')
cost(pred = pred, test = dev)
# Visual sanity check on a random sample of 500 dev rows.
plotnum <- sample(x = 1:1000000, size = 500)
plot(dev$unit_sales[plotnum], pred[plotnum])
|
e55d3597a12de9b0b23a7545d253ae6fb4f1ac03 | c3290a352d3699cbd0f2c4dc7f674d833184a783 | /R/DataDocumentation.R | f36a1ab098fa1b47f919003453ef9bef51e8beee | [] | no_license | kartheekkumar65/sparseKOS | 6226c0926fb20c54d0a7e598457f2007b29a87fd | f4481ae0f0331bcf7bdd53b53c0562bc56c4217e | refs/heads/master | 2021-10-19T09:02:51.189930 | 2019-02-19T21:36:02 | 2019-02-19T21:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | DataDocumentation.R | #' A list consisting of Training and Test data along with corresponding class labels.
#' @format A list consisting of:
#' \describe{
#'   \item{TrainData}{ (179 x 4) Matrix of training data features. The first two features satisfy sqrt(x_{i1}^2 + x_{i2}^2) > 2/3 if the ith sample is in class 1.
#' Otherwise, they satisfy sqrt(x_{i1}^2 + x_{i2}^2) < 2/3 - 1/10 if the ith sample is in class 2.
#' The third and fourth features are generated as independent N(0, 1/2) noise.}
#'   \item{TestData}{ (94 x 4) Matrix of test data features. The first two features satisfy sqrt(x_{i1}^2 + x_{i2}^2) > 2/3 if the ith sample is in class 1.
#' Otherwise, they satisfy sqrt(x_{i1}^2 + x_{i2}^2) < 2/3 - 1/10 if the ith sample is in class 2.
#' The third and fourth features are generated as independent N(0, 1/2) noise.}
#' \item{CatTrain}{ (179 x 1) Vector of class labels for the training data.}
#' \item{CatTest}{ (94 x 1) Vector of class labels for the test data.}
#' ...
#' }
#' @source Simulation model 1 from [Lapanowski and Gaynanova, preprint].
#' @references Lapanowski, Alexander F., and Gaynanova, Irina. ``Sparse Feature Selection in Kernel Discriminant Analysis via Optimal Scoring'', preprint.
"Data" |
171c723fe3633c222f87e134d17215550c77e73e | 9d3ccd4b3908742dac1a1be4cb6f3e1be9287c4d | /code/simulation/strach/merge_simulation_plug_in_meta.R | 40070660fe7a6d4f6f7839dc6d8b40b9ed4cc518 | [] | no_license | andrewhaoyu/MR_MA | f8a35d74ea48d1662de26ee234f305606be2742b | 2e7ac792f1c40ddbc0eb6639d68981672584ccef | refs/heads/master | 2022-07-30T21:53:34.668451 | 2022-07-07T14:29:09 | 2022-07-07T14:29:09 | 203,250,618 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,410 | r | merge_simulation_plug_in_meta.R | #goal: merge the simulations datasets by pluging in true variance
times = 10
reps = 1000
TwoStage_est = rep(0,times*reps)
IVW_est = rep(0,times*reps)
IVWs_est = rep(0,times*reps)
cover_TwoStage_est = rep(0,times*reps)
cover_IVW_est = rep(0,times*reps)
cover_IVWs_est = rep(0,times*reps)
sigma_TwoStage = rep(0,times*reps)
sigma_IVW = rep(0,times*reps)
sigma_IVWs = rep(0,times*reps)
twostage.nsnps = rep(0,times*reps)
twostage.prop = rep(0,times*reps)
IVW.nsnps = rep(0,times*reps)
IVW.prop = rep(0,times*reps)
setwd("/spin1/users/zhangh24/MR_MA/")
total = 0
for(i1 in 1:reps){
load( paste0("./result/simulation/simulation_plug_in_meta",i1,".Rdata"))
temp = length(result[[1]][[1]])
TwoStage_est[total+(1:temp)] <- result[[1]][[1]]
IVW_est[total+(1:temp)] <- result[[1]][[2]]
IVWs_est[total+(1:temp)] <- result[[1]][[3]]
cover_TwoStage_est[total+(1:temp)] <- result[[1]][[4]]
cover_IVW_est[total+(1:temp)] <- result[[1]][[5]]
cover_IVWs_est[total+(1:temp)] = result[[1]][[6]]
sigma_TwoStage[total+(1:temp)] = result[[1]][[7]]
sigma_IVW[total+(1:temp)] = result[[1]][[8]]
sigma_IVWs[total+(1:temp)] = result[[1]][[9]]
total = total+temp
}
#IVWs_est_new = IVWs_est[order(IVWs_est)][(times*reps*0.05):(times*reps*0.95)]
beta_M = 0.1
mean(TwoStage_est)-beta_M
mean(IVW_est)-beta_M
mean(IVWs_est)-beta_M
mean(cover_TwoStage_est)
mean(cover_IVW_est)
mean(cover_IVWs_est,na.rm = T)
var(TwoStage_est)
var(IVW_est)
var(IVWs_est)
mean(sigma_TwoStage)
mean(sigma_IVW,na.rm = T)
mean(sigma_IVWs)
[order(IVWs_est)][(times*reps*0.05):(times*reps*0.95)])
total = 0
for(i1 in 1:reps){
load( paste0("./result/simulation/simulation_plug_in_meta",i1,".Rdata"))
temp = length(result[[3]][[1]])
TwoStage_est[total+(1:temp)] <- result[[3]][[1]]
IVW_est[total+(1:temp)] <- result[[3]][[2]]
IVWs_est[total+(1:temp)] <- result[[3]][[3]]
cover_TwoStage_est[total+(1:temp)] <- result[[3]][[4]]
cover_IVW_est[total+(1:temp)] <- result[[3]][[5]]
cover_IVWs_est[total+(1:temp)] = result[[3]][[6]]
sigma_TwoStage[total+(1:temp)] = result[[3]][[7]]
sigma_IVW[total+(1:temp)] = result[[3]][[8]]
sigma_IVWs[total+(1:temp)] = result[[3]][[9]]
total = total+temp
}
beta_M = 0.1
mean(TwoStage_est)-beta_M
mean(IVW_est)-beta_M
mean(IVWs_est)-beta_M
IVWs_est_new = IVWs_est[order(IVWs_est)][(times*reps*0.05):(times*reps*0.95)]
mean(IVWs_est_new)
mean(cover_TwoStage_est)
mean(cover_IVW_est)
mean(cover_IVWs_est)
var(TwoStage_est)
var(IVW_est)
var(IVWs_est)
mean(sigma_TwoStage)
mean(sigma_IVW,na.rm = T)
mean(sigma_IVWs)
total = 0
n_thres = 7
TwoStage_est_all <- matrix(0,times*reps,n_thres)
IVW_est_all <- matrix(0,times*reps,n_thres)
IVWs_est_all <- matrix(0,times*reps,n_thres)
for(i1 in 1:reps){
load( paste0("./result/simulation/simulation_plug_in_meta",i1,".Rdata"))
temp = length(result[[2]][[1]])
TwoStage_est[total+(1:temp)] <- result[[2]][[1]]
IVW_est[total+(1:temp)] <- result[[2]][[2]]
IVWs_est[total+(1:temp)] <- result[[2]][[3]]
cover_TwoStage_est[total+(1:temp)] <- result[[2]][[4]]
cover_IVW_est[total+(1:temp)] <- result[[2]][[5]]
cover_IVWs_est[total+(1:temp)] = result[[2]][[6]]
sigma_TwoStage[total+(1:temp)] = result[[2]][[7]]
sigma_IVW[total+(1:temp)] = result[[2]][[8]]
sigma_IVWs[total+(1:temp)] = result[[2]][[9]]
twostage.nsnps[total+(1:temp)] = result[[2]][[10]]
twostage.prop[total+(1:temp)] = result[[2]][[11]]
IVW.nsnps[total+(1:temp)] = result[[2]][[12]]
IVW.prop[total+(1:temp)] = result[[2]][[13]]
TwoStage_est_all[total+(1:temp),] = result[[2]][[14]]
IVW_est_all[total+(1:temp),] = result[[2]][[15]]
IVWs_est_all[total+(1:temp),] = result[[2]][[16]]
total = total+temp
}
beta_M = 0.1
mean(TwoStage_est)-beta_M
mean(IVW_est,na.rm = T)-beta_M
mean(IVWs_est,na.rm = T)-beta_M
mean(cover_TwoStage_est,na.rm = T)
mean(cover_IVW_est,na.rm = T)
mean(cover_IVWs_est,na.rm = T)
var(TwoStage_est,na.rm = T)
var(IVW_est,na.rm = T)
var(IVWs_est,na.rm = T)
mean(sigma_TwoStage,na.rm = T)
mean(sigma_IVW,na.rm = T)
mean(sigma_IVWs,na.rm = T)
mean(twostage.nsnps,na.rm = T)
mean(twostage.prop,na.rm = T)
mean(IVW.nsnps,na.rm = T)
mean(IVW.prop)
total = 0
for(i1 in 1:reps){
load(paste0("./result/simulation/simulation_",i1,".Rdata"))
temp = length(result[[4]][[1]])
TwoStage_est[total+(1:temp)] <- result[[4]][[1]]
IVW_est[total+(1:temp)] <- result[[4]][[2]]
IVWs_est[total+(1:temp)] <- result[[4]][[3]]
cover_TwoStage_est[total+(1:temp)] <- result[[4]][[4]]
cover_IVW_est[total+(1:temp)] <- result[[4]][[5]]
cover_IVWs_est[total+(1:temp)] = result[[4]][[6]]
sigma_TwoStage[total+(1:temp)] = result[[4]][[7]]
sigma_IVW[total+(1:temp)] = result[[4]][[8]]
sigma_IVWs[total+(1:temp)] = result[[4]][[9]]
twostage.nsnps[total+(1:temp)] = result[[4]][[10]]
twostage.prop[total+(1:temp)] = result[[4]][[11]]
IVW.nsnps[total+(1:temp)] = result[[4]][[12]]
IVW.prop[total+(1:temp)] = result[[4]][[13]]
total = total+temp
}
beta_M = 0.1
mean(TwoStage_est)-beta_M
mean(IVW_est,na.rm = T)-beta_M
mean(IVWs_est,na.rm = T)-beta_M
mean(cover_TwoStage_est,na.rm = T)
mean(cover_IVW_est,na.rm = T)
mean(cover_IVWs_est,na.rm = T)
var(TwoStage_est,na.rm = T)
var(IVW_est,na.rm = T)
var(IVWs_est,na.rm = T)
mean(sigma_TwoStage,na.rm = T)
mean(sigma_IVW,na.rm = T)
mean(sigma_IVWs,na.rm = T)
mean(twostage.nsnps,na.rm = T)
mean(twostage.prop,na.rm = T)
mean(IVW.nsnps,na.rm = T)
mean(IVW.prop,na.rm = T)
|
05cf44f9ab50db04cf24307167ded0f5847a129f | 3c25f49d8592847a741b9324b482eb769721d985 | /man/summary.freqtab.Rd | 0ed5f7d758c23aad2349ea0b3e6c82ec944dade9 | [] | no_license | talbano/equate | 5de3d041aab6817dfad9b2fef9a37ca87321aeef | 3583de82faf337c4c9e0651db9293ed8b8a768c5 | refs/heads/master | 2022-12-01T17:23:14.460656 | 2022-12-01T16:12:50 | 2022-12-01T16:12:50 | 44,709,479 | 7 | 4 | null | null | null | null | UTF-8 | R | false | true | 2,610 | rd | summary.freqtab.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/descriptives.R
\name{summary.freqtab}
\alias{summary.freqtab}
\alias{mean.freqtab}
\alias{sd.freqtab}
\alias{var.freqtab}
\alias{cov.freqtab}
\alias{cor.freqtab}
\alias{min.freqtab}
\alias{max.freqtab}
\alias{range.freqtab}
\alias{skew.freqtab}
\alias{kurt.freqtab}
\title{Descriptive Statistics for Frequency Tables}
\usage{
\method{summary}{freqtab}(object, margin = seq(margins(object)), ...)
\method{mean}{freqtab}(x, margin = 1, ...)
sd.freqtab(x, margin = 1)
var.freqtab(x, margin = 1)
cov.freqtab(x, margin = seq(margins(x)))
cor.freqtab(x, margin = seq(margins(x)))
\method{min}{freqtab}(x, margin = 1, ..., na.rm = FALSE)
\method{max}{freqtab}(x, margin = 1, ..., na.rm = FALSE)
\method{range}{freqtab}(x, margin = 1, ..., na.rm = FALSE)
skew.freqtab(x, margin = 1)
kurt.freqtab(x, margin = 1)
}
\arguments{
\item{object, x}{object of class \dQuote{\code{freqtab}}.}
\item{margin}{integer vector specifying the margin(s) for which summary
statistics will be returned. This defaults to \code{1} for univariate
statistics, and \code{seq(margins(x))}, i.e., all the margins, for
multivariate statistics (covariance and correlation).}
\item{\dots}{further arguments passed to or from other methods.}
\item{na.rm}{logical indicating whether missing values should be removed,
currently ignored since frequency tables cannot contain missing values.}
}
\value{
\code{summary} returns a data frame of summary statistics, including
the mean, standard deviation, skewness, kurtosis, minimum, maximum, and
number of observations for each variable in \code{margin}. Otherwise, a
vector of length \code{length(margin)} is returned with the corresponding
statistic for each variable.
}
\description{
These functions return descriptive statistics for a frequency table of class
\dQuote{\code{\link{freqtab}}}.
}
\details{
\code{mean}, \code{sd.freqtab}, \code{var.freqtab}, \code{skew.freqtab}, and
\code{kurt.freqtab} return the mean, standard deviation, variance, skewness,
and kurtosis. \code{min} and \code{max} return the minimum and maximum
observed scores, and \code{range} returns both. \code{cov.freqtab} and
\code{cor.freqtab} return the covariance and correlation matrices for one or
more variables. \code{summary} returns univariate statistics across one or
more margins.
}
\examples{
summary(as.freqtab(ACTmath[, 1:2]))
ny <- freqtab(KBneat$y, scales = list(0:36, 0:12))
summary(ny)
cov.freqtab(ny)
}
\seealso{
\code{\link{freqtab}}
}
\author{
Anthony Albano \email{tony.d.albano@gmail.com}
}
\keyword{methods}
|
eda45bff00bc17f22aad3c0ab25c1a198f33c897 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/munsell/examples/rgb2mnsl.Rd.R | 6a80fd09c58ade9beb60502442f37cb2c60c0a23 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 265 | r | rgb2mnsl.Rd.R | library(munsell)
### Name: rgb2mnsl
### Title: Converts an sRGB colour to Munsell
### Aliases: rgb2mnsl
### ** Examples
# Convert a single sRGB colour (three numeric components) to Munsell notation.
rgb2mnsl(0.1, 0.1, 0.3)
# Vectorised form: one colour per row of a 3-column (r, g, b) matrix.
rgb2mnsl(matrix(c(.1, .2, .4, .5, .6, .8), ncol = 3))
# NOTE(review): presumably plots each colour beside its closest Munsell
# match — confirm against the munsell package documentation.
plot_closest(matrix(c(.1, .2, .4, .5, .6, .8), ncol = 3))
|
02ba3c69cc3d4f14cee9c1dda1b66ba9e0b21f25 | 66a65ac1a3d7bc3a87b46be3f637e553f86e47f9 | /CH4/chebyshev approximation.R | 3900a54bc3ef1d003d9aa2067dfc16b409670d0b | [] | no_license | uncorrelated/KSQME | ee8958a31df421e877d880da6c8f6eb6333b1cbe | 3d2b6c6666ea363cc6ce9e3599c2486e63da3f98 | refs/heads/master | 2022-12-26T21:59:43.053752 | 2020-10-15T21:55:25 | 2020-10-15T21:55:25 | 281,531,395 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,409 | r | chebyshev approximation.R | #
# 経済セミナー連載「定量的マクロ経済学と数値計算」
# 第4回「オイラー方程式と多項式近似」のソースコードのRへの野良移植: チェビシェフ多項式
#
#################################################################
### Warning: Package 'chebpol' の存在に気づいてはいけません。 ###
#################################################################
#
# y = f(x)をチェビシェフ多項式で近似するための関数群
#
#
# Compute the Chebyshev collocation points on the interval [xmin, xmax].
# args: xmin: lower bound of the domain of x, xmax: upper bound,
#       n: number of collocation points to return,
#       type: "extrema" (extrema of T_{n-1}, i.e. Gauss-Lobatto nodes) or
#             "zeros" (0 followed by the n-1 zeros of T_{n-1};
#             presumably intentional in the original text — confirm)
# returns: a numeric vector of n points in [xmin, xmax]
#
ChebyshevCollocationPoints <- function(xmin, xmax, n, type="extrema"){
	if(type == "extrema"){
		nodes <- -cos(pi * (0:(n - 1)) / (n - 1))
	} else if(type == "zeros"){
		k <- 1:(n - 1)
		nodes <- c(0, -cos(pi * (2*k - 1) / (2*(n - 1))))
	} else {
		stop("unknown type")
	}
	# Affine map from the Chebyshev domain [-1, 1] onto [xmin, xmax].
	xmin + (xmax - xmin) * (nodes + 1) / 2
}
#
# Build the matrix of Chebyshev basis polynomials evaluated at x.
# args: xmin: lower bound of the domain of x, xmax: upper bound (used to
#       rescale x onto [-1, 1]), d: polynomial degree,
#       x: vector of points at which to evaluate the basis
# returns: a length(x) x (d + 1) matrix whose columns are T_0(s), ..., T_d(s)
#
ChebyshevBasis <- function(xmin, xmax, d, x){
	n <- d + 1
	# Map x from [xmin, xmax] onto the Chebyshev domain [-1, 1].
	s <- (2/(xmax - xmin))*(x - xmin) - 1
	# B (rather than T as before) so the name does not mask TRUE's alias T.
	B <- matrix(0, length(x), n)
	B[, 1] <- 1                  # T_0(s) = 1
	if(n >= 2) B[, 2] <- s       # T_1(s) = s
	# Three-term recurrence T_i(s) = 2 s T_{i-1}(s) - T_{i-2}(s).
	# Bug fix: the original unconditional `for(i in 3:n)` produced the
	# descending sequence c(3, 2) when n == 2 (degree d == 1) and indexed
	# column 3 of a 2-column matrix, raising a subscript-out-of-bounds error.
	if(n >= 3){
		for(i in 3:n){
			B[, i] <- 2*s*B[, i-1] - B[, i-2]
		}
	}
	B
}
#
# Fit the coefficient vector theta from the collocation points (cp_x, cp_y)
# and bundle it with the domain bounds of x into a parameter list.
#
ChebyshevMapApproximation <- function(cp_x, cp_y, xmin, xmax, d){
	basis <- ChebyshevBasis(xmin, xmax, d, cp_x)
	coef <- solve(basis, cp_y)	# equivalent to solve(basis) %*% cp_y
	list(theta=coef, xmin=xmin, xmax=xmax, cp_x=cp_x, cp_y=cp_y)
}
#
# Compute the parameters theta approximating a function f.
# args: f: function to approximate, xmin: lower bound of the domain of x,
#       xmax: upper bound, d: polynomial degree
#
ChebyshevApproximation <- function(f, xmin, xmax, d){
	nodes <- ChebyshevCollocationPoints(xmin, xmax, d + 1)
	ChebyshevMapApproximation(nodes, f(nodes), xmin, xmax, d)
}
#
# Predict values from the fitted parameters.
# args: caparam: list produced by ChebyshevApproximation /
#       ChebyshevMapApproximation (theta, xmin, xmax, ...),
#       x: points at which to predict
# returns: a matrix (column vector) of predicted values, as %*% yields
#
ChebyshevPredict <- function(caparam, x){
	degree <- length(caparam$theta) - 1
	basis <- ChebyshevBasis(caparam$xmin, caparam$xmax, degree, x)
	basis %*% caparam$theta
}
|
a3e026586b2be57bda4a52e8e8fb250c9caa7652 | a182b4e4aea4f664422a1015d014ee00286a09fb | /man/check_onland.Rd | 39799f9217d26f53ce34b603d9ba1b8f1e39720c | [
"MIT"
] | permissive | iobis/obistools | 572e0c5df9b603212ea251964c9dd84168738dcb | d001391cf6b9ac7e2f8ba09d54ef78ae89441d94 | refs/heads/master | 2023-08-05T13:26:29.684831 | 2023-07-31T09:15:47 | 2023-07-31T09:15:47 | 77,918,363 | 18 | 9 | NOASSERTION | 2023-05-26T13:43:40 | 2017-01-03T13:13:47 | R | UTF-8 | R | false | true | 1,314 | rd | check_onland.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_onland.R
\name{check_onland}
\alias{check_onland}
\title{Check which points are located on land.}
\usage{
check_onland(data, land = NULL, report = FALSE, buffer = 0, offline = FALSE)
}
\arguments{
\item{data}{The data frame.}
\item{land}{SpatialPolygonsDataFrame. If not provided the simplified land
polygons from OSM are used. This parameter is ignored when, \code{offline =
FALSE}.}
\item{report}{If TRUE, errors are returned instead of records.}
\item{buffer}{Set how far inland points are still to be deemed valid (in meters).}
\item{offline}{If TRUE, a local simplified shoreline is used, otherwise an
OBIS webservice is used. The default value is \code{FALSE}.}
}
\value{
Errors or problematic records.
}
\description{
Check which points are located on land.
}
\examples{
\dontrun{
report <- check_onland(abra, report = TRUE, buffer = 100)
print(report)
# plot records on land with 100 meter buffer
plot_map_leaflet(abra[report$row,], popup = "id")
# filter records not on land
ok <- abra[-1 * report$row,]
ok <- check_onland(abra, report = FALSE, buffer = 100)
print(nrow(ok))
}
}
\seealso{
\code{\link{check_depth}} \code{\link{check_outliers_dataset}}
\code{\link{check_outliers_species}} \code{\link{lookup_xy}}
}
|
f9a7dea4434734670c26e552aadde6d845bfdc53 | e76765a49d1be0b35c4f1fa9b98bd21fb94331ba | /write_multiple_csv_by_ACCT_ID.R | 84a4e2ac2921ec3786e678c69e58587bf0fe8e35 | [] | no_license | deontanweibin/Rcodes | e99e9fcf555b7eb92c0d9fd413770aa9a180b485 | ab114f979715fc251978889362f33f9fe180a203 | refs/heads/master | 2022-05-16T17:44:07.565768 | 2022-03-28T14:08:53 | 2022-03-28T14:08:53 | 166,375,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,004 | r | write_multiple_csv_by_ACCT_ID.R | #set working directory
# dplyr is loaded for the %>%/filter() verbs used in split_csv() below.
library(dplyr)
# NOTE(review): hard-coded Windows network path + setwd() makes this script
# machine-specific and non-portable; consider passing paths as arguments.
directory <- "X:\\Group Digital Life\\mCommerce\\New Singtel Dash Reporting\\Fictitious_Accounts_Management_&_Regulatory_Data_Report\\Transaction Dump\\Transaction_Dump_Feb20\\"
setwd(directory)
# Load the monthly transaction dump to be split into per-account CSVs.
df <- read.csv( file = "X:/Group Digital Life/mCommerce/New Singtel Dash Reporting/Fictitious_Accounts_Management_&_Regulatory_Data_Report/Transaction Dump/Transaction_Dump_Feb20.csv")
split_csv <- function (df, y) {
  # Split a data frame into one CSV file per unique ID in its ID column.
  #
  # Args:
  #   df: a data frame.
  #   y:  index (or name) of the ID column.
  #
  # One file named "<id>_transactions.csv" per unique ID is written to the
  # current working directory. Returns, invisibly, the file names written.
  #
  # Bug fix: the original filtered on the hard-coded column MAIN_ACCOUNT and
  # ignored `y` when subsetting, so it only worked for one specific column of
  # one specific dump; it now honours whichever column the caller passes.
  # The dplyr dependency and debug print() calls were also removed.
  ids <- unique(df[[y]])
  filenames <- character(length(ids))
  for (i in seq_along(ids)) {
    id <- ids[i]
    filenames[i] <- paste0(id, "_transactions.csv")
    # drop = FALSE keeps a data frame even if df ever has a single column.
    subset_df <- df[df[[y]] == id, , drop = FALSE]
    write.csv(subset_df, file = filenames[i], row.names = FALSE)
  }
  invisible(filenames)
}
# Write one CSV per unique value of column 18 of the transaction dump.
split_csv(df,18)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.