blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1fadc29e610aaa6580c886a5b233a25569173a79 | 73b6f86a2336d8e0b419f4ee9cd6718882546d2e | /postprocesing/extract_pp_acum.R | a325e308c7032a404534f60b8ead508bfc877ce2 | [] | no_license | paocorrales/Analisis_2018112022 | 058559dbdc088270ddc97906586dcae396bc8fda | f562c8899bdf178b9bc27a1cc55191ada3980a30 | refs/heads/master | 2021-12-28T17:57:46.800503 | 2021-12-01T23:44:36 | 2021-12-01T23:44:36 | 200,736,204 | 0 | 1 | null | 2020-04-15T02:01:58 | 2019-08-05T22:16:22 | HTML | UTF-8 | R | false | false | 1,751 | r | extract_pp_acum.R | library(tidyverse)
library(metR)
library(reticulate)
library(lubridate)
wrf <- import("wrf")
ncdf <- import("netCDF4")
np <- import("numpy")
xr <- import("xarray")
ini_date <- "20181122060000"
exp <- "E6"
fcst_long <- 30
files <- list.files(path = paste0("/glade/scratch/jruiz/EXP/", exp, "/FCST/det", ini_date, "/"), full.names = TRUE)
files <- files[str_detect(files, ":00:00")]
for (f in seq_along(files)[-length(files)]) {
ncfile_ini <- ncdf$Dataset(files[f])
ncfile_end <- ncdf$Dataset(files[f+1])
p <- wrf$getvar(ncfile_ini, "RAINNC")
pp_ini <- wrf$getvar(ncfile_ini, "RAINNC", meta = FALSE) + wrf$getvar(ncfile_ini, "RAINC", meta = FALSE) +
wrf$getvar(ncfile_ini, "RAINSH", meta = FALSE)
pp_end <- wrf$getvar(ncfile_end, "RAINNC", meta = FALSE) + wrf$getvar(ncfile_end, "RAINC", meta = FALSE) +
wrf$getvar(ncfile_end, "RAINSH", meta = FALSE)
p$data <- pp_end - pp_ini
date <- as_datetime(ini_date) + hours(f)
dir_out <- paste0("/glade/scratch/jruiz/EXP/analisis/ppacum/", format(as_datetime(ini_date), "%Y%m%d%H"))
path_out <- paste0(dir_out, "/pp_acum_1h_fcst_", exp, "_", as.character(format(date, "%Y%m%d%H%M%S")), ".nc")
#path_out <- paste0("pp_1h_", exp, "_", ini_date, "_f", formatC(f, digits = 2, width = 3, flag = 0), ".nc")
#write_xarray_to_netcdf(p, path_out, engine ="netcdf4")
xarray_array_out <- p$copy(deep = TRUE)
# coordinates are extracted from variable
xarray_array_out$attrs['coordinates'] <- NULL
# wrf-python projection object cannot be processed
xarray_array_out$attrs['projection'] <- as.character(xarray_array_out$attrs['projection'])
xarray_array_out$to_netcdf(path=path_out, mode='w', engine='netcdf4')
ncfile_ini$close()
ncfile_end$close()
message(paste0("Listo: ", files[f+1]))
}
|
50c0c7da3bf1d3afb95103a07519fdadc36323e6 | 1dc1a1a4e717c20112517501c43f9a966ab0c0e5 | /R/as_sf_fun.R | f7f9c7e264ce6556668b0c7ca26a99bcad389a94 | [
"MIT"
] | permissive | Robinlovelace/stplanr | 5d11640b9b644e40d81b97ee1a2debb77ffb4e26 | d1f10fe2335c2494ba153fd09675756e2c1572b3 | refs/heads/master | 2021-01-23T09:02:00.128500 | 2018-09-13T08:19:36 | 2018-09-13T08:19:36 | 30,063,520 | 14 | 4 | null | null | null | null | UTF-8 | R | false | false | 692 | r | as_sf_fun.R | #' Convert functions support sf/sp
#'
#' @param input Input object - an sf or sp object
#' @param FUN A function that works on sp/sf data
#' @param ... Arguments passed to \code{FUN}
#' @aliases as_sp_fun
as_sf_fun <- function(input, FUN, ...) {
if(is(object = input, class2 = "sf")) {
input <- as(object = input, Class = "Spatial")
}
res <- FUN(input)
if(is(object = res, class2 = "Spatial")) {
res <- sf::st_as_sf(res)
}
return(res)
}
as_sp_fun <- function(input, FUN, ...) {
if(is(object = input, class2 = "Spatial")) {
input <- sf::st_as_sf(input)
}
res <- FUN(input)
if(is(object = res, class2 = "sf")) {
res <- as(res, "Spatial")
}
return(res)
} |
92e8fbc94b9819a4ca95169faaa46a633d479fd4 | 72f8342f7931b743eaafe9035108e5fce080e844 | /query_urine.R | 9e95b6477caa42827f7e92ccb4c69232573e5868 | [] | no_license | harshblue/shockalert-documented | c01ebac1ad9b4df6cd1c6467b5e2aa81f799d76e | 87d05f108b0c6e95ec3b680e0be1c78ac9b2f7db | refs/heads/master | 2022-02-19T23:24:18.021333 | 2019-08-20T21:30:37 | 2019-08-20T21:30:37 | 214,829,084 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,103 | r | query_urine.R | rm(list=ls())
library(RPostgreSQL)
library(pracma)
library(tictoc)
user = "postgres"
password = "postgres"
db = "mimic"
urine.items = c(40405,40428,40534,41857,42001,42362,42463,42507,42510,42556,42676,43171,43173,43175,40288,42042,42068,42111,42119,42209,40715,40056,40061,40085,40094,40096,43897,43931,43966,44080,44103,44132,44237,44313,43348,43355,43365,43372,43373,43374,43379,43380,43431,43462,43522,44706,44911,44925,42810,42859,43093,44325,44506,43856,45304,46532,46578,46658,46748,40651,40055,40057,40065,40069,44752,44824,44837,43576,43589,43633,43811,43812,46177,46727,46804,43987,44051,44253,44278,46180,45804,45841,45927,42592,42666,42765,42892,43053,43057,42130,41922,40473,43333,43347,44684,44834,43638,43654,43519,43537,42366,45991,227489,45415,226627,226631)
query = sprintf("SELECT * FROM outputevents where itemid in (%s)", paste(urine.items,collapse=","))
Sys.setenv(TZ="GMT")
tic()
connection = dbConnect(PostgreSQL(), user=user, password=password, dbname=db)
urine.data = dbGetQuery(connection, query)
dbDisconnect(connection)
toc()
saveRDS(urine.data,file="urine.data.rds") |
8731f5ef17f60673a5d8de20555613f692fdfd8f | f91f4e92e92c53987a9e647fe99d13d24a40fbdb | /Cover.R | f050917b2e69fee6a87f04f2a7f97732d8bd89d6 | [] | no_license | jwillou/lab-coding-project | 243a38ea16a592c97690621148234ecf55e97ae0 | bca525b61f006bbf0f3f99ce2411348a49e84227 | refs/heads/main | 2023-06-23T05:54:17.314262 | 2021-07-14T15:55:26 | 2021-07-14T15:55:26 | 368,275,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,013 | r | Cover.R | #setup
setwd("/Users/jannawilloughby/GDrive/Willoughby lab/summer coding project/lab-coding-project/") #set working directory
directory = getwd()
outdir = paste(directory,"/output/",sep="") #directory to save model output
source(paste(directory, "/source/FunctionSourcer.R", sep = '')) #source functions and set source directory
#simulation parameters
lifespan = 3 #max number of years an individual can live
years = 50 #number of years to run each replicate/simulated population
K = 100
fecundity = 1
maturity = 1
prop.female = 0.5
#generate list of combinations of parameter values to simulate
replicates = expand.grid(lifespan, years, K, fecundity, maturity, prop.female)
colnames(replicates) = c("lifespan", "years", "K", "fecundity", "maturity", "prop.female")
#run model iterating over parameters in replicates
for(r in 1:nrow(replicates)){
RunModel(replicates, r, directory)
}
|
12d96b395d12068babb4a3937e54220740621a08 | 25298b75d8e54e34261ce7816c9ed95774566dbc | /man/ci_normal.Rd | ea184b0ccd26e756e9591ba70d88703105e11213 | [] | no_license | BroadbentJim/MendelianRandomization | 7946787c662beee9c5f7d69189f655c1b4b2425d | 100d624bae0c5ac296887493c46b0b64ed656d8f | refs/heads/master | 2022-12-07T02:10:17.287876 | 2020-09-03T11:30:24 | 2020-09-03T11:30:24 | 289,373,305 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 666 | rd | ci_normal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExtraFunctions.R
\name{ci_normal}
\alias{ci_normal}
\title{Calculate confidence intervals using the normal distribution}
\usage{
ci_normal(type, mean, se, alpha)
}
\arguments{
\item{type}{"l" for lower, "u" for upper.}
\item{mean}{Causal estimate.}
\item{se}{Standard error of estimate.}
\item{alpha}{Significance level.}
}
\value{
Numeric value of confidence interval limit.
}
\description{
Internal function for calculating confidence intervals using the normal distribution.
}
\details{
None.
}
\examples{
ci_normal(type = "l", mean = 0, se = 1, alpha = 0.05)
}
\keyword{internal}
|
8884ce5a10c9d1872cdc06d4b85a1d7d61147687 | 53f63e15f76212a7a3fb5417458c09d96a84cd1b | /project.R | ef6045d6437d183993e5a522a0da57ec9496f647 | [] | no_license | codemaster94sb/MiningAndManagingBigData | e82c82301fca790a21c36170ba1ba0249d3f820f | f1b9da42ce4dbea9694ae65169b4ebca1bc510d7 | refs/heads/master | 2020-04-29T18:36:28.749670 | 2019-03-18T16:49:43 | 2019-03-18T16:49:43 | 176,328,980 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,168 | r | project.R | # import data from csv
data = read.csv("crimes2017.csv")
# remove all the lines with NA
data.clean = na.omit(data)
# get summary of the field primary type
summary_primaryType = summary(data.clean$Primary.Type,maxsum = 10)
# calculate the percentage of each category
piepercent = paste(round(100*summary_primaryType/sum(summary_primaryType),2),"%")
# draw pie chart
pie(summary_primaryType,piepercent,col=rainbow((length(summary_primaryType))),main = "Distribution of Primary Type of Crimes")
# draw legend
legend("topright",names(summary_primaryType),cex=0.5,fill=rainbow(length(summary_primaryType)))
# get summary of community area
summary_communityArea = table(data.clean$Community.Area)
# sort areas according to number of crimes
summary_communityArea = sort(summary_communityArea,decreasing = TRUE)
# get the 20 areas with most number of cimes
summary_communityArea = summary_communityArea[1:20]
barplot(
height = summary_communityArea,
names.arg = names(summary_communityArea),
xlab = "Area Code",
ylab = "Number of Crimes",
main="Crimes for Each Area",
col="yellow")
# get arrest rate for different type of crimes
arrest_type_true = rep(0,10)
arrest_type_true_percentage = rep(0,10)
for(i in 1:10){
arrest_type_true[i] = dim(data.clean[data.clean$Arrest == "true" & data.clean$Primary.Type == names(summary_primaryType[i]),])[1]
arrest_type_true_percentage[i] = paste(round(arrest_type_true[i]*100 / summary_primaryType[i],2),"%")
}
# draw pie chart
pie(arrest_type_true,arrest_type_true_percentage,col=rainbow((length(arrest_type_true))),main = "Arrest rate for different type of crimes")
# draw legend
legend("topright",names(summary_primaryType),cex=0.8,fill=rainbow(length(arrest_type_true)))
# draw crimes based on their severity
crime_type = rep(0,10)
crime-types_order = rep(0,10)
crime_types <- read.csv("crimes2017.csv")
plot(
x=crime_types$Primary,
main="Crime Visualisation",
xlab="catogery of crime",
ylab="Count of the crime")
summary_communityArea
dotchart(
x=table(crime_types$Primary.Type),
main="Crime Visualisation",
ylab="category of crime",
xlab="Count of the crime"
)
primtype <- table(crime_types$Primary.Type)
primtype<-primtype[1:10]
primtype <- factor(primetype)
primtype
primtype
primtype = primtype[1:5]
primtype = factor(primtype)
areas = areas[1:5]
areas
mtcars
areas = (1:5)
y_array = rep(0,15)
label_array = rep(0,15)
counter = 0
for (i in areas){
tmp = data.clean[data.clean$Community.Area == i,]
summary_tmp = summary(tmp$Primary.Type,maxsum=3)
for(j in seq(1:3)){
y_array[counter*3+j] = summary_tmp[j]
label_array[counter*3+j] = names(summary_tmp[j])
}
counter = counter + 1
}
y_array
label_array
g_areas <- matrix(list(), nrows=5, ncols=3)
mat[[1,1]]
mat[[1,2]]
mat[[1,3]]
mat[[2,1]]
mat[[2,2]]
mat[[2,3]]
mat[[3,1]]
mat[[3,2]]
mat[[3,3]]
mat[[4,1]]
mat[[4,2]]
mat[[4,3]]
mat[[5,1]]
mat[[5,2]]
mat[[5,3]]
dotchart(
x=y_array,
labels=label_array,
groups = c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5),
xlab="Counts of Crimes",
pch=16
)
|
da6776c97d03e5a075a6ea990bcd0c19610bfc59 | 4f781c1417ab399d68a7c37057ec2be91fc0330f | /examples/example_getPairwiseDistance.R | 2744f131b02ec3db0920998acbf1880da4ce7a09 | [] | no_license | ocouriot/TuktuTools | 6ed1142887c13bb49fe958507593204833339397 | eac29a9bdbebcdaf9e8cff848af4b3652512e3dc | refs/heads/main | 2023-06-23T18:41:51.620062 | 2023-06-16T18:16:03 | 2023-06-16T18:16:03 | 333,524,590 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,057 | r | example_getPairwiseDistance.R | data(caribou)
# subset on the two individuals which have monitoring the same year (Vixen and Comet, in 2007)
# and transform into Simple Feature object
b.subset <- caribou %>% plyr::mutate(yday = yday(Time)) %>%
arrange(ID, Time) %>% subset(Year == 2007) %>%
st_as_sf(coords = c("Lon","Lat")) %>% st_set_crs(4326) %>%
mutate(Lon = st_coordinates(.)[,1], Lat = st_coordinates(.)[,2])
b.list <- dlply(b.subset, "ID", st_as_sf) # create a list of the sf by individual
# estimate the pairwise distances
distance.df <- getPairwiseDistances(b.list)
# Figure of the distance between the two individuals over time
plot(distance/1000~Date, data = distance.df, type = "l", ylab = "Distance between individuals (km)",
xlab ="Date")
# Figure of the distance between the two individuals with the moments they were less than 5km apart
plot(distance/1000~Date, data = distance.df, type = "l", ylab = "Distance between individuals (km)",
xlab ="Date")
points(distance/1000~Date, data = distance.df[distance.df$distance<5000,], pch = 19, col = "red")
|
fa60af9513a7f6404271d984729cfc6e5a008c74 | 726a77aaedf2fe3025763952545b7d8f2955cebc | /cachematrix.R | 94e79d19e5b6c48e13a2ed7b4839cf98e9f4ce9b | [] | no_license | tjzeeman/ProgrammingAssignment2 | f6e2216b0b27c11072dfb16e926ba881aac8682f | b8af8fa8887bf6157de0e581dee28be43eaca728 | refs/heads/master | 2021-01-18T02:33:45.105940 | 2015-06-21T17:14:14 | 2015-06-21T17:14:14 | 37,780,759 | 0 | 0 | null | 2015-06-20T18:26:12 | 2015-06-20T18:26:12 | null | UTF-8 | R | false | false | 970 | r | cachematrix.R | ## Module for working with matrices and their inverse where inverted matrices
## are cached for performance reasons.
## Create a new instance of a matrix which can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Solve a matrix such that if it was already solved (inverted) before a cached answer is returned instead of
## doing all the calculations again. Any answer calculated will be cached.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
50ccf3e7fe460240cd22f28b837647610d4051d9 | 93ccc9369c0d6c1b6f2ddb4a76165efdbe61a8a5 | /man/graphiT.Rd | 778d18dab80af0bf5adcfb7f4d87eb54b8fbfc26 | [] | no_license | lvaudor/graphiT | 4e02dca47d9ba15b249e619548283871d4822a07 | 53b4d1ddbcf9bf672fb4bc37eb257ce923f9fad1 | refs/heads/master | 2020-05-21T12:28:18.003907 | 2017-07-03T09:03:50 | 2017-07-03T09:03:50 | 54,575,749 | 7 | 0 | null | null | null | null | UTF-8 | R | false | true | 442 | rd | graphiT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphiT.R
\name{graphiT}
\alias{graphiT}
\title{Launch the Graphic ToolKat}
\usage{
graphiT()
}
\description{
This function allows you to launch the Graphic ToolKat interface. This is a Shiny App designed to help users produce graphics.
For more information see package's vignette.
}
\examples{
graphiT()
}
\keyword{graphiT,}
\keyword{graphic,}
\keyword{toolkat}
|
d19b3efbb2d80b26aef0ff31b6cba4a45bf60a09 | 1d87df99223ca911dde7b14e37ce0e748dce5b7d | /man/lle.Rd | d0539393b85bb4b8f962ca5fe7bb8be9205ea824 | [] | no_license | xinyic324/StatComp20039 | bfa40a673cadc6e734710495bde35d790c2042d0 | 1fe65b8a572365020acba7b49a880f2622c7dc24 | refs/heads/master | 2023-02-02T17:32:36.770515 | 2020-12-20T13:21:45 | 2020-12-20T13:21:45 | 323,040,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 438 | rd | lle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatCompR.R
\name{lle}
\alias{lle}
\title{Locally linear embedding using R}
\usage{
lle(X, d, k)
}
\arguments{
\item{X}{the Sample matrix(numeric)}
\item{d}{the number of dimension of sample matrix}
\item{k}{the number of nearest neighbors}
}
\value{
a random sample matrix
}
\description{
Use R package \code{lle} to achieve data dimensionality reduction
}
|
a21ce1a1b06c6fb720f648f3e675c34d734ea91b | 2267ad83d4ec90d656332f2d47a6d431785b01d5 | /run_analysis.R | 365a54867755c4456919db7ad9f7de0afbb7e4b0 | [] | no_license | seslava/GettingAndCleaningData | 6c201929fa50c35b5cedc24df847e784d9bacf4a | 2f4a2fd9e39531d0f204a27253d4a684d262c11b | refs/heads/master | 2021-07-09T05:32:33.162105 | 2017-10-09T02:36:32 | 2017-10-09T02:36:32 | 106,225,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19 | r | run_analysis.R | ## run_analysis.R
|
3cbb9bf547f42347e7c7e5b9397c64ff30ec5361 | 72338e12782d6b456b747daf83b41a9775edcded | /R Code/Data_Manipulation.R | b6086a116561776d1018f4e4feaaa2558b2d4440 | [] | no_license | claredoc/EEES_Seminar | c157d6d012fa5972ebdadb53795c21ec943fe0bb | c7ebb6e66f305461df87d101d55d532cb327bbd6 | refs/heads/master | 2020-03-08T16:44:13.277630 | 2018-04-05T18:26:35 | 2018-04-05T18:26:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 501 | r | Data_Manipulation.R | #R code for Data Manipulation
#Created by JAB March 2018
# Load Libraries ####
library(tidyverse)
library(lubridate)
tidyverse_packages()
library(hms)
# Please add a comment below with your name to try using git actions ####
# Braden testing push #
# Morgan testing push #
<<<<<<< HEAD
# Clare testing push #
||||||| merged common ancestors
# Clare testing push #
=======
# Clare testing push #
# frank is also testing push #
#roger roger
>>>>>>> 9ba28e3c2340ded5caf52e69439a2b1cbd8fabf2
|
b3d6a4b34ae37a4e3ea9571cfaf40a91434439a0 | c3e693beafe67e6fb20afd158e9876770a3d3d3c | /Exhaustive-search/totvar/lt_dijkstra_uniuni_master.r | c75057f258cc0131028cb42578fee70069c6c458 | [] | no_license | rmuraglia/Schmidler | 8fd307f7393283d454c38600c29cc8f0adce6279 | 3026a84bf56da1477e2bf7f78d20c0f752de82cb | refs/heads/master | 2020-12-25T17:01:08.116539 | 2016-08-11T23:55:41 | 2016-08-11T23:55:41 | 57,866,522 | 0 | 0 | null | 2016-06-15T17:44:25 | 2016-05-02T04:44:07 | R | UTF-8 | R | false | false | 2,495 | r | lt_dijkstra_uniuni_master.r | # lt_dijkstra_uniuni_master.r
# run dijkstra search for total variation distance in a grid indexed by lambda and temperature (here represented as sigma)
# initial and target distributions are normal distributions
###########
# PART 1: set run parameters
###########
# store first time point for book keeping
time1<-proc.time()
# grab command line arguments if appropriate number are provided
args<-commandArgs(trailingOnly=TRUE)
if (length(args)==7) {
filname<-as.character(args[1])
move.jump<-as.numeric(args[2])
reps<-as.numeric(args[3])
sigma.min<-as.numeric(args[4])
sigma.max<-as.numeric(args[5])
sigma.numpoints<-as.numeric(args[6])
lambda.numpoints<-as.numeric(args[7])
} else {
filname<-'ltsearch-03'
move.jump<-4
reps<-100
sigma.min<-0.5
sigma.max<-2.5
sigma.numpoints<-9
lambda.numpoints<-11
}
##########
# PART 2: print run info
##########
# create file sink for output. double output to console too
sink(file=paste(filname, '.sink', sep=''),split=T)
print(paste('Job started at ', date(), sep=''))
print(paste('Output files carry the ', filname, ' prefix.', sep=''))
print(paste('The move jump coefficient is ', move.jump, sep=''))
print(paste('Each distribution was sampled ', reps, ' times.', sep=''))
##########
# PART 3: run dijkstra's algorithm
##########
print('Importing auxillary scripts...')
source('lt_dijkstra_uniuni_import.r')
print(paste('Initial set up complete at ', date(), sep=''))
print(paste('number of nodes: ', nrow(point.grid), sep=''))
print(paste('number of potential moves per node: ', nrow(move.universe), sep=''))
print('initial state: ')
print(init)
print('target state: ')
print(target)
print('lambda values: ')
print(lambda.points)
print('sigma values: ')
print(sigma.points)
print('Running Dijkstra\'s algorithm...')
ann.map<-do.dijkstra(map)
time2<-proc.time()
print(paste('Dijkstra\'s algorithm complete. Time for search was:'))
print(time2-time1)
#########
# PART 4: plot results
#########
print('Visualizing results')
# check solution path - it may need manual tuning before feeding to this final visualization
source('lt_dijkstra_uniuni_viz.r')
time3<-proc.time()
print('Visualization complete. Time for viz was:')
print(time3-time2)
########
# PART 5: clean up and finish
########
time4<-proc.time()
print('R script complete')
print('Total time elapsed:')
print(time4-time1)
save.image(file=paste(filname, '.RData', sep=''))
print('Quitting R...')
sink()
|
da90b4b8bcf1e7ab87e97c567403acc9d3bc9ec1 | 42c2727741417413b6126a7e8e5c8922e0204446 | /BCconf/man/est.conf.num.Rd | fa12d17178b13a28ae9f71500c608e7f059d20dd | [] | no_license | chrismckennan/BCconf | 9ad610e6e5bb93e6d7dfc737d55e53dd7ea4283e | b1b5d7b690f07f175014d7e27b42627a4d8edafa | refs/heads/master | 2022-04-30T15:44:23.695070 | 2022-04-20T18:33:13 | 2022-04-20T18:33:13 | 136,989,579 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,178 | rd | est.conf.num.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConfounderCorrection.R
\name{est.conf.num}
\alias{est.conf.num}
\title{Estimate the number of latent factors}
\usage{
est.conf.num(Y, X, method = c("bcv", "ed"), max.r, nRepeat = 20, ...)
}
\arguments{
\item{Y}{a p x n data matrix, where p = #of units (i.e. genes) and n = #of samples}
\item{X}{an n x d model matrix, where d = total number of observed covariates to include in the model. This includes the covariates of interest (i.e. disease status), as well as nuisance covariates like the intercept, plate number, DNA concentration, etc.}
\item{method}{Either bi-cross validation ("bcv") or eigenvalue distance ("ed"), with "bcv" being the default. See \url{https://cran.r-project.org/web/packages/cate/cate.pdf} for more details.}
\item{max.r}{Maximum number of latent factors to consider}
\item{nRepeat}{Number of times to perform bcv}
\item{...}{Additional parameters to include in \code{cate::est.factor.num}.}
}
\value{
A \code{cate::est.factor.num} object.
}
\description{
Uses the \code{est.factor.num} function from the \code{CATE} package to estimate the number of latent factors
}
|
686f26ec007cf8f8aa2d53717c6cc16fd25140fb | 0224b084d380102289bd1cddca27e2e720037f9d | /tests/testthat/test_formatLongTable.R | d3d2e4715987b41b98173dce28b5730a37c6ff01 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kaleydodson/SOCDRaHR2 | be655ed52956dbedd46477ccb46f9c6171d8329e | f1ba546d879b1fc6bfb1f1d43a6a8d626557ce31 | refs/heads/master | 2023-08-03T02:38:32.299285 | 2021-09-20T15:39:00 | 2021-09-20T15:39:00 | 377,954,407 | 0 | 0 | BSD-2-Clause | 2021-09-15T19:44:32 | 2021-06-17T20:33:59 | null | UTF-8 | R | false | false | 20,173 | r | test_formatLongTable.R | #To run just this file
#test_file('tests/testthat/test_formatLongTable.R')
library(testthat)
context('formatLongTable.R')
testthat::test_that("test one variable one table",{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~siteID, ~sampleID, ~SOC,
'river', 'S1', '5.2',
'lake', 'S2', '3')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'siteID', 'site_name', 'id', '',
'T1', 'sampleID', 'layer_name', 'id', '',
'T1', 'SOC', 'soc', 'value', ''))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'site_name',
'sample', 'layer_name',
'sample', 'soc')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
expectedOutput <- list(sample=
as.data.table(read.csv(text = '"layer_name_id","site_name_id","header","entry","variable","type"
"S1","river","SOC","5.2","soc","value"
"S2","lake","SOC","3","soc","value"', colClasses = 'character')))
#setkey(expectedOutput$sample, 'header')
cols <- setdiff(names(expectedOutput$sample), c('entry'))
expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
#match number of tables
testthat::expect_equal(names(expectedOutput), names(output))
#match sample table
testthat::expect_identical(expectedOutput$sample, output$sample)
})
testthat::test_that('one header assigned to two variables [[wishlist]]',{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~sampleID, ~depth,
'S1', '1',
'S1', '3')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'sampleID', 'profile_name', 'id', '',
'T1', 'depth', 'layer_name', 'value', '',
'T1', 'depth', 'layer_bottom', 'value', ''))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'profile_name',
'sample', 'layer_name',
'sample', 'layer_bottom')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
})
testthat::test_that("test one variable, two methods, one table",{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~siteID, ~sampleID, ~SOC, ~SOC_flag, ~SOC_method,
'river', 'S1', '5.2', 'ISCN', 'gap filled',
'lake', 'S2', '3', NA, 'provided')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'siteID', 'site_name', 'id', '',
'T1', 'sampleID', 'layer_name', 'id', '',
'T1', 'SOC', 'soc', 'value', '',
'T1', 'SOC_flag', 'soc', 'method', '',
'T1', 'SOC_method', 'soc', 'method', ''
))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'site_name',
'sample', 'layer_name',
'sample', 'soc')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
#write.csv(output$sample, row.names=FALSE)
expectedOutput <- list(sample=data.table::as.data.table(
read.csv(text = '"layer_name_id","site_name_id","header","entry","variable","type"
"S1","river","SOC","5.2","soc","value"
"S2","lake","SOC","3","soc","value"
"S1","river","SOC_flag","ISCN","soc","method"
"S1","river","SOC_method","gap filled","soc","method"
"S2","lake","SOC_method","provided","soc","method"', stringsAsFactors=FALSE)))
#setkey(expectedOutput$sample, 'header')
cols <- setdiff(names(expectedOutput$sample), c('entry'))
expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
#match number of tables
testthat::expect_equal(names(expectedOutput), names(output))
#match sample table
testthat::expect_identical(expectedOutput$sample, output$sample)
})
testthat::test_that('test two variables with one shared unit column',{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~siteID, ~sampleID, ~SOC, ~BD, ~unit,
'river', 'S1', '5.2', '1', 'g cm-3',
'lake', 'S2', '3', '2', 'kg m-3')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'siteID', 'site_name', 'id', '',
'T1', 'sampleID', 'layer_name', 'id', '',
'T1', 'SOC', 'soc', 'value', '',
'T1', 'BD', 'bulk_density', 'value', '',
'T1', 'unit', 'soc', 'unit', '',
'T1', 'unit', 'bulk_density', 'unit', ''))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'site_name',
'sample', 'layer_name',
'sample', 'soc',
'sample', 'bulk_density')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
expectedOutput <-list(sample = data.table::as.data.table(
tibble::tribble(~layer_name_id, ~site_name_id, ~header, ~entry, ~variable, ~type,
"S1","river","BD","1","bulk_density","value",
"S2","lake","BD","2","bulk_density","value",
"S1","river","SOC","5.2","soc","value",
"S2","lake","SOC","3","soc","value",
"S1","river","unit","g cm-3","bulk_density","unit",
"S2","lake","unit","kg m-3","bulk_density","unit",
"S1","river","unit","g cm-3","soc","unit",
"S2","lake","unit","kg m-3","soc","unit")
))
cols <- setdiff(names(expectedOutput$sample), c('entry'))
expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
#match number of tables
testthat::expect_equal(names(expectedOutput), names(output))
})
testthat::test_that("test two variable one table",{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~siteID, ~sampleID, ~SOC, ~BD,
'river', 'S1', '5.2', '1.5',
'lake', 'S2', '3', '1.1')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'siteID', 'site_name', 'id', '',
'T1', 'sampleID', 'layer_name', 'id', '',
'T1', 'SOC', 'soc', 'value', '',
'T1', 'BD', 'bulk_density', 'value', ''))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'site_name',
'sample', 'layer_name',
'sample', 'soc',
'sample', 'bulk_density')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
expectedOutput <- list(sample=data.table::as.data.table(
tibble::tribble(~layer_name_id, ~site_name_id,~header,~entry,~variable,~type,
"S1","river","BD","1.5","bulk_density","value",
"S2","lake","BD","1.1","bulk_density","value",
"S1","river","SOC","5.2","soc","value",
"S2","lake","SOC","3","soc","value")))
#setkey(expectedOutput$sample, 'header')
cols <- setdiff(names(expectedOutput$sample), c('entry'))
expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
#match tables
testthat::expect_equal(expectedOutput, output)
})
testthat::test_that('test hard entries',{
testInput <- list(T1 = data.table::as.data.table(
tibble::tribble(~siteID, ~sampleID, ~SOC,
'river', 'S1', '5.2',
'lake', 'S2', '3')))
inputKey <- data.table::as.data.table(
tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
'T1', 'siteID', 'site_name', 'id', '',
'T1', 'sampleID', 'layer_name', 'id', '',
'T1', 'SOC', 'soc', 'value', '',
'T1', '', 'soc', 'unit', 'g cm-3'))
outputKey <- data.table::as.data.table(
(tibble::tribble(~table, ~variable,
'sample', 'site_name',
'sample', 'layer_name',
'sample', 'soc')))
output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
expectedOutput <- list(sample=data.table::as.data.table(
tibble::tribble(~layer_name_id,~site_name_id,~header,~entry,~variable,~type,
"S1","river","SOC","5.2","soc","value",
"S2","lake","SOC","3","soc","value")))
#setkey(expectedOutput$sample, 'header')
cols <- setdiff(names(expectedOutput$sample), c('entry'))
expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
#match sample table
testthat::expect_equal(expectedOutput, output)
})
# formatLongTable should split one wide source table into two long target
# tables ('site' and 'sample') according to the source and target keys.
testthat::test_that("test two target table",{
  # One wide input table mixing site-level (MAT) and layer-level (SOC, BD) columns
  testInput <- list(T1 = data.table::as.data.table(
    tibble::tribble(~siteID, ~sampleID, ~MAT, ~SOC, ~BD,
                    'river', 'S1', '15', '5.2', '1.5',
                    'lake', 'S2', '13', '3', '1.1')))
  # Source key: which input column feeds which target variable, and in what role
  inputKey <- data.table::as.data.table(
    tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
                    'T1', 'siteID', 'site_name', 'id', '',
                    'T1', 'sampleID', 'layer_name', 'id', '',
                    'T1', 'SOC', 'soc', 'value', '',
                    'T1', 'BD', 'bulk_density', 'value', '',
                    'T1', 'MAT', 'mat', 'value', ''))
  # Target key: which variables land in which output table
  outputKey <- data.table::as.data.table(
    (tibble::tribble(~table, ~variable,
                     'site', 'site_name',
                     'site', 'mat',
                     'sample', 'site_name',
                     'sample', 'layer_name',
                     'sample', 'soc',
                     'sample', 'bulk_density')))
  output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
  expectedOutput <- list(
    site = data.table::as.data.table(
      tibble::tribble(~site_name_id, ~header,~entry,~variable,~type,
                      "river","MAT","15","mat","value",
                      "lake","MAT","13","mat","value")),
    sample=data.table::as.data.table(
      tibble::tribble(~layer_name_id,~site_name_id,~header,~entry,~variable,~type,
                      "S1","river","BD","1.5","bulk_density","value",
                      "S2","lake","BD","1.1","bulk_density","value",
                      "S1","river","SOC","5.2","soc","value",
                      "S2","lake","SOC","3","soc","value")))
  #setkey(expectedOutput$site, 'header')
  #setkey(expectedOutput$sample, 'header')
  # Every column except 'entry' is expected back as a factor
  cols <- setdiff(names(expectedOutput$site), c('entry'))
  expectedOutput$site[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  cols <- setdiff(names(expectedOutput$sample), c('entry'))
  expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  #match number of tables
  testthat::expect_equal(expectedOutput, output)
  testthat::expect_equivalent(expectedOutput$sample, output$sample)
  testthat::expect_equivalent(expectedOutput$site, output$site)
})
# Two source tables (site-level T1, layer-level T2) should be routed into the
# corresponding 'site' and 'sample' target tables.
testthat::test_that("test two source and two target table",{
  testInput <- list(
    T1 =data.table::as.data.table(
      tibble::tribble(~siteID, ~MAT,
                      'river','15',
                      'lake', '13')),
    T2 = data.table::as.data.table(
      tibble::tribble(~siteID, ~sampleID, ~SOC, ~BD,
                      'river', 'S1', '5.2', '1.5',
                      'lake', 'S2', '3', '1.1')))
  # Both source tables map their siteID onto the shared 'site_name' id variable
  inputKey <- data.table::as.data.table(
    tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
                    'T2', 'siteID', 'site_name', 'id', '',
                    'T1', 'siteID', 'site_name', 'id', '',
                    'T2', 'sampleID', 'layer_name', 'id', '',
                    'T2', 'SOC', 'soc', 'value', '',
                    'T2', 'BD', 'bulk_density', 'value', '',
                    'T1', 'MAT', 'mat', 'value', ''))
  outputKey <- data.table::as.data.table(
    (tibble::tribble(~table, ~variable,
                     'site', 'site_name',
                     'site', 'mat',
                     'sample', 'site_name',
                     'sample', 'layer_name',
                     'sample', 'soc',
                     'sample', 'bulk_density')))
  output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
  expectedOutput <- list(
    site = data.table::as.data.table(
      tibble::tribble(~site_name_id,~header,~entry,~variable,~type,
                      "river","MAT","15","mat","value",
                      "lake","MAT","13","mat","value")),
    sample=data.table::as.data.table(
      tibble::tribble(~layer_name_id,~site_name_id, ~header,~entry, ~variable, ~type,
                      "S1","river","BD","1.5","bulk_density","value",
                      "S2","lake","BD","1.1","bulk_density","value",
                      "S1","river","SOC","5.2","soc","value",
                      "S2","lake","SOC","3","soc","value")))
  # Every column except 'entry' is expected back as a factor
  cols <- setdiff(names(expectedOutput$site), c('entry'))
  expectedOutput$site[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  cols <- setdiff(names(expectedOutput$sample), c('entry'))
  expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  #match number of tables
  testthat::expect_equal(expectedOutput, output)
})
# End-to-end style test with realistic columns: site metadata (coordinates
# with a shared 'datum' unit column feeding two variables), plus layer data
# with method/unit annotations.
testthat::test_that("quasi real test",{
  testInput <- list(
    site = data.table::as.data.table(
      tibble::tribble(~siteID, ~site_description, ~state, ~country, ~lat, ~lon, ~datum,
                      'lake', 'up by the lake', 'Maine', 'USA', '45.8200', '-68.8706', 'WGS84',
                      'river', 'down by the river', 'New Hampshire', 'USA', '46.52016', '-68.37050', 'NAD83')),
    layer = data.table::as.data.table(
      tibble::tribble(~siteID, ~sampleID, ~SOC, ~BD, ~BD_notes, ~color,
                      'river', 'S1', '5.2', '1.5', 'dry, not seive', 'black',
                      'lake', 'S1', '3', '1.1', 'dry and 2mm seive', 'brown')))
  # Note: the single 'datum' column is keyed TWICE, as the unit of both
  # latitude and longitude; 'soc' gets a hard-coded unit via the entry column.
  inputKey <- data.table::as.data.table(
    tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
                    'site', 'siteID', 'site_name', 'id', '',
                    'site', 'site_description', 'description', 'string', '',
                    'site', 'state', 'state', 'string', '',
                    'site', 'country', 'country', 'string','',
                    'site', 'lat', 'latitude', 'value', '',
                    'site', 'datum', 'latitude', 'unit', '',
                    'site', 'lon', 'longitude', 'value', '',
                    'site', 'datum', 'longitude', 'unit', '',
                    'layer', 'siteID', 'site_name', 'id', '',
                    'layer', 'sampleID', 'layer_name', 'id', '',
                    'layer', 'SOC', 'soc', 'value', '',
                    'layer', '','soc', 'unit', 'g cm-3',
                    'layer', 'BD', 'bulk_density', 'value', '',
                    'layer', 'BD_notes', 'bulk_density', 'method', '',
                    'layer', 'color', 'color', 'string', ''))
  outputKey <- data.table::as.data.table(
    (tibble::tribble(~table, ~variable,
                     'site', 'site_name',
                     'site', 'description',
                     'site', 'state',
                     'site', 'country',
                     'site', 'latitude',
                     'site', 'longitude',
                     'sample', 'site_name',
                     'sample', 'layer_name',
                     'sample', 'soc',
                     'sample', 'bulk_density',
                     'sample', 'color')))
  output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
  # Expected rows are ordered by header, then by the source row order
  expectedOutput <- list(
    site = data.table::as.data.table(
      tibble::tribble(~site_name_id, ~header, ~entry, ~variable, ~type,
                      "lake","country","USA","country","string",
                      "river","country","USA","country","string",
                      "lake","datum","WGS84","latitude","unit",
                      "river","datum","NAD83","latitude","unit",
                      "lake","datum","WGS84","longitude","unit",
                      "river","datum","NAD83","longitude","unit",
                      "lake","lat","45.8200","latitude","value",
                      "river","lat","46.52016","latitude","value",
                      "lake","lon","-68.8706","longitude","value",
                      "river","lon","-68.37050","longitude","value",
                      "lake","site_description","up by the lake","description","string",
                      "river","site_description","down by the river","description","string",
                      "lake","state","Maine","state","string",
                      "river","state","New Hampshire","state","string")
    ),
    sample=data.table::as.data.table(
      tibble::tribble(~layer_name_id,~site_name_id,~header,~entry,~variable,~type,
                      "S1","river","BD","1.5","bulk_density","value",
                      "S1","lake","BD","1.1","bulk_density","value",
                      "S1","river","BD_notes","dry, not seive","bulk_density","method",
                      "S1","lake","BD_notes","dry and 2mm seive","bulk_density","method",
                      "S1","river","color","black","color","string",
                      "S1","lake","color","brown","color","string",
                      "S1","river","SOC","5.2","soc","value",
                      "S1","lake","SOC","3","soc","value")))
  # Every column except 'entry' is expected back as a factor
  cols <- setdiff(names(expectedOutput$site), c('entry'))
  expectedOutput$site[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  cols <- setdiff(names(expectedOutput$sample), c('entry'))
  expectedOutput$sample[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  #match number of tables
  testthat::expect_equal(expectedOutput, output)
})
# A site-level variable (MAT) routed into a profile-level output table must
# be replicated across every layer of the matching site.
testthat::test_that("merge site-level inputs variables across profile-level outputs",{
  testInput <- list(
    T1 =data.table::as.data.table(
      tibble::tribble(~siteID, ~MAT,
                      'river','15',
                      'lake', '13')),
    T2 = data.table::as.data.table(
      tibble::tribble(~siteID, ~sampleID, ~SOC,
                      'river', 'S1', '5.2',
                      'river', 'S2', '4',
                      'lake', 'S2', '3')))
  inputKey <- data.table::as.data.table(
    tibble::tribble(~table, ~header, ~variable, ~type, ~entry,
                    'T2', 'siteID', 'site_name', 'id', '',
                    'T1', 'siteID', 'site_name', 'id', '',
                    'T2', 'sampleID', 'layer_name', 'id', '',
                    'T2', 'SOC', 'soc', 'value', '',
                    'T1', 'MAT', 'mat', 'value', ''))
  # All variables, including site-level 'mat', target the 'sample' table
  outputKey <- data.table::as.data.table(
    (tibble::tribble(~table, ~variable,
                     'sample', 'mat',
                     'sample', 'site_name',
                     'sample', 'layer_name',
                     'sample', 'soc')))
  output <- formatLongTable(data.ls = testInput, sourceKey = inputKey, targetKey = outputKey)
  # 'river' has two layers, so its single MAT row fans out into two rows
  expectedOutput <- data.table::as.data.table(
    tibble::tribble(~site_name_id, ~header, ~entry, ~variable, ~type, ~layer_name_id,
                    "lake","MAT","13","mat","value","S2",
                    "river","MAT","15","mat","value","S1",
                    "river","MAT","15","mat","value","S2",
                    "river","SOC","5.2","soc","value","S1",
                    "river","SOC","4","soc","value","S2",
                    "lake","SOC","3","soc","value","S2")
  )
  cols <- setdiff(names(expectedOutput), c('entry'))
  expectedOutput[,(cols) := lapply(.SD, as.factor), .SDcols=cols]
  # Fix: namespace the call -- this file never attaches testthat with
  # library(), so a bare expect_equal() would fail to resolve.
  testthat::expect_equal(output$sample, expectedOutput)
})
|
c7ddeed991e9970ed65b6678ef891d2a17d08d98 | 7bce9556c1dee17fa6067599acf81f3b1fb8b336 | /src/main.R | 241b4cb11d301b00935a69dc9a4fa799c97c5d1b | [] | no_license | cesare-spinoso/UltimateTTT | 2e5ee93decd39613669d7f4baeca6783283a7343 | 5b4076bce5f8292635e1949aa24b69590ffe8b8f | refs/heads/master | 2022-12-01T13:03:42.098329 | 2020-08-24T16:52:51 | 2020-08-24T16:52:51 | 289,972,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 47,055 | r | main.R | ######### Game Dynamics ########
# All the functions that are used to play the game
# Check whether `player` has three-in-a-row on a 3x3 board.
#
# board:  3x3 matrix with entries 0 (empty), 1 or 2 (player marks)
# player: the player id (1 or 2) to test for
#
# Returns TRUE if `player` owns a complete row, column, or diagonal.
# (Cleanup: uses TRUE/FALSE instead of T/F and drops the unused `dimension`
# variable -- the original hard-coded 3x3 indices anyway.)
hasWonBoard <- function(board, player){
  for (k in 1:3) {
    if (all(board[k, ] == player)) return(TRUE)  # row k
    if (all(board[, k] == player)) return(TRUE)  # column k
  }
  if (all(diag(board) == player)) return(TRUE)   # main diagonal
  # anti-diagonal
  if (board[3, 1] == player && board[2, 2] == player && board[1, 3] == player)
    return(TRUE)
  FALSE
}
# Return a list of all legal moves for the current position.
#
# board:       3x3x3x3 array of the full game state (0 = empty cell)
# forcedMove:  c(row, col) of the sub-board the previous move sends us to
# boardStatus: 3x3 matrix; 0 = sub-board still open, 1/2 = already won
#
# Returns a list of length-4 vectors c(boardRow, boardCol, cellRow, cellCol).
# If the forced sub-board is already won OR has no empty cell, the player may
# instead move in any open (non-won) sub-board.
getValidMove <- function(board, forcedMove, boardStatus){
  validMove <- list()
  # Moves inside the forced sub-board, if it has not been won yet
  if (boardStatus[forcedMove[1], forcedMove[2]] == 0){
    for (subX in 1:3)
      for (subY in 1:3)
        if (board[forcedMove[1], forcedMove[2], subX, subY] == 0)
          validMove <- append(validMove, list(c(forcedMove[1], forcedMove[2], subX, subY)))
  }
  # Bug fix: the original flagged the forced sub-board as playable whenever
  # it was un-won, so a FULL-but-un-won sub-board returned an empty list and
  # the game was cut short. Only return early if a move was actually found.
  if (length(validMove) > 0){
    return (validMove)
  }
  # Otherwise: any empty cell in any sub-board that has not been won
  for (x in 1:3)
    for (y in 1:3)
      if (boardStatus[x, y] == 0)
        for (subX in 1:3)
          for (subY in 1:3)
            if (board[x, y, subX, subY] == 0)
              validMove <- append(validMove, list(c(x, y, subX, subY)))
  return (validMove)
}
# Place `player`'s mark at `position` (c(boardRow, boardCol, cellRow, cellCol))
# on the master board. The move is ignored if the cell is already occupied.
# Returns the (possibly updated) board; R copies on modify, so callers must
# reassign the result.
# Bug fix: the variable was misspelled `postion`, which made every call error.
doMove <- function(position, player, board){
  if (board[position[1], position[2], position[3], position[4]] == 0){
    board[position[1], position[2], position[3], position[4]] <- player
  }
  return(board)
}
# Pretty-print one 3x3 tic-tac-toe grid to the console, with a separator
# line between rows.
printBoard <- function(board){
  for (row in 1:3) {
    cat(board[row, 1], " | ", board[row, 2], " | ", board[row, 3], "\n")
    if (row < 3) {
      cat("--------------\n")
    }
  }
}
# Print the full 3x3x3x3 master board.  masterBoard[a, b, , ] is the 3x3
# sub-board at outer position (a, b); each printed line shows one cell-row
# across the three sub-boards of an outer row.
printMasterBoard <- function(masterBoard){
  for (boardRow in 1:3) {
    for (cellRow in 1:3) {
      cat(c(masterBoard[boardRow, 1, cellRow, ], " | ",
            masterBoard[boardRow, 2, cellRow, ], " | ",
            masterBoard[boardRow, 3, cellRow, ], "\n"))
    }
    if (boardRow < 3) {
      cat("--------------------------\n")
    }
  }
}
######### Random Agent ########
# Simulate one complete game of Ultimate Tic-Tac-Toe with BOTH players
# choosing uniformly random legal moves.
#
# theseed: RNG seed, so each episode is reproducible.
# Returns the winner: 0 (tie), 1, or 2.
#
# NOTE(review): the exact sequence of sample() calls defines the RNG stream,
# so reordering statements here changes every simulated game.
simulUTTTRandom <- function(theseed){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # First move: player 1 plays a uniformly random cell anywhere
  player <- 1
  # random method
  move <- sample(c(1,2,3), size = 4, replace = T)
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # The game continue normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    # random move method
    move <- sample(validMoves, size = 1)
    move <- move[[1]]
    # Play the move
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(winner)
}
# run and find win ratio of the random-vs-random baseline
# playing as player 1 - randomly
wins <- 0
draws <- 0
losses <- 0
nepis <- 10000
# Running win/draw/loss ratios after each episode, for plotting convergence
winRatio <- vector("numeric",length = nepis)
drawRatio <- vector("numeric",length = nepis)
lossRatio <- vector("numeric",length = nepis)
for(i in 1:nepis){
  # simulUTTTRandom returns a scalar winner; [[1]] is a no-op kept for
  # symmetry with the agent simulators that return lists
  result <- simulUTTTRandom(i)[[1]]
  if (result == 0)
    draws <- draws + 1
  else if(result == 1)
    wins <- wins + 1
  else
    losses <- losses + 1
  winRatio[i] <- wins/i
  drawRatio[i] <- draws/i
  lossRatio[i] <- losses/i
}
# win ratio of about 40.5%
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend("topright",legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1)
# print the win, loss and draw ratios
cat("The win ratio given a random policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a random policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a random policy was ", lossRatio[nepis],"\n")
######### State Approximation 1 + Q-Learning Agent ########
# Flatten the 3x3x3x3 master board into a length-81 numeric vector.
# Cells are listed left-to-right, top-to-bottom: for each outer row i and
# inner row j, the three cells of sub-boards (i,1), (i,2), (i,3) are appended.
# (Fix: preallocates the result instead of growing it with c() in a loop.)
boardToVector <- function(board){
  vec <- numeric(81)
  pos <- 1
  for(i in 1:3){
    for(j in 1:3){
      vec[pos:(pos + 8)] <- c(board[i,1,j,], board[i,2,j,], board[i,3,j,])
      pos <- pos + 9
    }
  }
  return(vec)
}
# Convert a 3x3 sub-board to a length-9 vector in row-major order
# (row 1 left-to-right, then row 2, then row 3).
# (Fix: replaces the element-by-element c() growth with a vectorized
# transpose -- flattening t(m) column-major yields m in row-major order.)
suboardToVector <- function(subBoard){
  return(as.vector(t(subBoard)))
}
# Map a numeric action in {1, ..., 9} to its (row, col) vector form on a
# 3x3 grid, enumerated row by row: 1 -> (1,1), 2 -> (1,2), ..., 9 -> (3,3).
actionToVecMapping <- function(actionNumerical){
  return(switch(actionNumerical,
                c(1, 1), c(1, 2), c(1, 3),
                c(2, 1), c(2, 2), c(2, 3),
                c(3, 1), c(3, 2), c(3, 3)))
}
# Map a (row, col) action vector on a 3x3 grid to its numeric form in
# {1, ..., 9} (row-major); inverse of actionToVecMapping().
# Returns -1 when the row coordinate is not in {1, 2, 3}.
# Bug fix: the fallback was `return -1` (no parentheses), which evaluates
# `return - 1` and errors at runtime instead of returning -1.
vecToActionMapping <- function(actionVec){
  if(actionVec[1] == 1){
    if(actionVec[2] == 1)
      return(1)
    else if(actionVec[2] == 2)
      return(2)
    else
      return(3)
  }
  else if(actionVec[1] == 2){
    if(actionVec[2] == 1)
      return(4)
    else if(actionVec[2] == 2)
      return(5)
    else
      return(6)
  }
  else if(actionVec[1] == 3){
    if(actionVec[2] == 1)
      return(7)
    else if(actionVec[2] == 2)
      return(8)
    else
      return(9)
  }
  else return(-1)
}
# Interpret `boardVec` as base-3 digits (least-significant digit first) and
# return the corresponding decimal number -- a unique index for each
# ternary board state.  (Redefined with more arguments in later sections.)
stateToDec <- function(boardVec){
  powers <- 3^(seq_along(boardVec) - 1)
  sum(boardVec * powers)
}
# Reward from the perspective of `playingAs` (1 or 2), given the game's
# winner (0 = draw, 1, or 2): 0 for a draw, +100 if `playingAs` won,
# -100 if the opponent won.
reward <- function(winner, playingAs){
  if (winner == 0) {
    return(0)
  }
  if (winner == playingAs) 100 else -100
}
# Simulate one random-vs-random game while recording, for player 1 only, the
# 9-cell sub-board state seen before each of its moves and the action taken.
# Used to generate training data for state approximation 1 (sub-board only).
#
# Returns list(winner, boardStates, currentBoardState, actionList):
#   winner            0 (tie), 1, or 2
#   boardStates       matrix of flattened 81-cell board states (first row all 0)
#   currentBoardState matrix of 9-cell sub-board states before player 1 moves
#   actionList        player 1's actions as numbers in {1,...,9}
# NOTE(review): the sample() call order defines the RNG stream; reordering
# statements changes every simulated game.
simulUTTTAgent1 <- function(theseed){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states
  boardStates <- matrix(0, ncol = 81)
  currentBoardState <- matrix(0, ncol = 9) # for Q learning
  actionList <- list() # for Q learning
  # First move
  player <- 1
  # random method
  move <- sample(c(1,2,3), size = 4, replace = T)
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # add state to tracker
  boardStates <- rbind(boardStates, boardToVector(masterBoard))
  # add action to tracker
  actionList <- append(actionList, vecToActionMapping(forcedMove))
  # The game continue normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    move <- sample(validMoves, size = 1)
    move <- move[[1]]
    if(player == 1){
      # add current board to state tracker
      currentBoardState <- rbind(currentBoardState, suboardToVector(masterBoard[move[1],move[2],,]))
    }
    # make the action
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    if(player == 1){
      # add action to state tracker
      actionList <- append(actionList, vecToActionMapping(forcedMove))
    }
    # Update tracked states
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates,currentBoardState,actionList))
}
# Build a training set of episodes: each entry holds player 1's final reward
# plus the (state, action) history recorded by simulUTTTAgent1().
nepis <- 10000
# Bug fix: rep(NULL, n) is just NULL, so the list was grown one element at a
# time; vector("list", n) actually preallocates all nepis slots.
StateActionReward <- vector("list", nepis)
# playing as player 1
for(i in 1:nepis){
  run <- simulUTTTAgent1(i)
  rewardR <- reward(run[[1]], playingAs = 1)
  StateActionReward[[i]] <- list(rewardR, run[[3]], run[[4]])
}
# Watkins' Q-learning over pre-recorded episodes (state approximation 1).
#
# qInit:       initial action-value matrix, 3^9 sub-board states x 9 actions
# episodeSimu: list of episodes, each list(reward, states, actions) where
#              `states` is a matrix of 9-cell ternary sub-board rows and
#              `actions` are numbers in {1,...,9}
# stepSize:    learning rate alpha
# Returns the updated action-value matrix.
#
# NOTE(review): only the terminal transition carries a nonzero reward, and
# updates are applied in episode order (each update reads earlier ones), so
# the loop order matters.  Assumes every episode records at least 2 states;
# Tt == 1 would make 1:(Tt-1) iterate backwards.
ApplyQLearningAgent1 <- function(qInit,episodeSimu,stepSize){
  qEstim <- qInit
  for(episode in episodeSimu){
    reward <- episode[[1]]
    states <- episode[[2]]
    actions <- episode[[3]]
    Tt <- dim(states)[1]
    for(t in 1:(Tt-1)){
      S_t <- states[t,]
      A_t <- actions[[t]][1]
      R_tplus1 <- 0
      if(t == Tt-1) # only the terminal transition carries the game reward
        R_tplus1 <- reward
      S_tplus1 <- states[t+1,]
      # convert states to decimal representation
      S_t <- stateToDec(S_t)
      S_tplus1 <- stateToDec(S_tplus1)
      # add 1 to every S because R indexing starts at 1 while states start at 0
      # undiscounted rewards
      qEstim[S_t+1,A_t] <- qEstim[S_t+1,A_t] + stepSize*(R_tplus1 + max(qEstim[S_tplus1+1,]) - qEstim[S_t+1,A_t])
    }
  }
  return(qEstim)
}
# apply q learning: one row per encoded sub-board state (3^9 of them),
# one column per action (9 cells)
stepsize <- 0.1
qEstimQ <- matrix(0,nrow = 3^9, ncol = 9)
qEstimQ <- ApplyQLearningAgent1(qEstimQ,StateActionReward,stepsize)
# Play one game: player 1 follows the greedy policy from `qEstim` (state
# approximation 1: sub-board contents only), player 2 plays randomly.
#
# theseed: RNG seed; qEstim: action-value matrix from ApplyQLearningAgent1.
# Returns list(winner, boardStates).
#
# Bug fix: the greedy-action loop never updated `maxValue`, so every valid
# action with a finite value overwrote `maxIndex` and the LAST valid action
# was chosen instead of the argmax.
simulUTTTQLearning1 <- function(theseed,qEstim){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states
  boardStates <- matrix(0, ncol = 81)
  # First move
  player <- 1
  # random method
  move <- sample(c(1,2,3), size = 4, replace = T)
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # The game continue normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    # random move for opponent or for player 1 in special case
    if(player == 2){
      move <- sample(validMoves, size = 1)
      move <- move[[1]]
    }
    else
    {
      if(validMoves[[1]][1] != forcedMove[1] || validMoves[[1]][2] != forcedMove[2]){
        # the move isn't restricted to the forced sub-board: pick a random
        # open sub-board, then choose greedily within it
        randomSuboard <- sample(validMoves,size = 1)
        randomSuboard <- randomSuboard[[1]]
        forcedMove <- c(randomSuboard[1],randomSuboard[2])
        # restrict valid moves only to that suboard
        temp <- list()
        for(i in 1:length(validMoves)){
          if(validMoves[[i]][1] == forcedMove[1] && validMoves[[i]][2] == forcedMove[2])
            temp <- append(temp, list(validMoves[[i]]))
        }
        validMoves <- temp
      }
      # otherwise player 1 can pick only within sub-board
      # in this case can use qEstim
      stateDecimal <- stateToDec(suboardToVector(masterBoard[forcedMove[1],forcedMove[2],,]))
      validActionsDecimal <- c() # vector of action numbers in {1,...,9}
      # go through list of moves and convert them to decimal
      for(move in validMoves){
        validActionsDecimal <- c(validActionsDecimal, vecToActionMapping(c(move[3],move[4])))
      }
      # find the max action based on values in qEstim
      maxValue <- -Inf
      maxIndex <- -1
      for(actionIndex in validActionsDecimal){
        # Bug fix: track the running max value, not just the index
        if(qEstim[stateDecimal+1,actionIndex] > maxValue){
          maxValue <- qEstim[stateDecimal+1,actionIndex]
          maxIndex <- actionIndex
        }
      }
      # so the move will be
      actionVector <- actionToVecMapping(maxIndex)
      move <- c(forcedMove[1],forcedMove[2],actionVector[1],actionVector[2])
    }
    # Play the move
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    # add state to tracker
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates))
}
# run and find win ratio of the Q-learning policy vs a random opponent
# playing as player 1 - with QLearning technique + State Approx 1
wins <- 0
draws <- 0
losses <- 0
nepis <- 10000
# Running win/draw/loss ratios after each episode, for plotting convergence
winRatio <- vector("numeric",length = nepis)
drawRatio <- vector("numeric",length = nepis)
lossRatio <- vector("numeric",length = nepis)
for(i in 1:nepis){
  result <- simulUTTTQLearning1(i,qEstimQ)[[1]]
  if (result == 0)
    draws <- draws + 1
  else if(result == 1)
    wins <- wins + 1
  else
    losses <- losses + 1
  winRatio[i] <- wins/i
  drawRatio[i] <- draws/i
  lossRatio[i] <- losses/i
}
# win ratio of about 0.435, 3% improvement over the random baseline
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend("topright",legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the win, loss and draw ratios
cat("The win ratio given a QL policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio[nepis],"\n")
######### State Approximation 2 + Q-Learning + Double Q-Learning Agent #########
# Encode a 9-cell ternary sub-board together with its location on the outer
# grid (1..9) as a single unique integer in [0, 3^11 - 1]: each location
# gets its own block of 3^9 consecutive codes.
# NOTE: redefines the one-argument stateToDec() from the previous section.
stateToDec <- function(boardVec, location){
  offset <- (location - 1) * 3^9
  ternToDec(boardVec) + offset
}
# Map a (row, col) position on the 3x3 outer grid to a number in {1,...,9}
# (row-major).  Returns -1 when the row coordinate is not in {1, 2, 3}.
# Bug fix: the fallback was `return -1` (no parentheses), which evaluates
# `return - 1` and errors at runtime instead of returning -1.
locationToDec <- function(boardVec){
  if(boardVec[1] == 1){
    if(boardVec[2] == 1)
      return(1)
    else if(boardVec[2] == 2)
      return(2)
    else
      return(3)
  }
  else if(boardVec[1] == 2){
    if(boardVec[2] == 1)
      return(4)
    else if(boardVec[2] == 2)
      return(5)
    else
      return(6)
  }
  else if(boardVec[1] == 3){
    if(boardVec[2] == 1)
      return(7)
    else if(boardVec[2] == 2)
      return(8)
    else
      return(9)
  }
  else return(-1)
}
# Treat `boardVec` as base-3 digits, least-significant digit first, and
# return the equivalent decimal integer.
ternToDec <- function(boardVec){
  sum(boardVec * 3^(seq_along(boardVec) - 1))
}
# Reward from the perspective of `playingAs` (1 or 2), given the game's
# winner (0 = draw, 1, or 2): 0 for a draw, +100 if `playingAs` won,
# -100 if the opponent won.  (Same definition as in the previous section.)
reward <- function(winner, playingAs){
  if (winner == 0) {
    return(0)
  }
  if (winner == playingAs) 100 else -100
}
# Simulate one random-vs-random game while recording, for player 1 only,
# the sub-board contents, the sub-board LOCATION (for state approximation 2),
# and the action taken before each of its moves.
#
# Returns list(winner, boardStates, currentBoardState, locationStates, actionList):
#   winner            0 (tie), 1, or 2
#   boardStates       matrix of flattened 81-cell board states
#   currentBoardState matrix of 9-cell sub-board states before player 1 moves
#   locationStates    sub-board locations encoded as numbers in {1,...,9}
#   actionList        player 1's actions as numbers in {1,...,9}
# NOTE(review): the sample() call order defines the RNG stream; do not reorder.
simulUTTTAgent2 <- function(theseed){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states
  boardStates <- matrix(0, ncol = 81)
  # for Q learning
  currentBoardState <- matrix(0, ncol = 9)
  locationStates <- c()
  actionList <- c()
  # First move
  player <- 1
  # random method
  move <- sample(c(1,2,3), size = 4, replace = T)
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # add state to tracker
  boardStates <- rbind(boardStates, boardToVector(masterBoard))
  # add location to tracker
  locationStates <- c(locationStates, locationToDec(c(move[1],move[2])))
  # add action to tracker
  actionList <- c(actionList, vecToActionMapping(forcedMove))
  # The game continue normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    move <- sample(validMoves, size = 1)
    move <- move[[1]]
    if(player == 1){
      # add current board to state tracker
      currentBoardState <- rbind(currentBoardState, suboardToVector(masterBoard[move[1],move[2],,]))
      # add location to location tracker
      locationStates <- c(locationStates, locationToDec(c(move[1],move[2])))
    }
    # make the action
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    if(player == 1){
      # add action to state tracker
      actionList <- c(actionList, vecToActionMapping(forcedMove))
    }
    # Update tracked states
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates,currentBoardState,locationStates,actionList))
}
# Build the training set for state approximation 2: each episode stores
# player 1's final reward, the encoded (sub-board + location) states, and
# the actions taken.
# increase number of episodes since state space is larger
nepis <- 25000
StateActionReward <- list()
# playing as player 1
for(i in 1:nepis){
  run <- simulUTTTAgent2(i)
  rewardR <- reward(run[[1]], playingAs = 1)
  states <- c()
  # Encode each recorded (sub-board, location) pair as a single integer.
  # Bug fix: the inner loop reused `i`, shadowing the episode index; also
  # use seq_len() so an empty history does not iterate over c(1, 0).
  for(j in seq_len(length(run[[4]]))){
    states <- c(states, stateToDec(run[[3]][j,], run[[4]][j]))
  }
  StateActionReward <- append(StateActionReward, list(list(rewardR, states, run[[5]])))
}
# Watkins' Q-learning over pre-recorded episodes (state approximation 2).
# Unlike ApplyQLearningAgent1, the states arrive already encoded as decimal
# indices, so no conversion happens inside the loop.
#
# qInit:       initial action-value matrix, 3^11 states x 9 actions
# episodeSimu: list of episodes, each list(reward, states, actions)
# stepSize:    learning rate alpha
# Returns the updated action-value matrix.
#
# NOTE(review): only the terminal transition carries a nonzero reward, and
# updates are applied sequentially (each update reads earlier ones).
QLearningAgent2 <- function(qInit,episodeSimu,stepSize){
  qEstim <- qInit
  for(episode in episodeSimu){
    reward <- episode[[1]]
    states <- episode[[2]]
    actions <- episode[[3]]
    Tt <- length(states)
    for(t in 1:(Tt-1)){
      S_t <- states[t]
      A_t <- actions[[t]][1]
      R_tplus1 <- 0
      if(t == Tt-1) # only the terminal transition carries the game reward
        R_tplus1 <- reward
      S_tplus1 <- states[t+1]
      # add 1 to every S because R indexing starts at 1 while states start at 0
      # undiscounted rewards
      qEstim[S_t+1,A_t] <- qEstim[S_t+1,A_t] + stepSize*(R_tplus1 + max(qEstim[S_tplus1+1,]) - qEstim[S_t+1,A_t])
    }
  }
  return(qEstim)
}
# apply q learning over the larger (sub-board + location) state space
# Fix: use library() rather than require() -- require() only warns on a
# missing package, which would defer the failure to the first use.
library(nnet) # for which.is.max
stepsize <- 0.1
qEstimQ <- matrix(0,nrow = 3^11, ncol = 9) # slightly bigger state space
qEstimQ <- QLearningAgent2(qEstimQ,StateActionReward,stepsize)
# Play one game: player 1 follows the greedy policy from `qEstim` (state
# approximation 2: sub-board contents + location), player 2 plays randomly.
# Because the state encoding includes the location, the "free move" case
# needs no special handling: every valid move is scored directly.
#
# theseed: RNG seed; qEstim: 3^11 x 9 action-value matrix.
# Returns list(winner, boardStates).
simulUTTTQLearningAgent2 <- function(theseed,qEstim){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states
  boardStates <- matrix(0, ncol = 81)
  # First move
  player <- 1
  # Since we have locations of current board we can pick the optimal initial action
  # using qEstim: enumerate every cell of every sub-board
  allMoves <- list()
  for(r in 1:3)
    for(c in 1:3)
      for(rprime in 1:3)
        for(cprime in 1:3)
          allMoves <- append(allMoves, list(c(r,c,rprime,cprime)))
  # find the max action based on values in qEstim
  maxValue <- -Inf
  maxMove <- -1
  for(move in allMoves){
    # first find Q(s,a)
    stateDec <- stateToDec(suboardToVector(masterBoard[move[1],move[2],,]),locationToDec(c(move[1],move[2])))
    actionDec <- vecToActionMapping(c(move[3],move[4]))
    valueOfMove <- qEstim[stateDec+1,actionDec]
    # find the max
    if(valueOfMove > maxValue){
      maxValue <- valueOfMove
      maxMove <- move
    }
  }
  # start with the greedy move according to qEstim
  move <- maxMove
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # The game continue normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    # random move for opponent or for player 1 in special case
    if(player == 2){
      move <- sample(validMoves, size = 1)
      move <- move[[1]]
    }
    else
    {
      # no special case needed when the agent may play anywhere: the
      # location parameter of the state encoding handles it
      # find the max action based on values in qEstim
      maxValue <- -Inf
      maxMove <- -1
      for(move in validMoves){
        # go through all the valid moves - choose the one with the highest Q(s,a)
        # first find Q(s,a)
        stateDec <- stateToDec(suboardToVector(masterBoard[move[1],move[2],,]),locationToDec(c(move[1],move[2])))
        actionDec <- vecToActionMapping(c(move[3],move[4]))
        valueOfMove <- qEstim[stateDec+1,actionDec]
        # find the max
        if(valueOfMove > maxValue){
          maxValue <- valueOfMove
          maxMove <- move
        }
      }
      # so the move will be
      move <- maxMove
    }
    # Play the move
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    # add state to tracker
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates))
}
# run and find win ratio of the state-approximation-2 Q-learning policy
# playing as player 1 - with QLearning technique
wins <- 0
draws <- 0
losses <- 0
nepis <- 10000
# Running win/draw/loss ratios after each episode, for plotting convergence
winRatio <- vector("numeric",length = nepis)
drawRatio <- vector("numeric",length = nepis)
lossRatio <- vector("numeric",length = nepis)
for(i in 1:nepis){
  result <- simulUTTTQLearningAgent2(i,qEstimQ)[[1]]
  if (result == 0)
    draws <- draws + 1
  else if(result == 1)
    wins <- wins + 1
  else
    losses <- losses + 1
  winRatio[i] <- wins/i
  drawRatio[i] <- draws/i
  lossRatio[i] <- losses/i
}
# plots
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend("topright",legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the win, loss and draw ratios
cat("The win ratio given a QL policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio[nepis],"\n")
# Tabular Double Q-learning over pre-recorded episodes.  Two action-value
# tables are kept; on each transition a fair coin decides which table is
# updated, using the OTHER table to evaluate the greedy next action (this
# reduces the maximization bias of plain Q-learning).
#
# qInit1, qInit2: initial action-value matrices (state x action)
# episodeSimu:    list of episodes, each list(reward, states, actions) with
#                 states already encoded as decimal indices
# stepsize:       learning rate alpha
# Returns Q1, used afterwards as the playing value function.
#
# Bug fix: both update lines read `Q[..] <- Q[..] <- stepsize*(...)`, a
# double assignment that OVERWROTE the entry with the increment instead of
# adding the increment to the current value.
ApplyDoubleQLearningAgent2 <- function(qInit1,qInit2,episodeSimu,stepsize){
  Q1 <- qInit1 # selects the greedy action half the time
  Q2 <- qInit2 # evaluates the other table's greedy action
  for(episode in episodeSimu){
    reward <- episode[[1]]
    states <- episode[[2]]
    actions <- episode[[3]]
    Tt <- length(states)
    if (Tt < 2) next  # nothing to update for a degenerate 1-state episode
    for(t in 1:(Tt-1)){
      S_t <- states[t]
      A_t <- actions[[t]][1]
      R_tplus1 <- 0
      if(t == Tt-1) # only the terminal transition carries the game reward
        R_tplus1 <- reward
      S_tplus1 <- states[t+1]
      # coin flip decides which table gets updated
      if(sample(c(1,2),1) == 1)
      {
        Q1[S_t+1,A_t] <- Q1[S_t+1,A_t] + stepsize*(R_tplus1 + Q2[S_tplus1+1, which.is.max(Q1[S_tplus1+1,])] - Q1[S_t+1,A_t])
      }
      else{
        Q2[S_t+1,A_t] <- Q2[S_t+1,A_t] + stepsize*(R_tplus1 + Q1[S_tplus1+1, which.is.max(Q2[S_tplus1+1,])] - Q2[S_t+1,A_t])
      }
    }
  }
  return(Q1) # this is the action value function that will be used
}
# Build the training set for Double Q-learning (same structure as the plain
# Q-learning set, but twice as many episodes since two tables are trained).
nepis <- 50000
StateActionReward <- list()
# playing as player 1
for(i in 1:nepis){
  # Bug fix: this called the undefined `simulUTTT()`; the identical chunk
  # for plain Q-learning above uses simulUTTTAgent2(), whose return layout
  # (run[[3]], run[[4]], run[[5]]) matches the code below.
  run <- simulUTTTAgent2(i)
  rewardR <- reward(run[[1]], playingAs = 1)
  states <- c()
  # Encode each recorded (sub-board, location) pair as a single integer.
  # Bug fix: the inner loop reused `i`, shadowing the episode index.
  for(j in seq_len(length(run[[4]]))){
    states <- c(states, stateToDec(run[[3]][j,], run[[4]][j]))
  }
  StateActionReward <- append(StateActionReward, list(list(rewardR, states, run[[5]])))
}
# apply double q learning: two zero-initialized tables over the
# (sub-board + location) state space
stepsize <- 0.1
qEstim1 <- matrix(0,nrow = 3^11, ncol = 9) # slightly bigger state space
qEstim2 <- matrix(0,nrow = 3^11, ncol = 9) # two initial Q's for double q learning
require(nnet)
qEstim1 <- ApplyDoubleQLearningAgent2(qEstim1,qEstim2,StateActionReward,stepsize)
# run and find win ratio of the double-Q policy vs a random opponent
# playing as player 1 - with QLearning technique
wins <- 0
draws <- 0
losses <- 0
nepis <- 2000
winRatio <- vector("numeric",length = nepis)
drawRatio <- vector("numeric",length = nepis)
lossRatio <- vector("numeric",length = nepis)
for(i in 1:nepis){
  result <- simulUTTTQLearningAgent2(i,qEstim1)[[1]]
  if (result == 0)
    draws <- draws + 1
  else if(result == 1)
    wins <- wins + 1
  else
    losses <- losses + 1
  winRatio[i] <- wins/i
  drawRatio[i] <- draws/i
  lossRatio[i] <- losses/i
}
# plots
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend("topright",legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex=0.8)
# print the win, loss and draw ratios
cat("The win ratio given a QL policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio[nepis],"\n")
######### State Approximation 3 + Q-Learning (NO Double Q-Learning) #####
# Note: Running this section may require clearing your global environment
# Convert a 3x3 sub-board into a length-9 binary vector (row-major order):
#   1 where X (player 1) has played, 0 everywhere else -- O marks and empty
#   cells are NOT distinguished.
# (Fix: replaces the element-by-element c() growth with a vectorized
# comparison; flattening t(m) gives m's cells in row-major order.)
suboardToBinVector <- function(subBoard){
  as.numeric(t(subBoard) == 1)
}
# Uniquely map (current subboard, forced location, status board) to a single
# non-negative integer state code.
#   currentBoard   - length-9 ternary (0/1/2) vector for the active subboard
#   location       - 1..9 index of the subboard within the master board
#   statusBoardBin - length-9 binary vector of subboards won by X
# The three components occupy disjoint value ranges, so the mapping is
# injective; codes span 0 .. 3^11 * 2^9 - 1.
stateToDec <- function(currentBoard, location, statusBoardBin) {
  ternD <- ternToDec(currentBoard)
  binD <- binToDec(statusBoardBin)
  # Return the code as the value of the last expression; the original ended
  # with an assignment, which returns its value invisibly.
  ternD + (location - 1) * 3^9 + binD * 3^11
}
# Simulate one full game of Ultimate Tic-Tac-Toe with both players moving
# uniformly at random, recording player 1's state approximations (agent 3:
# subboard + location + status board) for later Q-learning.
# theseed: RNG seed so each episode is reproducible.
# Returns list(winner, boardStates, currentBoardState, locationStates,
#              statusBoardState, actionList); winner is 0 (draw), 1 or 2.
# Relies on getValidMove(), hasWonBoard(), boardToVector(), suboardToVector(),
# locationToDec() and vecToActionMapping() defined elsewhere in this file.
simulUTTTAgent3 <- function(theseed){
  # To reproduce experiment
  set.seed(theseed)
  # Start game: 3x3 grid of 3x3 subboards, all empty
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states (one flattened 81-cell row per ply; first row is zeros)
  boardStates <- matrix(0, ncol = 81)
  # trackers for Q learning (player 1's perspective only)
  currentBoardState <- matrix(0, ncol = 9)
  locationStates <- c()
  statusBoardState <- matrix(0, ncol = 9)
  actionList <- c()
  # First move
  player <- 1
  # random method: any of the 81 cells is a legal opening move
  move <- sample(c(1,2,3), size = 4, replace = T)
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  # the cell played inside the subboard forces the opponent's subboard
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # add state to tracker
  boardStates <- rbind(boardStates, boardToVector(masterBoard))
  # add location to tracker
  locationStates <- c(locationStates, locationToDec(c(move[1],move[2])))
  # add action to tracker
  actionList <- c(actionList, vecToActionMapping(forcedMove))
  # The game continues normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    move <- sample(validMoves, size = 1)
    move <- move[[1]]
    if(player == 1){
      # record the pre-move state from player 1's perspective
      # add current board to state tracker
      currentBoardState <- rbind(currentBoardState, suboardToVector(masterBoard[move[1],move[2],,]))
      # add location to location tracker
      locationStates <- c(locationStates, locationToDec(c(move[1],move[2])))
      # add status board to states
      statusBoardState <- rbind(statusBoardState, suboardToBinVector(statusBoard))
    }
    # make the action
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    if(player == 1){
      # add action to state tracker
      actionList <- c(actionList, vecToActionMapping(forcedMove))
    }
    # Update tracked states
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates,currentBoardState,locationStates,statusBoardState,actionList))
}
# create a dataset that contains runs of random plays (states) + of the winner (rewards)
# increase number of episodes since state space is larger
nepis <- 50000
# Preallocate the episode list; the original grew it with append() inside
# the loop, which is quadratic.
StateActionReward <- vector("list", nepis)
# playing as player 1
for (i in seq_len(nepis)) {
  run <- simulUTTTAgent3(i)
  rewardR <- reward(run[[1]], playingAs = 1)
  # Preprocess the states so that they are in decimal format.
  # NOTE: the original reused `i` as the inner loop index, shadowing the
  # episode index, and used 1:length(...) which iterates c(1, 0) on empty
  # input; seq_along() with a separate index `k` avoids both.
  states <- vapply(seq_along(run[[4]]), function(k) {
    stateToDec(run[[3]][k, ], run[[4]][k], run[[5]][k, ])
  }, numeric(1))
  StateActionReward[[i]] <- list(rewardR, states, run[[6]])
}
# Watkins' Q-learning over a list of simulated episodes.
# qInit: state x action value matrix; row s+1 holds values for the 0-based
#        decimal state code s, columns are the 9 actions.
# episodeSimu: list of episodes, each list(reward, states, actions) where
#        `states` is a vector of 0-based state codes and `actions` indexes
#        the Q-matrix columns.
# stepSize: learning rate (alpha). Rewards are undiscounted and only arrive
#        at the terminal transition of an episode.
# Returns the updated Q matrix.
ApplyQLearningAgent3 <- function(qInit, episodeSimu, stepSize) {
  # don't reassign qInit to a copy up front because of space complexity
  for (episode in episodeSimu) {
    reward <- episode[[1]]
    states <- episode[[2]]
    actions <- episode[[3]]
    Tt <- length(states)
    # seq_len() guards against Tt == 1, where the original 1:(Tt - 1)
    # wrongly iterated over c(1, 0) and indexed past the state vector.
    for (t in seq_len(Tt - 1)) {
      S_t <- states[t]
      A_t <- actions[[t]][1]
      R_tplus1 <- 0
      if (t == Tt - 1) # reward only arrives on the final transition
        R_tplus1 <- reward
      S_tplus1 <- states[t + 1]
      # add 1 to every S because R indexing starts at 1 (states are 0-based)
      # undiscounted Q-learning update
      qInit[S_t + 1, A_t] <- qInit[S_t + 1, A_t] +
        stepSize * (R_tplus1 + max(qInit[S_tplus1 + 1, ]) - qInit[S_t + 1, A_t])
    }
  }
  return(qInit)
}
# Apply Watkins' Q-learning to the simulated agent-3 episodes.
stepsize <- 0.1
# 3^11 * 2^9 rows cover every code stateToDec() can produce (ternary
# subboard x 9 locations x binary status board); 9 columns = actions.
qEstimQ <- matrix(0,nrow = 3^11*2^9, ncol = 9) # biggest possible state space
qEstimQ <- ApplyQLearningAgent3(qEstimQ,StateActionReward,stepsize)
# Play one game using the Q-table learned by Q-learning (agent 3 state
# approximation): player 1 picks the valid move with the highest Q(s,a),
# player 2 moves uniformly at random.
# theseed: RNG seed for reproducibility; qEstim: state x action Q matrix.
# Returns list(winner, boardStates); winner is 0 (draw), 1 or 2.
simulUTTTQLearningAgent3 <- function(theseed,qEstim){
  # To reproduce experiment
  set.seed(theseed)
  # Start game
  masterBoard <- array(0, dim = c(3,3,3,3))
  statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = T)
  winner <- 0 # Initially a tie
  # track the states
  boardStates <- matrix(0, ncol = 81)
  # First move
  player <- 1
  # Since we have locations of current board we can pick the optimal initial action
  # using qEstim; every one of the 81 cells is legal for the opening move.
  allMoves <- list()
  for(r in 1:3)
    for(c in 1:3)
      for(rprime in 1:3)
        for(cprime in 1:3)
          allMoves <- append(allMoves, list(c(r,c,rprime,cprime)))
  # find the max action based on values in qEstim
  maxValue <- -Inf
  maxMove <- -1
  for(move in allMoves){
    # first find Q(s,a): encode the pre-move state, then look up the action
    stateDec <- stateToDec(suboardToVector(masterBoard[move[1],move[2],,]),locationToDec(c(move[1],move[2])),suboardToBinVector(statusBoard))
    actionDec <- vecToActionMapping(c(move[3],move[4]))
    valueOfMove <- qEstim[stateDec+1,actionDec]
    # find the max
    if(valueOfMove > maxValue){
      maxValue <- valueOfMove
      maxMove <- move
    }
  }
  # start with max move according to qEstim
  move <- maxMove
  masterBoard[move[1], move[2], move[3], move[4]] <- player
  forcedMove <- c(move[3], move[4])
  player <- player %% 2 + 1
  # The game continues normally
  while (T) {
    validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
    if (length(validMoves) == 0)
      break
    # random move for opponent or for player 1 in special case
    if(player == 2){
      move <- sample(validMoves, size = 1)
      move <- move[[1]]
    }
    else
    {
      # in this case we don't need to check for the special case that agent can play
      # anywhere on the board since we can handle this with the location parameter of the
      # state
      # find the max action based on values in qEstim
      maxValue <- -Inf
      maxMove <- -1
      for(move in validMoves){
        # go through all the valid moves - choose the one with the highest Q(s,a)
        # first find Q(s,a)
        stateDec <- stateToDec(suboardToVector(masterBoard[move[1],move[2],,]),
                               locationToDec(c(move[1],move[2])),
                               suboardToBinVector(statusBoard))
        actionDec <- vecToActionMapping(c(move[3],move[4]))
        valueOfMove <- qEstim[stateDec+1,actionDec]
        # find the max
        if(valueOfMove > maxValue){
          maxValue <- valueOfMove
          maxMove <- move
        }
      }
      # so the move will be
      move <- maxMove
    }
    # Play the move
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    # add state to tracker
    boardStates <- rbind(boardStates, boardToVector(masterBoard))
    # Check for win
    if(hasWonBoard(masterBoard[move[1], move[2],,], player))
      statusBoard[move[1], move[2]] <- player # Won subBoard
    if(hasWonBoard(statusBoard, player)){
      winner <- player # Won game
      break
    }
    player <- player %% 2 + 1 # Change player
  }
  return(list(winner,boardStates))
}
# run and find win ratio
# playing as player 1 - with QLearning technique (agent 3 state space)
wins <- 0
draws <- 0
losses <- 0
nepis <- 10000
winRatio <- vector("numeric",length = nepis)
drawRatio <- vector("numeric",length = nepis)
lossRatio <- vector("numeric",length = nepis)
for(i in 1:nepis){
  # result: 0 = draw, 1 = agent won, 2 = random opponent won
  result <- simulUTTTQLearningAgent3(i,qEstimQ)[[1]]
  if (result == 0)
    draws <- draws + 1
  else if(result == 1)
    wins <- wins + 1
  else
    losses <- losses + 1
  winRatio[i] <- wins/i
  drawRatio[i] <- draws/i
  lossRatio[i] <- losses/i
}
# plots: running ratios over evaluation episodes
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend("topright",legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the final win, loss and draw ratios
cat("The win ratio given a QL policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio[nepis],"\n")
######### Features + One-Step Sarsa (semi-Gradient) #####
# Feature vector for linear value approximation:
#   x1  to x9  - number of X marks in each of the 9 subboards
#   x10 to x18 - number of O marks in each of the 9 subboards
#   x19        - number of subboards won by X
#   x20        - number of subboards won by O
# NOTE(review): the function name `x` is easy to shadow with a local
# variable; rename with care since qHat()/gradientQ() call it.
x <- function(state,action){
  # state is list(masterBoard, statusBoard); action is a move coordinate
  # c(r, c, r', c'). The features are computed on the POST-move state s',
  # with the move hard-coded as player 1's (value 1).
  feature <- vector(mode="numeric",length = 20)
  # apply the action to obtain s'
  state[[1]][action[1],action[2],action[3],action[4]] <- 1
  # update the status board in case the move wins the subboard for player 1
  if(hasWonBoard(state[[1]][action[1],action[2],,],player=1))
    state[[2]][action[1],action[2]] <- 1
  # tally X and O counts per subboard; feature index (r-1)*3 + c identifies
  # the subboard, +9 shifts to the O-count slots
  for(r in 1:3)
    for(c in 1:3)
      for(rprime in 1:3)
        for(cprime in 1:3){
          if(state[[1]][r,c,rprime,cprime] == 1){
            if(r == 1)
              feature[c] <- feature[c] + 1
            else if(r == 2)
              feature[c + 3] <- feature[c + 3] + 1
            else
              feature[c + 6] <- feature[c + 6] + 1
          }
          else if(state[[1]][r,c,rprime,cprime] == 2){
            if(r == 1)
              feature[c + 9] <- feature[c + 9] + 1
            else if(r == 2)
              feature[c + 3 + 9] <- feature[c + 3 + 9] + 1
            else
              feature[c + 6 + 9] <- feature[c + 6 + 9] + 1
          }
        }
  # count subboards won by each player from the status board
  for(r in 1:3)
    for(c in 1:3){
      if(state[[2]][r,c] == 1){
        feature[19] <- feature[19] + 1
      }
      else if(state[[2]][r,c] == 2){
        feature[20] <- feature[20] + 1
      }
    }
  return(feature)
}
# Approximate the state-action value q(s, a, w) as a linear function of
# the features: x(s, a) %*% w.
# `state` is list(masterBoard, statusBoard); returns a 1x1 matrix.
qHat <- function(state, action, weight) {
  feats <- x(state, action)
  feats %*% weight
}
# Gradient of the linear q-hat with respect to the weights; by construction
# this is simply the feature vector x(s, a) (weight is unused but kept for a
# uniform signature).
gradientQ <- function(state, action, weight) {
  x(state, action)
}
# Episodic semi-gradient one-step Sarsa with linear function approximation
# q(s, a, w) = x(s, a) %*% w, playing as player 1 against a uniformly
# random opponent.
#   stepsize - gradient step size (alpha)
#   epsilon  - initial exploration rate; halved every 1000 episodes
#   weight   - initial weight vector (length 20, matching x())
#   nepis    - number of training episodes to play
# Returns list(winRatio, drawRatio, lossRatio), each a length-nepis vector
# of running ratios. Relies on getValidMove(), hasWonBoard(), qHat(),
# gradientQ() and reward() defined elsewhere in this file.
semiGradientSarsa <- function(stepsize, epsilon, weight, nepis) {
  # track win rate
  wins <- 0
  draws <- 0
  losses <- 0
  # BUG FIX: the original hard-coded `nepis <- 10000` here, silently
  # ignoring the nepis argument; the parameter is now honored.
  winRatio <- vector("numeric", length = nepis)
  drawRatio <- vector("numeric", length = nepis)
  lossRatio <- vector("numeric", length = nepis)
  for (i in seq_len(nepis)) {
    # To reproduce experiment
    set.seed(i)
    # Every 1000 episodes reduce epsilon by half (anneal exploration)
    if (i %% 1000 == 0)
      epsilon <- epsilon / 2
    # Start game
    masterBoard <- array(0, dim = c(3, 3, 3, 3))
    statusBoard <- matrix(0, nrow = 3, ncol = 3, byrow = TRUE)
    winner <- 0 # Initially a tie
    # First move
    player <- 1
    # All 81 cells; every one of them is a legal opening move
    allMoves <- list()
    for (r in 1:3)
      for (c in 1:3)
        for (rprime in 1:3)
          for (cprime in 1:3)
            allMoves <- append(allMoves, list(c(r, c, rprime, cprime)))
    # epsilon-greedy choice for the opening move
    if (runif(1, 0, 1) < epsilon) # non-greedy
    {
      move <- sample(allMoves, size = 1)
      move <- move[[1]]
    }
    else {
      # greedy: pick the move with the highest approximate value
      maxValue <- -Inf
      maxMove <- -1
      for (move in allMoves) {
        valueOfMove <- qHat(list(masterBoard, statusBoard), move, weight)
        if (valueOfMove > maxValue) {
          maxValue <- valueOfMove
          maxMove <- move
        }
      }
      move <- maxMove
    }
    # store the pre-move state in S
    S <- list(masterBoard, statusBoard)
    masterBoard[move[1], move[2], move[3], move[4]] <- player
    forcedMove <- c(move[3], move[4])
    player <- player %% 2 + 1
    # A, S', A' for updating the weights
    A <- move
    Sprime <- NULL
    Aprime <- NULL
    # The game continues normally
    while (TRUE) {
      validMoves <- getValidMove(masterBoard, forcedMove, statusBoard)
      if (length(validMoves) == 0)
        break
      if (player == 2) {
        # random move for the opponent
        move <- sample(validMoves, size = 1)
        move <- move[[1]]
      }
      else {
        # epsilon-greedy choice for the agent.
        # BUG FIX: both branches now draw from validMoves; the original
        # used allMoves, which could select illegal moves (occupied cells
        # or moves outside the forced subboard) and overwrite the board.
        if (runif(1, 0, 1) < epsilon) # non-greedy
        {
          move <- sample(validMoves, size = 1)
          move <- move[[1]]
        }
        else {
          maxValue <- -Inf
          maxMove <- -1
          for (move in validMoves) {
            valueOfMove <- qHat(list(masterBoard, statusBoard), move, weight)
            if (valueOfMove > maxValue) {
              maxValue <- valueOfMove
              maxMove <- move
            }
          }
          move <- maxMove
        }
        # store S' (pre-move state) and A'
        Aprime <- move
        Sprime <- list(masterBoard, statusBoard)
      }
      # Play the move
      masterBoard[move[1], move[2], move[3], move[4]] <- player
      forcedMove <- c(move[3], move[4])
      # Check for win
      if (hasWonBoard(masterBoard[move[1], move[2], , ], player))
        statusBoard[move[1], move[2]] <- player # Won subBoard
      if (hasWonBoard(statusBoard, player)) {
        winner <- player # Won game
        break
      }
      if (player == 1) # one-step Sarsa update with mid-game reward R = 0
      {
        qHatPrime <- qHat(Sprime, Aprime, weight)
        qHatInit <- qHat(S, A, weight)
        grad <- gradientQ(S, A, weight)
        weight <- weight + stepsize * (qHatPrime - qHatInit) * grad # R is 0 until the end
        S <- Sprime
        A <- Aprime
      }
      player <- player %% 2 + 1 # Change player
    }
    # If player 1 made the winning move, that (S', A') pair never received
    # its in-game update; apply the terminal update to it instead.
    # NOTE(review): Sprime would still be NULL if the game could end before
    # player 1's second decision -- presumed unreachable, verify.
    if (winner == 1) {
      S <- Sprime
      A <- Aprime
    }
    # terminal update with the actual game reward
    R <- reward(winner, playingAs = 1)
    qHatInit <- qHat(S, A, weight)
    grad <- gradientQ(S, A, weight)
    weight <- weight + stepsize * (R - qHatInit) * grad
    # running win, draw and loss ratios
    if (winner == 0)
      draws <- draws + 1
    else if (winner == 1)
      wins <- wins + 1
    else
      losses <- losses + 1
    winRatio[i] <- wins / i
    drawRatio[i] <- draws / i
    lossRatio[i] <- losses / i
  }
  return(list(winRatio, drawRatio, lossRatio))
}
# Run the Sarsa-with-features agent at three exploration rates (0.4, 0.2,
# 0.1) with identical step sizes, plotting running ratios for each.
# test Sarsa w/ features - epsi = 0.4
initW <- vector("numeric",length=20)
nepis <- 10000
stepsize <- 2^-20
epsi <- 0.4
ratios <- semiGradientSarsa(stepsize,epsi,initW,nepis)
# graph
winRatio <- ratios[[1]]
drawRatio <- ratios[[2]]
lossRatio <- ratios[[3]]
plot(x=1:nepis,winRatio,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio,col="blue")
lines(lossRatio,col="red")
legend(y=0.6,x=8000,legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the win, loss and draw ratios; epsi = 0.4, stepsize = 2^-20
cat("The win ratio given a QL policy was ", winRatio[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio[nepis],"\n")
# test Sarsa w/ features - epsi = 0.2
initW <- vector("numeric",length=20)
nepis <- 10000
stepsize <- 2^-20
epsi <- 0.2
ratios <- semiGradientSarsa(stepsize,epsi,initW,nepis)
# graph
winRatio1 <- ratios[[1]]
drawRatio1 <- ratios[[2]]
lossRatio1 <- ratios[[3]]
plot(x=1:nepis,winRatio1,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio1,col="blue")
lines(lossRatio1,col="red")
legend(y=0.6,x=8000,legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the win, loss and draw ratios; epsi = 0.2, stepsize = 2^-20
cat("The win ratio given a QL policy was ", winRatio1[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio1[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio1[nepis],"\n")
# test Sarsa w/ features - epsi = 0.1
initW <- vector("numeric",length=20)
nepis <- 10000
stepsize <- 2^-20
epsi <- 0.1
ratios <- semiGradientSarsa(stepsize,epsi,initW,nepis)
# graph
winRatio2 <- ratios[[1]]
drawRatio2 <- ratios[[2]]
lossRatio2 <- ratios[[3]]
plot(x=1:nepis,winRatio2,ylab= "Ratios", xlab = "Episodes", type="l",col="darkgreen",ylim = c(0,1))
lines(drawRatio2,col="blue")
lines(lossRatio2,col="red")
legend(y=0.6,x=8000,legend=c("Win Ratio","Draw Ratio","Loss Ratio"),
       col=c("darkgreen","blue","red"),lty=1,cex = 0.8)
# print the win, loss and draw ratios; epsi = 0.1, stepsize = 2^-20
cat("The win ratio given a QL policy was ", winRatio2[nepis],"\n")
cat("The draw ratio given a QL policy was ", drawRatio2[nepis],"\n")
cat("The loss ratio given a QL policy was ", lossRatio2[nepis],"\n")
# Compare win-ratio trajectories across the three epsilon settings.
# Fixed: removed a stray trailing "|" after the legend() call that made
# this statement unparseable.
plot(x = 1:nepis, winRatio, ylab = "Win Ratios", xlab = "Episodes",
     type = "l", col = "darkgreen", ylim = c(0, 1))
lines(winRatio1, col = "blue")
lines(winRatio2, col = "red")
legend("bottomright",
       legend = c(expression(paste(epsilon == 0.4)),
                  expression(paste(epsilon == 0.2)),
                  expression(paste(epsilon == 0.1))),
       col = c("darkgreen", "blue", "red"), lty = 1, cex = 0.8)
9466d61548e3d536964a4b715516213df9e850d4 | 7e0f4777f4e06b0ac72b90422ac0d9c765767755 | /projects/josm-trend/josm-trend.R | 6adfa0404c935e57881601e3a8b7cd3a2fe4b676 | [] | no_license | psolymos/abmianalytics | edd6a040082260f85afbf4fc25c4f2726b369392 | 9e801c2c564be155124109b4888d29c80bd1340d | refs/heads/master | 2023-01-30T05:00:32.776882 | 2023-01-21T05:36:23 | 2023-01-21T05:36:23 | 34,713,422 | 0 | 7 | null | 2017-01-20T19:07:59 | 2015-04-28T06:39:37 | R | UTF-8 | R | false | false | 21,376 | r | josm-trend.R | library(parallel)
library(mefa4)
library(RColorBrewer)
# Root folder holding bird model inputs and outputs (local Windows path).
ROOT <- "e:/peter/AB_data_v2016/out/birds"
#ROOT <- "~/Dropbox/Public"
# Confidence level used for quantile-based summaries below.
level <- 0.9
# Load the analysis helper functions from local repository checkouts.
# source() evaluates in the global environment (its default), so the
# sourced definitions persist after the call; returns NULL invisibly.
up <- function() {
  helper_scripts <- c(
    "~/repos/bragging/R/glm_skeleton.R",
    "~/repos/abmianalytics/R/results_functions.R",
    "~/repos/bamanalytics/R/makingsense_functions.R"
  )
  for (f in helper_scripts) {
    source(f)
  }
  invisible(NULL)
}
up()
# Load the northern-region bird data (survey covariates, model list, counts,
# offsets, bootstrap indices) into a private environment to avoid name
# clashes, then extract what is needed.
en <- new.env()
load(file.path(ROOT, "data", "data-north.Rdata"), envir=en)
xnn <- en$DAT
modsn <- en$mods
yyn <- en$YY
off <- en$OFF
bb <- en$BB
## names etc: taxonomy lookup with alphanumeric species name labels
e <- new.env()
load(file.path(ROOT, "data", "data-wrsi.Rdata"), envir=e)
TAX <- droplevels(e$TAX)
TAX$Fn <- droplevels(TAX$English_Name)
levels(TAX$Fn) <- nameAlnum(levels(TAX$Fn), capitalize="mixed", collapse="")
rm(e, en)
## model for species
## subset: species with good models on the species website
splt <- read.csv("~/repos/abmispecies/_data/birds.csv")
# Candidate species (AOU codes); filtered below against splt$veghf.north.
SPP <- c("ALFL", "AMCR", "AMGO", "AMRE", "AMRO", "ATTW", "BAOR", "BARS",
    "BAWW", "BBMA", "BBWA", "BBWO", "BCCH", "BHCO", "BHVI", "BLJA",
    "BLPW", "BOCH", "BRBL", "BRCR", "BTNW", "CAWA", "CCSP", "CEDW",
    "CHSP", "CMWA", "COGR", "CONW", "CORA", "COYE", "DEJU", "DOWO",
    "EAKI", "EAPH", "EUST", "EVGR", "FOSP", "GCKI", "GRAJ", "GRCA",
    "GRYE", "HAWO", "HETH", "HOWR", "KILL", "LCSP", "LEFL", "LEYE",
    "LISP", "MAWA", "MODO", "MOWA", "NOFL", "NOWA", "OCWA", "OSFL",
    "OVEN", "PAWA", "PHVI", "PIGR", "PISI", "PIWO", "PUFI", "RBGR",
    "RBNU", "RCKI", "RECR", "REVI", "RUBL", "RUGR", "RWBL", "SAVS",
    "SOSA", "SOSP", "SPSA", "SWSP", "SWTH", "TEWA", "TOSO", "TRES",
    "VATH", "VEER", "VESP", "WAVI", "WBNU", "WCSP", "WETA", "WEWP",
    "WISN", "WIWA", "WIWR", "WTSP", "WWCR", "YBFL", "YBSA", "YEWA",
    "YRWA")
tax <- droplevels(TAX[SPP, c("Spp","English_Name","Scientific_Name","Family_Sci")])
compare_sets(tax$Spp, as.character(splt$sppid))
compare_sets(tax$Spp, as.character(splt$sppid[splt$veghf.north]))
# keep only species flagged as having usable north veg+HF models
SPPkeep <- sort(intersect(tax$Spp, as.character(splt$sppid[splt$veghf.north])))
tax <- droplevels(tax[tax$Spp %in% SPPkeep, ])
SPP <- rownames(tax)
## terms and design matrices for the northern models
nTerms <- getTerms(modsn, "list")
Xnn <- model.matrix(getTerms(modsn, "formula"), xnn)
colnames(Xnn) <- fixNames(colnames(Xnn))
## spp specific output: per-species bootstrap distribution of the year
## effect, expressed as % annual population change, plus a histogram PNG.
#spp <- "BTNW"
all_yr <- list()
for (spp in SPP) {
  cat(spp, "\n");flush.console()
  resn <- loadSPP(file.path(ROOT, "results", "north", paste0("birds_abmi-north_", spp, ".Rdata")))
  estYr <- getEst(resn, stage=which(names(modsn)=="Year"), na.out=FALSE, Xnn)
  ## Boreal year effect estimates
  ## 0.1* converts the YR coefficient to an annual (not decadal) rate
  apc <- 100 * (exp(0.1*estYr[,"YR"]) - 1)
  all_yr[[spp]] <- apc
  # apcstat[2:4]: median and the two level-based quantiles from fstat()
  apcstat <- round(c(fstat(apc), summary(apc)), 3)
  png(file.path(ROOT, "josm2", "yr", paste0(spp, ".png")))
  d <- density(apc)
  hist(apc, col="grey", xlab="% annual population change",
       main=tax[spp,"English_Name"], freq=FALSE, border=NA, ylim=c(0, max(d$y)))
  lines(d)
  rug(apc)
  # drop vertical markers at the median (thick) and quantile limits (thin)
  i <- which.min(abs(d$x - apcstat[2]))
  lines(c(d$x[c(i,i)]), c(d$y[i], -0), col=2, lwd=2)
  i <- which.min(abs(d$x - apcstat[3]))
  lines(c(d$x[c(i,i)]), c(d$y[i], -0), col=2, lwd=1)
  i <- which.min(abs(d$x - apcstat[4]))
  lines(c(d$x[c(i,i)]), c(d$y[i], -0), col=2, lwd=1)
  dev.off()
}
save(all_yr, tax, file=file.path(ROOT, "josm2", "josm-yreffects.Rdata"))
## Residual trend estimates
# Fit a Poisson GLM of counts vs. year on one bootstrap sample and return
# the % annual population change implied by the YR (decadal) coefficient.
#   i      - bootstrap column index into the global `bb` matrix
#   subset - optional logical vector over rows of the global DAT
#   part   - data partition: "all" (BBS plus non-BBS off-road), "bbs"
#            (BBS only), "bam" (non-BBS off-road only)
#   colD   - name of the density column used in the offset
# Reads globals DAT and bb; returns NA if the subset is empty.
yr_fun <- function(i, subset=NULL, part=c("all", "bbs", "bam"), colD="Dhf") {
  part <- match.arg(part)
  if (is.null(subset))
    subset <- rep(TRUE, nrow(DAT))
  dat <- DAT
  dat$SUBSET <- subset
  dat$D <- dat[[colD]]
  dat <- dat[bb[,i],]
  dat <- dat[dat$SUBSET,,drop=FALSE]
  if (part=="bbs") # BBS only
    dat <- dat[dat$isBBS,,drop=FALSE]
  if (part=="bam") # non-BBS excluding roadside surveys
    dat <- dat[!dat$isBBS & dat$ROAD01==0,,drop=FALSE]
  if (part=="all") # BBS plus non-BBS off-road (original comment was a copy-paste error)
    dat <- dat[dat$isBBS | (!dat$isBBS & dat$ROAD01==0),,drop=FALSE]
  if (nrow(dat) < 1)
    return(NA)
  # expected-density offset on the log scale
  dat$logDoff <- log(dat$D) + dat$off
  mod <- glm(Y ~ YR, data=dat, offset=dat$logDoff, family=poisson)
  # convert the decadal log-linear slope to % annual change
  out <- 100 * (exp(0.1*coef(mod)[2]) - 1)
  #attr(out, "n") <- nrow(dat)
  out
}
# Grid of data partitions x density definitions; one residual-trend
# bootstrap (Bmax replicates) is run per species per cell.
vals <- expand.grid(part=c("both", "BBS", "offroad", "CL", "both-noCL", "offroad-noCL"),
    dens=c("D0", "Dhb", "Dcl", "Dhf"))
rownames(vals) <- paste(vals$part, vals$dens, sep="_")
Bmax <- 100
all_res <- list()
for (spp in SPP) {
  cat("\n------------", spp, "------------\n");flush.console()
  resn <- loadSPP(file.path(ROOT, "results", "north", paste0("birds_abmi-north_", spp, ".Rdata")))
  # coefficient sets at increasing model stages: null, habitat (ARU),
  # climate (Space), human footprint (HF)
  est0 <- sapply(resn, "[[", "null")
  estHb <- getEst(resn, stage=which(names(modsn)=="ARU"), na.out=FALSE, Xnn)
  estCl <- getEst(resn, stage=which(names(modsn)=="Space"), na.out=FALSE, Xnn)
  estHF <- getEst(resn, stage=which(names(modsn)=="HF"), na.out=FALSE, Xnn)
  pr0 <- exp(est0)
  # per-station expected densities, averaged over bootstrap coefficient
  # draws; zero-only columns are dropped before the matrix product
  col_keep <- colSums(abs(estHb) > 0) != 0
  prHb <- exp(sapply(1:nrow(estHb), function(j)
    Xnn[,colnames(estHb[,col_keep,drop=FALSE]),drop=FALSE] %*%
      estHb[j,col_keep]))
  col_keep <- colSums(abs(estCl) > 0) != 0
  prCl <- exp(sapply(1:nrow(estCl), function(j)
    Xnn[,colnames(estCl[,col_keep,drop=FALSE]),drop=FALSE] %*%
      estCl[j,col_keep]))
  col_keep <- colSums(abs(estHF) > 0) != 0
  prHF <- exp(sapply(1:nrow(estHF), function(j)
    Xnn[,colnames(estHF[,col_keep,drop=FALSE]),drop=FALSE] %*%
      estHF[j,col_keep]))
  # assemble the per-station data frame consumed by yr_fun (as a global)
  DAT <- droplevels(xnn[,c("PKEY","SS","PCODE","YEAR","YR","ROAD01")])
  rownames(DAT) <- rownames(xnn)
  DAT$Y <- yyn[,spp]
  DAT$Y1 <- ifelse(yyn[,spp]>0, 1, 0)
  DAT$off <- off[,spp]
  DAT$isBBS <- DAT$PCODE == "BBSAB"
  with(DAT, table(isBBS, ROAD01))
  DAT$D0 <- mean(pr0)
  DAT$Dhb <- rowMeans(prHb)
  DAT$Dcl <- rowMeans(prCl)
  DAT$Dhf <- rowMeans(prHF)
  ## North only
  #DAT$sset <- xnn$NRNAME != "Grassland" & xnn$POINT_Y > 50
  # NOTE(review): the cluster is created and stopped once per species;
  # hoisting it outside the loop would avoid repeated startup cost.
  cl <- makeCluster(4)
  tmp <- clusterExport(cl, c("DAT", "bb"))
  res <- list()
  for (j in 1:nrow(vals)) {
    jj <- rownames(vals)[j]
    cat(jj, "\n");flush.console()
    # map the partition label to yr_fun's `part` argument
    PART <- switch(as.character(vals$part[j]),
      "both"="all",
      "BBS"="bbs",
      "offroad"="bam",
      "CL"="bam",
      "both-noCL"="all",
      "offroad-noCL"="bam")
    SUBSET <- NULL
    if (as.character(vals$part[j]) == "CL")
      SUBSET <- DAT$PCODE == "CL"
    if (as.character(vals$part[j]) %in% c("both-noCL", "offroad-noCL"))
      SUBSET <- DAT$PCODE != "CL"
    res[[jj]] <- pbsapply(1:Bmax, yr_fun, cl=cl,
      subset=SUBSET, part=PART, colD=as.character(vals$dens[j]))
    #res[[j]] <- yr_fun(1, subset=SUBSET, part=PART, colD=as.character(vals$dens[j]))
  }
  stopCluster(cl)
  #vals$est <- unlist(res)
  #vals$n <- sapply(res, attr, "n")
  #vals
  all_res[[spp]] <- res
}
save(all_res, tax, vals, file=file.path(ROOT, "josm2", "josm-reseffects.Rdata"))
## determining road associations for species
library(ResourceSelection)
#spp <- "AMRO"
# Estimate a species' road association via a resource selection function:
# "used" points are stations replicated once per detected individual,
# "available" points are all stations; the single covariate is the on-road
# indicator ROAD01. Reads globals xnn (station covariates) and yyn (counts).
# Returns the unnamed coefficient(s) from the fitted rsf() model.
s_fun <- function(spp) {
  DAT <- xnn[,c("ROAD01"),drop=FALSE]
  rownames(DAT) <- rownames(xnn)
  DAT$Y <- yyn[,spp]
  DAT$ST <- 0
  #DAT <- DAT[bb[,j],]
  # replicate each station by its count to form the "used" sample
  DAT1 <- DAT[rep(seq_len(nrow(DAT)), DAT$Y),]
  DAT1$ST <- 1
  DAT <- rbind(DAT1, DAT)
  m <- rsf(ST ~ ROAD01, DAT, m=0, B=0)
  # alternative estimators kept for reference:
  #exp(coef(m))
  #mm <- glm(ST ~ ROAD01, DAT, family="binomial")
  #exp(coef(mm))
  #fU <- sum(DAT$ROAD01[DAT$ST==1]) / sum(DAT$ST)
  #fA <- sum(DAT$ROAD01[DAT$ST==0]) / sum(1-DAT$ST)
  #s <- fU / fA
  #c(s=s, fU=fU, fA=fA)
  unname(coef(m))
}
# Road association per species, plus overall and stratum-specific detection
# rates (BBS roadside vs. non-BBS off-road stations).
sroad <- pbsapply(SPP, s_fun)
pdet <- colSums(yyn[,SPP])/nrow(yyn)
## sroad=log(pdet1/pdet0)
ii1 <- xnn$PCODE == "BBSAB"
ii0 <- !ii1 & xnn$ROAD01==0
pdet1 <- colSums(yyn[ii1,SPP])/sum(ii1)
pdet0 <- colSums(yyn[ii0,SPP])/sum(ii0)
save(sroad, pdet, pdet0, pdet1, file=file.path(ROOT, "josm2", "josm-sroad.Rdata"))
## combining all estimates
library(plotrix)
library(mefa4)
# NOTE(review): setwd() in scripts is fragile; every relative path below
# assumes this directory exists.
setwd("e:/peter/AB_data_v2016/out/birds/josm2")
## year effects saved by the loop above
load("josm-yreffects.Rdata")
LEVEL <- 0.9
# Summary statistics for a sample: mean, median, the two-sided `level`
# quantile limits (named by quantile(), e.g. "2.5%"/"97.5%"), and the
# sample standard deviation labelled SE.
fstat <- function(x, level = 0.95) {
  alpha <- 1 - level
  qs <- quantile(x, c(alpha / 2, 1 - alpha / 2))
  c(Mean = mean(x), Median = median(x), qs, SE = sd(x))
}
# Summarise per-species year-effect distributions, attach life-history
# traits from external lookup tables, and align official BBS trends.
tyr <- t(sapply(all_yr, fstat, level=LEVEL))
## species list
SPP <- rownames(tax)
tyr <- tyr[SPP,]
load("josm-sroad.Rdata")
tax$sroad <- sroad[rownames(tax)]
tax$pdet <- pdet[rownames(tax)]
TAX <- tax
TAX$AOUcode <- rownames(TAX)
# life-history traits (habitat, foraging behavior) from BNA-derived table
lh <- read.csv("~/Dropbox/bam/lhreg2/bna-life-history-info.csv")
TAX$Hab <- lh$habitat[match(TAX$English_Name, lh$species)]
TAX$Beh <- lh$behavior[match(TAX$English_Name, lh$species)]
# collapse the 9 habitat levels into 4-, 3- and 2-class groupings
TAX$Hab4 <- TAX$Hab3 <- TAX$Hab2 <- TAX$Hab
#levels(x$Hab) <- c("Forest", "Grassland", "Lake/Pond", "Marsh", "Mountains",
#    "Open Woodland", "Scrub", "Shore-line", "Town")
levels(TAX$Hab4) <- c("For", "Open", "Wet", "Wet", "Open",
    "Wood", "Open", "Wet", "Open")
levels(TAX$Hab3) <- c("For", "Open", "Open", "Open", "Open",
    "Wood", "Open", "Open", "Open")
levels(TAX$Hab2) <- c("Closed", "Open", "Open", "Open", "Open",
    "Closed", "Open", "Open", "Open")
#levels(TAX$Beh) <- c("Aerial Dive", "Aerial Forager", "Bark Forager",
#    "Flycatching", "Foliage Gleaner", "Ground Forager", "Hovering", "Probing")
TAX$Beh3 <- TAX$Beh
levels(TAX$Beh3) <- c("Aerial", "Aerial", "Bark/Foliage",
    "Aerial", "Bark/Foliage", "Ground", "Bark/Foliage", "Ground")
# migration strategy, with two manual fixes and Nomadic merged into
# short-distance migrants
mig <- read.csv("~/Dropbox/bam/lhreg2/tblAvianLifeHistory.csv")
TAX$Mig <- droplevels(mig$Migration1[match(TAX$English_Name, mig$English_Name)])
TAX["ATTW","Mig"] <- "Resident"
TAX["WISN","Mig"] <- "Short distance migrant"
levels(TAX$Mig)[levels(TAX$Mig)=="Nomadic"] <- "Short distance migrant"
#write.csv(TAX, row.names=FALSE, file="~/Dropbox/josm/2017/Table-tax.csv")
## official BBS trend in BCR6/AB, long- and short-term, aligned to SPP
tbbs <- read.csv("ron_bbs_t20170330.csv")
compare_sets(SPP, tbbs$SpeciesID)
setdiff(SPP, tbbs$SpeciesID)
bbs_lt <- droplevels(tbbs[tbbs$timeFrame=="Long-term",])
bbs_st <- droplevels(tbbs[tbbs$timeFrame=="Short-term",])
bbs_lt <- bbs_lt[match(SPP, bbs_lt$SpeciesID),c("annualTrend", "lowerLimit", "upperLimit")]
bbs_st <- bbs_st[match(SPP, bbs_st$SpeciesID),c("annualTrend", "lowerLimit", "upperLimit")]
rownames(bbs_lt) <- rownames(bbs_st) <- SPP
tBBS <- data.frame(AOUcode=SPP,LongTerm=bbs_lt, ShortTerm=bbs_st)
#write.csv(tBBS, row.names=FALSE, file="~/Dropbox/josm/2017/Table-bbs.csv")
#write.csv(data.frame(AOUcode=rownames(tyr), tyr), row.names=FALSE,
#    file="~/Dropbox/josm/2017/Table-yr.csv")
## residual trend: summarise the bootstrap replicates per partition/density
load("josm-reseffects.Rdata")
all_res2 <- list()
for (i in rownames(vals))
  all_res2[[i]] <- t(sapply(all_res, function(z) fstat(z[[i]], level=LEVEL)))
# medians (column 2 of fstat output) by partition, one matrix per density
# definition; list positions 1:6, 7:12, 13:18, 19:24 follow vals row order
D0 <- sapply(all_res2[1:6], function(z) z[,2])
Dhb <- sapply(all_res2[7:12], function(z) z[,2])
Dcl <- sapply(all_res2[13:18], function(z) z[,2])
Dhf <- sapply(all_res2[19:24], function(z) z[,2])
# cap extreme % annual change values at 100 for display
D0[D0 > 100] <- 100
Dhb[Dhb > 100] <- 100
Dcl[Dcl > 100] <- 100
Dhf[Dhf > 100] <- 100
colnames(D0) <- colnames(Dhb) <- colnames(Dcl) <- colnames(Dhf) <- levels(vals$part)
# long-format Dhf tables (median + quantile limits) for export
lDhf1 <- do.call(cbind, lapply(all_res2[c(19:21)], function(z) z[,2:4]))
colnames(lDhf1) <- paste(rep(names(all_res2)[c(19:21)], each=3),
    c("Median", "CL5", "CL95"), sep="_")
lDhf2 <- do.call(cbind, lapply(all_res2[22:24], function(z) z[,2:4]))
colnames(lDhf2) <- paste(rep(names(all_res2)[c(22:24)], each=3),
    c("Median", "CL5", "CL95"), sep="_")
lDhf1 <- data.frame(AOUcode=rownames(lDhf1), lDhf1)
lDhf2 <- data.frame(AOUcode=rownames(lDhf2), lDhf2)
#write.csv(lDhf1, row.names=FALSE, file="~/Dropbox/josm/2017/Table-res1.csv")
#write.csv(lDhf2, row.names=FALSE, file="~/Dropbox/josm/2017/Table-res2.csv")
## changing averages across the board: boxplots of % annual change per
## partition, one panel per density definition
op <- par(mfrow=c(2,2), las=1, mar=c(5,8,2,2))
boxplot(D0[,6:1], horizontal=TRUE, col="#ABD9E9", main="D0", xlab="% annual change")
abline(v=0, col="#D7191C", lwd=2)
boxplot(Dhb[,6:1], horizontal=TRUE, col="#ABD9E9", main="Dhb", xlab="% annual change")
abline(v=0, col="#D7191C", lwd=2)
boxplot(Dcl[,6:1], horizontal=TRUE, col="#ABD9E9", main="Dcl", xlab="% annual change")
abline(v=0, col="#D7191C", lwd=2)
boxplot(Dhf[,6:1], horizontal=TRUE, col="#ABD9E9", main="Dhf", xlab="% annual change")
abline(v=0, col="#D7191C", lwd=2)
par(op)
## D0-Dhf effects: how estimates shift as model stages are added
WHICH <- "both"
tmp <- cbind(Null=D0[,WHICH], Habitat=Dhb[,WHICH],
    Climate=Dcl[,WHICH], SurroundingHF=Dhf[,WHICH])
ladderplot(tmp, pch=NA, col="#2C7BB6", ylab="% annual change")
abline(h=0, col="#D7191C", lwd=2)
## year effect vs BBS vs residual trend, species present in both sources
rn <- intersect(rownames(Dhf), tbbs$SpeciesID)
tmp <- cbind(bbs_lt[rn,1], bbs_st[rn,1],
    tyr[rn,"Median"], Dhf[rn,c("both","BBS","offroad")])
colnames(tmp) <- c("BBS Long", "BBS Short", "Joint Est.", "Resid. All", "Resid. Roadside", "Resid. Off-road")
ladderplot(tmp, pch=NA, col="#2C7BB6", ylab="% annual change")
abline(h=0, col="#D7191C", lwd=2)
# Annual rates of change in road vs. non-road human footprint in the
# Boreal/Parkland/Foothills natural regions (3x7 veg+HF summaries,
# 1999 vs 2014), used to benchmark the roadside/off-road trend ratio.
e <- new.env()
load("e:/peter/AB_data_v2016/out/3x7/veg-hf_3x7_fix-fire_fix-age0.Rdata", envir=e)
slt <- read.csv("~/repos/abmianalytics/lookup/sitemetadata.csv")
nrn <- as.character(slt$SITE_ID[slt$NATURAL_REGIONS %in% c("Boreal", "Parkland", "Foothills")])
# road footprint classes vs. all human footprint classes
hfn <- c("RoadHardSurface", "RoadTrailVegetated", "RoadVegetatedVerge")
allhf <- c("BorrowpitsDugoutsSumps",
    "Canals", "CultivationCropPastureBareground", "HighDensityLivestockOperation",
    "IndustrialSiteRural", "MineSite", "MunicipalWaterSewage", "OtherDisturbedVegetation",
    "PeatMine", "Pipeline", "RailHardSurface", "RailVegetatedVerge",
    "Reservoirs", "RoadHardSurface", "RoadTrailVegetated", "RoadVegetatedVerge",
    "RuralResidentialIndustrial", "SeismicLine", "TransmissionLine",
    "Urban", "WellSite", "WindGenerationFacility", "CCDecid0", "CCDecidR",
    "CCDecid1", "CCDecid2", "CCDecid3", "CCDecid4", "CCMixwood0",
    "CCMixwoodR", "CCMixwood1", "CCMixwood2", "CCMixwood3", "CCMixwood4",
    "CCConif0", "CCConifR", "CCConif1", "CCConif2", "CCConif3", "CCConif4",
    "CCPine0", "CCPineR", "CCPine1", "CCPine2", "CCPine3", "CCPine4")
rd1999 <- sum(e$yearly_vhf[["1999"]][["veg_current"]][nrn, hfn])
rd2014 <- sum(e$yearly_vhf[["2014"]][["veg_current"]][nrn, hfn])
hf1999 <- sum(e$yearly_vhf[["1999"]][["veg_current"]][nrn, allhf])
hf2014 <- sum(e$yearly_vhf[["2014"]][["veg_current"]][nrn, allhf])
all1999 <- sum(e$yearly_vhf[["1999"]][["veg_current"]][nrn, ])
all2014 <- sum(e$yearly_vhf[["2014"]][["veg_current"]][nrn, ])
# non-road area = total minus road
nrd1999 <- all1999-rd1999
nrd2014 <- all2014-rd2014
## annual rate of change (geometric mean over the 15-year interval)
(rd2014/rd1999)^(1/(2014-1999)) # 1.008023
(nrd2014/nrd1999)^(1/(2014-1999)) # 0.9999121
(hf2014/hf1999)^(1/(2014-1999)) # 1.01053
Delta_1p <- (rd2014/rd1999)^(1/(2014-1999))
Delta_0p <- (nrd2014/nrd1999)^(1/(2014-1999))
Delta_p <- Delta_1p / Delta_0p
# per-species ratio of roadside to off-road trend multipliers
tmp2 <- Dhf[rn,c("both","BBS","offroad")]
d <- (1+tmp2[,"BBS"]/100) / (1+tmp2[,"offroad"]/100)
fstat(d)
Delta_p
# trend ratio corrected for the differential road-area change
dc <- d / Delta_p
plot(d, sroad[names(d)])
cor.test(d, sroad[names(d)]) # no correlation
boxplot(d ~ TAX[names(d),"Hab4"])
boxplot(d ~ TAX[names(d),"Beh3"])
boxplot(d ~ TAX[names(d),"Mig"])
par(mfrow=c(3,3))
z <- tmp[,2]
boxplot(z ~ TAX[names(d),"Hab4"], main="BBS")
boxplot(z ~ TAX[names(d),"Beh3"])
boxplot(z ~ TAX[names(d),"Mig"])
z <- tmp[,5]
boxplot(z ~ TAX[names(d),"Hab4"], main="rdside")
boxplot(z ~ TAX[names(d),"Beh3"])
boxplot(z ~ TAX[names(d),"Mig"])
z <- tmp[,6]
boxplot(z ~ TAX[names(d),"Hab4"], main="offroad")
boxplot(z ~ TAX[names(d),"Beh3"])
boxplot(z ~ TAX[names(d),"Mig"])
## subsets make a difference, that is consistent across how residual is defined
## residual definition impacts mostly extreme estimates
## CL is influential, correlates best with off-road data
## with and without CL (Dhf), explain correlations
## lack of pattern re road associations
## HF calculations: no huge change in road strata to drive changes
## emphasize that BBS and bbs is correlated (but bbs < BBS)
## strata specific trend: math indicates that
## but is off-road trend reliable given temporal sparsity?
## based on CL vs. offroad there is big scatter but agreement on average
## reasons why we see strata specific trend?
## - geographic shift relative to road network (climate change)
## - habitat related rearrangements: suboptimal habitat density declining more
## but why is offroad trend positive??? -- not very reliable (CL average is neutral)
## is it a data artifact or is it real?
## - Hard to decide, but there is strong relationship with pdet
## indicating that data is driving the extreme trends
## or that rare species decline more: no because of funnel shape
## can it be disturbance other than roads? not much bigger changes there either
## correcting for pdet indicates that roadside trend might be -5%
## offroad trend might be around 0%, definitely pointing towards concentration
## in better habitats, need to come up with ways of testing it
## e.g. annual variation in good/bad habitats, and trends over time in these
## look at lh traits
## yr effect and 'both' trend is very similar, joint estimates are more extreme
## also explain: drawing conclusion from 5-10% of the population (assuming even distr)
## we are missing the big part
# Pairwise comparisons of trend estimates across data subsets (tmp/tmp2
# matrices built earlier in the file); dashed line = 1:1, solid line = OLS fit.
plot(BBS ~ BBS_st, tmp2) # bbs is smaller than BBS
abline(0,1,lty=2)
abline(lm(BBS ~ BBS_st, data.frame(tmp2)))
plot(CL ~ offroad, tmp2) # 1:1 but huge scatter
abline(0,1,lty=2)
abline(lm(CL ~ offroad, data.frame(tmp2)))
# Trend estimates vs. detection rate (pdet): extreme trends cluster at low
# detection rates, suggesting data sparsity drives the extremes.
#plot(abs(tmp[,"offroad-noCL"])~pdet[rownames(tmp)]) # strong pattern
plot(tmp[,"offroad-noCL"]~pdet[rownames(tmp)]) # strong pattern
abline(h=0, lty=2)
abline(lm(tmp[,"offroad-noCL"]~pdet[rownames(tmp)]))
plot(tmp[,"BBS"]~pdet[rownames(tmp)]) # strong pattern
abline(h=0, lty=2)
abline(lm(tmp[,"BBS"]~pdet[rownames(tmp)]))
# Joint ('both') trend vs. the year-effect estimate (tyr column 1).
plot(tmp2[,"both"] ~ tyr[rownames(tmp2),1])
abline(0,1, lty=2)
abline(lm(tmp2[,"both"] ~ tyr[rownames(tmp2),1]))
# SE of the year-effect estimates vs. detection rate, with a log-log
# (power-law) regression line drawn through the scatter.
plot(tyr[,"SE"] ~ TAX[rownames(tyr),"pdet"])
mm <- lm(log(tyr[,"SE"]) ~ log(TAX[rownames(tyr),"pdet"]))
f <- function(x) exp(coef(mm)[1]+log(x)*coef(mm)[2])
curve(f, add=TRUE)
## uncertainty: classify each species' trend as -1/0/+1 by the sign of the
## point estimate (column 2), zeroed out when its CI (columns 3:4) covers 0.
## %[]% is intrval's "value inside closed interval" operator.
library(intrval)
ci1 <- data.frame(all_res2[["BBS_Dhf"]])
ci1$sign <- sign(ci1[,2])
ci1$sign[0 %[]% ci1[,3:4]] <- 0
# off-road results aligned to the same species rows as the roadside set
ci0 <- data.frame(all_res2[["offroad_Dhf"]])[rownames(ci1),]
ci0$sign <- sign(ci0[,2])
ci0$sign[0 %[]% ci0[,3:4]] <- 0
# Cross-tabulate off-road (ci0) vs roadside (ci1) trend-sign classes:
# overall, then by migration class, detection rate, and habitat class.
addmargins(table(ci0=ci0$sign, ci1=ci1$sign))
ftable(gr=TAX[rownames(ci1), "Mig"], ci0=ci0$sign, ci1=ci1$sign)
ftable(gr=TAX[rownames(ci1), "pdet"] >= 0.05, ci0=ci0$sign, ci1=ci1$sign)
# BUG FIX: this line previously read `fftable(gr=..., ...), ci0=..., ci1=...)`
# -- an undefined function plus a duplicated, unbalanced argument tail that
# does not parse. Restored to mirror the two ftable() calls above.
ftable(gr=TAX[rownames(ci1), "Hab4"], ci0=ci0$sign, ci1=ci1$sign)
# species IDs declining in both data sets / disagreeing between the two
rownames(ci0)[ci0$sign < 0 & ci1$sign < 0]
rownames(ci0)[ci0$sign > 0 & ci1$sign < 0]
# Compare uncertainty between the data sets: SE(roadside) vs SE(offroad),
# then the SE ratio as a function of road association (log-linear fit), and
# the joint-model SE as a function of detection rate (power-law fit).
plot(ci1$SE~ci0$SE)
abline(0, 1)
plot(TAX[rownames(ci1),"sroad"], ci1$SE/ci0$SE, col="#2C7BB6", cex=1.5,
    ylab="SE(roadside) / SE(offroad)", xlab="Road Association")
abline(h=1, col="#D7191C")
mm <- lm(log(ci1$SE/ci0$SE) ~ TAX[rownames(ci1),"sroad"])
f <- function(x) exp(coef(mm)[1]+x*coef(mm)[2])
curve(f, add=TRUE, col="#2C7BB6")
plot(tyr[,"SE"] ~ TAX[rownames(tyr),"pdet"], col="#2C7BB6", cex=1.5,
    ylab="SE(joint model)", xlab="Detection Rate")
mm <- lm(log(tyr[,"SE"]) ~ log(TAX[rownames(tyr),"pdet"]))
f <- function(x) exp(coef(mm)[1]+log(x)*coef(mm)[2])
curve(f, add=TRUE, col="#2C7BB6")
# Scatter-plot matrix: pairwise correlations in the lower triangle (text
# scaled by |correlation|), regression-annotated scatter plots in the upper
# triangle, and histogram + kernel-density panels on the diagonal.
#
# Args:
#   x   : a data.frame (or matrix-like object accepted by pairs.default).
#   ... : currently unused; reserved for future forwarding.
# Returns: NULL, invisibly (called for its plotting side effect).
Pairs <- function (x, ...)
{
    y <- x
    # Lower triangle: print the pairwise correlation at the panel centre.
    fun.lower <- function(x1, x2, ...) {
        COR <- cor(x1, x2)
        text(mean(range(x1, na.rm = TRUE)), mean(range(x2, na.rm = TRUE)),
            round(COR, 2), cex = 0.5 + 2*abs(COR))
        box(col="grey")
    }
    # Upper triangle: scatter plot with a least-squares fit line.
    fun.upper <- function(x1, x2, ...) {
        if (is.factor(x1)) {
            x1 <- as.integer(x1)
        }
        if (is.factor(x2)) {
            # BUG FIX: the original assigned as.integer(x2) to x1, clobbering
            # the x-axis data instead of converting the y-axis factor.
            x2 <- as.integer(x2)
        }
        abline(h=0, v=0, lty=1, col="grey")
        points(x1, x2, col = "#2C7BB6")
        LM <- lm(x2 ~ x1)
        abline(LM, col="#D7191C")
        box(col="#80B9D8")
    }
    # Diagonal: density-scaled histogram overlaid with a kernel density.
    panel.hist <- function(x, ...) {
        usr <- par("usr")
        on.exit(par(usr))  # restore user coordinates when the panel is done
        par(usr = c(usr[1:2], 0, 1.5))
        h <- hist(x, plot = FALSE)
        breaks <- h$breaks
        nB <- length(breaks)
        y <- h$density
        Max <- max(y)
        y <- y/Max  # normalise so the tallest bar fills the panel
        rect(breaks[-nB], 0, breaks[-1], y, col = "#FDC980", border = "#F07C4A",
            ...)
        abline(v=0, lty=1, col="grey")
        den <- density(x)
        den$y <- den$y/Max
        lines(den, col="#F07C4A")
        box(col="#F07C4A")
    }
    pairs.default(y, lower.panel = fun.lower, upper.panel = fun.upper,
        diag.panel = panel.hist)
    invisible(NULL)
}
# Custom pairs plot of all trend columns, then a 3x3 panel of trait-class
# boxplots for three trend definitions: BBS long-term (col 1), residual
# roadside (col 5) and off-road (col 6); red line marks zero trend.
Pairs(tmp)
par(mfrow=c(3,3))
z <- tmp[,1]
boxplot(z ~ TAX[names(d),"Hab4"], col="#FDC980", ylim=c(-15,15), main="BBS Long Term")
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Beh3"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Mig"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
z <- tmp[,5]
boxplot(z ~ TAX[names(d),"Hab4"], col="#FDC980", ylim=c(-15,15), main="Residual Roadside")
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Beh3"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Mig"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
z <- tmp[,6]
boxplot(z ~ TAX[names(d),"Hab4"], col="#FDC980", ylim=c(-15,15), main="Off-road")
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Beh3"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
boxplot(z ~ TAX[names(d),"Mig"], col="#FDC980", ylim=c(-15,15))
abline(h=0, col="#D7191C")
|
39e66f3488385164799e87b01375a601ec408a9f | 601e1baf3d4bae3a102a224cab1a0a8e6ce3af9b | /R/calculateGrm.R | 8775ce17582d8246730fbcea131d9e3710d86d5a | [] | no_license | jellily/PCAseq | c9ebc21eec16f71b1eaf078831bec15d452f1cde | 908f98cf984de67ea06aab67db3ebef3d0be0935 | refs/heads/master | 2020-12-25T17:03:30.994503 | 2018-02-04T18:29:19 | 2018-02-04T18:29:19 | 29,374,307 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,922 | r | calculateGrm.R | # Functions to calculate the GRMs
# runGRM -----------------------------------------------------------------------
# Open the GDS file and check the orientation of the genotype data; call the
# appropriate GRM method
# Open a GDS genotype file, resolve the sample/SNP selections, detect the
# on-disk orientation of the genotype matrix, and delegate to grmCalc().
# Returns list(GRM matrix, sample IDs used, SNP IDs retained).
runGRM <- function(gdsobj, weights, sampleId, snpId, autosomeOnly,
                   removeMonosnp, maf, missingRate) {
  gds <- snpgdsOpen(gdsobj)
  # Default to every sample in the file; otherwise validate the requested
  # IDs (no duplicates, all present in the file) via checkSamp().
  fileSamples <- read.gdsn(index.gdsn(gds, "sample.id"))
  if (is.null(sampleId)) {
    sampleId <- fileSamples
  } else {
    checkSamp(sampleId, fileSamples)
  }
  # Default to every SNP in the file; quality filtering happens later
  # inside grmCalc().
  fileSnps <- read.gdsn(index.gdsn(gds, "snp.id"))
  if (is.null(snpId)) {
    snpId <- fileSnps
  } else {
    checkSnp(snpId, fileSnps)
  }
  # grmCalc() expects SNP x sample data; a "sample.order" attribute on the
  # genotype node means the file is stored sample-major and must be
  # transposed as it is read in.
  genoAttrs <- names(get.attr.gdsn(index.gdsn(gds, "genotype")))
  transpose <- identical(genoAttrs, "sample.order")
  grm <- grmCalc(gds, weights, sampleId, snpId, autosomeOnly,
                 removeMonosnp, maf, missingRate, transpose)
  snpgdsClose(gds)
  list(grm[[1]], sampleId, grm[[2]])
}
# grmCalc ---------------------------------------------------------------------
# Calculate the GRM
# Accumulate a weighted genetic relationship matrix (GRM) over blocks of
# SNPs read from an open GDS file.
#
# Args:
#   genoDat  : open GDS handle (from snpgdsOpen).
#   weights  : length-2 numeric c(alpha, beta) for the Beta-density SNP
#              weighting (see betaWeights).
#   sampleId : sample IDs to include (already validated by the caller).
#   snpId    : SNP IDs eligible for inclusion.
#   autosomeOnly, removeMonosnp, maf, missingRate : filter options passed
#              through to filterSnps().
#   transpose: TRUE when the genotype node is stored sample-major and must
#              be transposed into SNP x sample on read.
# Returns: list(grm, keepSnps) -- the accumulated matrix and the IDs of the
#          SNPs that actually contributed.
grmCalc <- function(genoDat, weights, sampleId, snpId, autosomeOnly,
                    removeMonosnp, maf, missingRate, transpose){
  # constants
  nBlocks <- 5000   # SNPs per read block
  byRows <- 1       # sweep() margin: operate row-wise (per SNP)
  nCopies <- 2      # diploid allele copies
  nSubj <- length(sampleId)
  alpha <- weights[1]
  beta <- weights[2]
  # column indices of the requested subjects within the file
  subj <- read.gdsn(index.gdsn(genoDat, "sample.id"))
  subj <- which(subj %in% sampleId)
  snps <- read.gdsn(index.gdsn(genoDat, "snp.id")) # the letter SNP codes
  nSnps <- length(snps)
  maxBlocks <- ceiling(nSnps / nBlocks) # maximum number of blocks to loop over
  # Preallocate per-block results instead of growing vectors in the loop.
  keepSnps <- vector("list", maxBlocks)
  grm <- matrix(0, nrow = nSubj, ncol = nSubj)
  # Loop through the SNPs in blocks of size nBlocks
  for (i in seq_len(maxBlocks)) {
    message(paste("Computing GRM: Block", i, "of", maxBlocks))
    # Block boundaries within the SNP dimension (last block may be short).
    first <- 1 + (i - 1) * nBlocks
    count <- min(nBlocks, nSnps - first + 1)
    # Read the block as SNP x sample, transposing if stored sample-major.
    if (isTRUE(transpose)) {
      snpDat <- read.gdsn(index.gdsn(genoDat, "genotype"),
                          start = c(1, first), count = c(-1, count))
      snpDat <- t(snpDat)
    } else {
      snpDat <- read.gdsn(index.gdsn(genoDat, "genotype"),
                          start = c(first, 1), count = c(count, -1))
    }
    # Chromosome IDs for this block (used by the autosome-only filter).
    snpChrom <- read.gdsn(index.gdsn(genoDat, "snp.chromosome"),
                          start = first, count = count)
    # Subset columns to the requested subjects, then filter SNPs (rows).
    snpDat <- snpDat[ , subj]
    snpIndex <- filterSnps(snpDat, autosomeOnly, removeMonosnp,
                           missingRate, maf, snpChrom)
    snpIndex <- snpIndex & snps[first:(first + count - 1)] %in% snpId
    # drop = FALSE keeps matrix shape when exactly one SNP survives, so a
    # single-SNP block still contributes (and stays consistent with keepSnps).
    snpDat <- snpDat[snpIndex, , drop = FALSE]
    # which(snpIndex) is block-local; shift by the block offset (first - 1)
    # to index into the file-wide snps vector.
    keepSnps[[i]] <- snps[which(snpIndex) + (i - 1) * nBlocks]
    # BUG FIX: the original tested identical(class(snpDat), "matrix"), which
    # is always FALSE in R >= 4.0 (matrices have class c("matrix", "array")),
    # so every block was skipped and the function always stopped with the
    # zero-matrix error. is.matrix() is version-proof.
    if (!is.matrix(snpDat) || nrow(snpDat) == 0) {
      message("No data remains in this block after filtering. Going to next block.")
      next
    }
    # Allele frequency per SNP; genotypes are 0/1/2 copies of the allele.
    alleleFreq <- (1 / nCopies) * rowMeans(snpDat, na.rm = TRUE)
    snpWeights <- betaWeights(alleleFreq, alpha, beta)
    # Centre genotypes at their expected value (2p), scale by the SNP weight,
    # and accumulate the cross-product into the GRM.
    genoCent <- sweep(snpDat, byRows, STATS = nCopies * alleleFreq,
                      check.margin = FALSE)
    zee <- sweep(genoCent, byRows, STATS = snpWeights, FUN = "*",
                 check.margin = FALSE)
    grm <- grm + crossprod(zee)
  }
  keepSnps <- unlist(keepSnps)
  if (identical(grm, matrix(0, nrow = nSubj, ncol = nSubj))) {
    stop("GRM is the zero matrix. Perhaps all of the SNPs were removed when ",
         "filtering or there is no variability in the genotype data.")
  }
  list(grm, keepSnps)
}
# betaWeights ------------------------------------------------------------------
# function to find the weights for each SNP by MAF
# Per-SNP weight: the Beta(alpha, beta) density evaluated at the minor
# allele frequency (computed by the project helper calcMaf).
betaWeights <- function(alleleFreq, alpha, beta) {
  maf <- calcMaf(alleleFreq)
  dbeta(maf, alpha, beta)
}
|
2a09de29d7fac1525f218b61ba9af48fdc18b139 | 0c01d291ffc39a0bc5b66c32630141c164e7bc3b | /server.R | 4c3a34e077310b1c3c5f4f8399e1d468241c3d08 | [] | no_license | gringer/conference-bingo | 94bc59caeea64ae772a133d073711297d0182bf7 | d47efe205c8f66d98d17da6a6b9bf8bc6da6fcfb | refs/heads/master | 2021-01-20T06:31:27.647284 | 2017-05-01T10:57:42 | 2017-05-01T10:57:42 | 89,889,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,825 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(digest)
# Define server logic required to draw a histogram
# Shiny server: renders a conference-bingo card and serves PDF/PNG downloads.
# Reactive inputs used: cName (conference name), wordList (newline-separated
# phrases), style ("<cols>x<rows>" grid size), freeWord (centre-square text).
shinyServer(function(input, output) {
  # State shared between the on-screen renderer and the download handlers:
  # the most recently drawn grid's labels, dimensions and word-list hash.
  gridData <- reactiveValues();
  gridData$Labels <- NULL;
  gridData$Dims <- NULL;
  # Draw the current bingo grid on the active graphics device.
  drawGrid <- function(){
    gridDims <- gridData$Dims;
    gridLabels <- gridData$Labels;
    par(mar=c(4,4,4,4));
    plot(NA, xlim=c(0,1), ylim=c(0,1), axes=FALSE,
         main=sprintf("%s Conference Bingo\n[Word list hash: %s]",
                      input$cName, gridData$Digest),
         xlab="", ylab="", cex.main=2);
    rect(0, 0, 1, 1, lwd=3);
    for(xp in (1:gridDims[1]-1)){
      for(yp in (1:gridDims[2]- 1)){
        rect(xp/gridDims[1], yp/gridDims[2],
             (xp+1)/gridDims[1], (yp+1)/gridDims[2]);
        ## BUG FIX: the row stride must be the column count (gridDims[1]);
        ## the original used gridDims[2], which duplicates/drops labels on
        ## non-square grids (identical behaviour on square grids).
        gridText <- gridLabels[yp*gridDims[1]+xp+1];
        ## shrink long labels so they fit inside their cell
        text((xp+0.5)/gridDims[1], (yp+0.5)/gridDims[2],
             gridText, cex = min(3,max(0.5,(15/gridDims[1])/sqrt(nchar(gridText)))));
      }
    }
    mtext(1, 1, cex=0.71,
          text = "http://bingo.gringene.org [source: https://github.com/gringer/conference-bingo]");
    box(which="figure");
  }
  # On-screen rendering: sample labels, stash them in gridData, then draw.
  output$bingoGrid <- renderPlot({
    wordList <- sort(unlist(strsplit(input$wordList,"\n")));
    ## make labels; sample with replacement only when the list is too short
    gridDims <- as.numeric(unlist(strsplit(input$style,"x")));
    gridLabels <- sample(wordList, prod(gridDims), replace = length(wordList) < prod(gridDims));
    gridLabels[ceiling(length(gridLabels)/2)] <- paste0("FREE\n",input$freeWord);
    gridData$Dims <- gridDims;
    gridData$Labels <- gridLabels;
    ## short hash lets players verify everyone used the same word list
    gridData$Digest <- substring(digest(c(input$freeWord, wordList), "sha512"),1,12);
    ## draw grid
    drawGrid();
  }, width=600, height=600)
  # Download the current card as a PDF.
  output$bingoBoard.pdf <- downloadHandler(
    filename = function(){
      fName <- sprintf("bingo_%s_%s_%s.pdf",input$style,
                       gsub(" ","-",input$cName),
                       format(Sys.Date(),"%Y-%b-%d"));
      cat("Writing to file: ",fName, "\n");
      return(fName);
    },
    content = function(con){
      pdf(con, width=8, height=8);
      drawGrid();
      dev.off();
    },
    ## BUG FIX: "text/pdf" is not a registered MIME type
    contentType = "application/pdf"
  );
  # Download the current card as a PNG.
  output$bingoBoard.png <- downloadHandler(
    filename = function(){
      fName <- sprintf("bingo_%s_%s_%s.png",input$style,
                       gsub(" ","-",input$cName),
                       format(Sys.Date(),"%Y-%b-%d"));
      cat("Writing to file: ",fName, "\n");
      return(fName);
    },
    content = function(con){
      png(con, width=1024, height=1024, pointsize = 18);
      drawGrid();
      dev.off();
    },
    contentType = "image/png"
  );
})
|
476c2e68c4476d917097ceb9257591b59be0a3f3 | a5623ca69f24f073f2c43469081c4bf3bbaa2825 | /man/redis.connect.Rd | 2d157e2fdc2ca7e16e3450e79822fee4e95db161 | [] | no_license | att-bdpaas/rediscc | a4b8e6183505d63145bca5b2dcc480d329ce1b92 | f43420964ea326e2bc35a24362273ec1437b38d9 | refs/heads/master | 2021-01-20T16:48:11.815902 | 2014-09-14T03:06:05 | 2014-09-14T03:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,270 | rd | redis.connect.Rd | \name{redis.connect}
\alias{redis.connect}
\alias{redis.close}
\title{
Manage connections to a Redis database
}
\description{
\code{redis.connect} creates a new connection to a Redis database and
returns the handle.
\code{redis.close} closes a Redis database connection.
}
\usage{
redis.connect(host = "localhost", port = 6379L, timeout = 30,
reconnect = FALSE, retry = FALSE, db = getOption("redis.default.db", 0L))
redis.close(rc)
}
\arguments{
\item{host}{name of the host to connect to or a path to the socket (if
\code{port} is 0)}
\item{port}{numeric, TCP port to connectt to or 0 is a local (unix)
socket is to be used instead (not supported on Windows, obvously).}
\item{timeout}{numeric, timeout in seconds for requests (reals are
supported for sub-second accuracy)}
\item{reconnect}{logical, if \code{TRUE} then commands used on this
connection will attempt to re-connect in case the connection is
closed unexpectedly (e.g., due to a previous error).}
\item{retry}{logical, if \code{TRUE} then commands will attempt to
retry once on connection failure by closing the connection,
re-connecting and re-trying. Only meaningful in conjunction with
\code{reconnect=TRUE}.}
\item{db}{integer, index of the database (keyspace) to use. The index
\code{0} is the default and any other index will result in a
\code{SELECT} command to be sent upon connection to select the
desired database.}
\item{rc}{Redis connection handle (as returned by \code{redis.connect})}
}
%\details{
%}
\value{
\code{redis.connect}: an opaque handle to use for subsequent
operations on the connection (object of the class \code{redisConnection})
\code{redis.close}: \code{NULL} (invisibly)
}
%\references{
%}
\author{
Simon Urbanek
}
%\note{
%}
%\seealso{
%}
\examples{
## try connecting -
c <- tryCatch(redis.connect(),
error = function(e) {
cat("Cannot connect",e$message, " - please start Redis\n")
NULL
})
if (!is.null(c)) { ## go ahead only if Redis is up and we got a connection
print(redis.get(c, "foo"))
print(redis.set(c, "foo", "bar"))
print(redis.get(c, "foo"))
redis.rm(c, "foo")
redis.close(c)
}
}
\keyword{database}
|
8cdf3a07b8744e107dc60cbc886621d116fee405 | 146d7de2db9f1bffeaa24e7f01200c38ea2e9d75 | /man/center.buoyancy.Rd | fa0a9ce80eca5d4966d89bd64ab764ef640425f5 | [] | no_license | lawinslow/rLakeAnalyzer | 13e9b11da4b44042fea0dd88210d7e47a9557c5c | 69749f58a665358dac8263a2d08d92f9100f1ff3 | refs/heads/master | 2021-01-21T03:13:08.229100 | 2018-09-13T19:49:51 | 2018-09-13T19:49:51 | 25,272,757 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,223 | rd | center.buoyancy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/center.buoyancy.R
\name{center.buoyancy}
\alias{center.buoyancy}
\title{Calculates the center of buoyancy.}
\usage{
center.buoyancy(wtr, depths)
}
\arguments{
\item{wtr}{a numeric vector of water temperature in degrees C}
\item{depths}{a numeric vector corresponding to the depths (in m) of the wtr
measurements}
}
\value{
Returns a value for the center of buoyancy.
}
\description{
Calculate the center of buoyancy using buoyancy frequency with a center of
mass analysis. Brunt-Vaisala frequency is used for a temperature profile.
Negative values for N2 are set to 0 (as they represent transient
instabilities or sensor calibration issues) for this calculation.
}
\examples{
# A vector of water temperatures
wtr = c(22.51, 22.42, 22.4, 22.4, 22.4, 22.36, 22.3, 22.21, 22.11, 21.23, 16.42,
15.15, 14.24, 13.35, 10.94, 10.43, 10.36, 9.94, 9.45, 9.1, 8.91, 8.58, 8.43)
#A vector defining the depths
depths = c(0, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20)
c.b = center.buoyancy(wtr, depths)
}
\seealso{
\code{buoyancy.freq}, \code{ts.buoyancy.freq},
\code{center.buoyancy}
}
\keyword{arith}
|
72dea7e460186c1bea45d12f5b011ea9750c6c13 | b7f0d300ee724bf170f08394257391ea12703945 | /c4exda-proj-2/plot2.R | edbb13c7eb72784f95ddc1a88e24d01f52242f81 | [] | no_license | TheYuanLiao/datasciencecoursera | c36a62d1f4af6e4cad2d9188cb9814d0d8ed6bd1 | e227991e3ab6169f01476557a8987d1e840b5d64 | refs/heads/master | 2023-01-23T19:02:49.647831 | 2020-10-24T15:31:28 | 2020-10-24T15:31:28 | 281,198,019 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 533 | r | plot2.R | library(data.table)
library(dplyr)
# Load data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Calculate the total by fips x year
total_fips <-
NEI %>% group_by(fips, year) %>%
summarize(total = sum(Emissions)) %>%
filter(fips == "24510")
# Plot and save to .png
png("plot2.png", width = 480, height = 480)
with(total_fips, plot(year, total, size = 4,
xlab="Year",
ylab="Total emissions in the Baltimore City, Maryland (ton)"))
dev.off() |
8f2e3a0c90f2c4e373d91a6a278eb4d7c8d0cb05 | a4ee306ac560fc2f92d3e45ff47a63aa49eae085 | /scripts/graph1.R | f5c41c2ca648aacde1b04a27bedac93dac36aa8d | [
"MIT"
] | permissive | albertklam/final-project-INFO201 | 917cc404062568eec9aa9fe1a30f143f3e69c405 | 925a364df4164a75a52c59644d8cc0b1f9ba0125 | refs/heads/master | 2022-10-27T02:35:08.984885 | 2020-06-16T21:25:20 | 2020-06-16T21:25:20 | 272,814,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,657 | r | graph1.R | # Graph 1 Tab Panel for Shiny.io
# Tab-panel placeholder for the first visualization tab of the Shiny app.
graph_1 <- tabPanel(
  "First Graph",
  mainPanel(
    h1("Graph 1 Title")
  )
)
library(dplyr)
library(plotly)
# Load the global CO2 emissions table (1751-2014); keep strings as character
# so the Year and consumption columns parse predictably.
co2_data <- read.csv("data/global.1751_2014.csv", stringsAsFactors = FALSE)
# Returns a stacked bar chart for the co2_data, given specified
# years
build_stacked_bar <- function(year1, year2) {
co2_data_filtered <- co2_data %>%
filter(!is.na(Carbon.emissions.from.gas.flaring)) %>%
filter(Year >= year1) %>%
filter(Year <= year2) %>%
select(
-Per.capita.carbon.emissions..metric.tons.of.carbon..after.1949.only.)
stacked_co2_chart <- plot_ly(
data = co2_data_filtered,
x = ~Year,
y = ~Carbon.emissions.from.gas.fuel.consumption,
type = "bar",
name = "Gas Fuel Consumption"
)
stacked_co2_chart <- stacked_co2_chart %>%
add_trace(y = ~Carbon.emissions.from.liquid.fuel.consumption,
name = "Liquid Fuel Consumption")
stacked_co2_chart <- stacked_co2_chart %>%
add_trace(y = ~Carbon.emissions.from.solid.fuel.consumption,
name = "Solid Fuel Consumption")
stacked_co2_chart <- stacked_co2_chart %>%
add_trace(y = ~Carbon.emissions.from.cement.production,
name = "Cement Production")
stacked_co2_chart <- stacked_co2_chart %>%
add_trace(y = ~Carbon.emissions.from.gas.flaring,
name = "Gas Flaring")
stacked_co2_chart <- stacked_co2_chart %>%
layout(title = "Global CO2 Emissions from Fossil Fuels through the Years",
yaxis = list(title = "Carbon Emissions (million metric tons of C)"),
barmode = "stack")
return(stacked_co2_chart)
} |
6233a6a7e72d35c67a8942c252b2ee4016e00f05 | 4bcb27b3b8f6d8f7fb2de20e08943e05d6aeaf84 | /Waterquality_timeseries.r | 99160b4fd3107b989c4f1711fec2bce113cf8f80 | [] | no_license | shoumikgoswami/IIM-A-Capstone | 161bb54968d784d3ce7e75336781ac4b8193eb85 | 7bab12ddc90d3c0f6f7673f149dde8d4f6810686 | refs/heads/master | 2020-04-03T03:08:41.330972 | 2018-10-27T15:10:23 | 2018-10-27T15:10:23 | 154,977,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,300 | r | Waterquality_timeseries.r |
library(forecast)
library(fpp)
library(TTR)
library(ggplot2)
# Load the combined water-quality table and drop every row containing an NA.
data <- read.csv("waterqualitydata.csv")
row.has.na <- apply(data, 1, function(x){any(is.na(x))})
sum(row.has.na)
final_data <- data[!row.has.na,]
head(final_data)
# Coerce measurement columns to numeric.
# NOTE(review): as.numeric(as.double(.)) is redundant, and if read.csv
# produced factors (the default in R < 4.0) as.double() would return the
# factor level codes rather than the measured values -- confirm column
# classes. Also, mean_nitrate is coerced here but later code reads a column
# named mean_nitratenitrite -- confirm which column name actually exists.
final_data$mean_temp <- as.numeric(as.double(final_data$mean_temp))
final_data$mean_ph <- as.numeric(as.double(final_data$mean_ph))
final_data$mean_conductivity <- as.numeric(as.double(final_data$mean_conductivity))
final_data$mean_bod <- as.numeric(as.double(final_data$mean_bod))
final_data$mean_nitrate <- as.numeric(as.double(final_data$mean_nitrate))
final_data$mean_fecalcoliform <- as.numeric(as.double(final_data$mean_fecalcoliform))
final_data$WQI <- as.numeric(as.integer(final_data$WQI))
# Per-state subsets of the cleaned data (only some of these are analysed in
# the time-series sections below).
data.andhra <- final_data[final_data$STATE %in% "ANDHRA PRADESH",]
data.assam <- final_data[final_data$STATE %in% "ASSAM",]
data.bihar <- final_data[final_data$STATE %in% "BIHAR",]
data.chhattisgarh <- final_data[final_data$STATE %in% "CHHATTISGARH",]
data.dadra <- final_data[final_data$STATE %in% "DADRA NAGAR HAVELI",]
data.daman <- final_data[final_data$STATE %in% "DAMAN & DIU",]
data.goa <- final_data[final_data$STATE %in% "GOA",]
data.himachal <- final_data[final_data$STATE %in% "HIMACHAL PRADESH",]
data.kerala <- final_data[final_data$STATE %in% "KERALA",]
data.lakshadweep <- final_data[final_data$STATE %in% "LAKSHADWEEP",]
data.madhyapradesh <- final_data[final_data$STATE %in% "MADHYA PRADESH",]
data.maharashtra <- final_data[final_data$STATE %in% "MAHARASHTRA",]
data.mizoram <- final_data[final_data$STATE %in% "MIZORAM",]
data.odisha <- final_data[final_data$STATE %in% "ODISHA",]
data.pondi <- final_data[final_data$STATE %in% "PONDICHERRY",]
data.punjab <- final_data[final_data$STATE %in% "PUNJAB",]
data.rajasthan <- final_data[final_data$STATE %in% "RAJASTHAN",]
data.tripura <- final_data[final_data$STATE %in% "TRIPURA",]
data.uttarpradesh <- final_data[final_data$STATE %in% "UTTAR PRADESH",]
data.westbengal <- final_data[final_data$STATE %in% "WEST BENGAL",]
# --- Andhra Pradesh ---------------------------------------------------------
# Per-parameter vectors for the state subset.
# NOTE(review): mean_nitratenitrite is referenced here although the cleaning
# step above coerced a column named mean_nitrate -- confirm the column name.
ap_temp <- data.andhra$mean_temp
ap_ph <- data.andhra$mean_ph
ap_conductivity <- data.andhra$mean_conductivity
ap_bod <- data.andhra$mean_bod
ap_nitrate <- data.andhra$mean_nitratenitrite
ap_fecal <- data.andhra$mean_fecalcoliform
# Annual time series, 2006-2014 (frequency = 1, one observation per year).
ap_temp_ts <-ts(ap_temp, start=c(2006, 1), end=c(2014, 1), frequency=1)
ap_ph_ts <-ts(ap_ph, start=c(2006, 1), end=c(2014, 1), frequency=1)
ap_conductivity_ts <-ts(ap_conductivity, start=c(2006, 1), end=c(2014, 1), frequency=1)
ap_bod_ts <- ts(ap_bod, start=c(2006, 1), end=c(2014, 1), frequency=1)
ap_nitrate_ts <- ts(ap_nitrate, start=c(2006, 1), end=c(2014, 1), frequency=1)
ap_fecal_ts <- ts(ap_fecal, start=c(2006, 1), end=c(2014, 1), frequency=1)
# Visual inspection of each series.
plot(ap_temp_ts)
plot(ap_ph_ts)
plot(ap_conductivity_ts)
plot(ap_bod_ts)
plot(ap_nitrate_ts)
plot(ap_fecal_ts)
# NOTE(review): ggseasonplot on an annual (frequency = 1) series has no
# seasonal cycle to display -- confirm this is intentional.
ggseasonplot(ap_temp_ts, col = rainbow(7), year.labels = TRUE)
ggseasonplot(ap_ph_ts, col = rainbow(7), year.labels = TRUE)
ggseasonplot(ap_conductivity_ts, col = rainbow(7), year.labels = TRUE)
ggseasonplot(ap_bod_ts, col = rainbow(7), year.labels = TRUE)
ggseasonplot(ap_nitrate_ts, col = rainbow(7), year.labels = TRUE)
ggseasonplot(ap_fecal_ts, col = rainbow(7), year.labels = TRUE)
library(tseries)
# Augmented Dickey-Fuller stationarity tests on differenced series; the
# differencing applied here motivates the d= passed to auto.arima below.
adf.test(diff(diff(ap_temp_ts)))
adf.test(diff(diff(ap_ph_ts)))
adf.test(diff(ap_conductivity_ts))
adf.test(diff(diff(ap_bod_ts)))
adf.test(diff(diff(ap_nitrate_ts)))
adf.test(diff(diff(ap_fecal_ts)))
# Temperature: simple exponential smoothing (HoltWinters with beta = gamma =
# FALSE) and an ARIMA(d = 2) fit, each forecast 3 years ahead.
fit_ap_temp_ts <- HoltWinters(ap_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_ap_temp_ts, 3)
plot(forecast(fit_ap_temp_ts,3))
ARIMAfit_ap_temp_ts <- auto.arima(ap_temp_ts, d=2, approximation=FALSE,trace=FALSE)
fr_ap_temp_ts <- forecast(ARIMAfit_ap_temp_ts,3)
fr_ap_temp_ts
plot(fr_ap_temp_ts)
# pH: exponential smoothing only.
fit_ap_ph_ts <- HoltWinters(ap_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_ap_ph_ts, 3)
plot(forecast(fit_ap_ph_ts,3))
# Remaining parameters: ARIMA(d = 2) fits with 3-step forecasts.
ARIMAfit_ap_conductivity_ts <- auto.arima(ap_conductivity_ts,d=2, approximation=FALSE,trace=FALSE)
fr_ap_conductivity_ts <- forecast(ARIMAfit_ap_conductivity_ts,3)
fr_ap_conductivity_ts
plot(fr_ap_conductivity_ts)
ARIMAfit_ap_bod_ts <- auto.arima(ap_bod_ts, d=2, approximation=FALSE,trace=FALSE)
fr_ap_bod_ts <- forecast(ARIMAfit_ap_bod_ts,3)
fr_ap_bod_ts
plot(fr_ap_bod_ts)
ARIMAfit_ap_nitrate_ts <- auto.arima(ap_nitrate_ts, d=2, approximation=FALSE,trace=FALSE)
fr_ap_nitrate_ts <- forecast(ARIMAfit_ap_nitrate_ts,3)
fr_ap_nitrate_ts
plot(fr_ap_nitrate_ts)
ARIMAfit_ap_fecal_ts <- auto.arima(ap_fecal_ts, d=2, approximation=FALSE,trace=FALSE)
fr_ap_fecal_ts <- forecast(ARIMAfit_ap_fecal_ts,3)
fr_ap_fecal_ts
plot(fr_ap_fecal_ts)
# --- Assam ------------------------------------------------------------------
# Annual series, 2006-2014, built directly from the state subset.
as_temp_ts <-ts(data.assam$mean_temp, start=c(2006, 1), end=c(2014, 1), frequency=1)
as_ph_ts <-ts(data.assam$mean_ph, start=c(2006, 1), end=c(2014, 1), frequency=1)
as_conductivity_ts <-ts(data.assam$mean_conductivity, start=c(2006, 1), end=c(2014, 1), frequency=1)
as_bod_ts <- ts(data.assam$mean_bod, start=c(2006, 1), end=c(2014, 1), frequency=1)
as_nitrate_ts <- ts(data.assam$mean_nitratenitrite, start=c(2006, 1), end=c(2014, 1), frequency=1)
as_fecal_ts <- ts(data.assam$mean_fecalcoliform, start=c(2006, 1), end=c(2014, 1), frequency=1)
## Plots
plot(as_temp_ts)
plot(as_ph_ts)
plot(as_conductivity_ts)
plot(as_bod_ts)
plot(as_nitrate_ts)
plot(as_fecal_ts)
# Stationarity checks; note nitrate is tested on the log scale and is also
# fitted on the log scale below (so its forecasts are in log units).
adf.test(diff(diff(as_temp_ts)))
adf.test(diff(as_ph_ts))
adf.test(diff(as_conductivity_ts))
adf.test(diff(diff(as_bod_ts)))
adf.test(log(as_nitrate_ts))
adf.test(as_fecal_ts)
# ARIMA fits (differencing order per the ADF results) with 3-step forecasts.
ARIMAfit_as_temp_ts <- auto.arima(as_temp_ts, d=2, approximation=FALSE,trace=FALSE)
fr_as_temp_ts <- forecast(ARIMAfit_as_temp_ts,3)
fr_as_temp_ts
plot(fr_as_temp_ts)
ARIMAfit_as_ph_ts <- auto.arima(as_ph_ts, d=1, approximation=FALSE,trace=FALSE)
fr_as_ph_ts <- forecast(ARIMAfit_as_ph_ts,3)
fr_as_ph_ts
plot(fr_as_ph_ts)
ARIMAfit_as_conductivity_ts <- auto.arima(as_conductivity_ts, approximation=FALSE,trace=FALSE)
fr_as_conductivity_ts <- forecast(ARIMAfit_as_conductivity_ts,3)
fr_as_conductivity_ts
plot(fr_as_conductivity_ts)
ARIMAfit_as_bod_ts <- auto.arima(as_bod_ts, d=2, approximation=FALSE,trace=FALSE)
fr_as_bod_ts <- forecast(ARIMAfit_as_bod_ts,3)
fr_as_bod_ts
plot(fr_as_bod_ts)
ARIMAfit_as_nitrate_ts <- auto.arima(log(as_nitrate_ts), d=2, approximation=FALSE,trace=FALSE)
fr_as_nitrate_ts <- forecast(ARIMAfit_as_nitrate_ts,3)
fr_as_nitrate_ts
plot(fr_as_nitrate_ts)
ARIMAfit_as_fecal_ts <- auto.arima(as_fecal_ts,d=2, approximation=FALSE,trace=FALSE)
fr_as_fecal_ts <- forecast(ARIMAfit_as_fecal_ts,3)
fr_as_fecal_ts
plot(fr_as_fecal_ts)
# --- Goa --------------------------------------------------------------------
# Annual series; Goa only spans 2006-2013.
goa_temp_ts <-ts(data.goa$mean_temp, start=c(2006, 1), end=c(2013, 1), frequency=1)
goa_ph_ts <-ts(data.goa$mean_ph, start=c(2006, 1), end=c(2013, 1), frequency=1)
goa_conductivity_ts <-ts(data.goa$mean_conductivity, start=c(2006, 1), end=c(2013, 1), frequency=1)
goa_bod_ts <- ts(data.goa$mean_bod, start=c(2006, 1), end=c(2013, 1), frequency=1)
goa_nitrate_ts <- ts(data.goa$mean_nitratenitrite, start=c(2006, 1), end=c(2013, 1), frequency=1)
goa_fecal_ts <- ts(data.goa$mean_fecalcoliform, start=c(2006, 1), end=c(2013, 1), frequency=1)
#Plots
plot(goa_temp_ts)
plot(goa_ph_ts)
plot(goa_conductivity_ts)
plot(goa_bod_ts)
plot(goa_nitrate_ts)
plot(goa_fecal_ts)
# Stationarity checks.
adf.test(diff(goa_temp_ts))
adf.test(goa_ph_ts)
adf.test(goa_conductivity_ts)
adf.test(goa_bod_ts)
adf.test(diff(goa_nitrate_ts))
adf.test(goa_fecal_ts)
# Temperature/pH/nitrate: ARIMA; conductivity/BOD/fecal coliform: simple
# exponential smoothing. (Note the "nirate" spelling baked into the
# nitrate variable names below.)
ARIMAfit_goa_temp_ts <- auto.arima(goa_temp_ts, d=2, approximation=FALSE,trace=FALSE)
fr_goa_temp_ts <- forecast(ARIMAfit_goa_temp_ts,3)
fr_goa_temp_ts
plot(fr_goa_temp_ts)
ARIMAfit_goa_ph_ts <- auto.arima(goa_ph_ts, d=2, approximation=FALSE,trace=FALSE)
fr_goa_ph_ts <- forecast(ARIMAfit_goa_ph_ts,3)
fr_goa_ph_ts
plot(fr_goa_ph_ts)
fit_goa_conductivity_ts <- HoltWinters(goa_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_goa_conductivity_ts, 3)
plot(forecast(fit_goa_conductivity_ts,3))
fit_goa_bod_ts <- HoltWinters(goa_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_goa_bod_ts, 3)
plot(forecast(fit_goa_bod_ts,3))
ARIMAfit_goa_nirate_ts <- auto.arima(goa_nitrate_ts, d=1,approximation=FALSE,trace=FALSE)
fr_goa_nirate_ts <- forecast(ARIMAfit_goa_nirate_ts,3)
fr_goa_nirate_ts
plot(fr_goa_nirate_ts)
fit_goa_fecal_ts <- HoltWinters(goa_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_goa_fecal_ts, 3)
plot(forecast(fit_goa_fecal_ts,3))
# --- Himachal Pradesh -------------------------------------------------------
# Annual series, 2006-2014.
hp_temp_ts <-ts(data.himachal$mean_temp, start=c(2006, 1), end=c(2014, 1), frequency=1)
hp_ph_ts <-ts(data.himachal$mean_ph, start=c(2006, 1), end=c(2014, 1), frequency=1)
hp_conductivity_ts <-ts(data.himachal$mean_conductivity, start=c(2006, 1), end=c(2014, 1), frequency=1)
hp_bod_ts <- ts(data.himachal$mean_bod, start=c(2006, 1), end=c(2014, 1), frequency=1)
hp_nitrate_ts <- ts(data.himachal$mean_nitratenitrite, start=c(2006, 1), end=c(2014, 1), frequency=1)
hp_fecal_ts <- ts(data.himachal$mean_fecalcoliform, start=c(2006, 1), end=c(2014, 1), frequency=1)
#Plots
plot(hp_temp_ts)
plot(hp_ph_ts)
plot(hp_conductivity_ts)
plot(hp_bod_ts)
plot(hp_nitrate_ts)
plot(hp_fecal_ts)
# Stationarity checks.
adf.test(hp_temp_ts)
adf.test(diff(hp_ph_ts))
adf.test(hp_conductivity_ts)
adf.test(diff(hp_bod_ts))
adf.test(diff(hp_nitrate_ts))
adf.test(hp_fecal_ts)
# Exponential smoothing for temp/conductivity/fecal; ARIMA for pH/BOD/nitrate.
fit_hp_temp_ts <- HoltWinters(hp_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_hp_temp_ts, 3)
plot(forecast(fit_hp_temp_ts,3))
ARIMAfit_hp_ph_ts <- auto.arima(hp_ph_ts, d=2,approximation=FALSE,trace=FALSE)
fr_hp_ph_ts <- forecast(ARIMAfit_hp_ph_ts,3)
fr_hp_ph_ts
plot(fr_hp_ph_ts)
fit_hp_conductivity_ts <- HoltWinters(hp_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_hp_conductivity_ts, 3)
plot(forecast(fit_hp_conductivity_ts,3))
ARIMAfit_hp_bod_ts <- auto.arima(hp_bod_ts, d=2,approximation=FALSE,trace=FALSE)
fr_hp_bod_ts <- forecast(ARIMAfit_hp_bod_ts,3)
fr_hp_bod_ts
plot(fr_hp_bod_ts)
ARIMAfit_hp_nitrate_ts <- auto.arima(hp_nitrate_ts, d=1,approximation=FALSE,trace=FALSE)
fr_hp_nitrate_ts <- forecast(ARIMAfit_hp_nitrate_ts,3)
fr_hp_nitrate_ts
plot(fr_hp_nitrate_ts)
fit_hp_fecal_ts <- HoltWinters(hp_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_hp_fecal_ts, 3)
plot(forecast(fit_hp_fecal_ts,3))
# --- Kerala -----------------------------------------------------------------
# Annual series, 2006-2014.
kr_temp_ts <-ts(data.kerala$mean_temp, start=c(2006, 1), end=c(2014, 1), frequency=1)
kr_ph_ts <-ts(data.kerala$mean_ph, start=c(2006, 1), end=c(2014, 1), frequency=1)
kr_conductivity_ts <-ts(data.kerala$mean_conductivity, start=c(2006, 1), end=c(2014, 1), frequency=1)
kr_bod_ts <- ts(data.kerala$mean_bod, start=c(2006, 1), end=c(2014, 1), frequency=1)
kr_nitrate_ts <- ts(data.kerala$mean_nitratenitrite, start=c(2006, 1), end=c(2014, 1), frequency=1)
kr_fecal_ts <- ts(data.kerala$mean_fecalcoliform, start=c(2006, 1), end=c(2014, 1), frequency=1)
#Plots
plot(kr_temp_ts)
plot(kr_ph_ts)
plot(kr_conductivity_ts)
plot(kr_bod_ts)
plot(kr_nitrate_ts)
plot(kr_fecal_ts)
# Stationarity checks.
adf.test(diff(kr_temp_ts))
adf.test(diff(kr_ph_ts))
adf.test(kr_conductivity_ts)
adf.test(kr_bod_ts)
adf.test(diff(kr_nitrate_ts))
adf.test(kr_fecal_ts)
# ARIMA for temp/pH/nitrate; exponential smoothing for the rest.
ARIMAfit_kr_temp_ts <- auto.arima(kr_temp_ts,d=2,approximation=FALSE,trace=FALSE)
fr_kr_temp_ts <- forecast(ARIMAfit_kr_temp_ts,3)
fr_kr_temp_ts
plot(fr_kr_temp_ts)
ARIMAfit_kr_ph_ts <- auto.arima(kr_ph_ts,d=1,approximation=FALSE,trace=FALSE)
fr_kr_ph_ts <- forecast(ARIMAfit_kr_ph_ts,3)
fr_kr_ph_ts
plot(fr_kr_ph_ts)
fit_kr_conductivity_ts <- HoltWinters(kr_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_kr_conductivity_ts, 3)
plot(forecast(fit_kr_conductivity_ts,3))
fit_kr_bod_ts <- HoltWinters(kr_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_kr_bod_ts, 3)
plot(forecast(fit_kr_bod_ts,3))
ARIMAfit_kr_nitrate_ts <- auto.arima(kr_nitrate_ts,d=2,approximation=FALSE,trace=FALSE)
fr_kr_nitrate_ts <- forecast(ARIMAfit_kr_nitrate_ts,3)
fr_kr_nitrate_ts
plot(fr_kr_nitrate_ts)
fit_kr_fecal_ts <- HoltWinters(kr_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_kr_fecal_ts, 3)
plot(forecast(fit_kr_fecal_ts,3))
# --- Madhya Pradesh ---------------------------------------------------------
# Annual series, 2006-2014.
mp_temp_ts <-ts(data.madhyapradesh$mean_temp, start=c(2006, 1), end=c(2014, 1), frequency=1)
mp_ph_ts <-ts(data.madhyapradesh$mean_ph, start=c(2006, 1), end=c(2014, 1), frequency=1)
mp_conductivity_ts <-ts(data.madhyapradesh$mean_conductivity, start=c(2006, 1), end=c(2014, 1), frequency=1)
mp_bod_ts <- ts(data.madhyapradesh$mean_bod, start=c(2006, 1), end=c(2014, 1), frequency=1)
mp_nitrate_ts <- ts(data.madhyapradesh$mean_nitratenitrite, start=c(2006, 1), end=c(2014, 1), frequency=1)
mp_fecal_ts <- ts(data.madhyapradesh$mean_fecalcoliform, start=c(2006, 1), end=c(2014, 1), frequency=1)
#Plots
plot(mp_temp_ts)
plot(mp_ph_ts)
plot(mp_conductivity_ts)
plot(mp_bod_ts)
plot(mp_nitrate_ts)
plot(mp_fecal_ts)
# Stationarity checks.
adf.test(mp_temp_ts)
adf.test(mp_ph_ts)
adf.test(mp_conductivity_ts)
adf.test(mp_bod_ts)
adf.test(diff(mp_nitrate_ts))
adf.test(mp_fecal_ts)
# Mixture of exponential smoothing and ARIMA, as in the states above.
fit_mp_temp_ts <- HoltWinters(mp_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mp_temp_ts, 3)
plot(forecast(fit_mp_temp_ts,3))
ARIMAfit_mp_ph_ts <- auto.arima(mp_ph_ts,d=2,approximation=FALSE,trace=FALSE)
fr_mp_ph_ts <- forecast(ARIMAfit_mp_ph_ts,3)
fr_mp_ph_ts
plot(fr_mp_ph_ts)
ARIMAfit_mp_conductivity_ts <- auto.arima(mp_conductivity_ts,d=2,approximation=FALSE,trace=FALSE)
fr_mp_conductivity_ts <- forecast(ARIMAfit_mp_conductivity_ts,3)
fr_mp_conductivity_ts
plot(fr_mp_conductivity_ts)
fit_mp_bod_ts <- HoltWinters(mp_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mp_bod_ts, 3)
plot(forecast(fit_mp_bod_ts,3))
fit_mp_nitrate_ts <- HoltWinters(mp_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mp_nitrate_ts, 3)
plot(forecast(fit_mp_nitrate_ts,3))
ARIMAfit_mp_fecal_ts <- auto.arima(mp_fecal_ts, d=1,approximation=FALSE,trace=FALSE)
fr_mp_fecal_ts <- forecast(ARIMAfit_mp_fecal_ts,3)
fr_mp_fecal_ts
plot(fr_mp_fecal_ts)
# --- Maharashtra: restrict to 2011-2014, then same annual-series pipeline ---
# NOTE(review): the `== | ==` chain keeps rows where YEAR is NA (NA subscript
# selects an all-NA row); `%in% 2011:2014` would drop them -- confirm which
# behavior is intended before changing.
data.maharashtra1 <- data.maharashtra[data.maharashtra$YEAR==2011|data.maharashtra$YEAR==2012|data.maharashtra$YEAR == 2013|data.maharashtra$YEAR == 2014 ,]
data.maharashtra1
mh_temp_ts <-ts(data.maharashtra1$mean_temp, start=c(2011, 1), end=c(2014, 1), frequency=1)
mh_ph_ts <-ts(data.maharashtra1$mean_ph, start=c(2011, 1), end=c(2014, 1), frequency=1)
mh_conductivity_ts <-ts(data.maharashtra1$mean_conductivity, start=c(2011, 1), end=c(2014, 1), frequency=1)
mh_bod_ts <- ts(data.maharashtra1$mean_bod, start=c(2011, 1), end=c(2014, 1), frequency=1)
mh_nitrate_ts <- ts(data.maharashtra1$mean_nitratenitrite, start=c(2011, 1), end=c(2014, 1), frequency=1)
mh_fecal_ts <- ts(data.maharashtra1$mean_fecalcoliform, start=c(2011, 1), end=c(2014, 1), frequency=1)
#Plots
plot(mh_temp_ts)
plot(mh_ph_ts)
plot(mh_conductivity_ts)
plot(mh_bod_ts)
plot(mh_nitrate_ts)
plot(mh_fecal_ts)
# ADF stationarity tests (nitrate differenced).
adf.test(mh_temp_ts)
adf.test(mh_ph_ts)
adf.test(mh_conductivity_ts)
adf.test(mh_bod_ts)
adf.test(diff(mh_nitrate_ts))
adf.test(mh_fecal_ts)
# Simple exponential smoothing (level only) + 3-step forecasts per parameter.
fit_mh_temp_ts <- HoltWinters(mh_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_temp_ts, 3)
plot(forecast(fit_mh_temp_ts,3))
fit_mh_ph_ts <- HoltWinters(mh_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_ph_ts, 3)
plot(forecast(fit_mh_ph_ts,3))
fit_mh_conductivity_ts <- HoltWinters(mh_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_conductivity_ts, 3)
plot(forecast(fit_mh_conductivity_ts,3))
fit_mh_bod_ts <- HoltWinters(mh_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_bod_ts, 3)
plot(forecast(fit_mh_bod_ts,3))
fit_mh_nitrate_ts <- HoltWinters(mh_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_nitrate_ts, 3)
plot(forecast(fit_mh_nitrate_ts,3))
fit_mh_fecal_ts <- HoltWinters(mh_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_mh_fecal_ts, 3)
plot(forecast(fit_mh_fecal_ts,3))
# --- Odisha: same 2011-2014 restriction and pipeline ---
data.odisha1 <- data.odisha[data.odisha$YEAR==2011|data.odisha$YEAR==2012|data.odisha$YEAR == 2013|data.odisha$YEAR == 2014 ,]
data.odisha1
od_temp_ts <-ts(data.odisha1$mean_temp, start=c(2011, 1), end=c(2014, 1), frequency=1)
od_ph_ts <-ts(data.odisha1$mean_ph, start=c(2011, 1), end=c(2014, 1), frequency=1)
od_conductivity_ts <-ts(data.odisha1$mean_conductivity, start=c(2011, 1), end=c(2014, 1), frequency=1)
od_bod_ts <- ts(data.odisha1$mean_bod, start=c(2011, 1), end=c(2014, 1), frequency=1)
od_nitrate_ts <- ts(data.odisha1$mean_nitratenitrite, start=c(2011, 1), end=c(2014, 1), frequency=1)
od_fecal_ts <- ts(data.odisha1$mean_fecalcoliform, start=c(2011, 1), end=c(2014, 1), frequency=1)
#Plots
plot(od_temp_ts)
plot(od_ph_ts)
plot(od_conductivity_ts)
plot(od_bod_ts)
plot(od_nitrate_ts)
plot(od_fecal_ts)
# ADF stationarity tests (no differencing needed for Odisha).
adf.test(od_temp_ts)
adf.test(od_ph_ts)
adf.test(od_conductivity_ts)
adf.test(od_bod_ts)
adf.test(od_nitrate_ts)
adf.test(od_fecal_ts)
fit_od_temp_ts <- HoltWinters(od_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_temp_ts, 3)
plot(forecast(fit_od_temp_ts,3))
fit_od_ph_ts <- HoltWinters(od_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_ph_ts, 3)
plot(forecast(fit_od_ph_ts,3))
fit_od_conductivity_ts <- HoltWinters(od_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_conductivity_ts, 3)
plot(forecast(fit_od_conductivity_ts,3))
fit_od_bod_ts <- HoltWinters(od_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_bod_ts, 3)
plot(forecast(fit_od_bod_ts,3))
fit_od_nitrate_ts <- HoltWinters(od_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_nitrate_ts, 3)
plot(forecast(fit_od_nitrate_ts,3))
fit_od_fecal_ts <- HoltWinters(od_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_od_fecal_ts, 3)
plot(forecast(fit_od_fecal_ts,3))
# --- Rajasthan: inspect available years, restrict to 2011-2014, pipeline ---
unique(data.rajasthan$YEAR)
data.raj1 <- data.rajasthan[data.rajasthan$YEAR==2011|data.rajasthan$YEAR==2012|data.rajasthan$YEAR == 2013|data.rajasthan$YEAR == 2014 ,]
raj_temp_ts <-ts(data.raj1$mean_temp, start=c(2011, 1), end=c(2014, 1), frequency=1)
raj_ph_ts <-ts(data.raj1$mean_ph, start=c(2011, 1), end=c(2014, 1), frequency=1)
raj_conductivity_ts <-ts(data.raj1$mean_conductivity, start=c(2011, 1), end=c(2014, 1), frequency=1)
raj_bod_ts <- ts(data.raj1$mean_bod, start=c(2011, 1), end=c(2014, 1), frequency=1)
raj_nitrate_ts <- ts(data.raj1$mean_nitratenitrite, start=c(2011, 1), end=c(2014, 1), frequency=1)
raj_fecal_ts <- ts(data.raj1$mean_fecalcoliform, start=c(2011, 1), end=c(2014, 1), frequency=1)
#Plots
plot(raj_temp_ts)
plot(raj_ph_ts)
plot(raj_conductivity_ts)
plot(raj_bod_ts)
plot(raj_nitrate_ts)
plot(raj_fecal_ts)
# ADF stationarity tests on the raw series.
adf.test(raj_temp_ts)
adf.test(raj_ph_ts)
adf.test(raj_conductivity_ts)
adf.test(raj_bod_ts)
adf.test(raj_nitrate_ts)
adf.test(raj_fecal_ts)
# Simple exponential smoothing + 3-step forecasts per parameter.
fit_raj_temp_ts <- HoltWinters(raj_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_temp_ts, 3)
plot(forecast(fit_raj_temp_ts,3))
fit_raj_ph_ts <- HoltWinters(raj_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_ph_ts, 3)
plot(forecast(fit_raj_ph_ts,3))
fit_raj_conductivity_ts <- HoltWinters(raj_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_conductivity_ts, 3)
plot(forecast(fit_raj_conductivity_ts,3))
fit_raj_bod_ts <- HoltWinters(raj_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_bod_ts, 3)
plot(forecast(fit_raj_bod_ts,3))
fit_raj_nitrate_ts <- HoltWinters(raj_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_nitrate_ts, 3)
plot(forecast(fit_raj_nitrate_ts,3))
fit_raj_fecal_ts <- HoltWinters(raj_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_raj_fecal_ts, 3)
plot(forecast(fit_raj_fecal_ts,3))
# --- Uttar Pradesh: same, but no ADF tests are run for this state ---
unique(data.uttarpradesh$YEAR)
data.up1 <- data.uttarpradesh[data.uttarpradesh$YEAR==2011|data.uttarpradesh$YEAR==2012|data.uttarpradesh$YEAR == 2013|data.uttarpradesh$YEAR == 2014 ,]
up1_temp_ts <-ts(data.up1$mean_temp, start=c(2011, 1), end=c(2014, 1), frequency=1)
up1_ph_ts <-ts(data.up1$mean_ph, start=c(2011, 1), end=c(2014, 1), frequency=1)
up1_conductivity_ts <-ts(data.up1$mean_conductivity, start=c(2011, 1), end=c(2014, 1), frequency=1)
up1_bod_ts <- ts(data.up1$mean_bod, start=c(2011, 1), end=c(2014, 1), frequency=1)
up1_nitrate_ts <- ts(data.up1$mean_nitratenitrite, start=c(2011, 1), end=c(2014, 1), frequency=1)
up1_fecal_ts <- ts(data.up1$mean_fecalcoliform, start=c(2011, 1), end=c(2014, 1), frequency=1)
#Plots
plot(up1_temp_ts)
plot(up1_ph_ts)
plot(up1_conductivity_ts)
plot(up1_bod_ts)
plot(up1_nitrate_ts)
plot(up1_fecal_ts)
fit_up1_temp_ts <- HoltWinters(up1_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_temp_ts, 3)
plot(forecast(fit_up1_temp_ts,3))
fit_up1_ph_ts <- HoltWinters(up1_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_ph_ts, 3)
plot(forecast(fit_up1_ph_ts,3))
fit_up1_conductivity_ts <- HoltWinters(up1_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_conductivity_ts, 3)
plot(forecast(fit_up1_conductivity_ts,3))
fit_up1_bod_ts <- HoltWinters(up1_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_bod_ts, 3)
plot(forecast(fit_up1_bod_ts,3))
fit_up1_nitrate_ts <- HoltWinters(up1_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_nitrate_ts, 3)
plot(forecast(fit_up1_nitrate_ts,3))
fit_up1_fecal_ts <- HoltWinters(up1_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_up1_fecal_ts, 3)
plot(forecast(fit_up1_fecal_ts,3))
# --- West Bengal: 2011-2014 restriction, plots, smoothing forecasts ---
# (no ADF tests are run for this state either)
unique(data.westbengal$YEAR)
data.wb <- data.westbengal[data.westbengal$YEAR==2011|data.westbengal$YEAR==2012|data.westbengal$YEAR == 2013|data.westbengal$YEAR == 2014 ,]
wb_temp_ts <-ts(data.wb$mean_temp, start=c(2011, 1), end=c(2014, 1), frequency=1)
wb_ph_ts <-ts(data.wb$mean_ph, start=c(2011, 1), end=c(2014, 1), frequency=1)
wb_conductivity_ts <-ts(data.wb$mean_conductivity, start=c(2011, 1), end=c(2014, 1), frequency=1)
wb_bod_ts <- ts(data.wb$mean_bod, start=c(2011, 1), end=c(2014, 1), frequency=1)
wb_nitrate_ts <- ts(data.wb$mean_nitratenitrite, start=c(2011, 1), end=c(2014, 1), frequency=1)
wb_fecal_ts <- ts(data.wb$mean_fecalcoliform, start=c(2011, 1), end=c(2014, 1), frequency=1)
#Plots
plot(wb_temp_ts)
plot(wb_ph_ts)
plot(wb_conductivity_ts)
plot(wb_bod_ts)
plot(wb_nitrate_ts)
plot(wb_fecal_ts)
# Simple exponential smoothing + 3-step forecasts per parameter.
fit_wb_temp_ts <- HoltWinters(wb_temp_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_temp_ts, 3)
plot(forecast(fit_wb_temp_ts,3))
fit_wb_ph_ts <- HoltWinters(wb_ph_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_ph_ts, 3)
plot(forecast(fit_wb_ph_ts,3))
fit_wb_conductivity_ts <- HoltWinters(wb_conductivity_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_conductivity_ts, 3)
plot(forecast(fit_wb_conductivity_ts,3))
fit_wb_bod_ts <- HoltWinters(wb_bod_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_bod_ts, 3)
plot(forecast(fit_wb_bod_ts,3))
fit_wb_nitrate_ts <- HoltWinters(wb_nitrate_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_nitrate_ts, 3)
plot(forecast(fit_wb_nitrate_ts,3))
fit_wb_fecal_ts <- HoltWinters(wb_fecal_ts, beta=FALSE, gamma=FALSE)
forecast(fit_wb_fecal_ts, 3)
plot(forecast(fit_wb_fecal_ts,3))
|
781d3c414d1d596a8611eecdba213fc7af60722c | c74585b6ce68a1363bd9fe8f2eabd1b0c81a8ce0 | /MonteCarloUnique.r | 33de8c2210fd93f07016ee0cf2176aabcb143069 | [] | no_license | ccrismancox/PSRM_signalingGames | f2e94ffe3fc3c474b35b5d799089968333bd558c | 24c133053e9400b18962505b33c1ad24179c178a | refs/heads/master | 2020-06-27T07:39:38.058843 | 2019-11-30T22:26:07 | 2019-11-30T22:26:07 | 199,887,265 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,228 | r | MonteCarloUnique.r | ############################################
############################################
######## Estimating Signaling Games ########
########  Monte Carlo Experiment    ########
########    Unique Equilibrium      ########
############################################
############################################
# Compares three estimators (two-step PL, traditional ML via nested fixed
# point, and NPL iteration) on simulated signaling-game data with a unique
# equilibrium; helper functions come from the two source()d files below.
rm(list=ls())
suppressMessages(library(doParallel))
suppressMessages(library(doRNG))
suppressMessages(library(maxLik))
suppressMessages(library(foreach))
suppressMessages(library(rootSolve))
suppressMessages(library(pbivnorm))
suppressMessages(library(Formula))
suppressMessages(library(mc2d))
suppressMessages(library(randomForest))
# Project helpers: selectEq, genData.jo, vec2U.regr, QLL.jo, eval_gr_qll,
# LL.nfxp, eqProbs (definitions live in these files, not here).
source("signalingFunctions_main.r")
source("gradientFunctions.r")
## Setup the parameters of the experiment
# nPerGame: observations per game; nGames: number of games (crossed below).
nPerGame <- c(5, 25, 50, 100, 200)
nGames <- c(25, 50, 100, 200)
B <- 1000  # Monte Carlo replications per design point
nworkers <- detectCores()-1
Bparams <- expand.grid(nGames,nPerGame)  # column 1 = nGames, column 2 = nPerGame
Results <- list()
workers <- makeCluster(nworkers) #in parallel
registerDoParallel(workers)
# Begin the simulation
# doRNG (%dorng% below) makes the parallel streams reproducible from this seed.
set.seed(2) #seed 1 hanged at one point
# One iteration per design point (rows of Bparams); within each, B parallel
# replications. Each replication simulates data, then estimates the model
# three ways (PL, tML, NPL) and returns a column of estimates/diagnostics.
# Convergence sentinels used throughout: -99 = failure, -69 = hit maxit.
for (i in 1:nrow(Bparams)){
  Results[[i]] <- foreach(b=1:B,
                          .packages=c("pbivnorm","rootSolve", "Formula", "randomForest", "mc2d", "maxLik"),
                          .combine=cbind,
                          .multicombine=T,
                          .inorder=F
  ) %dorng% {
    nPerGame <- Bparams[i,2]
    M <- Bparams[i,1]
    # One covariate per game, uniform on [0, 1].
    X <- data.frame(X=runif(M))
    f1 <- # create regression matrices
      as.Formula(~0|#SA
                   1|#VA
                   0|#CB
                   1 | #barWA
                   X| #barWB
                   1| #bara
                   1) #VB
    # True parameter vector: average of two parameterizations.
    truth1 <- c(1, -1.9, -2.9,.1, -1.2,1)
    truth2 <- c(1, -1.7, -2,.1, -1.2,1)
    truth <- .5*truth1 + .5*truth2
    regr <- list()
    for(j in 1:7){
      regr[[j]] <- model.matrix(f1, data=X, rhs=j)
    }
    names(regr) <- c("SA", "VA", "CB", "barWA", "barWB", "bara", "VB")
    # Utilities, equilibrium choice probabilities, and simulated outcomes
    # (all three helpers are defined in the source()d project files).
    U <- vec2U.regr(truth, regr)
    Pstar <- selectEq(X$X)
    Y <- genData.jo(nPerGame, Pstar, U)
    # compute PRhat for starting values
    index1 <- colSums(Y[2:4,]) >= 1 #observations where B chooses resist or not
    index2 <- colSums(Y[3:4,]) >= 1 #observations where A chooses SF or BD
    data1 <- cbind.data.frame((colSums(Y[3:4,])/colSums(Y[2:4,]))[index1], X[index1,]) #data where B chooses resist or not
    data2 <- cbind.data.frame(((Y[3,])/colSums(Y[3:4,]))[index2], X[index2,]) #data where A chooses SF or BD
    colnames(data1) <- c("Yr", "X")
    colnames(data2) <- c("Yf", "X")
    # Yr <- colSums(Y[3:4,])/colSums(Y[2:4,])
    # Yf <- Y[3,]/colSums(Y[3:4,])
    # dat <- cbind(X, Yr, Yf)
    # First-stage nonparametric estimates of the choice probabilities,
    # clamped away from 0/1 so qlogis() and the likelihood stay finite.
    m1 <- randomForest(Yr~X, data = data1, na.action=na.omit, ntree=1000)
    m2 <- randomForest(Yf~X, data = data2, na.action=na.omit, ntree=1000)
    Phat <- list(PRhat = pmin(pmax(predict(m1, newdata=X), 0.0001), .9999),
                 PFhat = pmin(pmax(predict(m2, newdata=X), 0.0001), .9999))
    # Negative quasi-log-likelihood and its gradient (minimized via maxLik).
    fqll <- function(x){
      -QLL.jo(x,Phat$PRhat,Phat$PFhat,Y,regr)
    }
    gr.qll <- function(x){
      -eval_gr_qll(x,Phat$PRhat,Phat$PFhat,Y,regr)
    }
    x0 <- c(runif(6), qlogis(Phat$PRhat))
    ## Test to make sure that these data will actually work
    # If the gradient at the start values is NA/Inf, redraw the whole dataset
    # (same generation code as above) until a usable one is found.
    while(anyNA(gr.qll(x0))|| any(is.infinite(gr.qll(x0)))){
      X <- data.frame(X=runif(M))
      f1 <- # create regression matrices
        as.Formula(~0|#SA
                     1|#VA
                     0|#CB
                     1 | #barWA
                     X| #barWB
                     1| #bara
                     1) #VB
      truth1 <- c(1, -1.9, -2.9,.1, -1.2,1)
      truth2 <- c(1, -1.7, -2,.1, -1.2,1)
      truth <- .5*truth1 + .5*truth2
      regr <- list()
      for(j in 1:7){
        regr[[j]] <- model.matrix(f1, data=X, rhs=j)
      }
      names(regr) <- c("SA", "VA", "CB", "barWA", "barWB", "bara", "VB")
      U <- vec2U.regr(truth, regr)
      Pstar <- selectEq(X$X)
      Y <- genData.jo(nPerGame, Pstar, U)
      # compute PRhat for starting values
      index1 <- colSums(Y[2:4,]) >= 1 #observations where B chooses resist or not
      index2 <- colSums(Y[3:4,]) >= 1 #observations where A chooses SF or BD
      data1 <- cbind.data.frame((colSums(Y[3:4,])/colSums(Y[2:4,]))[index1], X[index1,]) #data where B chooses resist or not
      data2 <- cbind.data.frame(((Y[3,])/colSums(Y[3:4,]))[index2], X[index2,]) #data where A chooses SF or BD
      colnames(data1) <- c("Yr", "X")
      colnames(data2) <- c("Yf", "X")
      # Yr <- colSums(Y[3:4,])/colSums(Y[2:4,])
      # Yf <- Y[3,]/colSums(Y[3:4,])
      # dat <- cbind(X, Yr, Yf)
      m1 <- randomForest(Yr~X, data = data1, na.action=na.omit, ntree=1000)
      m2 <- randomForest(Yf~X, data = data2, na.action=na.omit, ntree=1000)
      Phat <- list(PRhat = pmin(pmax(predict(m1, newdata=X), 0.0001), .9999),
                   PFhat = pmin(pmax(predict(m2, newdata=X), 0.0001), .9999))
      x0 <- c(runif(6),
              qlogis(pmin(pmax(Phat$PRhat,
                               0.0001), .9999)))
    }
    #Once we have a usuable dataset, move to estimation
    #### PL ####
    # Two-step pseudo-likelihood: maximize the quasi-likelihood given the
    # first-stage Phat. Elapsed time is recorded in seconds.
    ptm <- proc.time()[3]
    out.2step <- try(maxLik(start=x0[1:6], logLik=fqll, grad=gr.qll, method="NR"))
    out.2step$time <- proc.time()[3] - ptm
    if(class(out.2step[[1]])=="character"){
      out.2step$par <- rep(NA, 6)
      out.2step$convergence <- -99
      out.2step$iter <- -99
    }else{
      out.2step$par <- out.2step$est
      out.2step$convergence <- out.2step$code
      out.2step$iter <- out.2step$iter
    }
    #### tML ####
    # Traditional ML with the nested-fixed-point likelihood LL.nfxp,
    # maximized derivative-free with Nelder-Mead.
    fnfxp <- function(x){LL.nfxp(x,Y,regr)}
    ptm <- proc.time()[3]
    out.nfxp <- try(optim(fn=fnfxp, par=x0[1:6], method='Nelder-Mead',
                          control=list(maxit=5000)))
    out.nfxp$time <- proc.time()[3] - ptm
    out.nfxp$iter <- out.nfxp$counts[1]
    if(class(out.nfxp[[1]])=="character"){
      out.nfxp$par <- rep(NA, 6)
      out.nfxp$convergence <- -99
      out.nfxp$iter <- -99
    }
    #### NPL ####
    # Nested pseudo-likelihood: alternate best-response probability updates
    # and quasi-likelihood maximization until the mean-squared change in
    # (parameters, probabilities) falls below tol or maxit is reached.
    ptm <- proc.time()[3]
    out.NPL <- out.2step
    if(class(out.NPL[[1]])!="character"){
      eval = 1000; tol = 1e-5; maxit=500
      iter <- 0
      Phat0 <- Phat
      while(eval > tol & iter < maxit){
        Uk <- vec2U.regr(out.NPL$estimate, regr)
        Pk.F <- eqProbs(Phat$PRhat, Uk, RemoveZeros = T)[,3]
        Pk.R <- pnorm((Phat$PFhat*Uk$barWB + (1-Phat$PFhat)*Uk$VB - Uk$CB)/Phat$PFhat)
        Phat.k_1 <- Phat
        Phat <- list(PRhat = Pk.R, PFhat = Pk.F)
        Phat$PRhat <- pmin(pmax(Phat$PRhat,
                                0.0001), .9999)
        Phat$PFhat <- pmin(pmax(Phat$PFhat,
                                0.0001), .9999)
        out.NPL.k <- try(maxLik(start=out.NPL$par, logLik=fqll, grad=gr.qll, method="NR"))
        if(class(out.NPL.k[[1]])=="character" || out.NPL.k$code==100){
          out.NPL <- out.NPL.k
          break
        }
        out.NPL.k$par <- out.NPL.k$est
        out.NPL.k$convergence <- out.NPL.k$code
        eval <- mean((c(out.NPL.k$par, unlist(Phat)) -c(out.NPL$par,unlist(Phat.k_1)))^2)
        out.NPL <- out.NPL.k
        iter <- iter + 1
      }
      out.NPL$time <- proc.time()[3] - ptm
      out.NPL$iter <- iter
      if(class(out.NPL[[1]])=="character"|| out.NPL.k$code==100){
        out.NPL$par <- rep(NA, 6)
        out.NPL$convergence <- -99
        out.NPL$iter <- -99
      }else{
        out.NPL$convergence <- ifelse(iter==maxit,
                                      -69,
                                      out.NPL$convergence)
        out.NPL$convergence <- ifelse(eval==0,
                                      -99,
                                      out.NPL$convergence)
      }
    }else{
      out.NPL$par <- rep(NA, 6)
      out.NPL$convergence <- -99
      out.NPL$iter <- -99
    }
    # One column per replication: 9 numbers per estimator
    # (6 parameters, convergence code, time, iterations).
    c(out.2step$par, out.2step$convergence, out.2step$time, out.2step$iter,
      out.NPL$par, out.NPL$convergence, out.NPL$time, out.NPL$iter,
      out.nfxp$par, out.nfxp$convergence, out.nfxp$time, out.nfxp$iter
    )
  }
  # Checkpoint after every design point so partial results survive a crash.
  save(Results, Bparams, file="MonteCarloResults_Unique.rdata")
}
stopCluster(workers)
|
0a6d215302649540d72209ea8a1c176d8d287cfc | 2bc59a2d2a9b7562e66b1108b7ff87c2aee1a506 | /ch04/ch04_3_graph.R | 822fe8020e14e5124d1d2ebeab982c5d9eb102ea | [] | no_license | ckiekim/R-Statistics | 4bb78296b9e59761bdfac63433a44abf19c4e386 | d7b6f1bb32a15b310254e524ab4cf277a124a6f0 | refs/heads/master | 2020-06-05T03:35:42.388304 | 2019-07-05T08:35:39 | 2019-07-05T08:35:39 | 192,299,631 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,412 | r | ch04_3_graph.R | # 다양한 표본분포 그래프
# Register the "Malgun Gothic" Windows font (Korean labels) and reset the
# plotting layout. NOTE: windowsFonts() is only available on Windows.
windowsFonts(malgun = "맑은 고딕")
par(mfrow=c(1,1), family="malgun", oma = c(0, 0, 0, 0))
# Figure 4.10: chi-square densities for several degrees of freedom,
# overlaid on one set of axes with distinct line types/colors.
df <- c(1, 3, 5, 10)
x <- seq(0, 20, by=0.01)
chi2.1 <- dchisq(x, df[1])
chi2.3 <- dchisq(x, df[2])
chi2.5 <- dchisq(x, df[3])
chi2.10 <- dchisq(x, df[4])
# Empty canvas first (type="n"), then draw each curve; axes=FALSE so the
# axes can be added explicitly below. (Was axes=F; F is reassignable.)
plot(x, type="n", xlim=c(0, 20), ylim=c(0, 0.3),
     main="", xlab="x", ylab="", axes=FALSE)
axis(1); axis(2)
lines(x, chi2.1, lwd=2, lty=1, col="black")
lines(x, chi2.3, lwd=2, lty=2, col="red")
lines(x, chi2.5, lwd=2, lty=3, col="blue")
lines(x, chi2.10, lwd=2, lty=4, col="green")
legend("topright", paste("df :", df), lty=1:4,
       col=c("black","red", "blue", "green"), cex=0.7)
# Figure 4.11: Student's t densities for several degrees of freedom,
# compared against the standard normal (red solid line). As df grows,
# the t density approaches the normal.
df <- c(1, 2, 8, 30)
x <- seq(-3, 3, by=0.01)
y <- dnorm(x)
t.1 <- dt(x, df=df[1])
t.2 <- dt(x, df=df[2])
t.8 <- dt(x, df=df[3])
t.30 <- dt(x, df=df[4])
par(mar=c(4,2,2,2))
# axes=FALSE (was axes=F); only the x axis is drawn below.
plot(x, y, type="l", lty=1, axes=FALSE, xlab="x", ylab="", col="red")
axis(1)
lines(x, t.1, lty=4, col="black")
lines(x, t.2, lty=3, col="magenta")
lines(x, t.8, lty=2, col="blue")
lines(x, t.30, lty=6, col="green")
legend("topright", paste("df :", df), lty=c(4, 3, 2, 6),
       col=c("black", "magenta", "blue", "green"), cex=0.7)
# Figure 4.12: F densities for four (df1, df2) pairs. Note: the call
# df(x, ...) still resolves to stats::df even though `df` was bound to a
# numeric vector above -- in call position R searches for a function.
df1 <- c(3, 10)
df2 <- c(5, 20)
x <- seq(0, 2, by=0.01)
f3.5 <- df(x, df1[1], df2[1]) # df(x, 3, 5)
f3.20 <- df(x, df1[1], df2[2]) # df(x, 3, 20)
f10.5 <- df(x, df1[2], df2[1]) # df(x, 10, 5)
f10.20 <- df(x, df1[2], df2[2]) # df(x, 10, 20)
# axes=FALSE (was axes=F); only the x axis is drawn below.
plot(x, f3.5, type="l", ylim=c(0, 0.9), lwd=2,
     axes=FALSE, xlab="x", ylab="")
axis(1)
lines(x, f3.20, lty=2, lwd=2, col="blue")
lines(x, f10.5, lty=3, lwd=2, col="green")
lines(x, f10.20, lty=4, lwd=2, col="magenta")
legend("topright", paste("df :", c("3, 5", "3, 20", "10, 5", "10, 20")),
       col=c("black", "blue", "green", "magenta"), lty=1:4, cex=0.7)
# Reference figure: Poisson pmfs for lambda = 3, 5, 10, drawn as lines with
# point markers so the three series are distinguishable in black and white.
x <- 1:20
p.3 <- dpois(x, lambda=3)
p.5 <- dpois(x, lambda=5)
p.10 <- dpois(x, lambda=10)
# lwd is numeric (was the string "2", which plot() coerced silently).
plot(x, p.3, type="l", lwd=2, col="red",
     main="포아송 분포", xlab="x", ylab="P[X=x]")
points(x, p.3, pch=16, col="red")
lines(x, p.5, lwd=2, col="blue")
points(x, p.5, pch=17, col="blue")
lines(x, p.10, lwd=2, col="black")
points(x, p.10, pch=15, col="black")
legends <- c("lambda=3", "lambda=5", "lambda=10")
legend("topright", legend=legends, pch=c(16, 17, 15),
       col=c("red", "blue", "black"))
|
91efb5ec29751ec93fd44c8a9b4a5422c62a8aa6 | da6b648740a173faf93e5aa468750faaf043403c | /create_gene_coexpression_categories_for_gowinda.R | c6152f501bdab537f55badb810d8f804cfad0a16 | [] | no_license | griffinp/desiccation-selection | 2567959dd5f21c3af0ea783142fae4fc336362ec | 2e8a76f62a92cef602ccbbf56183dfdac6ebb97a | refs/heads/master | 2021-01-21T19:28:39.535345 | 2016-11-14T00:47:33 | 2016-11-14T00:47:33 | 41,017,204 | 0 | 0 | null | 2015-09-14T04:12:21 | 2015-08-19T06:27:35 | R | UTF-8 | R | false | false | 2,882 | r | create_gene_coexpression_categories_for_gowinda.R | # This script aims to make a 'gene category' file for use in Gowinda
# using the modules of co-expressed genes reported in Huang et al. (2015) PNAS
# NOTE: setwd() to an absolute personal path makes this script machine-specific;
# the read.csv()/write.table() calls below rely on this working directory.
setwd("~/Documents/Drosophila Selection Experiment/gowinda_gene_category_enrichment")
# Female modules table: expects columns FlyBase.ID (gene IDs, column 1)
# and Module (integer module label).
female_modules <- read.csv("Huang_et_al_female_modules.csv", header=TRUE,
                           sep=",", stringsAsFactors=FALSE)
# Genes per module; names(module_length) are the module IDs as strings.
module_length <- tapply(X=female_modules$FlyBase.ID, INDEX=female_modules$Module,
                        FUN=length)
# Earlier experiment (kept for reference): restrict to modules with >= 10 genes.
# long_modules <- names(which(module_length>=10))
# 
# female_long_modules <- subset(female_modules, 
#                               as.character(female_modules$Module)%in%long_modules)
# Build the Gowinda gene-set table for the female modules: one row per
# co-expression module, with the module name in the first two columns
# (Gowinda's category file wants <id> <name> <genes>) and all FlyBase IDs
# of the module's genes collapsed into one space-separated string.
# Rows are collected in a list and bound once -- the previous version grew
# the data frame with rbind() inside the loop (quadratic copying).
female_rows <- lapply(names(module_length), function(module_id) {
  module_name <- paste("female_module_", module_id, sep="")
  # column 1 of female_modules holds the FlyBase gene IDs (FlyBase.ID)
  fbgn_ids <- female_modules[which(female_modules$Module == as.integer(module_id)), 1]
  data.frame(category_name = module_name,
             category_name_2 = module_name,
             gene_members = paste(fbgn_ids, collapse = " "))
})
gowinda_format <- if (length(female_rows) > 0) {
  do.call(rbind, female_rows)
} else {
  # degenerate case: no modules at all -> empty table with the right columns
  data.frame(category_name=character(), category_name_2=character(),
             gene_members=character())
}
write.table(gowinda_format, file="female_coexpression_modules.txt", col.names=FALSE,
            row.names=FALSE, sep="\t", quote=FALSE)
# Male modules: same construction as the female table above.
male_modules <- read.csv("Huang_et_al_male_modules.csv", header=TRUE,
                         sep=",", stringsAsFactors=FALSE)
# Genes per module; names are the module IDs as strings.
male_module_length <- tapply(X=male_modules$FlyBase.ID, INDEX=male_modules$Module,
                             FUN=length)
# One row per module (<name> <name> <space-separated FlyBase IDs>), built in
# a list and bound once instead of rbind()-ing inside the loop.
male_rows <- lapply(names(male_module_length), function(module_id) {
  module_name <- paste("male_module_", module_id, sep="")
  fbgn_ids <- male_modules[which(male_modules$Module == as.integer(module_id)), 1]
  data.frame(category_name = module_name,
             category_name_2 = module_name,
             gene_members = paste(fbgn_ids, collapse = " "))
})
male_gowinda_format <- if (length(male_rows) > 0) {
  do.call(rbind, male_rows)
} else {
  data.frame(category_name=character(), category_name_2=character(),
             gene_members=character())
}
# Stack female and male tables and write the male-only and combined
# category files in Gowinda's tab-separated, unquoted, header-less format.
both_sexes <- rbind(gowinda_format, male_gowinda_format)
write.table(male_gowinda_format, file="male_coexpression_modules.txt", col.names=FALSE,
            row.names=FALSE, sep="\t", quote=FALSE)
write.table(both_sexes, file="both_sexes_coexpression_modules.txt", col.names=FALSE,
            row.names=FALSE, sep="\t", quote=FALSE)
|
39f15ab017f4764ebda41b4fb08647da6c932957 | 6dd721bd95b9508b07f764fa1b0b456fde903fed | /Projekt_MS/Zajęcki/MS/acceleraion_jap_vs_eu.R | b138e30e8a63e9c8f7daaa91222a2870b393c9e8 | [] | no_license | MacPiston/MS_Proj_2020 | 7e56ea05826432a8d5f5aec3f23df176b451891d | 19895d457a03d7171d860006e72e526135a74a2b | refs/heads/master | 2022-09-02T04:04:42.187321 | 2020-05-31T20:35:50 | 2020-05-31T20:35:50 | 263,100,065 | 1 | 0 | null | 2020-05-31T16:04:31 | 2020-05-11T16:39:13 | R | UTF-8 | R | false | false | 653 | r | acceleraion_jap_vs_eu.R | #CZY AUTA JAPOŃSKIE (origin == 3) PRZYSPIESZAJĄ LEPIEJ OD EUROPEJSKICH (origin == 2)?
# Acceleration values for both groups (origin == 3: Japanese cars,
# origin == 2: European cars), taken from the Autko data frame.
acceleration.japan <- Autko$acceleration[Autko$origin == 3]
acceleration.european <- Autko$acceleration[Autko$origin == 2]
# Run a two-sample (unpaired) Welch t-test comparing mean acceleration.
# (paired = FALSE spelled out; F is a reassignable alias.)
acceleration_jap_vs_eu.test <- t.test(acceleration.japan, acceleration.european, paired = FALSE)
acceleration_jap_vs_eu_pvalue <- acceleration_jap_vs_eu.test$p.value
# 95% confidence interval of the mean difference, rounded to 2 decimals.
acceleration_jap_vs_eu_przedzial95 <- round(acceleration_jap_vs_eu.test$conf.int, 2)
# CONCLUSION: it cannot be determined unambiguously which cars accelerate
# faster, because the obtained interval of differences is [-1.46, 0.23]
# (it contains zero).
6d97974d93e261c127240825c748479a062d75f8 | d48e34adc6063a5ca3dbfd772ad186fb93922f50 | /package/clinDataReview/tests/testthat/test_skeleton.R | 84bf6cb9b284d20a201466fbdc1cbca51bc637dd | [] | no_license | Lion666/clinDataReview | 080832a95b74bebb595d59796758b9e8b4cf4e18 | 2876140f36c6bfe94d8626038d32b2f3f9477697 | refs/heads/master | 2023-08-05T18:55:26.658847 | 2021-10-01T16:55:27 | 2021-10-02T10:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,095 | r | test_skeleton.R | context("Test report skeleton")
library(tools)
test_that("Example xpt files are correctly extracted to the specified folder", {
	targetDir <- tempfile("data")
	clinDataReview:::moveXpt(targetDir)
	extractedFiles <- list.files(targetDir)
	# all eight example datasets should arrive, every one an .xpt file
	expect_length(extractedFiles, 8)
	expect_setequal(file_ext(extractedFiles), "xpt")
})
test_that("An example metadata file is correctly created", {
	targetDir <- tempfile("data")
	clinDataReview:::createExampleMetadata(targetDir)
	createdFiles <- list.files(targetDir)
	# exactly one file should be written, and it must be named metadata.yml
	expect_length(createdFiles, 1)
	expect_equal(basename(createdFiles), "metadata.yml")
})
# Skeleton static files: the internal helper should copy the config/ and
# figures/ directories plus index.Rmd, with the expected file types inside.
test_that("Report skeleton files are correctly copied to the specified folder", {
	dirSkeletonFiles <- tempfile("skeleton")
	tmp <- clinDataReview:::moveSkeletonFiles(dirSkeletonFiles)
	res <- list.files(dirSkeletonFiles)
	expect_setequal(
			object = res, 
			expected = c("config", "figures", "index.Rmd")
	)
	# config/ contains only YAML files; figures/ only svg/png images
	resConfig <- list.files(file.path(dirSkeletonFiles, "config"))
	expect_setequal(object = file_ext(resConfig), expected = "yml")
	resFigures <- list.files(file.path(dirSkeletonFiles, "figures"))
	expect_setequal(object = file_ext(resFigures), expected = c("svg", "png"))
})
# The main-config helper writes a single config.yml into the target directory.
test_that("Example of the main config file is correctly created", {
	dirSkeleton <- tempfile("config")
	clinDataReview:::createMainConfigSkeleton(
			dir = dirSkeleton, 
			dirData = tempfile("data")
	)
	res <- list.files(dirSkeleton)
	expect_equal(object = res, expected = "config.yml")
})
# Full skeleton creation: expects a completion message and the four
# top-level entries (config/, data/, figures/, index.Rmd) in order.
test_that("A report skeleton, consisting of config files, XPT datasets, figures and index file is correctly created", {
	dirSkeleton <- tempfile("skeleton")
	expect_message(
			createClinDataReviewReportSkeleton(dirSkeleton),
			"The skeleton of the report is ready!"
	)
	res <- list.files(dirSkeleton)
	expect_identical(
			object = res,
			expected = c("config", "data", "figures", "index.Rmd")
	)
})
# Re-running the skeleton creation into a non-empty directory should warn
# rather than fail; the warning message mentions the non-empty path.
test_that("A warning is generated during the skeleton creation when the specified folder is not empty", {
	dirSkeleton <- tempfile("skeleton")
	createClinDataReviewReportSkeleton(dirSkeleton)
	expect_warning(
		createClinDataReviewReportSkeleton(dirSkeleton),
		".+ is not empty."
	)
})
# End-to-end check: render the freshly created skeleton report and verify
# (a) the output file exists and (b) no chapter/parameter-extraction
# failures were signalled as warnings during rendering.
test_that("A skeleton report is successfully executed", {
	skip_on_cran()
	dirSkeleton <- tempfile("skeleton")
	createClinDataReviewReportSkeleton(dirSkeleton)
	# Track warnings during execution of example report:
	# withCallingHandlers collects every warning message into `warn` (via <<-)
	# and muffles it so rendering continues uninterrupted.
	warn <- NULL
	resReport <- withCallingHandlers(
		expr = expect_message(
			render_clinDataReviewReport(
				inputDir = dirSkeleton,
				outputDir = file.path(dirSkeleton, "report"),
				intermediateDir = file.path(dirSkeleton, "interim"),
				quiet = TRUE # suppress printing of pandoc cmd line
			)
		),
		warning = function(w){
			warn <<- append(warn, conditionMessage(w))
			invokeRestart("muffleWarning")
		}
	)
	expect_true(file.exists(resReport))
	# check that import parameters is successful & all chapters are successfully created
	expect_false(any(
		grepl(
			"Extraction of the parameters.*failed|Rendering of the.*report failed",
			warn
		)
	))
})
20141df97bc05949a16e63d33f808e2474a9fd6d | 221fd3b98e991eef661fabc72a40c3b64979e2b2 | /shiny.R | 73685fd30d0b59596d3b0f4c70e9155fa6ab2402 | [] | no_license | Kamil32323/Projekt-R | 9fbfb2287a02abd448654513c155d898798c9c4b | 8d2f26b2582f3c1072fc8a3b10f68ea83112eabb | refs/heads/master | 2021-01-13T04:34:34.419061 | 2017-01-20T11:22:17 | 2017-01-20T11:22:17 | 79,553,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,426 | r | shiny.R | library(rvest)
# Package attachments for the scraping + Shiny app below.
# (library(rvest) is attached at the top of the file, so the second
# attachment further down is redundant but harmless.)
library(shinythemes)
library(ggplot2)
library(shiny)
library(leaflet)
library(RColorBrewer)
library(knitr)
library(maps)
library(rvest)
library(stringr)
library(stringi)
library(ggmap)
# Scrape IT job-offer titles from olx.pl ('.link strong' nodes).
url1 <- read_html("https://www.olx.pl/praca/informatyka/")
oferty <- url1 %>%
  html_nodes('.link strong') %>%
  html_text()
foferty <- factor(oferty)
# Noise tokens to delete from the titles, joined into one alternation regex.
# Fix: the parenthesised tokens must be regex-escaped -- as a plain pattern,
# "(Pentaho)" is a *group* matching "Pentaho", so gsub removed the word but
# left a dangling "()" in the title (same for "(Mainframe/Cobol)").
to_remove <- paste(c("FULL TIME", "-", "\\(Pentaho\\)", "\\(Mainframe/Cobol\\)"),
                   collapse = "|")
foferty <- gsub(to_remove, "", foferty)  # returns character, not factor
foferty
# Scrape the offer locations from the same listing page.
url2 <- read_html("https://www.olx.pl/praca/informatyka/")
lokalizacje <- url2 %>%
  html_nodes('.list-item__location') %>%
  html_text()
flokalizacje <- factor(lokalizacje)
# Strip newlines, dashes and a stray salary string from the locations.
# Fix: the leading "$" must be escaped -- unescaped, "$" anchors at the end
# of the string, so the "$110,000130,000 per year" alternative could never
# match anything and the salary text was left in place.
toremove <- paste(c("\n", "-", "\\$110,000130,000 per year"), collapse = "|")
flokalizacje <- gsub(toremove, "", flokalizacje)  # returns character
flokalizacje
# Draw 43 distinct fake salaries to pair with the scraped offers.
# NOTE(review): 43 is hard-coded and assumes exactly 43 offers were scraped
# above -- consider length(foferty) instead. The draw is also unseeded, so
# results differ between runs. (replace = FALSE spelled out; <- assignment.)
pensja <- sample(70000:100000, 43, replace = FALSE)
fpensja <- factor(pensja)
fpensja
# Geocode the cleaned locations and preview them on a leaflet map.
geocodes <- geocode(flokalizacje, output = "latlona")
leaflet(geocodes) %>% addTiles() %>%
  addMarkers(~lon, ~lat, popup = ~as.character(foferty))
# Offer/salary table consumed by the Shiny app below
# (Stanowisko = position, Zarobki = earnings).
df <- data.frame(foferty, fpensja)
names(df) <- c("Stanowisko", "Zarobki")
# Shiny UI: a "cerulean"-themed page with a header and two tabs --
# "Mapa" (leaflet map of offers) and "Wykres" (salary bar chart).
# UI strings are intentionally in Polish (user-facing text).
ui<-fluidPage(title = "Zarobki",theme = shinytheme("cerulean"),
  headerPanel("Zarobki specjalisty IT na podstawie ofert pracy z portalu www.olx.pl"),
              mainPanel(
                tabsetPanel(
                  tabPanel("Mapa",leafletOutput('mapa',width = 850,height = 500)),
                  tabPanel("Wykres",plotOutput("wykres",width = 850,height = 500))
                )
              )
)
# Shiny server: shows a welcome modal on session start, then renders the
# leaflet map of geocoded offers ('mapa') and a bar chart of salary vs.
# position ('wykres'). Reads the script-level globals geocodes, foferty,
# fpensja and df built above.
server<-function(input,output,session)
{
  # one-off welcome dialog, dismissable by clicking outside or via Close
  showModal(modalDialog(
    title = "Aplikacja Shiny",
    h3("Aplikacja webowa Shiny"),
    easyClose = TRUE,footer = modalButton("Close")
  ))
  # map tab: one marker per geocoded location, popup = offer title
  lef<-leaflet(geocodes) %>% addTiles() %>%
    addMarkers(~lon, ~lat, popup = ~as.character(foferty))
  output$mapa <-renderLeaflet(lef)
  # chart tab: salary (x) vs. position (y) as identity-stat bars
  plot<-ggplot(data=df, aes(x=fpensja, y=foferty )) +
    geom_bar(colour="grey", stat="identity",
             position=position_dodge(),
             size=.3) +
    xlab("Zarobki") + ylab("Stanowisko")
  output$wykres<-renderPlot(plot)
}
shinyApp(ui,server)
0925a97d1fa292ab03a69a179924423ea994ecee | 23738ffa9afb0151a822adf9f3f1a9664a827790 | /asu/math205/week05/hw/hw05.r | f257a7532d871abf0220b65ecd49ec3d1de51c72 | [] | no_license | etellman/ubb | 83eb84dba9d03d8c22e5a84c2509ce9eaff4e087 | 1320f5597042681dc67f608c468f72aa2b86f417 | refs/heads/master | 2020-03-29T20:10:54.783175 | 2015-11-09T02:05:36 | 2015-11-09T02:05:36 | 10,315,509 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,277 | r | hw05.r | math205.dir <- '~/Documents/U/ubb/asu/math205'
# Path setup (math205.dir is defined above). NOTE: setwd() makes the script
# machine/location dependent, but the relative read.delim() calls below rely
# on it, so it is kept.
data.dir <- paste(math205.dir, 'data', 'bps', 'PC-Text', 'ch05', sep = '/')
setwd(data.dir)
week.dir <- paste(math205.dir, 'week05', sep = '/')
# Fix: the rest of the file references `figures.dir` (every ggsave() call),
# but the variable was defined as `figure.dir`, so all ggsave() calls failed
# with "object 'figures.dir' not found".
figures.dir <- paste(week.dir, 'hw', 'figures', sep = '/')
figure.dir <- figures.dir  # kept under the old name for backward compatibility
# Fix: `hw.dir` is used by the sink() calls below but was never defined.
hw.dir <- paste(week.dir, 'hw', sep = '/')
# Exercise 27: fitted least-squares line for dive duration (minutes) as a
# function of depth (meters): duration-hat = 2.69 + 0.0138 * depth.
dd <- function(d) {
  2.69 + 0.0138 * d
}
# Plot the fitted line over depths 40-300 m and save it as ex27.pdf.
plot <- ggplot(data.frame(x = c(40,300)), aes(x = x)) +
	stat_function(fun = dd) +
	labs(x = 'Depth (meters)', y = 'Duration (minutes)')
print(plot)
ggsave(paste(figures.dir, "ex27.pdf", sep = "/"))
# exercise 30
# Bird pairs vs. percent returning: scatterplot with a least-squares fit,
# regression of Pct on Pairs, prediction at Pairs = 30, and (for comparison)
# the reversed regression of Pairs on Pct.
ex30 <- read.delim("../ch04/ex04-44.dat", header = TRUE, sep = '\t')
plot <- ggplot(ex30, aes(x = Pairs, y = Pct)) +
	geom_point() +
	stat_smooth(method = "lm") +
	labs(x = "Pairs", y = "Returning %") +
	ggtitle("Exercise 30: Birds Returning")
print(plot)
ex30.lm <- lm(Pct ~ Pairs, data = ex30)
summary(ex30.lm)
# predicted returning % for a colony of 30 pairs, rounded to 2 decimals
round(predict(ex30.lm, data.frame(Pairs = c(30))), 2)
summary(lm(Pairs ~ Pct, data = ex30))
# Exercise 31: regression line predicting husband's height from wife's
# height: husband-hat = 35.82 + 0.5185 * wife.
ex31 <- function(x) {
  35.82 + 0.5185 * x
}
# Plot the ex31 line over wife heights 56-72, save as ex31.pdf, and
# evaluate the prediction at wife = 67 (autoprinted).
plot <- ggplot(data.frame(x = c(56, 72)), aes(x = x)) +
  stat_function(fun = ex31) +
  labs(x = 'Wife', y = 'Husband')
print(plot)
ggsave(paste(figures.dir, "ex31.pdf", sep = "/"))
ex31(67)
# Exercise 32: slope and intercept computed from summary statistics
# (presumably b = r * s_y / s_x and a = ybar - b * xbar -- confirm against
# the assignment), then the fitted line evaluated at x = 300.
.6 * 75/280          # candidate slope, ~0.1607 (autoprinted)
75 - 0.1607 * 280    # candidate intercept (autoprinted)
# NOTE(review): the expression above prints 30.004, but the function below
# hard-codes 30.04 -- possible transcription slip; verify.
y <- function(x) {
  30.04 + 0.1607 * x
}
y(300)
# exercise 34
# Brother vs. sister heights: summary stats per variable (reshape2::melt +
# plyr::ddply), correlations under three methods, LaTeX table via sink(),
# scatterplot with fit, and a prediction at Brother = 70.
ex34 <- read.delim("ex05-34.dat", header = TRUE, sep = '\t')
ex34.lm <- lm(Sister ~ Brother, data = ex34)
ex34.m <- melt(ex34)
ex34.s <- ddply(ex34.m, "variable", summarize,
                mean = round(mean(value), 2), sd = round(sd(value), 2))
ex34.s
cor(ex34$Brother, ex34$Sister, method = 'kendall')
cor(ex34$Brother, ex34$Sister, method = 'spearman')
cor(ex34$Brother, ex34$Sister, method = 'pearson')
# write the xtable LaTeX output to hw.dir/r.tex (sink redirects stdout)
sink(paste(hw.dir, "r.tex", sep = "/"))
xtable(ex34.s)
sink()
plot <- ggplot(ex34, aes(x = Brother, y = Sister)) +
  geom_point() +
  stat_smooth(method = "lm") +
  labs(x = "Brother", y = "Sister")
print(plot)
ggsave(paste(figures.dir, "ex34.pdf", sep = "/"))
# predicted sister height for a 70-inch brother, rounded to 2 decimals
round(predict(ex34.lm, data.frame(Brother = c(70))), 2)
summary(ex34.lm)
# exercise 37
ex37 <- read.delim("../ch04/ex04-29.dat", header = TRUE, sep = '\t')
mean(ex37$Behave)
ex37.m <- melt(ex37)
ex37.s <- ddply(ex37.m, "variable", summarize,
mean = round(mean(value), 2), sd = round(sd(value), 2))
cor(ex37)
sink(paste(hw.dir, "r.tex", sep = "/"))
xtable(ex37.s)
sink()
ex37.lm <- lm(Behave ~ Neural, data = ex37)
summary(ex37.lm)
round(coef(ex37.lm), 4)
round(cor(ex37), 4)
ex37.no.outlier <- subset(ex37, Neural < 150)
ex37.no.outlier.lm <- lm(Behave ~ Neural, data = ex37.no.outlier)
summary(ex37.no.outlier.lm)
round(coef(ex37.no.outlier.lm), 4)
round(cor(ex37.no.outlier), 4)
plot <- ggplot(ex37, aes(x = Neural, y = Behave)) +
geom_point() +
stat_smooth(method = "lm") +
labs(x = "Neural", y = "Behavioral")
print(plot)
ggsave(paste(figures.dir, "ex37.pdf", sep = "/"))
# exercise 38
# Four data sets (Table 5.1 a-d) sharing near-identical summary
# statistics but very different shapes.
ex38a <- read.delim("ta05-01a.dat", header = TRUE, sep = '\t')
ex38b <- read.delim("ta05-01b.dat", header = TRUE, sep = '\t')
ex38c <- read.delim("ta05-01c.dat", header = TRUE, sep = '\t')
ex38d <- read.delim("ta05-01d.dat", header = TRUE, sep = '\t')
ex38.m <- melt(ex38a)
ex38.s <- ddply(ex38.m, "variable", summarize,
                mean = round(mean(value), 2), sd = round(sd(value), 2))
cor(ex38d)
sink(paste(hw.dir, "r.tex", sep = "/"))
xtable(ex38.s)
sink()
ex38.lm <- lm(y ~ x, data = ex38c)
coef(ex38.lm)
summary(ex38.lm)
round(coef(ex38.lm), 4)
# was cor(ex38): `ex38` is never defined (the data sets are ex38a-ex38d);
# correlate the set the regression above was fitted on
cor(ex38c)
plot <- ggplot(ex38d, aes(x = x, y = y)) +
  geom_point() +
  stat_smooth(method = "lm") +
  labs(x = "", y = "")
print(plot)
ggsave(paste(figures.dir, "ex38d.pdf", sep = "/"))
# exercise 47
# Regression line from the exercise; evaluate it at x = 70 and x = 80.
ex47 <- function(x) {
  intercept <- 61.93
  slope <- 0.18
  intercept + slope * x
}
ex47(c(70, 80))
# exercise 51
# Beaver stumps vs beetle larvae: summary stats, correlation, fit.
# NOTE(review): confirm `hw.dir` and `figures.dir` are defined in setup.
ex51 <- read.delim("ex05-51.dat", header = TRUE, sep = '\t')
ex51.m <- melt(ex51)
ex51.s <- ddply(ex51.m, "variable", summarize,
                mean = round(mean(value), 2), sd = round(sd(value), 2))
cor(ex51)
sink(paste(hw.dir, "r.tex", sep = "/"))
xtable(ex51.s)
sink()
ex51.lm <- lm(Larvae ~ Stumps, data = ex51)
summary(ex51.lm)
round(coef(ex51.lm), 2)
cor(ex51)
plot <- ggplot(ex51, aes(x = Stumps, y = Larvae)) +
  geom_point() +
  stat_smooth(method = "lm") +
  labs(x = "Stumps", y = "Larvae")
print(plot)
ggsave(paste(figures.dir, "ex51.pdf", sep = "/"))
# exercise 53
# Forecast vs observed storm counts; the year 2005 is excluded.
ex53 <- read.delim("ex05-53.dat", header = TRUE, sep = '\t')
ex53 <- subset(ex53, Year != 2005)
ex53.m <- melt(ex53)
ex53.s <- ddply(ex53.m, "variable", summarize,
                mean = round(mean(value), 2), sd = round(sd(value), 2))
cor(ex53)
sink(paste(hw.dir, "r.tex", sep = "/"))
xtable(ex53.s)
sink()
ex53.lm <- lm(Observed ~ Forecast, data = ex53)
summary(ex53.lm)
round(coef(ex53.lm), 3)
with(ex53, cor(Forecast, Observed))
residuals(ex53.lm)
# predicted observed storms for a forecast of 16
round(predict(ex53.lm, data.frame(Forecast = c(16))), 2)
str(ex53)
plot <- ggplot(ex53, aes(x = Forecast, y = Observed)) +
  geom_point() +
  stat_smooth(method = "lm") +
  labs(x = "Forecast", y = "Storms")
print(plot)
ggsave(paste(figures.dir, "ex53_forecast.pdf", sep = "/"))
# residuals against the forecast to check the fit
plot <- ggplot(ex53, aes(x = Forecast, y = residuals(ex53.lm))) +
  geom_point() +
  # stat_smooth(method = "lm") +
  labs(x = "Forecast", y = "Residuals")
print(plot)
ggsave(paste(figures.dir, "ex53_residuals.pdf", sep = "/"))
|
8f0d12d4a03c563a8ff4df59b23bfa4779c61380 | a561c75d192118d304a9df7504f9bbcc8847b27b | /R/outlier_filtering.R | 6bb14c9380efad1e2e7d83d7428d55cdc81f17ae | [] | no_license | nkurzaw/TPP2D | abd9815772cd9ef88fd6efef678047030fe9c3af | 007e79f700ad4ae8c7beae04de3c3443e8c3afb0 | refs/heads/master | 2023-05-10T08:17:11.587895 | 2023-04-25T15:10:18 | 2023-04-25T15:10:18 | 162,263,308 | 8 | 1 | null | 2020-01-16T16:15:06 | 2018-12-18T09:27:58 | R | UTF-8 | R | false | false | 3,990 | r | outlier_filtering.R | #' @import dplyr
.removeAmbiguousMeasurements <- function(in_df,
                                         qualColName = "qupm"){
  # Keep, for every (representative, temperature, conc) cell, only the
  # rows with the best quality score and, among those, the highest raw
  # signal; drop proteins whose quality never exceeds 1; add dense
  # integer ids for the temperature and concentration grids.
  #
  # Args:
  #   in_df:       data frame with columns representative, temperature,
  #                conc, raw_value and the quality column.
  #   qualColName: name of the quality column (default "qupm").
  # Returns: the filtered, ungrouped data frame with temp_id / conc_id.
  #
  # Bindings below silence R CMD check notes about NSE column names.
  representative <- temperature <- conc <- value <-
    raw_value <- NULL
  out_df <- in_df %>%
    group_by(representative, temperature, conc) %>%
    # .data[[...]] replaces the deprecated string-built filter_() calls
    filter(.data[[qualColName]] == max(.data[[qualColName]])) %>%
    filter(raw_value == max(raw_value)) %>%
    group_by(representative) %>%
    filter(any(.data[[qualColName]] > 1)) %>%
    arrange(temperature, conc) %>%
    mutate(temp_id = dense_rank(temperature),
           conc_id = dense_rank(conc)) %>%
    ungroup()
  return(out_df)
}
#' Compute outlier scores for every measurement of every protein
#'
#' For each protein (`representative`) and each point of its
#' temperature x concentration grid, the log2 signal is compared with
#' its grid neighbours: `outlier_score_local` is the absolute deviation
#' from the neighbourhood mean in units of the neighbourhood sd, and
#' `outlier_score_global` is the same deviation in units of the sd of
#' all of the protein's values.  `shrinked_value` is the neighbourhood
#' mean including the point itself; `conc_edge` flags points at the
#' highest concentration.
#' @import dplyr
#' @importFrom stats sd
.detectOutliers <- function(in_df){
  # silence R CMD check notes about NSE column names
  representative <- temp_id <- conc_id <- NULL
  out_df <- bind_rows(lapply(unique(in_df$representative), function(repr){
    temp_repr <- filter(in_df, representative == repr)
    outlier_score <-
      lapply(seq_len(nrow(temp_repr)), function(i){
        # temp2: up-to-3x3 grid neighbourhood, centre point excluded
        temp2 <- filter(temp_repr, temp_id %in%
                          c(temp_repr$temp_id[i] - 1,
                            temp_repr$temp_id[i],
                            temp_repr$temp_id[i] + 1),
                        conc_id %in%
                          c(temp_repr$conc_id[i] - 1,
                            temp_repr$conc_id[i],
                            temp_repr$conc_id[i] + 1),
                        !(temp_id == temp_repr$temp_id[i] &
                            conc_id == temp_repr$conc_id[i]))
        # deviation scaled by the neighbourhood sd (local score)
        factor_sd1 <- abs(log2(filter(temp_repr, temp_id == temp_repr$temp_id[i] &
                                        conc_id == temp_repr$conc_id[i])$rel_value) -
                            mean(log2(temp2$rel_value), na.rm = TRUE))/
          sd(log2(temp2$rel_value), na.rm = TRUE)
        # same deviation scaled by the sd of all values (global score)
        factor_sd2 <- abs(log2(filter(temp_repr, temp_id == temp_repr$temp_id[i] &
                                        conc_id == temp_repr$conc_id[i])$rel_value) -
                            mean(log2(temp2$rel_value), na.rm = TRUE))/
          sd(log2(temp_repr$rel_value), na.rm = TRUE)
        # temp3: same neighbourhood but including the centre point
        temp3 <- filter(temp_repr, temp_id %in%
                          c(temp_repr$temp_id[i] - 1,
                            temp_repr$temp_id[i],
                            temp_repr$temp_id[i] + 1),
                        conc_id %in%
                          c(temp_repr$conc_id[i] - 1,
                            temp_repr$conc_id[i],
                            temp_repr$conc_id[i] + 1))
        return(list("factor1" = factor_sd1,
                    "factor2" = factor_sd2,
                    "shrinked_value" = mean(temp3$rel_value, na.rm = TRUE),
                    "conc_edge" = (temp_repr$conc_id[i] == max(temp_repr$conc_id))))
      })
    # NOTE(review): lapply() always returns a list (never NULL), so the
    # else branch below appears to be unreachable.
    if(!is.null(outlier_score)){
      temp_df <- temp_repr
      temp_df$outlier_score_local =
        sapply(outlier_score, function(x) x[["factor1"]])
      temp_df$outlier_score_global =
        sapply(outlier_score, function(x) x[["factor2"]])
      temp_df$shrinked_value =
        sapply(outlier_score, function(x) x[["shrinked_value"]])
      temp_df$conc_edge =
        sapply(outlier_score, function(x) x[["conc_edge"]])
    }else{
      temp_df <- temp_repr %>%
        mutate(outlier_score_local = NA,
               outlier_score_global = NA,
               shrinked_value = NA,
               conc_edge = NA)
    }
    return(temp_df)
  }))
  return(out_df)
}
ad452201ed4aa02d5a823c4617bc97a40e18cba8 | e0fb4c6dc56cd73d1b4346ca6f0a973f2c63e8dd | /02_heatwave.R | 2fcc3030431fc5bc516ef313dbb3d4fd79634f82 | [] | no_license | Sugirlstar/CDHEs-in-China | ee8e169d395573336d052d85ecd321f05a3ebcfd | c59e96fd60bffcc00c62354d2b5341eff3b23dee | refs/heads/main | 2023-04-14T14:16:28.292602 | 2022-05-10T14:04:13 | 2022-05-10T14:04:13 | 457,317,774 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,574 | r | 02_heatwave.R | # Note:
# 1. This script was written in Rstudio (Version 1.4.1717),
# but was recommended to be ran in the original R for saving memory space.
#
# 2. We use foldable code sections in Rstudio
# to break the large source file into a set of discrete regions
# for easy navigation between them.
# Each section begins with a group of pound signs (#)
# following the title and several trailing dashes (-),
# and ends with pound signs (be like: ##End##)
#
# 3. Comments explaining what the code is doing are above the lines or instantly follow the code.
#
# 4. To reproduce, just change the "pathname" (line 23) to where you store the file we offered.
#
# 5. Step05 and Step06 are not the main code,
# but just for preview the result.
# The formal figure generating code please refer to the folder "EF_figure_generating"
# 00 Background setting -------------
# Root folder of the repository; edit this line to reproduce.
pathname <- "D:/CDHEs_coding"
library(tcltk)
library(raster)
library(sp)
library(maptools)
library(rgdal)
library(RColorBrewer)
library(trend)
library(lattice)
# load necessary workspace and set file location
load(paste0(pathname, "/Tx_China_1dy_0.5deg_1961_2020.RData")) # temperature
load(paste0(pathname, "/supplement_materials/crs.RData")) # the coordinate reference system
setwd(pathname)
# read shp files.  Was paste0(pathname,"shpfile/...") -- the missing "/"
# produced a non-existent path like "D:/CDHEs_codingshpfile/...".
# NOTE(review): readShapePoly() is retired in recent maptools; consider
# sf::st_read() when upgrading.
Chinasp <- readShapePoly(paste0(pathname, "/shpfile/china/China")) # not given in the repository
provincesp <- readShapePoly(paste0(pathname, "/shpfile/china/PROVINCE_region")) # not given in the repository
# fundamental parameters
{
  xx = 72   # number of grid rows (first dimension of TX)
  yy = 128  # number of grid columns (second dimension of TX)
  days = length(TX[1,1,])  # number of daily layers in the record
  st = 2 # the start day of daily STI (same as scale)
  da = 5 # the minimum duration
  da2 = 3 # the maximum interval
  thr1 = -1 # first threshold
  thr2 = 0 # second threshold for merging
  s=1961   # first year of the record
  e=2020   # last year of the record
  LL=e-s+1 # number of years
  # coordinates showing on figures
  xL = seq(72, 136, 5)
  xlabel = paste(xL, "°", sep = "") # axis tick labels (degrees)
  yL = seq(18, 54, 5)
  ylabel = paste(yL, "°", sep = "")
  # name with the parameters
  # format: extreme_index_thr1_da_thr2_da2
  ctype = "heatwave--1-5days-0-3days"
}
##00End##
# 01 Functions preparing -------------
# 01-02 Count the length of a run of consecutive 1s
# Args: v = daily 0/1 flag series for one grid cell, i = start position.
# Returns how many consecutive elements equal 1 starting at v[i]
# (0 when v[i] != 1); the scan stops at the end of the vector.
sumfun <- function(v, i)
{
  run_len <- 0
  pos <- i
  while (v[pos] == 1)
  {
    run_len <- run_len + 1
    if (pos == length(v)) break # guard must run before pos is advanced
    pos <- pos + 1
  }
  return(run_len)
}
# 01-03 Count the length of a run of consecutive 0s
# Args: v = daily 0/1 flag series for one grid cell, i = start position.
# Returns how many consecutive elements equal 0 starting at v[i]
# (0 when v[i] != 0); the scan stops at the end of the vector.
sumfun2 <- function(v, i)
{
  run_len <- 0
  pos <- i
  while (v[pos] == 0)
  {
    run_len <- run_len + 1
    if (pos == length(v)) break # guard must run before pos is advanced
    pos <- pos + 1
  }
  return(run_len)
}
# 01-04 Linear regression
# Fit an ordinary least-squares trend to an annual series.
# Args:
#   v:          numeric vector of annual values, one per year
#   start_year: calendar year of v[1].  Defaults to 1961 (the start of
#               the record used throughout this script), so existing
#               calls are unaffected.
# Returns a list: [[1]] slope per year, [[2]] p-value of the slope,
# [[3]] intercept.
tendency <- function(v, start_year = 1961)
{
  n_yr <- length(v)
  yrs <- start_year:(start_year + n_yr - 1)
  fit <- lm(v ~ yrs)
  ten <- fit$coefficients[2]
  intercept <- fit$coefficients[1]
  pvalue <- summary(fit)$coefficients[, 4][2]
  return(list(ten, pvalue, intercept))
}
# 01-05 FLMIP function (only one breakpoint)
# Fit a continuous two-segment piecewise-linear trend to the annual
# series STGh covering the years s..e.  Every candidate breakpoint
# leaving at least 10 years on each side is tried; a candidate is kept
# only when the two slopes have opposite signs and at least one slope is
# significant (p <= 0.05), and the candidate with the smallest total sum
# of squared residuals wins.
# Returns a 4 x 2 matrix (rows: year, a, c, p of a; one column per
# segment, y = a*x + c), or 0 when no admissible breakpoint exists.
bp1 = function(s,e,STGh)
{
  # check whether adjacent slopes have opposite symbols
  muti=function(v)
  {
    x=length(v)-1
    z=0
    for( i in 1:x )
    { p=v[i]*v[i+1]
      if(p>0)
        break
      z=z+1
    }
    return(x==z)
  }
  m=e-s+1
  Y=matrix(STGh,m,1)
  T=s:e
  SSR=10000000 # running best (smallest) total SSR so far
  RST=0
  bp=(s+9):(e-9) # candidate breakpoint years (>= 10 years per segment)
  b=bp-s+1
  for( j in 1:length(bp) )
  {
    # design matrix: col 1 = year capped at the breakpoint,
    # col 2 = years elapsed after the breakpoint, col 3 = intercept;
    # this parameterisation makes the two segments join at bp[j].
    A0=matrix(0,m,2)
    A0[1: b[j],1]=s:bp[j]
    A0[ (b[j]+1):m ,1 ]=bp[j]
    A0[ (b[j]+1):m, 2 ]=1:(e-bp[j])
    c=matrix(rep(1,m),m,1)
    A=cbind(A0,c)
    # normal-equations least squares: S = (A'A)^-1 A'Y
    S=solve(t(A)%*%A)%*%t(A)%*%Y
    if(muti(S[1:2]))
    {
      rst=matrix(0,4,2)
      rownames(rst)=c("year","a","c","p of a") # y=a*x+c
      rst[1,1]=bp[j]
      rst[2,]=S[1: 2]
      rst[3,1]=S[3]
      rst[3,2]=rst[3,1]+(S[1]-S[2])*bp[j] # intercept of the 2nd segment
      # Significance evaluate
      rdf1=rst[1,1]-s+1-2 # residual degrees of freedom, segment 1
      rdf2=e-rst[1,1]+1-2 # residual degrees of freedom, segment 2
      ssr1=0
      for(u in 1:b[j])
        ssr1=ssr1+(Y[u]-(rst[2,1]*T[u]+rst[3,1]))^2
      ssr2=0
      for( u in (b[j]+1):m )
        ssr2=ssr2+(Y[u]-(rst[2,2]*T[u]+rst[3,2]))^2
      vv1=sqrt(ssr1/rdf1) #Residual standard error
      vv2=sqrt(ssr2/rdf2) #Residual standard error
      stderr1=vv1/sqrt(sum( (c(s:rst[1,1])-mean(c(s:rst[1,1])))^2))
      stderr2=vv2/sqrt(sum( ( c((rst[1,1]+1):e)-mean(c((rst[1,1]+1):e)) )^2))
      tval1=rst[2,1]/stderr1
      tval2=rst[2,2]/stderr2
      # two-sided p-values for the two slopes
      pr1=2*pt(abs(tval1),rdf1,lower.tail=FALSE)
      pr2=2*pt(abs(tval2),rdf2,lower.tail=FALSE)
      rst[4,1]=pr1
      rst[4,2]=pr2
      ssr=ssr1+ssr2
      if( pr1>0.05 & pr2>0.05 )
        ssr=1000000000000 # disqualify fits where neither slope is significant
      if(ssr<SSR)
      { SSR=ssr
        RST=rst }
    }
  }
  return(RST)
}
# 01-06 M-K test in each grid
# Apply the Mann-Kendall trend test to every grid cell of `xraster`.
# Args:
#   xraster: raster/array with one annual layer per year, starting 1961
#   type:    only "year" is implemented; any other value leaves the
#            result all-NA
#   month0:  currently unused
# Returns an array [rows, cols, 3] with [,,1] = Kendall's tau and
# [,,2] = the significance level reported by MannKendall().
# NOTE(review): the third slice is allocated but never filled.
MK.raster = function(xraster, type="year", month0=1)
{
  library(Kendall)
  library(raster)
  x = as.array(xraster)
  year0=1961
  D = dim(x)
  MK.xraster = array(data=NA,dim=c(D[1],D[2],3))
  if (type == "year"){
    for (i in 1:D[1])
      for (j in 1:D[2])
        if (TRUE %in% (x[i, j, ] >= -9999)) # skip all-missing cells
        {
          if( length(which(x[i,j,]>-9999))>2 ) # require at least 3 values
          {
            xts = ts(x[i,j,],start=year0,frequency=1)
            z = MannKendall(xts)
            MK.xraster[i,j,1:2] = c(z$tau,z$sl)
          }else
            MK.xraster[i,j,1:2]=NA
        }
  }
  return(MK.xraster)
}
# 01-07 Significance identifier classification (for figure)
# Map a p-value to a plotting class: 4 when significant (p <= 0.05),
# NA otherwise (including missing p-values).
classtype <- function(v)
{
  if (is.na(v)) {
    return(NA)
  }
  if (v <= 0.05) {
    return(4)
  }
  NA
}
# 01-08 Relocating the color bar
# Args:
#   colr:      vector of colours making up the colour bar
#   whitesite: the colour (or "mid") that should be aligned with z0;
#              must be contained in colr when it is not "mid"
#   z:         the raster or array you wish to plot
#   z0:        the data value you wish aligned with `whitesite`
# Returns the subset of `colr` for which `whitesite` lines up with z0
# over the data range of z.
cf <- function(colr, whitesite = "white", z, z0 = 0)
{
  zz <- as.matrix(z)
  z1 <- min(zz, na.rm = TRUE)     # data minimum
  z2 <- max(zz, na.rm = TRUE)     # data maximum
  zL1 <- (z0 - z1) / (z2 - z1)    # fraction of the data range below z0
  zL2 <- (z2 - z0) / (z2 - z1)    # fraction of the data range above z0
  cL <- length(colr)
  if (whitesite == "mid") {
    c0 <- round(cL / 2)
  } else {
    # NOTE(review): assumes `whitesite` occurs exactly once in colr;
    # zero or multiple matches would break the indexing below.
    c0 <- which(colr == whitesite)
  }
  cL1 <- (c0 - 1) / cL            # fraction of the bar below whitesite
  cL2 <- (cL - c0 + 1) / cL       # fraction of the bar above whitesite
  if (z0 < z1) {
    x <- round((z1 - z0) / (z2 - z0) * (cL - c0) + c0)
    colr_result <- colr[x:cL]
  } else if (z0 > z2) {
    x <- round((z2 - z1) / (z0 - z1) * c0)
    colr_result <- colr[1:x]
  } else if (zL1 == 0) {
    colr_result <- colr[c0:cL]
  } else if (zL2 == 0) {
    # was `colr_result = colr[c0:cL] = colr[1:c0]`: the chained
    # assignment mutated a local copy of colr to no observable effect;
    # the returned value is simply colr[1:c0].
    colr_result <- colr[1:c0]
  } else if (zL1 > cL1) {
    # NOTE(review): x may fall outside 1..cL here and in the branch
    # below, yielding NA-padded or invalid subsets -- confirm inputs.
    x <- round(c0 / zL1)
    colr_result <- colr[1:x]
  } else if (zL1 < cL1) {
    x <- round((zL2 * cL + c0 - cL) / zL2)
    colr_result <- colr[x:cL]
  } else {
    colr_result <- colr
  }
  return(colr_result)
}
##01End##
# 02 STI calculating -------------
# Standardised temperature index: for every grid cell and every calendar
# day, TX is standardised against the mean/sd of a (2*nday+1)-day window
# centred on that calendar day, pooled over all years.
TSI=array(dim=c(dim(TX)))
dates=substring(dimnames(TX)[[3]],5,8) # calendar day "MMDD" of each layer
fdates=levels(factor(dates))
nday = 7 # a 15-days window
pb = tkProgressBar(title="Progress",label="Completed %",
                   min=0, max=100, initial = 0, width = 300)
for(x in 1:xx)
  for(y in 1:yy)
    if (TRUE %in% (TX[x, y, ] >= -999)) # skip all-missing cells
      for(i in 1:366)
      {
        today = fdates[i]
        f <- which(dates == today) # this calendar day in every year
        ys=TX[x,y,f]
        # +/- nday days around each yearly occurrence of this day
        tdays = f + rep(-nday:nday,each=LL)
        ff=subset(tdays,tdays<=days & tdays>0) # clip to the record
        ymean=mean(TX[x,y,ff],na.rm=TRUE)
        ysd=sd(TX[x,y,ff],na.rm=TRUE)
        TSI[x,y,f]=(ys-ymean)/ysd
        info = sprintf("Completed %d%%", round(x*100/xx))
        setTkProgressBar(pb, value = x*100/xx,
                         title = sprintf("Progress (%s)",info),label = info)
      }
close(pb)
save(TSI, file = "STI.RData")
##02End##
# 03 Heatwave identify (daily) -------------
# A day is flagged when -TSI <= thr1; runs shorter than `da` days are
# discarded; gaps of at most `da2` days between events are merged back
# in when the gap itself satisfies the looser threshold thr2.
{
  ## 03-01 judge using thr1 ----
  Flag <- array(NA,dim = c(xx,yy,days))
  for(i in st:days)
    Flag[,,i] = ( -TSI[,,i] <= thr1 )
  ##03-01End##
  ## 03-02 identify consecutive 1 for at least da days ----
  Flag[which(is.na(Flag)==TRUE)] = -9999 # set NAs to -9999; restored to NA at the end
  pb = tkProgressBar(title="Progress",label="Completed %",
                     min=0, max=100, initial = 0, width = 300)
  for(x in 1:xx)
  {
    for(y in 1:yy)
      if (TRUE %in% (Flag[x,y,] >= -999))
      {
        # zero out candidate runs shorter than the minimum duration `da`
        for( i in st:days ) #st=2
          if(Flag[x,y,i] == 1 & Flag[x,y,i-1] != 1)
          {
            duration=sumfun(Flag[x,y,],i)
            if(duration < da)
              Flag[x,y,i:(i+duration-1)] <- rep(0,duration) # keep the parentheses around (i+duration-1)
          }
      }
    info = sprintf("Completed %d%%", round(x*100/xx))
    setTkProgressBar(pb, value = x*100/xx,
                     title = sprintf("Progress (%s)",info),label = info)
  }
  close(pb)
  ##03-02End##
  ## 03-03 Merging ----
  # merge two events separated by at most `da2` days when every day of
  # the gap still satisfies -TSI <= thr2
  Flag2=Flag
  pb = tkProgressBar(title="Progress",label="Completed %",
                     min=0, max=100, initial = 0, width = 300)
  for(x in 1:xx)
  {
    for(y in 1:yy)
      if(TRUE %in% (Flag2[x,y,] >= -999))
      {
        Flag2[x,y,st-1]=0
        for( i in st:length(Flag2[x,y,]) )
          if( Flag2[x,y,i] != 1 & Flag2[x,y,i-1] == 1)
          {
            duration=sumfun2(Flag2[x,y,],i)
            if( duration <= da2 & all( -TSI[x,y,i:(i+duration-1)] <= thr2) )
              Flag2[x,y,i:(i+duration-1)] = rep(1,duration)
          }
      }
    info = sprintf("Completed %d%%", round(x*100/xx))
    setTkProgressBar(pb, value = x*100/xx,
                     title = sprintf("Progress (%s)",info),label = info)
  }
  close(pb)
  ##03-03End##
  ## 03-04 Counting----
  # set Flag3 as the times, i.e., each heatwave event only the first day is 1, others 0
  Flag3= array(dim = c(xx, yy, dim(Flag2)[3]))
  for (x in 1:xx)
    for (y in 1:yy)
      if (TRUE %in% (Flag2[x, y,] >= -999))
        for(i in days:st)
          if( Flag2[x,y,i] == 1 & Flag2[x,y,i-1] != 1 )
            Flag3[x,y,i] = 1
  # restore the NA sentinel
  Flag2[which(Flag2 == -9999)] = NA
  Flag[which(Flag == -9999)] = NA
  ##03-04End##
}
##03End##
# 04 Metrics calculating -------------
# Annual frequency (DRfre), total duration (DRdur) and mean intensity
# (DRstg) per grid cell, their multi-year means, the area-weighted
# national series and the annual area coverage of events.
{
  ## 04-01 initialization ----
  SPId=TSI*Flag2 # daily TSI masked to heatwave days only
  SPId[which(SPId==0)]=NA
  Hy = substring(dimnames(TX)[[3]], 1, 4) # year of each daily layer
  fhy = factor(Hy)
  DRfre = array(dim = c(xx, yy, length(levels(fhy))))
  DRdur = array(dim = c(xx, yy, length(levels(fhy))))
  DRstg = array(dim = c(xx, yy, length(levels(fhy))))
  dimnames(DRfre)[[3]] = levels(fhy)
  dimnames(DRdur)[[3]] = levels(fhy)
  dimnames(DRstg)[[3]] = levels(fhy)
  ##04-01End##
  ## 04-02 annual value at each grid calculating ----
  # NOTE(review): the validity mask tests Flag rather than Flag2 --
  # confirm this is intended.
  for (x in 1:xx)
    for (y in 1:yy)
      if (TRUE %in% (Flag[x, y,] >= -999))
      {
        DRfre[x, y,] = tapply(Flag3[x, y,], fhy, sum, na.rm = TRUE)
        DRdur[x, y,] = tapply(Flag2[x, y,], fhy, sum, na.rm = TRUE)
        DRstg[x, y,] = tapply(SPId[x, y,], fhy, mean, na.rm = TRUE)
      }
  ##04-02End##
  ## 04-03 multi-year mean value at each grid calculating ----
  DRFRE = array(dim = c(xx, yy))
  DRDUR = array(dim = c(xx, yy))
  DRSTG = array(dim = c(xx, yy))
  for (x in 1:xx)
    for (y in 1:yy)
    {
      DRFRE[x, y] = mean(DRfre[x, y,], na.rm = TRUE)
      DRDUR[x, y] = mean(DRdur[x, y,], na.rm = TRUE)
      DRSTG[x, y] = mean(DRstg[x, y,], na.rm = TRUE)
    }
  # convert to raster to calculate area weight
  DRFREr = raster(DRFRE)
  DRDURr = raster(DRDUR)
  DRSTGr = raster(DRSTG)
  extent(DRFREr) = c(72, 136, 18, 54)
  extent(DRDURr) = c(72, 136, 18, 54)
  extent(DRSTGr) = c(72, 136, 18, 54)
  # `r` is presumably the CRS object loaded from crs.RData in the setup
  crs(DRFREr) = crs(r)
  crs(DRDURr) = crs(r)
  crs(DRSTGr) = crs(r)
  ##04-03End##
  ## 04-04 calculate national multi year average (consider area weight)----
  r1 = raster(DRfre[, , 1])
  extent(r1) = c(72, 136, 18, 54)
  crs(r1) = crs(r)
  w = area(r1, weights = TRUE, na.rm = TRUE) # per-cell area weights
  w = as.matrix(w)
  DRFREh = array(dim = c(LL))
  DRDURh = array(dim = c(LL))
  DRSTGh = array(dim = c(LL))
  for (i in 1:LL)
  {
    y1 = DRfre[, , i] * w
    DRFREh[i] = sum(y1, na.rm = T)
    y2 = DRdur[,,i] * w
    DRDURh[i] = sum(y2, na.rm = T)
    y3 = DRstg[,,i] * w
    DRSTGh[i] = sum(y3, na.rm = T)
  }
  # calculate the coverage
  DR1=DRfre[,,1]*0+1 # mark every grid cell with 1
  DRper=NULL
  for( i in 1:length(DRfre[1,1,]) )
  {
    pt=which(DRfre[,,i]>0) # cells with at least one event that year
    DRper=c(DRper,sum(DR1[pt]*w[pt],na.rm=TRUE)) # event cells times area weights
  }
  names(DRper)=levels(fhy)
  ##04-04End##
}
save.image(paste0(pathname,"/Metrics_heatwave.RData"))
##04End##
# the rest... ----------------
# the rest is the same as in 01_drought
|
599398074823249d5f9bf5e813a3cd25157336b5 | 032e5462c5aaed41b7c12f12abdf24a60c6da416 | /TP06_2/MLperceptronTP06_3.R | b8afb9a75467992737ec91a1d15a7219b75c4a8f | [] | no_license | antonioanunciacao/Redes-Neurais-Artificiais | f8a98c963acb52688502d1e5d719769211629f88 | 0e87a60bc81c9ab005e975eb583bd1cf5bb9ab2d | refs/heads/main | 2023-07-13T23:28:09.880983 | 2021-08-14T05:44:51 | 2021-08-14T05:44:51 | 341,011,263 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,924 | r | MLperceptronTP06_3.R | rm(list=ls())
dev.off() # NOTE(review): errors when no graphics device is open
library('plot3D')
library('roccv')
library('mlbench') # provides the BreastCancer data set used below
# Multi Layer Perceptron:
# Train a single-hidden-layer perceptron by online (per-sample) gradient
# descent: tanh hidden layer, *linear* output unit (the tanh output is
# commented out below).
# Args:
#   xin, yd:      training inputs (N x n) and targets
#   eta:          learning rate
#   tol:          stop once the epoch training MSE drops below this
#   maxepocas:    maximum number of epochs
#   neuronios:    number of hidden neurons
#   xtest, ytest: held-out set, evaluated once per epoch
#   fold:         fold id, used only in the progress printout
# Returns list(wo, wt, training MSE per epoch, test MSE per epoch).
MLPerceptron <- function(xin, yd, eta, tol, maxepocas, neuronios, xtest, ytest, fold) {
  dimxin <- dim(xin)
  N <- dimxin[1]
  n <- dimxin[2]
  # random weight init in [-0.5, 0.5]; +1 column/row for the bias terms
  wo <- matrix( runif( (n+1)*neuronios, -0.5, 0.5), nrow =neuronios, ncol=n+1 )
  wt <- matrix(runif(neuronios+1)-0.5, nrow = 1)
  xin <- cbind(1, xin)      # prepend bias input
  xtest <- cbind(1, xtest)
  nepocas <- 0
  eepoca <- tol + 1
  evec <- matrix(0, nrow = 1, ncol = maxepocas)
  eTestvec <- matrix(0, nrow = 1, ncol = maxepocas)
  while((nepocas < maxepocas) && (eepoca > tol)) {
    erro <- errotest <- 0
    xseq <- sample(N) # visit samples in random order each epoch
    for(i in 1:N) {
      irand <- xseq[i]
      z1 <- wo %*% xin[irand, ]
      a1 <- rbind(1, tanh(z1)) # hidden activations plus bias
      z2 <- wt %*% a1
      #yhati <- tanh(z2)
      yhati <- z2 # linear output unit
      e <- yd[irand]-yhati
      deltaE2 <- -1*e
      dwt <- eta*deltaE2 %*% t(a1)
      dwo <- matrix(0,dim(wo)[1], dim(wo)[2])
      # NOTE(review): this inner loop reuses `i`; harmless because the
      # outer for() reassigns i from its own sequence each iteration,
      # but a distinct index name would be clearer.
      for(i in 1:dim(wo)[1]) {
        # backprop through tanh: d tanh(z)/dz = 1/cosh(z)^2
        dwo[i,] <- ( eta*deltaE2*wt[,i+1]*( 1/cosh(z1[i,])^2 ) ) %*% t(xin[irand, ])
      }
      wt <- wt - dwt
      wo <- wo - dwo
      erro <- erro + e*e
    }
    # epoch evaluation on the held-out set (also with linear output;
    # NOTE(review): MLPredict applies tanh instead -- confirm intent)
    xtestseq <- sample(dim(xtest)[1])
    for(i in 1:dim(xtest)[1]) {
      irandtest <- xtestseq[i]
      Z1test <- wo %*% xtest[irandtest, ]
      A1test <- tanh(Z1test)
      Yhattest <- wt %*% rbind(1,A1test)
      Predict <- Yhattest
      etest <- ytest[irandtest] - Predict
      errotest <- errotest + etest*etest
    }
    nepocas <- nepocas + 1
    evec[nepocas] <- erro/N
    eTestvec[nepocas] <- errotest/dim(xtest)[1]
    eepoca <- evec[nepocas]
    if(nepocas %% 100 == 0) cat("Erro[", fold, ",", nepocas,"]:", evec[nepocas], "\n")
  }
  retlist <- list(wo, wt, evec[1:nepocas], eTestvec[1:nepocas])
  return(retlist)
}
# Forward pass of a trained network for every row of `xin`.
# Args:
#   xin:   numeric matrix of inputs, one sample per row (no bias column)
#   model: list as returned by MLPerceptron; model[[1]] = hidden-layer
#          weights (neurons x (inputs+1)), model[[2]] = output weights
#          (1 x (neurons+1))
# Returns an nrow(xin) x 1 matrix of tanh-activated outputs.
# NOTE(review): MLPerceptron trains with a *linear* output unit
# (`yhati <- z2`), but this function applies tanh at the output --
# confirm which activation is intended.
MLPredict <- function(xin, model) {
  W1 <- model[[1]]
  W2 <- model[[2]]
  X <- cbind(1, xin) # prepend bias input
  Predict <- matrix(0, nrow(xin))
  for (i in seq_len(nrow(X))) { # seq_len is safe for zero-row input
    Z <- W1 %*% X[i, ]
    A <- tanh(Z)
    Predict[i] <- tanh(W2 %*% rbind(1, A))
  }
  return(Predict)
}
##################################################################
# Load Dataset
# BreastCancer (mlbench): column 1 = Id, columns 2-10 = predictors,
# column 11 = Class.  data.matrix() converts the factor columns to
# their integer codes; missing values are replaced by 0.
data(BreastCancer)
data <- data.matrix(BreastCancer)
data[is.na(data)] <- 0
dataV <- data[,2:10] # NOTE(review): never used afterwards
# recode the class column: factor code 1 -> 0, everything else -> 1
for(i in 1:dim(data)[1]){
  if(data[i,11] == 1) data[i,11] = 0
  else data[i,11] = 1
}
# split the rows by class for balanced sampling below
XBenign <- data[which( data[,11] == 0 ),]
XMalignant <- data[which( data[,11] == 1 ),]
kfolds <- 10
model_kfolds <- list()
# per-fold results: column 1 = mean error, column 2 = sd of the error
resultTrain <- matrix(0, kfolds, 2)
resultTest <- matrix(0, kfolds, 2)
# Repeated random train/test splits: each fold draws a class-balanced
# training set (70% of the minority-class size from each class) and a
# balanced test set from the held-out rows.
for(i in seq_len(kfolds)){
  bach_size <- round(0.7*min(nrow(XBenign), nrow(XMalignant)))
  XBenign_index <- sample(seq_len(nrow(XBenign)), size = bach_size, replace = FALSE)
  XMalignant_index <- sample(seq_len(nrow(XMalignant)), size = bach_size, replace = FALSE)
  XBenignTest <- XBenign[-XBenign_index,]
  # draw as many benign test rows as there are held-out malignant rows;
  # was size=dim(...), which passed a length-2 vector to sample()
  XBenign_index_test <- sample(seq_len(nrow(XBenignTest)),
                               size = nrow(XMalignant[-XMalignant_index, ]),
                               replace = FALSE)
  XTrain <- rbind(XBenign[XBenign_index,],XMalignant[XMalignant_index,])
  # was XBenign[XBenign_index_test,]: the indices were drawn for
  # XBenignTest, so indexing XBenign leaked training rows into the test set
  XTest <- rbind(XBenignTest[XBenign_index_test,],XMalignant[-XMalignant_index,])
  YTrain <- as.matrix(XTrain[,11])
  XTrain <- as.matrix(XTrain[,2:10])
  YTest <- XTest[,11]
  XTest <- XTest[,2:10]
  # Hyperparameters
  eta <- 0.01
  tol <- 10^-3
  maxepocas <- 10^3
  neuronios <- 3
  # Train this fold's network and evaluate it
  model <- MLPerceptron(XTrain, YTrain, eta, tol, maxepocas, neuronios, XTest, YTest, i)
  YTrainpredict <- MLPredict(XTrain, model)
  YTestpredict <- MLPredict(XTest, model)
  resultTrain[i,1] <- mean(YTrain-YTrainpredict)
  resultTrain[i,2] <- sd(YTrain-YTrainpredict)
  resultTest[i,1] <- mean(YTest-YTestpredict)
  resultTest[i,2] <- sd(YTest-YTestpredict)
  model_kfolds[i] <- list(model) # [Wo, W1], [training error], [test error]
}
# Accuracy proxy per split: 1 - mean error (column 1); column 2 keeps
# the standard deviation of the errors unchanged.
acuraciaTest <- t(as.matrix(colMeans(resultTest)))
acuraciaTest[1,1] <- 1-acuraciaTest[1,1]
acuraciaTrain <- t(as.matrix(colMeans(resultTrain)))
acuraciaTrain[1,1] <- 1-acuraciaTrain[1,1]
ResultadoFinal <- as.matrix(rbind(acuraciaTrain, acuraciaTest))
limit1 <- 0.6
# Per-fold train/test error and sd (Portuguese labels kept as-is)
png("erroBreastCEq.png", 550, 380)
plot(seq(1:kfolds), resultTrain[,1], type='l', col = 'green', xlim = c(1,kfolds), ylim = c(0,limit1), xlab = 'fold', ylab = 'Erro')
par(new=T)
plot(seq(1:kfolds), resultTrain[,2], type='b', col = 'green', xlim = c(1,kfolds), ylim = c(0,limit1), xlab = '', ylab = '')
par(new=T)
plot(seq(1:kfolds), resultTest[,1], type='l', col = 'blue', xlim = c(1,kfolds), ylim = c(0,limit1), xlab = '', ylab = '')
par(new=T)
plot(seq(1:kfolds), resultTest[,2], type='b', col = 'blue', xlim = c(1,kfolds), ylim = c(0,limit1), xlab = '', ylab = '')
legend(x=6.5, y=limit1, legend = c('Erro Treino','Desvio Padrao','Erro Teste', 'Desvio Padrao'), col = c('green', 'green','blue', 'blue'), pch=c('-','-.', '-', '-.'))
dev.off()
# Learning curves from `model`, i.e. the LAST fold only; model[[3]] and
# model[[4]] are the per-epoch train/test MSE.
# NOTE(review): those vectors have length nepocas, which can be shorter
# than maxepocas when training stops early -- the x/y lengths would then
# differ; confirm.
png("CurvaAprendizadoBreastCEq.png", 550, 380)
plot(seq(1:maxepocas), model[[3]], type='l', col = 'green', xlim = c(0,maxepocas), ylim = c(0,0.1), xlab = 'Epoca', ylab = 'Erro')
par(new=T)
plot(seq(1:maxepocas), model[[4]], type='l', col = 'blue', xlim = c(0,maxepocas), ylim = c(0,0.1), xlab = '', ylab = '')
legend(x=700, y=0.1, legend = c('Erro Treino','Erro Teste'), col = c('green','blue'), pch=c('-','-'))
dev.off()
# Same curves expressed as 1 - MSE ("accuracy")
png("CurvaAcuraciaBreastCEq.png", 550, 380)
plot(seq(1:maxepocas), 1-model[[3]], type='l', col = 'green', xlim = c(0,maxepocas), ylim = c(0.90,1), xlab = 'Epoca', ylab = 'Acuracia')
par(new=T)
plot(seq(1:maxepocas), 1-model[[4]], type='l', col = 'blue', xlim = c(0,maxepocas), ylim = c(0.90,1), xlab = '', ylab = '')
legend(x=600, y=0.94, legend = c('Acuracia Treino','Acuracia Teste'), col = c('green','blue'), pch=c('-','-'))
dev.off()
0ef652e6de5db3645ee86e8e137273da8ef45c10 | 17fa6070d713cc2e4a0ce6d60c255703c4c2b6f2 | /data mining/lab/party.r | 62377275fb6e4f9f40d9643ff2a09a819245224d | [] | no_license | IT-H1/6Sem | 464cd38958c6b951e80c00fc35c1594e9531d52d | 768cb217af42c394f0fd98b2f2b354651a347bc0 | refs/heads/master | 2022-03-24T04:22:51.026831 | 2019-11-07T04:57:48 | 2019-11-07T04:57:48 | 117,665,087 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | r | party.r | library(readr)
library(dplyr)
library(party)
library(rpart)
library(rpart.plot)
library(ROCR)
set.seed(1200)
# Read the Titanic data from the short URL, keep a small predictor set;
# survived stays numeric (0/1) so the trees fit a regression target.
titanic3="https://goo.gl/At238b"%>%
  read.csv%>%#read in the data
  select(survived,embarked,sex,sibsp,parch,fare)%>%
  mutate(embarked=factor(embarked),
         sex=factor(sex))
summary(titanic3$fare)
summary(titanic3$sibsp)
summary(titanic3$survived)
# Randomly label each row "training"/"test" and split the data frame on
# those labels into a two-element list (.data$training / .data$test).
.data <- c("training", "test") %>%
  sample(nrow(titanic3), replace = T) %>%
  split(titanic3,.)
# CART tree (rpart) on the training half
rtree_fit <- rpart(survived ~ .,
                   .data$training)
rpart.plot(rtree_fit)
print(rtree_fit)
# conditional inference tree (party) on the same training half
ctree_fit <- ctree(survived ~ .,
                   data = .data$training)
# ROC performance (TPR vs FPR) of each model on the test half
ctree_roc <- ctree_fit %>%
  predict(newdata = .data$test) %>%
  prediction(.data$test$survived) %>%
  performance("tpr", "fpr")
# NOTE(review): confirm ROCR actually exports `plot`; a plain plot()
# call dispatches to the same method.
ROCR::plot(ctree_roc)
rtree_roc <- rtree_fit %>%
  predict(newdata = .data$test) %>%
  prediction(.data$test$survived) %>%
  performance("tpr", "fpr")
ROCR::plot(rtree_roc)
# overlay the ctree curve on the rpart curve for comparison
ROCR::plot(ctree_roc,col=70,add=TRUE)
|
ee1cf54fe811ca334708d6dbec36809e752a4fd7 | 0284cda1023b82fcb23f46373fe8d406273494dd | /man/simmLW.Rd | 3b04405d995692f7ef615de91d88cc097240eeaa | [] | no_license | flyingxiang/CCRWvsLW | b206569dd094a795a391c1bda8f8e821bf221f5b | fdccfa229695f7d92ffd6a709cb524a43d25e013 | refs/heads/master | 2021-01-22T17:39:56.325678 | 2016-06-10T20:36:30 | 2016-06-10T20:36:30 | 65,390,605 | 1 | 0 | null | 2016-08-10T14:45:43 | 2016-08-10T14:45:42 | null | UTF-8 | R | false | false | 1,054 | rd | simmLW.Rd | \name{simmLW}
\alias{simmLW}
\title{Simulate a Levy Walk (LW)}
\description{Simulate a Levy Walk (LW). This model represents the movement of the Levy search strategy.}
\usage{
simmLW(n, mu, a)
}
\arguments{
\item{n}{one integer value for the sample size. Note that this sample size represent the number of final step lengths and turning angles wanted (when TAc = 0). The ltraj object returned will be longer because a minimum of 3 locations are required to calculate a relative turning angle}
\item{mu}{one numeric (1<mu<=3) representing the mu value for the Pareto distribution of the step lengths}
\item{a}{one numeric and positive value representing the minimum step length value}
}
\details{Simulates a LW and returns an ltraj object}
\references{
Please refer to Auger-Methe, M., A.E. Derocher, M.J. Plank, E.A. Codling, M.A. Lewis (2015-In Press) Differentiating the Levy walk from a composite correlated random walk. Methods in Ecology and Evolution. Preprint available at \url{http://arxiv.org/abs/1406.4355}
}
\seealso{\code{\link{simmTLW}}}
|
075206047c5b5d4bfd882370a41116ba8f6f4e1b | 8c9e734b89de7c252d968631ba2cc57b43403497 | /man/detectMultipleExcursions.Rd | c43a2b99adfd7ed5fd0c18f60858ed90bf57c8fb | [
"MIT"
] | permissive | LinkedEarth/actR | 2e5e630fb24d0ed4b66f2a8e798edbdf5a2a3082 | 08bf459531002ab8520e59afc83457c922427e76 | refs/heads/main | 2023-06-24T02:42:36.839094 | 2023-06-16T23:37:07 | 2023-06-16T23:37:07 | 359,257,314 | 5 | 0 | NOASSERTION | 2023-03-30T22:58:10 | 2021-04-18T21:34:02 | HTML | UTF-8 | R | false | true | 3,295 | rd | detectMultipleExcursions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/excursion.R
\name{detectMultipleExcursions}
\alias{detectMultipleExcursions}
\title{Detect an excursion in many timeseries}
\usage{
detectMultipleExcursions(
ltt = NA,
n.ens = 100,
surrogate.method = "isospectral",
null.hypothesis.n = 100,
event.yr,
event.window,
ref.window,
sig.num = 2,
n.consecutive = 2,
exc.type = "either",
min.vals = 8,
na.rm = TRUE,
simulate.time.uncertainty = FALSE,
simulate.paleo.uncertainty = FALSE,
seed = as.integer(Sys.time())
)
}
\arguments{
\item{ltt}{A LiPD-timeseries-tibble, a tibble or data.frame that has the variable(s) of interest, a time variable (age, year or time) along with their metadata, arranged in rows. If ltt = NA, then one is created from the other inputs}
\item{n.ens}{How many ensembles to use for error propagation? (default = 100)}
\item{surrogate.method}{What method to use to generate surrogate data for hypothesis testing? Options include: \itemize{
\item 'isospectral': (Default) Following Ebisuzaki (1997), generate surrogates by scrambling the phases of the data while preserving their power spectrum, yielding “isospectral” surrogates. Uses the rEDM::make_surrogate_data() function
\item 'isopersistent': Generates surrogates by simulating from an autoregressive process of order 1 (AR(1)), which has been fit to the data. Uses the geoChronR::createSyntheticTimeseries() function
\item 'shuffle': Randomly shuffles the data to create surrogates. Uses the rEDM::make_surrogate_data() function
}}
\item{null.hypothesis.n}{How many simulations to run for null hypothesis testing (default = 100)}
\item{event.yr}{time at the center of the excursion window}
\item{event.window}{width (in time units) of the excursion window}
\item{ref.window}{width (in time units) of the reference windows}
\item{sig.num}{how many standard deviations required outside the reference windows must be exceeded for this to be considered an excursion? (default = 2)}
\item{n.consecutive}{how many consecutive points are required for this to be considered an excursion? (default = 2)}
\item{exc.type}{Type of excursion to look for. "positive", "negative", "either" or "both" (default = "either")}
\item{min.vals}{Minimum effective sample size (adjusted by autocorrelation) required in reference and event windows (default = 8)}
\item{na.rm}{Remove NAs? (default = TRUE)}
\item{simulate.time.uncertainty}{TRUE or FALSE. If an ensemble is not included, do you want to simulate time ensembles (default = TRUE)}
\item{simulate.paleo.uncertainty}{TRUE or FALSE. If an ensemble is not included, do you want to simulate paleo ensembles (default = TRUE)}
\item{seed}{Set a seed for reproducibility. By default it will use current time meaning it will not be reproducible.}
}
\value{
a tibble that describes the positive and negative excursion results
}
\description{
Determines whether an excursion event has occurred within the specified event window for a lipd-ts-tibble of timeseries. Excursion events are defined as n.consecutive values within the event window that are more extreme than the avg +/- sig.num standard deviations of the reference windows.
}
\references{
Morrill
}
\author{
Hannah Kolus
Nick McKay
}
|
066d20d7dc9686df125fd7efca296842303c3b4c | 0217dd9843c9462108ebf43d5b5f6feff1bbb65c | /man/add_privilege_to_cohort.Rd | 9b3af058162d262ddbbd8962176d8f85e477f39f | [] | no_license | aneuraz/DWHtools2 | 44f833f07dc15735d4134e57ce119a90949ec38c | 4c3eb585c331c680b4212352215708a763a54c71 | refs/heads/master | 2021-03-30T17:20:43.931314 | 2019-09-06T09:55:33 | 2019-09-06T09:55:33 | 90,238,636 | 0 | 1 | null | 2019-09-06T09:54:38 | 2017-05-04T08:18:04 | R | UTF-8 | R | false | true | 523 | rd | add_privilege_to_cohort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/queries.R
\name{add_privilege_to_cohort}
\alias{add_privilege_to_cohort}
\title{add_privilege_to_cohort
For Dr. Warehouse}
\usage{
add_privilege_to_cohort(num_cohorte, username, privilege, config)
}
\arguments{
\item{num_cohorte}{cohort number}
\item{username}{username}
\item{privilege}{privilege}
\item{config}{a config environment created by the function getConfig.}
}
\value{
0
}
\description{
add_privilege_to_cohort
For Dr. Warehouse
}
|
c7eeb96c41c4d0407e32d8b30094743f1b1c5bbc | 5e6caa777731aca4d6bbc88fa92348401e33b0a6 | /man/data_ratings.Rd | ce7c464d208a01d68fcd07fbd22b2466ebbc5025 | [
"MIT"
] | permissive | metamelb-repliCATS/aggreCAT | e5c57d3645cb15d2bd6d2995992ad62a8878f7fb | 773617e4543d7287b0fca4a507ba4c94ee8f5e60 | refs/heads/master | 2023-05-22T19:51:20.949630 | 2023-03-31T05:21:39 | 2023-03-31T05:21:39 | 531,484,296 | 6 | 1 | NOASSERTION | 2023-03-31T05:03:05 | 2022-09-01T11:15:36 | R | UTF-8 | R | false | true | 1,144 | rd | data_ratings.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_ratings}
\alias{data_ratings}
\title{P1_ratings}
\format{
A table with 6880 rows and 7 columns:
\describe{
\item{round}{character string identifying whether the round was 1 (pre-discussion) or 2 (post-discussion)}
\item{paper_id}{character string of the claim ids (25 unique claims total)}
\item{user_name}{character string of anonymized IDs for each participant (25 participants included in this dataset)}
\item{question}{character string for the question type, with four options: direct_replication, involved_binary, belief_binary, or comprehension}
\item{element}{character string for the type of response coded in the row, with five options: three_point_lower, three_point_best, three_point_upper, binary_question, or likert_binary}
\item{value}{numeric value for the participant's response}
\item{group}{character string of group IDs that contained the participants}
}
}
\usage{
data_ratings
}
\description{
Anonymized expert judgements of known-outcome
claims, assessed at the 2019 SIPS repliCATS workshop
}
\keyword{datasets}
|
da2ff07192b4d045d4e57c36d84d55499755b9c4 | ff06aa938c67437e4c65bb65d23851254e287cd3 | /man/scoreMDscan.Rd | 11265e16944f88febf71c60f55920e7e734ee401 | [] | no_license | mikepipo/MEET | dc9acad5dad5d89b2c55295f60e5f699b5ade613 | c5c94fddbbe4188dd5234bd3229d94e1810a548d | refs/heads/master | 2021-01-19T21:32:21.066281 | 2012-12-12T00:00:00 | 2012-12-12T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | rd | scoreMDscan.Rd | \name{scoreMDscan}
\alias{scoreMDscan}
\title{
Output MDscan method
}
\description{
This function writes the output MDscan method. The output contains the next fields:sequence, direction, score MDscan.}
\usage{
scoreMDscan(input, k, matriu, direction)
}
\arguments{
\item{input}{Score MDscan}
\item{k}{Length DNA sequence}
\item{matriu}{A set of aligned nucleotide sequence}
\item{direction}{Direction of DNA sequence}
}
\details{
Output's run.read.MDscan is input's scoreMDscan}
\author{
Erola Pairo <epeiroatibec.pcb.ub.es> and Joan Maynou <joan.maynouatupc.edu>
}
\seealso{
run.read.MDscan
}
|
819f58434d2f2f0f808c180e41d02acadac43df9 | 97f677c4c25409815436fc2cb549b620c505a4cd | /load_refs_from_bib_isaac.R | 4b8230356424627d5a576b3a2dfa23f8d21a7eb0 | [] | no_license | ascelin/interdiscp_cons_sci | 041c286b79aca9a40ae59d915d57c038ec6f155b | cee36aeb99dcc882e283f3919bec6a9c2fd722a5 | refs/heads/master | 2020-03-17T08:57:07.136931 | 2018-05-15T03:48:09 | 2018-05-15T03:48:09 | 133,455,810 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,397 | r | load_refs_from_bib_isaac.R | rm(list = ls())
library(RefManageR)
# Read a bibtex file and return, for each entry, the value of its
# "number-of-cited-references" field as a numeric vector (one element
# per bibtex entry, in file order).
load_ref_number <- function(filename) {
  bib_entries <- ReadBib(filename)
  n_entries <- length(bib_entries)
  cited_counts <- array(0, n_entries)
  for (entry_idx in seq_len(n_entries)) {
    field_value <- bib_entries[[entry_idx]]$"number-of-cited-references"
    cited_counts[entry_idx] <- as.numeric(field_value)
  }
  cited_counts
}
# Read a bibtex file and return a list with one element per entry, each
# holding that entry's cited references (the newline-separated
# "cited-references" field split into a character vector, wrapped in a
# length-1 list by strsplit). Entries with no such field yield an empty
# vector.
load_refs <- function(filename){
  refs <- ReadBib(filename)
  ref_length <- length(refs)
  ref_list <- vector('list', ref_length)
  for (ref_ind in seq_len(ref_length)){
    current_ref_list <- refs[[ref_ind]]$"cited-references"
    if (length(current_ref_list) > 0){
      # The field is one newline-separated string; reuse the value we
      # already extracted rather than re-indexing the bib object.
      ref_list[[ref_ind]] <- strsplit(current_ref_list, '\n')
    } else {
      ref_list[[ref_ind]] <- vector()
    }
  }
  return(ref_list)
}
# note that this requires library(RefManageR) to work
# reads in a bibtex file - note that all fields with "{{" or "}}" should be changed to "{" and "}" respectively in the bibtex file
# otherwise load_ref_number cannot coerce string to numeric, load_refs will work but will add "{" to the first entry and "}" to the last unless the "{{" and "}}" are removed
# Path to the bibtex export to parse (adjust for your machine).
filename = '~/Desktop/savedrecs.bib'
# One list element per bibtex entry, each containing that entry's cited references.
ref_list <- load_refs(filename)
a9a21c7851a4bf93532fb8dee04d2a0b762192b6 | e58eefe6af8f9826ef1a11c1408ca4a36e1078cd | /tests/testthat/simulate_data.R | 3a8c81ab48ee288506c066f5c2de4cfc515172f2 | [
"GPL-2.0-only",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | rfherrerac/MOSS | 53d35699f91fbb9429df7931984798717a0839a1 | 6359ee0b2c75f6c1fef86c9ae0407a97db80e93e | refs/heads/master | 2020-09-16T13:40:06.433364 | 2019-04-30T00:42:23 | 2019-04-30T00:42:23 | 223,786,850 | 0 | 1 | BSD-3-Clause | 2019-11-24T17:59:08 | 2019-11-24T17:59:07 | null | UTF-8 | R | false | false | 1,721 | r | simulate_data.R | simulate_data <- function(n_sim = 2e2) {
# Body of simulate_data(): simulates n_sim observations of a survival
# data-generating process with simcausal, then returns the observed data
# together with closures giving the true survival curves under A = 1 and A = 0.
# NOTE(review): library() inside a function modifies the caller's search path;
# consider requireNamespace() + simcausal:: at the call sites.
library(simcausal)
# Build the structural causal model node by node:
# W1 (unused binary covariate), W (uniform covariate), A (treatment whose
# probability jumps when W > .75), Trexp/Cweib (latent event and censoring
# times), T/C (times discretized onto a half-unit grid via round(x * 2)).
D <- DAG.empty()
D <- D +
node("W1", distr = "rbinom", size = 1, prob = .5) +
node("W", distr = "runif", min = 0, max = 1.5) +
node("A", distr = "rbinom", size = 1, prob = .15 + .5 * as.numeric(W > .75)) +
node("Trexp", distr = "rexp", rate = 1 + .7 * W^2 - .8 * A) +
node("Cweib", distr = "rweibull", shape = 1 + .5 * W, scale = 75) +
node("T", distr = "rconst", const = round(Trexp * 2)) +
node("C", distr = "rconst", const = round(Cweib * 2)) +
# Observed random variable (follow-up time):
node("T.tilde", distr = "rconst", const = ifelse(T <= C, T, C)) +
# Observed random variable (censoring indicator, 1 - failure event, 0 - censored):
node("Delta", distr = "rconst", const = ifelse(T <= C, 1, 0))
setD <- set.DAG(D)
dat <- sim(setD, n = n_sim)
# only grab ID, W's, A, T.tilde, Delta
Wname <- grep("W", colnames(dat), value = TRUE)
dat <- dat[, c("ID", Wname, "A", "T.tilde", "Delta")]
# input: scalar q, W vector. computes for all W, the S(q|A,W)
# (analytic survivor of the exponential event-time model defined above)
true_surv_one <- function(q, W, A = 1) sapply(W, function(w) {
1 - pexp(q, rate = 1 + .7 * w^2 - .8 * A)
})
# input: vector q. mean(S(q|A,W)|A), average out W. loop over q
# W is averaged over a fine grid of its support [0, 1.5]; q is divided by 2
# to undo the round(x * 2) discretization of T.
true_surv <- function(q_grid, surv_fn, A) {
W_grid <- seq(0, 1.5, .01)
survout <- numeric()
for (q in q_grid) survout <- c(survout, mean(surv_fn(q = q / 2, W = W_grid, A = A)))
return(survout)
}
# Convenience closures for the true marginal survival under A = 1 and A = 0.
truth_surv <- function(q) true_surv(q_grid = q, surv_fn = true_surv_one, A = 1)
truth_surv0 <- function(q) true_surv(q_grid = q, surv_fn = true_surv_one, A = 0)
return(list(dat = dat, true_surv1 = truth_surv, true_surv0 = truth_surv0))
}
|
99d1bfb7f5d7caa1abc1ae4cda4a5ac0a57b2187 | d73f9baee755dd89d05dd168d8018e8a8ea8c54e | /bare_peat_indices.r | 119e800b3f0f8745068c0a6d3ae62e35980601ae | [] | no_license | duncansnh/Bare-peat | caffd8e83630d64d33302a123233e2e13b3720da | 56889a8baa9fd6d84c1b6a151f6dfda97a62f5ad | refs/heads/master | 2022-04-05T22:55:36.468787 | 2020-02-26T17:00:33 | 2020-02-26T17:00:33 | 238,246,969 | 4 | 0 | null | 2020-02-21T10:56:10 | 2020-02-04T16:05:20 | null | UTF-8 | R | false | false | 3,426 | r | bare_peat_indices.r | # calculates various indices for bare peat mapping
# The rasters are too large to process in a single run, so the script is applied
# to one raster at a time; input is controlled by moving rasters between folders
# on the remote sensing drive.
library(raster)
library(stringr)
# NOTE(review): setwd() hard-codes a machine-specific path; prefer passing
# paths explicitly or using here::here() in shared code.
setwd("B:/92279_Bare_peat_Duncan/R_classification")
# Input multi-band image read as a RasterBrick.
inRaster <- "S2_MLarea4.tif"
img_br <- brick(inRaster)
###### functions to calculate various ndvi indices, water indices and blue:SWIR1 ratio
# Pull out individual layers of the brick by position. The names suggest
# Sentinel-2 bands (blue/green/red, red-edge 5-7, NIR, 8A, SWIR1/2) —
# TODO confirm the layer order matches the stacking of the input tif.
red <- img_br[[3]]
green <- img_br[[2]]
blue <- img_br[[1]]
Re5 <- img_br[[4]]
Re6 <- img_br[[5]]
Re7 <- img_br[[6]]
Re8A <- img_br[[8]]
NIR <- img_br[[7]]
# SWIR2 is extracted but not used by any index below (img_br itself is
# stacked into the output, so all bands are still written out).
SWIR1 <- img_br[[9]]
SWIR2 <- img_br[[10]]
# Normalized Difference Vegetation Index: (NIR - red) / (NIR + red).
ndvi <- function(NIR, red) {
  difference <- NIR - red
  total <- NIR + red
  difference / total
}
ndvi_ <- ndvi(NIR,red)
Gndvi <- function(NIR, green) {
(NIR - green) / (NIR+green)
}
Gndvi_ <- Gndvi(NIR,green)
Bndvi <- function(NIR, blue) {
(NIR - blue) / (NIR+blue)
}
Bndvi_ <- Bndvi(NIR,blue)
Re5ndvi <- function(Re5, red) {
(Re5 - red) / (Re5+red)
}
Re5ndvi_ <-Re5ndvi(Re5,red)
Re6ndvi <- function(Re6, red) {
(Re6 - red) / (Re6+red)
}
Re6ndvi_<- Re6ndvi(Re6,red)
Re7ndvi <- function(Re7, red) {
(Re7 - red) / (Re7+red)
}
Re7ndvi_<- Re7ndvi(Re7,red)
Re8Andvi <- function(Re8A, red) {
(Re8A - red) / (Re8A+red)
}
Re8Andvi_<- Re8Andvi(Re8A, red)
Re6Gndvi <- function(Re6, green) {
(Re6 - green) / (Re6+green)
}
Re6Gndvi_<- Re6Gndvi(Re6,green)
Re7Gndvi <- function(Re7, green) {
(Re7 - green) / (Re7+green)
}
Re7Gndvi_ <-Re7Gndvi(Re7, green)
Re8AGndvi <- function(Re8A, green) {
(Re8A - green) / (Re8A+green)
}
Re8AGndvi_ <-Re8AGndvi(Re8A, green)
NDWI <- function (green, NIR){
(green-NIR)/(green+NIR)
}
NDWI_ <-NDWI(green,NIR)
mDNWI <- function (green,SWIR1){
(green-SWIR1)/(green+SWIR1)
}
mDNWI_ <-mDNWI(green,SWIR1)
mNDVI <- function(Re7,Re5){
(Re7-Re5)/(Re7+Re5)
}
mNDVI_ <-mNDVI(Re7,Re5)
# Simple brightness measure: the mean of the three visible bands.
darkness <- function(red,green,blue){
  band_sum <- red + green + blue
  band_sum / 3
}
darkness_ <- darkness(red,green,blue)
# Band ratio: blue divided by SWIR1.
ratioblueSWIR <- function(blue,SWIR1){
  blue / SWIR1
}
ratioblueSWIR_ <- ratioblueSWIR(blue,SWIR1)
# rm(BLUEbandVector)
ratioNIRgreen <- function(NIR,green){
(NIR/green)
}
ratioNIRgreen_ <- ratioNIRgreen(NIR, green)
ratioredSWIR1 <- function(red,SWIR1){
(red/SWIR1)
}
ratioredSWIR1_ <-ratioredSWIR1(red,SWIR1)
ratioNIRSWIR1 <- function(NIR,SWIR1){
(NIR/SWIR1)
}
ratioNIRSWIR1_ <-ratioNIRSWIR1(NIR,SWIR1)
ratioRe5SWIR1 <- function(Re5,SWIR1){
(Re5/SWIR1)
}
ratioRe5SWIR1_ <-ratioRe5SWIR1(Re5,SWIR1)
ratioRe6SWIR1 <- function(Re6,SWIR1){
(Re6/SWIR1)
}
ratioRe6SWIR1_ <-ratioRe6SWIR1(Re6,SWIR1)
ratioRe7SWIR1 <- function(Re7,SWIR1){
(Re7/SWIR1)
}
ratioRe7SWIR1_ <-ratioRe7SWIR1(Re7,SWIR1)
#----------------------- stacking of outputs--------------------------#
# Derive the output name from the input name: strip the ".tif" extension
# and append "_Indices_Extra.tif".
filename <- unlist(strsplit(inRaster, ".", fixed = TRUE))
outStack <- paste(filename[1], "_Indices_Extra", ".tif", sep="")
print (outStack)
# Stack the original bands plus every computed index layer, and write the
# result as a single multi-band GeoTIFF (32-bit float), overwriting any
# existing file of the same name.
newStack <- stack(img_br, ndvi_,Gndvi_ ,Bndvi_, Re5ndvi_, Re6ndvi_, Re7ndvi_,Re8Andvi_,Re6Gndvi_, Re7Gndvi_, Re8AGndvi_, NDWI_, mDNWI_, mNDVI_, darkness_ , ratioblueSWIR_, ratioNIRgreen_ , ratioredSWIR1_, ratioNIRSWIR1_, ratioRe5SWIR1_, ratioRe6SWIR1_, ratioRe7SWIR1_)
writeRaster(newStack, filename= outStack, format='GTiff', datatype='FLT4S', overwrite=TRUE)
|
d073e7d42ffcef9bb5bf56f6bb68fc12a76e22a5 | 737ad8357d5705871db68801e2af5f5e7797684e | /descargas/archivados/whileArchived.R | 1e17472d623492f2e4eda7f8879f1c95cf66f372 | [] | no_license | arielolafsalgado/rnetwork | 2a3bb8b4d4d545fd8a56547e8ae296083c3785ca | 4b0df3d1bc8342cadd8c439f72a38ad8ad5d45e3 | refs/heads/main | 2023-04-11T20:35:59.891530 | 2021-05-21T15:48:25 | 2021-05-21T15:48:25 | 369,582,453 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,410 | r | whileArchived.R | require(rvest)
require(XML)
require(stringr)
# One entry per package: names are taken from files previously saved
# under 'archive-pages'.
packages = dir('archive-pages')
folder = 'archive-webpages'
dir.create(folder)
# CRAN package index URL template; DUMMYTEXT is substituted per package.
baseURL = 'https://cran.r-project.org/web/packages/DUMMYTEXT/index.html'
# Download each package's CRAN index page, skipping files already on disk
# so the script can be re-run after interruptions.
for(pack in packages){
url = sub('DUMMYTEXT',pack,baseURL)
fileName = paste(folder,pack,sep='/')
if(!file.exists(fileName)) download_html(url,fileName)
}
# Text patterns searched for in each downloaded CRAN page: one marking a
# removed package, one preceding the archival date.
generalPattern = 'Package ‘DUMMYTEXT’ was removed'
archivedPattern = 'Archived on '
# Result table: one row per package, with a removal flag and (if found)
# the archival date as a character string.
output = data.frame('Package'=packages,'Removed'=NA,'While'=NA,stringsAsFactors=F)
for(pack in packages){
filePath = paste(folder,pack,sep='/')
fileText = readLines(filePath)
searchPattern = sub('DUMMYTEXT',pack,generalPattern)
removedText = grep(searchPattern,fileText,value=T)
if(length(removedText)>0){
output[output$Package==pack,'Removed'] = TRUE
whileText = grep(archivedPattern,fileText,value=T)
if(length(whileText)>0){
# Take the text after the last occurrence of "Archived on ", split it
# into whitespace-separated tokens (commas removed), and keep the first
# token that parses as a YYYY-MM-DD date.
whileText = str_split(whileText,archivedPattern)[[1]]
whileText = whileText[length(whileText)]
whileText = str_split(gsub(',',' ',whileText),' ')[[1]]
whileText = as.Date(whileText,format='%Y-%m-%d')
output[output$Package==pack,'While'] = as.character(whileText[!is.na(whileText)][1])
}
}else{
output[output$Package==pack,'Removed'] = FALSE
}
}
write.csv(output,'whenArchived.csv',row.names=F)
f6b9da557e4aba0a66e92cfc30b548f4bf94e1f2 | 81b4d0b3d84cbfebf71dd99a399944e632af4adc | /old_education_content/smbc_r_workshop/lesson4_code.R | be07fd099f83cfe3dbc121a541cabff45beded4b | [] | no_license | SMBC-NZP/smbc-nzp.github.io | 5f6ef40ef6b6439f46ca2a54cf0e3036dc8f63ce | 48b79a5e9e4d5e983cccf8f85c80bc944e293817 | refs/heads/master | 2023-06-04T00:53:30.749944 | 2021-06-23T13:54:52 | 2021-06-23T13:54:52 | 116,872,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,734 | r | lesson4_code.R | # R worksheet script, lesson 4
#=================================================================================*
# ---- set-up ----
#=================================================================================*
# Load RCurl library (allows you to read online data and code to be read):
library(RCurl)
# Note: If you have not yet installed RCurl, please use the following to do so:
# install.packages(RCurl)
# Load a script that provides source code, automatically reading in data
# associated with this worksheet:
script <-
getURL(
"https://raw.githubusercontent.com/bsevansunc/workshop_languageOfR/master/sourceCode.R"
)
# Evaluate the source code and then remove the file from your working environment:
eval(parse(text = script))
rm(script)
#=================================================================================*
# ---- why would you use for loops? ----
#=================================================================================*
# Filter irisTbl to setosa:
irisTbl[irisTbl$species == 'setosa', ]
# Extract the petalLength field (column):
irisTbl[irisTbl$species == 'setosa', ]$petalLength
# Calculate the mean of petal lengths:
mean(irisTbl[irisTbl$species == 'setosa', ]$petalLength)
#=================================================================================*
# ---- exercise one ----
#=================================================================================*
# Mean petal lengths, matrix notation:
# Mean petal lengths, function method:
#=================================================================================*
# ---- indexing review ----
#=================================================================================*
# Explore vector v:
v
class(v)
str(v)
length(v)
# Explore vector v using indexing:
i <- 3
v[i]
v[3]
v[3] == v[i]
# Add 1 to the value of v at position three:
i <- 3
v[3] + 1
v[i] + 1
#=================================================================================*
# ---- for loops, simple example ----
#=================================================================================*
# Define a vector for output:
vNew <- vector('numeric', length = length(v))
str(vNew)
# Explore filling values of vNew by index:
i <- 3
v[i]
vNew[i] <- v[i] + 1
vNew[i]
v[i] + 1 == vNew[i]
# For loop sequence:
v
1:5
1:length(v)
seq_along(v)
# Example for loop sequence statements:
# for(i in 1:length(v))
# for(i in seq_along(v))
# For loop body:
i <- 3
vNew[i] <- v[i] + 1
#---------------------------------------------------------------------------------*
# ---- for loop, putting it all together (simple) ----
#---------------------------------------------------------------------------------*
# For loop output:
vNew <- vector('numeric',length = length(v))
# For loop sequence:
for(i in seq_along(v)){
# For loop body:
vNew[i] <- v[i] + 1
}
# Explore first for loop output:
vNew
vNew == v + 1
#=================================================================================*
# ---- exercise two ----
#=================================================================================*
# 2.1 Convert to a function with arguments m, b, and x
# 2.2 Generate a sequential vector of values containing all integers from 1-10.
# Assign the name x to the vector object.
# 2.3 Use a for loop and the function above to calculate values of y where:
# m = 0.5, b = 1.0, and x refers to the vector x above (Note: A for loop is not
# really required here)
#=================================================================================*
# ---- subsetting with for loops (split, apply, combine) ----
#=================================================================================*
# Mean petal lengths of Iris species without a for loop:
mean(irisTbl[irisTbl$species == 'setosa', ]$petalLength)
mean(irisTbl[irisTbl$species == 'versicolor', ]$petalLength)
mean(irisTbl[irisTbl$species == 'virginica', ]$petalLength)
# Make a vector of species to loop across:
irisSpecies <- levels(irisTbl$species)
irisSpecies
# For loop output statement:
petalLengths <- vector('numeric',length = length(irisSpecies))
petalLengths
# Exploring the iris data, subsetting by species:
i <- 3
irisSpecies[i]
irisTbl[irisTbl$species == irisSpecies[i], ]
# Split:
iris_sppSubset <- irisTbl[irisTbl$species == irisSpecies[i], ]
# Calculate mean petal length of each subset (apply):
mean(iris_sppSubset$petalLength)
#---------------------------------------------------------------------------------*
# ---- for loop, putting it all together (subsetting) ----
#---------------------------------------------------------------------------------*
# Make a vector of species to loop across:
irisSpecies <- levels(irisTbl$species)
# For loop output statement:
petalLengths <- vector('numeric',length = length(irisSpecies))
# For loop:
for(i in seq_along(irisSpecies)){
# Split:
iris_sppSubset <- irisTbl[irisTbl$species == irisSpecies[i], ]
# Apply:
petalLengths[i] <- mean(iris_sppSubset$petalLength)
}
# Make a tibble data frame of the for loop output (combine):
petalLengthFrame <-
data_frame(species = irisSpecies, count = petalLengths)
petalLengthFrame
#=================================================================================*
# ---- exercise three ----
#=================================================================================*
birdHabits
# Use a for loop and the birdHabits data frame to calculate the number species in
# each diet guild.
#=================================================================================*
# ---- for loops across data objects ----
#=================================================================================*
# Explore the bird count data:
head(birdCounts)
str(birdCounts)
# Explore the bird trait data:
head(birdHabits)
str(birdHabits)
#---------------------------------------------------------------------------------*
# ---- for loops across data objects: example, apples and omnivores ----
#---------------------------------------------------------------------------------*
# Extract vector of omnivorous species:
omnivores <- birdHabits[birdHabits$diet == 'omnivore',]$species
# Subset the counts to omnivores:
birdCounts[birdCounts$species %in% omnivores, ]$count
# Calculate the sum of counts:
sum(birdCounts[birdCounts$species %in% omnivores, ]$count)
# Subset the omnivore counts to site apple:
birdCounts[birdCounts$species %in% omnivores &
birdCounts$site == 'apple', ]
# Extract the count column:
birdCounts[birdCounts$species %in% omnivores &
birdCounts$site == 'apple', ]$count
# Calculate the sum:
sum(birdCounts[birdCounts$species %in% omnivores &
birdCounts$site == 'apple', ]$count)
#=================================================================================*
# ---- exercise 4 ----
#=================================================================================*
# Using the birdHabits and birdCounts data frames, modify the function below such
# that it will calculate the number of species of a given guild at a selected
# site.
richnessSiteGuild <- function(site, guild){
guildSpp <- birdHabits[birdHabits$foraging # COMPLETE
countSppSubset <- birdCounts[birdCounts$ # COMPLETE
countSppSiteSubset <- countSppSubset[# COMPLETE
nSpp <- # COMPLETE
return(nSpp)
}
richnessSiteGuild('apple', 'ground')
#=================================================================================*
# ---- for loops across data objects (continued) ----
#=================================================================================*
# Extract vector of omnivorous species:
omnivores <- birdHabits[birdHabits$diet == 'omnivore',]$species
# Generate a vector of unique sites:
sites <- unique(birdCounts$site)
# Site at position i:
i <- 3
sites[i]
# Subset data:
birdCounts_siteSubset <- birdCounts[birdCounts$site == sites[i],]
birdCounts_siteSubset
# Just a vector of omnivore counts:
countVector <-
birdCounts_siteSubset[birdCounts_siteSubset$species %in%
omnivores,]$count
# Get total number of omnivores at the site:
nOmnivores <- sum(countVector)
#---------------------------------------------------------------------------------*
# ---- for loops across data objects: complete for loop, method 1 ----
#---------------------------------------------------------------------------------*
sites <- unique(birdCounts$site)
outVector <- vector('numeric', length = length(sites))
for(i in seq_along(sites)){
birdCounts_siteSubset <- birdCounts[birdCounts$site == sites[i],]
countVector <-
birdCounts_siteSubset[birdCounts_siteSubset$species %in%
omnivores, ]$count
outVector[i] <- sum(countVector)
}
# Combine:
data_frame(site = sites, nOmnivores = outVector)
#---------------------------------------------------------------------------------*
# ---- for loops across data objects: complete for loop, method 2 ----
#---------------------------------------------------------------------------------*
sites <- unique(birdCounts$site)
outList <- vector('list', length = length(sites))
for(i in seq_along(sites)){
birdCounts_siteSubset <- birdCounts[birdCounts$site == sites[i],]
countVector <-
birdCounts_siteSubset[birdCounts_siteSubset$species %in%
omnivores,]$count
outList[[i]] <- data_frame(
site = sites[i],
nOmnivores = sum(countVector))
}
# Combine:
bind_rows(outList)
#=================================================================================*
# ---- exercise 5 ----
#=================================================================================*
# Using the richnessSiteGuild function you created in Exercies Four and the
# birdHabits and birdCounts data frames, modify the for loop code below to count
# the number of species that are ground foragers at each site.
sites <- unique(# COMPLETE
outList <- vector('list', length = # COMPLETE
for(i in # COMPLETE
outList[[i]] <- data_frame(site = sites[i],
# COMPLETE
}
bind_rows(# COMPLETE
#=================================================================================*
# ---- simulation with for loops ----
#=================================================================================*
# For loop output:
n <- vector('numeric', length = 5)
n
# Set the seed value:
n[1] <- 10
n
# For loop sequence:
# for(i in 2:length(n))
# Exploring the construction of the for loop body:
i <- 2
n[i]
n[i-1]
n[i] <- 2*n[i-1]
n
#---------------------------------------------------------------------------------*
# ---- simulation with for loops, complete for loop ----
#---------------------------------------------------------------------------------*
# Output:
n <- vector('numeric', length = 5)
# Seed:
n[1] <- 10
# For loop: each value is double the previous one, matching the
# step-by-step construction above (n[i] <- 2 * n[i - 1]). The original
# line `n[i] = n*v[i-1]` multiplied the whole vector n by an unrelated
# vector v and used `=` instead of `<-`.
for(i in 2:length(n)){
  n[i] <- 2 * n[i - 1]
}
#=================================================================================*
# ---- exercise 6 ----
#=================================================================================*
# One of my favorite <i>for loops</i> was created by Leonardo Bonacci (Fibonacci).
# He created the first known population model, from which the famous Fibonacci
# number series was created. He described a population (N) of rabbits at time t
# as the sum of the population at the previous time step plus the time step before
# that:
# 6.1 Create an output vector of 20 numeric values.
# 6.2 Seed the vector with the first two values, 0 and 1.
# 6.3 Use the formula above and your seed vector to generate the first 20 numbers
# of the Fibonacci number sequence.
# END #
|
e47e803630fcd619a135c462b212df3add4155e8 | 0ec591e5f04ea9da677f2b804f5f60a90447ff89 | /laggedOrderedLassoNetwork/R/timeLagLassoNetworkReconstruction.R | 8a3f52759202b68507f9977c8cb74e1bd0e00c35 | [] | no_license | pn51/laggedOrderedLassoNetwork | 1027ab60579a291ce97b96eace0eaa9b7fa22541 | 91f66c1f89f5edfc9e1a7b8bbfe465ff3134c528 | refs/heads/master | 2021-06-27T07:32:48.785442 | 2019-05-17T05:34:26 | 2019-05-17T05:34:26 | 137,845,108 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,371 | r | timeLagLassoNetworkReconstruction.R |
#' @title .timeLagLassoLaggedData
#' @description Fits a single time-lagged ordered lasso regression on pre-lagged data and returns the fitted coefficients.
#' @keywords internal
#' @param xData lagged expression matrix (n x (p * maxLag))
#' @param yData output expression or change-in-expression vector of length n
#' @param maxLag maximum predictor lag
#' @param lambda vector of penalization parameters, containing one or p elements
#' @param intercept if \code{TRUE}, include a model intercept
#' @param beta_pos optional vector containing positive parts of the model coefficients (\code{NULL} or vector of length p * maxLag)
#' @param beta_neg optional vector containing negative parts of the model coefficients (\code{NULL} or vector of length p * maxLag)
#' @param method underlying ordered lasso optimization method ('Solve.QP' or 'GG')
#' @param strongly.ordered if \code{TRUE} (\code{FALSE}), use the strongly (weakly) ordered lasso
#' @param maxiter maximum number of time lagged ordered lasso iterations
#' @param inneriter maximum number ordered lasso iterations
#' @param iter.gg maximum number of generalized gradient iterations
#' @param epsilon convergence error tolerance
#' @return a list of ordered lasso coefficients corresponding to the positive part of the weakly ordered solution (\code{bp}), the negative part of the weakly ordered solution (\code{bn}), the weakly ordered solution (\code{beta}), the weakly ordered intercept (\code{b0}), the strongly ordered intercept (\code{b0.ordered}), and the strongly ordered solution (\code{beta.ordered}).
# Fit a time-lagged ordered lasso regression for one target, given data
# that have already been lagged/combined, and return the fitted
# coefficients (weakly and strongly ordered variants).
.timeLagLassoLaggedData <- function(xData, yData, maxLag, lambda, intercept=TRUE,
beta_pos=NULL, beta_neg=NULL, method="Solve.QP", strongly.ordered=FALSE,
maxiter=500, inneriter=100, iter.gg=100, epsilon=1e-6){
# a modified version of timeLagLasso that can be used with multiple time series
# and only returns variables that are (possibly) relevant for network reconstruction
# assumes data has been normalized/lagged/combined already; x and y have been obtained using functions from dataLagging.R
# Warm-start vectors default to all zeros (one entry per lagged predictor column).
if (is.null(beta_pos)){
beta_pos <- rep(0, ncol(xData))
}
if (is.null(beta_neg)){
beta_neg <- rep(0, ncol(xData))
}
################
# if one value of lambda is given, use it for all genes and lags;
# otherwise, for each gene, use the same penalization across all lags
# (each of the p supplied values is repeated maxLag times).
if(length(lambda)==1){
lambda <- rep(lambda, ncol(xData))
} else{
lambda <- rep(lambda, each=maxLag)
}
#solve for coefficients using TL ordered lasso
# (.timeLagLassoEstOrdered is defined elsewhere in this package;
# standardization is disabled because inputs are pre-normalized)
est <- .timeLagLassoEstOrdered(x=xData, y=yData, lambda=lambda, maxlag=maxLag, intercept=intercept,
beta_pos=beta_pos, beta_neg=beta_neg,
stdeviation_inverse_scaled=1, standardize=FALSE, strongly.ordered=strongly.ordered,
method=method, maxiter=maxiter, inneriter=inneriter, iter.gg=iter.gg, epsilon=epsilon)
# Return only the pieces needed downstream for network reconstruction.
list(bp=est$beta_pos, bn=est$beta_neg, beta=est$beta, b0=est$b0,
b0.ordered=est$b0.ordered, beta.ordered=est$beta.ordered)
}
####################
####################
#' @title .timeLagLassoNetworkLaggedData
#' @description Fits a time-lagged ordered lasso model for each gene of a pre-lagged dataset and collects the fitted coefficients into one p x p matrix per lag.
#' @keywords internal
#' @param xData lagged expression matrix (n x (p * maxLag))
#' @param yData output expression or change-in-expression matrix (n x p)
#' @param maxLag maximum predictor lag
#' @param lambda a scalar or p x p matrix of penalization parameters. If a scalar, all coefficients are subject to the same penalization. Otherwise,\code{lambda[j,i]} is the penalization on variable \code{j} in the model for gene \code{i}
#' @param self if \code{TRUE}, include loops in time-lagged regression models
#' @param method underlying ordered lasso optimization method ('Solve.QP' or 'GG')
#' @param strongly.ordered if \code{TRUE} (\code{FALSE}), use the strongly (weakly) ordered lasso
#' @param maxiter maximum number of time lagged ordered lasso iterations
#' @param inneriter maximum number ordered lasso iterations
#' @param iter.gg maximum number of generalized gradient iterations
#' @param cores number of parallel cores
#' @import parallel
#' @return a list of coefficient matrices, with each matrix corresponding to a lag and ordered by increasing lag
.timeLagLassoNetworkLaggedData <- function(xData, yData, maxLag, lambda,
                                           self=TRUE, method='Solve.QP', strongly.ordered=FALSE,
                                           maxiter=500, inneriter=100, iter.gg=100,
                                           cores=1){
  # Fit one time-lagged ordered lasso model per gene (column of yData), in
  # parallel, and assemble the fitted coefficients into one p x p matrix per
  # lag (entry [j, i] = effect of gene j on gene i at that lag).
  # Assumes xData and yData were produced by the helpers in dataLagging.R.
  p <- ncol(yData)
  # If a single lambda is given, use it for every (predictor, target) pair.
  if(length(lambda)==1){
    lambda <- matrix(lambda, p, p)
  }
  # Shared matrix-assembly step (previously duplicated in both branches).
  # rowSelector(jj) gives the rows of the lag matrix filled for target gene jj:
  # all rows when self-loops are allowed, all rows except jj otherwise.
  buildLagMatrices <- function(coeffsByGene, coeffLags, rowSelector){
    lapply(seq_len(maxLag), function(ii){
      subsetIndex <- coeffLags == ii
      coeffMatrix <- matrix(0, p, p,
                            dimnames=list(colnames(yData), colnames(yData)))
      for(jj in seq_along(coeffsByGene)){
        fit <- coeffsByGene[[jj]]
        coeffs <- if(strongly.ordered) fit$beta.ordered else fit$beta
        coeffMatrix[rowSelector(jj), jj] <- coeffs[subsetIndex]
      }
      coeffMatrix
    })
  }
  if(!self){
    # No loops: drop gene ii's own lagged columns from its predictor set.
    coeffsByGene <- parallel::mclapply(seq_len(p), mc.cores=cores, FUN=function(ii){
      xInd <- ((ii-1) * maxLag + 1) : (ii * maxLag) # columns belonging to gene ii
      .timeLagLassoLaggedData(xData[,-xInd], yData[,ii], maxLag, lambda[-ii, ii],
                              beta_pos=NULL, beta_neg=NULL, method=method, strongly.ordered=strongly.ordered,
                              maxiter=maxiter, inneriter=inneriter, iter.gg=iter.gg)
    })
    coeffLags <- rep(seq_len(maxLag), p - 1) # lag of each fitted coefficient
    buildLagMatrices(coeffsByGene, coeffLags, function(jj) -jj)
  } else{
    # Include loops: every gene's lagged values may predict every gene.
    # (A stray trailing comma in the original call, `iter.gg=iter.gg,)`,
    # passed an empty argument and has been removed.)
    coeffsByGene <- parallel::mclapply(seq_len(p), mc.cores=cores, FUN=function(ii){
      .timeLagLassoLaggedData(xData, yData[,ii], maxLag, lambda[, ii],
                              beta_pos=NULL, beta_neg=NULL, method=method, strongly.ordered=strongly.ordered,
                              maxiter=maxiter, inneriter=inneriter, iter.gg=iter.gg)
    })
    coeffLags <- rep(seq_len(maxLag), p)
    buildLagMatrices(coeffsByGene, coeffLags, function(jj) seq_len(p))
  }
}
####################
####################
#' @title .convertCoefficientsToAdjMatrix
#' @description Thresholds per-lag coefficient matrices and combines them into a single predicted adjacency matrix with a zero diagonal.
#' @keywords internal
#' @param coeffMatricesByLag list of coefficient matrices, with each matrix corresponding to a lag and ordered by increasing lag
#' @param maxLag maximum lag to use for network prediction, less than or equal to the regression model lag
#' @param epsilon tolerance or threshold for edge prediction
#' @return a predicted adjacency matrix
.convertCoefficientsToAdjMatrix <- function(coeffMatricesByLag, maxLag=1, epsilon=1e-8){
  # Threshold each per-lag coefficient matrix at |coef| > epsilon, OR the
  # results across the first maxLag lags, and zero the diagonal (no self-edges).
  n_lags <- min(maxLag, length(coeffMatricesByLag))
  edge_count <- NULL
  for (lag_idx in seq_len(n_lags)) {
    # An edge is present at this lag when the coefficient magnitude exceeds epsilon.
    lag_edges <- abs(coeffMatricesByLag[[lag_idx]]) > epsilon
    edge_count <- if (is.null(edge_count)) lag_edges else edge_count + lag_edges
  }
  predicted <- edge_count > 0
  # Assigning 0 coerces the logical matrix to numeric, as in the original.
  diag(predicted) <- 0
  predicted
}
####################
####################
#combining everything together:
#' @title timeLaggedOrderedLassoNetwork
#' @description Predicts a gene regulatory network from expression data based on the time-lagged ordered lasso.
#' @param exprDataList list of expression datasets (timepoints x genes) for p genes
#' @param output expression output type ('expr.' or 'change expr.')
#' @param maxLag maximum predictor lag
#' @param lambda a scalar or p x p matrix of penalization parameters. If a scalar, all coefficients are subject to the same penalization. Otherwise, \code{lambda[j,i]} is the penalization on variable \code{j} in the model for gene \code{i}
#' @param self if \code{TRUE}, include loops in time-lagged regression models
#' @param method underlying ordered lasso optimization method ('Solve.QP' or 'GG')
#' @param strongly.ordered if \code{TRUE} (\code{FALSE}), use the strongly (weakly) ordered lasso
#' @param rescale if \code{TRUE}, rescale input expression data
#' @param rescaleSeparately if \code{TRUE}, rescale each dataset separately
#' @param maxiter maximum number of time lagged ordered lasso iterations
#' @param inneriter maximum number ordered lasso iterations
#' @param iter.gg maximum number of generalized gradient iterations
#' @param cores number of parallel cores
#' @return a predicted adjacency matrix
#' @export
timeLaggedOrderedLassoNetwork <- function(exprDataList,
                                          output='expr.', maxLag=2, lambda=1, self=TRUE,
                                          method='Solve.QP', strongly.ordered=FALSE,
                                          rescale=TRUE, rescaleSeparately=FALSE,
                                          maxiter=500, inneriter=100, iter.gg=100,
                                          cores=1){
  p <- ncol(exprDataList[[1]])
  ## ---- input validation ----
  if(any(sapply(exprDataList, ncol) != p)){
    stop('exprDataList: must have the same number of genes in each dataset')
  } else if(length(unique(lapply(exprDataList, function(ii){sort(colnames(ii))}))) > 1){
    stop('exprDataList: must have the same genes in each dataset')
  }
  if(output != 'expr.' && output != 'change expr.'){
    warning("output: must be 'expr.' or 'change expr.'; using 'expr.'", call. = FALSE)
    output <- 'expr.'
  }
  if(method != 'Solve.QP' && method != 'GG'){
    ## (fixed the unbalanced quote in the original warning message)
    warning("method: must be 'Solve.QP' or 'GG'; using 'Solve.QP'")
    method <- 'Solve.QP'
  }
  if(!is.numeric(lambda) && !is.integer(lambda)){
    stop('lambda: must be of class numeric or integer')
  } else if(length(lambda) != 1 && !all(dim(lambda) == c(p, p))){
    stop('lambda: must be a scalar or p x p matrix')
  }
  if(!is.numeric(maxLag) || maxLag < 1){
    stop('maxLag: must be a scalar greater than or equal to 1')
  } else if(floor(maxLag) < maxLag){
    warning('maxLag: rounding down to an integer')
    maxLag <- floor(maxLag)
  }
  if(!is.logical(strongly.ordered)){
    warning('strongly.ordered: must be a logical; using FALSE')
    strongly.ordered <- FALSE
  }
  if(!is.logical(rescale)){
    warning('rescale: must be a logical; using TRUE')
    rescale <- TRUE
  }
  if(!is.logical(rescaleSeparately)){
    warning('rescaleSeparately: must be a logical; using FALSE')
    rescaleSeparately <- FALSE
  }
  if(!is.numeric(maxiter) && !is.integer(maxiter)){
    warning('maxiter: must be a scalar; using 500')
    maxiter <- 500
  }
  if(!is.numeric(inneriter) && !is.integer(inneriter)){
    warning('inneriter: must be a scalar; using 100')
    inneriter <- 100
  }
  if(!is.numeric(iter.gg) && !is.integer(iter.gg)){
    warning('iter.gg: must be a scalar; using 100')
    iter.gg <- 100
  }
  ##########################
  ##########################
  ## ---- preprocess expression data ----
  ## Fall back to the raw data when rescale = FALSE; previously rescaledList
  ## was left undefined in that case and the function failed downstream.
  if(rescale){
    if(rescaleSeparately){
      rescaledList <- .rescaleDataSeparate(exprDataList)
    } else{
      rescaledList <- .rescaleData(exprDataList)
    }
  } else{
    rescaledList <- exprDataList
  }
  ## lag the expression data (output was normalized to a valid value above,
  ## so exactly one branch is always taken)
  if(output == 'expr.'){
    transformedList <- .transformListMultiLag(rescaledList, maxLag)
  } else if(output == 'change expr.'){
    transformedList <- .transformListMultiLagChange(rescaledList, maxLag)
  }
  xData <- transformedList$xData
  yData <- transformedList$yData
  rm(rescaledList, transformedList)
  ## broadcast a scalar penalty to a full p x p matrix
  if(length(lambda)==1){
    lambda <- matrix(lambda, p, p)
  }
  ##########
  ## fit one time-lagged ordered lasso model per gene
  coefficientsByLag <- .timeLagLassoNetworkLaggedData(xData, yData, maxLag, lambda,
                                                      self=self, method=method, strongly.ordered=strongly.ordered,
                                                      maxiter=maxiter, inneriter=inneriter, iter.gg=iter.gg,
                                                      cores=cores)
  ##########
  ## threshold the coefficients into an adjacency matrix
  .convertCoefficientsToAdjMatrix(coefficientsByLag, maxLag) #or replace maxLag with 1 because of the monotonicity constraint
}
#combining everything together:
#' @title timeLaggedOrderedLassoSemiSupervisedNetwork
#' @description Predicts a posterior gene regulatory network from a prior network and expression data based on the time-lagged ordered lasso.
#' @param exprDataList list of expression datasets (timepoints x genes) for p genes
#' @param adjMatrix prior network adjacency matrix
#' @param output expression output type ('expr.' or 'change expr.')
#' @param maxLag maximum predictor lag
#' @param lambdaEdge a scalar edge penalization parameter.
#' @param lambdaNonEdge a scalar non-edge penalization parameter.
#' @param self if \code{TRUE}, include loops in time-lagged regression models
#' @param method underlying ordered lasso optimization method ('Solve.QP' or 'GG')
#' @param strongly.ordered if \code{TRUE} (\code{FALSE}), use the strongly (weakly) ordered lasso
#' @param rescale if \code{TRUE}, rescale input expression data
#' @param rescaleSeparately if \code{TRUE}, rescale each dataset separately
#' @param maxiter maximum number of time lagged ordered lasso iterations
#' @param inneriter maximum number ordered lasso iterations
#' @param iter.gg maximum number of generalized gradient iterations
#' @param cores number of parallel cores
#' @return a predicted adjacency matrix
#' @export
timeLaggedOrderedLassoSemiSupervisedNetwork <- function(exprDataList,
                                                        adjMatrix,
                                                        output='expr.', maxLag=2, lambdaEdge=1, lambdaNonEdge=1,
                                                        self=TRUE,
                                                        method='Solve.QP', strongly.ordered=FALSE,
                                                        rescale=TRUE, rescaleSeparately=FALSE,
                                                        maxiter=500, inneriter=100, iter.gg=100,
                                                        cores=1){
  p <- ncol(exprDataList[[1]])
  genes <- colnames(exprDataList[[1]])
  ## ---- input validation ----
  if(!is.numeric(lambdaEdge) && !is.integer(lambdaEdge)){
    stop('lambdaEdge: must be of class numeric or integer')
  } else if(length(lambdaEdge) != 1){
    stop('lambdaEdge: must be a scalar')
  }
  if(!is.numeric(lambdaNonEdge) && !is.integer(lambdaNonEdge)){
    stop('lambdaNonEdge: must be of class numeric or integer')
  } else if(length(lambdaNonEdge) != 1){
    ## (fixed: this branch previously re-checked length(lambdaEdge), so a
    ## non-scalar lambdaNonEdge slipped through validation)
    stop('lambdaNonEdge: must be a scalar')
  }
  if(!all(dim(adjMatrix) == c(p, p))){
    stop('adjMatrix: must be a p x p adjacency matrix')
  }
  ## align the prior network with the expression data's gene order
  adjMatrix <- adjMatrix[genes, genes]
  ## prior edges are penalized by lambdaEdge, prior non-edges by lambdaNonEdge
  lambda <- adjMatrix * lambdaEdge + (adjMatrix == 0) * lambdaNonEdge
  timeLaggedOrderedLassoNetwork(exprDataList,
                                output=output, maxLag=maxLag, lambda=lambda, self=self,
                                method=method, strongly.ordered=strongly.ordered,
                                rescale=rescale, rescaleSeparately=rescaleSeparately,
                                maxiter=maxiter, inneriter=inneriter, iter.gg=iter.gg,
                                cores=cores)
}
543c8afcc85b350c53cf6b3c3eb31a4d70ae3e36 | 34920b982a3142f9d8dcdcc20cde5e09fcb11701 | /_code/R/array-analysis.R | c6618f02e7d549353233b2a4a12615f0eb5ad567 | [
"LicenseRef-scancode-public-domain",
"MIT",
"CC-BY-4.0"
] | permissive | nesi/hpc_training | 2eddf24ccc859896523123405c6cef3cde10efbf | 32d765cfdf654c507a80027e92398660c96b6c09 | refs/heads/gh-pages | 2020-06-27T05:01:24.167975 | 2019-01-14T22:54:04 | 2019-01-14T22:54:04 | 97,046,680 | 6 | 21 | NOASSERTION | 2019-01-14T22:54:05 | 2017-07-12T19:54:00 | HTML | UTF-8 | R | false | false | 481 | r | array-analysis.R | # array-analysis.R
#
# Code is from readings-02.R as part of SWC lesson http://swcarpentry.github.io/r-novice-inflammation/05-cmdline/
#
# Date: 7 March 2017
#
# first argument to script is filename
# file is read in and the mean per patient is outputted
# Compute and print the per-patient (row-wise) mean of an inflammation CSV.
#
# args: character vector whose first element is the path to a header-less CSV
#       file with one row per patient. Defaults to the script's command-line
#       arguments, so existing `Rscript array-analysis.R file.csv` usage is
#       unchanged; passing a path explicitly makes the function testable.
main <- function(args = commandArgs(trailingOnly = TRUE)) {
  filename <- args[1]
  dat <- read.csv(file = filename, header = FALSE)
  # row-wise mean: one value per patient
  mean_per_patient <- apply(dat, 1, mean)
  cat(mean_per_patient, sep = "\n")
}
main()
|
750adf39444752abdcbb7255c5e426162990f5de | 06c5b53309119df677dfad0dd098587fa4eb5a41 | /Module/old/SimulationStudy.R | f0d9de2c295db32ba26529a9fc18f37964ce50ea | [] | no_license | SebastianGPedersen/masters-thesis | 15025eba545d480bb803e8723a96545340a94d3d | 5d8ab1ec79c47bd641d15e923a0896e66ae223e0 | refs/heads/master | 2021-06-05T08:34:07.871365 | 2020-09-28T20:56:23 | 2020-09-28T20:56:23 | 112,249,633 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,743 | r | SimulationStudy.R | #######################################################################
# #
# FROM HERE WE CHAIN EVERYTHING TOGETHER TO FORM THE SIMULATION STUDY #
# #
#######################################################################
# working directory should be "masters-thesis"
setwd(Sys.getenv("masters-thesis"))
source("simulation/heston.R")
source("simulation/bursts.R")
source("simulation/jumps.R")
source("estimation/estimates.R")
source("estimation/rho.R")
source("estimation/teststat.R")
source("kernels/kernels.R")
source("module/SimStudyFunction.R")

# Parameter grids to sweep over: bandwidths (seconds converted to fractions
# of a trading year) and the burst parameters alpha/beta.
hset = c(120,300,600)/(3600*24*7*52)
alphaset = c(0.55, 0.65, 0.75)
betaset = c(0, 0.1, 0.2 ,0.3 ,0.4)

setting <- sim.setup(Nsteps = 23400, Npath = 10)

# test-statistic evaluation times: every 60th step
tind <- seq(from = 60, to = 23399, by = 60)

masterLength <- length(betaset)*length(alphaset)*length(hset)
# master holds one result (a list) per (beta, alpha, h) combination.
# (was `numeric(masterLength)`, which cannot store the lists that study()
# returns, and the loop body had a stray extra closing brace)
master <- vector("list", masterLength)
i <- 0
lastTime <- proc.time()[3]
for(beta in betaset){
  for(alpha in alphaset){
    for(h in hset){
      i <- i + 1
      # report progress plus the wall-clock time of the previous iteration
      # (was overwriting the reference time with the delta, so all timings
      # after the first iteration were meaningless)
      now <- proc.time()[3]
      print(c(i, masterLength, now - lastTime))
      lastTime <- now
      # create the burst setting for this grid point
      # (burst_time / interval_length are assumed to be defined by one of
      # the sourced files -- TODO confirm)
      burstset <- sim.burstsetting(alpha = alpha, beta = beta,
                                   burst_time = burst_time, interval_length = interval_length,
                                   c_1 = 0.1, c_2 = 0.1)
      master[[i]] <- study(setting = setting, hd = h, hv = h, t.index = tind,
                           conf = 0.95, burstsetting = burstset)
    }
  }
}
|
0fcc5b76033a1b14eea700009e310f86bad8b0ee | 271b855741c31fb6e7448debb33bcfe9ee9a8aeb | /4.Classification/KNN.R | 8a4285bc1d94c00332adaff36ef779ca3a791413 | [] | no_license | mrech/StatisticalLearning_R | 94a74fe719570665865e7d841b6040a2a5278f74 | 472e037bb82d17df2e5a6a316038af67b453e7d9 | refs/heads/master | 2020-04-24T14:38:54.555621 | 2019-08-20T18:48:34 | 2019-08-20T18:48:34 | 172,029,091 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 526 | r | KNN.R | ## K-Nearest Neighbors
library(class)
?knn
attach(Smarket)
objects(2)
# build a matrix of lag1 and lag2
Xlag = cbind(Lag1, Lag2)
Xlag[1:5,]
train = Year<2005
# classify a new observation, based on the trining set in the x space
# you look for the trining obs closest to your test point in
# Euclidean distance and classify to its class
knn.pred=knn(Xlag[train,],Xlag[!train,],Direction[train], k=1)
table(knn.pred,Direction[!train])
mean(knn.pred==Direction[!train])
#0.5 is useless. 1NN did no better than flipping a coin
|
eefadf381c027f648923300d2d945a5e10c8ef1f | 1348830c4ac089d25b841bbb87283937d6362769 | /MDS 570 - 2019 Fall A/Notes.R | e782069a89762a44f6b7a096163787fab451bbf8 | [] | no_license | megancusey/DataScienceCoursework | 1a14938bc2070f21c64af44e92881e1ebe3e9c60 | 77b0799055e2d09f3fa98a318b52b96f27f5744c | refs/heads/master | 2020-08-26T22:43:12.674912 | 2020-06-15T02:49:39 | 2020-06-15T02:49:39 | 217,170,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,065 | r | Notes.R | ## GENERAL:
## Level: The average value in the series.
## Trend:The increasing or decreasing value in the series.
## Seasonality:The repeating short - term cycle in the series.
## Noise:The random variation in the series.
##
## Additive:
## * Components are added together
## y(t) = Level + Trend + Seasonality + Noise
## * Used for datasets where changes over time are consist.
## The trend is about the same throughout the model.
## The seasonality has the same frequency and amplitude
## throughout the data.
##
## RMSE - Root Mean Squared Error
## AIC - Akaike's Information Criterion
## * Model with the minimum value of AIC indicates the
## better model when considering this metric.
## * Estimate the information that would be lost if a certain
## model were to be produced with real data.
## * Balances the trade-offs between complexity of a model and
## how well it fits the data.
## In-sample fit = building the model on the training data only, then comparing
## the fitted values against the observed values of that same training data.
bdf87b4c93ad2397846c8631c4c1f895155f5c2b | acba28164b85a58690a4222eb54bfffc9a2a9b1a | /codesnippet.R | 6732819d2a542a28230ce5f777e6d9f9000e3970 | [] | no_license | sanyalh/Learning-R | 8d15dcd9d1334acc70500d7f07be355bb90c95b3 | a05e71248e6c6aa0ea69d39cfbf92449ba6573b7 | refs/heads/master | 2022-11-14T17:50:04.534987 | 2020-06-17T15:25:42 | 2020-06-17T15:25:42 | 268,294,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 166 | r | codesnippet.R | ### 1
x <- tibble(
x=1,
y=10,
z=25)
x
### 2
abc <- tribble(
~x, ~y, ~z,
#--/--/--
"a", 10, 5,
"b", -2, 3,
"z", 5, -9
)
abc
|
be9f3b7484f44a3ee04185deeeae6df4fbc5b75e | 80801619972937f36b5bbee36e0e23bb05c16dbf | /debug/9/script.R | a923b898dff94afd9fa01923726a8b7c6e5ea45c | [] | no_license | ashtonbaker/pomp-practice | cf15ea4fa39a107454f09bc8338274ec8c20ef12 | b1e7820e7c28bef20b9bdf126e74cea4889ca447 | refs/heads/master | 2020-05-21T20:51:27.863302 | 2016-11-28T22:00:45 | 2016-11-28T22:00:45 | 61,384,320 | 0 | 1 | null | 2016-10-28T15:44:14 | 2016-06-17T15:37:43 | Jupyter Notebook | UTF-8 | R | false | false | 9,518 | r | script.R | library(ggplot2)
library(plyr)
library(reshape2)
library(pomp)
library(magrittr)
library(reshape2)
library(foreach)
#options(echo = FALSE)
stopifnot(packageVersion("pomp")>="1.8.8.1")
read.csv("./data/data.csv") %>%
subset(weeks <= 40, select=c(weeks,rep,L_obs,P_obs,A_obs)) -> dat
dat %>%
melt(id=c("weeks","rep")) %>%
acast(variable~rep~weeks) -> datarray
stages.E <- 7
stages.L <- 7
stages.P <- 7
stages.A <- 1
glob_snippet <- Csnippet(sprintf("
#include <math.h>
#define ESTAGES %d
#define LSTAGES %d
#define PSTAGES %d
#define ASTAGES %d
#define L_0 250
#define P_0 5
#define A_0 100
", stages.E, stages.L, stages.P, stages.A))
init_snippet <-
Csnippet("
double *E = &E1;
double *L = &L1;
double *P = &P1;
int k = 0;
for(k = 0; k < 7; k++) E[k] = 0;
for(k = 0; k < 5; k++) L[k] = 36;
for(k = 5; k < 7; k++) L[k] = 35;
for(k = 0; k < 5; k++) P[k] = 1;
for(k = 5; k < 7; k++) P[k] = 0;
A = 100;")
rproc_snippet <-
Csnippet("
double *E = &E1;
double *L = &L1;
double *P = &P1;
int k;
double L_tot = 0;
for (k = 0; k < LSTAGES; k++) L_tot += L[k];
double gamma_E = (ESTAGES / tau_E) * exp((-cel * L_tot - cea * A)/ESTAGES);
double gamma_L = (LSTAGES / tau_L) * (1 - mu_L);
double gamma_P = (PSTAGES / tau_P) * exp((-cpa * A) / PSTAGES);
double mu_e = (ESTAGES / tau_E) - gamma_E;
double mu_l = (LSTAGES / tau_L) - gamma_L;
double mu_p = (PSTAGES / tau_P) - gamma_P;
double etrans[2*ESTAGES], ltrans[2*LSTAGES], ptrans[2*PSTAGES], adeath;
// Calculate who goes where
for (k = 0; k < ESTAGES; k++) {
// Eggs growing to next stage
etrans[2*k] = rbinom(E[k], gamma_E);
// Eggs dying
etrans[2*k+1] = rbinom(E[k] - etrans[2*k] , mu_e/(1 - gamma_E) );
}
for (k = 0; k < LSTAGES; k++) {
// Larvae growing to next stage
ltrans[2*k] = rbinom(L[k], gamma_L);
// Larvae dying
ltrans[2*k+1] = rbinom(L[k]-ltrans[2*k], mu_l/(1 - gamma_L));
}
for (k = 0; k < PSTAGES; k++) {
// Pupae growing to next stage
ptrans[2*k] = rbinom(P[k], gamma_P);
// Pupae dying
ptrans[2*k+1] = rbinom(P[k]-ptrans[2*k], mu_p/(1 - gamma_P) );
}
adeath = rbinom(A, mu_A);
// Bookkeeping
E[0] += rpois(b*A); // oviposition
for (k = 0; k < ESTAGES; k++) {
E[k] -= (etrans[2*k]+etrans[2*k+1]);
E[k+1] += etrans[2*k]; // E[ESTAGES] == L[0]!!
}
for (k = 0; k < LSTAGES; k++) {
L[k] -= (ltrans[2*k]+ltrans[2*k+1]);
L[k+1] += ltrans[2*k]; // L[LSTAGES] == P[0]!!
}
for (k = 0; k < PSTAGES; k++) {
P[k] -= (ptrans[2*k]+ptrans[2*k+1]);
P[k+1] += ptrans[2*k]; // P[PSTAGES] == A[0]!!
}
A -= adeath;
")
dmeas_snippet <-Csnippet(
"
const double *L = &L1;
const double *P = &P1;
double fudge = 1e-9;
int k;
double L_tot = 0;
double P_tot = 0;
for (k = 0; k < LSTAGES; k++) L_tot += L[k];
for (k = 0; k < PSTAGES; k++) P_tot += P[k];
lik = dnbinom_mu(L_obs, 1/od, L_tot+fudge, 1) +
dnbinom_mu(P_obs, 1/od, P_tot+fudge, 1) +
dnbinom_mu(A_obs, 1/od, A+fudge, 1);
// if(lik < -138){
// Rprintf(\"\\n\\nweeks %f\", t);
// Rprintf(\"\\nL_tot %f\", L_tot);
// Rprintf(\"\\nP_tot %f\", P_tot);
// Rprintf(\"\\nA_tot %f\", A);
// Rprintf(\"\\nL_obs %f\", L_obs);
// Rprintf(\"\\nP_obs %f\", P_obs);
// Rprintf(\"\\nA_obs %f\", A_obs);
// Rprintf(\"\\nloglik %f\",lik);
// }
lik = (give_log) ? lik : exp(lik);
")
rmeas_snippet <-
Csnippet("
const double *L = &L1;
const double *P = &P1;
double fudge = 1e-9;
int k;
double L_tot = 0;
double P_tot = 0;
for (k = 0; k < LSTAGES; k++) L_tot += L[k];
for (k = 0; k < PSTAGES; k++) P_tot += P[k];
L_obs = rnbinom_mu(1/od,L_tot+fudge);
P_obs = rnbinom_mu(1/od,P_tot+fudge);
A_obs = rnbinom_mu(1/od,A+fudge);")
from_est <-
Csnippet("
Tb = exp(b);
Tcea = expit(cea);
Tcel = expit(cel);
Tcpa = expit(cpa);
Tmu_A = expit(mu_A);
Tmu_L = expit(mu_L);
Ttau_E = ESTAGES+exp(tau_E);
Ttau_L = LSTAGES+exp(tau_L);
Ttau_P = PSTAGES+exp(tau_P);
Tod = exp(od);")
to_est <-
Csnippet("
Tb = log(b);
Tcea = logit(cea);
Tcel = logit(cel);
Tcpa = logit(cpa);
Tmu_A = logit(mu_A);
Tmu_L = logit(mu_L);
Ttau_E = log(tau_E-ESTAGES);
Ttau_L = log(tau_L-LSTAGES);
Ttau_P = log(tau_P-PSTAGES);
Tod = log(od);")
pomp(
data = subset(dat, rep==4, select=-rep),
times="weeks", t0=0,
statenames = c(sprintf("E%d",1:stages.E),
sprintf("L%d",1:stages.L),
sprintf("P%d",1:stages.P),"A"),
paramnames = c("b", "cea", "cel", "cpa", "mu_A", "mu_L",
"tau_E", "tau_L", "tau_P","od"),
globals = glob_snippet,
initializer = init_snippet,
rprocess = discrete.time.sim(
step.fun = rproc_snippet,
delta.t = 1/7),
dmeasure = dmeas_snippet,
rmeasure = rmeas_snippet,
toEstimationScale = to_est,
fromEstimationScale = from_est,
params = c(b=1.18702207924403,
cea=0.0132088702404268,
cel=0.0172244842038504,
cpa=0.00466955565765198,
mu_A=1.89532307252467e-05,
mu_L=0.0158937470126093,
tau_E=15.7219226675806,
tau_L=7.18906255435284,
tau_P=18.0248791283609,
od = 1
)) -> model
model %>% simulate(as.data.frame=T,nsim=5) %>%
melt(id=c("time","sim")) %>%
subset(variable %in% c("L_obs","P_obs","A_obs")) %>%
ggplot(aes(x=time,y=value,color=variable,group=sim))+
geom_line()+
facet_wrap(~variable,ncol=1,scales="free_y")
pf <- pfilter(model, Np=1000)
logLik(pf)
library(foreach)
library(doParallel)
registerDoParallel(cores=30)
print("Starting initial pfilter")
stew(file="./output/pf.rda",{
t_pf <- system.time(
pf <- foreach(i=1:10,.packages='pomp',
.options.multicore=list(set.seed=TRUE),
.export=c("model")
) %dopar% {
pfilter(model,Np=10000)
}
)
n_pf <- getDoParWorkers()
},seed=625904618,kind="L'Ecuyer")
print("Finished initial pfilter")
(L_pf <- logmeanexp(sapply(pf,logLik),se=TRUE))
results <- as.data.frame(as.list(c(coef(pf[[1]]),
loglik=L_pf[1],
loglik=L_pf[2])))
write.csv(results,file="./output/model_params.csv",row.names=FALSE)
print("Starting local box search")
stew(file="./output/box_search_local.rda",{
t_local_mif <- system.time({
mifs_local <- foreach(i=1:20,
.packages='pomp',
.combine=c,
.options.multicore=list(set.seed=TRUE),
.export=c("model")
) %dopar%
{
mif2(
model,
Np=2000,
Nmif=50,
cooling.type="geometric",
cooling.fraction.50=0.5,
transform=TRUE,
rw.sd=rw.sd(b=0.02, cea=0.02, cel=0.02, cpa=0.02,
mu_A=0.02, mu_L=0.02, od=0.02,
tau_E=0.02, tau_L=0.02, tau_P=0.02)
)
}
})
},seed=482947940,kind="L'Ecuyer")
print("Finished local box search")
print("Starting lik_local")
stew(file="./output/lik_local.rda",{
t_local_eval <- system.time({
results_local <- foreach(mf=mifs_local,
.packages='pomp',
.combine=rbind,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
evals <- replicate(10, logLik(pfilter(mf,Np=20000)))
ll <- logmeanexp(evals,se=TRUE)
c(coef(mf),loglik=ll[1],loglik=ll[2])
}
})
},seed=900242057,kind="L'Ecuyer")
print("Finished lik_local")
results_local <- as.data.frame(results_local)
results <- rbind(results,results_local[names(results)])
write.csv(results,file="./output/model_params.csv",row.names=FALSE)
# Box of lower/upper bounds (on the natural parameter scale) from which
# starting values for the global search are drawn uniformly at random
# (see the runif() over these rows in the global-search block below).
params_box <- rbind(
  b=c(0, 20),
  cea=c(0, 1),
  cel = c(0, 1),
  cpa = c(0, 1),
  mu_A = c(0, 1),
  mu_L = c(0, 1),
  tau_E = c(7, 14),
  tau_L = c(7, 14),
  tau_P = c(7, 14),
  od = c(1,1)  # both bounds equal 1: od is held fixed in the draws
)
print("Starting global search")
stew(file="./output/box_search_global.rda",{
n_global <- getDoParWorkers()
t_global <- system.time({
mf1 <- mifs_local[[1]]
guesses <- as.data.frame(apply(params_box,1,function(x)runif(30,x[1],x[2])))
results_global <- foreach(guess=iter(guesses,"row"),
.packages='pomp',
.combine=rbind,
.options.multicore=list(set.seed=TRUE),
.export=c("mf1")
) %dopar%
{
mf <- mif2(mf1,start=c(unlist(guess)),tol=1e-60)
mf <- mif2(mf,Nmif=100)
ll <- replicate(10,logLik(pfilter(mf,Np=100000)))
ll <- logmeanexp(ll,se=TRUE)
c(coef(mf),loglik=ll[1],loglik=ll[2])
}
})
},seed=1270401374,kind="L'Ecuyer")
results_global <- as.data.frame(results_global)
results <- rbind(results,results_global[names(results)])
write.csv(results,file="./output/model_params.csv",row.names=FALSE)
print("Finished global search")
sink("message.txt", append=FALSE, split=FALSE)
proc.time()
sink()
q(runLast = FALSE)
|
5c1be39267b3be3f0c85432ec99e4daf4ecf35ed | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/multiplex/examples/semigroup.Rd.R | 281c93781e522bdfaf1d1910a6a4d97595ad6574 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 418 | r | semigroup.Rd.R | library(multiplex)
### Name: semigroup
### Title: Constructing the Semigroup of Relations
### Aliases: semigroup
### Keywords: algebra math
### ** Examples
## Create the data: 2 binary relations among 3 elements
arr <- round( replace( array(runif(18), c(3,3,2)), array(runif(18),
c(3,3,2))>.5, 1 ) )
## optional: put labels
dimnames(arr)[[3]] <- list("n", "m")
## look at the semigroup
semigroup(arr)
|
d0ca86886ed2914a67a475d2df73bad8adbe1685 | f42d1165f3ebcff06bc2d555d42a2d4770db687c | /man/mlb_player_full.Rd | f9aa7cb2fdc83385cc405daae24d50943c2a1fd4 | [] | no_license | IvoVillanueva/mlbstatsR | 11fc14d7d6a070d46dcde76fc451e204333afe66 | 2f20733d75be7cc11d88c452e4eea7d47788d750 | refs/heads/main | 2023-08-04T01:33:34.286957 | 2021-09-09T17:57:27 | 2021-09-09T17:57:27 | 361,255,437 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 839 | rd | mlb_player_full.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlb_player_full.R
\name{mlb_player_full}
\alias{mlb_player_full}
\title{Estadistica oficial bateo y pitcheo de los jugadores de la MLB (baseball)}
\usage{
mlb_player_full(year = year_actual, stats = "hitting", season_type = "regular")
}
\arguments{
\item{year}{numeric}
\item{stats}{character}
\item{season_type}{character}
}
\value{
Estadisticas de la pagina oficial mlb.com
}
\description{
Estadistica oficial bateo y pitcheo de los jugadores de la MLB (baseball)
}
\examples{
# year = year que queremos visualizar
# stats = hitting o pitching
# season_type = 'regular', 'playoffs', 'wildcard',
# 'divisionales', 'championship', 'mundiales' o 'pretemporada'
# Get las estadisticas de pitcheo del 2018
\donttest{mlb_player_full(2018, "pitching", "regular")}
}
|
63aeac380ef9b899f7dbd0670c136d58faa976d0 | 94a4c2d5e3b95b8b0eed54d6a49a3916351a6d75 | /R/documents.R | 20cd536809898eaa3fc1c7d001ecefe7b567469a | [] | no_license | lee269/companies_house | e5f1c2f6b2861734bc0a3d7d34ca61429e0be844 | ef4cc73fb123d7532551c85040d747f67e53b6a9 | refs/heads/master | 2020-06-14T20:25:48.798956 | 2019-08-31T16:53:32 | 2019-08-31T16:53:32 | 195,116,267 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,415 | r | documents.R | # Documents: https://forum.aws.chdev.org/t/cant-access-documents-from-amazons3-server/1871
# Sys.setenv(http_proxy="http://10.85.4.54:8080", https_proxy="https://10.85.4.54:8080")
# retrieve filing history table for a company
get_filings <- function(company_number, key, category = NULL, items_per_page = NULL){
params <- NULL
baseurl <- "https://api.companieshouse.gov.uk/company/"
url <- paste0(baseurl, company_number, "/filing-history")
if(length(c(category, items_per_page)) == 2){
params <- paste0("?", "category=", category, "&", "items_per_page=", items_per_page)
}
if(length(c(category, items_per_page)) == 1){
if(!is.null(category)) params <- paste0("?", "category=", category)
if(!is.null(items_per_page)) params <- paste0("?", "items_per_page=", items_per_page)
}
url <- paste0(url, params)
result <- httr::GET(url, httr::authenticate(key, ""))
z <- jsonlite::fromJSON(httr::content(result, as = "text", encoding = "utf-8"), flatten = TRUE)
filings <- z$items
return(filings)
}
# Check whether a filing document's metadata advertises an XBRL (xml) rendition.
#
# metadata_url: document-metadata URL from a company's filing history
# key: Companies House API key (sent as a basic-auth header)
#
# Flattens the parsed metadata into a one-column data frame whose row names
# are the nested field paths, then returns TRUE when any path mentions "xml".
# NOTE(review): relies on `%>%` (magrittr) being attached by the calling
# script -- confirm it is loaded before this is used.
has_xbrl <- function(metadata_url, key){
  auth <- paste0("Basic ", jsonlite::base64_enc(paste0(key, ":")))
  meta <- httr::GET(metadata_url, httr::add_headers(Authorization = auth))
  metaparsed <- jsonlite::fromJSON(httr::content(meta, as = "text", encoding = "utf-8"), flatten = TRUE)
  # count how many flattened metadata field names contain "xml"
  x <- as.data.frame(unlist(purrr::map(metaparsed, unlist))) %>% row.names() %>% stringr::str_detect("xml") %>% sum()
  if (x > 0) return(TRUE) else return(FALSE)
}
# Download a document (pdf or xbrl rendition) from a company's filing history.
#
# metadata_url: document-metadata URL from the filing history
# key: Companies House API key (sent as a basic-auth header)
# type: "pdf" or "xbrl" -- selects the Accept header / rendition requested
# filename: local path the raw document bytes are written to
get_document <- function(metadata_url, key, type, filename){
  # Validate the type up front; previously an unknown type only failed later
  # with an opaque "object 'accept' not found" error after a network call.
  if (type == "pdf") {
    accept <- "application/pdf"
  } else if (type == "xbrl") {
    accept <- "application/xhtml+xml"
  } else {
    stop("type: must be 'pdf' or 'xbrl'", call. = FALSE)
  }
  auth <- paste0("Basic ", jsonlite::base64_enc(paste0(key, ":")))
  meta <- httr::GET(metadata_url, httr::add_headers(Authorization = auth))
  metaparsed <- jsonlite::fromJSON(httr::content(meta, as = "text", encoding = "utf-8"), flatten = TRUE)
  content_url <- metaparsed$links$document
  # The content endpoint redirects; follow it manually so the Authorization
  # header is not forwarded to the final host (see forum link at top of file).
  content_get <- httr::GET(content_url, httr::add_headers(Authorization = auth, Accept = accept), httr::config(followlocation = FALSE))
  finalurl <- content_get$headers$location
  finaldoc <- httr::GET(finalurl, httr::add_headers(Accept = accept))
  writeBin(httr::content(finaldoc, "raw"), filename)
}
|
0706a9da6ad5d72cfeda2ad8e56d03df7beee85d | a859b51a4581b12dc5415689db308db6f11d7bd1 | /R/search_candidates.R | 1cb634abf7a7fec170366de56cf83c89f7ca4ae7 | [
"MIT"
] | permissive | gpli/DNB | e93a71c269790185a2a10d599f01ebf80058aa03 | 534e1f8080e2f583ca4432127d28451b10c489f7 | refs/heads/master | 2022-09-27T22:32:52.468486 | 2022-09-16T06:23:36 | 2022-09-16T06:23:36 | 166,069,293 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,132 | r | search_candidates.R | #' Search Candidate DNB genes
#'
#' Cluster by correlation, caculate DNB scores of all subtrees, and get best DNB
#' score for each time point
#' @param dnb a DNB object
#' @param min_size minimum gene number in subtree. Default 2.
#' @param max_size maximum gene number in subtree. Default Inf.
#' @param included_genes genes to be included in subtree. Default NULL.
#' @param all should all \@code{included_genes} be in subtree. Default False.
#' @param with_ctrl if consider control group. Default T.
#' @param verbose output progress.
#' @return a DNB object
#' @export
search_candidates <- function(dnb, min_size = 2, max_size = Inf, included_genes = NULL, all = F, with_ctrl = T, verbose = T) {
for (tp in levels(dnb$time)) {
if (verbose) cat("time point", tp, "\n")
dnb_lite <- NULL
if (is.null(dim(dnb$correlation[[tp]]))) {
cat("Loading correlation data files ...\n")
dnb_lite <- dnb
dnb$correlation[[tp]] <- get_correlation(dnb, tp)
if (!is.null(dnb$group) && isTRUE(with_ctrl)) {
dnb$correlation_ctrl <- get_correlation(dnb, tp, "correlation_ctrl")
}
}
cat("Hierarchical clustering genes ...\n")
dend <- as.dendrogram(hclust(as.dist(1 - dnb$correlation[[tp]])))
modules <- dendextend::partition_leaves(dend)
member_nums <- sapply(modules, length)
modules <- modules[member_nums >= min_size & member_nums <= max_size]
if (!is.null(included_genes)) {
if (all == T) {
modules <- modules[sapply(modules, function(x) {all(included_genes %in% x)})]
}
else {
modules <- modules[sapply(modules, function(x) {any(included_genes %in% x)})]
}
}
cat("Calculating module attributes...\n")
module_dnbs <- lapply(modules, function(m) get_DNB_attr(dnb, tp, m, with_ctrl = with_ctrl))
if (!is.null(dnb_lite)) {
dnb <- dnb_lite
}
dnb$candidates[[tp]] <- module_dnbs[[which.max(sapply(module_dnbs, "[[", "score"))]]
}
return(dnb)
}
|
48106255781f822dd09a28ed251c1cf528e2d4c3 | 3f0ad54d14c701ce6017a3f7b7393b85fd5d6239 | /low_valence.R | b3ab93accf53af49c19ee986178af06aedaa15fa | [] | no_license | luciasalar/WeiboDepression | d4396e2a7af65fb90ac9433d88299031dd44dba2 | b5b1664e03197cf7c21a82302dcd0b320c04deb8 | refs/heads/master | 2020-04-18T19:30:48.226334 | 2019-02-15T16:53:31 | 2019-02-15T16:53:31 | 167,713,586 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,527 | r | low_valence.R | require(reshape2)
#in this script, we will see if there's any characteristics in the low valence group
#87345 posts in 3 years
# The valence statistics of the whole sample (n = 1629) in 3 years
# #Min. 1st Qu. Median Mean 3rd Qu. Max.
# #-0.800000 -0.009214 0.113420 0.122521 0.255556 0.950000
#
# 285 answered the question if they have any life changes happened recently
# 279 answered the question and fill out CESD
# 51 among 279 have a mean valence lower than the first quantile 0.18
# 57 among 279 have high CESD score ( >= 28 ) 0.204
#
# 77 among 279 reported negative life event happened recently
# 34 among 77 have high CESD score 0.44
#
# Among those with low valence in 3 years (n = 51) , 13 of them have high CESD 0.26
# Among those with low valence in 3 years but no negative life events happen recently (n = 174), 23 of them have high CESD 0.13
# Among those with low valence in 3 years and reported negative life events (n = 16), 9 of them have high CESD 0.56
#
# Among the 77 people reported negative life events happened recently:
# 33 reported break up with a partner (median cesd: 29 median valence: 0.13 )
# 23 reported a family member was diagnosed with a severe illness (median cesd: 23 median valence: 0.13 )
# 12 reported unemployment (median cesd: 29 median valence: 0.014 )
# 31 reported other (median cesd: 25 median valence: 0.15 )
# 17 reported more than two negative life events (median cesd: 26 median valence: 0.12 )
#conclusion:negative life changes and long time low valence are important factor to predict depression score, there's an accumulating
#effect of these two factors.
setwd("~/phd_work/depression dessertation/data")
cesd<- read.csv('~/phd_work/depression dessertation/data/depression_fix_nohis.csv')
event<- read.csv('~/phd_work/depression dessertation/data/all_users_history copy.csv') ##70 users reported they have big change in life recent
event <- event[,1:6] #discard the cesd score in this table
file<- read.csv('~/phd_work/depression dessertation/data/3year_liwc.csv')
#let's see the sentiment data
summary(file$sentiment)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#-1.68750 -0.33750 0.16667 0.09758 0.60000 1.52000
###sentiment for h and l group
senti <- file[c('userid','sentiment','day')]
cesd <- cesd[!duplicated(cesd$userid),]
senti_d <- merge(senti,cesd, by ='userid')
senti_d$cesd_sum <- rowSums(senti_d[,5:15])
c <- senti_d[senti_d$senti < 0, ]
senti_h <- senti_d[senti_d$cesd_sum > 22, ]
senti_l <- senti_d[senti_d$cesd_sum <= 22, ]
#select recent 3 months
senti_h <- senti_l
m <- senti_h[senti_h$day <= 30 , ]
summary(m$cesd_sum)
summary(m$senti)
#m = 0.175, [-0.33, 0.60] 1st/3rd
oneyear <- senti_h[senti_h$day <= 365 , ]
summary(oneyear$cesd_sum)
summary(oneyear$senti)
#m = 0.175, [-0.33, 0.59] 1st/3rd
three<- senti_h[senti_h$day <= 1095 , ]
summary(three$cesd_sum)
summary(three$senti)
#m = 0.16, [-0.34, 0.60] 1st/3rd
################################low valence group
userSenti <- aggregate(file$sentiment, list(file$userid), sum)
weiboCount <- aggregate(file$userid, list(file$userid), length)
weiboCount2 <- cbind(userSenti,weiboCount)
colnames(weiboCount2) <- c('userid','SentiSum','userid2','count')
weiboCount2$userid2 <- NULL
weiboCount2$Mvalence <- weiboCount2$SentiSum/weiboCount2$count
summary(weiboCount2$Mvalence)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#-0.800000 -0.009214 0.113420 0.122521 0.255556 0.950000
#here we have users with low valence
lowValence <- weiboCount2[weiboCount2$Mvalence < -0.009, ]
#among people who fill out the life change questionnaire how many of them have low valence
#let's see if they have any life changes happen
lowValenceEvent <- merge(lowValence,event, by.x = 'userid', by.y = 'user_id')
##51 answer big changes in life questions have lower valence (1st Quantile)
#let's see their cesd score
lowValenceCESD <- merge(lowValence,cesd, by.x = 'userid', by.y = 'userid')
lowValenceCESD$cesd_sum <- rowSums(lowValenceCESD[,6:16])
summary(lowValenceCESD$cesd_sum)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 11.00 19.00 23.00 24.18 28.00 44.00
dep <- lowValenceCESD[lowValenceCESD$cesd_sum > 22, ]
#among those with life changes, do those with low valence tend to have high cesd
#see the cesd of those with life changes, they dont have particular high cesd
eventCESD <- merge(event,cesd, by.x = 'user_id', by.y = 'userid')
eventCESD <- eventCESD[!duplicated(eventCESD$user_id), ]
eventCESD$cesd_sum <- rowSums(eventCESD[,8:18])
sd(eventCESD$cesd_sum, na.rm=TRUE)
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 11.00 19.00 22.00 23.99 27.00 47.00 1
#let's merge this with valence
# 279 people fill out the event scale
eventCesdValence <- merge(eventCESD,weiboCount2, by.x = 'user_id', by.y = 'userid')
#among the whole sample how many people have high depression score
dep <- eventCesdValence[eventCesdValence$cesd_sum > 22, ] #n =57 / 137
57/251 # 0.204 of them have high depression score
119/251 #0.474
#among those with big changes in life, how many of them have high depression score
change <- eventCesdValence[eventCesdValence$Big_change == 1, ]
dep2 <- change[change$cesd_sum > 22, ] #55
34/77 # 0.44 of them have high depression score
53/77 #0.68.8
Nochange <- eventCesdValence[eventCesdValence$Big_change == 2, ]
d <- Nochange[Nochange$cesd_sum > 22, ]
66/174
#among those with low valence all time, how many of them have high depression score
lowValence2 <- eventCesdValence[eventCesdValence$Mvalence < -0.009, ] #n = 50 have lower than mean valence
dep3 <- lowValence2[lowValence2$cesd_sum >= 22, ] # n = 13 of them have depression #30
13/50 #0.26 of them have high depression score
30/50 ##0.6
highValence2 <- eventCesdValence[eventCesdValence$Mvalence >= -0.009, ] #n = 50 have lower than mean valence
dep6 <- highValence2[highValence2$cesd_sum >= 22, ]
108/201
changeH <- highValence2[highValence2$Big_change == 1, ]
dep7 <- changeH[changeH$cesd_sum > 22, ]
42/61
#among those with low valence all time, how many of them have high depression score even when no changes happen in life?
Nochange <- eventCesdValence[eventCesdValence$Big_change == 2, ] #174
lowValenceNoChange <- Nochange[Nochange$Mvalence < -0.009, ] #34
depp <- lowValenceNoChange[lowValenceNoChange$cesd_sum >= 22, ] #23 #19
19/34# 0.67 of them have high depression score #0.558
highValenceNoChange <- Nochange[Nochange$Mvalence >= -0.009, ] #140
de <- Nochange[Nochange$cesd_sum >= 22, ] #23 #82
23/174
82/140 #0.58
#among those with low valence all time and big change in life, how many of them have high depression score after big changes happen?
lowValence3 <- change[change$Mvalence < -0.009, ] #n= 16
dep4 <- lowValence3[lowValence3$cesd_sum >= 22, ] #n = 11
11/16 #0.5625 of them have high depression score #0.6875
#conclusion:negative life changes and long time low valence are important factor to predict depression score, there's an accumulating
#effect of these two factors.
#change_L: break up with a partner
#change_R: a family member was diagnosed with a severe illness
#change_R: unemployment
#change_O: other
#now let's see the 77 people reported life changes
change_L <- change[change$change_L == 1, ] # 33
change_R <- change[change$change_R == 1, ] # 23
change_J <- change[change$change_J == 1, ] # 12
change_O <- change[change$change_Other == 1, ] # 31
#people with more than two changes # 17
change$changeT <- rowSums(change[,3:6])
Twochange <- change[change$changeT >= 2, ]
#cesd score
summary(change_L$cesd_sum) # median 29
summary(change_R$cesd_sum) #median 23
summary(change_J$cesd_sum) #median 29
summary(change_O$cesd_sum) #median 25
summary(Twochange$cesd_sum) #median 26
#people with L and J are more likely to have high CESD
#median valence in the sample is 0.11
summary(change_L$Mvalence) # median 0.13
summary(change_R$Mvalence) #median 0.13
summary(change_J$Mvalence) #median 0.014
summary(change_O$Mvalence) #median 0.15
summary(Twochange$Mvalence) #median 0.12
#people with with unemployemnt has the lowest valence all time and highest CESD
#these people are struggling with a predisposing factor # need to see the qualitative data
##get posts from low valence group who has big changes and high CES-D
file2<- read.csv('~/phd_work/depression dessertation/data/sub_3year.csv')
posts <- file2[c('userid','weibo','time','day','sentiment')]
sample <- merge(posts, dep4, by.x ='userid', by.y='user_id') #(n = 205)
write.csv(sample,'qualitative.csv')
#get posts from recent 6 months
#among the four events, what event people tend to show more sign #7 ppl indicated, show signs of rumination
sample2 <- merge(posts, change_L, by.x ='userid', by.y='user_id') #(n = 1194)
de <- change_L[change_L$cesd_sum > 27, ] # 18 shows high cesd, 4 of them show signs of rumination
sample2 <- sample2[sample2$day <= 180, ]
write.csv(sample2,'qualitative_change_L.csv')
#social media data
#unemployment #no one shows sign in weibo
sample2 <- merge(posts, change_J, by.x ='userid', by.y='user_id') #(n = 1194)
sample2 <- sample2[sample2$day <= 180, ]
write.csv(sample2,'qualitative_change_J.csv')
#unemployment #no one shows sign in weibo only 2 ppl indicate
sample2 <- merge(posts, change_R, by.x ='userid', by.y='user_id') #(n = 1194)
sample2 <- sample2[sample2$day <= 180, ]
write.csv(sample2,'qualitative_change_R.csv')
|
f7c3101a89e41ef0438b4c333f27371e063376ff | 7a80b37618e249375c41d76cd34f21747daf94ff | /day06/r10.R | 20d9bd477198ca46c4f39dcd486e2cfeb19762fd | [] | no_license | gangdor/R | 57080ffcf7b3c2b56077af6eaf5ff5bb607cdff9 | 7e3ed6e7e79ab1d7a92be90b29ffed96585780da | refs/heads/master | 2020-03-07T09:19:41.099166 | 2018-04-09T12:09:39 | 2018-04-09T12:09:39 | 127,404,701 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 471 | r | r10.R | # 함수만들어서 그래프 저장하
g1 <- function(a){
library(ggplot2)
if(a==1){
p = ggplot(data=mpg, aes(x=displ,y=hwy))+geom_col()
jpeg(filename = 'c:/rproject/ais1.jpg', width=680,
height = 680, quality = 100);
}
else{
p = ggplot(data=mpg, aes(x=displ,y=hwy))+geom_col()
jpeg(filename = 'c:/rproject/aisnot1.jpg', width=680,
height = 680, quality = 100);
}
print(p)
dev.off()
}
|
4e6aaffb7ba3d7c3fa25e557fc8876afa813e150 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/6123_0/rinput.R | 341577cb847a6c78a1dfc6d13e945e4d692c5f5e | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("6123_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6123_0_unrooted.txt") |
af0591f5a769687b1adbfe0978f9a9e58a94c5c7 | 8a87ac5a13ed42ef99a7f0e566e07e32207c885a | /refm/api/src/rubygems/config_file.rd | b6ac125c7870d641fe95568f678740729a9333a9 | [] | no_license | mrkn/rubydoc | cf096e253afbe11d94a7b390645f18872754e5f1 | dff08665e8c537073d3ebab5b98d4bcd175055b5 | refs/heads/master | 2021-01-18T14:54:27.743911 | 2010-08-15T06:15:58 | 2010-08-15T06:15:58 | 876,651 | 1 | 0 | null | null | null | null | EUC-JP | R | false | false | 4,866 | rd | config_file.rd | require rubygems
設定ファイルに書かれている gem コマンドのオプションを
オブジェクトに保存するためのライブラリです。
= class Gem::ConfigFile
設定ファイルに書かれている gem コマンドのオプションを
オブジェクトに保存するためのクラスです。
このクラスのインスタンスはハッシュのように振る舞います。
== Public Instance Methods
--- [](key) -> object
#@todo
引数で与えられたキーに対応する設定情報を返します。
@param key 設定情報を取得するために使用するキーを指定します。
--- []=(key, value)
#@todo
引数で与えられたキーに対応する設定情報を自身に保存します。
@param key 設定情報をセットするために使用するキーを指定します。
@param value 設定情報の値を指定します。
--- args -> Array
#@todo
設定ファイルオブジェクトに与えられたコマンドライン引数のリストを返します。
--- backtrace -> bool
#@todo
エラー発生時にバックトレースを出力するかどうかを返します。
真の場合はバックトレースを出力します。そうでない場合はバックトレースを出力しません。
--- backtrace=(backtrace)
#@todo
エラー発生時にバックトレースを出力するかどうか設定します。
@param backtrace 真を指定するとエラー発生時にバックトレースを出力するようになります。
--- benchmark -> bool
#@todo
真の場合はベンチマークを実行します。
--- benchmark=(benchmark)
#@todo
ベンチマークを実行するかどうか設定します。
@param benchmark 真を指定するとベンチマークを実行するようになります。
--- bulk_threshold -> Integer
#@todo
Bulk threshold value. If the number of missing gems are above
this threshold value, then a bulk download technique is used.
--- bulk_threshold=(bulk_threshold)
#@todo
Bulk threshold value. If the number of missing gems are above
this threshold value, then a bulk download technique is used.
--- config_file_name -> String
#@todo
設定ファイルの名前を返します。
--- each{|key, value| ... }
#@todo
設定ファイルの各項目のキーと値をブロック引数として与えられたブロックを評価します。
--- handle_arguments(arg_list)
#@todo
コマンドに渡された引数を処理します。
@param arg_list コマンドに渡された引数の配列を指定します。
--- load_file(file_name) -> object
#@todo
与えられたファイル名のファイルが存在すれば YAML ファイルとしてロードします。
@param file_name YAML 形式で記述された設定ファイル名を指定します。
--- path -> String
#@todo
Gem を探索するパスを返します。
--- path=(path)
#@todo
Gem を探索するパスをセットします。
--- really_verbose -> bool
#@todo
このメソッドの返り値が真の場合は verbose モードよりも多くの情報を表示します。
--- update_sources -> bool
#@todo
真の場合は [[c:Gem::SourceInfoCache]] を毎回更新します。
そうでない場合は、キャッシュがあればキャッシュの情報を使用します。
--- update_sources=(update_sources)
#@todo
@param update_sources 真を指定すると毎回 [[c:Gem::SourceInfoCache]] を更新します。
--- verbose -> bool | Symbol
#@todo
ログの出力レベルを返します。
@see [[m:Gem::ConfigFile#verbose=]]
--- verbose=(verbose_level)
#@todo
ログの出力レベルをセットします。
以下の出力レベルを設定することができます。
: false
何も出力しません。
: true
通常のログを出力します。
: :loud
より多くのログを出力します。
@param verbose_level 真偽値またはシンボルを指定します。
--- write
#@todo
自身を読み込んだ設定ファイルを書き換えます。
== Protected Instance Methods
--- hash -> Hash
#@todo
== Constants
--- DEFAULT_BACKTRACE -> false
#@todo
デフォルトでバックトレースが表示されるかどうか
--- DEFAULT_BENCHMARK -> false
#@todo
--- DEFAULT_BULK_THRESHOLD -> 1000
#@todo
--- DEFAULT_UPDATE_SOURCES -> true
#@todo
--- DEFAULT_VERBOSITY -> true
#@todo
--- OPERATING_SYSTEM_DEFAULTS -> {}
#@todo
Ruby をパッケージングしている人がデフォルトの設定値をセットするために使用します。
使用するファイルは rubygems/defaults/operating_system.rb です。
--- PLATFORM_DEFAULTS -> {}
#@todo
Ruby の実装者がデフォルトの設定値をセットするために使用します。
使用するファイルは rubygems/defaults/#{RUBY_ENGINE}.rb です。
--- SYSTEM_WIDE_CONFIG_FILE -> String
#@todo
|
b03a2878e00b533a6b556e9945e5832cbb1f53c3 | 091771e6a8bf3a8f63b0e8e21368ad1fccda9979 | /SVMcode.R | 78e9ec1a77788cbc91e836ff8afb1a65332f6c58 | [
"Apache-2.0"
] | permissive | CarlosSequi/DataMining-KaggleCompetition-SVM-KNN-DecisionTree | d1f89b437c4e6aa828bbbb7fdcf48548a1a2c82f | bcb19fe4cf483e7152a6d2cf2a54e6c6dd978b33 | refs/heads/master | 2020-06-11T07:01:15.774182 | 2019-06-26T10:42:05 | 2019-06-26T10:42:05 | 193,884,795 | 0 | 0 | null | null | null | null | ISO-8859-13 | R | false | false | 1,674 | r | SVMcode.R | # Cargamos la librerķa con las funciones para SVM
library(e1071)
library(caret)
library(mRMRe)
library(NoiseFiltersR)
library(mice)
library(DMwR2)
library(unbalanced)
set.seed(42)
train = as.data.frame(read.csv("data/train.csv", na.strings = c("?")))
test = as.data.frame(read.csv("data/test.csv"), na.strings = c("?"))
train.preprocessed <- train
train.preprocessed <- train.preprocessed[!duplicated(train.preprocessed),]
for (colidx in 1:50){
x <- train.preprocessed[,colidx]
# Quitamos outliers
x.out.low_err <- which(x < -50000)
train.preprocessed[c(x.out.low_err), colidx] <- NA
}
train.clean <- na.omit(train.preprocessed)
sel_vars <- 1:50
train.x <- train.clean[,sel_vars]
train.y <- factor(train.clean$C)
# Ya tendrķamos los datos listos para el clasificador
system.time(model <- svm(x=train.x, y=train.y, kernel = "radial", cost = 1, cross = 5))
summary(model)
# Calculamos la precision
train.x.pred <- predict(model, newdata = train.full.imputed.x)
cm <- confusionMatrix(train.x.pred, train.y)
cm
# Para el dataset completo
train.pred <- predict(model, newdata = train.full.imputed[,1:50])
cm <- confusionMatrix(train.pred, factor(train.full.imputed[,51]))
cm
test.pred <- as.numeric(predict(model, newdata = test))-1
test.preprocessed.pred <- as.numeric(predict(model, newdata = test.preprocessed))-1
output.test.pred <- data.frame("Id"=seq.int(length(test.pred)), "Prediction"=test.pred)
head(test.pred)
# Comparamos...
length(test.pred)
length(which(test.pred != test.preprocessed.pred))
# Si son iguales, no merece la pena subirlas
write.csv(output.test.pred, file = "data/predictionSubmission.csv", quote = FALSE, row.names = FALSE)
|
c30c235912d78fcc0560934e7a3fb0cc48f1c2b7 | f9e21f8402098e77b879271ac6aafe7954f73bcd | /Guertin_week4/section6.2.R | e84f351af3bfe03601d4583eb3b7e92c91aa164d | [] | no_license | guertinlab/bioc8145 | fd87e78e49c2e069a385d382213d12200964ad6a | 358c56e68a38a66d8da70db96dc6d72dbab13fa6 | refs/heads/master | 2023-05-13T20:57:11.322968 | 2021-06-11T15:07:00 | 2021-06-11T15:07:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,791 | r | section6.2.R | # if on rivanna
#module load gcc openmpi R/3.6.1
#you only need to run the next line once to install the packages
source('install_pkgs.R')
library(lattice)
library(DESeq2)
library(dplyr)
library(fgsea)
library(ggplot2)
library(gage)
library(limma)
source('https://raw.githubusercontent.com/mjg54/znf143_pro_seq_analysis/master/docs/ZNF143_functions.R')
path.dir='/scratch/mjg7y/'
file.suffix='.gene.counts.txt'
setwd(path.dir)
#keep track of the experiment names in this list so I can go back and label the columns
vec.names = c()
#we only want generate a data frame label the rows for the first file we loop through
count = 0
#here we will loop through our suffix-defined files in the defined directory
for (txt in Sys.glob(file.path(path.dir, paste0("*", file.suffix)))) {
count = count + 1
#this complicated code simply splits strings to extract the experiment name from the file
experiment.name = strsplit(strsplit(txt,
"/")[[1]][length(strsplit(txt, "/")[[1]])], file.suffix)[[1]][1]
print(experiment.name)
#only do this for the first file, since count is greater than 1 with every other file
if (count == 1) {
#generate a data frame with the gene row names and no columns
all.counts = data.frame(row.names = read.table(txt)[,1])
}
#add the experiment name to a growing list of column names
vec.names = c(vec.names, experiment.name)
#for each file (including the first file) add a column with the counts information
all.counts = cbind(all.counts, data.frame(read.table(txt)[,2]))
}
#the last 5 lines are not gene counts, so delete them
all.counts = all.counts[1:(nrow(all.counts) - 5),]
#name the columns
colnames(all.counts) = vec.names
head(all.counts)
dim(all.counts)
|
01ce68eb01e0820fdba129987cd695854a319fda | 1a50b4f1ec326c3c876071f7455b623abf5e84c3 | /man/getCoefficients.Rd | cd03e8171e9142d0f2b4b74c3d1c67c7aa6a6d6e | [] | no_license | larajiuk/screenr | 671d3201c8a6d2d269d4c236afef7558cc64e7ab | d5dc80934258f3f68350c0318ccf31d4617e952f | refs/heads/master | 2022-10-20T16:24:50.804955 | 2020-06-23T19:56:52 | 2020-06-23T19:56:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 481 | rd | getCoefficients.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomialScreening.R
\name{getCoefficients}
\alias{getCoefficients}
\title{Extract the estimated coefficients from \code{binomscreenr} objects}
\usage{
getCoefficients(x)
}
\arguments{
\item{x}{An object of class \code{biomscreenr}}
}
\value{
A numeric vector containing the estimated coefficients on the logit
scale.
}
\description{
Extract the estimated coefficients from \code{binomscreenr} objects
}
|
962bfe3af9420de1c9fffa521496656187f2aaf0 | c0fd3479d07b93417b5ec2b6f59d2e20795ce1e1 | /Broad Analysis/Analysis Fold Cange Matrix.R | e834f2b27a045e8c2693a44b72474d59d247ea09 | [] | no_license | datascience-mobi/project-02-group-05 | 3f16460cacde49d12b5dfefab123a429c72a3e64 | 2af103e768f597ac03d23fcfa6240bebaffef23d | refs/heads/master | 2020-05-07T10:10:18.965108 | 2019-07-24T09:35:56 | 2019-07-24T09:35:56 | 180,405,392 | 0 | 2 | null | 2019-07-04T22:40:10 | 2019-04-09T16:12:44 | HTML | UTF-8 | R | false | false | 4,501 | r | Analysis Fold Cange Matrix.R |
################################# Analyse Fold Change Matrix #####################################
##################################################################################################
##################################################################################################
########################################### old way ##############################################
# creat FC data
FC_all = (Treated - Untreated)
FC_all_mean=colMeans(FC_all)
#FC_all_mean_mat=as.matrix(FC_all_mean)
# creat names vector
all_drug_names = Metadata$drug
length(all_drug_names)
# prepare barplot
#farben die spaeter im plot benutzt werden
color_vector_rainbow = rainbow(15)
#index fuer das colorvector_rainbow array
index = -1
#string um zu checken, ob eine neue categorie ist
old_name = ""
#array indem spaeter die farben gespeichert werden
color_array = c()
#loop ueber alle namen
for(i in 0:length(all_drug_names)){
#print(paste(all_drug_names[i], " - ",old_name, "Bool:", !identical(old_name, all_drug_names[i])))
# Wenn der gen name sich aendert zaehlen wir den index hoch, da wir dann eine andere farben haben moechten
if(!identical(old_name, all_drug_names[i])){
index = index + 1
old_name = all_drug_names[i]
}
# fuer jeden namen fuege die aktuelle farbe hinzu
color_array[i] <- color_vector_rainbow[index]
}
# erstelle dataframe mit den spalten
df_test <- data.frame("ID" = 1:819, "Color" = color_array, "Name" = all_drug_names, "MeanValue" = as.vector(FC_all_mean))
#barplot
barplot( height = df_test$MeanValue, names= FALSE, col = df_test$Color, border = NA)
##################################################################################################
##################################################################################################
########################################### new way ##############################################
# creat FC data
FC_all = (Treated - Untreated)
FC_all_mean=colMeans(FC_all)
# creat levels for coloring
drug <- Metadata$drug
palette(rainbow(15))
# creat boxplot
barplot( height = FC_all_mean, names= FALSE, col = drug, border = NA,main= "Fold change")
# creat legend
levels <- as.factor(levels(drug))
legend("topright", inset = c(-0.3,0), legend= levels(drug), xpd = TRUE, pch=19, col = levels, title = "drugs")
##################################################################################################
##################################################################################################
######################################### scatter plot ###########################################
### drug
plot(FC_all_mean, col= Metadata$drug, main="Fold change with drugs")
# legend
drug <- Metadata$drug
levels <- as.factor(levels(drug))
legend("topright", inset = c(-0.3,0), legend= levels(drug), xpd = TRUE, pch=19, col = levels, title = "drugs")
# in general similar FC values, only 5-Azacytidine and bortezomib have clear outliners
### tissue
plot(FC_all_mean, col= Metadata$tissue,main="Fold change with tissues")
# legend
tissue <- Metadata$tissue
levels <- as.factor(levels(tissue))
legend("topright", inset = c(-0.3,0), legend= levels(tissue), xpd = TRUE, pch=19, col = levels, title = "tissues")
# no correlation with tissue
##################################################################################################
##################################################################################################
######################################### density plot ###########################################
plot(density(FC_all_mean), main= "density Fold change")
# normally distributed
# most values between -0.75 and 0.5
##################################################################################################
##################################################################################################
#################################### find Top 10 values #########################################
### find all min and max
FC_all_min= (apply(FC_all,2,min))
FC_all_max= (apply(FC_all,2,max))
### sort min and max
# most down regulated gene
largest10_FC_all_min <- (sort(FC_all_min, decreasing = F)[1:10])
largest10_FC_all_min
# frequent drugs: vorinostat (3), bortezomib (3)
# fequent cell line: OVCAR-4 (3)
# most up regulated genes
largest10_FC_all_max <- (sort(FC_all_max, decreasing = T)[1:10])
largest10_FC_all_max
# frequent drugs: bortezomib (6)
# fequent cell line: none
|
c38b19be85302e684ad487dcda6546bc4e5511fe | 902c3d15223c86fb0170abb1d72c144d6277527e | /run.R | d1eaf8ba3c6c4912e54549b7b1506bc4b74ed516 | [] | no_license | selinerguncu/Attraction-Effect | 082a662ae3e8e85bef6603e2e64eb6b47c0c40f0 | 8caf4cac9c65a044248199731ee7d3d290f6fac0 | refs/heads/master | 2021-01-01T05:20:56.297195 | 2016-05-16T16:01:27 | 2016-05-16T16:01:27 | 58,946,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | run.R | install.packages('R2WinBUGS')
library('R2WinBUGS')
source('./1_cleaning.R')
source('./2_brandset.R')
source('./3_pairs.R')
source('./4_distance.R')
source('./5_competitors.R')
source('./6_allBrandsMerged.R')
|
1de14f3da86a49c52e1943231c8c56ef8cc3bb52 | 5900034ca65f07b3af5898e60b54aa81d8020a7e | /man/is_vectorised.Rd | 191a3f0082eee8b88a41608a53615ae5cf05c37e | [
"MIT"
] | permissive | pmcharrison/seqopt | c2ea9d7176d204a5b55c61f84d655958c9c3a43c | b1ef5dfbc5c68f21500a8bbc15d61fe8a1875d47 | refs/heads/master | 2020-03-26T00:43:10.412179 | 2019-03-28T10:39:55 | 2019-03-28T10:39:55 | 144,333,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 311 | rd | is_vectorised.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cost-fun.R
\name{is_vectorised}
\alias{is_vectorised}
\title{Is vectorised?}
\usage{
is_vectorised(x)
}
\arguments{
\item{x}{Object to check.}
}
\value{
Logical scalar.
}
\description{
Checks whether a cost function is vectorised.
}
|
6dca1047075bc676a152e8506136fc9c1fdb40ba | 94af76e0b5c187a0dbf55d6443d62714f0d6590f | /TP4/tp4/phonemes.r | 5e65597a99f92906f89f9d81593d5646c2fba046 | [] | no_license | paulvale/SY19-MachineLearning | cca6898cc0e335340daae23ce7dbdb4e6bbf7507 | bdf76041db0d4f99ac0b620fce98571d3c32e599 | refs/heads/master | 2020-12-14T09:51:55.435480 | 2017-01-11T13:21:07 | 2017-01-11T13:21:07 | 68,795,837 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,991 | r | phonemes.r | # LOAD DATA
library(MASS)
library(pROC)
library(nnet)
library(glmnet)
library(leaps)
library("FNN", character.only = TRUE)
library("hydroGOF", character.only = TRUE)
library(tree)
library(e1071)
phoneme = read.table("data/phoneme.data.txt",header=T,sep=",")
# ====
# x.1, x.2, ... x.256, g, speaker
# ====
x <- phoneme$g
y <- phoneme$speaker
# Pour que tu ne sois pas paumé et que tu comprennes ce que j'ai fait, je laisse dans le script pour le moment
# et il te suffit de print a chaque fois
# 1ere question : devons nous entrainer notre modele a reconnaitre les mots d'un certain locuteur
# ou devons nous reussir a reconnaitre pour n'importe qui et c'est la reconnaissance du mot qui
# est importante ?
# Marceau : De mon point de vue, ce jeu de données est fait pour aider a la reconnaissance de discours ("from the TIMIT database
# which is a widely used resource for research in speech recognition"). On entend par la que les personnes qui veulent creer des programmes de reconnaissances vocales utilisent cette base de donnees
# En fouillant un peu sur internet on peut se rendre copte assez facilement qu'au final on a aucune info sur ces speakers. Je penche vers la version suivante :
# Nous devons entrainer notre modele pour reussir a reconnaitre pour n'importe qui et c'est la reconnaissance du mot qui
# est importante
# cette prise de position est primordiale, car elle va notamment impliquer une difference au niveau
# de notre division des datas ( test et apprentissage)
# - si on veut bien reconnaitre que les personnes presentes dans nos datas alors il faut diviser notre
# ensemble de maniere a avoir 2/3 des datas d'une personnes dans l'apprentissage et 1/3 de
# ses enregistrements dans le test.
# - si on veut reconnaitre maintenant avant tous l'utilisation des mots dans le discours,
# alors il est plus interessant de garder 2/3 des personnes et donc tout leur enregistrements
# dans les donnees d'apprentissage et de garder 1/3 des personnes inconnus par notre modele.
# Marceau : Je penche donc vers ce deuxieme modele. J'aurai cependant une petite remarque a faire la dessus : comme nous cherchons ici a reconnaitre
# seuleemnt certains mots (sh, dcl ...), le but de notre modele sera de dire : tiens le mot que je viens d'entendre c'est "sh".
# Il serait peut etre plus judicieux de prendre 2/3 des enregistrements de chaque mot à reconnaitre.
# === Description de nos datas ===
# je t'invite deja a lire et relire le doc phoneme.info.txt qui nous ait fournis
# j'avoue que j'ai mis du temps a comprendre mais voila ce que j'ai retenu sur nos datas :
# on fait de la reconnaissance vocale sur des discours, ce que l'on veut retenir nous
# ce n'est que 5 phonemes 'aa', 'sh', 'dck', 'ao', et 'iy' (contrairement a la vraie reconnaissance vocale ou tu as
# beaucoup plus de phonemes a prendre en compte )
# Pour pouvoir creer un modele de reconnaissance de ces phonemes, on a eu tout d'abord
# 50 hommes qui ont fait des discours, on en a retire 4509 petits bouts de discours de 32ms
# avec environ 2 examples de phonemes par speaker.
# 1 frame ( petit bouts de discours ) = 1 phoneme = 512 samples ( on le divise en 512 petits bouts again)
# Voila la repartition de nos differents frames
# aa ao dcl iy sh
# 695 1022 757 1163 872
# Pour chacun de nos frames, on fait un log-periodogram, technique utilisé dans la reconnaissance vocale
# et ce log-periodogram comprend 256 features
# nos datas
# --------------------------------------------- SCALING DATAS ---------------------------------------------------------
print("scaling datas")
# Toutes les datas ont plus ou moins une variance et une une moyenne identique, nous centrons et reduisons quand meme
# pour etre sur de rendre ces variables comparables
phoneme[,2:257] <- as.data.frame(scale(phoneme[,2:257]))
# -------------------------------------------- DATA SEPARATION --------------------------------------------------------
print("data separation")
# Marceau : Je propose la dessus de respecter les 5 criteres de separation donnes par TIMIT
# 1 : 2/3 de train et 1/3 de test
# 2 : Aucun speaker ne doit apparaitre dans train et test (il suffit de faire attention pour cela a bien basculer tous les enregistrements
# par speakers quand on fait un ensemble)
# 3 : Toutes les regions doivent apparaitre dans nos ensembles de test/train, avec au moins un speaker de chaque sexe (a verifier a la main)
# 4 : Tous les phonemes doivent apparaitre (ici on fait justement attention a ca, en prenant la meme proportion de chaque phonemes)
# J'ai analysé chaque jeu de donnees suivant le phoneme, ca m'a permis de creer a la main les ensembles d'apprentissage et de test en faisant attention
# aux criteres annoces ci dessus. On a bien des ensembles avec 2/3 de train et 1/3 de test, aucun speaker n'apparait dans le test et dans le train.
# les speaker de chaque region + pour chaque sexe + pour chaque phoneme sont presents dans nos ensembles de test ET train
# --- creation de l'ensemble pour les aa ---
aa <- subset(phoneme,g=='aa')
#il faut en rajouter 56
aa.train <- rbind(aa[1:450,],aa[495:507,])
aa.test <- rbind(aa[451:494,],aa[508:695,])
# --- creation de l'ensemble pour les sh ---
sh <- subset(phoneme,g=='sh')
#il faut en rajouter 67
sh.train <- rbind(sh[1:569,],sh[621:635,])
sh.test <- rbind(sh[570:622,],sh[636:872,])
# --- creation de l'ensemble pour les dcl ---
dcl <- subset(phoneme,g=='dcl')
#il faut en rajouter 57
dcl.train <- rbind(dcl[1:493,],dcl[540:552,])
dcl.test <- rbind(dcl[494:539,],dcl[553:757,])
# --- creation de l'ensemble pour les ao ---
ao <- subset(phoneme,g=='ao')
#il en manque 77
ao.train <- rbind(ao[1:667,],ao[730:744,])
ao.test <- rbind(ao[668:729,],ao[745:1022,])
# --- creation de l'ensemble pour les iy ---
iy <- subset(phoneme,g=='iy')
#il en manque 77
iy.train <- rbind(iy[1:756,],iy[811:832,])
iy.test <- rbind(iy[757:812,],iy[833:1163,])
# --- assemblage de tous les dataframes ---
phoneme.train <- rbind(aa.train, sh.train, ao.train, dcl.train, iy.train)
phoneme.train.data <- phoneme.train[,2:257]
phoneme.train.label <- phoneme.train$g
phoneme.test <- rbind(aa.test, sh.test, ao.test, dcl.test, iy.test)
phoneme.test.data <- phoneme.test[,2:257]
phoneme.test.label <- phoneme.test[,258]
# ------------------------------------------------ CONSTRUCTION OF MODELS -------------------------------------------------
print("Prédiction des modeles sans travail préalable des données")
#on doit ici predire une classe et non une valeur, on utilisera donc des methodes de classification et non de regression
# --- LDA - 8.6% d erreur ---
print("LDA - Error : ")
phoneme.lda <- lda(phoneme.train.label~.,data=phoneme.train.data)
phoneme.lda.pred <- predict(phoneme.lda, newdata=phoneme.test.data)
phoneme.lda.perf <- table(phoneme.test.label,phoneme.lda.pred$class)
phoneme.lda.error <- 1 - sum(diag(phoneme.lda.perf))/(nrow(phoneme.test))
print(phoneme.lda.error)
# --- QDA - 18.93% d erreur ---
print("QDA - Error : ")
phoneme.qda <- qda(phoneme.train.label~.,data=phoneme.train.data)
phoneme.qda.pred <- predict(phoneme.qda, newdata=phoneme.test.data)
phoneme.qda.perf <- table(phoneme.test.label,phoneme.qda.pred$class)
phoneme.qda.error <- 1 - sum(diag(phoneme.qda.perf))/(nrow(phoneme.test))
print(phoneme.qda.error)
# --- Regression logistique - 11.13% d'erreur ---
print("Regression logistique - Error : ")
phoneme.glmnet <- glmnet(as.matrix(phoneme.train.data),y=phoneme.train.label,family="multinomial")
phoneme.glmnet.pred <- predict(phoneme.glmnet,newx=as.matrix(phoneme.test.data),type="response",s=phoneme.glmnet$lambda.min)
# phoneme.glmnet.pred est un tableau 3 dimensions :
# - La premiere est sur nos obsvervations (1500 dans l'ensemble de test)
# - La deuxieme est sur nos types de phonemes (5types de phonemes)
# - La troisieme est sur l'iteration
phoneme.glmnet.res<-c(rep(0,1500))
for (i in 1:1500)
{
class <- ""
res<-which.max(phoneme.glmnet.pred[i,1:5,100])
{
if(res==1)
{
class <- "aa"
}
else if(res==2){
class <- "ao"
}
else if(res==3){
class <- "dcl"
}
else if(res==4){
class <- "iy"
}
else{
class <- "sh"
}
}
phoneme.glmnet.res[i] <- class
}
phoneme.glmnet.perf <- table(phoneme.test.label,phoneme.glmnet.res)
phoneme.glmnet.error <- 1 - sum(diag(phoneme.glmnet.perf))/(nrow(phoneme.test))
print(phoneme.glmnet.error)
# --- KPPV - 9.27% d'erreur - koptimal 8 ---
print("KNN - Errors : ")
phoneme.knn.error<-rep(0,20)
for(k in 8:8)
{
phoneme.knn <- knn(phoneme.train.data, phoneme.test.data, phoneme.train.label,k=k)
phoneme.knn.error[k] <- (length(which(FALSE==(phoneme.knn==phoneme.test.label))))/length(phoneme.test.label)
}
print(phoneme.knn.error)
# --- Classifiation tree - 14.8% d'erreur ---
print("TREE - Errors : ")
phoneme.tree<- tree(phoneme.train.label~ ., data=phoneme.train.data)
phoneme.tree.pred<-predict(phoneme.tree, phoneme.test.data, type="class")
phoneme.tree.perf <- table(phoneme.tree.pred, phoneme.test.label)
phoneme.tree.error <- (sum(phoneme.tree.perf)-sum(diag(phoneme.tree.perf)))/nrow(phoneme.test.data)
print(phoneme.tree.error)
# --- Classifieur bayesien naif - 12.53% d'erreur ---
print("BAYES - Errors : ")
phoneme.naive<- naiveBayes(phoneme.train.label~., data=phoneme.train.data)
phoneme.naive.pred<-predict(phoneme.naive,newdata=phoneme.test.data)
phoneme.naive.perf <-table(phoneme.test.label,phoneme.naive.pred)
phoneme.naive.error <- 1-sum(diag(phoneme.naive.perf))/nrow(phoneme.test.data)
print(phoneme.naive.error)
#regarder si toutes les varibles sont importants (regarder la correlation des variables)
#regularisation : regarder si rajouter un terme on a pas un meilleur modele (regarder la correlation des variables)
#facteur analysis : changemet de repere
#faire un ACP pour peut etre reduire
# ------------------------------------------------ SUBSET SELECTION -------------------------------------------------
print("Reduction du nombre de variable en utilisant la subset selection")
reg.fit<- regsubsets(phoneme.train.label~.,data=phoneme.train.data,method='forward',nvmax=256)
summary.regsubsets <- summary(reg.fit)
summary.regsubsets.which<-summary.regsubsets$which #permet de savoir quels variables sont dans quels modeles. (il faut decaler de 2)
LDA_ERROR <- matrix(0,ncol=2,nrow=256)
QDA_ERROR <- matrix(0,ncol=2,nrow=256)
KNN_ERROR <- matrix(0,ncol=2,nrow=256)
TREE_ERROR <- matrix(0,ncol=2,nrow=256)
BAYES_ERROR <- matrix(0,ncol=2,nrow=256)
GLMNET_ERROR <- matrix(0,ncol=2,nrow=256)
lda.min <- 100
qda.min <- 100
knn.min <- 100
tree.min <- 100
bayes.min <- 100
glmnet.min <- 100
lda.subset <- summary.regsubsets.which[2,3:257]
qda.subset <- summary.regsubsets.which[2,3:257]
knn.subset <- summary.regsubsets.which[2,3:257]
tree.subset <- summary.regsubsets.which[2,3:257]
bayes.subset <- summary.regsubsets.which[2,3:257]
glmnet.subset <- summary.regsubsets.which[2,3:257]
new.phoneme.knn.error <- c(0,20)
k.opt <- 0
# Main subset-selection loop: for the shortlisted subset sizes, refit every
# classifier on the i-variable model chosen by forward selection and record
# its test error, updating each family's running best subset.
for(i in 2:256)#no point going all the way to 256; the results above already cover it.
{
if(i==37 || i==132 || i==48 || i==46 ||i==60 || i==57) # This saves time because the optimal k values are already known
{
# Build the new train/test sets restricted to the variables kept by the i-variable model.
new.phoneme.train.data<-phoneme.train.data[,summary.regsubsets.which[i,3:257]]
new.phoneme.train.data<-as.data.frame(new.phoneme.train.data)
new.phoneme.test.data<-phoneme.test.data[,summary.regsubsets.which[i,3:257]]
new.phoneme.test.data<-as.data.frame(new.phoneme.test.data)
# Recompute the error rate of each model family on the reduced data.
# --- LDA - 7.87% error - 132 variables kept ---
new.phoneme.lda <- lda(phoneme.train.label~.,data=new.phoneme.train.data)
new.phoneme.lda.pred <- predict(new.phoneme.lda, newdata=new.phoneme.test.data)
new.phoneme.lda.perf <- table(phoneme.test.label,new.phoneme.lda.pred$class)
LDA_ERROR[i,2] <- 1 - sum(diag(new.phoneme.lda.perf))/(nrow(phoneme.test))
LDA_ERROR[i,1] <- i
if(LDA_ERROR[i,2]<lda.min)
{
lda.min <- LDA_ERROR[i,2]
lda.subset <- summary.regsubsets.which[i,3:257]
}
# --- QDA - 7.8% error - 37 variables kept ---
new.phoneme.qda <- qda(phoneme.train.label~.,data=new.phoneme.train.data)
new.phoneme.qda.pred <- predict(new.phoneme.qda, newdata=new.phoneme.test.data)
new.phoneme.qda.perf <- table(phoneme.test.label,new.phoneme.qda.pred$class)
QDA_ERROR[i,2] <- 1 - sum(diag(new.phoneme.qda.perf))/(nrow(phoneme.test))
QDA_ERROR[i,1] <- i
if(QDA_ERROR[i,2]<qda.min)
{
qda.min <- QDA_ERROR[i,2]
qda.subset <- summary.regsubsets.which[i,3:257]
}
# --- KNN - 7.87% error - optimal k 8 - 48 variables kept ---
# NOTE(review): KNN_ERROR[i,2] is overwritten at every k, so after the inner
# loop it holds the k=20 error, while knn.min/knn.subset/k.opt track the best
# (i, k) pair seen so far.
for(k in 1:20)
{
new.phoneme.knn <- knn(new.phoneme.train.data, new.phoneme.test.data, phoneme.train.label,k=k)
KNN_ERROR[i,2] <- (length(which(FALSE==(new.phoneme.knn==phoneme.test.label))))/length(phoneme.test.label)
KNN_ERROR[i,1] <- i
new.phoneme.knn.error[k]<-(length(which(FALSE==(new.phoneme.knn==phoneme.test.label))))/length(phoneme.test.label)
if(KNN_ERROR[i,2]<knn.min)
{
knn.min <- KNN_ERROR[i,2]
knn.subset <- summary.regsubsets.which[i,3:257]
k.opt <- k
}
}
# --- Classification tree - 12.53% error with 60 variables ---
new.phoneme.tree<- tree(phoneme.train.label~ ., data=new.phoneme.train.data)
new.phoneme.tree.pred<-predict(new.phoneme.tree, new.phoneme.test.data, type="class")
new.phoneme.tree.perf <- table(new.phoneme.tree.pred, phoneme.test.label)
TREE_ERROR[i,2] <- (sum(new.phoneme.tree.perf)-sum(diag(new.phoneme.tree.perf)))/nrow(phoneme.test)
TREE_ERROR[i,1] <- i
if(TREE_ERROR[i,2]<tree.min)
{
tree.min <- TREE_ERROR[i,2]
tree.subset <- summary.regsubsets.which[i,3:257]
}
# --- Naive Bayes classifier - 10.27% error with 57 variables ---
new.phoneme.naive<- naiveBayes(phoneme.train.label~., data=new.phoneme.train.data)
new.phoneme.naive.pred<-predict(new.phoneme.naive,newdata=new.phoneme.test.data)
new.phoneme.naive.perf <-table(phoneme.test.label,new.phoneme.naive.pred)
new.phoneme.naive.error <- 1-sum(diag(new.phoneme.naive.perf))/nrow(phoneme.test)
BAYES_ERROR[i,2] <- 1-sum(diag(new.phoneme.naive.perf))/nrow(phoneme.test)
BAYES_ERROR[i,1] <- i
if(BAYES_ERROR[i,2]<bayes.min)
{
bayes.min <- BAYES_ERROR[i,2]
bayes.subset <- summary.regsubsets.which[i,3:257]
}
# --------- multinomial logistic regression 7.73% error for 46 variables -----------------
# NOTE(review): a plain glmnet fit has no $lambda.min component (only cv.glmnet
# does), so s is NULL here and predict() returns the full lambda path; the
# slice [h,1:5,72] below picks the 72nd lambda (100 is used elsewhere in this
# file) — verify which lambda index was intended.
new.phoneme.glmnet <- glmnet(as.matrix(new.phoneme.train.data),y=phoneme.train.label,family="multinomial")
new.phoneme.glmnet.pred <- predict(new.phoneme.glmnet,newx=as.matrix(new.phoneme.test.data),type="response",s=new.phoneme.glmnet$lambda.min)
new.phoneme.glmnet.res<-c(rep(0,1500))
# Decode each test row: keep the class with the highest predicted probability
# (indices 1..5 map to the alphabetical factor levels aa, ao, dcl, iy, sh).
for (h in 1:dim(new.phoneme.test.data)[1])
{
class <- ""
res<-which.max(new.phoneme.glmnet.pred[h,1:5,72])
{
if(res==1)
{
class <- "aa"
}
else if(res==2){
class <- "ao"
}
else if(res==3){
class <- "dcl"
}
else if(res==4){
class <- "iy"
}
else{
class <- "sh"
}
}
new.phoneme.glmnet.res[h] <- class
}
new.phoneme.glmnet.perf <- table(phoneme.test.label,new.phoneme.glmnet.res)
print(1 - sum(diag(new.phoneme.glmnet.perf))/(nrow(new.phoneme.test.data)))
GLMNET_ERROR[i,2] <- 1 - sum(diag(new.phoneme.glmnet.perf))/(nrow(new.phoneme.test.data))
GLMNET_ERROR[i,1] <- i
if(GLMNET_ERROR[i,2]<glmnet.min)
{
glmnet.min <- GLMNET_ERROR[i,2]
glmnet.subset <- summary.regsubsets.which[i,3:257]
}
}
}
# Report the best test error reached by each family after subset selection.
print("Apres subset selection : ")
print("LDA error minimale : ")
print(lda.min)
print("QDA error minimale : ")
print(qda.min)
print("KNN error minimale : ")
print(knn.min)
print("TREE error minimale : ")
print(tree.min)
print("BAYES error minimale : ")
print(bayes.min)
# From this we conclude that variable selection reduces the average error of our models.
# For linear discriminant analysis the error drops to 7.87% when keeping 132 variables
# For quadratic discriminant analysis the error drops to 7.4% when keeping 37 variables
# For KNN the error drops to 7.87% when keeping 48 variables, with the optimal k still 8
# ------------------------------------------------ RIDGE & LASSO REGRESSION -------------------------------------------------
# Allows estimating a model with strongly correlated variables
# Advantage of ridge regression: highly correlated explanatory variables combine and reinforce each other
# This method keeps all variables but gives no way to know which ones carry more weight than others
# The LASSO method sets poorly explanatory variables to 0
# If variables are correlated, the algorithm picks one and zeroes the others
# ------------------------------------------------ PRINCIPAL COMPONENT ANALYSIS -------------------------------------------------
print("ACP")
# We performed a principal component analysis;
# the idea is to build a new factorial basis and fit models on it.
# NOTE(review): the test set is projected with its OWN princomp fit rather than
# the rotation learned on the training set, so train and test scores do not
# live in the same basis — verify this is intended.
phoneme.acp <- princomp(phoneme.train.data)
phoneme.acp.train.scores <- as.data.frame(phoneme.acp$scores)
phoneme.acp.test <- princomp(phoneme.test.data)
phoneme.acp.test.scores <- as.data.frame(phoneme.acp.test$scores)
# Looking at the eigenvectors, keeping 2 or 3 components already explains the 257 variables fairly well.
plot(phoneme.acp$scores[1:dim(phoneme.acp$scores)[1],1:2], col=c('red','green','yellow','black','blue')[phoneme.train.label])
# aa (red), sh (green) and dcl (black) overlap a lot on the first two principal axes
plot(phoneme.acp$scores[1:dim(phoneme.acp$scores)[1],2:3], col=c('red','green','yellow','black','blue')[phoneme.train.label])
# dcl separates very well from aa and sh on the second and third factorial axes
plot(phoneme.acp$scores[1:dim(phoneme.acp$scores)[1],1],phoneme.acp$scores[1:dim(phoneme.acp$scores)[1],3], col=c('red','green','yellow','black','blue')[phoneme.train.label])
# The last axis combination is not enough to separate aa and sh, which remain highly correlated.
# --- LDA on the first 5 components - 13% error ---
phoneme.acp.lda <- lda(phoneme.train.label~.,data=phoneme.acp.train.scores[,1:5])
phoneme.acp.lda.pred <- predict(phoneme.acp.lda, newdata=phoneme.acp.test.scores[,1:5])
phoneme.acp.lda.perf <- table(phoneme.test.label,phoneme.acp.lda.pred$class)
phoneme.acp.lda.error <- 1 - sum(diag(phoneme.acp.lda.perf))/(nrow(phoneme.test))
print("LDA avec ACP 5 dimensions: ")
print(phoneme.acp.lda.error)
# --- QDA on the first 5 components - 10.87% error ---
phoneme.acp.qda <- qda(phoneme.train.label~.,data=phoneme.acp.train.scores[,1:5])
phoneme.acp.qda.pred <- predict(phoneme.acp.qda, newdata=phoneme.acp.test.scores[,1:5])
phoneme.acp.qda.perf <- table(phoneme.test.label,phoneme.acp.qda.pred$class)
phoneme.acp.qda.error <- 1 - sum(diag(phoneme.acp.qda.perf))/(nrow(phoneme.test))
print("QDA avec ACP 5 dimensions: ")
print(phoneme.acp.qda.error)
# --- Multinomial logistic regression (glmnet) on the first 5 principal components - 11.07% error ---
# NOTE(review): a plain glmnet fit has no $lambda.min component (only cv.glmnet
# does), so s is NULL and predict() returns probabilities for the whole lambda
# path; the [, , 100] slice below picks the 100th (least penalised) lambda.
phoneme.acp.glmnet <- glmnet(as.matrix(phoneme.acp.train.scores[,1:5]),y=phoneme.train.label,family="multinomial")
phoneme.acp.glmnet.pred <- predict(phoneme.acp.glmnet,newx=as.matrix(phoneme.acp.test.scores[,1:5]),type="response",s=phoneme.acp.glmnet$lambda.min)
# phoneme.acp.glmnet.pred is a 3-d array: observations x classes x lambda.
# Decode each test row into a label by keeping the class with the highest
# probability. Vectorised replacement of the original per-row if/else ladder;
# class indices 1..5 map to the alphabetical factor levels (aa, ao, dcl, iy, sh),
# and the hard-coded 1500 row count is no longer needed.
phoneme.acp.glmnet.res <- c("aa","ao","dcl","iy","sh")[apply(phoneme.acp.glmnet.pred[,1:5,100], 1, which.max)]
# Confusion matrix and misclassification rate on the test set.
phoneme.acp.glmnet.perf <- table(phoneme.test.label,phoneme.acp.glmnet.res)
phoneme.acp.glmnet.error <- 1 - sum(diag(phoneme.acp.glmnet.perf))/(nrow(phoneme.test))
print("Regression logistique avec ACP 5 dimensions : ")
print(phoneme.acp.glmnet.error)
# --- KNN on the first 5 components - 11.2% error - optimal k 8 ---
# NOTE(review): the error vector is sized for 20 values of k but the loop only
# tries k = 1..10 — positions 11..20 stay 0.
phoneme.acp.knn.error<-rep(0,20)
for(k in 1:10)
{
phoneme.acp.knn <- knn(phoneme.acp.train.scores[,1:5], phoneme.acp.test.scores[,1:5], phoneme.train.label,k=k)
phoneme.acp.knn.error[k] <- (length(which(FALSE==(phoneme.acp.knn==phoneme.test.label))))/length(phoneme.test.label)
}
print("KNN avec ACP 5 dimensions: ")
print(phoneme.acp.knn.error)
# --- Classification tree on the first 5 components - 15.73% error ---
print("TREE avec 5 dimensions : ")
phoneme.acp.tree<- tree(phoneme.train.label~ ., data=phoneme.acp.train.scores[,1:5])
phoneme.acp.tree.pred<-predict(phoneme.acp.tree, phoneme.acp.test.scores[,1:5], type="class")
phoneme.acp.tree.perf <- table(phoneme.acp.tree.pred, phoneme.test.label)
phoneme.acp.tree.error <- (sum(phoneme.acp.tree.perf)-sum(diag(phoneme.acp.tree.perf)))/nrow(phoneme.test.data)
print(phoneme.acp.tree.error)
# --- Naive Bayes classifier on the first 5 components - 12.47% error ---
print("BAYES avec 5 dimensions : ")
phoneme.acp.naive<- naiveBayes(phoneme.train.label~., data=phoneme.acp.train.scores[,1:5])
phoneme.acp.naive.pred<-predict(phoneme.acp.naive,newdata=phoneme.acp.test.scores[,1:5])
phoneme.acp.naive.perf <-table(phoneme.test.label,phoneme.acp.naive.pred)
phoneme.acp.naive.error <- 1-sum(diag(phoneme.acp.naive.perf))/nrow(phoneme.test.data)
print(phoneme.acp.naive.error)
# Two conclusions can be drawn from this analysis:
# - The phonemes aa and sh are very similar and cannot be separated on the factorial axes
# - It would be interesting to rebuild models using the first and third factorial axes, which separate the classes well (except aa and sh)
# ------------------------------------------------ FDA -------------------------------------------------
# Fisher discriminant analysis: project the data onto the LDA scaling
# directions and refit all classifiers in that low-dimensional space.
print("FDA")
phoneme.fda.lda<-lda(phoneme.train.label~. ,data=phoneme.train.data)
U <- phoneme.fda.lda$scaling       # discriminant directions learned on the training set
X <- as.matrix(phoneme.train.data)
Z <- X%*%U                         # training data projected onto the discriminant axes
# NOTE(review): the test projection uses a SEPARATE lda fit on the test set
# (with its labels), so Ztest is not in the same basis as Z and leaks test
# labels — verify; the usual approach is Ztest <- Xtest %*% U.
phoneme.fda.lda.test<-lda(phoneme.test.label~. ,data=phoneme.test.data)
Utest <- phoneme.fda.lda.test$scaling
Xtest <- as.matrix(phoneme.test.data)
Ztest <- Xtest%*%Utest
# Scatter the training projections on the first two discriminant axes, one colour/symbol per phoneme.
cp1 <- 1
cp2 <- 2
plot(Z[phoneme.train.label=="aa",cp1],Z[phoneme.train.label=="aa",cp2],xlim=range(Z[,1]),ylim=range(Z[,2]),xlab="Z1",ylab="Z2")
points(Z[phoneme.train.label=="ao",cp1],Z[phoneme.train.label=="ao",cp2],pch=2,col="blue")
points(Z[phoneme.train.label=="dcl",cp1],Z[phoneme.train.label=="dcl",cp2],pch=3,col="red")
points(Z[phoneme.train.label=="iy",cp1],Z[phoneme.train.label=="iy",cp2],pch=4,col="pink")
points(Z[phoneme.train.label=="sh",cp1],Z[phoneme.train.label=="sh",cp2],pch=5,col="yellow")
legend("topleft", inset=.05, title="Phoneme", c("aa", "ao", "dcl","iy","sh"), fill=c("black","blue","red","pink","yellow"), horiz=TRUE)
# --- LDA on the FDA projections - 5.47% error ---
phoneme.fda.lda <- lda(phoneme.train.label~.,data=as.data.frame(Z))
phoneme.fda.lda.pred <- predict(phoneme.fda.lda, newdata=as.data.frame(Ztest))
phoneme.fda.lda.perf <- table(phoneme.test.label,phoneme.fda.lda.pred$class)
phoneme.fda.lda.error <- 1 - sum(diag(phoneme.fda.lda.perf))/(nrow(phoneme.test))
print("LDA avec FDA : ")
print(phoneme.fda.lda.perf)
print(phoneme.fda.lda.error)
# --- QDA on the FDA projections - 5.67% error ---
phoneme.fda.qda <- qda(phoneme.train.label~.,data=as.data.frame(Z))
phoneme.fda.qda.pred <- predict(phoneme.fda.qda, newdata=as.data.frame(Ztest))
phoneme.fda.qda.perf <- table(phoneme.test.label,phoneme.fda.qda.pred$class)
phoneme.fda.qda.error <- 1 - sum(diag(phoneme.fda.qda.perf))/(nrow(phoneme.test))
print("QDA avec FDA : ")
print(phoneme.fda.qda.perf)
print(phoneme.fda.qda.error)
# --- Multinomial logistic regression (glmnet) on the FDA projections - 5.27% error ---
# NOTE(review): a plain glmnet fit has no $lambda.min component (only cv.glmnet
# does), so s is NULL and predict() returns probabilities for the whole lambda
# path; the [, , 100] slice below picks the 100th (least penalised) lambda.
phoneme.fda.glmnet <- glmnet(as.matrix(Z),y=phoneme.train.label,family="multinomial")
phoneme.fda.glmnet.pred <- predict(phoneme.fda.glmnet,newx=as.matrix(Ztest),type="response",s=phoneme.fda.glmnet$lambda.min)
# phoneme.fda.glmnet.pred is a 3-d array: observations x classes x lambda.
# Decode each test row into a label by keeping the class with the highest
# probability. Vectorised replacement of the original per-row if/else ladder;
# class indices 1..5 map to the alphabetical factor levels (aa, ao, dcl, iy, sh),
# and the hard-coded 1500 row count is no longer needed.
phoneme.fda.glmnet.res <- c("aa","ao","dcl","iy","sh")[apply(phoneme.fda.glmnet.pred[,1:5,100], 1, which.max)]
# Confusion matrix and misclassification rate on the test set.
phoneme.fda.glmnet.perf <- table(phoneme.test.label,phoneme.fda.glmnet.res)
phoneme.fda.glmnet.error <- 1 - sum(diag(phoneme.fda.glmnet.perf))/(nrow(phoneme.test))
print("Regression logistique avec FDA : ")
print(phoneme.fda.glmnet.perf)
print(phoneme.fda.glmnet.error)
# --- KNN on the FDA projections - 5% error - optimal k 7 ---
# NOTE(review): the error vector is sized for 20 values of k but the loop only
# tries k = 1..10 — positions 11..20 stay 0.
phoneme.fda.knn.error<-rep(0,20)
for(k in 1:10)
{
phoneme.fda.knn <- knn(as.data.frame(Z), as.data.frame(Ztest), phoneme.train.label,k=k)
phoneme.fda.knn.error[k] <- (length(which(FALSE==(phoneme.fda.knn==phoneme.test.label))))/length(phoneme.test.label)
}
print("KNN avec FDA : ")
print(phoneme.fda.knn.error)
# --- Classification tree on the FDA projections - 5.2% error ---
print("TREE avec FDA : ")
phoneme.fda.tree<- tree(phoneme.train.label~ ., data=as.data.frame(Z))
phoneme.fda.tree.pred<-predict(phoneme.fda.tree, as.data.frame(Ztest), type="class")
phoneme.fda.tree.perf <- table(phoneme.fda.tree.pred, phoneme.test.label)
phoneme.fda.tree.error <- (sum(phoneme.fda.tree.perf)-sum(diag(phoneme.fda.tree.perf)))/nrow(phoneme.test.data)
print(phoneme.fda.tree.perf)
print(phoneme.fda.tree.error)
# --- Naive Bayes classifier on the FDA projections - 5.6% error ---
print("BAYES avec FDA: ")
phoneme.fda.naive<- naiveBayes(phoneme.train.label~., data=as.data.frame(Z))
phoneme.fda.naive.pred<-predict(phoneme.fda.naive,newdata=as.data.frame(Ztest))
phoneme.fda.naive.perf <-table(phoneme.test.label,phoneme.fda.naive.pred)
phoneme.fda.naive.error <- 1-sum(diag(phoneme.fda.naive.perf))/nrow(phoneme.test.data)
print(phoneme.fda.naive.perf)
print(phoneme.fda.naive.error)
# ------------------------------------------------ FDA + ACP -------------------------------------------------
# Same FDA pipeline as above, but applied to the first 5 principal components
# instead of the raw spectral variables. The Z/Ztest/U/X globals from the
# previous section are deliberately overwritten here.
print("FDA + ACP")
phoneme.fda.lda<-lda(phoneme.train.label~. ,data=as.data.frame(phoneme.acp.train.scores[,1:5]))
U <- phoneme.fda.lda$scaling       # discriminant directions in PCA-score space
X <- as.matrix(phoneme.acp.train.scores[,1:5])
Z <- X%*%U                         # training scores projected onto the discriminant axes
# NOTE(review): as in the FDA section, the test projection comes from a separate
# lda fit on the test set (label leakage, different basis) — verify.
phoneme.fda.lda.test<-lda(phoneme.test.label~. ,data=phoneme.acp.test.scores[,1:5])
Utest <- phoneme.fda.lda.test$scaling
Xtest <- as.matrix(phoneme.acp.test.scores[,1:5])
Ztest <- Xtest%*%Utest
# Scatter the projected training data on the first two discriminant axes.
cp1 <- 1
cp2 <- 2
plot(Z[phoneme.train.label=="aa",cp1],Z[phoneme.train.label=="aa",cp2],xlim=range(Z[,1]),ylim=range(Z[,2]),xlab="Z1",ylab="Z2")
points(Z[phoneme.train.label=="ao",cp1],Z[phoneme.train.label=="ao",cp2],pch=2,col="blue")
points(Z[phoneme.train.label=="dcl",cp1],Z[phoneme.train.label=="dcl",cp2],pch=3,col="red")
points(Z[phoneme.train.label=="iy",cp1],Z[phoneme.train.label=="iy",cp2],pch=4,col="pink")
points(Z[phoneme.train.label=="sh",cp1],Z[phoneme.train.label=="sh",cp2],pch=5,col="yellow")
legend("topleft", inset=.05, title="Phoneme", c("aa", "ao", "dcl","iy","sh"), fill=c("black","blue","red","pink","yellow"), horiz=TRUE)
# --- LDA on the FDA+PCA projections - 11% error ---
phoneme.fda.lda <- lda(phoneme.train.label~.,data=as.data.frame(Z))
phoneme.fda.lda.pred <- predict(phoneme.fda.lda, newdata=as.data.frame(Ztest))
phoneme.fda.lda.perf <- table(phoneme.test.label,phoneme.fda.lda.pred$class)
phoneme.fda.lda.error <- 1 - sum(diag(phoneme.fda.lda.perf))/(nrow(phoneme.test))
print("LDA avec FDA + ACP : ")
print(phoneme.fda.lda.error)
# --- QDA on the FDA+PCA projections - 10.73% error ---
phoneme.fda.qda <- qda(phoneme.train.label~.,data=as.data.frame(Z))
phoneme.fda.qda.pred <- predict(phoneme.fda.qda, newdata=as.data.frame(Ztest))
phoneme.fda.qda.perf <- table(phoneme.test.label,phoneme.fda.qda.pred$class)
phoneme.fda.qda.error <- 1 - sum(diag(phoneme.fda.qda.perf))/(nrow(phoneme.test))
print("QDA avec FDA + ACP : ")
print(phoneme.fda.qda.error)
# --- Multinomial logistic regression (glmnet) on the FDA+PCA projections - 11.4% error ---
# NOTE(review): a plain glmnet fit has no $lambda.min component (only cv.glmnet
# does), so s is NULL and predict() returns probabilities for the whole lambda
# path; the [, , 100] slice below picks the 100th (least penalised) lambda.
phoneme.fda.glmnet <- glmnet(as.matrix(Z),y=phoneme.train.label,family="multinomial")
phoneme.fda.glmnet.pred <- predict(phoneme.fda.glmnet,newx=as.matrix(Ztest),type="response",s=phoneme.fda.glmnet$lambda.min)
# phoneme.fda.glmnet.pred is a 3-d array: observations x classes x lambda.
# Decode each test row into a label by keeping the class with the highest
# probability. Vectorised replacement of the original per-row if/else ladder;
# class indices 1..5 map to the alphabetical factor levels (aa, ao, dcl, iy, sh),
# and the hard-coded 1500 row count is no longer needed.
phoneme.fda.glmnet.res <- c("aa","ao","dcl","iy","sh")[apply(phoneme.fda.glmnet.pred[,1:5,100], 1, which.max)]
# Confusion matrix and misclassification rate on the test set.
phoneme.fda.glmnet.perf <- table(phoneme.test.label,phoneme.fda.glmnet.res)
phoneme.fda.glmnet.error <- 1 - sum(diag(phoneme.fda.glmnet.perf))/(nrow(phoneme.test))
print("Regression logistique avec FDA + ACP : ")
print(phoneme.fda.glmnet.error)
# --- KNN on the FDA+PCA projections - 10.47% error - optimal k 16 ---
phoneme.fda.knn.error<-rep(0,20)
for(k in 1:20)
{
phoneme.fda.knn <- knn(as.data.frame(Z), as.data.frame(Ztest), phoneme.train.label,k=k)
phoneme.fda.knn.error[k] <- (length(which(FALSE==(phoneme.fda.knn==phoneme.test.label))))/length(phoneme.test.label)
}
print("KNN avec FDA + ACP : ")
print(phoneme.fda.knn.error)
# --- Classification tree on the FDA+PCA projections - 15.27% error ---
print("TREE - Errors avec FDA + ACP : ")
phoneme.fda.tree<- tree(phoneme.train.label~ ., data=as.data.frame(Z))
phoneme.fda.tree.pred<-predict(phoneme.fda.tree, as.data.frame(Ztest), type="class")
phoneme.fda.tree.perf <- table(phoneme.fda.tree.pred, phoneme.test.label)
phoneme.fda.tree.error <- (sum(phoneme.fda.tree.perf)-sum(diag(phoneme.fda.tree.perf)))/nrow(phoneme.test.data)
print(phoneme.fda.tree.error)
# --- Naive Bayes classifier on the FDA+PCA projections - 11.2% error ---
print("BAYES - Errors avec FDA + ACP : ")
phoneme.fda.naive<- naiveBayes(phoneme.train.label~., data=as.data.frame(Z))
phoneme.fda.naive.pred<-predict(phoneme.fda.naive,newdata=as.data.frame(Ztest))
phoneme.fda.naive.perf <-table(phoneme.test.label,phoneme.fda.naive.pred)
phoneme.fda.naive.error <- 1-sum(diag(phoneme.fda.naive.perf))/nrow(phoneme.test.data)
print(phoneme.fda.naive.error)
# FDA on its own gives better results, thanks to a better separation of the clusters
# ------------------------------------------------ INTERPRETATION -------------------------------------------------
# The phonemes aa and ao are very similar: most classification errors involve these two phonemes.
# As the error rate, I use the misclassification rate.
|
4a1c6d58f0d5c63693d510925d423cbe0969be65 | 90659cbbb3f4f79c9fff3679e6f8bb35e14643ee | /test.R | f2b1a259faf06913bfc17447865f28e0853d70cc | [] | no_license | xinye1/beta_strat_backtesting | 2be9c17ed4a97b07a1253cbaa67b9c8d7772f045 | dba1a6490a1dbbe9a72472f72e732ceac9e98e77 | refs/heads/main | 2023-01-27T16:19:11.459456 | 2020-11-25T15:07:04 | 2020-11-25T15:07:04 | 315,340,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,443 | r | test.R | index_col <- 'Eurostoxx600'
# Scratch/debug script for the beta-sorted portfolio backtest: sets example
# parameters, runs selectStocks/getStratRet step by step, then computes Sortino
# ratios and plots cumulative returns. Relies on objects defined elsewhere
# (prices, beta_all, start_year, end_year, portf_types, initial_capital and the
# project helpers getCycleCal, getCycleDates, getPortfRet, selectStocks,
# getStratRet, getReturn).
target_year <- 2019
target_cycle <- 1
holding_months <- 4
portf_size <- 30
beta_history <- 6
# Holding-cycle calendar derived from the trading dates in `prices`.
cycle_cal <- prices %>%
  select(date) %>%
  getCycleCal(start_year, end_year, holding_months)
test <- selectStocks(
  prices, index_col,
  cycle_cal, target_year, target_cycle,
  holding_months, portf_size,
  beta_history, T)
# NOTE(review): this bare `function(...)` header has no body of its own — R
# parses the NEXT expression (`prices_df <- prices`) as its body, evaluates the
# anonymous function and discards it. It looks like a pasted signature used to
# step through getStratRet's body manually; the assignments below emulate its
# arguments in the global environment.
function(prices_df, beta_df, start_year, end_year, h_mths, b_hist, p_size, p_type, p_wts)
prices_df <- prices
beta_df <- beta_all
h_mths <- 3
b_hist <- 6
p_size <- 30
p_type <- 'high'
p_wts <- 'equal'
# t_year <- 2019
# t_cycle <- 1
# NOTE(review): both branches assign the same equal weights — the non-equal
# weighting scheme is an unimplemented stub.
if (p_wts == 'equal') {
  weights <- rep(1, p_size) / p_size
} else {
  weights <- rep(1, p_size) / p_size
}
cycle_cal <- prices_df %>%
  select(date) %>%
  getCycleCal(start_year, end_year, h_mths)
# All (year, cycle) combinations to backtest over.
cycle_grid <- expand_grid(
  t_year = start_year:end_year,
  t_cycle = 1:(12 / h_mths))
# For each cycle: pick the tickers matching the strategy parameters, then
# compute the portfolio's normalised return path over that cycle.
tmp_return <- cycle_grid %>%
  pmap(function(t_year, t_cycle) {
    cycle_dates <- cycle_cal %>%
      getCycleDates(t_year, t_cycle)
    tickers <- beta_df %>%
      filter(
        year == t_year &
          cycle == t_cycle &
          holding_months == h_mths &
          portf_size == p_size &
          beta_history == b_hist &
          beta_type == p_type) %>%
      pull(ticker)
    prices_df %>%
      getPortfRet(
        tickers,
        cycle_dates[1], cycle_dates[2],
        weights)
  })
# End-of-cycle return per cycle, then the compounded cumulative return.
cycle_returns <- tmp_return %>%
  map_dbl(~ tail(.[['return']], 1))
cumul_returns <- cumprod(cycle_returns)
# Scale each cycle's daily path by the cumulative return reached before it,
# so the concatenated series is a continuous equity curve.
daily_returns <- list(
  df = tmp_return,
  ret = cumul_returns
) %>%
  pmap(function(df, ret) {
    df[['return']] <- df[['return']] * ret
    df
  }) %>%
  bind_rows()
test <- getStratRet(
  prices, beta_all, start_year, end_year,
  h_mths, b_hist, p_size, p_type, p_wts)
# Sortino ----
# NOTE(review): `daily_returns` built above has no portf_type column visible in
# this script — presumably the intent is to use getStratRet's output; confirm.
s_ratios <- portf_types %>% # calculate for low and high strategies
  map_dbl(function(x) {
    daily_returns %>%
      filter(portf_type == x) %>%
      pull(return) %>%
      getReturn() %>% # convert the normalised prices to returns
      SortinoRatio()
  })
# names(s_ratios) <- portf_types
# Render the Sortino ratios as a small HTML table.
data.frame(
  type = str_to_title(portf_types),
  s_ratios = round(s_ratios, 3)
) %>%
  kableExtra::kable(
    col.names = c('Type', 'Sortino Ratio'),
    format = 'html') %>%
  kable_styling(full_width = F)
# Plot the equity curves per portfolio type.
daily_returns %>%
  mutate(investment_value = return * initial_capital) %>%
  ggplot(aes(date, return, colour = portf_type)) +
  geom_line()
|
942783871cb2e48b3c2635b908fc19b5b5e364ad | 3ecb301d5a93745c00cdb8074982c6d386512668 | /Scripts/UrbanData.R | b7b23b98b0b1c76572482bf2b810055abcdb3f3a | [] | no_license | stevenmosher/BerkeleyEarthFunctions | 2b257a269808cf978fe1ba160a18d2b2c8b77799 | 0d4df017e00e820e5b851309f23f39d6b1e3808e | refs/heads/master | 2021-01-20T20:37:03.679288 | 2016-08-25T18:23:56 | 2016-08-25T18:23:56 | 62,077,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,209 | r | UrbanData.R | source("Initialize.R")
# Build a metadata table for GlobeLand30 land-cover tiles: list the TIF files,
# parse the lat/lon tile code out of each filename, derive each tile's
# bounding box, and write the result to CSV.
d1 <- "G:\\BerkeleyEarthFunctions\\Data\\GlobeLand30_ATS2010_1"
d2 <- "G:\\BerkeleyEarthFunctions\\Data\\GlobeLand30_ATS2010_2"
d3 <- "G:\\BerkeleyEarthFunctions\\Data\\GlobeLand30_ATS2010_3"
F1 <- list.files(d1, full.names=T,recursive = T, pattern= 'TIF')
F2 <- list.files(d2, full.names=T,recursive = T, pattern= 'TIF')
F3 <- list.files(d3, full.names=T,recursive = T, pattern= 'TIF')
Files <- c(F1,F2,F3)
fn <- basename(Files)
length(unique(fn))   # sanity check: tile names should be unique across the three dirs
# Parse the 6-character tile code at the start of each filename:
# assumes char 1 = hemisphere (N/S), chars 2-3 = longitude zone index,
# chars 5-6 = latitude band — TODO confirm against the actual naming scheme.
DF <- tbl_df(data.frame(Files=Files, Name=fn,stringsAsFactors = FALSE))
DF <- DF %>% mutate(LLcode= str_sub(Name,1,6)) %>% mutate(Lat=as.numeric(str_sub(Name,5,6))) %>%
  mutate(NS=str_sub(Name,1,1)) %>% mutate(dex=as.numeric(str_sub(Name,2,3))) %>%
  mutate(Lat =ifelse(NS=="N",Lat*1,Lat*-1)) %>%
  mutate(LatMin = ifelse(NS=="N",Lat,Lat-5)) %>% mutate(LatMax = ifelse(NS=="N",Lat+5,Lat)) %>%
  mutate(LonMin =-180 +((dex-1)*6)) %>% mutate(LonMax =-180 + dex*6)
# Above 60 degrees latitude the tiles are 12 degrees wide (odd zone indices),
# so recompute the longitude bounds for those rows.
s<- seq(from=1,to=59,by=2)
idex <- which(abs(DF$LatMax)>60 & DF$dex %in% s)
DF$LonMin[idex] <- -180 + (DF$dex[idex]%/%2)*12
DF$LonMax[idex] <- -180 + ((DF$dex[idex]%/%2)+1)*12
write.csv(DF, "UrbanFileMetaData.csv")
|
9af7158aa8acb5db30fcac61d01a6376bbb8d9ce | 4a35619d33a38a5294f8a3de11f77afed55c8101 | /cachematrix.R | f0d75e46d779e524c71ff2b221b6ddd88203b32b | [] | no_license | ericmwalkup/ProgrammingAssignment2 | cb84d4663c734bfee42d856eeec19103da70113a | 1d1b5075ffce04f8ab176e7813b52a4968adf9bd | refs/heads/master | 2021-01-13T07:59:28.419386 | 2016-10-23T10:03:33 | 2016-10-23T10:03:33 | 71,693,810 | 0 | 0 | null | 2016-10-23T09:40:21 | 2016-10-23T09:40:21 | null | UTF-8 | R | false | false | 1,571 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Creates a matrix, creates functions, makes a list of the functions
MakeCacheMatrix<-function(x,nr,nc)
{
minv<-NULL # set inverse to null in function env
mtx<-matrix(c(x),nr,nc) # create matrix from input 1xN vector
getmtx<-function() mtx # function to return the matrix
getinv<-function() minv # function to return the inverse
setinv<-function(inv) minv<<-inv # function to set inverse
# create a list with function item names
# and the functions items
list(getmtx=getmtx, getinv=getinv, setinv=setinv)
}
## Returns the inverse of the matrix held in xl (an object made by
## MakeCacheMatrix), computing it with solve() on the first call and returning
## the cached value on subsequent calls.
##   xl  - cache-matrix object exposing getmtx/getinv/setinv
##   ... - further arguments forwarded to solve()
CacheSolve<-function(xl, ...)
{
  minv<-xl$getinv() # previously cached inverse, if any
  if(!is.null(minv)) # cache hit: return it and skip the recomputation
  {
    message("getting cached data")
    return(minv)
  }
  mtx<-xl$getmtx() # cache miss: fetch the matrix,
  minv<-solve(mtx, ...) # invert it,
  xl$setinv(minv) # store the result for next time,
  minv # and return the inverse
}
##x<-c(2, 4, 3, 1, 5, 7, 6, 5, 4)
##listfunc<-MakeCacheMatrix(x,3,3)
##CacheSolve(listfunc)
##CacheSolve(listfunc) |
6d01d9fb927ff89f6c3798ba959284e8cbcf1d8c | 203d706a170d47dce247f438a9c5dd17dfbabff6 | /man/LogLikeliHood_byObs.Rd | beaa3686dee02beec83d2b6915f3e31fe8b1281c | [] | no_license | minister3000/TVTP2 | 6ff06c448975a17e3357469b87ee331cb6648ac7 | 7d16f5604c7520cadcc85dea43c568efed901139 | refs/heads/master | 2022-01-15T13:15:06.527404 | 2019-07-23T04:52:17 | 2019-07-23T04:52:17 | 198,351,311 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 484 | rd | LogLikeliHood_byObs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{LogLikeliHood_byObs}
\alias{LogLikeliHood_byObs}
\title{LogLikeliHood_byObs}
\usage{
LogLikeliHood_byObs(param, datax, n_explanatory, n_transition,
prob_initial_state_0, printLogLike)
}
\description{
\code{LogLikeliHood_byObs} is a convenience wrapper for function 'LogLikeliHood_FULL' that returns all observations'
log-likelihood constributions as required by certain optimizers.
}
|
8a1abb347a9b06f1249b79290441cd865d73f5c2 | 1a9b47bd2faf66aa674470811a53e0845bfb7286 | /test.R | aba0adb174444cf644beb514b805c9fb92d59593 | [] | no_license | kmina11/test-repo | 9edce92a98b8a73d865f94330498b3740bfe673a | b387c507c9767f327ba209e7c7a1ffec32166d49 | refs/heads/master | 2016-09-14T19:07:56.802855 | 2016-05-18T03:49:51 | 2016-05-18T03:49:51 | 57,351,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58 | r | test.R | ####This is test for git push and pull
a<-5 # scratch value (divisor below); file exists only to test git push/pull
b<-3 # scratch value (dividend below)
c<-b/a |
ee0586e460a44448a5746a5fc455511233682329 | 4570bdd4f6a1ef5f9fb6935da05338624d919d84 | /data-raw/building_permits.R | 5c1fa9ae7c7432ca41d3a82df120969517a44c9b | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hfboyce/datateachr | 41de4ffa4e0847b98ce5714231badd6d853afcf7 | 78d391f4ee0d3f3979ed38d4192c66d18361590b | refs/heads/master | 2022-12-07T09:42:43.755585 | 2020-08-21T20:32:41 | 2020-08-21T20:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,014 | r | building_permits.R | library(dplyr)
library(tidyverse)
# URL of the City of Vancouver issued-building-permits open-data export
# (semicolon-separated CSV, English labels as headers).
src <-
  "https://opendata.vancouver.ca/explore/dataset/issued-building-permits/download/?format=csv&timezone=America/Los_Angeles&lang=en&use_labels_for_header=true&csv_separator=%3B"
# Download and parse; ProjectDescription is forced to character so sparse
# columns are not mis-guessed.
raw <-
  read_delim(src,
             delim = ";",
             col_types = cols(ProjectDescription = col_character()))
# Rename the CamelCase export columns to snake_case for the packaged dataset.
building_permits <- raw %>%
  rename(
    permit_number = PermitNumber,
    issue_date = IssueDate,
    project_value = ProjectValue,
    type_of_work = TypeOfWork,
    address = Address,
    project_description = ProjectDescription,
    building_contractor = BuildingContractor,
    building_contractor_address = BuildingContractorAddress,
    applicant = Applicant,
    applicant_address = ApplicantAddress,
    property_use = PropertyUse,
    specific_use_category = SpecificUseCategory,
    year = Year,
    bi_id = BI_ID
  )
# Persist the package data (.rda) and a test fixture copy (.rds).
save(building_permits, file = "data/building_permits.rda", compress='bzip2')
saveRDS(building_permits, "tests/testthat/building_permits.rds")
|
8816fd2ead183cda0c7721ad9079b8871106a770 | 980f570c0e740594473d42757816981de58b5470 | /Rsrc/R/plot-cpue.R | 2437df3127e3a856766911243a484ca75a0a23f0 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | LarryJacobson/gmacs | 334f24fee6cd651f5e580d2225e1fac026d9757b | e5e999d3fdff2f392d6e48c11bdfe290bc672434 | refs/heads/master | 2021-01-14T12:44:45.044908 | 2015-01-16T01:05:01 | 2015-01-16T01:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,077 | r | plot-cpue.R | #' Plot cpue or other indices
#'
#' @param replist List object created by read_admb function
#' @return Plot of all observed and predicted incices
#' @export
plot_cpue <- function(replist){
df <- get_cpue(replist)
p <- ggplot(df,aes(year,cpue))
# p <- p + geom_point(aes(col=sex))
p <- p + geom_pointrange(aes(year,cpue,ymax=ub,ymin=lb,col=sex))
p <- p + labs(x="Year",y="CPUE",col="Sex")
pCPUE <- p + facet_wrap(~fleet+sex,scales="free")
# Fitted CPUE
pCPUEfit <- pCPUE + geom_line(data=df,aes(year,pred))
return(pCPUEfit)
}
#' Plot residuals of cpue or other indices
#'
#' Draws the index-fit residuals (resd) as dodged bars by sex, one panel per
#' fleet, with the x axis labelled at pretty year breaks.
#'
#' @param replist List object created by read_admb function
#' @return Plot of fit indices residuals
#' @export
plot_cpue_res <- function(replist){
	# CPUE residuals from the fitted model, one row per year/fleet/sex
	df <- get_cpue(replist)
	p <- ggplot(df,aes(factor(year),resd))
	# side-by-side bars so both sexes are visible in the same year
	p <- p + geom_bar(aes(fill=factor(sex)),stat = "identity", position="dodge")
	p <- p + scale_x_discrete(breaks=pretty(df$year))
	p <- p + labs(x="Year",y="CPUE Residuals",fill="Sex")
	pCPUEres <- p + facet_wrap(~fleet,scales="free_x")
	return(pCPUEres)
} |
97d1f3433f7acb2f13e79494b4e3e9f208fbc3ac | eda751fd8916aafb27e6a7ec01287615f0a6b220 | /Scripts/DeepLearning_H2O.R | cfef52853901b07fcb0816f22f453655d54b3e9b | [
"MIT"
] | permissive | Miyake-Diogo/Artificial_Inteligence_and_MachineLearning_Formation-Udemy | fb9df3cfa64b79d9b8e871a1625213fdadd0ac4e | bc715e831e7d07bc72c01d4d2b4f01a8063992a6 | refs/heads/master | 2020-03-22T03:51:33.989826 | 2018-07-06T12:06:09 | 2018-07-06T12:06:09 | 139,456,460 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,504 | r | DeepLearning_H2O.R | # Formação IA e ML - UDEMY
## Deep Learningo com o H2O
#warnb , desligar os warnings sem relevancia
options(warn=-1)
#importa arquivos de digitos, importar treino
digitos <- read.csv(gzfile(file.choose()), header=F)
# cada linha possui 785 colunas, são 784 pixels e mais uma coluna para a classe (digito)
dim(digitos)
head(digitos)
# visualizar 4 digitos compartilhando a tela
split.screen(figs=c(2,2))
#precisamos transformar a imagem que esta na linha em uma matriz de duas dimensões
#784 é 28 x 28, ou seja, uma imagem de 28 x 28 pixels
dig = t(matrix(unlist(digitos[20,-784]), nrow = 28, byrow = F))
dig = t(apply(dig, 2, rev))
#ver imagem em "pixels"
dig
screen(1)
image(dig, col = grey.colors(255))
#conferindo se é o digito 4
digitos[20,785]
# Faz o mesmo para as outras áreas
screen(2)
dig = t(matrix(unlist(digitos[2,-784]), nrow = 28, byrow = F))
dig = t(apply(dig, 2, rev))
image(dig,col=grey.colors(255))
screen(3)
dig = t(matrix(unlist(digitos[4,-784]), nrow = 28, byrow = F))
dig = t(apply(dig, 2, rev))
image(dig,col=grey.colors(255))
screen(4)
dig = t(matrix(unlist(digitos[5,-784]), nrow = 28, byrow = F))
dig = t(apply(dig, 2, rev))
image(dig,col=grey.colors(255))
close.screen(all=TRUE)
# Feito a visualização do reconhecimento dos digitos, hora de por a mão na massa
install.packages("h2o")
library(h2o)
# precisa ser inicializado
h2o.init()
#importa os dados de treino e teste
treino <- h2o.importFile(file.choose())
teste <- h2o.importFile(file.choose())
dim(treino)
head(treino)
colnames(treino)
#transforma a classe em factor - exigencia do funcionamento da rede neural profunda
treino[,785] <- as.factor(treino[,785])
teste[,785] <- as.factor(teste[,785])
# Criação do modelo da Rede neural
# h2o.deeplearning(variaveis independentes, classe, objeto treino, objeto de validação,
# Tipo de distribuição, Tipo de ativação, camadas, tratamento de dados esparsos,
# quantidade de epochs)
modelo <- h2o.deeplearning(x = colnames(treino[,1:784]), y = "C785", training_frame = treino, validation_frame = teste, distribution = "AUTO", activation = "RectifierWithDropout", hidden = c(64,64,64), sparse = TRUE, epochs = 20)
# plotagem do modelo
plot(modelo)
h2o.performance(modelo)
#previsao de novos dados
#vimos que na linha 20 tinha o numero 4
#vamos conferir
treino[20,785]
#fazendo previsão
pred <- h2o.predict(modelo, newdata = treino[20,1:784])
#verificando a previsão
pred$predict
|
6a0d1ed6c769bfe4ba84de67eec5176f07c19553 | 8a5ccdad17eff711fb5a4ad320c088a531b5e69d | /R_scripts/ggplot2_dplyr/003-dplyrDemo.R | 1b0174514c3b00676e30525624fa8a830f59ba59 | [] | no_license | DawnEve/bioToolKit | 6ec41245ad63246f470350d1d3996a512011d3a8 | 148762dec920374174563969e87b5b2313cc1dc4 | refs/heads/master | 2023-02-07T19:11:55.487040 | 2022-12-16T13:30:53 | 2022-12-16T13:30:53 | 77,501,142 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 15,087 | r | 003-dplyrDemo.R | #############
# Learning the dplyr package
# version: v0.2
#
# Tips:
# help: with the cursor on a keyword, press F1 to open its help page.
# docs: https://dplyr.tidyverse.org/
#
# dplyr basics: http://r4ds.had.co.nz/transform.html
# local docs: http://127.0.0.1:27228/library/dplyr/doc/dplyr.html
#############
# Single-table verbs:
# mutate    adds new variables that are functions of existing variables
# select    picks variables based on their names.
# filter    picks cases based on their values.
# summarise reduces multiple values down to a single summary.
# arrange   changes the ordering of the rows.
# group_by  splits the data set into groups
# sample_n, sample_frac: random sampling functions
# pipe operator %>% chains multiple operations together
library('dplyr')
vignette("dplyr")
#1. Convert the data set to a tibble (nicer printing and stricter
#   subsetting; the examples below work on plain data frames too).
class(mtcars)
ds=as_tibble(mtcars)
class(ds)
head(ds)
#
#2. Filtering: filter() picks cases based on their values.
# Keep the rows where cyl == 8
rs1=filter(mtcars,cyl==8)
rs1
# Rows where cyl < 6 AND vs == 1
filter(mtcars,cyl<6 & vs==1)
# equivalently, comma-separated conditions are ANDed together:
filter(mtcars,cyl<6,vs==1)
# Rows where cyl < 6 OR vs == 1
filter(mtcars, cyl<6 | vs==1)
# Rows where cyl is 4 or 6
filter(mtcars,cyl %in% c(4,6))
#
# slice() selects rows by row number.
# Drawback: the original row names are dropped.
# Select row 2
slice(mtcars,2)
head(mtcars)
slice(mtcars,2L) # why are fewer decimal places printed than the original? //todo
filter(mtcars,row_number()==2L) # filter prints the same decimals as the original
# Select the last row
x=dim(mtcars)[1]; mtcars[x,]; # my own base-R solution
slice(mtcars,n())
filter(mtcars,row_number()==n())
# Select rows 5 through the last row
slice(mtcars, 5:n())
rs=filter(mtcars, between(row_number(),5,n()))
class(rs) #[1] "data.frame"
#
# 3. Ordering: arrange() changes the ordering of the rows.
# arrange() sorts rows by the given column names in turn, similar to
# base::order(). The default is ascending; wrap a column in desc() for
# descending order. Original row names are dropped.
# Sort ascending by cyl, then by disp
arrange(mtcars,cyl,disp)
#dim(mtcars)
arrange(mtcars,cyl,desc(disp))
#
# 4. Selecting: select() picks variables based on their names.
# select() takes column names as arguments to build a subset.
# dplyr supplies helpers to combine with select() for choosing variables:
# starts_with, ends_with, contains, matches, one_of, num_range, everything.
# When used to rename, select() keeps only the columns given as arguments,
# whereas rename() keeps all columns and only renames the given ones.
# Original row names are dropped.
class(iris)
head(iris)
iris2=iris[1:10,] # use the first ten rows as the example
# Columns whose name starts with "Petal"
select(iris2,starts_with("Petal"))
# Columns whose name does NOT start with "Petal"
select(iris2,-starts_with("Petal"))
# Columns whose name does NOT end with "Width"
select(iris2, -ends_with("Width"))
# Columns whose name contains "etal"
select(iris2,contains("etal"))
# Regular expression: columns whose name contains "en"
select(iris2,matches(".en."))
# Regular expression: columns whose name does not end in "h"
#select(iris2,matches(".+h$"))
select(iris2, -matches(".+h$"))
select(iris2,matches(".+[^h]$"))
# Select columns directly by name
select(iris2,Petal.Length,Petal.Width)
select(iris2,'Petal.Length','Petal.Width')
# Negative selection: keep the remaining columns
select(iris2,-Petal.Length,-Petal.Width)# quoting the names here raises an error:
#select(iris2,-"Petal.Length",-"Petal.Width")
#Error in -"Petal.Length" : invalid argument to unary operator
# Use a colon between column names to select a contiguous range
select(iris2, Sepal.Width:Species)
# Select columns from a character vector; select() cannot take a bare
# character vector directly, so use the one_of() helper
vars=c("Sepal.Length","Petal.Width",'Species')
select(iris2,one_of(vars))
# Inverse selection
select(iris2, -one_of(vars))
# Return all columns; usually used when reordering variables
select(iris2,everything())
# Reorder columns: move Species to the front
select(iris2,Species,everything())
# Example 2
df=as.data.frame(matrix(runif(100),nrow=10))
df
dim(df)
# Select the three columns V4, V5, V6
select(df,V4:V6)
select(df, num_range("V", 4:6))
# Example 3
# Renaming columns
# Rename Petal.Length; the result contains only the renamed column
select(iris2,petal_length=Petal.Length)
# Rename every column prefixed with "Petal"; result contains only those
select(iris2, petal=starts_with("Petal"))
# Rename Petal.Length but return all columns
rename(iris2,petal_length=Petal.Length)
#
# 5. Transforming: mutate() adds new variables that are functions of existing variables
# mutate() and transmute() compute on existing columns and add the results
# as new columns, similar to base::transform(), except that a column created
# in the same statement can be used immediately.
# mutate() keeps the original variables in its result; transmute() returns
# only the newly created variables. Original row names are dropped.
# Add new columns wt_kg and wt_t; wt_kg is reused in the same statement.
mtcars2=mtcars[1:10,]
mutate(mtcars2, wt_kg=wt*453, wt_t=wt_kg/1000)
# With transmute, only the newly added columns are returned
transmute(mtcars2, wt_kg=wt*453, wt_t=wt_kg/1000)
#
# 6. De-duplication: distinct
# distinct() removes duplicate rows from the input tbl, similar to
# base::unique() but faster. Original row names are dropped.
df=data.frame(
x=sample(10,100,rep=T),
y=sample(10,100,rep=T)
)
# De-duplicate on both variables; return the number of distinct rows
nrow(distinct(df))
nrow(distinct(df,x,y))
# De-duplicate on x only; return just the distinct x values
distinct(df,x)
# De-duplicate on x but return all variables
distinct(df, x, .keep_all=TRUE)
# De-duplicate on the result of a computed expression
distinct(df, diff=abs(x-y))
distinct(df, diff=abs(x-y), .keep_all = T)
distinct(df, diff=x-y, .keep_all = T) # without abs() more rows remain distinct
#
# 7. Summarising: summarise() reduces multiple values down to a single summary.
# Applies summary functions to a data frame, returning a one-row result.
# A function returning more than one value raises:
# Error: expecting result of length one, got : 2
# Original row names are dropped.
# Mean of disp
summarise(mtcars,mean(disp))
# Standard deviation of disp
summarise(mtcars, sd(disp))
sd(mtcars$disp)
# Maximum and minimum
summarise(mtcars, max(disp), min(disp))
# Number of rows
summarise(mtcars, n())
dim(mtcars)[1];nrow(mtcars)
# Number of unique gear values
summarise(mtcars, n_distinct(gear))
factor(mtcars$gear) # cross-check via the number of factor levels
# First value of disp
summarise(mtcars, first(disp))
# Last value of disp
summarise(mtcars, last(disp))
#
# 8. Sampling
# sample_n() draws a fixed number of rows at random; sample_frac() draws a
# fixed fraction of rows. Both default to sampling without replacement; set
# replace = TRUE for sampling with replacement (e.g. for bootstrap samples).
# Syntax: sample_n(tbl, size, replace = FALSE, weight = NULL, .env = parent.frame())
# Draw 10 rows without replacement
sample_n(mtcars,10)
# Draw 5 rows with replacement
sample_n(mtcars, 5, replace=T)
# Draw 10 rows without replacement, weighted by mpg
sample_n(mtcars, 10, weight=mpg) # unclear how the weights affect sampling //todo
# Syntax: sample_frac(tbl, size = 1, replace = FALSE, weight = NULL,.env = parent.frame())
# Default size=1 resamples the whole data set without replacement
sample_frac(mtcars)
# Draw 10% of the rows without replacement
sample_frac(mtcars, 0.1)
# Draw 1.5x the number of rows, with replacement
sample_frac(mtcars, 1.5, replace=TRUE)
# Draw 10% of the rows without replacement, weighted by 1/mpg
sample_frac(mtcars,0.1,weight=1/mpg)
#
# 9. Grouping: group_by()
# group_by() groups a data set by the given variables and returns the
# grouped data set. The verbs introduced above then operate per group
# automatically when applied to the grouped result.
# Group mtcars by cyl
by_cyl=group_by(mtcars,cyl)
by_cyl # the data look unchanged; grouping metadata was added //todo
class(by_cyl)
# Row(s) with the maximum disp within each group
filter(by_cyl, disp==max(disp))
# Columns whose name contains "d"; the grouping column cyl is always kept
select(by_cyl, contains("d"))
# Sort each group by mpg
tmp=arrange(by_cyl, mpg)
View(tmp)
# Draw 2 rows without replacement from each group
sample_n(by_cyl,2)
# Example 2: aggregate functions after grouping
# Number of records in each group
summarise(by_cyl, n())
# Mean of disp and hp within each group
summarise(by_cyl, mean(disp), mean(hp))
# Number of unique gear values within each group
summarise(by_cyl, n_distinct(gear))
# First and last disp value within each group
summarise(by_cyl, first(disp))
summarise(by_cyl, last(disp))
# Minimum disp within each group
summarise(by_cyl, min(disp))
summarise(arrange(by_cyl,disp), min(disp))
# Maximum disp within each group
summarize(by_cyl, max(disp))
summarize(arrange(by_cyl,disp), max(disp))
# Second disp value within each group
summarise(by_cyl, nth(disp,2))
# Example 3
# Grouping variables of a grouped data set
groups(by_cyl)
# Remove the grouping information, so the grouping variables become NULL
groups(ungroup(by_cyl))
# Example 4: vector of the group id each record belongs to
group_indices(mtcars, cyl)
#[1] 2 2 1 2 3 2 3 1 1 2 2 3 3 3 3 3 3 1 1 1 1 3 3 3 3 1 1 1 3 2 3 1
# Example 5: vector of record counts per group
group_size(by_cyl)
summarise(by_cyl, n())
table(mtcars$cyl)
# Number of groups
n_groups(by_cyl)
length(group_size(by_cyl))
#
# Count records per group, similar to base::table().
# count() does its own grouping; tally() expects a data set that has
# already been grouped with group_by().
# count() with the grouping built in
count(mtcars, cyl)
# sort=TRUE orders the group counts in descending order
count(mtcars, cyl, sort=TRUE)
# tally() needs an explicit group_by()
tally(group_by(mtcars, cyl))
# The same count via summarise
summarise(group_by(mtcars, cyl),n())
# Group by cyl and sum the gear variable within each group
tally(group_by(mtcars,cyl), wt=gear)
#
# 10. Joining data: join
# Data frames often need to be joined (left, right, inner joins, ...);
# dplyr provides join verbs similar to base::merge().
df1=data.frame(
CustomerId=c(1:6),
sex=c('f','m','f','f','m','m'),
Product=c(rep('Toaster',3), rep('Radio',3))
)
df2=data.frame(
CustomerId=c(2,4,6,7),
sex=c('m','f','m','f'),
State=c(rep('Alabama',3), rep('Ohio',1))
)
# Inner join: keep only the matching records
#inner_join(x,y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...)
# By default joins on the shared columns CustomerId and sex
inner_join(df1,df2) # only matching rows are kept
# Left join: add matching records from y onto data set x
#left_join(x,y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...)
# By default joins on "CustomerId" and "sex"
left_join(df1,df2) # keeps all rows of the left table
# Likewise, a right join keeps the right table and adds information to it.
right_join(df1,df2) # keeps all rows of the right table
# Full join: keep all records (all rows) from both tables
# full_join(x,y, by = NULL, copy = FALSE, suffix = c(".x", ".y"), ...)
full_join(df1,df2) # full join keeps everything; inner join keeps only matches
# Inner join on CustomerId only; the shared column sex gets .x/.y suffixes
inner_join(df1,df2,by=c('CustomerId'="CustomerId"))
# Return all records of x that have a match in y
#semi_join(x,y, by = NULL, copy = FALSE, ...)
semi_join(df1,df2,by=c('CustomerId'='CustomerId'))
df1
# Join on CustomerId and sex; return the df1 records with no match in df2
anti_join(df1, df2)
df2
#
# 11. Set operations. dplyr re-implements the base set operations, but
# works more efficiently on data frames and other tabular data.
mtcars$model=rownames(mtcars)
g1=mtcars[1:20,]
g2=mtcars[10:32,]
# Intersection of the two sets; row names are lost
intersect(g1,g2)
# Union, with duplicates removed
union(g1,g2)
# Set difference: records in g1 but not in g2
setdiff(g1,g2)
setdiff(g2,g1) # this one is g2 - g1
# Union of the two sets WITHOUT removing duplicates
# (note: union_all concatenates rows; it is not an intersection)
union_all(g1,g2)
# Test whether two sets are equal (row order is ignored)
setequal(g1,g1[20:1,]) #TRUE
setequal(g1,mtcars[1:20,]) #TRUE
setequal(g1,g2) #FALSE: Different number of rows
setequal(g1,g2[1:20,]) #FALSE: Rows in x but not y...
#
# 12. Combining data: bind
# Combine data frames by rows/columns.
one=mtcars[1:4,]; one
two=mtcars[11:14,]; two
# Row-bind data frames one and two
bind_rows(one, two)
# Row-bind a list whose elements are data frames
bind_rows(list(one,two))
# Row-bind and add an id column recording which source each row came from
bind_rows(list(one, two), .id='id') # the id column holds numbers
bind_rows(list(a=one, b=two), .id="id") # the id column holds the data frame names
# Binding frames with non-matching column names fills with NA;
# rbind would raise an error instead
bind_rows(data.frame(x=1:3), data.frame(y=1:4))
# Combining factors
f1=factor('a')
f2=factor('b')
c(f1,f2)
unlist(list(f1,f2))
# Different factor levels are coerced to character
combine(f1,f2) # emits a Warning
combine(list(f1,f2)) # emits a Warning
#
# 13. Conditionals: ifelse
# dplyr provides a stricter conditional, if_else(), similar to base::ifelse()
# except that the true and false values must have the same type; this makes
# the output type predictable and evaluation faster.
x=c(-5:5, NA);x
if_else(x<0, NA_integer_, x)
# Use the string "missing" to replace NA elements in the input
if_else(x<0, 'negetive', 'positive','missing')
# if_else rejects mismatched types; ifelse allows them
if_else(x<0, 'negative', 1) #Error: `false` must be type character, not double
ifelse(x<0, 'negative', 1) # the 1 is coerced to character
# Example 2
set.seed(100)
x <- factor(sample(letters[1:5], 10, replace=TRUE));x
# if_else preserves the original data type
if_else(x %in% c('a','b','c'), x, factor(NA))
ifelse(x %in% c('a','b','c'), x, factor(NA))
ifelse(x %in% c('a','b','c'), as.character(x), factor(NA)) # output coerced to character
#################
# Pipe operator %>% (or %.%)
# Feeds the output of one function as the input of the next; %.% is deprecated.
mtcars %>%
group_by(cyl) %>%
summarise(total = sum(gear)) %>%
arrange(desc(total))
# Alternative 2: nested calls (deeply nested, hard to debug)
head(
arrange(
summarise(
group_by(mtcars, cyl), total=sum(gear)
), desc(total)
), 5
)
#################
# dplyr connecting to a MySQL database (connection failed on Windows)
# To pull data from MySQL, use src_mysql() from dplyr directly:
#src_mysql(dbname,host = NULL,port = 0L,user = "root",password = "password",…)
library(dplyr)
# Connect dplyr to the MySQL database
my_db <- src_mysql(dbname = "mysql",
host = 'localhost',
port = 3306,
user = "root",
password = "")
#Error: Condition message must be a string
# Fetch the data of a specified table
#tbl(src, from = 'diff')
my_tbl <- tbl(my_db,from = "user") # "user" is a table in the database
my_tbl
##############
#refer:
#
# http://blog.csdn.net/achuo/article/details/54693211
|
fde079dd9a105b35cf0646255fb62da8818a3482 | ab43e673e5e6a3f2152a976b0201ab26bd666b2c | /man/play_n_games.Rd | a717d560b97c8a45b679c7365884285105f1bafe | [] | no_license | herbertm533/montyhallgame | 7bbea4dd4280e5de044edbbf6103057e7c25fc81 | ebd62868d076475927b2707d6df1721c6d2b7f9c | refs/heads/master | 2022-12-13T01:37:35.980216 | 2020-09-20T07:28:03 | 2020-09-20T07:28:03 | 297,012,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,235 | rd | play_n_games.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem.R
\name{play_n_games}
\alias{play_n_games}
\title{Play the Monty Hall Problem game a set number of times.}
\usage{
play_n_games(n = 100)
}
\arguments{
\item{n}{= The number of times the Monty Hall game will be run.}
}
\value{
The function returns a data frame with the results of
the Monty Hall game simulations. The rows contain the game strategy
information. The columns contain the game outcome information.
}
\description{
\code{play_n_games()} plays the Monty Hall Problem game a
assigned amount of times.
}
\details{
This simulation replicates the game from the TV show "Let's
Make a Deal" where there are three doors for a contestant
to choose from, one of which has a car behind it and two
have goats. The contestant selects a door, then the host
opens a door to reveal a goat, and then the contestant is
given an opportunity to stay with their original selection
or switch to the other unopened door. There was a famous
debate about whether it was optimal to stay or switch when
given the option to switch, so this simulation was created
to test both strategies.
}
\examples{
play_n_games( n=100 )
\dontrun{
play_n_games( n=100 )
}
}
|
7c33b0a1a9b26fc91ed0acad80c06bbf803a3a9c | a28fc38a0378905ac32e969dbff37565cf6aec60 | /Final Project.R | 29c8db113af59ba7fa07e7fcaa87cd09863c9aa5 | [] | no_license | DeeptiChevvuri/Inventory-Management-Analysis--Manufacturing-Industry | a6dcee6f5dfd4d9573372223801b7cf77685fa69 | da5d48699707149832bf7d4ec496cd81d8723c9a | refs/heads/master | 2021-05-14T13:23:40.109615 | 2018-01-05T23:57:45 | 2018-01-05T23:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,574 | r | Final Project.R | #Final Project
# Final Project: inventory ($ on hand) analysis.
# Load the inventory data set. Column key (taken from the plot labels below):
#   X5 = Item Cost, X6 = $ On Hand (response), X7 = Annual Usage Qty,
#   X8 = Avg Daily Usage Qty, X9 = Minimum Order Qty, X10 = Box Size Qty,
#   X11 = Lead Time (days), X12 = Annual Usage $, X13 = Manufacturing Site,
#   X14 = Planner Code, X16 = Safety Stock Qty, X17 = % Received On Time,
#   X18 = Delivery Frequency, X19 = Avg Daily Usage $, X20 = Minimum Order $,
#   X21 = Box Size $, X22 = Safety Stock $
InvDataSet<-read.table("G:/MIS/Sem 2/Stat 526/Project/Final Project/InvDataSet2.csv",header=T,sep=",",quote = "\"")
summary(InvDataSet)
head(InvDataSet)
# Scatterplot matrix of all variable pairs
plot(InvDataSet)
# Univariate scatterplots of $ On Hand against each candidate predictor
plot(X6~X5,data = InvDataSet, main = "$ On Hand vs. Item Cost",pch=20,xlab = "Item Cost",ylab = "$ On Hand")
plot(X6~X7,data = InvDataSet, main = "$ On Hand vs. Annual Usage Qty",pch=20,xlab = "Annual Usage Qty",ylab = "$ On Hand")
plot(X6~X8,data = InvDataSet, main = "$ On Hand vs. Average Daily Usage Qty",pch=20,xlab = "Average Daily Usage Qty",ylab = "$ On Hand")
plot(X6~X9,data = InvDataSet, main = "$ On Hand vs. Minimum Order Qty",pch=20,xlab = "Minimum Order Qty",ylab = "$ On Hand")
plot(X6~X10,data = InvDataSet, main = "$ On Hand vs. Box Size Qty",pch=20,xlab = "Box Size Qty",ylab = "$ On Hand")
plot(X6~X11,data = InvDataSet, main = "$ On Hand vs. Lead Time (days)",pch=20,xlab = "Lead Time (days)",ylab = "$ On Hand")
plot(X6~X12,data = InvDataSet, main = "$ On Hand vs. Annual Usage $",pch=20,xlab = "Annual Usage $",ylab = "$ On Hand")
plot(X6~X13,data = InvDataSet, main = "$ On Hand vs. Manufacturing Site",pch=20,xlab = "Manufacturing Site",ylab = "$ On Hand")
plot(X6~X14,data = InvDataSet, main = "$ On Hand vs. Planner Code",pch=20,xlab = "Planner Code",ylab = "$ On Hand")
plot(X6~X16,data = InvDataSet, main = "$ On Hand vs. Safety Stock Qty",pch=20,xlab = "Safety Stock Qty",ylab = "$ On Hand")
plot(X6~X17,data = InvDataSet, main = "$ On Hand vs. On Time Delivery",pch=20,xlab = "% Received On Time",ylab = "$ On Hand")
plot(X6~X18,data = InvDataSet, main = "$ On Hand vs. Delivery Frequency",pch=20,xlab = "Delivery Frequency",ylab = "$ On Hand")
plot(X6~X19,data = InvDataSet, main = "$ On Hand vs. Average Daily Usage $",pch=20,xlab = "Average Daily Usage $",ylab = "$ On Hand")
plot(X6~X20,data = InvDataSet, main = "$ On Hand vs. Minimum Order ($)",pch=20,xlab = "Minimum Order ($)",ylab = "$ On Hand")
plot(X6~X21,data = InvDataSet, main = "$ On Hand vs. Box Size ($)",pch=20,xlab = "Box Size ($)",ylab = "$ On Hand")
plot(X6~X22,data = InvDataSet, main = "$ On Hand vs. Safety Stock ($)",pch=20,xlab = "Safety Stock ($)",ylab = "$ On Hand")
# Estimate the correlation coefficient between $ On Hand (X6) and Item Cost (X5)
cor(InvDataSet$X6,InvDataSet$X5)
# A few examples of using simple linear models: regress $ On Hand (X6)
# on one predictor at a time and inspect the coefficient significance.
# NOTE(review): mod3-mod6 and the first mod10 use descriptive column names
# (X..on.Hand, Planning.Method, Planner.Code, Mfg_Site, Supplier, ...) that
# do not match the X5-X22 names used everywhere else in this script; they
# appear to come from an earlier version of the CSV headers -- confirm the
# file's actual column names before running these lines.
# Is the daily usage in dollars (X19) significant?
mod0=lm(X6~X19,data=InvDataSet)
mod0
summary(mod0)
# Is on-time delivery (X17) significant?
mod1=lm(X6~X17,data=InvDataSet)
mod1
summary(mod1)
# Is the delivery frequency (X18) significant?
mod2=lm(X6~X18,data=InvDataSet)
mod2
summary(mod2)
# Is the planning method significant?
mod3=lm(X..on.Hand~Planning.Method,data=InvDataSet)
mod3
summary(mod3)
# Is the planner code significant?
mod4=lm(X..on.Hand~Planner.Code,data=InvDataSet)
mod4
summary(mod4)
# Is the manufacturing site significant?
mod5=lm(X..on.Hand~Mfg_Site,data=InvDataSet)
mod5
summary(mod5)
# Is the supplier significant?
mod6=lm(X..on.Hand~Supplier,data=InvDataSet)
mod6
summary(mod6)
# Is the (daily usage x item cost) interaction plus planner code significant?
# NOTE(review): the first mod10 assignment is immediately overwritten by the
# second (X-name) version below, so only the second fit is kept.
mod10=lm(X..on.Hand~Average.Daily.Usage.Qty*Item.Cost+Planner.Code,data=InvDataSet)
mod10=lm(X6~X5*X8+X14,data=InvDataSet)
mod10
summary(mod10)
#From HW6 analysis
library(car)    # vif()
# NOTE(review): install.packages() inside a script re-installs on every
# run; this is normally done once interactively instead.
install.packages("leaps")
library(leaps)  # regsubsets()
library(MASS)   # stepAIC()
############################
#First analysis was to review the data without any categorical variables to see what the model would look like
#This first set of models is only using the annual data to help build a model for current inventory $ on hand
# Step AIC: null (intercept-only) model and the full annual-data model
mod.simple<-lm(X6~1,data=InvDataSet)
mod.full<-lm(X6~X5+X7+X9+X10+X11+X12+X16+X17+X20+X21+X22,data=InvDataSet)
summary(mod.full)
summary(mod.simple)
stepmod<-stepAIC(mod.full)
vif(mod.full)   # variance inflation factors (multicollinearity check)
AIC(mod.full)
# Backward stepwise selection from the full model
stepback<-stepAIC(mod.full,scope=list(upper=mod.full,lower=mod.simple),direction="backward")
stepback$anova
# Refit the model chosen by backward selection
mod.back<-lm(X6 ~ X7 + X12 + X16 + X17 + X20 + X21 + X22,data=InvDataSet)
summary(mod.back)
vif(mod.back)
AIC(mod.back)
# Forward stepwise selection from the null model
stepforward<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="forward")
stepforward$anova
mod.forward<-lm(X6 ~ X22 + X12 + X21 + X20 + X17,data=InvDataSet)
summary(mod.forward)
vif(mod.forward)
AIC(mod.forward)
# Mixed (both-direction) stepwise selection
stepmixed<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="both")
stepmixed$anova
mod.mixed<-lm(X6 ~ X22 + X12 + X21 + X20 + X17,data=InvDataSet)
summary(mod.mixed)
vif(mod.mixed)
AIC(mod.mixed)
#1.g  Best-subsets search over the selected predictors, scored by BIC
sub<-regsubsets(X6~X11+X20+X12+X21+X17,data=InvDataSet,nbest=1,nvmax=8)
plot(sub,scale="bic",main="Model Selection using BIC Criterion")
sub.mod<-lm(X6~X11+X20+X12+X21+X17,data=InvDataSet)
summary(sub.mod)
AIC(sub.mod)
stepmod<-stepAIC(sub.mod,scope=list(upper=sub.mod,lower=sub.mod),direction="both")
stepmod<-stepAIC(sub.mod)
vif(stepmod)
############################
#Second analysis was to review the data without any categorical variables to see what the model would look like
#This second set of models is only using the daily data to help build a model for current inventory $ on hand
# Step AIC: null model and the full daily-data model
mod.simple<-lm(X6~1,data=InvDataSet)
mod.full<-lm(X6~X5+X8+X9+X10+X11+X16+X17+X19+X20+X21+X22,data=InvDataSet)
mod.full
summary(mod.full)
summary(mod.simple)
stepmod<-stepAIC(mod.full)
vif(mod.full)   # multicollinearity check
AIC(mod.full)
# Backward stepwise selection from the full model
stepback<-stepAIC(mod.full,scope=list(upper=mod.full,lower=mod.simple),direction="backward")
stepback$anova
mod.back<-lm(X6 ~ X17 + X19 + X20 + X21 + X22,data=InvDataSet)
summary(mod.back)
vif(mod.back)
AIC(mod.back)
# Forward stepwise selection from the null model
stepforward<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="forward")
stepforward$anova
mod.forward<-lm(X6 ~ X22 + X19 + X21 + X20 + X17,data=InvDataSet)
summary(mod.forward)
vif(mod.forward)
AIC(mod.forward)
# Mixed (both-direction) stepwise selection
stepmixed<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="both")
stepmixed$anova
mod.mixed<-lm(X6 ~ X22 + X19 + X21 + X20 + X17,data=InvDataSet)
summary(mod.mixed)
vif(mod.mixed)
AIC(mod.mixed)
#1.g  Best-subsets search over the selected predictors, scored by BIC
sub<-regsubsets(X6~X11+X20+X19+X21+X17,data=InvDataSet,nbest=1,nvmax=8)
plot(sub,scale="bic",main="Model Selection using BIC Criterion")
sub.mod<-lm(X6~X11+X20+X19+X21+X17,data=InvDataSet)
summary(sub.mod)
AIC(sub.mod)
stepmod<-stepAIC(sub.mod,scope=list(upper=sub.mod,lower=sub.mod),direction="both")
stepmod<-stepAIC(sub.mod)
vif(stepmod)
############################
#Third analysis was to review the data including the categorical variables to see what the model would look like
#Includes both daily & annual data to allow the model to select the most appropriate values
#We chose not to include the categorical variables of Supplier, Part Number, Item UOM, Planner Code, and Planning Method
# Step AIC: null model and full model (adds X13 = Mfg Site, X18 = Delivery Freq)
mod.simple<-lm(X6~1,data=InvDataSet)
mod.full<-lm(X6~X5+X7+X8+X9+X10+X11+X12+X13+X16+X17+X18+X19+X20+X21+X22,data=InvDataSet)
summary(mod.full)
mod.full
summary(mod.simple)
stepmod<-stepAIC(mod.full)
vif(mod.full)
AIC(mod.full)
# Backward stepwise selection from the full model
stepback<-stepAIC(mod.full,scope=list(upper=mod.full,lower=mod.simple),direction="backward")
stepback$anova
mod.back<-lm(X6 ~ X12 + X13 + X20 + X21 + X22,data=InvDataSet)
summary(mod.back)
vif(mod.back)
AIC(mod.back)
# Forward stepwise selection from the null model
stepforward<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="forward")
stepforward$anova
mod.forward<-lm(X6 ~ X22 + X19 + X21 + X13 + X20,data=InvDataSet)
summary(mod.forward)
vif(mod.forward)
AIC(mod.forward)
# Mixed (both-direction) stepwise selection
stepmixed<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="both")
stepmixed$anova
mod.mixed<-lm(X6 ~ X22 + X19 + X21 + X13 + X20,data=InvDataSet)
summary(mod.mixed)
vif(mod.mixed)
AIC(mod.mixed)
# All combinations of the variables chosen by the selection methods above
# (note the doubled "+ +" before X19 -- harmless unary plus, but a typo)
mod.all<-lm(X6 ~ X12 + X13 + +X19 + X20 + X21 + X22,data=InvDataSet)
summary(mod.all)
vif(mod.all)
AIC(mod.all)
#1.g  Best-subsets search over the selected predictors, scored by BIC
sub<-regsubsets(X6~X11+X20+X19+X21+X17,data=InvDataSet,nbest=1,nvmax=8)
plot(sub,scale="bic",main="Model Selection using BIC Criterion")
sub.mod<-lm(X6~X11+X20+X19+X21+X17,data=InvDataSet)
summary(sub.mod)
AIC(sub.mod)
stepmod<-stepAIC(sub.mod,scope=list(upper=sub.mod,lower=sub.mod),direction="both")
stepmod<-stepAIC(sub.mod)
vif(stepmod)
############################
#4th analysis was to review the data including the categorical variables to see what the model would look like
#Includes both daily & annual data to allow the model to select the most appropriate values
#We chose not to include the categorical variables of Supplier, Part Number, Item UOM, Planner Code, and Planning Method
#Removed variables that were "internally set" vs. "externally set"
# Step AIC: null model and full "externally set" model
mod.simple<-lm(X6~1,data=InvDataSet)
mod.full<-lm(X6~X5+X7+X8+X9+X10+X11+X12+X17+X18+X19+X20+X21,data=InvDataSet)
mod.full
summary(mod.full)
summary(mod.simple)
stepmod<-stepAIC(mod.full)
vif(mod.full)
AIC(mod.full)
# Backward stepwise selection from the full model
stepback<-stepAIC(mod.full,scope=list(upper=mod.full,lower=mod.simple),direction="backward")
stepback$anova
mod.back<-lm(X6 ~ X11 + X17 + X19 + X20 + X21,data=InvDataSet)
summary(mod.back)
vif(mod.back)
AIC(mod.back)
# Forward stepwise selection from the null model
stepforward<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="forward")
stepforward$anova
mod.forward<-lm(X6 ~ X19 + X21 + X20 + X17 + X11,data=InvDataSet)
mod.forward
summary(mod.forward)
vif(mod.forward)
AIC(mod.forward)
# Mixed (both-direction) stepwise selection; this is the model carried
# forward as mod.final in the final review section below.
stepmixed<-stepAIC(mod.simple,scope=list(upper=mod.full,lower=mod.simple),direction="both")
stepmixed$anova
mod.mixed<-lm(X6 ~ X19 + X21 + X20 + X17 + X11,data=InvDataSet)
mod.mixed
summary(mod.mixed)
vif(mod.mixed)
AIC(mod.mixed)
#1.g  Best-subsets search over the selected predictors, scored by BIC
sub<-regsubsets(X6~X11+X20+X19+X21+X17,data=InvDataSet,nbest=1,nvmax=8)
plot(sub,scale="bic",main="Model Selection using BIC Criterion")
sub.mod<-lm(X6~X11+X20+X19+X21+X17,data=InvDataSet)
summary(sub.mod)
AIC(sub.mod)
stepmod<-stepAIC(sub.mod,scope=list(upper=sub.mod,lower=sub.mod),direction="both")
stepmod<-stepAIC(sub.mod)
vif(stepmod)
# Final data set review: reload the data, refit the chosen model, and
# check its fit graphically.
InvDataSet<-read.table("G:/MIS/Sem 2/Stat 526/Project/Final Project/InvDataSet2.csv",header=T,sep=",",quote = "\"")
summary(InvDataSet)
head(InvDataSet)
plot(InvDataSet)
# Final model: $ On Hand ~ Avg Daily Usage $ + Box Size $ + Min Order $ +
#              % On Time + Lead Time
mod.final<-lm(X6 ~ X19 + X21 + X20 + X17 + X11,data=InvDataSet)
summary(mod.final)
mod.final
AIC(mod.final)
vif(mod.final)
#Check fit: residuals vs. fitted for each single-predictor model
fit.lm<-lm(X6~X11,data=InvDataSet)
plot(fit.lm$fitted.values,fit.lm$residuals,pch=20,xlab = "Predicted", ylab = "Residuals")
abline(h=0)
fit.lm<-lm(X6~X17,data=InvDataSet)
plot(fit.lm$fitted.values,fit.lm$residuals,pch=20,xlab = "Predicted", ylab = "Residuals")
abline(h=0)
fit.lm<-lm(X6~X19,data=InvDataSet)
plot(fit.lm$fitted.values,fit.lm$residuals,pch=20,xlab = "Predicted", ylab = "Residuals")
abline(h=0)
fit.lm<-lm(X6~X20,data=InvDataSet)
plot(fit.lm$fitted.values,fit.lm$residuals,pch=20,xlab = "Predicted", ylab = "Residuals")
abline(h=0)
fit.lm<-lm(X6~X21,data=InvDataSet)
plot(fit.lm$fitted.values,fit.lm$residuals,pch=20,xlab = "Predicted", ylab = "Residuals")
abline(h=0)
library(car)
# BUG FIX: stats::qqplot() requires two samples (x AND y); calling it with a
# single vector failed with "argument 'y' is missing". A normal Q-Q plot of
# the residuals was intended, which is qqnorm().
qqnorm(fit.lm$resid,ylab = "Residuals")
fit.lm<-lm(X6~X22,data=InvDataSet)
fit.lm
abline(fit.lm,col='red')   # NOTE(review): draws the X22 regression line on the current device -- confirm which plot this was meant for
qqnorm(fit.lm$resid,ylab = "Residuals")
summary(fit.lm)
# Check for outliers using standardized residuals vs. leverage.
# BUG FIX: k (number of predictors) and n (number of observations) were used
# in the leverage threshold 2*(k+1)/n below but never defined, so this
# section failed with "object 'k' not found". mod.final has 5 predictors.
k <- 5                  # predictors in mod.final (X19, X21, X20, X17, X11)
n <- nrow(InvDataSet)   # number of observations
RMSE<-1153              # residual standard error, taken from summary(mod.final)
lev <- c(hatvalues(mod.final))> 2*(k+1)/n
# Set up the plot region; the dummy point (.3, 5) pads the axis ranges and
# col = 0 keeps the initial points invisible.
plot(c(5,summary(mod.final)$residuals/RMSE)~c(.3,hatvalues(mod.final)),
pch = 19, ylab = "Standardized Residuals",
xlab = "Leverage",col = 0)
# +/- 3 standardized-residual cutoff lines
abline(a = -3,b=0 , col = 1)
abline(a = 3,b=0 , col = 1)
# Flag and plot outliers (|standardized residual| > 3)
outlier <- (abs(summary(mod.final)$residuals/RMSE))>3
points((summary(mod.final)$residuals/RMSE)[outlier]~hatvalues(mod.final)[outlier],col = 1,pch = 19)
notoutlier <- (abs(summary(mod.final)$residuals/RMSE))<3
points((summary(mod.final)$residuals/RMSE)[notoutlier]~hatvalues(mod.final)[notoutlier],pch = 19)
# Highlight high-leverage observations
points((summary(mod.final)$residuals/RMSE)[lev]~hatvalues(mod.final)[lev],pch = 19,col = 5)
abline(v = 2*(k+1)/n,col = 5) #Line to Identify high leverage obs
# Points that are BOTH outliers and high leverage, in red
un<- (abs(summary(mod.final)$residuals/RMSE))>3&c(hatvalues(mod.final))> 2*(k+1)/n
points((summary(mod.final)$residuals/RMSE)[un]~hatvalues(mod.final)[un],pch = 19,col = 'red')
#Cook's Distance--Effect of Deleting an Individual Observation on the Model Fit
plot(cooks.distance(mod.final), ylab='Cook Distance', xlab='Observation number',pch=19)
cooksdistaceall<-cooks.distance(mod.final)
top5<-sort(cooksdistaceall, decreasing = TRUE)
top5[1:5]# the top 3 have Cook's distance > 1, hence influential outliers
non_influ <- cooks.distance(mod.final)<1 ### Non - Influential points.
fast_small <- InvDataSet[non_influ,]
#Fit Model on Subset of non-influential observations only
out_small <-lm(X6 ~ X19 + X21 + X20 + X17 + X11, data = fast_small)
summary(out_small)
|
88258e608da0083fd8d3202c0d5df151d17ebdc1 | f3c36c1e579c79392d968c1555e4a8a6809193b0 | /2014/Scripts/length_distribution_hist.R | fd2512652b1e5a98839420e27bd763c1492b839d | [] | no_license | taylorstewart/Predator-Diets | 7e6979803839d8049f806f18828fd14303efd290 | 4b698e8167c5682f4d756ec4f99c6be13cf7bc41 | refs/heads/master | 2021-01-21T13:14:27.360035 | 2016-05-24T13:34:03 | 2016-05-24T13:34:03 | 49,217,318 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,091 | r | length_distribution_hist.R | ## Load Data
## Shared setup; the db1 (spring) and db2 (fall) workbook handles used below
## are presumably created there -- they are not defined in this file (confirm).
source("2014/Scripts/data_init.R")
diet_spring <- readWorksheet(db1,sheet="Calculated Data")  # spring diet records
lw_spring <- readWorksheet(db1,sheet="Spring LW Data")     # spring length-weight data
diet_fall <- readWorksheet(db2,sheet="Calculated Data")    # fall diet records
lw_fall <- readWorksheet(db2,sheet="Fall LW Data")         # fall length-weight data
## -----------------------------------------------------------
## Filter data and remove duplicates, leaving only unique Fish IDs.
## -----------------------------------------------------------
## Helper: unique fish IDs for one species / stomach state, returned as a
## one-column data frame named "fish_id" so it can be merge()d with the
## length-weight tables below.  full = TRUE keeps fish with at least one
## recorded food item; full = FALSE keeps fish whose diet rows have no food
## item.  Replaces eight near-identical filter/unique/rename stanzas.
extract_fids <- function(diet, sp, full) {
  has_item <- if (full) !is.na(diet$food_item) else is.na(diet$food_item)
  # which() reproduces dplyr::filter()'s NA-is-FALSE behaviour
  ids <- unique(diet$fid[which(has_item & diet$species == sp)])
  out <- as.data.frame(ids)
  colnames(out) <- "fish_id"
  out
}
yp_spring_full  <- extract_fids(diet_spring, "yellow perch", full = TRUE)
wp_spring_full  <- extract_fids(diet_spring, "white perch",  full = TRUE)
yp_fall_full    <- extract_fids(diet_fall,   "yellow perch", full = TRUE)
wp_fall_full    <- extract_fids(diet_fall,   "white perch",  full = TRUE)
yp_spring_empty <- extract_fids(diet_spring, "yellow perch", full = FALSE)
wp_spring_empty <- extract_fids(diet_spring, "white perch",  full = FALSE)
yp_fall_empty   <- extract_fids(diet_fall,   "yellow perch", full = FALSE)
wp_fall_empty   <- extract_fids(diet_fall,   "white perch",  full = FALSE)
## -----------------------------------------------------------
## Merge effort and length-weight data, and assign season (Spring, Fall)
## and whether or not it contained stomach contents (Y/N).
## -----------------------------------------------------------
## Helper: join a set of fish IDs to the matching length-weight table and
## label the rows with a stomach-contents flag ("Y"/"N") and a season.
## Replaces eight near-identical merge/assign stanzas.
tag_lw <- function(fids, lw, contents, season) {
  out <- merge(fids, lw, by = "fish_id")
  out$contents <- contents
  out$season <- season
  out
}
yp_spring_full_len  <- tag_lw(yp_spring_full,  lw_spring, "Y", "spring")
wp_spring_full_len  <- tag_lw(wp_spring_full,  lw_spring, "Y", "spring")
yp_fall_full_len    <- tag_lw(yp_fall_full,    lw_fall,   "Y", "fall")
wp_fall_full_len    <- tag_lw(wp_fall_full,    lw_fall,   "Y", "fall")
yp_spring_empty_len <- tag_lw(yp_spring_empty, lw_spring, "N", "spring")
wp_spring_empty_len <- tag_lw(wp_spring_empty, lw_spring, "N", "spring")
yp_fall_empty_len   <- tag_lw(yp_fall_empty,   lw_fall,   "N", "fall")
wp_fall_empty_len   <- tag_lw(wp_fall_empty,   lw_fall,   "N", "fall")
## -----------------------------------------------------------
## Combine into a final data frame
## -----------------------------------------------------------
## Stack all eight tagged subsets into one table; dietHist() below selects
## from it by species / season / contents.
lw_comb <- rbind(yp_spring_full_len,wp_spring_full_len,yp_fall_full_len,
                 wp_fall_full_len,yp_spring_empty_len,wp_spring_empty_len,
                 yp_fall_empty_len,wp_fall_empty_len)
## -----------------------------------------------------------
### A helper function for plotting
## -----------------------------------------------------------
dietHist <- function(df,spec,seas,brks,xlim,ylim,clr,show.xaxis,show.yaxis,len.ticks,freq.ticks,...) {
  # Draw one length-frequency panel: empty stomachs (grey) behind stomachs
  # with contents (clr) for the given species/season.
  #
  # df        - combined data with columns species, season, contents, tl
  # spec/seas - species and season to plot
  # brks      - histogram break points (mm); xlim/ylim - axis limits
  # clr       - fill colour for fish that contained prey
  # show.xaxis, show.yaxis, len.ticks, freq.ticks are retained only for
  # interface compatibility with the existing callers and are not used:
  # axes are drawn by the caller, and the original if/else on the flags
  # chose identical margins in all four branches (and would error on
  # vector-valued flags in R >= 4.2), so it is collapsed to one par() call.
  par(mgp=c(0,0.4,0),tcl=-0.25,las=1,xaxs="i",yaxs="i")
  lm <- bm <- 1.5; lrm <- btm <- 1.0
  par(mar = c(bm, lm, btm, lrm))
  tmp <- filter(df,season == seas & species == spec & contents == "Y")
  # NOTE(review): empty-stomach fish are NOT filtered by species here, so
  # the grey histogram pools both species -- confirm this is intended.
  tmp2 <- filter(df,season == seas & contents == "N")
  # Empty-stomach histogram establishes the panel; contents histogram is
  # overlaid on top of it.
  h1 <- hist(tmp2$tl,breaks=brks,plot=FALSE,right=FALSE)
  plot(h1,xlim=xlim,ylim=ylim,xaxt="n",yaxt="n",xlab="",ylab="",main="",col="gray80")
  h2 <- hist(tmp$tl,breaks=brks,plot=FALSE,right=FALSE)
  plot(h2,col=clr,add=TRUE)
}
## -----------------------------------------------------------
## Set some constants for plotting
## -----------------------------------------------------------
clr <- "gray35"               # fill colour for fish that contained prey
brks <- seq(150,330,10)       # 10-mm length bins, 150-330 mm
xlmt <- range(brks)           # x-axis limits follow the bin range
len.ticks <- seq(150,330,10)  # tick positions on the length axis
freq.ticks <- seq(0,15,5)     # tick positions on the count axis
prob <- TRUE                  # NOTE(review): defined but never used below -- confirm
ylmt <- range(freq.ticks)     # y-axis limits follow the count ticks
# number of rows and cols of actual plots
# NOTE(review): these shadow base::nrow/ncol in the global environment;
# they are removed again in the cleanup section.
nrow <- 2
ncol <- 2
# sets the base width for each plot
basew <- 5.0
baseh <- basew*0.6            # panel height keeps a fixed 0.6 aspect ratio
## -----------------------------------------------------------
## Make a base plot
## -----------------------------------------------------------
# make the layout
## Layout regions: 1 = vertical "Count" label strip, 2 = "Length Group"
## label row, 3 = legend row, 4-7 = the four histogram panels (filled
## column-wise).
## NOTE(review): widths has 4 entries for a 3-column matrix and heights has
## 3 entries for 4 rows; confirm layout() resolves these as intended
## (in particular the legend-row height).
layout(rbind(cbind(rep(1,nrow),
                   matrix(4:7,nrow=nrow,byrow=FALSE)),
             c(0,rep(2,ncol)),
             c(0,rep(3,ncol))),
       widths=c(1,basew,rep(basew,ncol-1),1),
       heights=c(rep(baseh,nrow-1),baseh,1),
       respect=TRUE)
# put on some axis labels
par(mar=c(0,0,0,0))  # label/legend regions use their full layout cell
plot.new(); text(0.6,0.5,"Count",srt=90,cex=1.8)
plot.new(); text(0.5,0.6,"Length Group (mm)",cex=1.8)
plot.new(); legend("top",c("Contained prey items","Empty"),fill=c(clr,"gray80"),cex=1.6)
## ---------------------------------------------------
## Put on individual histograms
## ---------------------------------------------------
## Bug fix: the original passed len.ticks / freq.ticks positionally, so they
## landed in dietHist()'s show.xaxis / show.yaxis parameters.  All arguments
## after clr are now passed by name, with the axis flags matching the axis()
## calls drawn for each panel.
## Top-left: yellow perch, spring (y axis labelled, x axis unlabelled)
dietHist(lw_comb, "yellow perch", "spring", brks, xlmt, ylmt, clr,
         show.xaxis = FALSE, show.yaxis = TRUE,
         len.ticks = len.ticks, freq.ticks = freq.ticks)
axis(1,len.ticks,labels=NA,tcl=-0.3,col="gray55")
axis(2,freq.ticks,labels=TRUE,tcl=-0.3,col="gray55",cex.axis=1.1)
## Bottom-left: white perch, spring (both axes labelled)
dietHist(lw_comb, "white perch", "spring", brks, xlmt, ylmt, clr,
         show.xaxis = TRUE, show.yaxis = TRUE,
         len.ticks = len.ticks, freq.ticks = freq.ticks)
axis(1,len.ticks,labels=TRUE,tcl=-0.3,col="gray55",cex.axis=1.1)
axis(2,freq.ticks,labels=TRUE,tcl=-0.3,col="gray55",cex.axis=1.1)
## Top-right: yellow perch, fall (no axis labels)
dietHist(lw_comb, "yellow perch", "fall", brks, xlmt, ylmt, clr,
         show.xaxis = FALSE, show.yaxis = FALSE,
         len.ticks = len.ticks, freq.ticks = freq.ticks)
axis(1,len.ticks,labels=NA,tcl=-0.3,col="gray55")
axis(2,freq.ticks,labels=NA,tcl=-0.3,col="gray55")
## Bottom-right: white perch, fall (x axis labelled only)
dietHist(lw_comb, "white perch", "fall", brks, xlmt, ylmt, clr,
         show.xaxis = TRUE, show.yaxis = FALSE,
         len.ticks = len.ticks, freq.ticks = freq.ticks)
axis(1,len.ticks,labels=TRUE,tcl=-0.3,col="gray55",cex.axis=1.1)
axis(2,freq.ticks,labels=NA,tcl=-0.3,col="gray55")
## -----------------------------------------------------------
## Clean up environment
## -----------------------------------------------------------
## Drop every intermediate object created above, leaving the workspace as
## it was before this script ran (the plots stay on the open device).
rm(list = c(
  "yp_spring_full", "wp_spring_full", "yp_fall_full", "wp_fall_full",
  "yp_spring_empty", "wp_spring_empty", "yp_fall_empty", "wp_fall_empty",
  "yp_spring_full_len", "wp_spring_full_len", "yp_fall_full_len",
  "wp_fall_full_len", "yp_spring_empty_len", "wp_spring_empty_len",
  "yp_fall_empty_len", "wp_fall_empty_len", "lw_spring", "lw_fall",
  "diet_fall", "diet_spring", "lw_comb", "baseh", "basew", "brks", "clr",
  "freq.ticks", "len.ticks", "ncol", "nrow", "prob", "xlmt", "ylmt",
  "dietHist"
))
|
b0f36d7e4f5c5a93a45664150f62514158cfe2aa | 3e692132e9a1372d5c9181fed53871febc547298 | /Bootstrap of wighted variance/analysis result of bootstrap.R | 256eaf248f54c7cc1b38534d5ddfb35b61e8f9ba | [] | no_license | Duanchenyang/WeightedADMM | df00cdaa0fb2df7c17922de9b1e5d65ee01741ca | 97c5d75f8aa1f50dc89605bb682aa7610e3c7e97 | refs/heads/master | 2022-12-15T08:03:19.222045 | 2020-09-08T12:08:18 | 2020-09-08T12:08:18 | 266,042,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,569 | r | analysis result of bootstrap.R |
#####calculate the variance of relative abundance of weighted data
## Across-bootstrap variance of the fitted weighted curve, evaluated on the
## grid t = 0, 0.01, ..., 0.98.  Relies on objects created earlier in the
## session: knots_eq3(), simulate_data_new, order, nknots (and bsplineS
## from the fda package).  Missing/unreadable replicate files are skipped
## (left NA) and excluded from the variance.
tgrid <- seq(0, 0.98, 0.01)
varweightq <- numeric(length(tgrid))      # one variance per grid point
for (q in seq_along(tgrid)) {
  varianceweight <- rep(NA_real_, 500)    # per-replicate fitted values
  for (i in 1:500) {
    print(i)
    try_csv <- try(read.csv(paste('~/Desktop/data saver/weighted_ADMM', i, '.csv', sep = "")),
                   silent = TRUE)
    # inherits() replaces the '%in% class()' idiom; reuse the data already
    # read by try() instead of reading the same file a second time.
    if (!inherits(try_csv, "try-error")) {
      weighted_ADMM1 <- matrix(try_csv[, 2], ncol = 12)
      # Evaluate the B-spline basis at the current grid point and take the
      # fitted value for column 5 of the coefficient matrix.
      B00 <- bsplineS(tgrid[q],
                      knots_eq3(simulate_data_new$time, k = order, m = nknots),
                      norder = order)
      varianceweight[i] <- (B00 %*% weighted_ADMM1)[1, 5]
    }
  }
  varweightq[q] <- var(varianceweight, na.rm = TRUE)
}
#####calculate the variance of relative abundance of unweighted data
## Same computation as the weighted case above, on the regular (unweighted)
## ADMM bootstrap output files.  Relies on knots_eq3(), simulate_data_new,
## order and nknots from the surrounding session (bsplineS is from fda).
tgrid <- seq(0, 0.98, 0.01)
varregularq <- numeric(length(tgrid))     # one variance per grid point
for (q in seq_along(tgrid)) {
  varianceregular <- rep(NA_real_, 500)   # per-replicate fitted values
  for (i in 1:500) {
    print(i)
    try_csv <- try(read.csv(paste('~/Desktop/data saver/regular_ADMM', i, '.csv', sep = "")),
                   silent = TRUE)
    # inherits() replaces the '%in% class()' idiom; reuse the data already
    # read by try() instead of reading the same file a second time.
    if (!inherits(try_csv, "try-error")) {
      weighted_ADMM1 <- matrix(try_csv[, 2], ncol = 12)
      B00 <- bsplineS(tgrid[q],
                      knots_eq3(simulate_data_new$time, k = order, m = nknots),
                      norder = order)
      varianceregular[i] <- (B00 %*% weighted_ADMM1)[1, 5]
    }
  }
  varregularq[q] <- var(varianceregular, na.rm = TRUE)
}
######plot#######
## Overlay the weighted (blue) and unweighted (black) variance curves on
## the evaluation grid.
varq <- seq(0,0.98,0.01)
## NOTE(review): the ggplot object below is returned, not print()ed, so it
## will not render when this file is run via source()/Rscript -- confirm
## the script is meant to be run interactively.
data.frame(varq,varweightq,varregularq)%>%
  ggplot()+
  geom_point(aes(x=varq,y=varweightq),col="blue")+
  geom_point(aes(x=varq,y=varregularq))
####compare beta variance#######
## Collect the bootstrap beta vectors (column 2 of each results file) into
## one matrix per method, one column per successful replicate.  A replicate
## is used only when its regular-ADMM file can be read.
wcols <- list()   # weighted beta vectors, one per replicate
rcols <- list()   # regular beta vectors, one per replicate
for (i in 1:500) {
  try_csv <- try(read.csv(paste('~/Desktop/data saver/regular_ADMM', i, '.csv', sep = "")),
                 silent = TRUE)
  # inherits() replaces the '%in% class()' idiom; reuse the data already
  # read by try() instead of reading the regular file a second time.
  if (!inherits(try_csv, "try-error")) {
    # NOTE(review): the weighted file is read unguarded -- if it is missing
    # while the regular file exists, the loop stops with an error (same as
    # the original code).  Confirm whether it should be wrapped in try().
    weighted_ADMM <- read.csv(paste('~/Desktop/data saver/weighted_ADMM', i, '.csv', sep = ""))
    wcols[[length(wcols) + 1]] <- matrix(weighted_ADMM[, 2])
    rcols[[length(rcols) + 1]] <- matrix(try_csv[, 2])
  }
}
## Bind once at the end (instead of growing with cbind inside the loop) and
## keep the leading all-NA column so the downstream [, -1] / [, c(-1, -70)]
## indexing still lines up with the original layout.
beta_matrix_wighted <- do.call(cbind, c(list(matrix(nrow = 72)), wcols))
beta_matrix_regular <- do.call(cbind, c(list(matrix(nrow = 72)), rcols))
###variance of weighted beta
## Row-wise (per-coefficient) variance across bootstrap replicates.
## Column 1 is the all-NA initialisation column and is dropped.
## NOTE(review): column 70 is also dropped for the weighted matrix only --
## presumably a known bad replicate; confirm.
apply(beta_matrix_wighted[,c(-1,-70)],1,var)
###variance of unweighted beta
apply(beta_matrix_regular[,-1],1,var)
|
5dd910636081d73793eda1b2536b43c225013c62 | 4381cdab9ed7922c2cd16fdebf129fcf95487a2d | /plot4.r | 1cd9987757987e51bc41a3fdfaac28789a9790a0 | [] | no_license | aspiringguru/Exploratory-Data-Analysis-Project-2 | d225d30e95eb2ed0203cc4234f62d47f5e1e6e88 | 4ce1c128a58f8b0dacf6383bd7d95e25a66ebe47 | refs/heads/master | 2016-09-06T14:35:23.924717 | 2015-04-10T00:25:53 | 2015-04-10T00:25:53 | 31,008,075 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,321 | r | plot4.r | ##-----------------------------------------------------------------------------------------------
##Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
##-----------------------------------------------------------------------------------------------
library(reshape)
library(reshape2)
library(downloader)
library(mgcv)
library(ggplot2) ## needed for qplot
library(lattice)
## set working directory
setwd("G:/2015/coursera/data_science/Exploratory Data Analysis/project2/09-4-15/")
## download zip file & unzip
download("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", dest="dataset.zip", mode="wb")
unzip ("dataset.zip")
##
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##length(unique(SCC$Short.Name)) ## [1] 11238
## pseudocode - select SCC$Short.Name with "Coal" or "coal" in name, plot sum of emissions for each year.
## Flag every source classification whose short name mentions coal.
SCC$coal <- grepl("[Cc]oal", SCC$Short.Name)
coalSCC <- subset(SCC, coal, select = SCC)   # keep only the SCC code column
## now select rows from NEI where NEI$SCC is in coalSCC.
NEIcoal <- merge(NEI, coalSCC, by.x="SCC", by.y="SCC", all.x = FALSE, all.y = FALSE)
## quick comparison of data before/after merging.
dim(NEIcoal) ##[1] 53400 6
dim(NEI) ##[1] 6497651 6
dim(coalSCC) ##[1] 239 1
dim(SCC) ##[1] 11717 16
## quick comparison of NEI & NEIcoal incomplete rows (has na values)
NEIcoal[!complete.cases(NEIcoal),]
## [1] SCC       fips      Pollutant Emissions type      year
## <0 rows> (or 0-length row.names)
## Bug fix: the original omitted the row comma (coalSCC[!complete.cases(coalSCC)]),
## which indexes COLUMNS, not rows; with the comma this lists rows with
## missing values (there are none).
coalSCC[!complete.cases(coalSCC), ]
## reusing method from Q1: long format, then sum Emissions by year.
NEIcoalMelt <- melt(NEIcoal, id=c("fips", "SCC", "Pollutant", "type", "year"), measure.vars="Emissions")
NEIcoal.year.emissions <- dcast(NEIcoalMelt, year ~ variable, sum)
## now plot, reuse plot method from Q1 and adjust title
## The same barplot is needed on screen and in the PNG, so define it once
## (the original duplicated the barplot() call verbatim).
plot_coal_emissions <- function() {
  barplot(NEIcoal.year.emissions$Emissions, names = NEIcoal.year.emissions$year,
          xlab = "Year", ylab = "Emissions (PM2.5)",
          main = "Total PM2.5 emission from coal sources")
}
plot_coal_emissions()   # draw on the current device
## now print plot to file
png("plot4.png")
plot_coal_emissions()
dev.off()
0d8df158124818c148e7bf054fa111b71cd340e7 | a8f726a887f60405dbc1714d06ef49787d8f6f0c | /3. getting-cleaing-data/week1/readfor.R | 71a169ef7d88e8e4875779c95867a45664ca3157 | [] | no_license | antitov/Data-Science-Specialization-Coursera | e834e7dbccb63fff8fb182e17f079d84ac96b60f | cb44e01e8894684f252a29a2a39fb94b2e6009c8 | refs/heads/master | 2020-12-26T00:19:50.373157 | 2016-11-01T21:03:49 | 2016-11-01T21:03:49 | 33,913,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 216 | r | readfor.R |
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
download.file(fileURL, destfile = "data.for")
sst <- read.fwf("data.for", widths = c(15,4,9,4,9,4,9,4,4), skip = 4)
file.remove("data.for") |
1822f27fe9efca6e040009bb259d439afedf2439 | 530eea15263a914e4bed5e14291fda1b07939232 | /packages/fsParams/man/getMultiCombinations.Rd | 1743b0ea9f75e31549cd7c8bea8a165619466654 | [] | no_license | seventm/fsproj_alzheimeroptimaldiet | cc3dddfcf79f00df5ae62bbdb7c558d6061b2c9e | ba0b3643e13999735bae57f5f2754db08399ff7f | refs/heads/master | 2020-11-23T23:21:05.141878 | 2019-05-24T10:32:20 | 2019-05-24T10:32:20 | 227,861,901 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 381 | rd | getMultiCombinations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/params.R
\name{getMultiCombinations}
\alias{getMultiCombinations}
\title{Get combination for list of vectors}
\usage{
getMultiCombinations(x)
}
\arguments{
\item{x}{list of vectors}
}
\value{
list with replicated vectors
}
\description{
Get combination for list of vectors
}
\author{
Konrad J. Debski
}
|
26816fdc485870869b313417ffc3199390ec1fe0 | eb99455fad40524d703391d42ccde6eb606dea9c | /code/fig_3.r | 8a6eb073690c12a570900d0b46a4320402807da1 | [
"MIT"
] | permissive | yangxhcaf/traitsyndromes | a61bfe0d5e512a1a3087a0e9db62413c146b42a9 | 41d3d8666a1411187e88a01322f3ec94f2b5eb6f | refs/heads/master | 2022-12-20T00:35:21.500311 | 2020-09-21T14:16:09 | 2020-09-21T14:16:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,912 | r | fig_3.r | # Code for reproducing fig 4 on climate change scenario
#library(ggplot2)
#library(reshape2)
#library(gridExtra)
#library(splitstackshape)
#library(extrafont)
#root = "/home/sonia/Dropbox/Shared_SimonBenateau/MarinaProject"
#setwd(root)
source("code/functions.R")
# Where do I save my figures ?
if(!dir.exists("figures/")) dir.create("figures/")
saveFolder <- "figures/"
#extractData() sequentially extracts information from all result files, which takes some time
repDf <- extractData('simresults/constant/results_file/', extH = TRUE)
repDf1 <- subset(repDf$stoDf, env == 0.6 & ho == 6.0)
repDf2 <- ddply(repDf1, .variables = c("c","f"), meanh)
repDf2$loss <- repDf2$countNoNA/repDf2$count*100
parameters <- read.table('simresults/change/parameters.txt', header =TRUE)
#extractData() sequentially extracts information from all result files, which takes some time
repDfC <- extractData('simresults/change/results_file/', extH = TRUE)
#filter environment
repDfC2 <- ddply(subset(repDfC$stoDf, ho == 6.0) , .variables = c("c","f"), meanh)
repDfC2$loss <- repDfC2$countNoNA/repDfC2$count*100
vegetation <- colorRampPalette(c("#D4D4D4", "darkgreen") )
pdf(paste0(saveFolder,"fig3.pdf"), width = 7, height = 12)
#layout(matrix(c(1:12), ncol = 6, byrow = TRUE), widths = c(8,1,8,1,8,1))
layout(matrix(c(1:15), ncol = 3, byrow = TRUE), widths = c(5,5,2))
par(oma = c(2,2,5,1))
marlegend <- c(3,2,5,4)
# --
plotMatrixCF(repDf2,"group", range = c(0,8))
mtext('constant climate', adj = 0, cex = 1.2, at = 0, line = 2.5)
mtext('E = 0.6', adj = 0, cex = 1, at = 0, line = 0.5)
mtext('A', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 2, line = 3.5, 'local competition')
mtext(side = 2, line = 2, expression( italic(c[l])))
plotMatrixCF(repDfC2,"group", range = c(0,8))
mtext('F', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext('climate change', adj = 0, cex = 1.2, at = 0, line = 2.5)
#mtext(side = 1, line = 2.5, expression(italic(f)))
plot_colscale(repDf2$group,
colpal = rev(heat.colors(8)), range = c(0,8), mar = marlegend)
mtext('number of \n ecotypes', adj = 0, cex = 0.8, at = 0, line = 1)
# --
plotMatrixCF(repDf2,"h", rainbow(32), range = c(0, 15))
mtext('B', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 2, line = 3.5, 'local competition')
mtext(side = 2, line = 2, expression( italic(c[l])))
plotMatrixCF(repDfC2,"h", rainbow(32), range = c(0, 15))
mtext('G', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
plot_colscale(repDf2$h,
colpal = rainbow(32), range = c(0,15),mar = marlegend)
mtext('average \n trait value', adj = 0, cex = 0.8, at = 0, line = 1)
# --
plotMatrixCF(repDf2,"loss", range = c(0,100), gray.colors(10))
mtext('C', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 2, line = 3.5, 'local competition')
mtext(side = 2, line = 2, expression( italic(c[l])))
plotMatrixCF(repDfC2,"loss", range = c(0,100), gray.colors(10))
mtext('H', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
plot_colscale(repDfC2$loss,
colpal = (gray.colors(10)), at = c(0,25,50,75), labels = c('0%', '25%', '50%', '75%'), mar = marlegend)
mtext('stability of \n vegetation', adj = 0, cex = 0.8, at = 0, line = 1)
# --
repDf2$clus_cutoff <- repDf2$clus
repDf2$clus_cutoff[repDf2$clus >= 1.2 & !is.na(repDf2$clus)] <- 1.1999
repDfC2$clus_cutoff <- repDfC2$clus
repDfC2$clus_cutoff[repDfC2$clus >= 1.2 & !is.na(repDfC2$clus)] <- 1.1999
plotMatrixCF(repDf2,"clus_cutoff", rev(heat.colors(6)),steps = 6, range = c(0.9,1.2))
mtext('D', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 2, line = 3.5, 'local competition')
mtext(side = 2, line = 2, expression( italic(c[l])))
plotMatrixCF(repDfC2,"clus_cutoff",rev(heat.colors(6)), steps = 6, range = c(0.9,1.2))
mtext('I', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
plot_colscale(repDf2$clus_cutoff,
colpal = rev(heat.colors(6)), range = c(0.9,1.2), mar = marlegend, labels = c("0.9","1.0", "1.1", expression("">=1.2)), at = c(0.9,1,1.1,1.2))
mtext('average \n clustering', adj = 0, cex = 0.8, at = 0, line = 1)
# --
plotMatrixCF(repDf2,"rho", vegetation(5), steps = 5, range = c(0, 1))
mtext('E', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 2, line = 3.5, 'local competition')
mtext(side = 2, line = 2, expression( italic(c[l])))
mtext(side = 1, line = 2, expression(italic(f)))
mtext(side = 1, line = 3.5, 'local facilitation')
plotMatrixCF(repDfC2,"rho", vegetation(5), steps = 5, range = c(0, 1))
mtext('J', adj = 0, cex = 1.5, at = -0.35, line = -1.2)
mtext(side = 1, line = 2, expression(italic(f)))
mtext(side = 1, line = 3.5, 'local facilitation')
plot_colscale(repDfC2$h,
colpal = vegetation(5), range = c(0,1),mar = marlegend, labels = c('0%', '20%', '40%', '60%', '80%', '100%'), at = seq(0,1,0.2))
mtext('average \n veget. cover', adj = 0, cex = 0.8, at = 0, line = 1)
dev.off()
|
2d393be35ad38cbcdd6b5917d957d796e982c062 | 29ce175254b27a361714074b99fe5644ca2c3158 | /man/links.delaunay.Rd | baaa52c4a1f716450683dddc62b5b72c1688c87f | [] | no_license | raz1/Momocs | 148903d3f05428e72d7cef8ede46c0bba7f80f04 | 09a817bb0720d87969d48dd9e0f16516e042e13e | refs/heads/master | 2021-01-17T18:09:55.724182 | 2014-08-15T14:49:17 | 2014-08-15T14:49:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 625 | rd | links.delaunay.Rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{links.delaunay}
\alias{links.delaunay}
\title{Create links (Delaunay triangulation) between landmarks}
\usage{
links.delaunay(coo)
}
\arguments{
\item{coo}{a matrix (or a list) of (x; y) coordinates}
}
\value{
a matrix that can be passed to \link{ldk.links}, etc. The columns
are the row ids of the original shape.
}
\description{
Create links (Delaunay triangulation) between landmarks
}
\details{
uses \link{delaunayn} in the \code{geometry} package.
}
\examples{
data(wings)
w <- wings[1]
coo.plot(w, poly=FALSE)
links <- links.delaunay(w)
ldk.links(w, links)
}
|
2555483b2f01c788c44112f92e8da311cf786f8c | d062ed2119a3cee565eaf62931b6b963ac1f550d | /R/Dataset_data_task.R | f810c690430a06174cac183fd92fbea59f3db464 | [] | no_license | FredHutch/JDRFCAV | b06927fad7f9e83823689f8af1ed17857de68418 | 905178ca1d4bbf11365c894e229fec7accf8170b | refs/heads/master | 2020-04-11T10:09:42.301715 | 2019-06-28T22:24:56 | 2019-06-28T22:24:56 | 161,705,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 301 | r | Dataset_data_task.R | #' Dataset, Multi assay data in mlr's task format
#'
#' mlr's task (NO functionals implemented YET), therefore, use sep to at analyte name to exatract assay annotation
#'
#' @format mlr's task
#' \describe{
#' \item{data_task}{task}
#' ...
#' }
#' @source JDRF multi center efforts
"data_task"
|
4b210bf13ebbd7448d2a3e6140048e971c5d86a6 | 58f46e3a93ef5369ad8dc73ea3b21db829f1dcac | /man/cs_get_cert.Rd | b6ea4417d2543b36a5379a20d9bfb2d2a6b105a4 | [] | no_license | firebitsbr/sslsaran | 4702023b7eae80a60e95e8021aa7ee67b488e332 | 059dce4fb35e18596642216e05895ca3e18f9e6c | refs/heads/master | 2020-04-07T21:23:27.975802 | 2018-02-20T17:51:43 | 2018-02-20T17:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 968 | rd | cs_get_cert.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cs-get-cert.r
\name{cs_get_cert}
\alias{cs_get_cert}
\title{Get Certificate Object}
\usage{
cs_get_cert(sha256, as = c("df", "pem", "der"),
sslmate_api_key = Sys.getenv("SSLMATE_API_KEY"))
}
\arguments{
\item{sha256}{hex-encoded SHA-256 digest of the (pre-)certificate.}
\item{as}{determines the return type. \code{df} returns a data frame, \code{pem} returns
an X.509 cert in PEM format (\code{raw}), \code{der} returns a DER encoded X.509 cert
that is decoded with \code{\link[openssl:read_cert]{openssl::read_cert()}}.}
\item{sslmate_api_key}{(optional) See \code{Note}.}
}
\description{
Uses the \href{https://sslmate.com/certspotter/api}{sslmate Cert Spotter API} to
retrieve the certificate object for a given SHA256 hash.
}
\note{
Unauthenticated access to the API may be subject to rate limits.
Get an API key \href{https://sslmate.com/account/api_credentials?login=1}{here}.
}
|
785d83018bfeac06bad2d8d0870c0ab8d36728fe | 843604a4d2f1a4eb21f576f098ec3c8c3be096c6 | /R/61_model_checks_coefs.R | 2f0f1207facbaac761cc2e1efa3591e0e749af2f | [] | no_license | bmait101/swass | bdc8bc27db591cf379219a97f7fe815157af000d | faeaaddcd3774ff207b520712317be9615777510 | refs/heads/main | 2023-04-18T21:52:32.101132 | 2023-02-07T16:42:55 | 2023-02-07T16:42:55 | 476,374,093 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,625 | r | 61_model_checks_coefs.R |
theme_clean <- function() {
theme_minimal(base_family = "sans", base_size = 12) +
theme(
plot.background = element_rect(fill = "white", color = NA),
panel.background = element_rect(fill = "white", color = "black",
size = .5),
panel.border = element_rect(fill = NA, color = "black", size = .5),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(linetype = 2, size = 0.25, color = "grey"),
panel.grid.minor.y = element_blank(),
panel.spacing = unit(.5, "lines"),
axis.ticks = element_line(size = 0.5, color = "black"),
axis.ticks.length = unit(.25, 'cm'),
strip.text = element_blank(),
strip.background = element_blank(),
legend.margin = margin(0,0,0,0,'cm'),
legend.position = "none"
)
}
theme_set(theme_clean())
theme_set(theme_bw())
# Prep =================
vars_f <- tibble(
params = factor(vars, levels = c(
"b_mean.tmax_summer",
"b_mean.tmax_autumn",
"b_mean.tmax_winter",
"b_mean.tmax_spring",
"b_Imean.tmax_springE2",
"b_total.prcp_summer",
"b_total.prcp_autumn",
"b_total.prcp_winter",
"b_total.prcp_spring",
"b_Itotal.prcp_springE2"
)))
vars2_f <- tibble(
params = factor(vars2, levels = c(
"b_mean.tmax_summer:latitude_s",
"b_mean.tmax_autumn:latitude_s",
"b_mean.tmax_winter:latitude_s",
"b_mean.tmax_spring:latitude_s",
"b_Imean.tmax_springE2:latitude_s",
"b_total.prcp_summer:latitude_s",
"b_total.prcp_autumn:latitude_s",
"b_total.prcp_winter:latitude_s",
"b_total.prcp_spring:latitude_s",
"b_Itotal.prcp_springE2:latitude_s"
)))
# Extract Coefficients and posterior samples/draws
draws.b.bkt <- bkt.mod %>% gather_draws(`b_.*`, regex = TRUE)
draws.b.bnt <- bnt.mod %>% gather_draws(`b_.*`, regex = TRUE)
# draws.r.bkt <- bkt.mod %>% gather_draws(r_reach_id[reachid,covar])
# draws.r.bnt <- bnt.mod %>% gather_draws(r_reach_id[reachid,covar])
# draws.sd.bkt <- bkt.mod %>% gather_draws(`sd_.*`, regex = TRUE)
# draws.sd.bnt <- bnt.mod %>% gather_draws(`sd_.*`, regex = TRUE)
# draws.sds.bkt <- bkt.mod %>% gather_draws(`sds_.*`, regex = TRUE)
# draws.sds.bnt <- bnt.mod %>% gather_draws(`sds_.*`, regex = TRUE)
# draws.shape.bkt <- bkt.mod %>% gather_draws(`shape.*`, regex = TRUE)
# draws.shape.bnt <- bnt.mod %>% gather_draws(`shape.*`, regex = TRUE)
# Hypothesis tests
bkt.hyp.post <- c(
hypothesis(bkt.mod, 'mean.tmax_summer<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_autumn>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_winter<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_spring<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'Imean.tmax_springE2<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_summer>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_autumn<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_winter<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_spring<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'Itotal.prcp_springE2<0')$hypothesis[[7]]
)
bkt.hyp.post2 <- c(
hypothesis(bkt.mod, 'mean.tmax_summer:latitude_s>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_autumn:latitude_s<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_winter:latitude_s>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'mean.tmax_spring:latitude_s<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'Imean.tmax_springE2:latitude_s<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_summer:latitude_s>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_autumn:latitude_s<0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_winter:latitude_s>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'total.prcp_spring:latitude_s>0')$hypothesis[[7]],
hypothesis(bkt.mod, 'Itotal.prcp_springE2:latitude_s>0')$hypothesis[[7]]
)
# Hypothesis tests
bnt.hyp.post <- c(
hypothesis(bnt.mod, 'mean.tmax_summer>0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_autumn>0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_winter<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_spring>0')$hypothesis[[7]],
hypothesis(bnt.mod, 'Imean.tmax_springE2<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_summer>0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_autumn<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_winter<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_spring<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'Itotal.prcp_springE2<0')$hypothesis[[7]]
)
bnt.hyp.post2 <- c(
hypothesis(bnt.mod, 'mean.tmax_summer:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_autumn:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_winter:latitude_s>0')$hypothesis[[7]],
hypothesis(bnt.mod, 'mean.tmax_spring:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'Imean.tmax_springE2:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_summer:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_autumn:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_winter:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'total.prcp_spring:latitude_s<0')$hypothesis[[7]],
hypothesis(bnt.mod, 'Itotal.prcp_springE2:latitude_s>0')$hypothesis[[7]]
)
hyp.post <- tibble(
params = rep(vars_f$params, 2),
species = c(rep("brook_trout",10),rep("brown_trout",10)),
hyp = c(bkt.hyp.post, bnt.hyp.post)
)
hyp.post2 <- tibble(
params = rep(vars2_f$params, 2),
species = c(rep("brook_trout",10),rep("brown_trout",10)),
hyp = c(bkt.hyp.post2, bnt.hyp.post2)
)
# Plot ==========
p.coefs <- draws.b.bkt %>%
mutate(species = "brook_trout") %>%
bind_rows(draws.b.bnt %>% mutate(species = "brown_trout")) %>%
filter(.variable %in% vars) %>%
group_by(species, .variable) %>%
mean_qi(mean = .value, .width = c(.8)) %>%
left_join(hyp.post, by = c(".variable"="params","species")) %>%
mutate(.variable = factor(
.variable,
levels = levels(vars_f$params),
labels = c(
'Summer max temperature',
'Autumn max temperature',
'Winter max temperature',
'Spring max temperature',
'Spring max temperature^2',
'Summer precipitation',
'Autumn precipitation',
'Winter precipitation',
'Spring precipitation',
'Spring precipitation^2'
))) %>%
ggplot(aes(y = .variable, x = mean, xmin = .lower, xmax = .upper)) +
facet_wrap(vars(species)) +
geom_pointinterval(aes(fill = hyp), shape = 21, size = 5) +
geom_vline(xintercept = 0, color = "black", size = .5) +
scale_fill_gradient(low = "white", high = "red") +
scale_y_discrete(limits=rev) +
labs(y = "", x = "Effect size")
p.coefs
p.coefs.int <- draws.b.bkt %>%
mutate(species = "brook_trout") %>%
bind_rows(draws.b.bnt %>% mutate(species = "brown_trout")) %>%
filter(.variable %in% vars2) %>%
group_by(species, .variable) %>%
mean_qi(mean = .value, .width = c(.8)) %>%
mutate(.variable = factor(.variable, levels = levels(vars2_f$params))) %>%
left_join(hyp.post2, by = c(".variable"="params","species")) %>%
mutate(.variable = factor(
.variable,
levels = levels(vars2_f$params),
labels = c(
'Summer temperature:latitude_s',
'Autumn temperature:latitude_s',
'Winter temperature:latitude_s',
'Spring temperature:latitude_s',
'Spring temperature^2:latitude_s',
'Summer precipitation:latitude_s',
'Autumn precipitation:latitude_s',
'Winter precipitation:latitude_s',
'Spring precipitation:latitude_s',
'Spring precipitation^2:latitude_s'
))) %>%
ggplot(aes(y = .variable, x = mean, xmin = .lower, xmax = .upper)) +
facet_wrap(vars(species)) +
geom_pointinterval(aes(fill = hyp), shape = 21, size = 5) +
geom_vline(xintercept = 0, color = "black", size = .5) +
scale_fill_gradient(low = "white", high = "red") +
scale_y_discrete(limits=rev) +
labs(y = "", x = "Effect size")
p.coefs.int
p.coef.panel <- p.coefs / p.coefs.int
p.coef.panel
ggsave(here("output","figs","brms_coef_plot.png"),
device=ragg::agg_png, res=300, height = 6, width = 8)
ggsave(here("output","figs","brms_coef_plot.pdf"),
device=cairo_pdf, height = 6, width = 8)
# save plot
path <- here::here("output","figs1","fig3_coef_plot")
ggsave(
glue::glue("{path}.pdf"),
plot = p.coef.panel,
width = 8,
height = 6,
device = cairo_pdf
)
# manually add fish images then covert
pdftools::pdf_convert(
pdf = glue::glue("{path}.pdf"),
filenames = glue::glue("{path}.png"),
format = "png",
dpi = 600
)
|
9b649a2dbc759186057cf73d74ad1bf7ba97ed82 | b29ab6b419491c455d362b370c2ee54e551c3dbd | /Quadrat_Analysis_example.R | 44213b149546d3d945bae90c9f3fce635d616e5d | [] | no_license | Junkai0727/GIS | 7218b62ca49f6a523615a67dcc338229b9a60c27 | c8ea36c14752fe18bea472cf94d78c2761c5cba7 | refs/heads/main | 2023-02-16T05:06:44.842971 | 2021-01-09T12:43:48 | 2021-01-09T12:43:48 | 326,631,828 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,920 | r | Quadrat_Analysis_example.R | library(spatstat)
library(here)
library(sp)
library(rgeos)
library(maptools)
library(GISTools)
library(tmap)
library(sf)
library(geojson)
library(geojsonio)
library(tmaptools)
# ---- Quadrat analysis of London blue plaques ----
# Load the London borough polygons from the local shapefile.
LondonBoroughs <- st_read(here::here("DATA", "statistical-gis-boundaries-london", "ESRI", "London_Borough_Excluding_MHW.shp"))
library(stringr)
# Keep only true London boroughs (GSS codes starting with "E09") and project
# to British National Grid (EPSG:27700) so coordinates are in metres.
BoroughMap <- LondonBoroughs %>%
  dplyr::filter(str_detect(GSS_CODE, "^E09"))%>%
  st_transform(., 27700)
qtm(BoroughMap)
summary(BoroughMap)
# Read the blue-plaque points. NOTE: the web read is immediately overwritten
# by the local copy on the next line (kept here as a reference/fallback).
BluePlaques <- st_read("https://s3.eu-west-2.amazonaws.com/openplaques/open-plaques-london-2018-04-08.geojson")
BluePlaques <- st_read(here::here("DATA",
                                  "open-plaques-london-2018-04-08.geojson")) %>%
  st_transform(.,27700)
summary(BluePlaques)
# Interactive map of all plaques over the borough polygons.
tmap_mode("view")
tm_shape(BoroughMap) +
  tm_polygons(col = NA, alpha = 0.5) +
  tm_shape(BluePlaques) +
  tm_dots(col = "blue")
library(tidyverse)
library(sf)
# Drop duplicate plaque records, then clip points to the borough extent
# (sf's `points[polygons, ]` performs a spatial subset).
BluePlaques <- distinct(BluePlaques)
BluePlaquesSub <- BluePlaques[BoroughMap,]
#check to see that the duplicates / out-of-boundary points have been removed
tmap_mode("view")
tm_shape(BoroughMap) +
  tm_polygons(col = NA, alpha = 0.5) +
  tm_shape(BluePlaquesSub) +
  tm_dots(col = "blue")
# Focus the analysis on a single borough: Harrow.
Harrow <- BoroughMap %>%
  filter(., NAME=="Harrow")
#Check to see that the correct borough has been pulled out
tm_shape(Harrow) +
  tm_polygons(col = NA, alpha = 0.5)
BluePlaquesSub <- BluePlaques[Harrow,]
#check that the clip to Harrow worked
tmap_mode("view")
tm_shape(Harrow) +
  tm_polygons(col = NA, alpha = 0.5) +
  tm_shape(BluePlaquesSub) +
  tm_dots(col = "blue")
#now set a spatstat observation window from the borough boundary
window <- as.owin(Harrow)
plot(window)
#create a ppp (planar point pattern) object; convert sf -> sp to get @coords
BluePlaquesSub<- BluePlaquesSub %>%
  as(., 'Spatial')
BluePlaquesSub.ppp <- ppp(x=BluePlaquesSub@coords[,1],
                          y=BluePlaquesSub@coords[,2],
                          window=window)
# (prints the x coordinates; diagnostic output only)
BluePlaquesSub@coords[,1]
BluePlaquesSub.ppp %>%
  plot(.,pch=16,cex=0.5, 
       main="Blue Plaques Harrow")
# Kernel density surface of the pattern with a 1 km bandwidth.
BluePlaquesSub.ppp %>%
  density(., sigma=1000) %>%
  plot()
#First plot the points
plot(BluePlaquesSub.ppp,
     pch=16,
     cex=0.5, 
     main="Blue Plaques in Harrow")
#now count the points that fall in a 6 x 6
#grid overlaid across the window
BluePlaquesSub.ppp %>%
  quadratcount(.,nx = 6, ny = 6)%>%
  plot(., add=T, col="red")
#run the quadrat count: frequency table of "number of plaques per quadrat"
Qcount <- BluePlaquesSub.ppp %>%
  quadratcount(.,nx = 6, ny = 6) %>%
  as.data.frame() %>%
  dplyr::count(Var1=Freq)%>%
  dplyr::rename(Freqquadratcount=n)
# Check the column types before doing arithmetic on them.
Qcount %>% 
  summarise_all(class)
sums <- Qcount %>%
  #calculate the total blue plaques (Var * Freq)
  mutate(total = Var1 * Freqquadratcount) %>%
  dplyr::summarise(across(everything(), sum))%>%
  dplyr::select(-Var1) 
lambda<- Qcount%>%
  #calculate lambda, the mean plaques per quadrat (the Poisson rate)
  mutate(total = Var1 * Freqquadratcount)%>%
  dplyr::summarise(across(everything(), sum)) %>%
  mutate(lambda=total/Freqquadratcount) %>%
  dplyr::select(lambda)%>%
  pull(lambda)
# Expected frequencies under a Poisson(lambda) process for each count class.
QCountTable <- Qcount %>%
  mutate(Pr=((lambda^Var1)*exp(-lambda))/factorial(Var1))%>%
  #now calculate the expected counts based on our total number of plaques
  #and save them to the table
  mutate(Expected= (round(Pr * sums$Freqquadratcount, 0)))
#Compare the frequency distributions of the observed and expected point patterns
plot(c(1,10),c(0,14), type="n",
     xlab="Number of Blue Plaques (Red=Observed,Blue=Expected)", 
     ylab="Frequency of Occurances")
points(QCountTable$Freqquadratcount, 
       col="Red", 
       type="o", 
       lwd=3)
points(QCountTable$Expected, 
       col="Blue",
       type="o", 
       lwd=3)
# Chi-squared test of complete spatial randomness over the 6 x 6 quadrats.
teststats <- quadrat.test(BluePlaquesSub.ppp, nx = 6, ny = 6)
plot(BluePlaquesSub.ppp,pch=16,cex=0.5, main="Blue Plaques in Harrow")
plot(teststats, add=T, col = "red")
|
1a5d85718a255760729e31ba016d82a92fad43b8 | d4288dc5baf4ca0f59fe414f55b1ad7a442abc7e | /man/plot_captured_demog.Rd | 41cdb3de887e5bfe7b96886cca5c7fb2a8a40681 | [] | no_license | courtiol/SileR | c9c3d3e091d207c685bb6729ceb6aaf8fdb7207e | e914b6090aba8a8eacad5feec0839d8a52625035 | refs/heads/master | 2021-06-03T17:23:15.030331 | 2020-05-09T12:54:34 | 2020-05-09T12:54:34 | 130,906,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 635 | rd | plot_captured_demog.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/figures.R
\name{plot_captured_demog}
\alias{plot_captured_demog}
\title{Plot demographic information for the captured elephants}
\usage{
plot_captured_demog(data, save_pdf = FALSE)
}
\arguments{
\item{data}{The dataset to be used}
\item{save_pdf}{A boolean indicating whether to save the plot as a *.pdf file}
}
\value{
Nothing, the function only plots.
}
\description{
This is a main function of this package. It plots information about when
elephants have been captured.
}
\examples{
plot_captured_demog(ElesCaptured)
}
\seealso{
ElesCaptured
}
|
3f993bc6762c7f20f11b06390d8248dd9afeaf9f | 34044ff876022bb64b70b5d73aa3e3545f8ca9f1 | /.Rprofile | b4eda612c67525919ec4e04fe0b5b0fd8f500b3c | [] | no_license | popgenomics/configFiles | 8aaf66edcce63bc7e6dddc666ab97929004840c5 | 54dbe2198af6cb6c195b99b6b813b1800fad83c4 | refs/heads/master | 2021-01-17T05:06:47.472557 | 2019-03-22T14:32:40 | 2019-03-22T14:32:40 | 58,917,186 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,978 | rprofile | .Rprofile | library(colorout)
setOutputColors256(202, 214, 209, 184, 172, 179, verbose=FALSE)
options(width=160)
library(txtplot)
library(NostalgiR)
# Empirical mode of a numeric sample: the x-position at which a kernel
# density estimate of `x` peaks. NOTE: this masks base::mode().
mode <- function(x) {
	kde <- density(x)
	kde$x[which(kde$y == max(kde$y))]
}
# Keep the values of `x` lying strictly inside the central P% of the
# empirical distribution (e.g. P = 90 keeps everything between the 5% and
# 95% quantiles). Despite the name, this is an equal-tailed interval, not a
# true highest-posterior-density interval.
HPD <- function(x, P) {
	tail_prob <- (100 - P) / 200
	lower <- quantile(x, tail_prob)
	upper <- quantile(x, 1 - tail_prob)
	x[which(x > lower & x < upper)]
}
source("~/Programmes/R/twobar.R")
#fonction pour mieux visualiser une relation avec un très grand nombre de points
# Binned scatterplot for very large point clouds: drops paired NAs, sorts by
# `a`, cuts the data into bins of ~`quantiles`% of the observations, and plots
# the per-bin mean of `b` against the per-bin mean of `a`, with +/- 1 sd bars
# and a loess smoother (red line). `span`, `cex*`, `ylim`, `xlim`, `lwd`
# are passed through to loess()/plot().
plotbin=function(a, b, quantiles=5, nameA=NULL, nameB=NULL, main="", span=0.5, cex=1, cex.lab=1, ylim=c(0,1), xlim=c(min(a), max(a)), lwd=1, cex.axis=1){
	# a = x-variable (e.g. expression level); b = y-variable (e.g. Fop)
	# Collect the indices of NA entries in either vector and drop those
	# positions from both, so a and b stay paired.
	tmp.a=na.omit(a)
	tmp.a=na.action(tmp.a)
	tmp.b=na.omit(b)
	tmp.b=na.action(tmp.b)
	tmp=union(as.vector(tmp.a), as.vector(tmp.b))
	if(length(tmp)>0){
		a=a[-tmp]
		b=b[-tmp]
	}
	L=length(a)
	binsize=round(L*quantiles/100)
	# Shrink the usable length until it is an exact multiple of binsize,
	# so every bin has the same number of observations.
	val=L
	while(val%%binsize!=0){
		val=val-1
	}
	bins=seq(from=L-val, to=L, by=binsize)
	cov.ordered=order(a)
	res.fop=res.cov=sd.fop=NULL
	# Per-bin mean of a, mean of b, and sd of b (vectors grown in the loop).
	for(i in 1:(length(bins)-1)){
		sub.tmp=cbind(a,b)[cov.ordered,][(bins[i]:(bins[i+1]-1)),]
		res.cov=c(res.cov, mean(as.numeric(sub.tmp[,1]), na.rm=T))
		res.fop=c(res.fop, mean(as.numeric(sub.tmp[,2]), na.rm=T))
		sd.fop=c(sd.fop, sd(as.numeric(sub.tmp[,2]), na.rm=T))
	}
	# Earlier plotting variants kept for reference:
#	plot(res.cov, res.fop,xlab="expression", ylab=expression(F["op"]), pch=16, col="black")
#	plot(res.cov, res.fop,xlab=nameA, ylab=nameB, main=main, pch=16, col="black", ylim=c(c(min(res.fop, sd.fop)), max(c(res.fop, sd.fop))))
#	plot(res.cov, res.fop,xlab=nameA, ylab=nameB, cex.lab=cex.lab, main=main, pch=16, col="black", ylim=c(min(res.fop)-max(sd.fop),max(res.fop)+max(sd.fop)), cex=cex)	#THE GOOD
	plot(res.cov, res.fop,xlab=nameA, ylab=nameB, main=main, pch=16, col="black", ylim=ylim, xlim=xlim, cex=cex, cex.lab=cex.lab, cex.axis=cex.axis)
	# Vertical +/- 1 sd error bars for each bin.
	for(i in 1:(length(bins)-1)){
		segments(res.cov[i], res.fop[i]+sd.fop[i], res.cov[i], res.fop[i]-sd.fop[i])
	}
	mod=loess(res.fop~res.cov, span=span)
	lines(res.cov, fitted(mod), col="red", lwd=lwd)
}
#fonction image avec couleur spéciale pour les cases "nan"
# image() wrapper that gives dedicated colours to NA cells and to values
# outside `zlim`: three sentinel z-values are appended just past the colour
# range and mapped to na.color / outside.below.color / outside.above.color.
image.nan=function(z,  zlim, col, na.color='gray', outside.below.color='black', outside.above.color='white',...)
{
	zstep <- (zlim[2] - zlim[1]) / length(col); # step in the color palette
	newz.below.outside <- zlim[1] - 2 * zstep # new z for values below zlim
	newz.above.outside <- zlim[2] + zstep # new z for values above zlim
	newz.na <- zlim[2] + 2 * zstep # new z for NA
	z[which(z<zlim[1])] <- newz.below.outside # recode values below the range
	z[which(z>zlim[2])] <- newz.above.outside # recode values above the range
	z[which(is.na(z>zlim[2]))] <- newz.na # same for NA cells
	zlim[1] <- zlim[1] - 2 * zstep # extend lower limit to include below value
	zlim[2] <- zlim[2] + 2 * zstep # extend top limit to include the two new values above and na
	col <- c(outside.below.color, col[1], col, outside.above.color, na.color) #correct by including col[1] at bottom of range
	image(z=z, zlim=zlim, col=col, ...) # we finally call image(...)
}
# One-tailed empirical p-value of the observation `obs` against the null
# distribution `ntrl`: the fraction of null values more extreme than `obs`,
# taken on whichever side of the null median `obs` falls.
# (The defaults `x`/`a` refer to objects in the calling environment.)
pvalue <- function(obs = x, ntrl = a) {
	n <- length(ntrl)
	if (obs < median(ntrl)) {
		length(which(ntrl < obs)) / n
	} else {
		length(which(ntrl > obs)) / n
	}
}
# Weighted sampling with replacement: draw `size` elements from `data`,
# where `weights` gives the relative probability of each element. Counts come
# from a single multinomial draw, so the result is grouped (not shuffled):
# each element of `data` is repeated as many times as it was drawn.
# Example: newSample(data = letters[1:10], size = 5, weights = 1:10)
newSample <- function(data, size, weights) {
	counts <- rmultinom(1, size, weights)
	rep(data, counts)
}
# Expected FST in a finite-island model with extinction/recolonization,
# derived from the equilibrium probability of identity within demes (QR)
# and corrected for the number of sampled gene copies.
# nIndPerDem  diploid individuals per deme (2N gene copies)
# extinction  per-generation deme extinction rate
# nRecolonizer number of recolonizing individuals after an extinction
# migration   migration rate
# NOTE: `nDemes` is currently unused; it is kept for interface compatibility.
fst <- function(nDemes, nIndPerDem, extinction, nRecolonizer, migration) {
	twoN <- 2 * nIndPerDem
	twoK <- 2 * nRecolonizer
	# Numerator / denominator of the identity-probability recursion.
	numQ <- 1 / twoN + extinction / twoK - extinction / (twoK * twoN)
	denomQ <- 1 - (1 - 1 / twoN) *
		((1 - migration)^2 * (1 - extinction) +
		 extinction * (1 - 1 / twoK) * 1 / (twoK - 1))
	QR <- numQ / denomQ
	# Convert the identity probability into FST (sample-size correction).
	(QR - 1 / twoN) * twoN / (twoN - 1)
}
# Overlay simulated FST values on the analytical expectation from fst().
# x: data frame of simulation results -- assumed to carry the columns
#    extRate, migRate, Fwc_ST, nDemes, nInd, recolonization (TODO confirm).
# e: extinction rate to condition on.
testFst=function(x, e){
	y = x[which(x$extRate == e), ]
	maxMig = max(y$migRate)
	# Observed FST per migration rate for this extinction rate.
	plot(y$migRate, y$Fwc_ST, main=paste("ext.rate = ", e, sep=""), xlab = "N.m", ylab = expression(F["ST"]), pch=16, cex=1.25, ylim = c(0, 1))
	# Analytical curve over a grid of migration rates (rescaled by maxMig).
	lines((0:(10*maxMig))/10, fst(y$nDemes[1], y$nInd[1], e, y$recolonization[1], (0:(10*maxMig))/10/maxMig), col="red")
}
# Two-dimensional density plot: bins the scatter (vec1, vec2) on a grid and
# shades each cell with black of alpha proportional to its relative density
# (darkest cell = highest density). Optionally draws an alpha-gradient legend
# (leg), highlights the densest cell (hpoint/hcol), and returns the binning
# (return.data). xbks/ybks may be "auto" (breaks derived from hist(), refined
# by `space`) or explicit numeric break vectors.
# Returns (only when return.data = TRUE) a list with components
# xs (x breaks), ys (y breaks), density (cell fraction matrix).
twoden = function(vec1,vec2,xbks="auto",ybks="auto",space=1,L=0,xl="",yl="",mn="",lowx="",hix="",lowy="",hiy="",limy=0,hpoint=F,hcol="white",leg=T,return.data=F){
	total = length(vec1)
	# BUGFIX: use identical() -- `xbks == "auto"` is a vectorized comparison
	# that breaks if the caller passes a numeric break vector.
	if(identical(xbks, "auto")){
		xs = hist(vec1, plot=F)$breaks
		bklongx = space*length(xs)
		xs = hist(vec1, plot=F, breaks=bklongx)$breaks
	} else {
		xs = xbks
	}
	if(identical(ybks, "auto")){
		ys = hist(vec2, plot=F)$breaks
		bklongy = space*length(ys)
		ys = hist(vec2, plot=F, breaks=bklongy)$breaks
	} else {
		ys = ybks
	}
	comb = data.frame(vec1, vec2)
	library(grDevices)
	# dens[i, j] = fraction of points in cell (i, j); half-open cells
	# [xs[i], xs[i+1]) x [ys[j], ys[j+1]). (Renamed from `c`, which masked
	# base::c inside this function.)
	dens = matrix(0, (length(xs)-1), (length(ys)-1))
	for( i in 1:(length(xs)-1)){
		for( j in 1:(length(ys)-1)){
			ay = subset(comb, comb$vec1 >= xs[i])
			bee = subset(ay, ay$vec1 < xs[i+1])
			cee = subset(bee, bee$vec2 >= ys[j])
			d = subset(cee, cee$vec2 < ys[j+1])
			dens[i,j] = length(d$vec2)/total
		}
	}
	if(leg==T){
		# Main panel on the left, narrow legend strip on the right.
		layout(matrix(c(1,2),2,2,byrow=TRUE), c(3.75,.5), TRUE)
		par(mar=c(6,5,1.5,1.5))
	}
	# "" sentinels mean "use the break range" for the plot limits.
	if(lowy==""){ lowy=ys[1] }
	if(lowx==""){ lowx=xs[1] }
	if(hix==""){ hix=xs[length(xs)] }
	if(hiy==""){ hiy=ys[length(ys)] }
	plot(vec1[1]~vec2[1],col="white",xlim=c(lowx,hix),ylim=c(lowy,hiy),xlab=xl,ylab=yl,main=mn)
	for( i in 1:(length(xs)-1)){
		for( j in 1:(length(ys)-1)){
			den = dens[i,j]/max(dens)
			rect(xs[i],ys[j],xs[i+1],ys[j+1],border=rgb(red=0,blue=0,green=0,alpha=L),col=(rgb(red=0,blue=0,green=0,alpha=den)))
			if(den==1 && hpoint==T){ points((xs[i+1]+xs[i])/2,(ys[j+1]+ys[j])/2,pch=19,col=hcol) }
		}
	}
	if(leg==T){
		# Empty panel, then a vertical alpha gradient as the density legend.
		par(mar=c(8,0,6,4))
		plot(0:10/10,0:10/10,ylim=c(0,1),xlab="",xaxt="n",cex.main=0.5,yaxt="n",ylab="", cex=0,cex.lab=1)
		for(i in 1:99){ rect(0,(i-1)/100,1,(i+2)/100,lwd=0,lty=0, col=rgb(red=0,blue=0,green=0,alpha=i/100)) }
		axis(side=4,at=0:10/10,cex.axis=0.8)
		text(4,0.5, srt = 90, labels = "relative density", xpd = TRUE)
		rect(0,0.99,1,1.05,col=rgb(red=0,blue=0,green=0,alpha=1),lty=0, lwd=0)
	}
	# BUGFIX: the original `return(xs, ys, c)` is invalid R (multi-argument
	# returns are an error); return the pieces as a named list instead.
	if(return.data==T){ return(list(xs = xs, ys = ys, density = dens)) }
}
# Plot a quantiSex simulation trace: mean female allocation in cosexuals
# (black) and the cosexual proportion (red) over generations, with a side
# legend. NOTE: `sex` is only defined for sexSystem in {0, 1, 2}; any other
# value makes the subsequent plot() fail with "object 'sex' not found".
plot_quantiSex=function(x, y){
	# x = path to the quantiSex output file
	# y = number of generations to display
	x=read.table(x,h=T)
	# Two panels: plot area (2/3) and legend area (1/3).
	layout(matrix(1:2, ncol=2), width=c(2/3, 1/3))
	par(mar=c(5,4,4,0), las=1)
	if(x$sexSystem[1] == 0){sex="\nherma only\n"}
	if(x$sexSystem[1] == 1){sex="\nandrodioecy\n"}
	if(x$sexSystem[1] == 2){sex="\ngynodioecy\n"}
	# Main trace; title summarizes the simulation parameters.
	plot(x$atGeneration, x$meanFemAllocCosexual, xlim=c(0, y), type="l", ylim=c(0,1), lwd=2, xlab="Generation", ylab="frequencies", cex.lab=1.1, main=paste("M=", x$migRate[1], "\tE=", x$extRate[1], "\tk=", x$recolonization[1], sex, "unisex advantage=", x$sexAvantage[1], sep=""))
	lines(x$atGeneration, x$cosexualProportion, col="red", lwd=1.5)
	abline(h=0.5, lty=2)
	# Empty right-hand panel used only to host the legend.
	plot.new()
	par(mar=c(5,0,4,2))
	legend("left", col=c("black", "red"), c("% Fem. alloc.\nin cosexuals" ,"% of cosexuals"), lty=1, bty="n", lwd=c(2,2), inset=-0.5)
}
# Draw a colour-bar legend for image()-style plots: one polygon per colour,
# placed horizontally (horiz = TRUE) or vertically. Breaks may be supplied
# explicitly (must be length(col) + 1), derived from zlim, or derived from
# the data range of z (slightly expanded). Extra args go to plot().
# NOTE: the ylim/xlim defaults of NULL are never used as-is; they are
# overwritten below via the missing() checks.
image.scale <- function(z, zlim, col = heat.colors(12), breaks, horiz=TRUE, ylim=NULL, xlim=NULL, ...){
	if(!missing(breaks)){
		if(length(breaks) != (length(col)+1)){
			stop("must have one more break than colour")
		}
	}
	if(missing(breaks) & !missing(zlim)){
		breaks <- seq(zlim[1], zlim[2], length.out=(length(col)+1))
	}
	if(missing(breaks) & missing(zlim)){
		zlim <- range(z, na.rm=TRUE)
		zlim[2] <- zlim[2]+c(zlim[2]-zlim[1])*(1E-3)#adds a bit to the range in both directions
		zlim[1] <- zlim[1]-c(zlim[2]-zlim[1])*(1E-3)
		breaks <- seq(zlim[1], zlim[2], length.out=(length(col)+1))
	}
	# One rectangle (as x- or y-coordinates) per colour band.
	poly <- vector(mode="list", length(col))
	for(i in seq(poly)){
		poly[[i]] <- c(breaks[i], breaks[i+1], breaks[i+1], breaks[i])
	}
	# Show only the axis that runs along the scale.
	xaxt <- ifelse(horiz, "s", "n")
	yaxt <- ifelse(horiz, "n", "s")
	if(horiz){
		YLIM<-c(0,1)
		XLIM<-range(breaks)
	}
	if(!horiz){
		YLIM<-range(breaks)
		XLIM<-c(0,1)
	}
	if(missing(xlim)) xlim=XLIM
	if(missing(ylim)) ylim=YLIM
	plot(1,1,t="n",ylim=ylim, xlim=xlim, xaxt=xaxt, yaxt=yaxt, xaxs="i", yaxs="i", ...)
	for(i in seq(poly)){
		if(horiz){
			polygon(poly[[i]], c(0,0,1,1), col=col[i], border=NA)
		}
		if(!horiz){
			polygon(c(0,0,1,1), poly[[i]], col=col[i], border=NA)
		}
	}
}
# Plot the last `nGen` generations of per-deme population sizes from a
# simulation output file: a 3x4 grid of filled area charts, one per deme
# (first n = 12 columns of the table), each in a different rainbow colour.
# x: path to a whitespace-separated table (one column per deme, one row per
# generation; no header).
plot_time=function(x){
	n = 12 # nDemes to plot
	nGen = 50
	x=read.table(x, h=F)
	# Row indices of the final nGen generations.
	toPlot = seq(nrow(x)-nGen, nrow(x), 1)
	par(mfrow=c(3,4), mar=c(4, 3.5, 1, 0.5))
	for(i in 1:n){
		# Earlier full-trace variants kept for reference:
#		plot(1:nrow(x), x[,i], type="l", xlab="Time (generation)", ylab="nInd", ylim=c(0, max(x)), lwd=5)
#		polygon(c(0,1:nrow(x),nrow(x)), c(0,x[,i],0), col=rainbow(12)[i], border=NA)
		plot(toPlot, x[toPlot, i], type="l", xlab="", ylab="", ylim=c(0, max(x)), lwd=5, axes=F)
		# Fill the area under the curve down to the bottom of the plot region.
		polygon(c(toPlot[1], toPlot, nrow(x), toPlot[1]), c(x[toPlot[1], i], x[toPlot, i], par("usr")[3], par("usr")[3]), col=rainbow(n)[i], border=NA)
		# X axis labelled in generations relative to the displayed window.
		axis(side=1, at = seq(min(toPlot)-1, max(toPlot)-1, 10), labels = c(seq(min(toPlot)-nrow(x)+nGen, max(toPlot)-nrow(x)+nGen, 10)), cex.axis=1.2)
		mtext("Generations", side = 1, line = 2.2)
		axis(side=2, at = round(seq(0, max(x), length.out=2), 0), cex.axis=1.2)
		mtext("#Individus", side = 2, line = 2.2)
	}
}
# Side-by-side comparison of two matrices on a new device: heatmaps of the
# expected matrix x, the simulated matrix y, and the residuals x - y (each
# with its own image.scale colour bar), plus a histogram of the residuals.
# If x has dimnames, rownames are used as x-axis ticks and colnames as
# y-axis ticks (every other label shown). `watermark = TRUE` stamps each
# panel via watermark(). Depends on image.scale() and watermark() above.
compare_matrix=function(x, y, xlab="", ylab="", zlab="", zlim = c(min(c(x,y)), max(c(x,y))), zlimResiduals=c(min(x-y), max(x-y)), cex.lab=1, couleurs=c("green", "white", "red"), watermark=F){
	# plot 4 graphs to show matrices x, y and x-y, as well as the distribution of x-y values
	# x = matrix of expected values
	# y = matrix of observed values
	gradient = colorRampPalette(couleurs)
	dev.new(width=8, height=7)
	# 3 heatmap+scale pairs on top, histogram spanning the bottom.
	layout(matrix(c(1,2,3,4,5,6,7,7), byrow=T, nrow=2), width=c(4/5, 1/5, 4/5, 1/5, 4/5, 1/5, 1/2,1/2))
	par(mar=c(4.5, 4, 4, 1), las=1)
	# panel 1: matrix x ("expected")
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(x, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes, zlim=zlim)
	mtext(side=3, text="expected", line=0.75, cex=cex.lab)
	if(watermark){watermark()}
	if(is.null(colnames(x))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis: every other row label
		migRates = rownames(x)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis: every other column label
		extRates = colnames(x)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(x, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab, zlim=zlim)
	par(las=3)
	mtext(side=2, text=zlab, line=2.5, cex=cex.lab)
	# panel 2: matrix y ("simulated")
	par(las=1)
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(y, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes, zlim=zlim)
	mtext(side=3, text="simulated", line=0.75, cex=cex.lab)
	if(watermark){watermark()}
	if(is.null(colnames(y))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis
		migRates = rownames(y)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis
		extRates = colnames(y)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(y, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab, zlim=zlim)
	par(las=3)
	mtext(side=2, text=zlab, line=2.5, cex=cex.lab)
	# panel 3: residuals = x - y
	par(las=1)
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(x-y, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes, zlim=zlimResiduals)
	mtext(side=3, text="expected - simulated", line=0.75, cex=cex.lab)
	if(watermark){watermark()}
	if(is.null(colnames(y))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis
		migRates = rownames(y)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis
		extRates = colnames(y)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(x-y, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab, zlim=zlimResiduals)
	par(las=3)
	mtext(side=2, text="residuals", line=2.75, cex=cex.lab)
	# bottom panel: distribution of the residuals, on a symmetric x range
	z=c(x,y)
	par(mar=c(4.5, 4, 4, 3), las=1)
	hist(x-y, xlab="", ylab="", main="", cex.lab=cex.lab, cex.axis=cex.lab, xlim=c(-max(z), max(z)), n=20)
	mtext(side=1, text="residuals", line=2.5, cex=cex.lab)
	if(watermark){watermark()}
}
# FST-specific variant of compare_matrix(): same four-panel comparison
# (expected, simulated, residuals, residual histogram) but with auto-scaled
# colour ranges for x and y, a fixed [-1, 1] range for the residual panel,
# and multi-line watermark guards. NOTE: this is a near-duplicate of
# compare_matrix() above; only zlim handling differs.
compare_matrix_fst=function(x, y, xlab="", ylab="", zlab="", cex.lab=1, couleurs=c("green", "white", "red"), watermark=F){
	# plot 4 graphs to show matrices x, y and x-y, as well as the distribution of x-y values
	# x = matrix of expected values
	# y = matrix of observed values
	gradient = colorRampPalette(couleurs)
	dev.new(width=8, height=7)
	layout(matrix(c(1,2,3,4,5,6,7,7), byrow=T, nrow=2), width=c(4/5, 1/5, 4/5, 1/5, 4/5, 1/5, 1/2,1/2))
	par(mar=c(4.5, 4, 4, 1), las=1)
	# panel 1: matrix x ("expected")
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(x, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes)
	if(watermark==T){
		watermark()
	}
	mtext(side=3, text="expected", line=0.75, cex=cex.lab)
	if(is.null(colnames(x))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis: every other row label
		migRates = rownames(x)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis: every other column label
		extRates = colnames(x)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(x, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab)
	par(las=3)
	mtext(side=2, text=zlab, line=2.5, cex=cex.lab)
	# panel 2: matrix y ("simulated")
	par(las=1)
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(y, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes)
	mtext(side=3, text="simulated", line=0.75, cex=cex.lab)
	if(watermark==T){
		watermark()
	}
	if(is.null(colnames(y))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis
		migRates = rownames(y)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis
		extRates = colnames(y)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(y, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab)
	par(las=3)
	mtext(side=2, text=zlab, line=2.5, cex=cex.lab)
	# panel 3: residuals = x - y, on the fixed FST-difference range [-1, 1]
	par(las=1)
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(x-y, xlab="", ylab="", col=gradient(100), cex.axis=cex.lab, axes=plot_axes, zlim = c(-1, 1))
	mtext(side=3, text="expected - simulated", line=0.75, cex=cex.lab)
	if(watermark==T){
		watermark()
	}
	if(is.null(colnames(y))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis
		migRates = rownames(y)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis
		extRates = colnames(y)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	par(las=1)
	image.scale(x-y, horiz=F, col=gradient(100), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab, zlim = c(-1, 1))
	par(las=3)
	mtext(side=2, text="residuals", line=2.75, cex=cex.lab)
	# bottom panel: distribution of the residuals, on a symmetric x range
	z=c(x,y)
	par(mar=c(4.5, 4, 4, 3), las=1)
	hist(x-y, xlab="", ylab="", main="", cex.lab=cex.lab, cex.axis=cex.lab, xlim=c(-max(z), max(z)), n=20)
	mtext(side=1, text="residuals", line=2.5, cex=cex.lab)
	if(watermark==T){
		watermark()
	}
}
# Stamp the current plot with a large, semi-transparent diagonal watermark
# ("TAKE CARE / JEAN-PIERRE") plus the current date in the bottom-right
# margin. Must be called after a plot has been drawn (uses grconvertX/Y).
watermark = function(){
	tag1 = "TAKE CARE\nJEAN-PIERRE"
	tag2 = ""
	#tag2 = "camille.roux.1@unil.ch"
	run.date <- format(Sys.Date(), "%m-%d-%Y")
	text(x = grconvertX(0.5, from = "npc"), # centre horizontally
	     y = grconvertY(0.5, from = "npc"), # centre vertically
	     labels = tag1, # central watermark
	     cex = 5, font = 2, # large and bold
	     col = rgb(1, 0, 0, .15), # mostly transparent red
	     srt = 45) # text rotated 45 degrees
	texte = paste(tag2, run.date)
	mtext(texte, side = 1, line = -1, adj = 1, col = rgb(1, 0, 0, .15), cex = 1.5)
}
# Collapse replicate rows of a quantiSex results table: rows sharing the same
# parameter combination (nDemes, nIndMaxPerDeme, nQuantiLoci, selfingRate,
# fecundity, migRate, extRate, recolonization, sexSystem, sexAvantage) are
# averaged column-wise. Returns a matrix with one row per combination (rows
# ordered by the sorted combination key) and the same columns as `x`.
# NOTE: assumes every column of `x` is numeric -- apply() coerces the data
# frame to a matrix before taking the means.
cleanQuantiSexTable = function(x){
	# "_"-separated key identifying each parameter combination.
	params = cbind(x$nDemes, x$nIndMaxPerDeme, x$nQuantiLoci, x$selfingRate, x$fecundity, x$migRate, x$extRate, x$recolonization, x$sexSystem, x$sexAvantage)
	key = apply(params, MARGIN = 1, FUN = paste, collapse = "_")
	# One row of column means per distinct key. Building the rows with
	# lapply + do.call(rbind, ...) avoids growing the result matrix inside
	# a loop (which copies it on every iteration).
	rows = lapply(names(table(key)), function(k) {
		apply(x[which(key == k), ], MARGIN = 2, FUN = mean)
	})
	do.call(rbind, rows)
}
# Cross-tabulate the mean of z over the distinct values of x and y.
# Returns a matrix with the distinct values of y as ROWS and the distinct
# values of x as COLUMNS (the original header comment had this reversed):
#   res[y_value, x_value] = mean(z) over observations with that (x, y) pair
# Empty combinations give NaN (mean of an empty vector with na.rm = TRUE).
# Typical use: x = migration rate, y = extinction rate, z = Fst.
# CAVEAT: distinct values are recovered via as.numeric(names(table(.))), a
# print/parse round-trip; exact equality `x == value` can miss values that
# do not survive that round-trip.
triVariableTable = function(x, y, z){
	xValues = as.numeric(names(table(x)))
	yValues = as.numeric(names(table(y)))
	res = matrix(NA, nrow = length(yValues), ncol = length(xValues),
	             dimnames = list(yValues, xValues))
	for(colonne in seq_along(xValues)){
		for(ligne in seq_along(yValues)){
			sel = which(x == xValues[colonne] & y == yValues[ligne])
			res[ligne, colonne] = mean(z[sel], na.rm = TRUE)
		}
	}
	return(res)
}
# Heatmap of a pre-built matrix x on a new device, with an image.scale()
# colour bar on the right. If x has dimnames, rownames label the x axis and
# colnames the y axis (every other label shown). Depends on image.scale()
# and watermark() defined above.
plot3var = function(x, xlab="", ylab="", zlab="", main="", cex.lab=1, couleurs=c("green", "white", "red"), zlim = c(min(x), max(x)), watermark=F, nlevels=10){
	# x = table with values z as a function of 'rows' and 'columns'
	# plot z as a function of 2 variables
	# best continuous gradient: c("#ffffd9", "#edf8b1", "#c7e9b4", "#7fcdbb", "#41b6c4", "#1d91c0", "#225ea8", "#253494", "#081d58")
	gradient = colorRampPalette(couleurs)
	dev.new(width=8, height=7)
	# Heatmap (4/5 width) + colour bar (1/5 width).
	layout(matrix(c(1,2), byrow=T, ncol=2), width=c(4/5, 1/5))
	par(mar=c(4.5, 4, 4, 1), las=1)
	# Default axes only when there are no dimnames to use instead.
	if(is.null(colnames(x))){
		plot_axes = T
	}else{
		plot_axes = F
	}
	image(x, xlab="", ylab="", col=gradient(nlevels), cex.axis=cex.lab, axes=plot_axes, zlim=zlim)
	mtext(side=3, text=main, line=0.75, cex=cex.lab)
	if(is.null(colnames(x))){
		mtext(side=1, text=xlab, line=2.5, cex=cex.lab)
		par(las=3)
		mtext(side=2, text=ylab, line=2.75, cex=cex.lab)
	}else{
		# x axis: every other row label
		migRates = rownames(x)
		posX = c((seq(1, length(migRates), 2)), length(migRates))
		axis(1, at=(posX-1)/(length(migRates)-1), labels = migRates[posX])
		mtext(xlab, 1, line=2.5, cex=cex.lab)
		# y axis: every other column label
		extRates = colnames(x)
		posY = c((seq(1, length(extRates), 2)), length(extRates))
		axis(2, at=(posY-1)/(length(extRates)-1), labels = extRates[posY])
		par(las=0)
		mtext(ylab, 2, line=2.75, cex=cex.lab)
	}
	if(watermark){watermark()}
	par(las=1)
	image.scale(x, horiz=F, col=gradient(nlevels), xlab="", ylab="", cex.lab=cex.lab, cex.axis=cex.lab, zlim=zlim)
	par(las=3)
	mtext(side=2, text=zlab, line=2.5, cex=cex.lab)
}
# Run cor.test() on (x, y) with the three standard methods and collect the
# point estimate and p-value of each into a 3 x 2 matrix
# (rows: pearson/spearman/kendall; columns: coefficient/pvalue),
# rounded to 5 decimal places.
correlation = function(x, y) {
	methods = c("pearson", "spearman", "kendall")
	res = t(vapply(methods, function(m) {
		ct = cor.test(x, y, method = m)
		unname(c(ct$estimate, ct$p.value))
	}, numeric(2)))
	dimnames(res) = list(methods, c("coefficient", "pvalue"))
	return(round(res, 5))
}
# Plot per-position average diversity (pi) along a normalized chromosome for
# replicate `i`: points for the "avg" columns of `pi`, optionally with a red
# shaded +/- 1 sd ribbon from the "std" columns. The title concatenates the
# parameter names and values from row i of `par`.
# NOTE: the parameters `pi` and `par` mask base::pi and graphics::par inside
# this function (harmless here, since neither is used as such).
plotpi = function (pi, pos, par, i, plot_std = T, xlab="", ylab=""){
	# Global maximum of the averages, used for the y-range when sd is hidden.
	maxPi = max(as.numeric(unlist(pi[, grep("avg", colnames(pi))])))
	avg = as.numeric(pi[i, grep("avg", colnames(pi))])
	std = as.numeric(pi[i, grep("std", colnames(pi))])
	pos = as.numeric(pos[i, ])
	if(plot_std==T){
		ylim = c(0, max(avg + std) * 1.05)
	}else{
		ylim = c(0, maxPi*1.01)
	}
	# Two-line title: parameter names over parameter values for replicate i.
	titre1 = paste(colnames(par), collapse = "\t")
	titre2 = paste(par[i, ], collapse = "\t")
	titre = paste(titre1, titre2, sep = "\n")
	# Empty frame first (white points), then ribbon, then the points on top.
	plot(pos, avg, xlim=c(0,1), ylim = ylim, col = "white", main = titre, xlab=xlab, ylab=ylab)
	if(plot_std==T){
		polygon(c(pos, rev(pos)), c(avg + std, rev(avg - std)), col = rgb(1, 0, 0, 0.25))
	}
	points(pos, avg, pch = 16, cex = 1.1)
}
# Six-panel summary of ABC selective-sweep statistics for replicate `i`:
# Tajima's D, Achaz's Y, mean/std pi, and Pearson r/p along the chromosome,
# each with a red vertical line at the (presumed) sweep position par$Sp[i].
# CAUTION: relies entirely on data frames in the GLOBAL environment
# (pos, tajD, achazY, pi_avg, pi_std, pearsonR, pearsonP, and a data frame
# named `par` that shadows graphics::par) -- TODO: pass these as arguments.
plotABCsweep = function(i){
	# i = Id of replicate
	par(mfrow=c(3,2), mar=c(4.5, 3.95, 3, 1.95))
	plot(as.numeric(pos[i,]), as.numeric(tajD[i,]), xlim=c(0,1), xlab="position", ylab="tajD", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
	plot(as.numeric(pos[i,]), as.numeric(achazY[i,]), xlim=c(0,1), xlab="position", ylab="achazY", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
	plot(as.numeric(pos[i,]), as.numeric(pi_avg[i,]), xlim=c(0,1), xlab="position", ylab="pi avg", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
	plot(as.numeric(pos[i,]), as.numeric(pi_std[i,]), xlim=c(0,1), xlab="position", ylab="pi std", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
	plot(as.numeric(pos[i,]), as.numeric(pearsonR[i,]), xlim=c(0,1), xlab="position", ylab="pearsonR", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
	plot(as.numeric(pos[i,]), as.numeric(pearsonP[i,]), xlim=c(0,1), xlab="position", ylab="pearsonP", type="l", cex.lab=1.2, lwd=2); abline(v=par$Sp[i], col="red")
}
#plot3var_v2 = function (x, y, z, xlab = "", ylab = "", zlab = "", main = "", cex.lab = 1, couleurs = c("#ffffd9","#edf8b1","#c7e9b4","#7fcdbb","#41b6c4","#1d91c0","#225ea8","#253494","#081d58"), zlim = NULL, watermark = F, nlevels = 10){
#
# median_z = c()
#
# mat = matrix(NA, length(table(y)), length(table(x)))
# colnames(mat) = names(table(x))
# rownames(mat) = names(table(y))
# # convert the 3 vectors x, y and z in a matrix mat[y, x] = z
# ligne = 0
# colonne = 0
# for( x_i in as.numeric(names(table(x))) ){ # variable 'x' in column
# colonne = colonne + 1
# ligne = 0
# for( y_i in as.numeric(names(table(y))) ){ # variable 'y' in raw
# ligne = ligne + 1
# mat[ligne, colonne] = median(z[which(x==x_i & y==y_i)])
#
# median_z = c(median_z, mat[ligne, colonne])
#
# }
# }
#
# min_arr = which(mat==min(mat), arr.ind = T)
# max_arr = which(mat==max(mat), arr.ind = T)
#
# min_x = min_arr[,2]
# max_x = max_arr[,2]
# min_y = min_arr[,1]
# max_y = max_arr[,1]
# min_z = min(mat)
# max_z = max(mat)
#
# # create a gradient of colors and a graphic window
# gradient = colorRampPalette(couleurs)
# dev.new(width = 8, height = 7)
# layout(matrix(c(1, 2), byrow = T, ncol = 2), width = c(4/5, 1/5))
# par(mar = c(4.5, 4, 4, 1), las = 1)
#
# # plot
# if (is.null(zlim)){
# zlim = range(mat)
# }
# print(mat)
# image(t(mat), xlab = "", ylab = "", col = gradient(nlevels), cex.axis = cex.lab, axes = F, zlim = zlim)
# mtext(side = 3, text = main, line = 0.75, cex = cex.lab)
#
# text( (min_x-1)/(length(table(x))-1), (min_y-1)/(length(table(y))-1), round(min_z, 2), col=gradient(nlevels)[nlevels])
# text( (max_x-1)/(length(table(x))-1), (max_y-1)/(length(table(y))-1), round(max_z, 2), col=gradient(nlevels)[1])
#
#
# if (is.null(colnames(mat))) {
# mtext(side = 1, text = xlab, line = 2.5, cex = cex.lab)
# par(las = 3)
# mtext(side = 2, text = ylab, line = 2.75, cex = cex.lab)
# }
# else {
# migRates = rownames(mat)
# posX = c((seq(1, length(migRates), 2)), length(migRates))
# axis(1, at=0:(length(table(x))-1)/(length(table(x))-1), labels=names(table(x)))
#
# mtext(xlab, 1, line = 2.5, cex = cex.lab)
# extRates = colnames(mat)
# posY = c((seq(1, length(extRates), 2)), length(extRates))
# axis(2, at=0:(length(table(y))-1)/(length(table(y))-1), labels=names(table(y)))
# par(las = 0)
# mtext(ylab, 2, line = 2.75, cex = cex.lab)
# }
# if (watermark) {
# watermark()
# }
# par(las = 1)
# image.scale(mat, horiz = F, col = gradient(nlevels), xlab = "",
# ylab = "", cex.lab = cex.lab, cex.axis = cex.lab, zlim = zlim)
# par(las = 3)
# mtext(side = 2, text = zlab, line = 2.5, cex = cex.lab)
#}
# Bayesian update for the classic "which die produced this roll?" problem.
# obs:   observed roll (only obs[1] is used, matching the original code)
# dé:    vector of die sizes (number of faces of each candidate die)
# prior: prior probability of each die (same length as dé)
# Returns the posterior probability of each die given the observation.
proba_bayes = function(obs, dé, prior){
	# Likelihood P(obs | die): 1/faces if the roll is possible on that die,
	# 0 otherwise. Vectorized ifelse() replaces the original scalar loop.
	p_obs_dé = ifelse(obs[1] > dé, 0, 1 / dé)
	# Marginal P(obs), then posterior by Bayes' rule.
	# (If obs is impossible on every die, p_obs is 0 and the result is NaN.)
	p_obs = sum(prior * p_obs_dé)
	p_dé_obs = prior * p_obs_dé / p_obs
	return(p_dé_obs)
}
# Like plot3var(), but starting from three parallel vectors instead of a
# pre-built matrix: builds mat[y_value, x_value] = median(z) over each (x, y)
# combination, draws it as a heatmap (annotating the cells holding the
# minimum and maximum medians with their rounded values), and adds an
# image.scale() colour bar. Also print()s the matrix as a side effect.
# Depends on image.scale() and watermark() defined above.
plot3var_v2 = function (x, y, z, xlab = "", ylab = "", zlab = "", main = "",
    cex.lab = 1, couleurs = c("#ffffd9", "#edf8b1", "#c7e9b4",
        "#7fcdbb", "#41b6c4", "#1d91c0", "#225ea8", "#253494",
        "#081d58"), zlim = NULL, watermark = F, nlevels = 10)
{
    median_z = c()
    # mat[row = y level, column = x level] = median of z in that cell.
    mat = matrix(NA, length(table(y)), length(table(x)))
    colnames(mat) = names(table(x))
    rownames(mat) = names(table(y))
    ligne = 0
    colonne = 0
    for (x_i in as.numeric(names(table(x)))) {
        colonne = colonne + 1
        ligne = 0
        for (y_i in as.numeric(names(table(y)))) {
            ligne = ligne + 1
            mat[ligne, colonne] = median(z[which(x == x_i & y ==
                y_i)])
            median_z = c(median_z, mat[ligne, colonne])
        }
    }
    # Cell positions of the extreme medians (may match several cells).
    min_arr = which(mat == min(mat), arr.ind = T)
    max_arr = which(mat == max(mat), arr.ind = T)
    min_x = min_arr[, 2]
    max_x = max_arr[, 2]
    min_y = min_arr[, 1]
    max_y = max_arr[, 1]
    min_z = min(mat)
    max_z = max(mat)
    gradient = colorRampPalette(couleurs)
    dev.new(width = 8, height = 7)
    layout(matrix(c(1, 2), byrow = T, ncol = 2), width = c(4/5,
        1/5))
    par(mar = c(4.5, 4, 4, 1), las = 1)
    if (is.null(zlim)) {
        zlim = range(mat)
    }
    # Diagnostic print of the aggregated matrix.
    print(mat)
    # Two branches only differ in indexing ([1]) when several cells tie for
    # the minimum/maximum median.
    if(length(min_x) == 1 && length(max_x) == 1){
	    image(t(mat), xlab = "", ylab = "", col = gradient(nlevels),
		cex.axis = cex.lab, axes = F, zlim = zlim)
	    mtext(side = 3, text = main, line = 0.75, cex = cex.lab)
	    text((min_x - 1)/(length(table(x)) - 1), (min_y - 1)/(length(table(y)) -
		1), round(min_z, 2), col = gradient(nlevels)[nlevels])
	    text((max_x - 1)/(length(table(x)) - 1), (max_y - 1)/(length(table(y)) -
		1), round(max_z, 2), col = gradient(nlevels)[1])
    }else{
	    image(t(mat), xlab = "", ylab = "", col = gradient(nlevels),
		cex.axis = cex.lab, axes = F, zlim = zlim)
	    mtext(side = 3, text = main, line = 0.75, cex = cex.lab)
	    text((min_x[1] - 1)/(length(table(x)) - 1), (min_y[1] - 1)/(length(table(y)) -
		1), round(min_z[1], 2), col = gradient(nlevels)[nlevels])
	    text((max_x[1] - 1)/(length(table(x)) - 1), (max_y[1] - 1)/(length(table(y)) -
		1), round(max_z[1], 2), col = gradient(nlevels)[1])
    }
    if (is.null(colnames(mat))) {
        mtext(side = 1, text = xlab, line = 2.5, cex = cex.lab)
        par(las = 3)
        mtext(side = 2, text = ylab, line = 2.75, cex = cex.lab)
    }
    else {
        migRates = rownames(mat)
        posX = c((seq(1, length(migRates), 2)), length(migRates))
        axis(1, at = 0:(length(table(x)) - 1)/(length(table(x)) -
            1), labels = names(table(x)))
        mtext(xlab, 1, line = 2.5, cex = cex.lab)
        extRates = colnames(mat)
        posY = c((seq(1, length(extRates), 2)), length(extRates))
        axis(2, at = 0:(length(table(y)) - 1)/(length(table(y)) -
            1), labels = names(table(y)))
        par(las = 0)
        mtext(ylab, 2, line = 2.75, cex = cex.lab)
    }
    if (watermark) {
        watermark()
    }
    par(las = 1)
    image.scale(mat, horiz = F, col = gradient(nlevels), xlab = "",
        ylab = "", cex.lab = cex.lab, cex.axis = cex.lab, zlim = zlim)
    par(las = 3)
    mtext(side = 2, text = zlab, line = 2.5, cex = cex.lab)
}
|
c43015726204c1aebb7a38d19b3356b994e40b0c | 07443e2cb1c97e384313a6c4a6d1dd76caeb67a6 | /plot2.R | 54d26aa06eba58794f50fc3d45a50d120004184d | [] | no_license | liady/ExData_Plotting1 | 0d2021d06a2cb98a610a6c451dcfb19daeeb942f | b0be25d86bbc395a85a7255dc70f29252f5c8de3 | refs/heads/master | 2023-08-19T01:00:50.848543 | 2014-06-07T22:34:22 | 2014-06-07T22:34:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,132 | r | plot2.R | # This file reads the power consumption data from a file,
# and writes to a PNG file a plot of the Global Active Power
# This function reads the data from the file, subsets to desired dates,
# fixes all variables, and returns the data frame
# Load the household power-consumption dataset, restrict it to the two
# target days (1 and 2 February 2007), build a POSIXlt DateTime column from
# Date + Time, and coerce the measurement columns (3:9) to numeric.
# Returns the prepared data frame. Expects
# "household_power_consumption.txt" in the working directory.
getData <- function (){
  # Read every column as character; explicit type conversion happens below.
  df <- read.table("household_power_consumption.txt", sep = ";",
                   header = TRUE, na.string = "?", colClasses = "character")
  # Keep only the two dates of interest.
  df <- subset(df, Date == "1/2/2007" | Date == "2/2/2007")
  # Combine Date and Time into a single date-time variable.
  df$DateTime <- strptime(paste(df$Date, df$Time, sep = " "),
                          "%d/%m/%Y %H:%M:%S")
  # Columns 3-9 hold the numeric measurements.
  df[3:9] <- lapply(df[3:9], as.numeric)
  df
}
# Prepare the data once, then render the time series to a PNG file.
power_consumption <- getData()
# open a 480x480 pixel png graphics device
png("plot2.png", 480, 480)
# line chart of global active power over the two-day window
plot(power_consumption$DateTime, power_consumption$Global_active_power, 
     type="l", xlab="", ylab = "Global Active Power (kilowatts)")
# close the device (dev.off() below) so the file is flushed to disk
dev.off() |
93726839ccafad6d0b3343f93ee697b757090223 | dcaf408194e93b10ee1b9fa565c95b77f29adb0c | /DTS350TemplateMaster/DTS400/GE2 Plots of continuous vars2.R | 28d8e7caf3a517bc4abca12f019a6a03d5182efb | [] | no_license | WJC-Data-Science/DTS350-hollinbergert | 75efd19f33f5298558f7e6a70b9e337d0e62a5b5 | 65524d55ef4ad995306dd0d36e5de1f54c068c67 | refs/heads/master | 2023-02-01T07:56:17.089980 | 2020-12-18T14:03:31 | 2020-12-18T14:03:31 | 293,891,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 106,912 | r | GE2 Plots of continuous vars2.R | #' ---
#' title: "GE2 Plots of continuous vars2 "
#' author: "TomHollinberger"
#' date: "11/22/2020"
#' output:
#' html_document:
#' keep_md: yes
#' toc: TRUE
#' toc_depth: 6
#' #' code_folding: hide
#' results: 'hide'
#' message: FALSE
#' warning: FALSE
#' ---
#' ---
#' THIS RSCRIPT USES ROXYGEN CHARACTERS.
#' YOU CAN PRESS ctrl+shift+K AND GO STRAIGHT TO A HTML.
#' SKIPS THE HANDWORK OF CREATING A RMD, AFTER THE ORIGINAL WORK IS DONE IN A RSCRIPT.
#' sample filepath E:/000 DTS 350 Data Visualization/DTS350-hollinbergert/DTS350TemplateMaster/DTS400/
# CONTINUOUS VARIABLES graph a series of scatterplots of continuous variables vs wgpa, with color = ext
# --- Setup: load packages, set the working directory, and read the admissions
# --- workbook. NOTE(review): setwd() with an absolute path makes this script
# --- machine-specific; a relative path or here::here() would be portable.
library(tidyverse)
library(dplyr)
library(readxl)
library(ggplot2)
#library(ggpmisc) #for annotate with npc  #doesn't load without crashing
setwd("E:/000 DTS 350 Data Visualization/DTS350-hollinbergert/DTS350TemplateMaster/DTS400/")
#download.file("E:/000 DTS 400 Internship/Orig Lists/WORKFILE3",
#  "workfiletmp.xlsx", mode = "wb")
# This excel file contains a number of tables on different sheets of the workbook. We can see a listing of the sheets using the excel_sheets function.
excel_sheets("WORKFILE3.xlsx")
# Load the "Grads and Exits without filled blanks" sheet into GEwofb.
GEwofb <- read_excel("WORKFILE3.xlsx", sheet = "GandE without filled blanks")
GEwofb
# Take out the "No GPA on record" rows, so now it's: Grads and Exits with GPAs, and without filled blanks.
GEwgpawofb <- filter(GEwofb, status != "No GPA on record")
GEwgpawofb
unique(GEwgpawofb$status) #confirms that only two options exist: "Didn't Graduate, but had GPA" "Grad with GPA"
# Convert the grouping columns to factors for ggplot color/group mapping.
GEwgpawofb$seg <- as.factor(GEwgpawofb$seg)
GEwgpawofb$status <- as.factor(GEwgpawofb$status)
#Create Means for each continuous variable
# One row per status ("Grad with GPA" / "Didn't Graduate, but had GPA");
# every *bar column is that group's mean of the corresponding predictor.
GEwgpawofbstatgrp <- GEwgpawofb %>%
  group_by(status) %>%
  summarize(wgpabar = mean(wgpa, na.rm = TRUE),
            rnkbar = mean(rnk, na.rm = TRUE),
            sizbar = mean(siz, na.rm = TRUE),
            prnkbar = mean(prnk, na.rm = TRUE),
            engsembar = mean(engsem, na.rm = TRUE),
            tcrbar = mean(tcr, na.rm = TRUE),
            hsgpabar = mean(hsgpa, na.rm = TRUE),
            cmpbar = mean(cmp, na.rm = TRUE),
            matbar = mean(mat, na.rm = TRUE),
            scibar = mean(sci, na.rm = TRUE),
            engbar = mean(eng, na.rm = TRUE),
            rdgbar = mean(rdg, na.rm = TRUE),
            dstbar = mean(dst, na.rm = TRUE))
# dplyr may emit a "`summarise()` regrouping output" message here; harmless.
GEwgpawofbstatgrp
#View(graph2data)
# Rounded display copies of each group mean, for use in plot annotations
# (GPAs and prnk to 2 decimals, most others to 1, dst to whole numbers).
GEwgpawofbstatgrprnd <- GEwgpawofbstatgrp %>%
  mutate(wgpa = round(wgpabar, digits = 2),
         rnk = round(rnkbar, digits = 1),
         siz = round(sizbar, digits = 1),
         prnk = round(prnkbar, digits = 2),
         engsem = round(engsembar, digits = 1),
         tcr = round(tcrbar, digits = 1),
         hsgpa = round(hsgpabar, digits = 2),
         cmp = round(cmpbar, digits = 1),
         mat = round(matbar, digits = 1),
         sci = round(scibar, digits = 1),
         eng = round(engbar, digits = 1),
         rdg = round(rdgbar, digits = 1),
         dst = round(dstbar, digits = 0))
GEwgpawofbstatgrprnd # has 2 rows, one for grads, one for exits
#Split in to two df's. One for Grads one for Exits
Gradwgpawofbstatgrprnd <- GEwgpawofbstatgrprnd %>%
  filter(status == "Grad with GPA")
Gradwgpawofbstatgrprnd
Exitwgpawofbstatgrprnd <- GEwgpawofbstatgrprnd %>%
  filter(status == "Didn't Graduate, but had GPA")
Exitwgpawofbstatgrprnd
#scatter plots with CI and stats
library(ggpubr) # provides stat_cor() used for on-plot correlation labels
# Two-color palette passed to scale_color_manual(); colors map to the status
# factor levels in order (red = first level, green = second).
listofcolors <- c("red","springgreen4")
#This function results in  Count = 123
# Returns the one-row data frame expected by a summary-label helper:
# a y position at 95% of the inflated upper limit (default 4 * 1.15, i.e.
# just above a 4.0 GPA scale) plus a two-line "count / mean" text label.
stat_box_data <- function(y, upper_limit = 4 * 1.15) {
  lbl <- paste('count=', length(y), '\n',
               'mean =', round(mean(y), 1), '\n')
  data.frame(y = 0.95 * upper_limit, label = lbl)
}
#This function results in mirror image:  123 = Count
# Same as stat_box_data() but with the label text mirrored ("<n> = count"),
# for placing the summary on the opposite side of a plot element.
stat_box_data2 <- function(y, upper_limit = 4 * 1.15) {
  lbl <- paste(length(y), '= count', '\n',
               round(mean(y), 1), '= mean', '\n')
  data.frame(y = 0.95 * upper_limit, label = lbl)
}
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#as oppsoed to the Categorical and Binary situations, this DOES NOT need to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
# extyn == "Yes" marks students who exited (did not graduate); "No" = grads.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
library(directlabels) #to allow for adding labels that call out the average points
# Quick console checks of the group-mean tables built earlier.
GEwgpawofbstatgrp
Gradwgpawofbstatgrprnd$rnkbar
#WGPAxRNK NOT A GOOD LAYOUT OF ANNOTATIONS
# First attempt at the rank plot: scatter + per-status lm fits, star markers
# (shape 8) at each group's (mean rank, mean GPA), and "Average : (x , y)"
# text annotations placed in data coordinates. Superseded by the annotated
# version further below, but still saved to wgpaxrnk.jpeg.
w <- ggplot(GEwgpawofb, aes(rnk, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # star at each group's mean point, sized via the size aesthetic
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = rnkbar, y = wgpabar, size = 12, color = status), shape = 8) +
  annotate("text", x = 200, y = 2.15, hjust = 0, vjust = 0, color = "black", label = (str_c("Average : ( ", "Rank , WJC GPA)"))) +
  annotate("text", x = 200, y = 2, hjust = 0, vjust = 0, color = "springgreen4", label = (str_c("Average : ( ", round(Gradwgpawofbstatgrprnd$rnkbar,digits = 1), " , " , round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  annotate("text", x = 200, y = 1.84, hjust = 0, vjust = 0, color = "red", label = (str_c("Average : (", round(Exitwgpawofbstatgrprnd$rnkbar,digits = 1), " , " , round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # ggpubr::stat_cor prints Pearson R and p per group in the lower-left
  stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`,`~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of High School Class RANK on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Class RANK",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
w
ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxrnk.jpeg", width = 9, height = 7, units = "in")
#WGPAxSIZ NOT A GOOD LAYOUT OF ANNOTATIONS
# First attempt at the class-size plot; displayed but never saved to disk.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$siz)), digits = 5)[2] #[2] means the 2nd coeff, i.e., gradient of the abline. 1st coeff is the y-intercept https://stackoverflow.com/questions/19661766/how-to-get-gradient-of-abline-in-r
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$siz)), digits = 5)[2]
exitslope
w <- ggplot(GEwgpawofb, aes(siz, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = sizbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$sizbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #    color = status), data = GEwgpawofbstatgrprnd) +
  annotate("text", x = 10, y = 2.15, hjust = 0, vjust = 0, color = "black", label = (str_c("Correlation, Significance"))) +
  annotate("text", x = 400, y = 2.15, hjust = 0, vjust = 0, color = "black", label = (str_c("Slope of Line, Avg Size , Avg WJC GPA)"))) +
  annotate("text", x = 400, y = 2, hjust = 0, vjust = 0, color = "springgreen4", label = (str_c(gradslope, " Average : (", round(Gradwgpawofbstatgrprnd$sizbar,digits = 1), " , " , round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  annotate("text", x = 400, y = 1.84, hjust = 0, vjust = 0, color = "red", label = (str_c(exitslope, " Average : (", round(Exitwgpawofbstatgrprnd$sizbar,digits = 1), " , " , round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of High School Class SIZE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Class SIZE",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
w
#WGPAxRNK
# Prep for the annotated rank plot: per-group regression slope (wgpa ~ rnk),
# Pearson correlation, and cor.test p-values, consumed by the annotations.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$rnk)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$rnk)), digits = 5)[2]
exitslope
GEwgpawofbgrad$rnk
GEwgpawofbgrad$wgpa
cor(GEwgpawofbgrad$rnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$rnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$rnk,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$rnk,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Annotated rank plot: scatter + per-status lm fits, star at each group mean,
# and a two-row stats "table" (R, p, slope, avg rank, avg GPA) written in
# data coordinates near the bottom of the panel.
w <- ggplot(GEwgpawofb, aes(rnk, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = rnkbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$rnkbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #    color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #    label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #    label = (str_c(" Coeff | if p < .05 | Line | rnke | WJC GPA "))) +
  # Graduates' stats row (green).
  # FIX: both str_c() calls below originally ended with a trailing comma
  # after the final round(...) argument, which passes an empty argument to
  # str_c() and errors when the plot is built; the commas are removed.
  annotate("text", x = 150, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$rnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$rnkbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2)))) +
  # Exits' stats row (red).
  annotate("text", x = 150, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$rnk,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$rnkbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2)))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of High School Class RANK on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Class RANK",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Column separators (vb0..vb5) and a two-line header (t1, t2) for the stats
# table, positioned in npc (0-1 panel) coordinates via annotation_custom().
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Header rows; the wide spacing in the labels lines up with the bar positions.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line RANK WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the final annotated plot and display it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as PLOTS3 wgpaxrnk at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxsiz w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x SIZE
#Prep the Annotated Values
# --- Same pattern as the RANK section: per-group slope, correlation, and
# --- p-value feed the annotated stats table below the plot.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$siz)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$siz)), digits = 5)[2]
exitslope
GEwgpawofbgrad$siz
GEwgpawofbgrad$wgpa
cor(GEwgpawofbgrad$siz,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$siz,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$siz,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$siz,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
w <- ggplot(GEwgpawofb, aes(siz, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = sizbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$sizbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #    color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #    label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #    label = (str_c(" Coeff | if p < .05 | Line | Size | WJC GPA "))) +
  # Graduates' stats row (green): R, p, slope, mean size, mean WJC GPA.
  annotate("text", x = 300, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$siz,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$sizbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exits' stats row (red).
  annotate("text", x = 300, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$siz,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$sizbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of High School Class SIZE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Class SIZE",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Column separators and two-line header for the stats table (npc coords).
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line SIZE WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxsiz at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxsiz w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPAxPRNK
# Prep for the rank/size (percentile-like, 0-1 ratio) plot: per-group slope,
# correlation, and p-values, as in the preceding sections.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$prnk)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$prnk)), digits = 5)[2]
exitslope
GEwgpawofbgrad$prnk
GEwgpawofbgrad$wgpa
cor(GEwgpawofbgrad$prnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$prnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$prnk,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$prnk,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Annotated RANK/SIZE plot: same layout as the RANK and SIZE versions.
w <- ggplot(GEwgpawofb, aes(prnk, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = prnkbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$rnkbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #    color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #    label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #    label = (str_c(" Coeff | if p < .05 | Line | rnke | WJC GPA "))) +
  # Graduates' stats row (green).
  # FIX: prnk is a 0-1 rank/size ratio, so rounding its mean to 1 digit
  # (as the original did) loses nearly all precision; use digits = 2 to
  # match the rounding convention used for prnk earlier in this script.
  annotate("text", x = .3, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$prnk,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$prnkbar,digits = 2),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exits' stats row (red).
  annotate("text", x = .3, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$prnk,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$prnkbar
                                ,digits = 2),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of High School Class RANK / SIZE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Class RANK / SIZE",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Column separators and two-line header for the stats table (npc coords).
# NOTE(review): the t2 header still says "RANK" although this plot shows
# RANK / SIZE -- presumably copied from the RANK section; confirm intended.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line RANK WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as PLOTS3 wgpaxprnk at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxsiz w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPAxENGSEM
#Prep the Annotated Values
# --- Same pattern as the SIZE section, for number of HS English semesters.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$engsem)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$engsem)), digits = 5)[2]
exitslope
GEwgpawofbgrad$engsem
GEwgpawofbgrad$wgpa
cor(GEwgpawofbgrad$engsem,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$engsem,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$engsem,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$engsem,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
w <- ggplot(GEwgpawofb, aes(engsem, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = engsembar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$engsembar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #    color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #    label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #    label = (str_c(" Coeff | if p < .05 | Line | EngSem | WJC GPA "))) +
  # Graduates' stats row (green).
  annotate("text", x = 6.2, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$engsem,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$engsembar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exits' stats row (red).
  annotate("text", x = 6.2, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$engsem,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$engsembar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of NUMBER OF HS ENGLISH SEMESTERS on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "Number of HS English Semesters",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Column separators and two-line header for the stats table (npc coords).
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line EngSem WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxengsem at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxengsem w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x TCR
#Prep the Annotated Values
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$tcr)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$tcr)), digits = 5)[2]
exitslope
GEwgpawofbgrad$tcr
GEwgpawofbgrad$wgpa
cor(GEwgpawofbgrad$tcr,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$tcr,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$tcr,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$tcr,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
w <- ggplot(GEwgpawofb, aes(tcr, wgpa, color = status)) +
geom_point() +
geom_smooth(method = lm, lwd=1) +
geom_point(GEwgpawofbstatgrp, mapping = aes(x = tcrbar, y = wgpabar, size = 12, color = status), shape = 8) +
# ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$tcrbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
# color = status), data = GEwgpawofbstatgrprnd) +
# annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
# label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
# annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
# label = (str_c(" Coeff | if p < .05 | Line | TCRs | WJC GPA "))) +
annotate("text", x = 24, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
label = (str_c(" R= ",
round(cor(GEwgpawofbgrad$tcr,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
" , p= ",
round(corrsltsgrad$p.value, digits = 3),
" , ",
gradslope,
" , " ,
round(Gradwgpawofbstatgrprnd$tcrbar,digits = 1),
" , " ,
round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
annotate("text", x = 24, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
label = (str_c(" R= ",
round(cor(GEwgpawofbexit$tcr,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
" , p= ",
round(corrsltsexit$p.value, digits = 3),
" , ",
exitslope,
" , " ,
round(Exitwgpawofbstatgrprnd$tcrbar
,digits = 1),
" , " ,
round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
# stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
scale_color_manual(values = listofcolors) +
labs(title = "The Effect of TRANSFER CREDITS on WJC GPA ",
subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
caption = "Source: WJC Admissions Records",
x = "Transfer Credits",
y = "William Jewell College GPA",
color = "Student Status:",
size = " Avg X, Avg Y:") +
guides(color = guide_legend(reverse = TRUE)) +
theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Overlay a makeshift header "table" for the in-panel TCR annotations:
# six "|" column separators (vb0-vb5) plus two caption rows (t1, t2).
# Positions are in npc (device-relative) units so the header lines up
# with the plot panel regardless of data coordinates.
# Helper: one "|" separator grob at horizontal npc position xpos.
make_vbar <- function(xpos) {
  annotation_custom(grid::textGrob(label = "|",
                                   x = unit(xpos, "npc"), y = unit(.145, "npc"), vjust = 0,
                                   gp = grid::gpar(cex = 2)))
}
vb0 <- make_vbar(.40) #can't get it to use color = "red",
vb1 <- make_vbar(.55)
vb2 <- make_vbar(.675)
vb3 <- make_vbar(.79)
vb4 <- make_vbar(.88)
vb5 <- make_vbar(1)
# Helper: one left-aligned caption row at vertical npc position ypos.
make_caption <- function(txt, ypos) {
  annotation_custom(grid::textGrob(label = txt,
                                   x = unit(.4, "npc"), y = unit(ypos, "npc"), vjust = 0, hjust = 0,
                                   gp = grid::gpar(cex = 1)))
}
t1 <- make_caption(" Correlation Significant Slope of Avg Avg ", .170)
t2 <- make_caption(" Coeff if p < .05 Line TCRs WJC GPA ", .13)
# Compose the base plot with every annotation layer, then print it.
v <- w + vb0 + vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxtcr at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxtcr w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x hsgpa
#Prep the Annotated Values
# Slopes of the simple regressions WJC GPA ~ HS GPA, fitted separately for
# graduates and exited students; [2] picks the slope coefficient.
# NOTE(review): the original computed and printed exitslope twice in a row
# with identical inputs; the redundant first copy was removed.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$hsgpa)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$hsgpa)), digits = 5)[2]
exitslope
# Echo the raw columns for a quick visual sanity check in the console.
GEwgpawofbgrad$hsgpa
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its significance test per group;
# the same quantities are re-derived inside the plot annotations below.
cor(GEwgpawofbgrad$hsgpa,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$hsgpa,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$hsgpa,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$hsgpa,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs high-school GPA colored by student status, with a
# per-status linear fit, a star (shape 8) at each status-group mean, and two
# in-panel text annotations summarizing R, p, slope, and group means
# (green = graduates, red = exited students). Uses globals prepared above.
w <- ggplot(GEwgpawofb, aes(hsgpa, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean hsgpa, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = hsgpabar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$hsgpabar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | HSGPA | WJC GPA "))) +
  # Graduates' summary row (data coordinates; npc positioning unavailable).
  annotate("text", x = 3.1, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$hsgpa,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$hsgpabar,digits = 1),
                          " , ",
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 3.1, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$hsgpa,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$hsgpabar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of HIGH SCHOOL GPA on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School GPA",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Overlay a makeshift header "table" for the in-panel annotations: six "|"
# separators (vb0-vb5) and two caption rows (t1, t2), positioned in npc
# (device-relative) units so they line up with the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line HSGPA WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxhsgpa at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxhsgpa w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x cmp
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ ACT composite, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$cmp)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$cmp)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$cmp
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$cmp,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$cmp,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$cmp,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$cmp,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs ACT composite score colored by student status, with
# per-status linear fits, star markers at group means, and two in-panel
# summary annotations (green = graduates, red = exited students).
w <- ggplot(GEwgpawofb, aes(cmp, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean cmp, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = cmpbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$cmpbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | ACTCMP | WJC GPA "))) +
  # Graduates' summary row.
  annotate("text", x = 23, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$cmp,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$cmpbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 23, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$cmp,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$cmpbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ACT COMPOSITE SCORE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ACT Composite Score",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Header "table" overlay for the CMP plot: six "|" separators and two
# caption rows, positioned in npc units over the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line CMP WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxcmp at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxcmp w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x mat
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ ACT math, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$mat)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$mat)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$mat
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$mat,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$mat,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$mat,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$mat,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs ACT math score colored by student status, with
# per-status linear fits, star markers at group means, and two in-panel
# summary annotations (green = graduates, red = exited students).
# FIX(review): the separator after exitslope in the red annotation was
# ", " while every sibling plot uses " , " — normalized for a consistent
# rendered label.
w <- ggplot(GEwgpawofb, aes(mat, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean mat, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = matbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$matbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | MAT | WJC GPA "))) +
  # Graduates' summary row.
  annotate("text", x = 24, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$mat,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$matbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 24, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$mat,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$matbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ACT MATH SCORE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ACT Math Score",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Header "table" overlay for the MAT plot: six "|" separators and two
# caption rows, positioned in npc units over the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line MAT WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxmat at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxmat w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x sci
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ ACT science, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$sci)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$sci)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$sci
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$sci,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$sci,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$sci,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$sci,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs ACT science score colored by student status, with
# per-status linear fits, star markers at group means, and two in-panel
# summary annotations (green = graduates, red = exited students).
w <- ggplot(GEwgpawofb, aes(sci, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean sci, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = scibar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$scibar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | SCI | WJC GPA "))) +
  # Graduates' summary row.
  annotate("text", x = 24.5, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$sci,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$scibar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 24.5, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$sci,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$scibar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ACT SCIENCE SCORE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ACT Science Score",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Header "table" overlay for the SCI plot: six "|" separators and two
# caption rows, positioned in npc units over the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line SCI WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxsci at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxsci w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x eng
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ ACT English, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$eng)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$eng)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$eng
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$eng,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$eng,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$eng,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$eng,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs ACT English score colored by student status, with
# per-status linear fits, star markers at group means, and two in-panel
# summary annotations (green = graduates, red = exited students).
w <- ggplot(GEwgpawofb, aes(eng, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean eng, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = engbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$engbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | eng | WJC GPA "))) +
  # Graduates' summary row.
  annotate("text", x = 22, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$eng,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$engbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 22, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$eng,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$engbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ACT ENGLISH SCORE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ACT ENGLISH Score",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Header "table" overlay for the ENG plot: six "|" separators and two
# caption rows, positioned in npc units over the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line ENG WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxeng at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxeng w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x rdg
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ ACT reading, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$rdg)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$rdg)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$rdg
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$rdg,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$rdg,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$rdg,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$rdg,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs ACT reading score colored by student status, with
# per-status linear fits, star markers at group means, and two in-panel
# summary annotations (green = graduates, red = exited students).
w <- ggplot(GEwgpawofb, aes(rdg, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  # Star markers at the (mean rdg, mean wgpa) of each status group.
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = rdgbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$rdgbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  #                     color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  #          label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  #         label = (str_c(" Coeff | if p < .05 | Line | rdg | WJC GPA "))) +
  # Graduates' summary row.
  annotate("text", x = 22, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$rdg,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$rdgbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exited students' summary row.
  annotate("text", x = 22, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$rdg,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$rdgbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ACT READING SCORE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ACT Reading Score",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars
# Header "table" overlay for the RDG plot: six "|" separators and two
# caption rows, positioned in npc units over the plot panel.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                                        x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                                        x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                                        gp = grid::gpar(cex = 2)))
# Upper and lower header rows naming the annotation columns.
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                                       x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line RDG WJC GPA ",
                                       x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                                       gp = grid::gpar(cex = 1)))
# Compose the base plot with all annotation layers and print it.
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxrdg at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxrdg w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
#WGPA x dst
#Prep the Annotated Values
# Regression slopes of WJC GPA ~ zip-code distance, per status group; bare
# expressions below auto-print to the console for inspection.
gradslope <- round(coef(lm(GEwgpawofbgrad$wgpa~GEwgpawofbgrad$dst)), digits = 5)[2] #[2] means the 2nd coeff. 1st coeff is the y-intercept
gradslope
exitslope <- round(coef(lm(GEwgpawofbexit$wgpa~GEwgpawofbexit$dst)), digits = 5)[2]
exitslope
# Echo the raw columns as a console sanity check.
GEwgpawofbgrad$dst
GEwgpawofbgrad$wgpa
# Pearson correlation (pairwise-complete) and its test, per group.
cor(GEwgpawofbgrad$dst,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs") #http://www.r-tutor.com/elementary-statistics/numerical-measures/correlation-coefficient
round(cor(GEwgpawofbgrad$dst,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2)
corrsltsgrad <- cor.test(GEwgpawofbgrad$dst,GEwgpawofbgrad$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsgrad$p.value, digits = 3)
corrsltsexit <- cor.test(GEwgpawofbexit$dst,GEwgpawofbexit$wgpa) #http://www.sthda.com/english/wiki/correlation-test-between-two-variables-in-r
round(corrsltsexit$p.value, digits = 3)
# library(ggpmisc) #for annotate with npc #doesn't load without crashing
library(ggplot2) # because ggpmisc messes with ggplot, which couldn't be found
#from https://www.rdocumentation.org/packages/ggpmisc/versions/0.3.6/topics/annotate
#Run the Plot
# Scatter of WJC GPA vs. zip-code distance, colored by student status, with a
# linear fit per status and a star (shape 8) at each status centroid.
w <- ggplot(GEwgpawofb, aes(dst, wgpa, color = status)) +
  geom_point() +
  geom_smooth(method = lm, lwd=1) +
  geom_point(GEwgpawofbstatgrp, mapping = aes(x = dstbar, y = wgpabar, size = 12, color = status), shape = 8) +
  # ggrepel::geom_label_repel(aes(label = paste("Avg: ", "( ", (round(GEwgpawofbstatgrprnd$dstbar, digits = 1))," , ",(round(GEwgpawofbstatgrprnd$wgpabar, digits = 2)), " )"), sep = " ",
  # color = status), data = GEwgpawofbstatgrprnd) +
  # annotate("text", x = 300, y = 2.10, hjust = 0, vjust = 0, color = "black",
  # label = (str_c("Correlation | Significant | Slope of | Avg | Avg "))) +
  # annotate("text", x = 300, y = 2., hjust = 0, vjust = 0, color = "black",
  # label = (str_c(" Coeff | if p < .05 | Line | dst | WJC GPA "))) +
  # Graduates' stats row (green): R, p-value, slope, avg dst, avg GPA.
  annotate("text", x = 27000, y = .35, hjust = 0, vjust = 0, cex = 4, color = "springgreen4",
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbgrad$dst,GEwgpawofbgrad$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsgrad$p.value, digits = 3),
                          " , ",
                          gradslope,
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$dstbar,digits = 1),
                          " , " ,
                          round(Gradwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # Exits' stats row (red); data coordinates, since npc placement failed here.
  annotate("text", x = 27000, y = .2, hjust = 0, vjust = 0, color = "red", #can't get it to use npc coords
           label = (str_c(" R= ",
                          round(cor(GEwgpawofbexit$dst,GEwgpawofbexit$wgpa, use = "pairwise.complete.obs"), digits = 2),
                          " , p= ",
                          round(corrsltsexit$p.value, digits = 3),
                          " , ",
                          exitslope,
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$dstbar
                                ,digits = 1),
                          " , " ,
                          round(Exitwgpawofbstatgrprnd$wgpabar, digits = 2), ")" ))) +
  # stat_cor(aes(label = paste(..r.label.., ..p.label.., sep = "~`, `~")), method = "pearson", label.x.npc = "left", label.y.npc = "bottom") +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ZIP DISTANCE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "Zip Distance",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal")
#Build vertical bars: "|" separators of the annotation footer "table", all at
#the same npc height so they line up under the stats rows.
vb0 <- annotation_custom(grid::textGrob(label = "|", #can't get it to use color = "red",
                x = unit(.40, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
vb1 <- annotation_custom(grid::textGrob(label = "|",
                x = unit(.55, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
vb2 <- annotation_custom(grid::textGrob(label = "|",
                x = unit(.675, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
vb3 <- annotation_custom(grid::textGrob(label = "|",
                x = unit(.79, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
vb4 <- annotation_custom(grid::textGrob(label = "|",
                x = unit(.88, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
vb5 <- annotation_custom(grid::textGrob(label = "|",
                x = unit(1, "npc"), y = unit(.145, "npc"), vjust = 0,
                gp = grid::gpar(cex = 2)))
# Footer header rows (column titles).
t1 <- annotation_custom(grid::textGrob(label = " Correlation Significant Slope of Avg Avg ",
                x = unit(.4, "npc"), y = unit(.170, "npc"), vjust = 0, hjust = 0,
                gp = grid::gpar(cex = 1)))
t2 <- annotation_custom(grid::textGrob(label = " Coeff if p < .05 Line DST WJC GPA ",
                x = unit(.4, "npc"), y = unit(.13, "npc"), vjust = 0, hjust = 0,
                gp = grid::gpar(cex = 1)))
v <- w + vb0 +vb1 + vb2 + vb3 + vb4 + vb5 + t1 + t2
v
# manually save as wgpaxdst at 744 x 581
#ggsave("E:/000 DTS 400 Internship/PLOTS3/wgpaxdst w annt 27x21 from ggsave while console exact 27x21.jpeg", width = 27, height = 21, units = "cm")
########
# BOXPLOTS of CATEGORICAL VARIABLES seg, race, hst, alum (then split by status: grad vs non-grad)
#This excel file contains a number of tables on different sheets of the workbook. We can see a listing of the sheets using the excel_sheets function.
# Load libraries
library(tidyverse)
library(dplyr)
library(readxl)
library(ggpubr) #scatter plots with CI and stats
#Set the colors: red = exits, springgreen4 = graduates (used by every plot below)
listofcolors <- c("red","springgreen4")
#Read-In the Worksheet
#This excel file contains a number of tables on different sheets of the workbook. We can see a listing of the sheets using the excel_sheets function.
excel_sheets("WORKFILE3.xlsx")
#'Now we will load our data using the read_excel function. We will load the data from the Purchase Date April 2019 sheet.
# NOTE(review): sheets are listed from WORKFILE3.xlsx but data is read from
# WORKFILE2.xlsx — confirm which workbook is intended (the later section reads
# the same sheet from WORKFILE3.xlsx).
GEwofb <- read_excel("WORKFILE2.xlsx", sheet = "GandE without filled blanks")
GEwofb
#Take out the "No GPA on record, so now it's: Grads and Exits with gpas, and without filled blanks
GEwgpawofb <- filter(GEwofb, status != "No GPA on record")
GEwgpawofb #172 rows
#Create function that will write the count and mean above or below the boxplot
# https://medium.com/@gscheithauer/how-to-add-number-of-observations-to-a-ggplot2-boxplot-b22710f7ef80
#This function results in Count = 123
# Annotation helper for stat_summary(): given the y values of one box, returns
# a one-row data frame placing the text "count= N \n mean = M \n" at 95% of
# `upper_limit` on the y axis.
stat_box_data <- function(y, upper_limit = 4 * 1.15) {
  n_obs <- length(y)
  avg <- round(mean(y), 1)
  data.frame(
    y = 0.95 * upper_limit,
    label = paste('count=', n_obs, '\n', 'mean =', avg, '\n')
  )
}
# Mirror-image variant of stat_box_data: label reads "N = count \n M = mean"
# so grad/exit annotations can sit on opposite sides of each box.
stat_box_data2 <- function(y, upper_limit = 4 * 1.15) {
  annotation <- paste(length(y), '= count', '\n', round(mean(y), 1), '= mean', '\n')
  data.frame(y = upper_limit * 0.95, label = annotation)
}
#wgpaxsegment: boxplot of WJC GPA by major grouping, split by status.
seg_levels <- c("Humanities","Nursing","Pre-Professional","Sciences","Social Sciences","Undecided")
GEwgpawofb$segfct <- factor(GEwgpawofb$segment, levels = seg_levels)
GEwgpawofb$segfct
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
# NOTE(review): main layer maps `segment` while the stat_summary layers map
# `segfct` — the discrete axes merge by level name, but confirm this is
# intentional (the other plots use the factor column throughout).
v <- ggplot(GEwgpawofb, aes(segment, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of MAJOR on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "Major Groupings (Segments)",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit count/mean to the left of each box, grad count/mean to the right,
  # and overall count/mean below (vjust = 15 pushes it under the boxes).
  stat_summary(GEwgpawofbexit, mapping = aes(segfct, wgpa),
               fun.data = stat_box_data,
               geom = "text", size = 2.5,
               hjust = 1,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(segfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text", size = 2.5,
               hjust = -0.05,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(segfct, wgpa),
               inherit.aes = FALSE, size = 2.5,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 15)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxsegment.jpeg", width = 9, height = 7, units = "in")
#wgpaxrace: boxplot of WJC GPA by race, split by status.
rac_levels <- c("White","Black","Hispanic","Asian","NRA","2 or more","Not Disclosed")
GEwgpawofb$racfct <- factor(GEwgpawofb$race, levels = rac_levels)
GEwgpawofb$racfct
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(race, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of RACE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "RACE",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = FALSE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(racfct, wgpa),
               fun.data = stat_box_data,
               geom = "text", size = 2.5,
               hjust = 1,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(racfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text", size = 2.5,
               hjust = -0.05,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(racfct, wgpa),
               inherit.aes = FALSE, size = 2.5,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 15)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxrace.jpeg", width = 9, height = 7, units = "in")
#wgpaxhst: boxplot of WJC GPA by high-school type, split by status.
hst_levels <- c("Public","Private/Parochial","Foreign","Home School")
GEwgpawofb$hstypefct <- factor(GEwgpawofb$hstype, levels = hst_levels)
GEwgpawofb$hstypefct
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(hstypefct, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of HIGH SCHOOL TYPE on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "High School Type",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = FALSE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(hstypefct, wgpa),
               fun.data = stat_box_data,
               geom = "text", size = 2.5,
               hjust = 1,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(hstypefct, wgpa),
               fun.data = stat_box_data2,
               geom = "text", size = 2.5,
               hjust = -0.05,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(hstypefct, wgpa),
               inherit.aes = FALSE, size = 2.5,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 15)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxhst.jpeg", width = 9, height = 7, units = "in")
#wgpaxalum: boxplot of WJC GPA by number of alumni connections, split by status.
alum_levels <- c("Zero","One","Two","Three")
GEwgpawofb$alumnifct <- factor(GEwgpawofb$alumni, levels = alum_levels)
GEwgpawofb$alumnifct
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(alumnifct, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of NUMBER of ALUMNI CONNECTIONS on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "Number of Alumni Connections",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(alumnifct, wgpa),
               fun.data = stat_box_data,
               geom = "text", size = 2.5,
               hjust = 1,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(alumnifct, wgpa),
               fun.data = stat_box_data2,
               geom = "text", size = 2.5,
               hjust = -0.05,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(alumnifct, wgpa),
               inherit.aes = FALSE, size = 2.5,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 15)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxalum.jpeg", width = 9, height = 7, units = "in")
# BINARY VARIABLES -- gndr, nm, yr, ext
# Load libraries (repeated so this section can run stand-alone)
library(tidyverse)
library(dplyr)
library(readxl)
library(ggpubr) #scatter plots with CI and stats
#Set the colors: red = exits, springgreen4 = graduates
listofcolors <- c("red","springgreen4")
#Read-In the Worksheet
#This excel file contains a number of tables on different sheets of the workbook. We can see a listing of the sheets using the excel_sheets function.
excel_sheets("WORKFILE3.xlsx")
#'Now we will load our data using the read_excel function. We will load the data from the Purchase Date April 2019 sheet.
GEwofb <- read_excel("WORKFILE3.xlsx", sheet = "GandE without filled blanks")
GEwofb
#Take out the "No GPA on record, so now it's: Grads and Exits with gpas, and without filled blanks
GEwgpawofb <- filter(GEwofb, status != "No GPA on record")
GEwgpawofb #172 rows
#Create function that will write the count and mean above or below the boxplot
# https://medium.com/@gscheithauer/how-to-add-number-of-observations-to-a-ggplot2-boxplot-b22710f7ef80
#This function results in Count = 123
# Re-definition (kept so the binary-variable section runs stand-alone):
# one-row data frame with the text "count= N \n mean = M \n" placed at
# 95% of `upper_limit` on the y axis.
stat_box_data <- function(y, upper_limit = 4 * 1.15) {
  box_text <- paste('count=', length(y), '\n', 'mean =', round(mean(y), 1), '\n')
  data.frame(y = upper_limit * 0.95, label = box_text)
}
# Re-definition of the mirror-image annotation helper:
# label reads "N = count \n M = mean \n" so it can sit on the opposite
# side of the box from stat_box_data's text.
stat_box_data2 <- function(y, upper_limit = 4 * 1.15) {
  count_part <- paste(length(y), '= count', '\n')
  mean_part <- paste(round(mean(y), 1), '= mean', '\n')
  data.frame(
    y = 0.95 * upper_limit,
    label = paste(count_part, mean_part)
  )
}
#wgpaxnm: boxplot of WJC GPA by National Merit scholarship status.
#Set the order to display categories in the y axis
nmyn_levels <- c("No","Yes")
GEwgpawofb$nmynfct <- factor(GEwgpawofb$nmyn, levels = nmyn_levels)
GEwgpawofb$nmynfct
GEwgpawofb
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(nmynfct, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of NATIONAL MERIT SCHOLARSHIP on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "NATIONAL MERIT SCHOLAR?",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = FALSE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(nmynfct, wgpa),
               fun.data = stat_box_data,
               geom = "text",
               hjust = 1.2,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(nmynfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text",
               hjust = -.2,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(nmynfct, wgpa),
               inherit.aes = FALSE,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 9)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxnm.jpeg", width = 9, height = 7, units = "in")
#wgpaxgndr: boxplot of WJC GPA by gender.
gndr_levels <- c("Female","Male","Unknown")
GEwgpawofb$gndrfct <- factor(GEwgpawofb$sex, levels = gndr_levels)
GEwgpawofb$gndrfct
GEwgpawofb
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb %>% filter(!is.na(sex)), aes(gndrfct, wgpa, color = status)) + #got rid of two blanks which created a NA boxplot
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of GENDER on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "GENDER",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = FALSE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(gndrfct, wgpa),
               fun.data = stat_box_data,
               geom = "text",
               hjust = 1.05,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(gndrfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text",
               hjust = -.15,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(gndrfct, wgpa),
               inherit.aes = FALSE,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 9)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxgndr.jpeg", width = 9, height = 7, units = "in")
#wgpaxyr: boxplot of WJC GPA by admission (start) year.
yr_levels <- c("2015","2016")
GEwgpawofb$yrfct <- factor(GEwgpawofb$start, levels = yr_levels)
GEwgpawofb$yrfct
GEwgpawofb
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(yrfct, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Effect of ADMISSION YEAR on WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "ADMISSION YEAR",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = FALSE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Exit annotations left of each box, grad annotations right, overall below.
  stat_summary(GEwgpawofbexit, mapping = aes(yrfct, wgpa),
               fun.data = stat_box_data,
               geom = "text",
               hjust = 1.2,
               vjust = 0.9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(yrfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text",
               hjust = -.2,
               vjust = 0.9) +
  stat_summary(GEwgpawofb, mapping = aes(yrfct, wgpa),
               inherit.aes = FALSE,
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 9)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxyr.jpeg", width = 9, height = 7, units = "in")
#wgpaxextyn: boxplot of WJC GPA by exit status (graduated vs. exited).
ext_levels <- c("No","Yes")
GEwgpawofb$extfct <- factor(GEwgpawofb$extyn, levels = ext_levels)
GEwgpawofb$extfct
GEwgpawofb
#create subset of only exits or only grads, these will be called when writing the count and mean above or below
#this needs to be decentralized in each plot, so that yrfct variable from the previous step is in these two dataframes that about to be created.
GEwgpawofbexit <- filter(GEwgpawofb, extyn == "Yes")
GEwgpawofbexit #21 rows
GEwgpawofbgrad <- filter(GEwgpawofb, extyn == "No")
GEwgpawofbgrad #151 rows
v <- ggplot(GEwgpawofb, aes(extfct, wgpa, color = status)) +
  geom_boxplot(, varwidth = TRUE) +
  geom_jitter(width = 0.2) +
  scale_color_manual(values = listofcolors) +
  labs(title = "The Relationship between EXITING and WJC GPA ",
       subtitle = "All Students with a WJC GPA on record, whether or not they graduated",
       caption = "Source: WJC Admissions Records",
       x = "No = Graduated , Yes = Exited",
       y = "William Jewell College GPA",
       color = "Student Status:",
       size = " Avg X, Avg Y:") +
  guides(color = guide_legend(reverse = TRUE)) +
  theme(legend.position = "bottom", legend.box = "horizontal") +
  # Here x already separates exits from grads, so each subset's count/mean is
  # centered under its own box (vjust = 9 pushes the text below the boxes).
  stat_summary(GEwgpawofbexit, mapping = aes(extfct, wgpa),
               fun.data = stat_box_data2,
               geom = "text",
               hjust = .5,
               vjust = 9) +
  stat_summary(GEwgpawofbgrad, mapping = aes(extfct, wgpa),
               fun.data = stat_box_data,
               geom = "text",
               hjust = .5,
               vjust = 9)
v
ggsave("E:/000 DTS 400 Internship/PLOTS/wgpaxext.jpeg", width = 9, height = 7, units = "in")
# Density plots of graduates' GPAs by major grouping.
str(GEwofb)
unique(GEwofb$status)
# Keep graduates only (drop both non-GPA and non-graduate rows).
Gwofb <- filter(GEwofb, status != "No GPA on record" & status != "Didn't Graduate, but had GPA")
Gwofb
#Graph of Grade Point distributions with vertical MEDIAN line for 5 segments
library(viridis)
Gwofb$segment
# gr: per-segment median GPA, used for the dashed vertical lines.
gr <- Gwofb %>%
  group_by(segment) %>%
  summarise(grp.med = median(wgpa))
gr
# Convert segment to character in both frames so the color scales match.
gr$segment <- as.character(gr$segment)
Gwofb$segment <- as.character(Gwofb$segment)
gr
Gwofb
ggplot(Gwofb, aes(wgpa, color = segment)) +
  geom_density(lwd = 2) +
  geom_vline(aes(xintercept = grp.med, color = segment),
             data = gr, linetype = 2, lwd = 1) +
  labs(color = "Major Groupings",
       title = "WJC GPA Distribution based on Major Groupings",
       subtitle = "Density plot of Graduate's GPAs, with median for each grouping",
       caption = "Source: WJC Admissions Records")
# Same plot, faceted one segment per row (medians recomputed on the
# character-typed segment column).
library(viridis)
gr <- Gwofb %>%
  group_by(segment) %>%
  summarise(grp.med = median(wgpa))
ggplot(Gwofb, aes(wgpa, color = segment)) +
  geom_density(lwd = 2) +
  geom_vline(aes(xintercept = grp.med, color = segment),
             data = gr, linetype = 2, lwd = 1) +
  facet_wrap(~ segment, ncol = 1) +
  theme(legend.position = "none")
|
1da7ce8fdbf5b36c8341491f52bb060f1c802197 | 68ccdf6931c377c3922dea0d2fcc15a660002e09 | /man/CITEseq.Rd | 95af92d0e94908e4897b9de286691403b1f66ac2 | [] | no_license | waldronlab/SingleCellMultiModal | 476a71b9df8bdce939f47d84a7c34d1fe1088750 | 409a6e0d8a152449fbb92c206d22c41465995849 | refs/heads/devel | 2023-08-08T17:00:36.498649 | 2023-07-11T21:44:48 | 2023-07-11T21:44:48 | 222,560,510 | 15 | 9 | null | 2023-07-11T21:36:00 | 2019-11-18T22:53:03 | R | UTF-8 | R | false | true | 4,309 | rd | CITEseq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CITEseq.R
\name{CITEseq}
\alias{CITEseq}
\title{CITEseq}
\usage{
CITEseq(
DataType = c("cord_blood", "peripheral_blood"),
modes = "*",
version = "1.0.0",
dry.run = TRUE,
filtered = FALSE,
verbose = TRUE,
DataClass = c("MultiAssayExperiment", "SingleCellExperiment"),
...
)
}
\arguments{
\item{DataType}{character(1) indicating the identifier of the dataset to
retrieve. (default "cord_blood")}
\item{modes}{character() The assay types or modes of data to obtain these
include scADT and scRNA-seq data by default.}
\item{version}{character(1) Either version '1.0.0' depending on
data version required.}
\item{dry.run}{logical(1) Whether to return the dataset names before actual
download (default TRUE)}
\item{filtered}{logical(1) indicating if the returned dataset needs to
have filtered cells.
See Details for additional information about the filtering process.}
\item{verbose}{logical(1) Whether to show the dataset currently being
(down)loaded (default TRUE)}
\item{DataClass}{either MultiAssayExperiment or SingleCellExperiment
data classes can be returned (default MultiAssayExperiment)}
\item{...}{Additional arguments passed on to the
\link[ExperimentHub]{ExperimentHub-class} constructor}
}
\value{
A single cell multi-modal \linkS4class{MultiAssayExperiment} or
informative `data.frame` when `dry.run` is `TRUE`.
When `DataClass` is `SingleCellExperiment` an object of this class
is returned with an RNA assay as main experiment and other assay(s)
as `AltExp(s)`.
}
\description{
This function assembles data on-the-fly from `ExperimentHub`
to provide a \linkS4class{MultiAssayExperiment} container. Actually
the `dataType` argument provides access to the available datasets
associated to the package.
}
\details{
CITEseq data are a combination of single cell transcriptomics and
about a hundred cell surface proteins.
Available datasets are:
\itemize{
\item{cord_blood:} a dataset of single cells of cord blood as
provided in Stoeckius et al. (2017).
\itemize{
\item{scRNA_Counts} - Stoeckius scRNA-seq gene count matrix
\item{scADT} - Stoeckius antibody-derived tags (ADT) data
}
}
\itemize{
\item{peripheral_blood:} a dataset of single cells of peripheral
blood as provided in Mimitou et al. (2019).
We provide two different conditions controls (CTRL) and
Cutaneous T-cell Lymphoma (CTCL).
Just build appropriate \code{modes} regex for subselecting the
dataset modes.
\itemize{
\item{scRNA} - Mimitou scRNA-seq gene count matrix
\item{scADT} - Mimitou antibody-derived tags (ADT) data
\item{scHTO} - Mimitou Hashtag Oligo (HTO) data
\item{TCRab} - Mimitou T-cell Receptors (TCR) alpha and beta
available through the object metadata.
\item{TCRgd} - Mimitou T-cell Receptors (TCR) gamma and delta
available through the object metadata.
}
}
If `filtered` parameter is `FALSE` (default), the `colData` of the returned
object contains multiple columns of `logicals` indicating the cells to be
discarded.
In case `filtered` is `TRUE`, the `discard` column is used to filter the
cells.
Column `adt.discard` indicates the cells to be discarded computed on the ADT
assay.
Column `mito.discard` indicates the cells to be discarded computed on the
RNA assay and mitochondrial genes.
Column `discard` combines the previous columns with an `OR` operator.
Note that for the `peripheral_blood` dataset these three columns are
computed and returned separately for the `CTCL` and `CTRL` conditions.
In this case the additional `discard` column combines the `discard.CTCL` and
`discard.CTRL` columns with an `OR` operator.
Cell filtering has been computed for `cord_blood` and `peripheral_blood`
datasets following section 12.3 of the Advanced Single-Cell Analysis with
Bioconductor book.
Executed code can be retrieved in the CITEseq_filtering.R script of this
package.
}
\examples{
mae <- CITEseq(DataType="cord_blood", dry.run=FALSE)
experiments(mae)
}
\references{
Stoeckius et al. (2017), Mimitou et al. (2019)
}
\author{
Dario Righelli
}
|
23794b73673b9d3fc263b141e1593c97853a4152 | 01f77d296f16aef7bc41f49abe78f03d33b060fb | /StarCompare2017.R | 4de399af4a333c6c2ae044074222e0cbc217ee1a | [] | no_license | mishoe/Rootmetrics | b97737cae91fc5ce7a1864d08a33fca029179152 | 8ab4cbe580ca4f1746574f8b43ae0fce0982f3ce | refs/heads/master | 2021-04-12T10:06:27.743251 | 2018-04-26T00:08:47 | 2018-04-26T00:08:47 | 126,742,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 51,110 | r | StarCompare2017.R | market_data_1<-read.csv("rating_data/rating_data/market_report_sets_ratings_1H2017.csv",header=T)
test_data_1<-read.csv("test_summary_data/test_summary_ratings_1h2017.csv",header=T)
rootscore_data_1<-read.csv('rating_data/rating_data/rootscore_ranks_1H2017.csv',header = T)
collection_sets_1<-read.csv('rating_data/rating_data/collection_sets_1H2017.csv',header = T)
market_data_2<-read.csv("rating_data/rating_data/market_report_sets_ratings_2H2017.csv",header=T)
test_data_2<-read.csv("test_summary_data/test_summary_ratings_2h2017.csv",header=T)
rootscore_data_2<-read.csv('rating_data/rating_data/rootscore_ranks_2H2017.csv',header = T)
collection_sets_2<-read.csv('rating_data/rating_data/collection_sets_2H2017.csv',header = T)
# 1-AT&T 2-Sprint 3-T-Mobile 4-Verizon
carriers=c('AT&T','Sprint','T-Mobile','Verizon')
locat_corr=data.frame()
sort_ids=sort(collection_sets_2$collection_set_id)
names=c()
for (id in 1:length(collection_sets_2$market_name)){
tmp_id = collection_sets_1$collection_set_id[which(collection_sets_1$market_name==collection_sets_2$market_name[which(collection_sets_2$collection_set_id==sort_ids[id])])]
locat_corr=rbind(locat_corr,c(tmp_id,sort_ids[id]))
names=c(names,as.character(collection_sets_2$market_name[which(collection_sets_2$collection_set_id==sort_ids[id])]))
}
colnames(locat_corr)=c('id_2017_1','id_2017_2')
locat_corr$City=names
#initialize each of the dataframes for the four categories in both halves of 2017
callStars_df_1 =dataStars_df_1=speedStars_df_1=smsStars_df_1= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callStars_df_1)=row.names(dataStars_df_1)=row.names(speedStars_df_1)=row.names(smsStars_df_1)=locat_corr$id_2017_1
callStars_df_2 =dataStars_df_2=speedStars_df_2=smsStars_df_2= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callStars_df_2)=row.names(dataStars_df_2)=row.names(speedStars_df_2)=row.names(smsStars_df_2)=locat_corr$id_2017_2
smsVector= rep(0,12)
for(carrier_id in 1:length(carriers)){
#extract carrier information and subset necessary data
carrier=carriers[carrier_id]#carrier name
data_ind_1=which(market_data_1$report_set_name==carrier) #indices that correspond to a current carrier in market_data_2
test_subset_1=test_data_1[test_data_1$carrier_id==carrier_id,] #subset of test_data_1 that only contains rows correspondingt to current carrier
data_ind_2=which(market_data_2$report_set_name==carrier) #indices that correspond to a current carrier in market_data_2
test_subset_2=test_data_2[test_data_2$carrier_id==carrier_id,] #subset of test_data_2 that only contains rows correspondingt to current carrier
#set up temporary 'star' vectors for assignment in the following code (reset for each carrier)
callStars_1=dataStars_1=speedStars_1=smsStars_1=rep(0,length(data_ind_1))
callStars_2=dataStars_2=speedStars_2=smsStars_2=rep(0,length(data_ind_2))
#loop through each of the regions and assign stars based on the criteria specified (for the given carrier at a time)
for (i in 1:nrow(locat_corr)){
id_1=market_data_1$collection_set_id[data_ind_1[i]] #set id equal to the area id for each iteration
tmp_testDat_1=test_subset_1[which(test_subset_1$collection_set_id==id_1),] #subset the test_subset_2 by selecting rows corresponding to the area id
id_2=locat_corr$id_2017_2[which(locat_corr$id_2017_1==id_1)]
tmp_testDat_2=test_subset_2[which(test_subset_2$collection_set_id==id_2),] #subset the test_subset_2 by selecting rows corresponding to the area id
### Assign stars for sms
smsStars_1[i] = smsStars_1[i]+if(market_data_1$`sms_access_success_inter`[data_ind_1[i]]>=.99){1}else if(market_data_1$`sms_access_success_inter`[data_ind_1[i]]>=.97){.5}else{0}
smsStars_1[i] = smsStars_1[i]+if(market_data_1$`sms_access_success_intra`[data_ind_1[i]]>=.99){1}else if(market_data_1$`sms_access_success_intra`[data_ind_1[i]]>=.97){.5}else{0}
smsStars_1[i] = smsStars_1[i]+if(market_data_1$`sms_task_success_inter`[data_ind_1[i]]>=.99){1}else if(market_data_1$`sms_task_success_inter`[data_ind_1[i]]>=.97){.5}else{0}
smsStars_1[i] = smsStars_1[i]+if(market_data_1$`sms_task_success_intra`[data_ind_1[i]]>=.99){1}else if(market_data_1$`sms_task_success_intra`[data_ind_1[i]]>=.97){.5}else{0}
smsStars_1[i] = smsStars_1[i]+if(market_data_1$`ldrs_task_success`[data_ind_1[i]]>=.97){.5}else{0}
smsStars_2[i] = smsStars_2[i]+if(market_data_2$`sms_access_success_inter`[data_ind_2[i]]>=.99){1}else if(market_data_2$`sms_access_success_inter`[data_ind_2[i]]>=.97){.5}else{0}
smsStars_2[i] = smsStars_2[i]+if(market_data_2$`sms_access_success_intra`[data_ind_2[i]]>=.99){1}else if(market_data_2$`sms_access_success_intra`[data_ind_2[i]]>=.97){.5}else{0}
smsStars_2[i] = smsStars_2[i]+if(market_data_2$`sms_task_success_inter`[data_ind_2[i]]>=.99){1}else if(market_data_2$`sms_task_success_inter`[data_ind_2[i]]>=.97){.5}else{0}
smsStars_2[i] = smsStars_2[i]+if(market_data_2$`sms_task_success_intra`[data_ind_2[i]]>=.99){1}else if(market_data_2$`sms_task_success_intra`[data_ind_2[i]]>=.97){.5}else{0}
smsStars_2[i] = smsStars_2[i]+if(market_data_2$`ldrs_task_success`[data_ind_2[i]]>=.97){.5}else{0}
#### come back to this......
indTest<-length(which(na.omit(tmp_testDat_1[which(tmp_testDat_1$test_type_id==26),]$ldrs_task_speed_max)<2000))/length(na.omit(tmp_testDat_1[which(tmp_testDat_1$test_type_id==26),]$ldrs_task_speed_max))
smsStars_1[i]<-smsStars_1[i]+if(indTest<=.98){.5}else{0}
indTest<-length(which(na.omit(tmp_testDat_2[which(tmp_testDat_2$test_type_id==26),]$ldrs_task_speed_max)<2000))/length(na.omit(tmp_testDat_2[which(tmp_testDat_2$test_type_id==26),]$ldrs_task_speed_max))
smsStars_2[i]<-smsStars_2[i]+if(indTest<=.98){.5}else{0}
# Call stars, first half.
# Drop-rate tiers: <=1% -> 2.5 stars, <=1.5% -> 2, <=2% -> 1.5, <=2.5% -> 1,
# <=3% -> 0.5, else 0.
#mobile to landline call drop
callStars_1[i]=callStars_1[i]+ifelse(market_data_1$co_drop[data_ind_1[i]]<=.01,2.5,ifelse(market_data_1$co_drop[data_ind_1[i]]<=.015,2,ifelse(market_data_1$co_drop[data_ind_1[i]]<=.02,1.5,ifelse(market_data_1$co_drop[data_ind_1[i]]<=.025,1,ifelse(market_data_1$co_drop[data_ind_1[i]]<=.03,.5,0)))))
# Block-rate tiers: <=0.2% -> 1.5, <=0.5% -> 1, <=1% -> 0.5.
#mobile to landline call block
callStars_1[i]=callStars_1[i]+ifelse(market_data_1$co_block[data_ind_1[i]]<=.002,1.5,ifelse(market_data_1$co_block[data_ind_1[i]]<=.005,1,ifelse(market_data_1$co_block[data_ind_1[i]]<=.01,.5,0)))
# m2mo block tiers: <=1.5% -> 1, <=2% -> 0.5.
# NOTE(review): field is m2mo_block -- the "mobile to landline" label below
# looks like a copied comment (presumably mobile-to-mobile originated; verify).
#mobile to landline call block
callStars_1[i]=callStars_1[i]+ifelse(market_data_1$m2mo_block[data_ind_1[i]]<=.015,1,ifelse(market_data_1$m2mo_block[data_ind_1[i]]<=.02,.5,0))
# Second half: identical tiers applied to market_data_2.
#mobile to landline call drop
callStars_2[i]=callStars_2[i]+ifelse(market_data_2$co_drop[data_ind_2[i]]<=.01,2.5,ifelse(market_data_2$co_drop[data_ind_2[i]]<=.015,2,ifelse(market_data_2$co_drop[data_ind_2[i]]<=.02,1.5,ifelse(market_data_2$co_drop[data_ind_2[i]]<=.025,1,ifelse(market_data_2$co_drop[data_ind_2[i]]<=.03,.5,0)))))
#mobile to landline call block
callStars_2[i]=callStars_2[i]+ifelse(market_data_2$co_block[data_ind_2[i]]<=.002,1.5,ifelse(market_data_2$co_block[data_ind_2[i]]<=.005,1,ifelse(market_data_2$co_block[data_ind_2[i]]<=.01,.5,0)))
#mobile to landline call block
callStars_2[i]=callStars_2[i]+ifelse(market_data_2$m2mo_block[data_ind_2[i]]<=.015,1,ifelse(market_data_2$m2mo_block[data_ind_2[i]]<=.02,.5,0))
# Data stars, first half: small fixed awards per criterion.
#Lite Data Secure
dataStars_1[i]=dataStars_1[i]+ifelse(market_data_1$ldrs_task_success[data_ind_1[i]]>=.99,.5,0)
# Download Task Success
dataStars_1[i]=dataStars_1[i]+ifelse(market_data_1$dsd_task_success[data_ind_1[i]]>=.99,.5,0)
# Share of non-NA download tests at effective speed >= 1000 (units per the
# test tables; presumably kbps -- confirm).
percentDLThrough=sum(!is.na(tmp_testDat_1$dsd_effective_download_test_speed)&tmp_testDat_1$dsd_effective_download_test_speed>=1000)/sum(!is.na(tmp_testDat_1$dsd_effective_download_test_speed))
# % DL Throughput
# Tiers: >=97% -> 2, >=95% -> 1.5, >=92% -> 1, >=90% -> 0.5.
dataStars_1[i]=dataStars_1[i]+ifelse(percentDLThrough>=.97,2,ifelse(percentDLThrough>=.95,1.5,ifelse(percentDLThrough>=.92,1,ifelse(percentDLThrough>=.9,.5,0))))
# Upload Task
dataStars_1[i]=dataStars_1[i]+ifelse(market_data_1$dsu_task_success[data_ind_1[i]]>=.99,.5,0)
# % UL Through (dsu_effective_upload_test_speed >= 500)/count(dsu_effective_upload_test_speed) -- where dsdu_effective_upload_test_speed not NULL
# Fix: the denominator previously counted non-NA *download* speeds; per the
# comment above it must count non-NA *upload* speeds.
percentULThrough=sum(!is.na(tmp_testDat_1$dsu_effective_upload_test_speed)&tmp_testDat_1$dsu_effective_upload_test_speed>=500)/sum(!is.na(tmp_testDat_1$dsu_effective_upload_test_speed))
# Tiers: >=97% -> 1 star, >=92% -> 0.5.
dataStars_1[i]=dataStars_1[i]+ifelse(percentULThrough>=.97,1,ifelse(percentULThrough>=.92,.5,0))
# Mean UDP packet drop over test_type_id == 25 rows; +0.5 star when <= 5%.
#UDP Packet Drop Rate mean(udp_packet_drop_rate) -- where test_type_id = 25
UDP_dat_mean=mean(na.omit(tmp_testDat_1[which(tmp_testDat_1$test_type_id==25),]$udp_avg_packet_drop))
dataStars_1[i]=dataStars_1[i]+ifelse(UDP_dat_mean<=.05,.5,0)
# Data stars, second half: same criteria applied to the *_2 tables.
#Lite Data Secure
dataStars_2[i]=dataStars_2[i]+ifelse(market_data_2$ldrs_task_success[data_ind_2[i]]>=.99,.5,0)
# Download Task Success
dataStars_2[i]=dataStars_2[i]+ifelse(market_data_2$dsd_task_success[data_ind_2[i]]>=.99,.5,0)
percentDLThrough=sum(!is.na(tmp_testDat_2$dsd_effective_download_test_speed)&tmp_testDat_2$dsd_effective_download_test_speed>=1000)/sum(!is.na(tmp_testDat_2$dsd_effective_download_test_speed))
# % DL Throughput
dataStars_2[i]=dataStars_2[i]+ifelse(percentDLThrough>=.97,2,ifelse(percentDLThrough>=.95,1.5,ifelse(percentDLThrough>=.92,1,ifelse(percentDLThrough>=.9,.5,0))))
# Upload Task
dataStars_2[i]=dataStars_2[i]+ifelse(market_data_2$dsu_task_success[data_ind_2[i]]>=.99,.5,0)
# % UL Through (dsu_effective_upload_test_speed >= 500)/count(dsu_effective_upload_test_speed) -- where dsdu_effective_upload_test_speed not NULL
# Fix: denominator previously counted non-NA *download* speeds; per the
# comment above it must count non-NA *upload* speeds (same bug as region 1).
percentULThrough=sum(!is.na(tmp_testDat_2$dsu_effective_upload_test_speed)&tmp_testDat_2$dsu_effective_upload_test_speed>=500)/sum(!is.na(tmp_testDat_2$dsu_effective_upload_test_speed))
# Tiers: >=97% -> 1 star, >=92% -> 0.5.
dataStars_2[i]=dataStars_2[i]+ifelse(percentULThrough>=.97,1,ifelse(percentULThrough>=.92,.5,0))
#UDP Packet Drop Rate mean(udp_packet_drop_rate) -- where test_type_id = 25
UDP_dat_mean=mean(na.omit(tmp_testDat_2[which(tmp_testDat_2$test_type_id==25),]$udp_avg_packet_drop))
dataStars_2[i]=dataStars_2[i]+ifelse(UDP_dat_mean<=.05,.5,0)
# Speed stars, first half.
# 5th-pct download throughput: >=5000 -> 1.5, >=3000 -> 1, >=2000 -> 0.5.
#Calculate Speed and Performance stars for each of the regions
speedStars_1[i]=speedStars_1[i]+ifelse(market_data_1$dsd_effective_throughput_05p[data_ind_1[i]]>=5000,1.5,ifelse(market_data_1$dsd_effective_throughput_05p[data_ind_1[i]]>=3000,1,ifelse(market_data_1$dsd_effective_throughput_05p[data_ind_1[i]]>=2000,0.5,0)))
# Median time-to-first-byte: <=400 -> 1, <=700 -> 0.5 (units per the table;
# presumably ms -- confirm).
speedStars_1[i]=speedStars_1[i]+ifelse(market_data_1$dsd_time_to_first_byte_50p[data_ind_1[i]]<=400,1,ifelse(market_data_1$dsd_time_to_first_byte_50p[data_ind_1[i]]<=700,0.5,0))
# 95th-pct download throughput >= 75000 -> 0.5.
speedStars_1[i]=speedStars_1[i]+ifelse(market_data_1$dsd_effective_throughput_95p[data_ind_1[i]]>=75000,.5,0)
# 5th-pct upload throughput: >=1500 -> 1, >=1000 -> 0.5.
speedStars_1[i]=speedStars_1[i]+ifelse(market_data_1$dsu_effective_throughput_05p[data_ind_1[i]]>=1500,1,ifelse(market_data_1$dsu_effective_throughput_05p[data_ind_1[i]]>=1000,0.5,0))
# 95th percentile of lite-data max task speed (test_type_id == 26); +0.5 when
# <= 1000.
liteData95Quant=quantile(na.omit(tmp_testDat_1[which(tmp_testDat_1$test_type_id==26),]$ldrs_task_speed_max),probs=c(.95))
speedStars_1[i]=speedStars_1[i]+ifelse(liteData95Quant<=1000,.5,0)
# 95th percentile of m2mo call-setup duration over successful-access call
# tests (test_type_id == 23); +0.5 when <= 7000.
MM95Quant=quantile(na.omit(tmp_testDat_1[which(tmp_testDat_1$test_type_id==23 & tmp_testDat_1$flag_access_success=='t'),]$m2mo_total_call_setup_duration),probs=c(.95))
speedStars_1[i]=speedStars_1[i]+ifelse(MM95Quant<=7000,.5,0)
# Speed stars, second half: identical criteria on the *_2 tables.
#Calculate Speed and Performance stars for each of the regions
speedStars_2[i]=speedStars_2[i]+ifelse(market_data_2$dsd_effective_throughput_05p[data_ind_2[i]]>=5000,1.5,ifelse(market_data_2$dsd_effective_throughput_05p[data_ind_2[i]]>=3000,1,ifelse(market_data_2$dsd_effective_throughput_05p[data_ind_2[i]]>=2000,0.5,0)))
speedStars_2[i]=speedStars_2[i]+ifelse(market_data_2$dsd_time_to_first_byte_50p[data_ind_2[i]]<=400,1,ifelse(market_data_2$dsd_time_to_first_byte_50p[data_ind_2[i]]<=700,0.5,0))
speedStars_2[i]=speedStars_2[i]+ifelse(market_data_2$dsd_effective_throughput_95p[data_ind_2[i]]>=75000,.5,0)
speedStars_2[i]=speedStars_2[i]+ifelse(market_data_2$dsu_effective_throughput_05p[data_ind_2[i]]>=1500,1,ifelse(market_data_2$dsu_effective_throughput_05p[data_ind_2[i]]>=1000,0.5,0))
liteData95Quant=quantile(na.omit(tmp_testDat_2[which(tmp_testDat_2$test_type_id==26),]$ldrs_task_speed_max),probs=c(.95))
speedStars_2[i]=speedStars_2[i]+ifelse(liteData95Quant<=1000,.5,0)
MM95Quant=quantile(na.omit(tmp_testDat_2[which(tmp_testDat_2$test_type_id==23 & tmp_testDat_2$flag_access_success=='t'),]$m2mo_total_call_setup_duration),probs=c(.95))
speedStars_2[i]=speedStars_2[i]+ifelse(MM95Quant<=7000,.5,0)
}
# End of per-market loop: store this carrier's per-market star totals into the
# category data frames (one column per carrier) for both halves of 2017.
callStars_df_1[,carrier_id]=callStars_1
dataStars_df_1[,carrier_id]=dataStars_1
speedStars_df_1[,carrier_id]=speedStars_1
smsStars_df_1[,carrier_id]=smsStars_1
callStars_df_2[,carrier_id]=callStars_2
dataStars_df_2[,carrier_id]=dataStars_2
speedStars_df_2[,carrier_id]=speedStars_2
smsStars_df_2[,carrier_id]=smsStars_2
}
# Overall stars: weighted blend of the four category star frames, rounded to
# the nearest whole number and halved back onto the 0-5 half-star scale.
stars_overall_1 <- round(0.8 * callStars_df_1 + 0.55 * dataStars_df_1 +
                           0.55 * speedStars_df_1 + 0.1 * smsStars_df_1) / 2
stars_overall_2 <- round(0.8 * callStars_df_2 + 0.55 * dataStars_df_2 +
                           0.55 * speedStars_df_2 + 0.1 * smsStars_df_2) / 2
#initialize each of the dataframes for the four categories in both halves of 2017
# Rank frames: one row per market (row names = that half's collection-set
# ids), one column per carrier, initialized to 0 and filled in below.
callStarsRank_df_1 =dataStarsRank_df_1=speedStarsRank_df_1=smsStarsRank_df_1=starsRank_overall_1= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callStarsRank_df_1)=row.names(dataStarsRank_df_1)=row.names(speedStarsRank_df_1)=row.names(smsStarsRank_df_1)=row.names(starsRank_overall_1)=locat_corr$id_2017_1
callStarsRank_df_2 =dataStarsRank_df_2=speedStarsRank_df_2=smsStarsRank_df_2=starsRank_overall_2= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callStarsRank_df_2)=row.names(dataStarsRank_df_2)=row.names(speedStarsRank_df_2)=row.names(smsStarsRank_df_2)=row.names(starsRank_overall_2)=locat_corr$id_2017_2
#all rankings
### For each market (row), rank the carriers by their star value: the best
### distinct value gets position 1, ties share a position, and the next
### distinct value gets position 1 + (number of carriers ranked so far),
### i.e. standard competition ranking.
# Extracted into one helper to replace eight copy-pasted loops; the body is
# token-for-token the original algorithm.
rank_positions <- function(score_df, rank_df) {
  mat <- as.matrix(score_df)
  for (i in seq_len(nrow(mat))) {
    vals <- unique(mat[i, ], fromLast = TRUE)
    tot <- 0
    # Walk distinct values from highest to lowest.
    for (j in rev(order(vals))) {
      ind <- which(mat[i, ] == vals[j])
      rank_df[i, ind] <- 1 + tot
      tot <- tot + length(ind)
    }
  }
  rank_df
}
starsRank_overall_1 <- rank_positions(stars_overall_1, starsRank_overall_1)
starsRank_overall_2 <- rank_positions(stars_overall_2, starsRank_overall_2)
dataStarsRank_df_1  <- rank_positions(dataStars_df_1, dataStarsRank_df_1)
dataStarsRank_df_2  <- rank_positions(dataStars_df_2, dataStarsRank_df_2)
callStarsRank_df_1  <- rank_positions(callStars_df_1, callStarsRank_df_1)
callStarsRank_df_2  <- rank_positions(callStars_df_2, callStarsRank_df_2)
smsStarsRank_df_1   <- rank_positions(smsStars_df_1, smsStarsRank_df_1)
smsStarsRank_df_2   <- rank_positions(smsStars_df_2, smsStarsRank_df_2)
speedStarsRank_df_1 <- rank_positions(speedStars_df_1, speedStarsRank_df_1)
speedStarsRank_df_2 <- rank_positions(speedStars_df_2, speedStarsRank_df_2)
#initialize each of the dataframes for the four categories in both halves of 2017
# RootScore value frames, same layout as the star frames (markets x carriers).
callScores_df_1 =dataScores_df_1=speedScores_df_1=smsScores_df_1= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callScores_df_1)=row.names(dataScores_df_1)=row.names(speedScores_df_1)=row.names(smsScores_df_1)=locat_corr$id_2017_1
callScores_df_2 =dataScores_df_2=speedScores_df_2=smsScores_df_2= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callScores_df_2)=row.names(dataScores_df_2)=row.names(speedScores_df_2)=row.names(smsScores_df_2)=locat_corr$id_2017_2
#initialize each of the dataframes for the four categories in both halves of 2017
# RootScore rank frames, filled from the precomputed rank column below.
callScoresRank_df_1 =dataScoresRank_df_1=speedScoresRank_df_1=smsScoresRank_df_1= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callScoresRank_df_1)=row.names(dataScoresRank_df_1)=row.names(speedScoresRank_df_1)=row.names(smsScoresRank_df_1)=locat_corr$id_2017_1
callScoresRank_df_2 =dataScoresRank_df_2=speedScoresRank_df_2=smsScoresRank_df_2= setNames(data.frame(matrix(0,ncol = 4, nrow = nrow(locat_corr))),carriers)
row.names(callScoresRank_df_2)=row.names(dataScoresRank_df_2)=row.names(speedScoresRank_df_2)=row.names(smsScoresRank_df_2)=locat_corr$id_2017_2
# Copy each carrier's RootScore and rank (per category) out of the half-year
# rootscore tables into the per-market frames. A single lookup helper replaces
# sixteen near-identical subset expressions; which() preserves the original
# subsetting semantics exactly.
lookup_rootscore <- function(dat, carrier, index, set_id, field) {
  dat[[field]][which(dat$carrier_id == carrier &
                       dat$rootscore_index == index &
                       dat$collection_set_id == set_id)]
}
for (i in 1:nrow(locat_corr)){
  loc1 = locat_corr$id_2017_1[i]
  loc2 = locat_corr$id_2017_2[i]
  for (id in 1:length(carriers)){
    callScores_df_1[i, id]      <- lookup_rootscore(rootscore_data_1, id, 'Call',  loc1, 'rootscore')
    callScoresRank_df_1[i, id]  <- lookup_rootscore(rootscore_data_1, id, 'Call',  loc1, 'rank')
    callScores_df_2[i, id]      <- lookup_rootscore(rootscore_data_2, id, 'Call',  loc2, 'rootscore')
    callScoresRank_df_2[i, id]  <- lookup_rootscore(rootscore_data_2, id, 'Call',  loc2, 'rank')
    smsScores_df_1[i, id]       <- lookup_rootscore(rootscore_data_1, id, 'Text',  loc1, 'rootscore')
    smsScoresRank_df_1[i, id]   <- lookup_rootscore(rootscore_data_1, id, 'Text',  loc1, 'rank')
    smsScores_df_2[i, id]       <- lookup_rootscore(rootscore_data_2, id, 'Text',  loc2, 'rootscore')
    smsScoresRank_df_2[i, id]   <- lookup_rootscore(rootscore_data_2, id, 'Text',  loc2, 'rank')
    dataScores_df_1[i, id]      <- lookup_rootscore(rootscore_data_1, id, 'Data',  loc1, 'rootscore')
    dataScoresRank_df_1[i, id]  <- lookup_rootscore(rootscore_data_1, id, 'Data',  loc1, 'rank')
    dataScores_df_2[i, id]      <- lookup_rootscore(rootscore_data_2, id, 'Data',  loc2, 'rootscore')
    dataScoresRank_df_2[i, id]  <- lookup_rootscore(rootscore_data_2, id, 'Data',  loc2, 'rank')
    speedScores_df_1[i, id]     <- lookup_rootscore(rootscore_data_1, id, 'Speed', loc1, 'rootscore')
    speedScoresRank_df_1[i, id] <- lookup_rootscore(rootscore_data_1, id, 'Speed', loc1, 'rank')
    speedScores_df_2[i, id]     <- lookup_rootscore(rootscore_data_2, id, 'Speed', loc2, 'rootscore')
    speedScoresRank_df_2[i, id] <- lookup_rootscore(rootscore_data_2, id, 'Speed', loc2, 'rank')
  }
}
# Brand colors (RGB, 0-255), one row per carrier, used by all histogram plots.
col_mat <- rbind(
  c(0, 159, 219),
  c(251, 223, 0),
  c(227, 0, 116),
  c(236, 7, 16)
)
########## Plot the RootStars Raw
# One histogram per carrier/category/half of the raw RootStars.
# Fix: the 2H Call/Data/SMS plots previously drew the 1H data frames
# (copy-paste bug); they now use the *_df_2 frames matching their file names.
# NOTE(review): paste() with its default sep = " " embeds spaces in the file
# names; kept unchanged so existing consumers of these paths still match.
for(i in seq_along(carriers)){
  carrier_col <- rgb(col_mat[i,1], col_mat[i,2], col_mat[i,3], maxColorValue = 255)
  png(filename = paste("Plots/1H/raw/speed_star_raw_",carriers[i],'_1H2017.png'))
  hist(speedStars_df_1[,i],breaks = seq(0,5, .5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 1H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/1H/raw/call_star_raw_",carriers[i],'_1H2017.png'))
  hist(callStars_df_1[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 1H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/1H/raw/data_star_raw_",carriers[i],'_1H2017.png'))
  hist(dataStars_df_1[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 1H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/1H/raw/sms_star_raw_",carriers[i],'_1H2017.png'))
  hist(smsStars_df_1[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 1H 2017 in SMS'))
  dev.off()
  png(filename = paste("Plots/2H/raw/speed_star_raw_",carriers[i],'_2H2017.png'))
  hist(speedStars_df_2[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 2H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/2H/raw/call_star_raw_",carriers[i],'_2H2017.png'))
  hist(callStars_df_2[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 2H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/2H/raw/data_star_raw_",carriers[i],'_2H2017.png'))
  hist(dataStars_df_2[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 2H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/2H/raw/sms_star_raw_",carriers[i],'_2H2017.png'))
  hist(smsStars_df_2[,i],breaks = seq(0,5,.5),xlab='RootStars',col=carrier_col,main = paste('RootStars for ',carriers[i],' 2H 2017 in SMS'))
  dev.off()
}
########## Plot the RootStars Rank (position each carrier came in over all locations)
# Fix: the 2H Call/Data/SMS plots previously drew the 1H rank frames
# (copy-paste bug); they now use the *Rank_df_2 frames matching their names.
for(i in seq_along(carriers)){
  carrier_col <- rgb(col_mat[i,1], col_mat[i,2], col_mat[i,3], maxColorValue = 255)
  png(filename = paste("Plots/1H/rank/speed_star_rank_",carriers[i],'_1H2017.png'))
  hist(speedStarsRank_df_1[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 1H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/1H/rank/call_star_rank_",carriers[i],'_1H2017.png'))
  hist(callStarsRank_df_1[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 1H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/1H/rank/data_star_rank_",carriers[i],'_1H2017.png'))
  hist(dataStarsRank_df_1[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 1H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/1H/rank/sms_star_rank_",carriers[i],'_1H2017.png'))
  hist(smsStarsRank_df_1[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 1H 2017 in SMS'))
  dev.off()
  png(filename = paste("Plots/2H/rank/speed_star_rank_",carriers[i],'_2H2017.png'))
  hist(speedStarsRank_df_2[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 2H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/2H/rank/call_star_rank_",carriers[i],'_2H2017.png'))
  hist(callStarsRank_df_2[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 2H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/2H/rank/data_star_rank_",carriers[i],'_2H2017.png'))
  hist(dataStarsRank_df_2[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 2H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/2H/rank/sms_star_rank_",carriers[i],'_2H2017.png'))
  hist(smsStarsRank_df_2[,i],breaks = seq(0,4,1),xlab='RootStar Positions',col=carrier_col,main = paste('RootStar Positions for ',carriers[i],' 2H 2017 in SMS'))
  dev.off()
}
########## Plot the RootScores Raw
# Histograms of RootScores above 60 per carrier/category/half.
# Fix: the 2H Call/Data/SMS plots previously drew the 1H score frames
# (copy-paste bug); they now use the *Scores_df_2 frames matching their names.
for(i in seq_along(carriers)){
  carrier_col <- rgb(col_mat[i,1], col_mat[i,2], col_mat[i,3], maxColorValue = 255)
  png(filename = paste("Plots/1H/raw/speed_score_raw_",carriers[i],'_1H2017.png'))
  hist(speedScores_df_1[which(speedScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/1H/raw/call_score_raw_",carriers[i],'_1H2017.png'))
  hist(callScores_df_1[which(callScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/1H/raw/data_score_raw_",carriers[i],'_1H2017.png'))
  hist(dataScores_df_1[which(dataScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/1H/raw/sms_score_raw_",carriers[i],'_1H2017.png'))
  hist(smsScores_df_1[which(smsScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in SMS'))
  dev.off()
  png(filename = paste("Plots/2H/raw/speed_score_raw_",carriers[i],'_2H2017.png'))
  hist(speedScores_df_2[which(speedScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/2H/raw/call_score_raw_",carriers[i],'_2H2017.png'))
  hist(callScores_df_2[which(callScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/2H/raw/data_score_raw_",carriers[i],'_2H2017.png'))
  hist(dataScores_df_2[which(dataScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/2H/raw/sms_score_raw_",carriers[i],'_2H2017.png'))
  hist(smsScores_df_2[which(smsScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in SMS'))
  dev.off()
}
########## Plot the RootScores Rank (position each carrier came in over all locations)
# Fix: the 2H Call/Data/SMS plots previously drew the 1H rank frames
# (copy-paste bug); they now use the *ScoresRank_df_2 frames. (The section
# header also said "RootStars" -- it plots RootScore ranks.)
for(i in seq_along(carriers)){
  carrier_col <- rgb(col_mat[i,1], col_mat[i,2], col_mat[i,3], maxColorValue = 255)
  png(filename = paste("Plots/1H/rank/speed_score_rank_",carriers[i],'_1H2017.png'))
  hist(speedScoresRank_df_1[,i],breaks = seq(.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 1H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/1H/rank/call_score_rank_",carriers[i],'_1H2017.png'))
  hist(callScoresRank_df_1[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 1H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/1H/rank/data_score_rank_",carriers[i],'_1H2017.png'))
  hist(dataScoresRank_df_1[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 1H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/1H/rank/sms_score_rank_",carriers[i],'_1H2017.png'))
  hist(smsScoresRank_df_1[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 1H 2017 in SMS'))
  dev.off()
  png(filename = paste("Plots/2H/rank/speed_score_rank_",carriers[i],'_2H2017.png'))
  hist(speedScoresRank_df_2[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 2H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/2H/rank/call_score_rank_",carriers[i],'_2H2017.png'))
  hist(callScoresRank_df_2[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 2H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/2H/rank/data_score_rank_",carriers[i],'_2H2017.png'))
  hist(dataScoresRank_df_2[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 2H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/2H/rank/sms_score_rank_",carriers[i],'_2H2017.png'))
  hist(smsScoresRank_df_2[,i],breaks = seq(0.5,4.5,1),xlab='RootScore Positions',col=carrier_col,main = paste('RootScore Positions for ',carriers[i],' 2H 2017 in SMS'))
  dev.off()
}
#### create histograms for the change in stars from 1H to 2H 2017
# Per-carrier histograms of the 2H-minus-1H RootStars delta for each category;
# bins run from -5.5 to 5.5 in half-star steps.
# NOTE(review): directory case here is "raw" while the companion Rank section
# uses "Rank" -- confirm intended on case-sensitive filesystems.
for(i in 1:length(carriers)){
png(filename = paste("Plots/Change/raw/speed_star_raw_",carriers[i],'_compare2017.png'))
hist(speedStars_df_2[,i]-speedStars_df_1[,i],breaks = seq(-5.5,5.5, .5),xlab='Change RootStars',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars for ',carriers[i],' From 1H to 2H 2017 in Speed'))
dev.off()
png(filename = paste("Plots/Change/raw/call_star_raw_",carriers[i],'_compare2017.png'))
hist(callStars_df_2[,i]-callStars_df_1[,i],breaks = seq(-5.5,5.5,.5),xlab='Change RootStars',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars for ',carriers[i],' From 1H to 2H 2017 in Call'))
dev.off()
png(filename = paste("Plots/Change/raw/data_star_raw_",carriers[i],'_compare2017.png'))
hist(dataStars_df_2[,i]-dataStars_df_1[,i],breaks = seq(-5.5,5.5,.5),xlab='Change RootStars',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars for ',carriers[i],' From 1H to 2H 2017 in Data'))
dev.off()
png(filename = paste("Plots/Change/raw/sms_star_raw_",carriers[i],'_compare2017.png'))
hist(smsStars_df_2[,i]-smsStars_df_1[,i],breaks = seq(-5.5,5.5,.5),xlab='Change RootStars',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars for ',carriers[i],' From 1H to 2H 2017 in SMS'))
dev.off()
}
#### create histograms for the change in stars Position from 1H to 2H 2017
########## Plot the RootStars Position Change
# 2H-minus-1H change in rank position (negative = moved up); whole-number
# bins from -3.5 to 3.5.
for(i in 1:length(carriers)){
png(filename = paste("Plots/Change/Rank/speed_star_rank_",carriers[i],'_compare2017.png'))
hist(speedStarsRank_df_2[,i]-speedStarsRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootStars Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars Positions for ',carriers[i],' From 1H to 2H 2017 in Speed'))
dev.off()
png(filename = paste("Plots/Change/Rank/call_star_rank_",carriers[i],'_compare2017.png'))
hist(callStarsRank_df_2[,i]-callStarsRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootStars Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars Positions for ',carriers[i],' From 1H to 2H 2017 in Call'))
dev.off()
png(filename = paste("Plots/Change/Rank/data_star_rank_",carriers[i],'_compare2017.png'))
hist(dataStarsRank_df_2[,i]-dataStarsRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootStars Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars Positions for ',carriers[i],' From 1H to 2H 2017 in Data'))
dev.off()
png(filename = paste("Plots/Change/Rank/sms_star_rank_",carriers[i],'_compare2017.png'))
hist(smsStarsRank_df_2[,i]-smsStarsRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootStars Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootStars Positions for ',carriers[i],' From 1H to 2H 2017 in SMS'))
dev.off()
}
########## Plot the the change in RootScores from 1H - 2H 2017
# 2H-minus-1H RootScore deltas, restricted to |delta| < 15 so the breaks
# vector covers all plotted values; unit-wide bins.
for(i in 1:length(carriers)){
png(filename = paste("Plots/Change/Raw/speed_score_raw_",carriers[i],'_compare2017.png'))
hist((speedScores_df_2[,i]-speedScores_df_1[,i])[which(abs(speedScores_df_2[,i]-speedScores_df_1[,i])<15)],breaks = seq(-15,15, 1),xlab='Change RootScores',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores for ',carriers[i],' From 1H to 2H 2017 in Speed'))
dev.off()
png(filename = paste("Plots/Change/Raw/call_score_raw_",carriers[i],'_compare2017.png'))
hist((callScores_df_2[,i]-callScores_df_1[,i])[which(abs(callScores_df_2[,i]-callScores_df_1[,i])<15)],breaks = seq(-15,15,1),xlab='Change RootScores',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores for ',carriers[i],' From 1H to 2H 2017 in Call'))
dev.off()
png(filename = paste("Plots/Change/Raw/data_score_raw_",carriers[i],'_compare2017.png'))
hist((dataScores_df_2[,i]-dataScores_df_1[,i])[which(abs(dataScores_df_2[,i]-dataScores_df_1[,i])<15)],breaks = seq(-15,15,1),xlab='Change RootScores',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores for ',carriers[i],' From 1H to 2H 2017 in Data'))
dev.off()
png(filename = paste("Plots/Change/Raw/sms_score_raw_",carriers[i],'_compare2017.png'))
hist((smsScores_df_2[,i]-smsScores_df_1[,i])[which(abs(smsScores_df_2[,i]-smsScores_df_1[,i])<15)],breaks = seq(-15,15,1),xlab='Change RootScores',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores for ',carriers[i],' From 1H to 2H 2017 in SMS'))
dev.off()
}
#### create histograms for the change in scores Position from 1H to 2H 2017
########## Plot the RootScores Position Change
# 2H-minus-1H change in RootScore rank position (negative = moved up).
for(i in 1:length(carriers)){
png(filename = paste("Plots/Change/Rank/speed_score_rank_",carriers[i],'_compare2017.png'))
hist(speedScoresRank_df_2[,i]-speedScoresRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootScores Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores Positions for ',carriers[i],' From 1H to 2H 2017 in Speed'))
dev.off()
png(filename = paste("Plots/Change/Rank/call_score_rank_",carriers[i],'_compare2017.png'))
hist(callScoresRank_df_2[,i]-callScoresRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootScores Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores Positions for ',carriers[i],' From 1H to 2H 2017 in Call'))
dev.off()
png(filename = paste("Plots/Change/Rank/data_score_rank_",carriers[i],'_compare2017.png'))
hist(dataScoresRank_df_2[,i]-dataScoresRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootScores Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores Positions for ',carriers[i],' From 1H to 2H 2017 in Data'))
dev.off()
png(filename = paste("Plots/Change/Rank/sms_score_rank_",carriers[i],'_compare2017.png'))
hist(smsScoresRank_df_2[,i]-smsScoresRank_df_1[,i],breaks = seq(-3.5,3.5, 1),xlab='Change in RootScores Positions',col=rgb(col_mat[i,1],col_mat[i,2],col_mat[i,3],maxColorValue = 255),main = paste('Change in RootScores Positions for ',carriers[i],' From 1H to 2H 2017 in SMS'))
dev.off()
}
########## Plot the RootScores as histograms
# Near-duplicate of the earlier RootScores-raw section but writing into the
# capitalized "Raw" directories; kept because both output paths exist.
# Fix: the 2H Call/Data/SMS plots previously drew the 1H score frames
# (copy-paste bug); they now use the *Scores_df_2 frames.
for(i in seq_along(carriers)){
  carrier_col <- rgb(col_mat[i,1], col_mat[i,2], col_mat[i,3], maxColorValue = 255)
  png(filename = paste("Plots/1H/Raw/speed_score_raw_",carriers[i],'_1H2017.png'))
  hist(speedScores_df_1[which(speedScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/1H/Raw/call_score_raw_",carriers[i],'_1H2017.png'))
  hist(callScores_df_1[which(callScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/1H/Raw/data_score_raw_",carriers[i],'_1H2017.png'))
  hist(dataScores_df_1[which(dataScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/1H/Raw/sms_score_raw_",carriers[i],'_1H2017.png'))
  hist(smsScores_df_1[which(smsScores_df_1[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 1H 2017 in SMS'))
  dev.off()
  png(filename = paste("Plots/2H/Raw/speed_score_raw_",carriers[i],'_2H2017.png'))
  hist(speedScores_df_2[which(speedScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Speed'))
  dev.off()
  png(filename = paste("Plots/2H/Raw/call_score_raw_",carriers[i],'_2H2017.png'))
  hist(callScores_df_2[which(callScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Call'))
  dev.off()
  png(filename = paste("Plots/2H/Raw/data_score_raw_",carriers[i],'_2H2017.png'))
  hist(dataScores_df_2[which(dataScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in Data'))
  dev.off()
  png(filename = paste("Plots/2H/Raw/sms_score_raw_",carriers[i],'_2H2017.png'))
  hist(smsScores_df_2[which(smsScores_df_2[,i]>60),i],breaks = seq(60,100,.5),xlab='RootScores',col=carrier_col,main = paste('RootScores for ',carriers[i],' 2H 2017 in SMS'))
  dev.off()
}
#### create histograms for each of the categories accross all carriers/locations
# Count matrices of 1H->2H star deltas: one row per carrier, one column per
# half-star change in [-5, 5] (21 bins). Generalized: row count now follows
# length(carriers) instead of a hard-coded 4.
n_delta_bins <- 21
call_hist=matrix(0,nrow = length(carriers),ncol = n_delta_bins)
sms_hist=matrix(0,nrow = length(carriers),ncol = n_delta_bins)
data_hist=matrix(0,nrow = length(carriers),ncol = n_delta_bins)
speed_hist=matrix(0,nrow = length(carriers),ncol = n_delta_bins)
# Map a star delta (multiple of 0.5 in [-5, 5]) to its 1-based bin index;
# arithmetic on halves is exact in binary floating point.
delta_bin <- function(delta) (delta + 5.5) / .5
for(carrier_id in seq_along(carriers)){
  carrier=carriers[carrier_id]#carrier name (kept for parity; unused below)
  for (i in seq_len(nrow(locat_corr))){
    b <- delta_bin(callStars_df_2[i,carrier_id]-callStars_df_1[i,carrier_id])
    call_hist[carrier_id,b]=call_hist[carrier_id,b]+1
    b <- delta_bin(smsStars_df_2[i,carrier_id]-smsStars_df_1[i,carrier_id])
    sms_hist[carrier_id,b]=sms_hist[carrier_id,b]+1
    b <- delta_bin(dataStars_df_2[i,carrier_id]-dataStars_df_1[i,carrier_id])
    data_hist[carrier_id,b]=data_hist[carrier_id,b]+1
    b <- delta_bin(speedStars_df_2[i,carrier_id]-speedStars_df_1[i,carrier_id])
    speed_hist[carrier_id,b]=speed_hist[carrier_id,b]+1
  }
}
# Label bins with their star delta and rows with the carrier names.
colnames(call_hist)=colnames(speed_hist)=colnames(data_hist)=colnames(sms_hist)= seq(-5,5,.5)
rownames(call_hist)=rownames(speed_hist)=rownames(data_hist)=rownames(sms_hist)=carriers
#Save data
## All outputs below are written relative to the working directory; the
## stale setwd() call is kept commented out for reference.
#setwd('documents/Consulting/Rootmetrics')
## save the RootStar values from 1H to 2H of 2017
## (one CSV per category x half; row names become the first CSV column)
write.csv(callStars_df_1,"Data/1H/call_star_raw_1H2017.csv")
write.csv(dataStars_df_1,"Data/1H/data_star_raw_1H2017.csv")
write.csv(smsStars_df_1,"Data/1H/sms_star_raw_1H2017.csv")
write.csv(speedStars_df_1,"Data/1H/speed_star_raw_1H2017.csv")
write.csv(callStars_df_2,"Data/2H/call_star_raw_2H2017.csv")
write.csv(dataStars_df_2,"Data/2H/data_star_raw_2H2017.csv")
write.csv(smsStars_df_2,"Data/2H/sms_star_raw_2H2017.csv")
write.csv(speedStars_df_2,"Data/2H/speed_star_raw_2H2017.csv")
## save the RootStar Rank values from 1H and 2H of 2017
write.csv(callStarsRank_df_1,"Data/1H/call_star_rank_1H2017.csv")
write.csv(dataStarsRank_df_1,"Data/1H/data_star_rank_1H2017.csv")
write.csv(smsStarsRank_df_1,"Data/1H/sms_star_rank_1H2017.csv")
write.csv(speedStarsRank_df_1,"Data/1H/speed_star_rank_1H2017.csv")
write.csv(callStarsRank_df_2,"Data/2H/call_star_rank_2H2017.csv")
write.csv(dataStarsRank_df_2,"Data/2H/data_star_rank_2H2017.csv")
write.csv(smsStarsRank_df_2,"Data/2H/sms_star_rank_2H2017.csv")
write.csv(speedStarsRank_df_2,"Data/2H/speed_star_rank_2H2017.csv")
## save the RootScore values from 1H to 2H of 2017
write.csv(callScores_df_1,"Data/1H/call_score_raw_1H2017.csv")
write.csv(dataScores_df_1,"Data/1H/data_score_raw_1H2017.csv")
write.csv(smsScores_df_1,"Data/1H/sms_score_raw_1H2017.csv")
write.csv(speedScores_df_1,"Data/1H/speed_score_raw_1H2017.csv")
write.csv(callScores_df_2,"Data/2H/call_score_raw_2H2017.csv")
write.csv(dataScores_df_2,"Data/2H/data_score_raw_2H2017.csv")
write.csv(smsScores_df_2,"Data/2H/sms_score_raw_2H2017.csv")
write.csv(speedScores_df_2,"Data/2H/speed_score_raw_2H2017.csv")
## save the RootScore Rank values from 1H to 2H of 2017
write.csv(callScoresRank_df_1,"Data/1H/call_score_rank_1H2017.csv")
write.csv(dataScoresRank_df_1,"Data/1H/data_score_rank_1H2017.csv")
write.csv(smsScoresRank_df_1,"Data/1H/sms_score_rank_1H2017.csv")
write.csv(speedScoresRank_df_1,"Data/1H/speed_score_rank_1H2017.csv")
write.csv(callScoresRank_df_2,"Data/2H/call_score_rank_2H2017.csv")
write.csv(dataScoresRank_df_2,"Data/2H/data_score_rank_2H2017.csv")
write.csv(smsScoresRank_df_2,"Data/2H/sms_score_rank_2H2017.csv")
write.csv(speedScoresRank_df_2,"Data/2H/speed_score_rank_2H2017.csv")
### save the change from 1half to 2half of 2017 in each of the categories
### (elementwise 2H - 1H differences; note the file-name inconsistency:
### star changes use "stars_", score changes use "score_")
write.csv(callStars_df_2-callStars_df_1,"Data/Change/call_stars_raw_change2017.csv")
write.csv(smsStars_df_2-smsStars_df_1,"Data/Change/sms_stars_raw_change2017.csv")
write.csv(dataStars_df_2-dataStars_df_1,"Data/Change/data_stars_raw_change2017.csv")
write.csv(speedStars_df_2-speedStars_df_1,"Data/Change/speed_stars_raw_change2017.csv")
write.csv(callStarsRank_df_2-callStarsRank_df_1,"Data/Change/call_stars_rank_change2017.csv")
write.csv(smsStarsRank_df_2-smsStarsRank_df_1,"Data/Change/sms_stars_rank_change2017.csv")
write.csv(dataStarsRank_df_2-dataStarsRank_df_1,"Data/Change/data_stars_rank_change2017.csv")
write.csv(speedStarsRank_df_2-speedStarsRank_df_1,"Data/Change/speed_stars_rank_change2017.csv")
write.csv(callScores_df_2-callScores_df_1,"Data/Change/call_score_raw_change2017.csv")
write.csv(smsScores_df_2-smsScores_df_1,"Data/Change/sms_score_raw_change2017.csv")
write.csv(dataScores_df_2-dataScores_df_1,"Data/Change/data_score_raw_change2017.csv")
write.csv(speedScores_df_2-speedScores_df_1,"Data/Change/speed_score_raw_change2017.csv")
write.csv(callScoresRank_df_2-callScoresRank_df_1,"Data/Change/call_score_rank_change2017.csv")
write.csv(smsScoresRank_df_2-smsScoresRank_df_1,"Data/Change/sms_score_rank_change2017.csv")
write.csv(dataScoresRank_df_2-dataScoresRank_df_1,"Data/Change/data_score_rank_change2017.csv")
write.csv(speedScoresRank_df_2-speedScoresRank_df_1,"Data/Change/speed_score_rank_change2017.csv")
## save the binned change histograms, transposed so that rows are bins
## (-5 .. +5 by 0.5) and columns are carriers
write.csv(t(call_hist),"Data/Change/call_stars_raw_hist_change2017.csv")
write.csv(t(sms_hist),"Data/Change/sms_stars_raw_hist_change2017.csv")
write.csv(t(data_hist),"Data/Change/data_stars_raw_hist_change2017.csv")
write.csv(t(speed_hist),"Data/Change/speed_stars_raw_hist_change2017.csv")
## Plot the distribution of half-over-half star-rating changes per category.
## NOTE(review): the original opened a single png device before the first
## plot -- mis-named "Plots/2H/sms_score_raw_<carriers[i]>_2H2017.png", using
## a leftover loop index `i` -- and never called dev.off(), so the four plots
## overwrote each other in that one device. Each category now gets its own
## correctly named file under Plots/2H/ (a directory already used above).
plot_change_hist <- function(hist_mat, category_label, outfile) {
  # hist_mat: per-carrier count matrix (one row per carrier, assumed 4;
  #           21 columns = 0.5-star bins spanning -5 .. +5 stars).
  # category_label: "Call"/"SMS"/"Data"/"Speed", spliced into the title.
  # outfile: png path to write the figure to.
  # Uses the global `carriers` vector for legend labels -- TODO confirm
  # its order matches the rows of hist_mat.
  bins <- seq(-5, 5, .5)
  cols <- c("red", "blue", "green", "black")
  png(filename = outfile)
  # Guarantee the device is closed even if plotting fails.
  on.exit(dev.off(), add = TRUE)
  plot(bins, hist_mat[1, ], type = "l", lwd = 3, col = cols[1], ylim = c(0, 80),
       main = paste0("Change In ", category_label,
                     " Star Ranking From 1H To 2H of 2017"),
       xlab = "Change in Stars",
       ylab = "Frequency")
  for (k in 2:nrow(hist_mat)) {
    points(bins, hist_mat[k, ], type = "l", lwd = 3, col = cols[k])
  }
  # Mean change per carrier, weighted by the bin counts (rounded to 2 dp),
  # shown in the legend alongside the carrier name.
  avg_change <- round(as.vector(hist_mat %*% bins) / rowSums(hist_mat),
                      digits = 2)
  legend("topleft",
         legend = paste(carriers, avg_change, sep = ", Avg Chg:"),
         col = cols, lty = 1, lwd = 5, cex = 0.8)
}

### plot each category for all carriers
plot_change_hist(call_hist, "Call", "Plots/2H/call_stars_change2017.png")
plot_change_hist(sms_hist, "SMS", "Plots/2H/sms_stars_change2017.png")
plot_change_hist(data_hist, "Data", "Plots/2H/data_stars_change2017.png")
plot_change_hist(speed_hist, "Speed", "Plots/2H/speed_stars_change2017.png")
|
b3c044ef07e433e7685fc9cf3d122cb6c77a34ad | ea524efd69aaa01a698112d4eb3ee4bf0db35988 | /man/expect_is.Rd | fc0482d40548de66964fc2b3b5def70149561460 | [
"MIT"
] | permissive | r-lib/testthat | 92f317432e9e8097a5e5c21455f67563c923765f | 29018e067f87b07805e55178f387d2a04ff8311f | refs/heads/main | 2023-08-31T02:50:55.045661 | 2023-08-08T12:17:23 | 2023-08-08T12:17:23 | 295,311 | 452 | 217 | NOASSERTION | 2023-08-29T10:51:30 | 2009-09-02T12:51:44 | R | UTF-8 | R | false | true | 1,442 | rd | expect_is.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expect-inheritance.R
\name{expect_is}
\alias{expect_is}
\title{Does an object inherit from a given class?}
\usage{
expect_is(object, class, info = NULL, label = NULL)
}
\arguments{
\item{object}{Object to test.
Supports limited unquoting to make it easier to generate readable failures
within a function or for loop. See \link{quasi_label} for more details.}
\item{class}{Either a character vector of class names, or
for \code{expect_s3_class()} and \code{expect_s4_class()}, an \code{NA} to assert
that \code{object} isn't an S3 or S4 object.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}}
\code{expect_is()} is an older form that uses \code{\link[=inherits]{inherits()}} without checking
whether \code{x} is S3, S4, or neither. Instead, I'd recommend using
\code{\link[=expect_type]{expect_type()}}, \code{\link[=expect_s3_class]{expect_s3_class()}} or \code{\link[=expect_s4_class]{expect_s4_class()}} to more clearly
convey your intent.
}
\section{3rd edition}{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}
\code{expect_is()} is formally deprecated in the 3rd edition.
}
\keyword{internal}
|
LaunchEV <- function() {
  # Locate the ExpressionView viewer bundled with the installed package
  # and open it in the user's default browser via a file:// URL.
  viewer_path <- system.file("ExpressionView.html", package = "ExpressionView")
  viewer_url <- URLencode(paste0("file://", viewer_path))
  browseURL(viewer_url)
}
|
c2403e5ece2a870d1c60ab68b431361349fbeea4 | 478ee600eb1a2e188aa185ae52afc116f69a1002 | /code/04c_tissue_analysis/02b_check_sl_t1.R | 55d1341fadfad685d6bc75f8928e9fe61d057c71 | [] | no_license | erflynn/smoking_sex_expression | fed8090ad5209064aade2dda9707ea292f70c8c9 | 5e6a12716498da9c25aed0a8261d1e697eea2afe | refs/heads/master | 2023-08-04T14:06:27.088820 | 2021-09-27T15:11:18 | 2021-09-27T15:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,042 | r | 02b_check_sl_t1.R | # GSE31908, GSE40013, GSE17913, GSE2125, GSE36868 (no)
## GSE31908: download the series and inspect sample counts per platform,
## then look at the distribution of the p_male sex-label scores for it.
gse1 <- getGEO("GSE31908") # 20 missing sex labels
dim(exprs(gse1$`GSE31908-GPL570_series_matrix.txt.gz`)) # 17
dim(exprs(gse1$`GSE31908-GPL96_series_matrix.txt.gz`)) # 50
dim(exprs(gse1$`GSE31908-GPL97_series_matrix.txt.gz`)) # 50
gse31908 # ... 40 ...
# Histogram of predicted-male probability for this study's samples.
ggplot(sl %>% filter(study_acc=="GSE31908"),
aes(x=p_male))+
geom_histogram()
# gse2 <- getGEO("GSE40013") # sex labels are a mess
# exp2 <- exprs(gse2$GSE40013_series_matrix.txt.gz)
# unique(pData(gse2$GSE40013_series_matrix.txt.gz)$platform) # GPL6244
# f_df <- fData(gse2$GSE40013_series_matrix.txt.gz)
# massir_gpl6244 <- f_df %>% filter(str_detect(mrna_assignment, "chrY")) %>% pull(ID)
#
# xist_gpl6244 <- f_df %>% filter(str_detect(gene_assignment, "XIST")) %>% pull(ID)
# rps_gpl6244 <- f_df %>% filter(str_detect(gene_assignment, "RPS4Y1")) %>% pull(ID)
# kdm_gpl6244 <- f_df %>% filter(str_detect(gene_assignment, "KDM5D")) %>% pull(ID)
# plot(exp2[as.character(xist_gpl6244),],
# exp2[as.character(rps_gpl6244),])
# #massir_lab2 <- massiRAcc(exp2, massir_gpl6244, plot=T)
# # this is a mess
# ggplot(sl %>% filter(study_acc=="GSE40013"),
# aes(x=p_male))+
# geom_histogram()
## GSE17913 (GPL570): re-derive sex labels from XIST (female) vs
## RPS4Y1/KDM5D (male) probe expression and compare them with the
## study's recorded labels.
gse3 <- getGEO("GSE17913")
exp3 <- exprs(gse3$GSE17913_series_matrix.txt.gz)
unique(pData(gse3$GSE17913_series_matrix.txt.gz)$platform) # GPL570
f_df <- fData(gse3$GSE17913_series_matrix.txt.gz)
# Probe IDs for the sex-marker genes on GPL570.
xist_gpl570 <- f_df %>% filter(str_detect(`Gene Symbol`, "XIST")) %>% pull(ID)
rps_gpl570 <- f_df %>% filter(str_detect(`Gene Symbol`, "RPS4Y1")) %>% pull(ID)
kdm_gpl570 <- f_df %>% filter(str_detect(`Gene Symbol`, "KDM5D")) %>% pull(ID)
ts <- tokerSexLab(exp3, f.genes =xist_gpl570, m.genes=c(rps_gpl570, kdm_gpl570))
# Reorder the derived labels to match the sample order in gse17913.
my_s <- ts[gse17913$sample_acc]
# Visual check: female-marker vs male-marker mean expression per sample.
plot(colMeans(exp3[xist_gpl570,]), colMeans(exp3[c(rps_gpl570, kdm_gpl570),]))
table(gse17913$sex_lab==my_s) # two of ours do not
table(gse17913$gender==my_s) # 7 of theirs do not
View(head(f_df))
unique(pData(gse4$GSE2125_series_matrix.txt.gz)$platform) # GPL570
exp4 <- exprs(gse4$GSE2125_series_matrix.txt.gz)
ts4 <- tokerSexLab(exp4, f.genes =xist_gpl570, m.genes=c(rps_gpl570, kdm_gpl570))
my_s4 <- ts4[gse2125$sample_acc]
plot(colMeans(exp4[xist_gpl570,]), colMeans(exp4[c(rps_gpl570, kdm_gpl570),]))
my_s4[is.na(gse2125$sex_lab)]
table(gse2125$sex_lab==my_s4) # all match
table(gse17913$gender==my_s) # 7 of theirs do not
# gse5 <- getGEO("GSE36868")
# exp5 <- exprs(gse5$GSE36868_series_matrix.txt.gz)
# dim(exp5) # 960
# str(gse5$GSE36868_series_matrix.txt.gz, 2) # GPL6883
# unique(pData(gse5$GSE36868_series_matrix.txt.gz)$platform)
# f_df <- fData(gse5$GSE36868_series_matrix.txt.gz)
#
# f_df %>%filter(str_detect(Symbol, "RPS4Y1"))
# f_df %>% filter(str_detect(Synonyms, "XIST")) # appears to have no XIST transcripts...
# f_df %>% filter(Chromosome=="X") %>% pull(Symbol)
# massir_genes <- f_df %>% filter(Chromosome=="Y") %>% pull(ID)
# massir_lab <- massiRAcc(exp5, massir_genes, plot=T)
# ... very unclear separation
|
e6d9a23cd3fa9532d4934344d527fbfdd405fde9 | f663a843dcd66b1d4e15bfe6b9a6f618a169c3f7 | /fluoro/R/raman_mask.R | a7444b1364a4fa7d55d8294cb8050074a415c6d7 | [
"MIT"
] | permissive | rhlee12/Fluoro-Package | 44556f53aaf7a455aa9229138b11367143e90903 | 07d6f88df2a56ad9220d12de96ee53b9e2cfedae | refs/heads/master | 2021-03-30T17:56:33.852014 | 2018-05-30T22:18:51 | 2018-05-30T22:18:51 | 118,687,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,892 | r | raman_mask.R | #' @title Mask Rayleigh Scatter
#'
#' @author Robert Lee
#'
#' @details Masks first and second order Rayleigh scatter on the supplied EEM
#' by overwriting the affected cells with the specified value (defaulting to 0
#' if no value is entered).
#'
#' @param eem A properly-formatted EEM, produced or read by a \code{fluoro}
#' function, with numeric emission wavelengths as row names and numeric
#' excitation wavelengths as column names.
#' @param mask.value Optional, defaults to zero. The numeric value that cells
#' in the mask area should be forced to.
#' \strong{NA/NaN/NULL values will prevent plotting in \code{fluoro}!}
#'
#' @return A masked EEM with dimensions and names preserved.
#'
#' @export
#'
#'
rayleigh.mask <- function(eem, mask.value = 0) {
  # Scatter cut-off formulas (kept identical to the original implementation):
  # pri.math gives the first-order cut-off from an emission wavelength,
  # sec.math gives the second-order cut-off from an excitation wavelength.
  pri.math <- function(x) (base::trunc(0.95 * x * 10^-1) / 10^-1) - 20
  sec.math <- function(x) (base::trunc(1.7 * x * 10^-1) / 10^-1) + 65

  # Wavelengths are carried in the dimnames of the EEM.
  em.wl <- as.numeric(rownames(eem))  # emission, one per row
  ex.wl <- as.numeric(colnames(eem))  # excitation, one per column

  # Second-order scatter: for each excitation column, mask every emission
  # row that falls outside the span from the first emission wavelength up
  # to that column's cut-off.
  for (j in seq_along(ex.wl)) {
    cutoff <- sec.math(ex.wl[j])
    for (i in seq_along(em.wl)) {
      if (!(em.wl[i] %in% cutoff:em.wl[1])) {
        # BUG FIX: the original hard-coded 0 here, ignoring mask.value and
        # contradicting the documented contract (second loop used mask.value).
        eem[i, j] <- mask.value
      }
    }
  }

  # First-order scatter: for each emission row, mask every excitation
  # column outside the span from the first excitation wavelength up to
  # that row's cut-off.
  for (i in seq_along(em.wl)) {
    cutoff <- pri.math(em.wl[i])
    for (j in seq_along(ex.wl)) {
      if (!(ex.wl[j] %in% cutoff:ex.wl[1])) {
        eem[i, j] <- mask.value
      }
    }
  }

  eem
}
|
5588dc6e02aa27f847620dd9e2d4cc674a5a1907 | 06772dd41870da689df082609992e032970bac12 | /R/tfbs.overlaps.R | 36584ca0a58c638bf425f1b29e728bd22b3408ec | [] | no_license | TheSeoman/Scripts | 6c5ffa94d4c0e144a31f9f9e54ca324d90586ee0 | 3fb59b6ac7e24c6dba266d47ca7aeedbb2bb57c1 | refs/heads/master | 2021-05-15T15:00:56.679283 | 2018-04-11T18:03:39 | 2018-04-11T18:03:39 | 107,265,319 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,633 | r | tfbs.overlaps.R | source('Scripts/R/paths.R')
require('GenomicRanges')
load(PATHS$EXPR.RANGES.DATA)
load(PATHS$METH.RANGES.DATA)
load(PATHS$SNP.RANGES.DATA)
load(PATHS$TFBS.RANGES.DATA)
enlarge.ranges <- function(ranges, flanking) {
  # Return a copy of `ranges` with each interval widened by `flanking`
  # bases on both sides; only the name/score metadata columns and the
  # element names of the input are carried over.
  widened <- GRanges(
    seqnames = seqnames(ranges),
    ranges = IRanges(
      start = start(ranges) - flanking,
      end = end(ranges) + flanking
    ),
    strand = strand(ranges),
    name = ranges$name,
    score = ranges$score
  )
  names(widened) <- names(ranges)
  widened
}
get.tfbs.overlaps <- function(tfbs.ranges, essay.ranges, flanking, data.type) {
  # Find all overlaps between TFBS ranges and the given essay ranges
  # (optionally widened by `flanking` bases on each side first) and
  # return a list with the matched id pairs plus the subset of each
  # input that participates in at least one overlap.
  if (flanking > 0) {
    essay.ranges <- enlarge.ranges(essay.ranges, flanking)
  }
  hits <- findOverlaps(tfbs.ranges, essay.ranges, type = 'any')
  tfbs.ids <- names(tfbs.ranges[queryHits(hits)])
  essay.ids <- names(essay.ranges[subjectHits(hits)])

  result <- list()
  # One row per overlapping (tfbs, essay) pair; the second column is
  # named after the essay data type (e.g. "meth.id").
  result$pairs <- cbind.data.frame(tfbs.ids, essay.ids,
                                   stringsAsFactors = FALSE)
  colnames(result$pairs) <- c('tfbs.id', paste0(data.type, '.id'))
  result$tfbs.ranges <- tfbs.ranges[unique(tfbs.ids)]
  result[[paste0(data.type, '.ranges')]] <- essay.ranges[unique(essay.ids)]
  result
}
## Compute and persist TFBS overlaps for each data type. Methylation
## probes are extended by 100 bp of flanking sequence before overlap
## detection; expression probes and SNPs are matched as-is.
meth.tfbs.overlap <- get.tfbs.overlaps(blood.tfbs.ranges, meth.ranges, 100, 'meth')
save(meth.tfbs.overlap, file = PATHS$METH.TFBS.OVERLAP.DATA)
expr.tfbs.overlap <- get.tfbs.overlaps(blood.tfbs.ranges, expr.ranges, 0, 'expr')
save(expr.tfbs.overlap, file = PATHS$EXPR.TFBS.OVERLAP.DATA)
snp.tfbs.overlap <- get.tfbs.overlaps(blood.tfbs.ranges, snp.ranges, 0, 'snp')
save(snp.tfbs.overlap, file = PATHS$SNP.TFBS.OVERLAP.DATA)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.