blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b3b993517558f506ea7c9a905013e8cde21e5cc
|
f70e4f30b846d990c5fb3ddca6ac71b7082eaa04
|
/cybera/Intro_to_R_programming_script.R
|
27c6b8d1bc778792b0f5f45e966790aead7cca49
|
[] |
no_license
|
barton-satchwill/titanic
|
0f8ae38bcd9b572df7829bf1c5d16b43a1d8803b
|
0ae908ba4a5aa6dd8eb95bb7049dff49e339de5b
|
refs/heads/master
| 2016-08-11T22:10:50.297980
| 2016-03-20T23:55:59
| 2016-03-20T23:55:59
| 54,438,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
Intro_to_R_programming_script.R
|
# Intro to R Script
# To be used line by line or in small collections to reduce typing time during webinar
library(plyr)
# Factor vector example
v <- c("New York", "Chicago", "Seattle", "San Jose", "Gary", "Seattle",
"Seattle", "San Jose", "New York", "New York", "New York")
v
v.factor <- as.factor(v) # This is an inline comment
v.factor
# Renaming levels is positional: levels() are stored alphabetically
# (Chicago, Gary, New York, San Jose, Seattle), so this assignment replaces
# the third level, "New York", with "Brooklyn"; the other four keep their names.
levels(v.factor) <- c("Chicago", "Gary", "Brooklyn", "San Jose", "Seattle")
v.factor
length(v.factor)
# Data frame examples
# Load the built-in iris dataset and inspect its shape and contents.
data("iris")
names(iris)
nrow(iris)
ncol(iris)
dim(iris)
names(iris)
head(iris)
tail(iris)
str(iris)
summary(iris)
# Column selection: one name, several names, one position, a position range,
# and negative positions (drop columns 1 and 2).
head(iris[,"Sepal.Length"])
head(iris[,c("Petal.Length", "Petal.Width")])
head(iris[,2])
head(iris[,1:3])
head(iris[,-c(1,2)])
# Row filtering with a logical condition on a column.
head(iris[iris$Species=="virginica",])
|
ff8493604ced52ef2fd0171efab81957ff3c98f0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hyper2/examples/mult_grid.Rd.R
|
45f2e4877d166f8cea11f6e6536485f2fb998e01
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
mult_grid.Rd.R
|
# Extracted example code for hyper2's mult_grid/pair_grid
# ("Kronecker matrix functionality" per the .Rd title).
library(hyper2)
### Name: mult_grid
### Title: Kronecker matrix functionality
### Aliases: mult_grid pair_grid
### ** Examples
# Grid from a pair of identity matrices (2x2 and 3x3).
pair_grid(diag(2),diag(3))
# Grid over a list of identity matrices of sizes 1 through 4.
mult_grid(lapply(1:4,diag))
|
fa6da1669dc914d5487079615b6f5c8800abbc5c
|
434584f79283272e674845545e63e09872c57122
|
/man/ouludaily10.Rd
|
505d06677ddd47411d4bd6f7742c10228b3d357f
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
ollinevalainen/fmir
|
6c796068ca1d12abf319a41914d523254cbeec5f
|
4270c064cdb8198e6f0b265ea22159d84a217cb6
|
refs/heads/master
| 2023-03-05T01:53:34.774497
| 2021-02-19T17:18:38
| 2021-02-19T17:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,079
|
rd
|
ouludaily10.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ouludaily10}
\alias{ouludaily10}
\title{Daily weather in Oulu, Finland, in 2010-2017}
\format{A data frame with 2922 rows and 9 variables:
\describe{
\item{place}{city name}
\item{location}{coordinates of the observation station}
\item{time}{date of observation}
\item{rrday}{precipitation rate}
\item{snow}{snow depth}
\item{tday}{average temperature, degrees Celsius}
\item{tg_pt12h_min}{?}
\item{tmax}{maximum temperature, degrees Celsius}
\item{tmin}{minimum temperature, degrees Celsius}
}}
\usage{
ouludaily10
}
\description{
A dataset downloaded from the Finnish Meteorological Institute's open data
API using \strong{fmir}. Contains daily simple weather observations from Oulu,
Finland, covering the years 2010 to 2017. The data are made available by the
\href{https://en.ilmatieteenlaitos.fi}{Finnish Meteorological Institute} and are
licensed under \href{https://creativecommons.org/licenses/by/4.0/}{CC-BY 4.0}.
}
\keyword{datasets}
|
3e3db0cca06924abb8acbc218801a18878ea0af3
|
42ec3b3289fed899f634e6fb1868d3fa3e585288
|
/MM_Barstool_RScript.R
|
faedc8cbbb51647d9a33cdb4c3a0f46dbbb39443
|
[] |
no_license
|
momiller88/BANA_7025_001
|
d67e70373c6c9ba5446f84c81d0f2632e2bdd1dd
|
41c0a1671489a24509ef73d37ee94bc0bb9506f3
|
refs/heads/master
| 2020-08-31T09:34:21.336086
| 2019-12-04T04:31:32
| 2019-12-04T04:31:32
| 218,661,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,542
|
r
|
MM_Barstool_RScript.R
|
# Load the tidyverse meta-package (dplyr is part of it; the separate
# library(dplyr) call is redundant but harmless).
library(tidyverse)
library(dplyr)
# NOTE(review): setwd() with a personal absolute path makes the script
# non-portable; kept as-is for this exploratory script.
setwd("~/School papers/Graduate/Data Wrangling/BANA_7025_001")
# import datasets
barstool<- read.csv("pizza_barstool.csv", header = T, stringsAsFactors = FALSE)
datafiniti<- read.csv("pizza_datafiniti.csv", header = T, stringsAsFactors = FALSE)
jared<- read.csv("pizza_jared.csv", header = T, stringsAsFactors = FALSE)
# Check structure of each dataset
str(barstool)
str(datafiniti)
str(jared)
# variable names for each dataset
names(barstool)
names(datafiniti)
names(jared)
# example data from each dataset
head(barstool)
head(datafiniti)
head(jared)
# missing variables in each dataset and percentages
# NOTE(review): 463 / 10000 / 375 are hard-coded row counts; using nrow()
# of each dataset would survive changes to the CSVs.
colSums(is.na(barstool))
colSums(is.na(barstool))/463*100
colSums(is.na(datafiniti))
colSums(is.na(datafiniti))/10000*100
colSums(is.na(jared))
colSums(is.na(jared))/375*100
# Duplicated pizza shops by name column
sum(duplicated(barstool$name))
sum(duplicated(datafiniti$name))
sum(duplicated(jared$place))
#####################################################################################
# Unique values for each variable in each dataset
## this treats each column in your DF as 'x' in function(x)
### and applies length(unique()) to each column of your DF
### the 2 is the MARGIN argument: apply over columns (not rows)
apply(barstool, 2, function(x) length(unique(x)))
apply(datafiniti, 2, function(x) length(unique(x)))
apply(jared, 2, function(x) length(unique(x)))
# Second pass: re-read barstool with readr and clean up its types.
# NOTE(review): another user-specific absolute path — not portable.
setwd("/Users/sidne/Documents/Data Wrangling")
library(readr)
library(dplyr)
library(ggplot2)
barstool <- read_csv("pizza_barstool.csv")
#'barstool' Attributes#
attributes(barstool)
# Rename the second column to "address".
names(barstool)[2] <- "address"
head(barstool)
#change data types#
# Convert zip to character (keeps leading zeros) and several count columns
# to integer in a single transform() call.
barstool_data_type <- transform(barstool, zip = as.character(zip), price_level = as.integer(price_level), provider_review_count = as.integer(provider_review_count), review_stats_all_count = as.integer(review_stats_all_count), review_stats_community_count = as.integer(review_stats_community_count), review_stats_critic_count = as.integer(review_stats_critic_count), review_stats_dave_count = as.integer(review_stats_dave_count))
head(barstool_data_type)
tibble(barstool_data_type)
#Table#
# Shop names that appear more than once.
barstool_names <- table(barstool$name)
barstool_names[barstool_names > 1]
barstool_data_type[barstool_data_type$name == "Joe's Pizza",]
# Disambiguate one duplicate by row index (422 is position-dependent and
# will break if the CSV row order changes).
barstool_data_type[422,]$name <- "Joe's Pizza - 8th"
barstool_data_type[422,]$name
# NOTE(review): column 2 was renamed to "address" above — if that column
# was "address1" this lookup now returns NULL; verify against the CSV header.
barstool_addresses <- table(barstool$address1)
barstool_addresses[barstool_addresses > 1]
table(barstool$city) #checked
table(barstool$zip) #checked
table(barstool$country) #checked
table(barstool$price_level)
table(barstool$provider_rating)
table(barstool$provider_review_count)
table(barstool$latitude)
table(barstool$longitude)
table(barstool$review_stats_all_average_score) #checked (could simplify to rounded 2 decimals)
table(barstool$review_stats_all_count)
#Boxplot#
#needs to probably be updated to ggplot#
boxplot(barstool$price_level)
boxplot(barstool$provider_rating)
boxplot(barstool$provider_review_count)
boxplot(barstool$review_stats_all_average_score)
boxplot(barstool$review_stats_all_count)
###
#Summary#
summary(barstool$price_level)
summary(barstool$provider_rating)
summary(barstool$provider_review_count)
summary(barstool$review_stats_all_average_score)
summary(barstool$review_stats_all_count)
#########################################################################################
#Read in the infiniti file#
datafiniti <- read_csv("pizza_datafiniti.csv")
#'datafiniti' Attributes#
attributes(datafiniti)
#Table#
# Frequency tables for every column, to eyeball distributions and oddities.
table(datafiniti$name)
table(datafiniti$address)
table(datafiniti$city)
table(datafiniti$country)
table(datafiniti$province)
table(datafiniti$latitude)
table(datafiniti$longitude)
table(datafiniti$categories)# checked (multiple values in the same column, could separate into it's own dataframe, could create binary flags for each distinct category, tbd)
table(datafiniti$price_range_min)
table(datafiniti$price_range_max)
#remove duplicates#
# Keep only the first occurrence of each fully-duplicated row.
# (Per ?duplicated, nmax is a hash-table size hint, not a "keep n" option.)
datafiniti_dup_removed <- datafiniti[!duplicated(datafiniti, nmax = 1), ]
view(datafiniti_dup_removed)
#table after dedup#
table(datafiniti_dup_removed$name)
table(datafiniti_dup_removed$address)
table(datafiniti_dup_removed$city)
table(datafiniti_dup_removed$country)
table(datafiniti_dup_removed$province)
table(datafiniti_dup_removed$latitude)
table(datafiniti_dup_removed$longitude)
table(datafiniti_dup_removed$categories)# checked (multiple values in the same column, could separate into it's own dataframe, could create binary flags for each distinct category, tbd)
table(datafiniti_dup_removed$price_range_min)
table(datafiniti_dup_removed$price_range_max)
#select name and categories from datafiniti#
df4 <- datafiniti_dup_removed %>%
select(name, categories)
#select only records containing "Pizza Place" in categories#
# (the filtered result is only printed, not assigned back to df4)
df4 %>%
filter(str_detect(categories, "Pizza Place"))
str(df4)
str(datafiniti_dup_removed)
#remove 'categories' column#
# NOTE(review): drops column 8 by position; assumes 'categories' is the 8th
# column — a name-based select(-categories) would be safer.
datafiniti_dup_removed <- datafiniti_dup_removed[-c(8)]
###
#Summary#
summary(datafiniti_dup_removed$name)
summary(datafiniti_dup_removed$address)
summary(datafiniti_dup_removed$city)
summary(datafiniti_dup_removed$country)
summary(datafiniti_dup_removed$province)
summary(datafiniti_dup_removed$latitude)
summary(datafiniti_dup_removed$longitude)
summary(datafiniti_dup_removed$price_range_min)
summary(datafiniti_dup_removed$price_range_max)
###############################################################################
#import 'tidyverse' library#
library(tidyverse)
#read in jared file#
jared<- read_csv("pizza_jared.csv")
#'jared' Attributes#
attributes(jared)
#Table#
table(jared$polla_qid)#checked (ID's duplicated x5, assuming it corresponds to each poll data may need reformatted)
table(jared$answer)#checked
table(jared$votes)#checked
table(jared$pollq_id)#checked
table(jared$question)#checked (might be some discrepancies with the q_id, and questions, several questions had more than 5 dups and each q_id had only 5 dups)
table(jared$place)#checked (might have similar issue as question, but could be different locations with the same name)
table(jared$time)#checked
table(jared$total_votes)#checked (might want to convert time to a more readable format)
table(jared$percent)#checked (not bad, lots of 0's compared to others, next largest is 21)
#spread columns to display number of votes labeled by column#
#select and spread responses count#
# Long -> wide: one row per poll, one column per answer category.
# (spread() is superseded by pivot_wider() in current tidyr.)
df1 <- jared %>%
select(polla_qid, answer, votes) %>%
spread(answer, votes, fill = 0)
#select and spread responses percent#
df2 <- jared %>%
select(polla_qid, answer, percent) %>%
spread(answer, percent, fill = 0)
# Prefix the percent columns with "Pct_" so they don't collide with counts.
names(df2) <- c("polla_qid", "Pct_Average", "Pct_Excellent", "Pct_Fair", "Pct_Good", "Pct_Never_Again", "Pct_Poor")
#merge df1 and df2
dfm <- merge(x = df1, y = df2, by = "polla_qid", all.x = TRUE)
# select remaining columns and dedup question by poll#
df3 <- jared %>%
select(pollq_id, question, place, time, total_votes)
df3 <- df3[!duplicated(df3, nmax = 1,), ]
#merge votes df and question/location df
jared_clean <- merge(x = df3, y = dfm, by.x = "pollq_id", by.y = "polla_qid", all.x = TRUE)
# Final column set; note pollq_id and time are NOT kept past this point.
jared_clean <- select(jared_clean,
place,
question,
total_votes,
`Never Again`,
Poor, Fair,
Average,
Good,
Excellent,
Pct_Never_Again,
Pct_Poor,
Pct_Fair,
Pct_Average,
Pct_Good,
Pct_Excellent)
head(jared_clean)
#Table jared_clean#
# Frequency tables for each column actually present in jared_clean.
# FIX: the previous version tabulated columns that no longer exist —
# pollq_id and time were dropped by the select() above (table(NULL) errors
# with "nothing to tabulate"), and the percent columns were renamed with
# the "Pct_" prefix when df2 was built (the old `%_Average` was also listed
# twice while Pct_Good was missing).
table(jared_clean$question)
table(jared_clean$place)
table(jared_clean$total_votes)
table(jared_clean$Average)
table(jared_clean$Excellent)
table(jared_clean$Fair)
table(jared_clean$Good)
table(jared_clean$`Never Again`)
table(jared_clean$Poor)
table(jared_clean$Pct_Average)
table(jared_clean$Pct_Excellent)
table(jared_clean$Pct_Fair)
table(jared_clean$Pct_Good)
table(jared_clean$Pct_Never_Again)
table(jared_clean$Pct_Poor)
#identifying places that have duplicated names in jared_clean#
jared_dups <- data.frame(table(jared_clean$place))
#shorten list to only dup records#
jared_dups[jared_dups$Freq > 1, ]
# Pull the full rows for every duplicated place, ordered descending by place.
jared_dups <- arrange(jared_clean[jared_clean$place %in% jared_dups$Var1[jared_dups$Freq > 1], ], desc(place))
jared_dups
#sum duplicate records votes#
jared_dups_combined <-
jared_dups %>%
group_by(place, question) %>%
summarise(total_votes = sum(total_votes),
`Never Again` = sum(`Never Again`),
Poor = sum(Poor),
Fair = sum(Fair),
Average = sum(Average),
Good = sum(Good),
Excellent = sum(Excellent))
jared_dups_combined
#recalculate percents columns
# Percentages are recomputed as fractions of the combined total_votes.
jared_dups_pct <-
jared_dups_combined %>%
mutate( Pct_Never_Again = `Never Again` / total_votes,
Pct_Poor = Poor / total_votes,
Pct_Fair = Fair / total_votes,
Pct_Average = Average / total_votes,
Pct_Good = Good / total_votes,
Pct_Excellent = Excellent / total_votes)
# NOTE(review): as_data_frame() is deprecated in tibble; as_tibble() is the
# current equivalent.
jared_dups_pct <- as_data_frame(jared_dups_pct)
colnames(jared_dups_pct)[4] <- "Never Again"
jared_dups_pct
#remove duplicated records from jared_clean#
jd_dup_names <- jared_dups_pct$place
jc_wo_dupnames <- jared_clean[ ! jared_clean$place %in% jd_dup_names, ]
head(jc_wo_dupnames)
head(jared_dups_pct)
# Recombine the unique rows with the summed-duplicate rows.
jared2 <- rbind(jc_wo_dupnames, jared_dups_pct)
jared2
###
#Summary#
summary(datafiniti_dup_removed$name)
###############################################################################################
#test on inner joining jared2 with barstool by place = name#
# (renamed the result to merge1 so the base::merge() function is not shadowed
# by a data object of the same name)
merge1 <- merge(x = jared2, y = barstool, by.x = "place", by.y = "name")
#27 matches, with 5 duplicated names w/ different addresses
dup <- duplicated(merge1[,1])
merge1[dup,]
#test on inner joining jared2 with datafiniti by place = name#
merge2 <- merge(x = jared2, y = datafiniti_dup_removed, by.x = "place", by.y = "name")
merge2
#9 results all unique
#test on inner joining datafiniti with datafiniti by name = name#
merge3 <- merge(x = datafiniti_dup_removed, y = datafiniti_dup_removed, by.x = "name", by.y = "name")
str(merge3)
mer3_str <- NULL # (no-op placeholder removed below; see dup2 fix)
merge3
#7698 results, 5862 duplicated names, 5411 duplicated addresses
dup2 <- duplicated(merge3[,1])
dup2 # FIX: was `dups2`, an undefined object (typo)
str(merge3[dup2, ])
dup3 <- duplicated(merge3[ ,2])
dup3
str(merge3[dup3, ])
|
6a71b53518cbacd6f5f913a79fe680dcd4a16e15
|
06cdfccf8d44f11742fec1162afdfe2421c22302
|
/man/norm_minkowski.Rd
|
a547540bca3e1c7c6dcfd5d2fe8a0e690b51bf8c
|
[
"MIT"
] |
permissive
|
lgaborini/rdirdirgamma
|
06feabefb12a42d0496818ecc9a0f70f7ccc1c5c
|
f3087f0a81c9e4b08ff56efcc260873eaa16232d
|
refs/heads/master
| 2023-04-18T00:18:29.380512
| 2021-03-05T18:06:26
| 2021-03-05T18:06:26
| 290,997,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 581
|
rd
|
norm_minkowski.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{norm_minkowski}
\alias{norm_minkowski}
\title{Compute the Minkowski norm of a vector}
\usage{
norm_minkowski(v, p = 2)
}
\arguments{
\item{v}{a vector}
\item{p}{exponent of the Minkowski norm (from 1 to Inf)}
}
\value{
a double
}
\description{
Compute the Minkowski norm of a vector.
$p$ can range from 1 to infinity.
}
\seealso{
Other column-wise and utilities:
\code{\link{colkurtosis}()},
\code{\link{colsd}()},
\code{\link{colskewness}()}
}
\concept{column-wise and utilities}
|
633571f52558f56cd2f4ba10374ee4566a7d3b1f
|
a802e08368280ccf5fbf07c764f1b30d7533a971
|
/Use_Cases/VPS_Popcorn_Production/Kubernetes/src/configurations/requirements.R
|
977ff244e6f54e1df9ccc2f5894833040f64b317
|
[
"Apache-2.0"
] |
permissive
|
janstrohschein/KOARCH
|
d86850d1b5e2fbd13401d93023cde783e14fb158
|
8eaa4a71cd1a33475f7b1618d075037d1446a7e1
|
refs/heads/master
| 2023-07-25T08:21:06.879591
| 2021-09-17T11:55:03
| 2021-09-17T11:55:03
| 229,251,272
| 4
| 8
|
Apache-2.0
| 2023-07-06T21:40:07
| 2019-12-20T11:18:52
|
C#
|
UTF-8
|
R
| false
| false
| 525
|
r
|
requirements.R
|
# Install the pinned package versions this use case depends on.
# FIX: the argument was misspelled "dependecies"; install.packages() silently
# swallows unknown arguments via ..., so dependencies were never installed.
install.packages("devtools", dependencies = TRUE)
library(devtools)
install_version("CEGO", version = "2.4.0", repos = "http://cran.us.r-project.org")
install_version("data.table", version = "1.13.0", repos = "http://cran.us.r-project.org")
install_version("nloptr", version = "1.2.2.0", repos = "http://cran.us.r-project.org")
install_version("SPOT", version = "2.0.6", repos = "http://cran.us.r-project.org")
# Install COBBS from GitHub into the shared site library.
withr::with_libpaths(new = "/usr/local/lib/R/site-library", install_github("martinzaefferer/COBBS", upgrade="never"))
|
f6dac801895da580e7fe2116f36e9d98394bda6a
|
6b4b561e78f6bf5d94b95c445d671facf1434e02
|
/man/GE_enumerate_inputs.Rd
|
b074c136e2e5eecfe75dbd80147c99e43cf7c8c9
|
[] |
no_license
|
cran/GEint
|
11f4ae8466e2add11832152de6d666eeba808803
|
78dca7ea284ccb1344d5a08d7a8130deb7c841e7
|
refs/heads/master
| 2022-05-30T01:12:06.005682
| 2022-05-18T06:20:02
| 2022-05-18T06:20:02
| 69,889,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 457
|
rd
|
GE_enumerate_inputs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GE_enumerate_inputs.R
\name{GE_enumerate_inputs}
\alias{GE_enumerate_inputs}
\title{GE_enumerate_inputs.R}
\usage{
GE_enumerate_inputs()
}
\value{
Nothing
}
\description{
Call this function to display the necessary inputs for GE_bias_set. For terms in
cov_mat_list like E[ZZ], these should be matrices where the (i,j) element is E[Z_i*Z_j].
}
\examples{
GE_enumerate_inputs()
}
|
d56c3bed681d47a668c599333616f37865052d0c
|
12cc761534228dd286b39a00368c084ff68c4009
|
/DSS4/Proj2/plot1.R
|
31efd10ceba0829cf88ccf79f8c7a5d0ac317b1e
|
[] |
no_license
|
DragonflyStats/DSS
|
eb87d6ee8bff870633971d1be6882ca5c5184417
|
16186b3618b7d550b6b73d59208330f854dab3e3
|
refs/heads/master
| 2023-04-14T08:52:33.443675
| 2023-04-13T07:58:05
| 2023-04-13T07:58:05
| 21,777,873
| 7
| 21
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
plot1.R
|
# Plot total PM2.5 emissions per year across all sources; reads the NEI RDS
# file from the working directory and writes plot1.png (480x480).
NEI <- readRDS("summarySCC_PM25.rds")
# Sum Emissions within each year.
x <- aggregate(NEI$Emissions,by=list(Category=NEI$year),FUN=sum)
colnames(x)<-c("year","Emissions")
png("plot1.png",width=480,height=480)
plot(x$year,x$Emissions,
type="o",col=c("blue"),
xlab="year",ylab="Emissions",
main="Emission of PM(2.5) per year of all sources")
dev.off()
|
db1f32d4f39b3f834b2faa712abe8aadcbd89d99
|
e6c169e9a4f564dfcba7cc32f0e72d04033f81b0
|
/R/data.R
|
5e79bca42315273a7417edb36d40d8ca3bc4936f
|
[] |
no_license
|
yoojeonglee/polnet
|
6943f4d42a60e9016008f70899275040165368e5
|
780132fdc2779d003d2e45cfd2dc46cad52cf9f5
|
refs/heads/master
| 2020-04-16T21:56:37.183780
| 2018-07-24T22:03:23
| 2018-07-24T22:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 310
|
r
|
data.R
|
#' 115th House of Representatives Voting Data
#'
#' A dataset containing the voting records of House of Representative
#' members of the 115th Congress
#'
#' @format A data frame with 376 rows and 672 variables:
#' \describe{
#' \item{vote_X}{Vote on the X'th bill (1=yay, 0=nay)}
#' ...
#' }
#' @note The object name contains a hyphen (non-syntactic), so it must be
#' accessed with backticks: \code{`house-data`}.
"house-data"
|
f23f673285453333883e5212c1d4acfaa1004e3e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CoRpower/examples/plotRRgradVE.Rd.R
|
a9135ef18d514b25bfb39769733e5aa292651b58
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,896
|
r
|
plotRRgradVE.Rd.R
|
# Extracted example for CoRpower::plotRRgradVE (runs the shipped .Rd example).
library(CoRpower)
### Name: plotRRgradVE
### Title: Plotting of the Ratio of Relative Risks for Higher/Lower Latent
### Subgroups against Correlate of Risk Effect Size for Trichotomous
### Biomarkers
### Aliases: plotRRgradVE
### ** Examples
# Example scenario with trichotomous biomarker, where values of rho are varied
# Set input parameters for computePower function
nCases <- 10
nControls <- 300
nCasesWithS <- 10
controlCaseRatio <- 3
VEoverall <- 0.75
risk0 <- 0.034
# VElat0 sweeps 10 values from 0 up to VEoverall; VElat1 stays fixed.
VElat0 <- seq(0, VEoverall, len=10)
VElat1 <- rep(VEoverall, 10)
Plat0 <- P0 <- 0.2
Plat2 <- P2 <- 0.6
M <- 20
alpha <- 0.05
sigma2obs <- 1
# Three values of rho are evaluated in a single computePower() call.
rho <- c(1, 0.7, 0.4)
biomType <- "trichotomous"
# Output from computePower function is stored in an object as a list
pwr <- computePower(nCases=nCases, nControls=nControls, nCasesWithS=nCasesWithS,
controlCaseRatio=controlCaseRatio, risk0=risk0, VEoverall=VEoverall,
Plat0=Plat0, Plat2=Plat2, P0=P0, P2=P2, VElat0=VElat0,
VElat1=VElat1, M=M, alpha=alpha, sigma2obs=sigma2obs, rho=rho,
biomType=biomType)
# Set parameters for plotPowerCont function
# outComputePower is a list containing output from the computePower function
outComputePower <- pwr
legendText <- paste0("rho = ", c(1, 0.7, 0.4))
plotRRgradVE(outComputePower=outComputePower, legendText=legendText)
## Not run:
##D # Output from computePower function is saved in an RData file
##D computePower(..., saveDir = "myDir", saveFile = "myFile.RData")
##D # outComputePower is a character string specifying the file containing the computePower output
##D # outDir is a character string specifying the outComputePower file directory
##D outComputePower = "myFile"
##D outDir = "~/myDir"
##D legendText <- paste0("rho = ", c(1, 0.7, 0.4))
##D plotRRgradVE(outComputePower, outDir=outDir, legendText = legendText)
## End(Not run)
|
d9b42ff5fe213db42d2eeac945ca602336564532
|
c94b94a33de2211e21437db702f175bf778ebb4d
|
/R/utils.R
|
d96f0e96ffea1476b3f5cc2909f771f976e85a4e
|
[
"MIT"
] |
permissive
|
kuzmenkov111/tagmeppr
|
87a0a8cf2969e3d07f1c83e55247ef5c6c1a29a3
|
55a0bf961e08b57d43c7f3594d77a7c429d16ae8
|
refs/heads/master
| 2022-04-09T20:46:31.826681
| 2020-02-19T15:56:28
| 2020-02-19T15:56:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,353
|
r
|
utils.R
|
#' tagMepprCol
#'
#' A pallette based on CYRUP (github.com/robinweide/CYRUP)
#'
#' @author Robin H. van der Weide, \email{r.vd.weide@nki.nl}
#' @param n The number of colours needed (used as an index into the palette).
#' @return A character vector of hex colour codes.
#' @export
tagMepprCol = function(n){
  # Four-colour CYRUP palette; n indexes into it (and may be a vector).
  cyrup_palette = c("#009bef", "#ff5c49", "#949494", "#fed500")
  cyrup_palette[n]
}
#' runIDgen
#'
#' Generate an unique 10-item ID
#'
#' Each ID is five uppercase letters, four digits, and one trailing letter.
#'
#' @author Robin H. van der Weide, \email{r.vd.weide@nki.nl}
#' @param n The number of ID's needed.
#' @return A character vector of n identifiers.
#' @export
runIDgen <- function(n = 1) {
  # Five independent draws of n uppercase letters, pasted element-wise.
  letter_cols <- replicate(5, sample(LETTERS, n, TRUE), FALSE)
  prefix <- do.call(paste0, letter_cols)
  # Append a zero-padded 4-digit number and one final letter per ID.
  paste0(prefix, sprintf("%04d", sample(9999, n, TRUE)), sample(LETTERS, n, TRUE))
}
# split multiple [XS]A fields
# Expands a semicolon-separated alignment tag column (XA or SA; up to 26
# entries per read) into one row per entry, keyed by readName. Returns a
# data.frame with columns XA (the entry) and RN (the read name).
splitXSA = function(x, field = 'XA'){
# column of interest: the requested tag column of the alignment table
COI = x[,colnames(x) == field]
NAMES = x$readName
# split the ;-separated tag into up to 26 columns (named after LETTERS)
CS = reshape2::colsplit(COI, pattern = ';',LETTERS)
CS[CS == ""] = NA
# remove na-rows
# (a row of 26 NAs means the read carried no tag entries at all)
notShit = apply(CS, 1, function(x){sum(is.na(x))}) != 26
CS = CS[notShit, ]
NAMES = NAMES[notShit]
# cnts marks reads with exactly one tag entry
cnts = apply(CS, 1, function(x){sum(!is.na(x))}) == 1
# add name to singles
singles = data.frame('XA' = CS[cnts, 1], 'RN' = NAMES[cnts])
# split multiples
multiples = CS[!cnts, ]
# Drop all-NA columns; the single-row case needs special handling because
# apply() would simplify the result differently.
if(nrow(multiples) == 1){
multiples = multiples[, as.numeric(apply(as.data.frame(multiples),2,is.na)) < nrow(multiples)]
} else if(nrow(multiples) > 0){
multiples = multiples[, colSums(apply(as.data.frame(multiples),2,is.na)) < nrow(multiples)]
} else{
multiples = c()
}
out = c()
if(length(multiples) > 0){
# Flatten the remaining columns into one named vector (name = read name).
Mvect = c()
for(i in 1:ncol(multiples)){
Mvect = c(Mvect, stats::setNames(multiples[,i], NAMES[!cnts]))
}
multiples = data.frame('XA' = Mvect, 'RN' = names(Mvect))
multiples = multiples[!is.na(multiples[,1]),]
out = unique(rbind(singles,multiples))
} else {
out = unique(singles)
}
return(out)
}
# Estimate the transposon centre coordinate on the ITR reference sequence.
# Strategy: if the reference contains Ns, use the middle of the largest
# N-run; otherwise use the midpoint between the forward- and reverse-read
# coverage peaks. ("emperical" is a typo for "empirical", kept for API
# stability since callers use this name.)
empericalTransposonCentre = function(exp, ref){
transposonSeq = NULL
# Resolve the ITR sequence: a bundled PiggyBac/SleepingBeauty set, or a
# user-supplied FASTA path (matched loosely on ".fa").
if(ref$ITR == "PiggyBac"){
transposonSeq = tagMeppr::PiggyBacITRs
} else if(ref$ITR == "SleepingBeauty"){
transposonSeq = tagMeppr::SleepingBeautyITRs
} else if(grepl(ref$ITR, pattern = ".fa")){
# check if exists
if(file.exists(ref$ITR)){
transposonSeq = Biostrings::readDNAStringSet(filepath = ref$ITR, use.names = T)
} else {
stop('The file ', ref$ITR, ' does not exist.')
}
}
MID = 0
# 1. get mid of largest N-padded sequence
if(Biostrings::letterFrequency(transposonSeq, letters = 'N') > 0){
Nranges = IRanges::reduce(Biostrings::vmatchPattern("N",transposonSeq)[[1]])
Nranges = Nranges[base::which.max(S4Vectors::width(Nranges))]
IRDF = IRanges::as.data.frame(Nranges)[1,]
# IRDF columns are (start, end, width), so end - width/2 is (approximately)
# the midpoint of the N-run.
MID = IRDF[,2] - IRDF[,3]/2
} else {
# 2. if no Ns, get coverage-distribution
SL = S4Vectors::width(transposonSeq)
names(SL) = base::names(transposonSeq)
# Tile the reference into 10-bp windows and count forward/reverse hits.
gr.windows <- GenomicRanges::tileGenome(SL, tilewidth=10, cut.last.tile.in.chrom=TRUE)
COF = IRanges::countOverlaps(gr.windows, exp$FWDBAM)
COR = IRanges::countOverlaps(gr.windows, exp$REVBAM)
hits = sort(c(gr.windows[which.max(COF)],
gr.windows[which.max(COR)]))
# Keep the outermost start/end of the two peak windows, then average.
hits = unname(sort(unlist(as.data.frame( IRanges::ranges(hits)))[1:4])[c(1,4)])
MID = unname(mean(hits))
}
return( MID)
}
# De-duplicate an alignment table: reads whose concatenated range+cigar
# signature is identical to another read's are collapsed to one randomly
# chosen representative.
dedupAR = function(AR){
# make a string of ranges plus cigar
AR$string = GR2string(AR)
# combine strings of the same readname
L = split(AR$string, AR$readName)
STRINGS = unlist(lapply(L, paste, collapse = "___"))
STRINGSnames = unique(names(STRINGS))
# find multiples
TABLE = table(STRINGS)
MULTIPLES = names(TABLE[TABLE > 1])
MDF = as.data.frame(STRINGS[STRINGS %in% MULTIPLES])
MDF = split(rownames(MDF), MDF[,1])
# pick a random one
# (one representative read name per duplicated signature)
keeps = unname(vapply(MDF, FUN = function(x){sample(x, 1)},
FUN.VALUE = character(1)))
nonkeeps = STRINGSnames[is.na(match(STRINGSnames, keeps))]
# NOTE(review): nonkeeps contains every read name that is not a chosen
# duplicate representative — including reads with UNIQUE signatures — so
# the filter below appears to retain only the representatives. Confirm
# whether unique reads should also survive this filter.
AR[is.na(BiocGenerics::match(AR$readName, nonkeeps, nomatch=NA_integer_))]
}
#' @importFrom GenomeInfoDb seqnames
#' @importFrom BiocGenerics start end strand
# Serialise a GRanges-like object to "seqname:start-end[:strand]" strings.
# The ":strand" suffix is appended only when at least one range has a
# strand other than "*".
GR2string = function(x){
if (length(x) == 0L)
return(character(0))
ans <- paste0(GenomeInfoDb::seqnames(x), ":", BiocGenerics::start(x), "-", BiocGenerics::end(x))
if (any(strand(x) != "*"))
ans <- paste0(ans, ":", BiocGenerics::strand(x))
ans
}
|
c3105d47ad6efdd1a65b2cd78ee90323b280b3e3
|
8cb3a94ac203f7213c30ff4f254721c24ce70157
|
/cachematrix.R
|
4ee3baa8e7f58342eaf5e807022e4d643176f6ec
|
[] |
no_license
|
jcastillor/ProgrammingAssignment2
|
91529a5957889b9253e5896ec0008abe9cd72c78
|
4cb2ae726c81656ae6de52d5e0b2038c18afa7d1
|
refs/heads/master
| 2022-07-05T03:08:04.064321
| 2020-05-19T15:06:00
| 2020-05-19T15:06:00
| 265,244,324
| 0
| 0
| null | 2020-05-19T12:42:48
| 2020-05-19T12:42:47
| null |
UTF-8
|
R
| false
| false
| 904
|
r
|
cachematrix.R
|
## Week 3 R programming course peer graded Assignment
## Jose Castillo Rabazo
## These functions aim to cache a matrix and make the inverse in order to
## save computing power and make programs more efficients.
## makeCacheMatrix creates an R object that stores a matrix and its inverse
## makeCacheMatrix builds a closure-based cache object: it stores a matrix
## together with its (lazily supplied) inverse and exposes a list of four
## accessor functions: set/get for the matrix, setinv/getinv for the inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix invalidates any previously cached inverse.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## Retrieves the inverse matrix cached by makeCacheMatrix().
## It requires an argument created by makeCacheMatrix(). If an inverse is
## already cached it is returned immediately (with a message); otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  i <- x$getinv()
  if(!is.null(i)){
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  # FIX: forward ... to solve() — the previous version declared ... but
  # never passed it on, so extra arguments (e.g. tol) were silently ignored.
  i <- solve(data, ...)
  x$setinv(i)
  i
}
|
8a24a81819822f27f21c19d2b373f647ffe040f2
|
779215d6b0ac83368f9c71f7a4aff494d64a0835
|
/ddcrp/Code/arthur/neal8.R
|
497c6e9309d6c97cc41eda7aca32083fd8b2e563
|
[] |
no_license
|
luiarthur/byuMsProject
|
a257eccd12addca37b5a289ab47bb80f3fd1aecf
|
0a3c101de8b311639dd60355ff62f562e8399bbe
|
refs/heads/master
| 2021-01-19T08:32:20.541339
| 2015-03-31T16:14:12
| 2015-03-31T16:14:12
| 31,347,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,410
|
r
|
neal8.R
|
# Progress:
# pb() prints an in-place progress percentage (one decimal place) and a
# final newline when i == n.
pb <- function(i,n) { cat(paste0("\rProgress: ",round(i*1000/n)/10,"%")); if (i==n) cat("\n") } # arthur
library(foreach)
library(doMC)
# Register 16 parallel workers for the %dopar% loop below.
registerDoMC(16)
# Observed data vector and its length (used as globals by crp()).
y <- c(-1.48,-1.4,-1.16,-1.08,-1.02,.14,.51,.53,.78)
n <- length(y)
# Map each distinct value of x to 1, 2, ... in order of first appearance,
# returning a numeric vector of the same length as x.
relabel <- function(x){
  labs <- unique(x)
  out <- NULL
  for (k in seq_along(labs)){
    out[x == labs[k]] <- k
  }
  out
}
# Draw n values from the base distribution G0 = N(0, 1).
rg <- function(n=1){
  rnorm(n)
}
# Draw n values from the likelihood F(. | x) = N(x, 0.1).
rF <- function(n=1,x){
  rnorm(n, mean = x, sd = 0.1)
}
# Density of F(. | phi) = N(phi, 0.1) evaluated at x.
dF <- function(x,phi){
  dnorm(x, mean = phi, sd = 0.1)
}
# Take out the c[i]
# c[i] needs to go back to one of the existing clusters
# OR it needs to get a new cluster.
# In practice, a=1, m=1.
# Initial state: every observation in cluster 1; phi holds one parameter per
# cluster plus one auxiliary draw from the base measure.
# NOTE(review): `c` shadows base::c as a data object; calls like c(...) still
# resolve to the function, but the name is confusing.
c <- rep(1,n)
phi <- rg(length(unique(c))+1)
# Chinese Restaurant:
# One Gibbs sweep over all observations: for each i, drop it from its
# cluster, relabel the remaining clusters 1..k, then reassign i to an
# existing cluster (weight = cluster size) or a new cluster h (weight = 1),
# each weighted by the likelihood dF(y[i], phi).
# Reads the globals y, n, c, phi; modifies only local copies and returns
# the updated assignment vector.
crp <- function() {
for (i in 1:n){ # loop over every observation
k <- length(unique(c[-i]))
h <- k + 1
#phi <- phi[1:h]
c[-i] <- relabel(c[-i])
if (any(c[-i]==c[i])){
phi[h] <- rg(1) # when m = 1
}
w <- NULL; w[h] <- 1
for (t in 1:k){
w[t] <- sum(c[-i]==t)
}
samp <- 1:h
# NOTE(review): assumes length(phi) == h here; the commented-out
# truncation above would enforce that — confirm phi never grows past h,
# otherwise w is recycled against a longer density vector.
prob <- w * dF(y[i],phi) #a=1, m=1
c[i] <- sample(samp,1,prob=prob)
}
c
}
# One parallel iteration: print progress, then run a full sweep.
do.neal <- function(i,B) { pb(i,B); neal <- crp() }
# Main:
# Run B independent sweeps in parallel (doMC backend registered above),
# stacking the resulting label vectors row-wise; iterations that error are
# silently dropped (.errorhandling = "remove").
B = 100000
neal.crp <- foreach(i=1:B,.combine=rbind,.errorhandling="remove") %dopar% do.neal(i,B)
# Clusters per sweep = largest label, since relabel() keeps labels at 1..k.
neal.num.clust <- apply(neal.crp,1,max)
# Empirical distribution of the number of clusters.
dist.neal <- table(neal.num.clust) / sum(table(neal.num.clust))
round(dist.neal,4)
|
d4fd01951a6d2c059806dc02b75410e7c728a5b1
|
c700718d694cf2317400174d3480837a55a5b371
|
/JKleiberPkg/man/plotOutliers.Rd
|
f0a60090a3eba0da6184ca0986bfc9704c8493b6
|
[] |
no_license
|
jkleiber/MATH4753
|
51774e5e19617ffe152e04288d352579bd613c23
|
fbd8052731732f9f287258bba12bc256c049a4ed
|
refs/heads/master
| 2022-03-17T03:41:56.737223
| 2019-11-25T23:13:59
| 2019-11-25T23:13:59
| 209,825,951
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 578
|
rd
|
plotOutliers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outliers.R
\name{plotOutliers}
\alias{plotOutliers}
\title{Outlier plot function}
\usage{
plotOutliers(x, title = "", col = c("Red", "Blue", "Black"))
}
\arguments{
\item{x}{The data to plot}

\item{title}{Title for the plot (default \code{""})}
\item{col}{Color vector containing outlier color, potential outlier color, and regular color (in that order)}
}
\value{
Plots a dot plot of the data points, highlighting outliers
}
\description{
This function plots data points, outliers, and potential outliers in a dot plot
}
\examples{
plotOutliers(x)
}
|
f1e3ec8fb9668bf2b4d79afc07b5e1e1dbe5d01f
|
f7a17a0c09546f0b5448921c270cf6dafa899d25
|
/R-Programming-Assignment-3/rankall.R
|
d15313aafd36c75b526b5e36c7c24c948a93245a
|
[] |
no_license
|
daswan1118/Coursera-R-Programming
|
9a8016633fe7de3604b387af8a68ab3574271790
|
525e16e5527f8b873673892760ce39bdb28b0bbc
|
refs/heads/master
| 2020-04-06T08:35:05.297253
| 2018-11-13T03:39:13
| 2018-11-13T03:39:13
| 157,308,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,617
|
r
|
rankall.R
|
##### Ranking hospitals in all states #####
## Reads outcome-of-care-measures.csv and returns a 2-column data frame
## with the hospital in each state holding the requested mortality ranking
## for the given condition.
##
## Args:
##   condition: one of 'heart attack', 'heart failure', 'pneumonia'
##   rank:      'best', 'worst', or a numeric rank
## Returns: a data frame with columns hospital.name and state, or the
##          string 'invalid outcome' for an unknown condition.
rankall <- function (condition, rank = 'best') {
  # Validate the condition BEFORE any file I/O; the original checked it
  # after subsetting, so an invalid condition errored on a missing column
  # instead of returning 'invalid outcome'.
  if (!(condition %in% c('heart attack', 'heart failure', 'pneumonia'))) {
    return('invalid outcome')
  }
  if (rank == 'best') rank <- 1
  library(dplyr)
  # Suppress coercion warnings while reading; restore on exit instead of
  # via code after return() (which was unreachable in the original).
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  # Read data
  outcome <- read.csv('outcome-of-care-measures.csv', colClasses = 'character')
  outcome[, 11] <- as.numeric(outcome[, 11])
  names(outcome) <- tolower(names(outcome))
  # Column holding the 30-day mortality rate for the requested condition.
  col_nm <- paste('hospital.30.day.death..mortality..rates.from.',
                  sub(' ', '.', condition), sep = '')
  outcome <- outcome[, c('hospital.name', 'state', col_nm)]
  outcome <- outcome[complete.cases(outcome), ]
  outcome[, c(col_nm)] <- as.numeric(outcome[, c(col_nm)])
  # Order by state, then mortality, then name (alphabetical tie-break),
  # and rank hospitals within each state.
  outcome <- outcome[order(outcome$state, outcome[, c(col_nm)], outcome$hospital.name), ]
  outcome <- tryCatch(outcome %>%
                        group_by(state) %>%
                        mutate(rank = 1:n()) %>%
                        ungroup(), error = function(err) NA)
  if (is.numeric(rank)) {
    result <- outcome[outcome$rank == rank, ]
    result <- result[, c('hospital.name', 'state')]
    return(result)
  } else if (rank == 'worst') {
    # Worst = highest rank number within each state.
    result <- outcome %>%
      group_by(state) %>%
      filter(rank == max(rank)) %>%
      ungroup()
    result <- result[, c('hospital.name', 'state')]
    return(result)
  }
}
|
839661ec4003c596af2f78aca61bfc8414fcf06b
|
b61b198991f50d300ac17a2884e81fc8ab72875c
|
/pr.r
|
e42fb689dc87a924d4237cfc48f2253e040af14a
|
[] |
no_license
|
Valensya/RLessons1
|
e29957cdd9ca7970c644ba324baef0fab3af2a1d
|
39731077b71f2a00bc6d45316ae6c4c0357346b0
|
refs/heads/master
| 2021-01-22T19:30:43.308710
| 2017-03-21T10:49:05
| 2017-03-21T10:49:05
| 85,203,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
pr.r
|
# Add an indicator column a1 to iris: 1 for setosa rows, 0 otherwise.
# The original two lines assigned INTO a comparison (a runtime error) and
# misspelled the Species column as "Specias"; this is the apparent intent.
iris$a1 <- 0
iris$a1[iris$Species == "setosa"] <- 1
|
8930fc11d6d4cb0dfb2de97494ec48d7c4b3a3e5
|
becc0c34c00903dba35b9eaff62c6ade569901d2
|
/visualisation/maps/xa_map/main.r
|
49ca1d26b7fe16f315f4786b615066577e1c5e38
|
[] |
no_license
|
nicholasrios/football-analytics
|
0f7a8a6ec0fc6f69c33a8c1a8f0d24e43eb1b695
|
8db50fff35d9711568855f23147dfaa9c9b603a6
|
refs/heads/master
| 2022-04-18T15:15:41.734452
| 2019-09-14T16:09:28
| 2019-09-14T16:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
main.r
|
# xA map
library(dplyr)
library(rvest)
library(hrbrthemes)
library(magick)
library(viridis)
library(ggforce)
library(gtable)
library(gridExtra)
library(grid)
library(readr)
# Load the plotting helpers; get_stats(), xa_map() and create_graphic()
# are assumed to be defined there -- confirm against src/xa_map.r.
source(file="src/xa_map.r")
# Launcher
# Input CSV, caption text and output file name for this run.
data_file <- "data/bernardo_silva_xa.csv"
text <- "Bernardo Silva passes from 2018-2019 Premier League season"
final_filename <- "bernardo_silva_xa_map_1819.png"
# Colors
# Palette handed through unchanged to the map builder below.
background_color <- "#2162AA"
foreground_color <- "#F7F6F4"
text_color <- "white"
color1 <- "#64BEF3"
color2 <- "#FFCB41"
color3 <- "#56FFAE"
high_gradient_color <- "#64BEF3"
# Load data
data <- read_csv(data_file)
# Filter data
# you can filter on team_name or player_name
# data <- data %>% filter(player_name=="Olivier Giroud")
# Build map
# Compute summary stats, draw the xA map, then compose and save the final
# graphic (all three helpers come from the sourced file above).
stats <- get_stats(data)
map <- xa_map(data, background_color, foreground_color, color1, color2, color3, high_gradient_color)
create_graphic(map, text, stats, final_filename, background_color, text_color)
|
bf010c6cd2ebf2ebb0c69a4e0832871fc362530a
|
ab564a9cc7755be703abff95701a0c1b0633bc7a
|
/plot2.R
|
e4d38c612ee8cb58a6528c5b75ebdab2b852d6a2
|
[] |
no_license
|
fcp018/ExData_Plotting1
|
ee029712c2ae8e8c3446d6ed5212d388fcea1fcf
|
14183ae4aa07789327177fab6a502b329ccdcbb2
|
refs/heads/master
| 2021-01-22T00:32:23.591591
| 2015-07-12T15:10:10
| 2015-07-12T15:10:10
| 38,949,949
| 0
| 0
| null | 2015-07-12T04:28:01
| 2015-07-12T04:28:00
| null |
UTF-8
|
R
| false
| false
| 844
|
r
|
plot2.R
|
# This script uses the subset data file created by "load & subset data.R".
# Run that script first if Pwr_consump_subData.csv does not exist yet.

# If the subset file is not in the current dir, fall back to the project dir.
if (!"Pwr_consump_subData.csv" %in% list.files()) {
  setwd("~/Data Science/Coursera Data Science/ExData_Plotting1")
}
# Read the power-consumption subset.
data <- read.csv("~/Data Science/Coursera Data Science/ExData_Plotting1/Pwr_consump_subData.csv")
# Parse the timestamp column in place. The original attach()-ed the data
# frame (never detached, leaving a copy on the search path) and shadowed
# the attached DateTime with a global; referencing columns directly avoids
# both problems while plotting the same values.
data$DateTime <- as.POSIXct(data$DateTime)
# Write plot2.png: Global Active Power over time as a line chart.
png(filename = "plot2.png", width = 480, height = 480)
plot(data$DateTime, data$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
737db4eeb8424bdddf916c6292bff1c9c70b6dfe
|
f298bc9d1f2dcbd2b9b7ee5b86b46af9ba5da782
|
/src/functions/walkR_functions.R
|
b220d9df77b691a5512ea1d033e63b77854d5fe1
|
[] |
no_license
|
lindsaywaldrop/dotwalkR
|
cb344228d2f4faa4dc79093a588aee25a9446c62
|
3b0756f8ec0b45bafe4dbc726cd149752bae4eb6
|
refs/heads/master
| 2023-06-21T22:45:57.229721
| 2021-07-19T23:15:12
| 2021-07-19T23:15:12
| 329,145,136
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,651
|
r
|
walkR_functions.R
|
# DotWalkR functions
# Register a parallel backend with up to `copl` workers, capped at the
# number of cores detectCores() reports.
#
# Args:
#   copl:    requested number of cores
#   windows: if TRUE, create and return a PSOCK cluster (Windows has no
#            fork); otherwise register a doParallel fork backend.
# Returns: the PSOCK cluster object when windows = TRUE, NULL otherwise.
register.backend <- function(copl, windows = FALSE) {
  co <- detectCores()
  if (windows == TRUE) {
    if (copl > co ) {
      # Cap at the detected core count rather than over-subscribing.
      cluster <- makePSOCKcluster(co)
      message("Cores requested exceed the number available, using max detected.")
    } else {
      cluster <- makePSOCKcluster(copl)
    }
    return(cluster)
  } else {
    if (copl > co ) {
      registerDoParallel(cores = co)
      message("Cores requested exceed the number available, using max detected.")
    } else {
      registerDoParallel(cores = copl)
    }
    return(NULL)
  }
}
# Load a MATLAB .mat file for the given surrogate and data type and return
# the matrix it contains. File locations follow a strict naming convention:
#   test = TRUE:               ./tests/testdata/<surrogate>_resampled_<type>_long.mat
#   example = TRUE (default):  ./data/example-data/<surrogate>/<surrogate>_resampled_<type>_long.mat
#   otherwise:                 ./data/<surrogate>/<surrogate>_<type>.mat
#
# Args:
#   surrogate.type: surrogate model name (also the folder/file prefix)
#   data.type:      "input", "SI", "surrogate", or another name whose first
#                   "_"-separated token is the object name inside the file
#   example:        use the example-data tree (ignored when test = TRUE)
#   test:           use the test-data tree
# Returns: the matrix stored in the .mat file; stops if the expected child
#          object is missing.
load.matdata <- function(surrogate.type, data.type, example = TRUE, test = FALSE){
  # Loading function which sets up matlab data in correct type. Specific naming structure
  # for mat files required!
  require(R.matlab)
  # Build folder path and file-name fragments for the chosen data tree.
  if (test == TRUE) {
    folder.loc <- "./tests/testdata/"
    surrogate.type <- paste(surrogate.type, "_resampled_", sep = "")
    data.type <- paste(data.type, "_long", sep = "")
    tmp.ext <- ".mat"
  } else if (test == FALSE && example == TRUE) {
    folder.loc <- paste("./data/example-data/", surrogate.type, "/", sep = "")
    surrogate.type <- paste(surrogate.type, "_resampled_", sep = "")
    data.type <- paste(data.type, "_long", sep = "")
    tmp.ext <- ".mat"
  } else {
    folder.loc <- paste("./data/", surrogate.type, "/", sep = "")
    surrogate.type <- paste(surrogate.type, "_", sep = "")
    tmp.ext <- ".mat"
  }
  # Pick the child object by data type; each .mat layout stores its matrix
  # under a different name ("input", "A", "fmat", or the type's first token).
  if (data.type == "input" || data.type == "input_long") {
    mat.data <- readMat(paste(folder.loc, surrogate.type, data.type, tmp.ext, sep = ""))
    data <- mat.data[["input"]]
  } else if (data.type == "SI" || data.type == "SI_long") {
    # SI files are named <surrogate>_SI.mat without the resampled infix.
    tmp.type <- strsplit(surrogate.type, "_")
    surrogate.type <- tmp.type[[1]][1]
    SI.data <- readMat(paste(folder.loc, surrogate.type, "_SI", tmp.ext, sep = ""))
    A<-SI.data[["A"]]
    data<-as.matrix(A)
  } else if (data.type == "surrogate" || data.type == "surrogate_long") {
    mat.data <- readMat(paste(folder.loc, surrogate.type, data.type, tmp.ext, sep = ""))
    data.name <- "fmat"
    data<-mat.data[[data.name]]
  } else {
    mat.data <- readMat(paste(folder.loc, surrogate.type, data.type, tmp.ext, sep = ""))
    data.name <- strsplit(data.type, "_")
    data.name <- data.name[[1]][1]
    data<-mat.data[[data.name]]
  }
  if(is.null(data)) stop("Unexpected structure to MATLAB file, look again at the names of child objects!")
  return(data)
}
# Largest gradient component across all three direction arrays, ignoring
# NAs. Equivalent to taking the max of the three per-input maxima.
find.max.grad <- function(gradX, gradY, gradZ){
  max(c(gradX, gradY, gradZ), na.rm = TRUE)
}
# Rescale 3D dot positions onto the unit cube given per-axis ranges.
#
# Args:
#   dots:    a matrix with columns x, y, z (one row per dot)
#   x.range, y.range, z.range: length-2 numeric vectors (min, max),
#            typically from range()
# Returns: positions scaled to [0, 1] on each axis (matrix, or a length-3
#          vector via the defensive vector branch).
scale.dots <- function(dots, x.range, y.range, z.range){
  if (!inherits(dots, "matrix")) stop("dots must be a matrix")
  # Bug fix: the original combined the length and type tests with &&, so a
  # range violating only ONE of the conditions (e.g. a length-3 numeric
  # vector) passed silently; || makes either violation abort. is.numeric()
  # also accepts integer ranges, which class() == "numeric" rejected.
  if (length(x.range) != 2 || !is.numeric(x.range)) {
    stop("x.range must be a vector with two values, try using range()")
  }
  if (length(y.range) != 2 || !is.numeric(y.range)) {
    stop("y.range must be a vector with two values, try using range()")
  }
  if (length(z.range) != 2 || !is.numeric(z.range)) {
    stop("z.range must be a vector with two values, try using range()")
  }
  if (is.null(dim(dots))) {
    # Defensive single-dot (vector) branch kept from the original.
    dots.size <- as.numeric(length(dots))
    dots.scaled <- rep(NA, length = dots.size)
    dots.scaled[1] <- (dots[1] - x.range[1])/(x.range[2] - x.range[1])
    dots.scaled[2] <- (dots[2] - y.range[1])/(y.range[2] - y.range[1])
    dots.scaled[3] <- (dots[3] - z.range[1])/(z.range[2] - z.range[1])
  } else {
    dots.size <- dim(dots)
    dots.scaled <- matrix(data = NA, nrow = dots.size[1], ncol = dots.size[2])
    # Column-wise min-max scaling against the supplied ranges.
    dots.scaled[,1] <- (dots[,1] - x.range[1])/(x.range[2] - x.range[1])
    dots.scaled[,2] <- (dots[,2] - y.range[1])/(y.range[2] - y.range[1])
    dots.scaled[,3] <- (dots[,3] - z.range[1])/(z.range[2] - z.range[1])
  }
  return(dots.scaled)
}
# Append the current dot positions to <folder.name>/dots_<time.now>.csv,
# space-separated, via data.table::fwrite. Warns (but still writes) if any
# positions are NA.
save.dots <- function(folder.name, dots, time.now){
  # Saves dot file at specified time.
  require(data.table)
  if(sum(is.na(dots)) != 0) warning("Dots contain NA values!!")
  filename <- paste(folder.name, "/dots_" ,time.now, ".csv", sep = "")
  # append = TRUE so repeated calls at the same timestamp accumulate rows.
  fwrite(data.frame(dots), file = filename, append = TRUE, sep = " ", nThread = 2)
}
# Distance-weighted mean of surrogate values at the nearest neighbors of
# each query point. The mean is weighted by a linear decay in Euclidean
# distance.
#
# Args:
#   vectors:          matrix of known 3D points (one row per point)
#   values:           surrogate values at those points (parallel to rows)
#   testVector:       matrix of 3D query points
#   desiredNeighbors: number of nearest neighbors to average over
# Returns: numeric vector of weighted means, one per row of testVector.
getMeanValue <- function(vectors, values, testVector, desiredNeighbors) {
  require(data.table)
  require(parallel)
  require(doParallel)
  require(foreach)
  # inherits() is robust to the c("matrix", "array") class of R >= 4.
  if (!inherits(testVector, "matrix")) stop("testVector must be a matrix")
  # (Also fixes the "vetors" typo in the original error message.)
  if (!inherits(vectors, "matrix")) stop("vectors must be a matrix")
  # is.numeric() is TRUE for both integer and double, so this collapses
  # the original's empty integer branch.
  if (is.numeric(desiredNeighbors)) {
    desiredNeighbors <- as.integer(desiredNeighbors)
  } else {
    stop("desiredNeighbors must be an integer or numeric")
  }
  means1 <- foreach(i = 1:dim(testVector)[1]) %dopar% {
    # Euclidean distance from every known point to query point i.
    distances <- matrix(data = NA, nrow = dim(vectors)[1], ncol = 2)
    colnames(distances) <- c("index","distance")
    distances[, 1] <- seq_len(dim(vectors)[1])
    distances[, 2] <- sqrt((vectors[, 1] - testVector[i, 1])^2 +
                             (vectors[, 2] - testVector[i, 2])^2 +
                             (vectors[, 3] - testVector[i, 3])^2)
    distances2 <- as.data.table(distances)
    sorted.distances <- setkey(distances2, "distance")
    idx <- sorted.distances[1:desiredNeighbors, ]
    # Linear decay 1 - 20*d; weights go negative for d > 0.05. Kept as-is
    # from the original -- TODO confirm this is the intended kernel.
    wgts <- 1 - 20*(idx$distance)
    value <- values[idx$index]
    weighted.mean(as.numeric(value), wgts)
  }
  unlist(means1)
}
# Estimate the gradient at each dot location: scale the dots into the
# surrogate's unit cube, look up the nearest-neighbor weighted gradient
# along each axis, and bind the components into an n x 3 matrix.
find.betas <- function(dots, gradX, gradY, gradZ, input.real, input.scaled, dN, delta.t){
  # Scale dot coordinates into [0,1]^3 using the real-parameter ranges.
  unit.dots <- scale.dots(dots,
                          range(input.real[, 1]),
                          range(input.real[, 2]),
                          range(input.real[, 3]))
  # One weighted-mean lookup per gradient direction.
  grad.est <- lapply(list(gradX, gradY, gradZ),
                     function(g) getMeanValue(input.scaled, g, unit.dots, dN))
  matrix(data = c(grad.est[[1]], grad.est[[2]], grad.est[[3]]), ncol = 3)
}
# Draw n values uniformly from the interval [-1, 1].
generaterandM <- function(n){
  lower <- -1
  upper <- 1
  (upper - lower) * runif(n) + lower
}
# Clamp the coordinates in column `position` of `dots` to the closed
# interval range.dots = c(low, high), returning the adjusted matrix.
herd.dots <- function(dots, position, range.dots){
  # pmax/pmin clamp in one pass; values already inside the interval (and
  # NAs) are returned unchanged.
  dots[, position] <- pmin(pmax(dots[, position], range.dots[1]), range.dots[2])
  return(dots)
}
|
28adb4ea604e09076882c4548afb89a629676e9d
|
75ad61a5b05261bd434cf3027d2b5938ca5bc561
|
/plot2.R
|
4c06d06b94d49d70625293c16e2efbe8d415b189
|
[] |
no_license
|
Kshear/ExData_Plotting1
|
88f089dc8c9e76f0975379e31055fea19e603bdd
|
5c11624c557677cb260f3729916e4cb018bc36b4
|
refs/heads/master
| 2022-11-25T10:45:06.207452
| 2020-08-04T16:22:11
| 2020-08-04T16:22:11
| 284,797,350
| 0
| 0
| null | 2020-08-03T20:12:16
| 2020-08-03T20:12:15
| null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
plot2.R
|
library(lubridate)
library(data.table)
library(dplyr)
## Read the full power-consumption file; "?" marks missing values.
power<- read.table(file="household_power_consumption.txt", header=T, sep=";", stringsAsFactors=F, dec=".", na.strings="?")
# Convert the Date column from character to Date class.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
# Keep only the two days of interest (1-2 Feb 2007).
SubSetDates <- filter(power, (Date=="2007-2-1") | (Date=="2007-2-2"))
# Build a combined date-time column and parse it.
datetime<- paste(SubSetDates$Date,SubSetDates$Time)
dt2 <- data.frame(cbind(SubSetDates, datetime))
dt2$datetime<-as.POSIXct(dt2$datetime)
## Write plot2.png. Bug fix: this script (plot2.R) wrote to "plot1.png" --
## a copy/paste slip -- so it silently overwrote plot 1's output.
png(filename="plot2.png",width=480, height=480)
# Line chart of Global Active Power over the two days.
plot(x=dt2$datetime, y=dt2$Global_active_power, type = 'l', xlab='',
     ylab = 'Global Active Power (kilowatts)')
dev.off()
|
5147b159b40968ac4548c4857b48d115d12f21cb
|
10784406e656ff1c16800e595e6033de4f71f9ff
|
/ui.R
|
eecc58e170b719a282d438b97a5da3afd43b53ef
|
[] |
no_license
|
AlgoSkyNet/BasicLogInModule
|
1748b0670f32b10d46d72f5d44783b5f147e681d
|
4ba23cf8db4da92c40581f7b96a5f703706032ca
|
refs/heads/master
| 2022-04-09T12:50:32.793925
| 2020-02-23T17:48:16
| 2020-02-23T17:48:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,211
|
r
|
ui.R
|
# SHINY UI ####
# Application ####
# dashboardPagePlus layout with a login module; the single content tab is
# hidden until the server side reveals it after authentication.
dashboardPagePlus(
  skin = "black",
  title = "Basic LogIn",
  # enable_preloader = TRUE,
  sidebar_fullCollapse = T,
  # HEADER --------------------
  header = dashboardHeaderPlus(
    title = "Basic LogIn",
    userOutput("menu_user")  # user menu rendered server-side
  ),
  # SIDEBAR --------------------
  sidebar = dashboardSidebar(
    useShinyjs(),
    useSweetAlert(),
    sidebarMenuOutput("menu")  # sidebar menu rendered server-side
  ),
  # CONTENT --------------------
  body = dashboardBody(
    login_ui("login"),  # login module UI (project-defined)
    # TAB CONTENT --------------------
    tabItems(
      # 1 - First page, wrapped in hidden() so it only shows after login ----
      tabItem(tabName = "first_page",
              hidden(div(id="first_page_show", ## NOTE: revealed by server after login
                         fluidRow(
                           h1(HTML("Bienvenido!!! Más info en <a href = 'https://www.hasselpunk.com' target = '_blank'>hasselpunk</a>!!!"), align = "center")
                         )
              )) ## NOTE: end of hidden wrapper
      ) # closes the tab
    )# closes tabItems
  )# closes the dashboard body
)# closes the dashboard page
|
30da1c2fea390663234853e66d3b86f90600af3c
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlecontentv2sandbox.auto/man/OrdersUpdateMerchantOrderIdRequest.Rd
|
fc994773af75f690dc1268968e88c06ac358c68c
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 767
|
rd
|
OrdersUpdateMerchantOrderIdRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/content_objects.R
\name{OrdersUpdateMerchantOrderIdRequest}
\alias{OrdersUpdateMerchantOrderIdRequest}
\title{OrdersUpdateMerchantOrderIdRequest Object}
\usage{
OrdersUpdateMerchantOrderIdRequest(merchantOrderId = NULL,
operationId = NULL)
}
\arguments{
\item{merchantOrderId}{The merchant order id to be assigned to the order}
\item{operationId}{The ID of the operation}
}
\value{
OrdersUpdateMerchantOrderIdRequest object
}
\description{
OrdersUpdateMerchantOrderIdRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other OrdersUpdateMerchantOrderIdRequest functions: \code{\link{orders.updatemerchantorderid}}
}
|
6ff02f32ec9b8d289f969bd51cf3c3d1e7f523a9
|
27f615eae91f8548adb1e6ba8da8888417c3ef56
|
/man/ggmanHighlightGroup.Rd
|
74fab87871d829cbbf4599f723fdc3f389e89e81
|
[] |
no_license
|
drveera/ggman
|
364a572c289799b066f1046e7af952e6cec74ff0
|
b264cab4301c861cc033ca639fda8fd9bc694e10
|
refs/heads/master
| 2023-05-24T00:20:01.242213
| 2019-05-01T17:21:11
| 2019-05-01T17:21:11
| 75,216,178
| 24
| 13
| null | 2021-06-17T10:58:59
| 2016-11-30T18:43:06
|
R
|
UTF-8
|
R
| false
| true
| 1,270
|
rd
|
ggmanHighlightGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggmanHighlightGroup.R
\name{ggmanHighlightGroup}
\alias{ggmanHighlightGroup}
\title{Highlight groups of points in Manhattan plot}
\usage{
ggmanHighlightGroup(ggmanPlot, highlightDfm, snp = "snp", group = "group",
legend.title = "legend", legend.remove = FALSE, ...)
}
\arguments{
\item{ggmanPlot}{A ggman plot of class 'ggman'; see \code{\link{ggman}}}
\item{highlightDfm}{A data frame object; one of the columns should contain snps identifiers}
\item{snp}{Name of the snp column}
\item{group}{Name of the grouping column; if all the snps are to be highlighted with same colour use \code{\link{ggmanHighlight}}}
\item{legend.title}{Title of the legend.}
\item{legend.remove}{If TRUE, legend will be removed.}
\item{...}{other arguments passed to \code{\link[ggplot2]{geom_point}}}
}
\value{
A manhattan plot with highlighted markers
}
\description{
Highlights groups of points in the ggman Manhattan Plot and add a legend.
}
\examples{
p1 <- ggman(toy.gwas, snp = "snp", bp = "bp", chrom = "chrom",
pvalue = "pvalue")
ggmanHighlightGroup(p1, highlightDfm = toy.highlights.group, snp = "snp", group = "group",
size = 0.5, legend.title = "Significant groups")
}
|
928825f3e407528b4c7070bc528b7d1fd260bd76
|
89b48f1af10fe015001dd8b6adb92b75829f3cdc
|
/man/sceua.Rd
|
978d6eded2000d620c88231edee3582514523ec2
|
[] |
no_license
|
cran/rtop
|
561ccd2863299ec410d28abaa99adf0792635367
|
df1ca98e050e57a4c3497d65fe96de3d98469a51
|
refs/heads/master
| 2023-04-06T23:24:15.866989
| 2023-03-31T17:10:02
| 2023-03-31T17:10:02
| 17,699,401
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,210
|
rd
|
sceua.Rd
|
\name{sceua}
\alias{sceua}
\title{
Optimisation with the Shuffle Complex Evolution method}
\description{
Calibration function which searches a parameter set which is minimizing the value of an objective function
}
\usage{
sceua(OFUN, pars, lower, upper, maxn = 10000, kstop = 5, pcento = 0.01,
ngs = 5, npg = 2 * length(pars) + 1, nps = length(pars) + 1,
nspl = 2 * length(pars) + 1, mings = ngs, iniflg = 1, iprint = 0, iround = 3,
peps = 0.0001, plog = rep(FALSE,length(pars)), implicit = NULL, timeout = NULL, ...)
}
\arguments{
\item{OFUN}{ A function to be minimized, with first argument the vector of
parameters over which minimization is to take place. It should return a scalar result
as an indicator of the error for a certain parameter set}
\item{pars}{ a vector with the initial guess the parameters }
\item{lower}{ the lower boundary for the parameters }
\item{upper}{ the upper boundary for the parameters }
\item{maxn}{ the maximum number of function evaluations}
\item{kstop}{number of shuffling loops in which the criterion value must change by the given percentage before optimization is terminated}
\item{pcento}{percentage by which the criterion value must change in given number (kstop) of shuffling loops to continue optimization}
\item{ngs}{number of complexes in the initial population}
\item{npg}{number of points in each complex}
\item{nps}{number of points in a sub-complex}
\item{nspl}{number of evolution steps allowed for each complex before complex shuffling}
\item{mings}{minimum number of complexes required, if the number of complexes is allowed to reduce as the optimization proceeds}
\item{iniflg}{flag on whether to include the initial point in population. iniflg = 0, not included. iniflg= 1, included
}
\item{iprint}{flag for controlling print-out after each shuffling loop. iprint < 0: no output. iprint = 1: print information on the best point of the population. iprint > 0: print information on every point of the population
}
\item{iround}{number of significant digits in print-out}
\item{peps}{convergence level for parameter set (lower number means smaller difference between parameters of the population required for stop)}
\item{plog}{whether optimization should be done in log10-domain. Either a single TRUE value for all parameters, or a vector with TRUE/FALSE for the different parameters}
\item{implicit}{function for implicit boundaries for the parameters (e.g. sum(pars[4]+pars[5]) < 1). See below for details}
\item{timeout}{if different from NULL: maximum time in seconds for execution before the optimization returns with the parameters so far.}
\item{...}{arguments for the objective function, must be named}
}
\value{
The function returns a list with the following elements
\itemize{
\item{par}{ - a vector of the best parameters combination }
\item{value}{ - the value of the objective function for this parameter set}
\item{convergence}{ - a list of two values \itemize{
\item{funConvergence}{ - the function convergence relative to pcento}
\item{parConvergence}{ - the parameter convergence relative to peps}
}}
\item{counts}{ - the number of function evaluations}
\item{iterations}{ - the number of shuffling loops}
\item{timeout}{ - logical; TRUE if the optimization was aborted because the timeout time was reached, FALSE otherwise}
}
There are also two elements returned as attributes:
\itemize{
\item{parset}{ - the entire set of parameters from the last evolution step }
\item{xf}{ - the values of the objective function from the last evolution step }
}
The last two can be accessed as \code{attr(sceuares, "parset")} and \code{attr(sceuares, "xf")}, if the
result is stored as \code{sceuares}.
}
\details{
sceua is an R-implementation of the Shuffle Complex Evolution - University of Arizona (Duan et al., 1992),
a global optimization method which "combines the strengths of the simplex procedure of Nelder and Mead (1965)
with the concepts of controlled random search (Price, 1987), competetive evolusion (Holland, 1975)" with the
concept of complex shuffling, developed by Duan et al. (1992).
This implementation follows the Fortran implementation relatively close, but adds the possibility of searching
in log-space for one or more of the parameters, and it uses the capability of R to pass functions
as arguments, making it possible to pass implicit conditions to the parameter selection.
The objective function \code{OFUN} is a function which should give an error value for each parameter set.
It should never return non-numeric values such as NA, NULL, or Inf. If some parameter combinations can
give such values, the return value should rather be a large number.
The function works with fixed upper and lower boundaries for the parameters. If the possible range of
a parameter might span several orders of magnitude, it might be better to search in log-space for the optimal parameter,
to reduce the risk of being trapped in local optima. This can be set with the argument \code{plog}, which is either
a single value (FALSE/TRUE) or a vector for all parameters.
\code{plog = c(TRUE, FALSE, FALSE, TRUE, TRUE)} means that the search for parameters 1,4 and 5 should be in log10-space,
whereas the search for parameters 2 and 3 are in normal space.
Implicit boundaries can be evoked by passing a function \code{implicit} to \code{sceua}.
This function should give 0 when parameters are acceptable
and 1 if not. If, for example, the condition is that the following sum of parameters four and five should be limited:
sum(pars[4]+pars[5]) <= 1
then the function will be implicit = function(pars) (pars[4] + pars[5]) > 1
}
\references{Duan, Q., Sorooshian, S., and Gupta, V.K., 1992. Effective and efficient
global optimization for conceptual rainfall-runoff models.
Water Resour. Res. 28 (4), 1015-1031.
Holland, H.H., 1975. Adaptation in natural and artificial systems,
University of Michigan Press, Ann Arbor.
Nelder, J.A. and Mead, R., 1965. A simplex method for function minimization,
Comput. J., 7(4), 308-313.
Price, W.L., 1987. Global optimization algorithms for a CAD workstation,
J. Optim. Theory Appl., 55(1), 133-146.
Skoien, J. O., Bloschl, G., Laaha, G., Pebesma, E., Parajka, J., Viglione, A., 2014. Rtop: An R package for interpolation of data with a variable spatial support, with an example from river networks. Computers & Geosciences, 67.
}
\author{ Jon Olav Skoien }
\examples{
set.seed(1)
# generate example data from a function with three parameters
# with some random noise
fun = function(x, pars) pars[2]*sin(x*pars[1])+pars[3]
x = rnorm(50, sd = 3)
y = fun(x, pars = c(5, 2, 3)) + rnorm(length(x), sd = 0.3)
plot(x,y)
# Objective function, summing up squared differences
OFUN = function(pars, x, yobs) {
yvals = fun(x, pars)
sum((yvals-yobs)^2)
}
sceuares = sceua(OFUN, pars = c(0.1,0.1,0.1), lower = c(-10,0,-10),
upper = c(10,10,10), x = x, yobs = y)
sceuares
xx = seq(min(x), max(x), 0.1)
lines(xx, fun(xx, pars = sceuares$par))
}
|
ca554e3386fb17f98f268e43a74a5e1da5ea7cd3
|
c06883a35a8d458f7e623f097a05e560024e59da
|
/R/RelativeWaterDeficit.R
|
7d71c04804df1f64084e27173ba6914ad6b71f8f
|
[] |
no_license
|
cran/pvldcurve
|
5c00b7967d185564480d016af8e88fcda44f8ae0
|
f4d98bb01b795ef3ebb842bc23edeb0c9b474bd1
|
refs/heads/master
| 2022-12-20T10:23:09.805027
| 2020-10-05T10:30:02
| 2020-10-05T10:30:02
| 301,814,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,940
|
r
|
RelativeWaterDeficit.R
|
#' Relative Water Deficit (RWD)
#'
#' Calculates relative water deficit (\%)
#'
#' @param data data frame with columns of equal length containing at least columns with the fresh weight (g),
#' the dry weight (g) and the saturated fresh weight (g)
#' @param fresh.weight optional name of the column in data containing
#' the numeric fresh weight values (g); default: fresh.weight
#' @param dry.weight optional name of the column in data containing
#' the numeric dry weight values (g); default: dry.weight
#' @param fresh.weight.saturated optional name of the column in data containing
#' the numeric saturated fresh weight values (g); default: fresh.weight.saturated
#' @details Relative water deficit (\%) is calculated as:
#' \deqn{RWD = 100 - 100 * ((FW - DW) (FWs - DW)^-1)}
#' whereas FW = fresh weight, DW = dry weight and FWs = fresh weight at water saturation.
#' @return the original data frame extended by a numeric column with the relative water deficit (RWD) (\%).
#' @examples
#' # get example data
#' df <- leaf_drying_data
#'
#' # extend df by RWD
#' df_with_RWD <- RelativeWaterDeficit(df)
#'
#' @import ggplot2
#' @importFrom graphics legend
#' @importFrom stats approx coef confint lm na.omit nls
#'
#' @export
# Append relative water deficit (RWD, %) to the data frame:
#   RWD = 100 - 100 * (FW - DW) / (FWs - DW)
# where FW = fresh weight, DW = dry weight, FWs = saturated fresh weight.
RelativeWaterDeficit <- function(data,
                                 fresh.weight = "fresh.weight",
                                 dry.weight = "dry.weight",
                                 fresh.weight.saturated = "fresh.weight.saturated") {
  # Validate the required columns up front (project helper).
  data_in <-
    ValidityCheck(
      data,
      fresh.weight = fresh.weight,
      dry.weight = dry.weight,
      fresh.weight.saturated = fresh.weight.saturated
    )
  fw <- data_in[[fresh.weight]]
  dw <- data_in[[dry.weight]]
  fws <- data_in[[fresh.weight.saturated]]
  # Same arithmetic as the original, written over named intermediates.
  RWD <- 100 - ((fw - dw) / (fws - dw)) * 100
  return(data.frame(data, RWD))
}
|
4f56f60d3561d945de322633447170f04f10efac
|
1dd83faf99068d13efbf35e34d91976b745b862b
|
/src/data_profiling/property_indicator_graphics.R
|
f16a0425a98b36b3944f3e1e58925f75f5b4dced
|
[] |
no_license
|
mclaire19/dspg20broadbandERS
|
6b5fc0c8676e34da5890ee0d822e5b01631e737a
|
175b913e751056d9867540ce2208d8a408d60aef
|
refs/heads/master
| 2022-12-06T01:10:36.113094
| 2020-08-07T14:20:58
| 2020-08-07T14:20:58
| 292,614,679
| 1
| 0
| null | 2020-09-03T15:52:48
| 2020-09-03T15:52:47
| null |
UTF-8
|
R
| false
| false
| 4,264
|
r
|
property_indicator_graphics.R
|
library(tidyverse)
# --- Fairfax County assessor data: collapse HOUSING_TYPE into broad bins ---
ffx_original$HOUSING_TYPE <- as.factor(ffx_original$HOUSING_TYPE)
summary(ffx_original$HOUSING_TYPE)
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'High Rise' = 'Multiplex/Condo/Apartment')
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Mid Rise' = 'Multiplex/Condo/Apartment')
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Low Rise' = 'Multiplex/Condo/Apartment')
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Mobile Home' = 'Single Family Home')
# NOTE(review): 'Multiplex/Condo/ApartmentCondo' looks like a typo for
# 'Multiplex/Condo/Apartment' -- confirm; as written, 'Multiplex' rows land
# in a separate category and drop out of the filters below.
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Multiplex' = 'Multiplex/Condo/ApartmentCondo')
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Townhouse' = 'Single Family Home')
ffx_original$HOUSING_TYPE <- recode(ffx_original$HOUSING_TYPE, 'Single Family Detached' = 'Single Family Home')
# Bar chart of housing types ordered by frequency, with count labels.
ggplot(ffx_original) +
  aes(x=forcats::fct_infreq(HOUSING_TYPE)) +
  geom_bar() +
  geom_text(stat = 'count', aes(label=..count.., vjust = -1))
# --- CoreLogic Fairfax data: map numeric property codes to the same bins ---
ffx_2018$property_indicator_code <- as.character(ffx_2018$property_indicator_code)
summary(ffx_2018$property_indicator_code)
ffx_2018$property_indicator_code <- recode(ffx_2018$property_indicator_code, '10' = 'Single Family Home')
ffx_2018$property_indicator_code <- recode(ffx_2018$property_indicator_code, '11' = 'Multiplex/Condo/Apartment')
ffx_2018$property_indicator_code <- recode(ffx_2018$property_indicator_code, '21' = 'Duplex')
ffx_2018$property_indicator_code <- recode(ffx_2018$property_indicator_code, '22' = 'Multiplex/Condo/Apartment')
ffx_2018$property_indicator_code <- recode(ffx_2018$property_indicator_code, '24' = 'Multiplex/Condo/Apartment')
# Counts per recoded type (note: 'Multiple/Condo/Apartment' in this filter
# looks like a typo for 'Multiplex/...' -- it never matches).
ffx_2018 %>%
  filter(property_indicator_code == 'Single Family Home' | property_indicator_code == 'Multiplex/Condo/Apartment' | property_indicator_code == 'Multiple/Condo/Apartment' | property_indicator_code == 'Duplex') %>%
  ggplot(aes(x=property_indicator_code)) +
  geom_bar()+
  geom_text(stat = 'count', aes(label=..count.., vjust = -1))
# --- Side-by-side comparison of the two sources for the shared categories ---
f1 <- ffx_2018 %>%
  group_by(property_indicator_code) %>%
  summarize(n = n()) %>%
  mutate(dataset = 'CoreLogic') %>%
  filter(property_indicator_code == 'Single Family Home' | property_indicator_code == 'Multiplex/Condo/Apartment' | property_indicator_code == 'Multiple/Condo/Apartment' | property_indicator_code == 'Duplex')
f2 <- ffx_original %>%
  group_by(HOUSING_TYPE) %>%
  summarize(n = n()) %>%
  mutate(dataset = 'Fairfax County') %>%
  filter(HOUSING_TYPE == 'Single Family Home' | HOUSING_TYPE == 'Multiplex/Condo/Apartment' | HOUSING_TYPE == 'Multiplex/Condo/Apartment' | HOUSING_TYPE == 'Duplex') %>%
  rename('property_indicator_code' = 'HOUSING_TYPE')
f <- rbind(f1, f2)
# Grouped bar chart: one bar per dataset within each property type.
ggplot(f) +
  aes(x = property_indicator_code, y = n, fill = dataset) +
  geom_bar(stat = 'identity', position = 'dodge') #+
#geom_text(aes(y = n, label = n), position_dodge(width = 1))
# --- New Kent County CoreLogic data: same recode, then a styled bar chart ---
nk_2018$property_indicator_code <- as.character(nk_2018$property_indicator_code)
summary(nk_2018$property_indicator_code)
nk_2018$property_indicator_code <- recode(nk_2018$property_indicator_code, '10' = 'Single Family Home')
nk_2018$property_indicator_code <- recode(nk_2018$property_indicator_code, '11' = 'Multiplex/Condo/Apartment')
nk_2018$property_indicator_code <- recode(nk_2018$property_indicator_code, '21' = 'Duplex')
nk_2018$property_indicator_code <- recode(nk_2018$property_indicator_code, '22' = 'Multiplex/Condo/Apartment')
nk_2018$property_indicator_code <- recode(nk_2018$property_indicator_code, '24' = 'Multiplex/Condo/Apartment')
nk_2018$property_indicator_code <- as.factor(nk_2018$property_indicator_code)
names(nk)
nk_2018 %>%
  filter(property_indicator_code == 'Single Family Home' | property_indicator_code == 'Multiplex/Condo/Apartment' | property_indicator_code == 'Multiple/Condo/Apartment' | property_indicator_code == 'Duplex') %>%
  ggplot(aes(x=reorder(property_indicator_code, desc(property_indicator_code)))) +
  geom_bar(fill = '#232D4B')+
  geom_text(stat = 'count', aes(label=..count.., vjust = -0.5), size =3) +
  xlab('Property Type') +
  ylab('Number of Properties') +
  theme_classic() +
  labs(title='Number of Properties by Type in New Kent County CoreLogic \nData')
|
53a5755a2d9056a11a785b746fdc0ea805939278
|
b8f60b0cc802d613ff252ebf5f2aec9ac005b3b7
|
/search/mlevy.R
|
de712c5336105a38d9b06fc05ec8eada83286f42
|
[] |
no_license
|
dsidavis/ResearchProfiles
|
e66675406195ab552dd888e5db65ca1e003d8e2a
|
e04ea0d2c712993485a6e19b0d38a18b506d13eb
|
refs/heads/master
| 2020-06-11T18:59:27.871947
| 2018-02-23T17:01:15
| 2018-02-23T17:01:15
| 38,620,699
| 1
| 2
| null | 2018-02-18T10:04:49
| 2015-07-06T13:29:13
|
HTML
|
UTF-8
|
R
| false
| false
| 157
|
r
|
mlevy.R
|
# Scrape the research page and keep only links whose URL contains
# "s00267" (presumably a journal/DOI identifier -- confirm intent).
hp = "http://www.michaellevy.name/research.html"
# All hyperlinks on the page, then resolved to absolute URLs.
ll = getHTMLLinks(hp)
ll = getRelativeURL(ll, hp)
doiRX = "s00267"
# value = TRUE returns the matching URLs themselves, not their indices.
links = grep(doiRX, ll, value = TRUE)
|
53b534688a2ef5defb8d8613584bb5d8b847a02d
|
d70884b04e80b474aa3cf5137e1758ae443ea330
|
/incomezip.R
|
05f22ee752e21692a9957ffcf20a232d2dcee5eb
|
[] |
no_license
|
VictorTran808/Econ256-Project
|
a46a6f394cc29e50f645b92b96686a3e0b6cb070
|
fdaad32b260afe09609eb856e58cdcab81bf2f66
|
refs/heads/master
| 2021-03-23T04:06:04.454059
| 2020-05-08T04:28:38
| 2020-05-08T04:28:38
| 247,421,165
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
incomezip.R
|
library(sf)
library(ggplot2)
library(tidyverse)
#SORTING/CLEANING
# 2014-2018 ACS estimates of median household income by ZCTA (IPUMS/NHGIS).
income <- read_csv("nhgis0008_ds239_20185_2018_zcta.csv")
income$ZIP <- as.character(income$ZCTA5A)  # join key must be character to match the shapefile
income$medinc <- income$AJZAE001           # AJZAE001 = median household income estimate
income <- select(income, "ZIP", "medinc")
# ZIP-code polygons for Honolulu.
zips <- read_sf(dsn = "zips", layer = "zips")
# BUG FIX: the original joined `incomezip` (not yet defined at this point)
# instead of the `income` table prepared above.
incomezip <- left_join(zips, income, by = "ZIP")
tidy_income <- select(incomezip, "ZIP", "medinc")
#st_write(tidy_income, dsn = "tidy_income", layer = "tidy_income", driver = "ESRI Shapefile")
#check <- read_sf(dsn = "tidy_income", layer = "tidy_income")
#PLOTTING
# PV installation points overlaid on the income choropleth.
pvdata <- read_sf(dsn = "solarzip", layer = "solarzip")
ggplot()+
  # Refer to the column directly inside aes() instead of tidy_income$medinc
  # (the $ form breaks ggplot's data masking, e.g. under facetting).
  geom_sf(data = tidy_income, aes(fill = medinc))+
  scale_fill_gradient(low = "#ffecde", high = "#5c2600", na.value = NA)+
  theme_void()+
  labs(title = "Median Household Income According to Zipcode", caption = "Data Source: IPUMS ACS & City and County of Honolulu", fill = "Median Household Income")+
  geom_sf(data = pvdata, color = "black") #ALL pv points
|
dbc633b97cbdc4f4eac807ac52fe64a46d637319
|
42934b7ab611c7550084172dcf7b0db42f015d1e
|
/R/localBias.R
|
e6fe2cd7ab26820b85517918827827196a2478dd
|
[] |
no_license
|
jskoien/intamapInteractive
|
4671e1305cb9a32d09f04542c06a57cdcf9c4924
|
7d0e8d58c4a0d5108f548c1aab974723e79a1ea4
|
refs/heads/master
| 2021-01-17T18:04:41.270596
| 2018-02-28T10:25:08
| 2018-02-28T10:25:08
| 70,917,111
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,115
|
r
|
localBias.R
|
# Subtract previously estimated local (per-group) biases from observations.
#
# object        data holding the dependent variable plus group / region
#               columns; anything supporting [[ ]] column indexing
#               (data.frame, Spatial*DataFrame).
# localBias     list as produced by findLocalBias(): one element per region
#               code (or a single element named "single"), each containing a
#               `bias` data.frame with columns `groups` and `wls` (the
#               weighted-least-squares bias estimate per group).
# gid           name of the grouping column (e.g. measurement network).
# formulaString formula whose left-hand side names the dependent variable.
# regCode       name of the region-code column.
#
# Returns `object` with the estimated bias removed from the dependent
# variable.
removeLocalBias = function(object, localBias, gid = "group",
                           formulaString = value ~ 1, regCode = "regCode") {
  depVar = as.character(formulaString[[2]])
  # seq_along/seq_len instead of 1:length(...) / 1:dim(...)[1]: the original
  # forms iterated over c(1, 0) when given an empty list or an empty bias
  # table and then failed with an out-of-bounds error.
  for (i in seq_along(localBias)) {
    lBias = localBias[[i]]$bias
    rci = names(localBias)[i]
    for (j in seq_len(nrow(lBias))) {
      if (rci == "single") {
        # Single region: match observations on group only.
        sel = object[[gid]] == lBias$groups[j]
      } else {
        # Multiple regions: match on region code AND group.
        sel = object[[regCode]] == rci & object[[gid]] == lBias$groups[j]
      }
      object[[depVar]][sel] = object[[depVar]][sel] - lBias$wls[j]
    }
  }
  return(object)
}
# Estimate per-group measurement biases, separately for each region code.
#
# When `object` carries a region-code column the data are split by region
# and localNetworkBias() is run within each region; otherwise all data are
# treated as one region stored under the name "single".  Regions containing
# only a single group are skipped, since no between-group comparison is
# possible there.  Returns a named list of localNetworkBias() results.
findLocalBias = function(object, gid = "group", formulaString = value ~ 1,
                         regCode = "regCode", ...) {
  hasRegions = regCode %in% names(object)
  if (hasRegions) {
    regionIds = unique(object[[regCode]])
    nRegions = length(regionIds)
  } else {
    nRegions = 1
  }
  gidCol = which(names(object) == gid)
  result = list()
  for (r in seq_len(nRegions)) {
    if (nRegions > 1) {
      regionName = as.character(regionIds[r])
      regionData = object[object[[regCode]] == regionName, ]
    } else {
      # Single region (or no region column at all).
      regionName = "single"
      regionData = object
    }
    # Bias is only identifiable when the region holds several groups.
    nGroupsHere = length(unique(regionData@data[, gidCol]))
    if (nGroupsHere > 1) {
      result[[regionName]] = localNetworkBias(regionData, gidCol,
                                              formulaString, ...)
    }
  }
  return(result)
}
# Estimate pairwise biases between measurement groups (networks) within one
# region by cross-kriging, then solve for a consistent per-group bias.
#
# localData     data for one region (sp object with an @data slot)
# igid          column index of the grouping variable in @data
# formulaString formula whose LHS is the dependent variable
# minKrige      minimum number of observations a group needs before a
#               variogram is fitted / it may act as the predicting group
# ...           extra arguments (currently forwarded nowhere visible here)
#
# Returns a list: `bias` (data.frame of OLS/WLS bias estimates per group)
# plus the inputs D (design matrix), V (variances) and Q (observed
# differences) that were passed to dSolve().
localNetworkBias = function(localData,igid,formulaString,minKrige = 3,...) {
#  groups = sort(unique(localData@data[,igid]))
depVar = formulaString[[2]]
groups = as.character(sort(unique(localData@data[,igid])))
nGroups = length(groups)
# Design matrix: one row per pairwise comparison; rows are appended below.
D = matrix(0,ncol = nGroups,nrow = 1)
# Fallback pure-nugget variogram for groups with too few points to fit one.
# NOTE(review): when a group is small, the variogram fitted for the
# PREVIOUS group is silently reused -- confirm this is intended.
varModel = vgm(0,"Nug",0,100)
V = array()
# Build one gstat object holding every group's data and variogram model.
for (i in 1:nGroups) {
ig = groups[i]
groupData = localData[localData@data[,igid] == ig,]
class(groupData) = class(localData)[1]
ndat = dim(groupData)[1]
# cat(paste(i,ig,"\n"))
# Fit a spherical variogram only when the group has enough observations.
if (ndat > minKrige) varModel = autofitVariogram(formulaString,groupData,model="Sph")$var_model
print(ndat)
print(varModel)
if (i == 1) {
gdat = gstat(NULL,id = as.character(ig),formula = formulaString,model = varModel,data = groupData)
} else {
gdat = gstat(gdat,id = ig,formula = formulaString,model = varModel,data = groupData)
}
}
im = 0
Q = c(1)
# For every ordered pair of overlapping groups, krige one group onto the
# other's locations and record the estimated systematic difference.
for (i in 1:(nGroups-1)) {
ig = groups[i]
ndat = sum(localData@data[,igid] ==ig)
groupDatai = gdat[[1]][ig][[1]]$data
if (ndat > minKrige) imod = gdat[[2]][ig][[1]]
for (j in (i+1):nGroups) {
jg = groups[j]
mdat = sum(localData@data[,igid]==jg)
groupDataj = gdat[[1]][jg][[1]]$data
if (mdat > minKrige) jmod = gdat[[2]][jg][[1]]
# commonArea() returns two overlap measures (bi for group i, bj for
# group j); presumably the fraction of each group's points inside the
# common area -- confirm against commonArea's definition.
cArea = commonArea(groupDatai,groupDataj)
bi = cArea[[1]]
bj = cArea[[2]]
# Group i predicts group j: needs sufficient overlap on j's side and
# enough data in i for a fitted variogram (imod).
if (bj > 0.5 & ndat > minKrige) {
krigj <- krig(formulaString, groupDatai,groupDataj,imod)
im = im + 1
D[im,i] = 1
D[im,j] = -1
Q[im] = krigj[[1]]
V[im] = krigj[[2]]
kresj = data.frame(icol = i, jcol = j,krigj[[3]])
# Debug dump of kriging residuals; permanently disabled via `& FALSE`.
if (im == 1 & FALSE) {
write.csv(round(kresj),file = "sl_varios.txt",append=TRUE)
} else if (FALSE) {
write.csv(round(kresj,2),file = "sl_varios.txt",col.names=FALSE,append=TRUE)
}
# Make room in the design matrix for the next comparison row.
D = rbind(D,matrix(0,ncol=nGroups,nrow=1))
}
# The symmetric comparison: group j predicts group i.
if (bi > 0.5 & mdat > minKrige) {
krigi <- krig(formulaString, groupDataj,groupDatai,jmod)
im = im + 1
D[im,j] = 1
D[im,i] = -1
Q[im] = krigi[[1]]
V[im] = krigi[[2]]
kresi = data.frame(icol = j, jcol = i,krigi[[3]])
D = rbind(D,matrix(0,ncol=nGroups,nrow=1))
}
}
}
#
# Final constraint row: the biases sum to zero (anchors the solution).
im = im+1
D[im,] = 1
Q[im] = 0
V[im] = min(V)
rDiff = list(D=D,Q=Q,V=V)
# Solve the (weighted) least-squares system for per-group biases.
locBias = dSolve(rDiff)
locBias = list(bias = data.frame(groups,ols = locBias$ols,ols.std = locBias$ols.std,
wls = locBias$wls,wls.std = locBias$wls.std),
D = D, V = V, Q = Q)
return(locBias)
}
# Cross-krige one group's observations onto another group's locations and
# estimate the systematic difference (bias) between the two groups.
#
# form       formula whose LHS is the dependent variable
# groupDatai data of the predicting group (with fitted variogram imod)
# groupDataj data of the group being predicted at
# imod       variogram model fitted for group i
#
# Returns list(aest, avar, strj):
#   aest - precision-weighted mean of (prediction - observation)
#   avar - estimated variance of aest (approximate, see note)
#   strj - the kriging result with the observed values attached ($obs)
krig = function(form, groupDatai, groupDataj, imod) {
  depVar = as.character(form[[2]])
  jlen = dim(groupDataj)[1]
  strj = krige(form, groupDatai, groupDataj, imod)
  strj$obs = groupDataj[[depVar]]
  # Weight each prediction location by its kriging precision.
  weight = 1/strj$var1.var
  aest = sum(weight*strj$var1.pred - weight*groupDataj[[depVar]])/sum(weight)
  # NOTE (original author): the prediction variance is acknowledged to be
  # approximate ("avar is wrong").  CLEANUP: removed an unused `errvar`
  # computation and an `avar` assignment that was immediately overwritten;
  # the formula below (weighted mean squared residual around aest, divided
  # by n) is the one that was actually in effect.
  avar = (weight%*%((strj$var1.pred-strj$obs-aest)^2))/sum(weight)/jlen
  return(list(aest,avar,strj))
}
|
34ffc1ec8ac7eb3011d8256cb43e108bded4a15c
|
2dfc1715d53363b0c4572c5305d82b36f2d7cee3
|
/man/pieplot-methods.Rd
|
eb64f0d2b3e80d2493fda91f4778ab08f51e55bf
|
[] |
no_license
|
cran/haplotypes
|
53833ab28b3e6b9995e21226efe93e2315065052
|
d161571530d08d75de14ed2c153d95776099d54d
|
refs/heads/master
| 2023-07-25T22:49:01.985321
| 2023-07-15T05:40:03
| 2023-07-15T06:35:14
| 34,604,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,012
|
rd
|
pieplot-methods.Rd
|
\name{pieplot-methods}
\docType{methods}
\alias{pieplot}
\alias{pieplot-methods}
\alias{pieplot,Parsimnet,Haplotype-method}
\title{Plots pie charts on statistical parsimony network}
\description{
Plotting pie charts on the statistical parsimony network.
}
\usage{
\S4method{pieplot}{Parsimnet,Haplotype}(x,y,net=1,factors, coord = NULL,inter.labels=FALSE,interactive=FALSE,rex=1,...)
}
\arguments{
\item{x}{ a \code{\link{Parsimnet}} object. }
\item{y}{ a \code{\link{Haplotype}} object.}
\item{net}{a numeric vector of length one indicating which network to plot.}
\item{factors}{a vector or factor giving the grouping variable (populations, species, etc.), with one element per individual. }
\item{coord}{a matrix that contains user specified coordinates of the vertices, or NULL to generate vertex layouts with \code{\link{network.layout}} function from package \pkg{network}. }
\item{inter.labels}{boolean; should vertex labels of intermediate haplotypes be displayed? }
\item{interactive}{boolean; should vertices be interactively adjusted? }
\item{rex}{ expansion factor for the pie radius.}
\item{...}{ arguments to be passed to \code{\link{floating.pie}} and others. See \sQuote{Details} }
}
\details{
This method calls \code{\link{floating.pie}} \code{{plotrix}}, \code{\link{network.vertex}} \code{{network}}, and \code{\link{plot.default}}, \code{\link{lines}}, and \code{\link{text}} \code{{graphics}}. This method also uses some internal structures of \code{\link{plot.network.default}} from package \pkg{network}. The following additional arguments can be passed to these functions:
\describe{
\item{mode}{ the vertex placement algorithm. Default is set to \code{"fruchtermanreingold"}. }
\item{pad}{ amount to pad the plotting range; useful if labels are being clipped. Default is set to 1.}
\item{displaylabels}{boolean; should vertex labels be displayed? }
\item{label}{ a vector of vertex labels. By default, the rownames of the distance matrix (\code{rownames(p@d[[net]])}) are used. If \code{inter.labels==FALSE} only haplotype labels are displayed. }
\item{label.cex}{ character expansion factor for labels. Default is set to 0.75.}
\item{label.col}{ an integer or character vector for the label colors. By default, it is 1 (black). }
\item{label.pos}{ position at which labels should be placed relative to vertices. 0 and 6 results in labels which are placed away from the center of the plotting region; 1, 2, 3, and 4 result in labels being placed below, to the left of, above, and to the right of vertices, respectively; and label.pos 5 or greater than 6 results in labels which are plotted with no offset (i.e., at the vertex positions). Default is set to 0.}
\item{label.pad}{ amount to pad the labels. This setting is available only if the labels are plotted with offset relative to vertex positions. Default is set to 1.}
\item{vertex.cex}{ a numeric vector of expansion factor for intermediate vertices (only). By default it is \code{(0.5)*min(radius)}. Use 'radius' to specify size of pie charts. }
\item{col}{ the colors of the pie sectors (i.e., colors for populations), by default \code{rainbow}. }
\item{vertex.col}{ an integer or character vector for the intermediate vertex colors. By default, it is 1 (black). }
\item{edge.col}{ an integer or character vector for the edge colors. By default, it is 1 (black). }
\item{edge.lwd}{ a numeric vector, edges line width. By default, it is 1. }
\item{edge.lty}{ a numeric vector of length one, specifies the line type for the edges. By default it is 1. }
\item{edges}{ the number of lines forming a pie circle, By default, it is 200. }
\item{radius}{ a numeric vector of length \code{p@nhap[net]} for the radius of drawn pie charts. Useful for specifying the radius independent of the haplotype frequencies. Default is \code{(0.8*(haplotype frequencies)*rex)/max(haplotype frequencies)}. }
\item{vertex.sides}{ number of polygon sides for vertices. Default is set to 50. }
\item{xlab}{x axis label. }
\item{ylab}{y axis label. }
}
}
\value{
A two-column matrix containing the vertex positions as x,y coordinates.
}
\section{Methods}{
\describe{
\item{\code{signature(x = "Parsimnet", y = "Haplotype")}}{
}
}}
\note{
Some internal structures of \code{\link{plot.network.default}} are taken from package \pkg{network} with modifications; original author Carter T. Butts.
}
\seealso{
\code{\link{plot,Parsimnet-method}},\code{\link{floating.pie}}, \code{\link{plot.default}} and \code{\link{plot.network.default}}
}
\author{
Caner Aktas, \email{caktas.aca@gmail.com}.
}
\examples{
data("dna.obj")
x<-dna.obj
h<-haplotypes::haplotype(x)
### Statistical parsimony with 95% connection limit
p<-parsimnet(x)
#randomly generated populations
pop<-c("pop1","pop2","pop3","pop4","pop5","pop6","pop7","pop8")
set.seed(5)
pops<-sample(pop,nrow(x),replace=TRUE)
## Plotting with default parameters.
pieplot(p,h,1, pops)
## Change colors for the populations.
#8 colors for 8 populations
cols<-colors()[c(30,369,552,558,538,642,142,91)]
pieplot(p,h,1, pops,col=cols)
## Expanding pie charts and intermediate vertices.
pieplot(p,h,1, pops,rex=2)
## Adjusting intermediate vertex sizes.
pieplot(p,h,1, pops, vertex.cex=rep(0.2, nrow(p@d[[1]])-p@nhap))
## Expanding pie charts and intermediate vertices, adjusting intermediate vertex sizes.
pieplot(p,h,1, pops,rex=2, vertex.cex=rep(0.1, nrow(p@d[[1]])-p@nhap))
## Adjusting radius of pie charts.
pieplot(p,h,1, pops,radius=rep(1, p@nhap))
\dontrun{
## Interactively adjusting vertex positions.
pieplot(p,h,1, pops, interactive=TRUE)
}
### Multiple networks with 99% connection limit.
p<-parsimnet(x,prob=.99)
## Plotting first network with default parameters.
pieplot(p,h,1, pops)
## Change colors for the populations.
#8 colors for 8 populations
cols<-colors()[c(30,369,552,558,538,642,142,91)]
pieplot(p,h,1, pops,col=cols)
}
\keyword{STATISTICAL PARSIMONY}
|
6ebbdfeb894ad9d7cfc0d4c9cd854a5360ce5bd9
|
1348830c4ac089d25b841bbb87283937d6362769
|
/MDS 534 - 2019 Summer/Unit 4 Assignment_2.R
|
41b61433ece050901d3e69f138b1822e85392034
|
[] |
no_license
|
megancusey/DataScienceCoursework
|
1a14938bc2070f21c64af44e92881e1ebe3e9c60
|
77b0799055e2d09f3fa98a318b52b96f27f5744c
|
refs/heads/master
| 2020-08-26T22:43:12.674912
| 2020-06-15T02:49:39
| 2020-06-15T02:49:39
| 217,170,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
Unit 4 Assignment_2.R
|
getwd()
library(arules)
library(arulesViz)
library(RColorBrewer)
## Define function
## Read a 0/1 transaction matrix from CSV and mine frequent itemsets with
## the Apriori algorithm (arules::apriori).
##
## file_name              path to a CSV with one column per item
## minsup                 minimum support threshold
## minconf                minimum confidence threshold -- currently unused:
##                        only frequent itemsets are mined, not rules
## unique_id_first_column pass 1 if the first column is a row key to drop
##
## Returns the inspected top-50 itemsets sorted by support.
function_apriori <- function (file_name, minsup, minconf, unique_id_first_column) {
  data <- read.csv(file_name)
  if (unique_id_first_column == 1)
  {
    ## remove the unique-key column so it is not treated as an item
    data <- data[ -1]
  }
  ## Replace any missing values with 0 (item absent)
  data[is.na(data)] <- 0
  ## Convert the 0/1 matrix into logical TRUE/FALSE values
  data <- apply(data,2,as.logical)
  ## Mine frequent itemsets.
  ## BUG FIX: removed a stray help(apriori) call that opened the help pager
  ## as a side effect on every invocation.
  rules <- apriori(data, parameter=list(support=minsup,target="frequent itemsets"))
  top <- inspect(head(sort(rules, by = "support"), 50))
  ## (rule generation / visualisation kept for reference)
  ##rules_6_6c <- apriori(ap7, parameter = list(support = 0.6, confidence = 0.8, target = "rules"))
  ##inspect(head(sort(rules_6_6c, by = "support"), 50))
  ##itemFrequencyPlot(transactional_data_6_6,topN=20,col=brewer.pal(8,'Pastel2'), main='Relative Item Frequency Plot',type="relative",ylab="Item Frequency (Relative)")
  ##plot(rules_6_6_r[1:10],method="graph")
  ## BUG FIX: the original did `return (inspect)`, which returned the
  ## arules `inspect` FUNCTION object rather than any result; return the
  ## inspected top itemsets instead.
  return(top)
}
#Reading Data from a file - format is one entry per transaction
# Interactive driver: prompt for the parameters, then run the helper above.
file_name <- readline(prompt="Enter File Name & Extension:")
min_sup <- as.numeric(readline(prompt="Enter Minimum Support:"))
min_conf <- as.numeric(readline(prompt="Enter Minimum Confidence:"))
unique_id_first_column <- as.numeric(readline(prompt="Enter a 1 if first column is the unique key:"))
# BUG FIX: the original called arules::apriori() directly with these
# arguments (wrong signature -- apriori takes data, not a filename);
# the wrapper function defined earlier in this file was intended.
function_apriori(file_name, min_sup, min_conf, unique_id_first_column)
##rm(list=ls())
|
d37dd1b1c7ede40257b5f3e3d558d61dfb71ff8e
|
ac4fb1bef33a85bd1b79632485d7ff21c7e0b6e7
|
/hpc58.R
|
cfe458304cdff7e3436d7ca3c3882a11b7ad4df1
|
[] |
no_license
|
samual-williams/Hyaena-density
|
2bd627a6d721776023bffcc739906f9a7fd45b6e
|
9ae0c82d8fce2ed87d5f3bacffda199892d27a53
|
refs/heads/master
| 2020-06-19T01:56:17.587557
| 2019-10-18T10:11:23
| 2019-10-18T10:11:23
| 196,524,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
hpc58.R
|
library(secr)
# Load the prepared capture history (cptr_hst) and habitat mask (maskOpen)
# created by an upstream script.
load("inputs.RData")
# is magnitude of detection affected by reserve size
# Spatially explicit capture-recapture fit: half-normal detection function
# (detectfn=0), baseline detection g0 modelled on ReserveSize, full
# likelihood (CL=FALSE).  NOTE(review): assumes ReserveSize is available
# as a covariate in the loaded objects -- confirm in inputs.RData.
mOpeng07_HN <- secr.fit(cptr_hst, model=g0~ReserveSize, mask=maskOpen, detectfn=0, CL=FALSE)
# Persist the fitted model for later model comparison.
saveRDS(mOpeng07_HN, file = "mOpeng07_HN.rds")
|
cb28fb8430ab6cf87745df7e03fd40b4ed0628f4
|
3fa5d2b705a1078b0fe099ff15a41955a18e81b6
|
/RScripts/siham/test6.R
|
d76fe9823e04a76bf389d0d2b65678920ef3dd0d
|
[] |
no_license
|
CodesByMass/Rambam-Hospital_EDA
|
4eb61b464877e1cd52599cc637c3bed356714254
|
eaf19646ebcbd3fa5a7776fdd6da7263955692b1
|
refs/heads/master
| 2020-12-20T00:59:43.111711
| 2020-04-26T22:36:59
| 2020-04-26T22:36:59
| 235,907,005
| 0
| 0
| null | 2020-01-23T23:30:16
| 2020-01-23T23:30:08
| null |
UTF-8
|
R
| false
| false
| 8,295
|
r
|
test6.R
|
library(dbConnect)
library(ggplot2)
library(dplyr)
library(hrbrthemes)
# SECURITY(review): database credentials are hard-coded in source; move
# them to environment variables or a config file kept out of version
# control, and avoid connecting as root.
con = dbConnect(MySQL(),dbname='hopital',user='root',password='Racheletmoi2',host='localhost')
###########################################################################################
# Arrivals per hour of day at department 1 ("Emergency Internal Medicine
# Unit") for July 2004 (month = 7).
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=7 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
# Hourly average on Fridays (dayofweek = 6) of 2004; computed and printed
# but not used in the plot below.
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=6 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
rq0
rq
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
# Line plot: patient arrivals by hour of day (labels in French).
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure pour juillet 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
# Same plot for month = 5.  NOTE(review): month 5 is May ("mai") but the
# plot title says "juin" (June) -- confirm which month was intended.
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=5 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
# Friday hourly average; fetched but unused below.
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=6 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure pour juin 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
###########################################################################################
# Same plot for September 2004 (month = 9).
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=9 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
# Friday hourly average; fetched but unused below.
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=6 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure \n pour septembre 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
###########################################################################################
# NOTE(review): this section is an exact duplicate of the July 2004 block
# above (same query, same title); consider factoring these repeated
# sections into a function parameterised by month and title.
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=7 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=6 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure pour juillet 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
###########################################################################################
# NOTE(review): duplicate of the month = 5 block above (query month 5 is
# May, title says "juin"); see the refactoring note earlier.
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=5 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=6 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure pour juin 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
###########################################################################################
# Same plot for January 2004 (month = 1); here the unused rq0 query also
# switches to dayofweek = 1 (Sunday) -- intent unclear since rq0 is never
# plotted.
rq=dbGetQuery(con,"SELECT COUNT(*),HOUR(v.entry_date) FROM visit_details v WHERE v.department=1 and month(v.entry_date)=1 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date);
")
rq0=dbGetQuery(con,"SELECT COUNT(*)/24 as valeur FROM visit_details v WHERE v.department=1 and dayofweek(v.entry_date)=1 and YEAR(v.entry_date)=2004 GROUP BY HOUR(v.entry_date)")
d<-data.frame(dose=c(rq$`HOUR(v.entry_date)`),len=c(rq$`COUNT(*)`) )
ggplot( data=d ,aes(x=dose, y=len)) +
geom_line( color="#993333" , alpha = 1,
size = 1.2) +
labs(x = "Heure",
y = "Nombres de patients",subtitle = " Departement \"Emergency Internal Medicine Unit\" ",
title = "Nombres de patients qui arrivent en heure \n pour janvier 2004 ")+
scale_color_discrete(labels = c("nombres de patients hospitaliser"))+
scale_x_continuous(name="Heure") +
scale_y_continuous(name="Nombre de patients ")+ theme(
plot.title = element_text(color="black", size=14, face="bold.italic",hjust = 0.5),
plot.subtitle = element_text(color="grey", size=14, face="bold",hjust = 0.5),
axis.title.x = element_text(color="blue", size=10, face="bold"),
axis.title.y = element_text(color="blue", size=14, face="bold")
)
|
419a437f6f9918a4d58b1bcbf41806e02594c788
|
48518af498426c9ccfb1cde5b72c4c684d1cadf4
|
/Openair/openair_plot_builder.r
|
177eb9b3ea5ee850ab2b7ad611de6f1f90aa17cb
|
[] |
no_license
|
agmeyer4/CO2_Eddy
|
a7393b48a2b28166bd011bc80b210c9098a00aaa
|
f8cd236539bac6dcffdd2564093c5b8b04be290d
|
refs/heads/master
| 2022-12-22T13:01:56.215806
| 2022-12-13T17:45:22
| 2022-12-13T17:45:22
| 203,871,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,718
|
r
|
openair_plot_builder.r
|
#library(reticulate)
#path_to_python <- "/uufs/chpc.utah.edu/common/home/u0890904/software/pkg/miniconda3/envs/CO2_Eddy/bin/python"
#use_python(path_to_python)
#sys = import("sys")
#test_package <- import("CO2_Dataset_Preparation")
#use_virtualenv("/uufs/chpc.utah.edu/common/home/u0890904/software/pkg/miniconda3/envs/CO2_Eddy")
#py_run_file("~/CO2_Eddy/openair_prep.py")
#os<-import("os")
#os$listdir(".")
##======================================================================================================
create_plot_calls <- function(tower,pn,excess_roll,poll_const,poll_stat,time_lag,wind_roll,low_ws,high_ws,percentile){
if (poll_stat == 'none'){
pollutant = poll_const
}else{
pollutant = paste0(poll_stat,'_r',excess_roll,'_',poll_const)
}
if (tower == 'Multi'){
ws = paste0('ws.t.',time_lag,'.')
wd = paste0('wd.t.',time_lag,'.')
}
if (tower == 'Picarro'){
if (wind_roll ==0){
ws=paste0('ws.t.',time_lag,'.')
wd=paste0('wd.t.',time_lag,'.')
}
else {
if (time_lag == 0){
ws = paste0('roll_',wind_roll,'_ws')
wd = paste0('roll_',wind_roll,'_wd')
}
else{
ws = paste0('roll_',wind_roll,'_ws.t.',time_lag,'.')
wd = paste0('roll_',wind_roll,'_wd.t.',time_lag,'.')
}
}
}
if (tower == 'WBB'){
ws = paste0('ws.t.',time_lag,'.')
wd = paste0('wd.t.',time_lag,'.')
}
info = paste0('tower_',tower,'_pn_',pn,'_timelag_',time_lag,'_windroll_',wind_roll,'_highws_',high_ws,'_lowws_',low_ws,'_percentile',percentile)
namelist = list("pollutant"=pollutant,"ws"=ws,"wd"=wd,"info"=info)
return(namelist)
}
###+===========================
## Pull summary numbers out of an openair polarPlot result object.
##
## Locates the grid cell with the highest surface value (z), converts its
## (u, v) wind components to speed and direction, and parses the CPF
## threshold out of the plot subtitle (the 4 characters before the final
## one, e.g. "... (=13.4)").  `index` is carried through unchanged as a
## `details` identifier column.
## NOTE(review): wd = atan2(v, u) * 180 / pi is the mathematical angle
## from the +u axis, not the meteorological wind direction convention --
## confirm which is intended.
extract_plot_data <- function(index, OA_data) {
  surface <- OA_data[["data"]][["z"]]
  peak <- max(surface[!is.na(surface)])
  peak_cell <- which(surface == peak)
  u_peak <- OA_data[["data"]][["u"]][[peak_cell]]
  v_peak <- OA_data[["data"]][["v"]][[peak_cell]]
  subtitle <- OA_data[["plot"]][["sub"]]
  threshold <- as.numeric(substr(subtitle, nchar(subtitle) - 4, nchar(subtitle) - 1))
  data.frame(cpf = threshold,
             max_prob = peak,
             u = u_peak,
             v = v_peak,
             ws = sqrt(u_peak^2 + v_peak^2),
             wd = atan2(v_peak, u_peak) / pi * 180,
             details = index)
}
###=========================================================================================
library(openair)
setwd('~/CO2_Eddy/Openair/')
path = '../../CO2_Data_Processed/R_Dataframes/'
# Load the pre-processed data frames: one list entry per tower, each a
# list of per-instrument CSVs.
dfs = list()
pic_dfs <- list()
for (i in 1:2){
filename = paste0('Picarro_',i,'.csv')
pic_dfs[[i]]<-read.csv(paste0(path,filename))
}
dfs[["Picarro"]] = pic_dfs
rm(pic_dfs)
multi_dfs <- list()
for (i in 1:3){
filename = paste0('Multi_',i,'.csv')
multi_dfs[[i]]<-read.csv(paste0(path,filename))
}
wbb_dfs <- list()
for (i in 1:1){
filename = paste0('WBB_',i,'.csv')
wbb_dfs[[i]]<-read.csv(paste0(path,filename))
}
dfs[["WBB"]] = wbb_dfs
dfs[["Multi"]] = multi_dfs
rm(wbb_dfs)
rm(multi_dfs)
# --- Analysis parameters for one CPF polar plot ---
plot_data = list()
pn = 2
tower = 'Picarro'
poll_const = 'Pic_CH4'
poll_stat = 'excess'
low_ws = 2
high_ws = 10
wind_roll = 10
excess_roll = 10
time_lag = 10
percentile = 90
# Restrict to a wind-speed band before plotting.
df_sub <- subset(dfs[[tower]][[pn]],ws>low_ws&ws<high_ws)
plot_builder_index = create_plot_calls(tower,pn,excess_roll,poll_const,
poll_stat,time_lag,wind_roll,
low_ws,high_ws,percentile)
# Single CPF polar plot; summary numbers are extracted from the plot
# object.  NOTE(review): this assigns a one-row data.frame to plot_data,
# which is immediately overwritten as a list element below -- confirm the
# intended flow.
plot_data <- extract_plot_data(plot_builder_index,
polarPlot(df_sub,pollutant=plot_builder_index$pollutant,
ws=plot_builder_index$ws,wd=plot_builder_index$wd,statistic = 'cpf',
percentile=percentile,key.header=plot_builder_index$info)
)
# Empty accumulator for a lag-time sweep (one row per tested lag).
plot_data[[pn]] <- data.frame(lag_time=numeric(0),
cpf =numeric(0),
max_prob=numeric(0),
u = numeric(0),
v = numeric(0),
ws = numeric(0),
wd = numeric(0),
details= character(0))
# Sweep over candidate time lags.  NOTE(review): the two accumulation
# lines at the bottom of the loop are commented out, so the sweep results
# are currently discarded and the final plot below has nothing to show.
for (t in c(1,3,5,10,15,20,25,30,35,40,45,50,55,60,90,120)){
plot_builder_index = create_plot_calls(tower,pn,excess_roll,poll_const,poll_stat,t,wind_roll,low_ws,high_ws,percentile)
plot_data_t = extract_plot_data(plot_builder_index,
polarPlot(df_sub,pollutant=plot_builder_index$pollutant,
ws=plot_builder_index$ws,wd=plot_builder_index$wd,statistic = 'cpf',
percentile=percentile,key.header=plot_builder_index$info)
)
#plot_data_t$time_lag = t
#plot_data[[pn]] = rbind(plot_data[[pn]],plot_data_t)
}
# Compare wind speed at the CPF maximum across lags for both Picarro
# instruments.  NOTE(review): ggplot2 is never loaded in this script, and
# time_lag is only populated by the commented-out lines above.
ggplot()+
geom_point(data = plot_data[[1]],aes(x=time_lag,y=ws),color="red")+
geom_point(data = plot_data[[2]],aes(x=time_lag,y=ws),color="blue")
|
70bc7af8a3ce88eab944629acd466dd286b74c7d
|
8b26eae5081c31c1b70e0f8cfef30da01b567488
|
/Assignment3.R
|
cb6346f9685d74bcb151e163aa6782e22f85204f
|
[] |
no_license
|
aihiangng/ST3248-StatisticalAnalysis
|
7fab323e9fa6b78c5beb7d2de130ef5cff1897e6
|
e1553f3ce2286da10ecba33efc290cfd864fef12
|
refs/heads/master
| 2020-11-25T00:38:09.700654
| 2019-12-16T15:02:59
| 2019-12-16T15:02:59
| 228,412,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,344
|
r
|
Assignment3.R
|
#Assignment 3
#q1) Manual 10-fold cross-validation for polynomial regression of mpg on
#    horsepower (ISLR's Auto data), polynomial degrees 1..10.
library(ISLR)
set.seed(17) #set same as chapter 5
k = 10
#method
#get the remainder
#(total - remainder) %% kvalue = number of observations in each fold
#for each remainder observation, randomly insert it into a fold
#loop through polynomials and then each fold (nested for loop)
num_obs = nrow(Auto)
remain_obs = num_obs%%10 #observations left over after even split
num_obs2 = num_obs-remain_obs # number divisible by k value
#shuffle the dataset first before splitting into k folds
sampled_data = sample(num_obs, num_obs2) #returns row indices
#split sampled data into each fold
split_data = split(sampled_data, f = 1:k) #returns a list of k index vectors
#get the remainder observations (rows not yet assigned to any fold)
remain_data = setdiff(1:num_obs, sampled_data)
#randomly choose a fold for each remainder observation
sample_kfold = sample(k, remain_obs)
#store cv results: one mean error per polynomial degree
cv.error=rep(0,k)
#BUG FIX: append the left-out observation index (remain_data[i]) to the
#chosen fold; the original appended the fold number itself
#(sample_kfold[i]), so the held-out rows were never cross-validated and a
#spurious row index equal to the fold id was tested instead.
#seq_len also avoids iterating c(1, 0) when there is no remainder.
for (i in seq_len(remain_obs)){
split_data[[sample_kfold[i]]] = c(split_data[[sample_kfold[i]]] , remain_data[i])
}
for (poly in 1:10){
mean_lst = numeric(10)
for (kfold in 1:k){
test_data = Auto[split_data[[kfold]], ] #held-out fold
train_data = Auto[setdiff(1:num_obs, split_data[[kfold]]),]
model=lm(mpg~poly(horsepower, degree=poly, raw=T), data=train_data) #fit on remaining folds
pred=predict(model, test_data) #predict on the held-out fold
#mean squared error on this fold
mean_lst[kfold] = sum((test_data$mpg-pred)^2)/length(split_data[[kfold]])
}
cv.error[poly] = mean(mean_lst)
}
#cv.error returns 24.38442 19.21583 19.23603 19.36616 18.99810 19.07437 18.98292 19.06402 19.06271 19.29920
#BUG FIX: the line below was bare numbers separated by spaces (a syntax
#error that stopped the script); kept here as a comment of recorded results.
#CV.error = c(24.23151, 19.24821, 19.33498, 19.42443, 19.03321)
#Question 2
#2a)
set.seed(1)
X = rnorm(100)
noise = rnorm(100)
Y = 3 + 1*X + 4*X^2 - 1*X^3 + noise
#2b)
require(leaps)
df = data.frame(Y, X)
fit = regsubsets(Y ~ poly(X, 10), data = df, nvmax = 10)
fit_summary = summary(fit)
require(tidyverse)
require(ggplot2)
require(ggthemes);
data_frame(Cp = fit_summary$cp,
BIC = fit_summary$bic,
AdjR2 = fit_summary$adjr2) %>%
mutate(id = row_number()) %>% #add new
gather(value_type, value, -id) %>%
ggplot(aes(id, value, col = value_type)) +
geom_point() + geom_line() + ylab('') + xlab('Number of Variables Selected') +
facet_wrap(~ value_type, scales = 'free') +
scale_x_continuous(breaks = 1:10)
coef(fit,which.min(fit_summary$bic))
#d)
require(caret)
#forward model
x_poly = poly(df$X, 10)
colnames(x_poly) <- paste0('poly', 1:10) #paste0 concatenates vectors after converting to character
model_for = train(y = Y, x = x_poly,
method = 'glmStepAIC', direction = 'forward',
trace = 0,
trControl = trainControl(method = 'none', verboseIter = FALSE))
postResample(predict(model_for, data.frame(x_poly)), df$Y) #calculated mean squared error of 2 numeric vectors of data
summary(model_for$finalModel)
#backward
model_back = train(Y ~ poly(X, 10), data = df,
method = 'glmStepAIC', direction = 'backward',
trace = 0,
trControl = trainControl(method = 'none', verboseIter = FALSE))
postResample(predict(model_back, df), df$Y) #calculated mean squared error of 2 numeric vectors of data
summary(model_back$finalModel)
|
ddc57a87451407ef1897e64b60f97df56b7d1557
|
8f0b59d4c71bf8fca01870f1b171564c2573a7fb
|
/tests/testthat.R
|
b43f9b963c1cb76534291f2ae1a8457b2ee4bdbc
|
[] |
no_license
|
felipe179971/USK
|
2b17fa9717229b9f16fcc21940455d38253ab01a
|
f4ab721570b77281ee204c99d1ce7d14f772b47a
|
refs/heads/master
| 2023-08-29T10:27:32.779953
| 2021-11-13T22:27:24
| 2021-11-13T22:27:24
| 369,890,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
testthat.R
|
# Test runner: executes the testthat suite shipped with the USK package.
library(testthat)
# Load the package under test
library(USK)
# Discover and run all tests under tests/testthat/
test_check("USK")
|
9b0c62c05e2f2ba56c58b74843da8e3bd7ffa1fd
|
056de4134ef2e967846ae29883128d14dd5ac0a1
|
/exploring.R
|
c57a3469b31f7c60c3d1c85acc59f67a2b10e2c2
|
[] |
no_license
|
j-desilva/sports-analysis-project
|
6bb0977750a176ac458cd0f1aacc002721a8feb4
|
5b6d3fd164585d897d3f3b8e5708879b53d658e0
|
refs/heads/main
| 2023-04-16T20:29:06.011907
| 2021-05-09T00:35:18
| 2021-05-09T00:35:18
| 361,029,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,419
|
r
|
exploring.R
|
# Exploratory plots for the joined team-statistics table.

# Count missing values across the whole table
sum(is.na(joined))

# Scatterplot: net rating against wins
ggplot(joined, aes(NRtg, W)) +
  geom_point()

# Bar chart: teams ordered from worst to best net rating
ggplot(joined, aes(reorder(Team, NRtg), NRtg)) +
  geom_bar(stat = "identity", fill = "dodgerblue") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.8))

# Histogram: distribution of offensive rating
ggplot(joined, aes(ORtg)) +
  geom_histogram(binwidth = 1, fill = "dodgerblue", colour = "black")

# Scatterplots: each scoring stat against offensive rating
ggplot(joined, aes(FT, ORtg)) +
  geom_point()
ggplot(joined, aes(P2, ORtg)) +
  geom_point()
ggplot(joined, aes(P3, ORtg)) +
  geom_point()

# FT vs ORtg with point colour/size encoding free-throw attempts
FTA_plot <- ggplot(joined, aes(FT, ORtg, label = Team)) +
  geom_point(aes(colour = FTA, size = FTA)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(FTA_plot)

# P2 vs ORtg with point colour/size encoding two-point attempts
P2A_plot <- ggplot(joined, aes(P2, ORtg, label = Team)) +
  geom_point(aes(colour = P2A, size = P2A)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(P2A_plot)

# P3 vs ORtg with point colour/size encoding three-point attempts
P3A_plot <- ggplot(joined, aes(P3, ORtg, label = Team)) +
  geom_point(aes(colour = P3A, size = P3A)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(P3A_plot)

# FT vs ORtg with point colour/size encoding assists
FT_AST_plot <- ggplot(joined, aes(FT, ORtg, label = Team)) +
  geom_point(aes(colour = AST, size = AST)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(FT_AST_plot)

# P2 vs ORtg with point colour/size encoding assists
P2_AST_plot <- ggplot(joined, aes(P2, ORtg, label = Team)) +
  geom_point(aes(colour = AST, size = AST)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(P2_AST_plot)

# P3 vs ORtg with point colour/size encoding assists
P3_AST_plot <- ggplot(joined, aes(P3, ORtg, label = Team)) +
  geom_point(aes(colour = AST, size = AST)) +
  scale_colour_gradient(low = "red", high = "green")
# Interactive version
ggplotly(P3_AST_plot)

# Flag each team as above or below the league-average true-shooting percentage
joined <- mutate(joined, TSp_avg = if_else(TSp > mean(TSp), "above_avg", "below_avg"))

# Box plot of offensive rating by the above/below-average grouping
ggplot(joined, aes(TSp_avg, ORtg)) +
  geom_boxplot(aes(fill = TSp_avg))

# Faceted histograms: ORtg distribution for each level of TSp_avg
ggplot(joined, aes(ORtg, fill = TSp_avg)) +
  geom_histogram(colour = "black", binwidth = 2) +
  facet_wrap(~TSp_avg, nrow = 2) +
  theme(legend.position = "none")

# TSp against ORtg with a fitted linear trend line
ggplot(joined, aes(TSp, ORtg)) +
  geom_point(colour = "dodgerblue", size = 2) +
  geom_smooth(method = lm, colour = "magenta")
|
de158b05e5afb920e1697659acd234fc14e004fc
|
8cf72f7ea4604e1616501f68a27d0e1c82619002
|
/man/barplot_server.Rd
|
4588c8411a572fbb5a5014fac6af1503196774ea
|
[
"MIT"
] |
permissive
|
Gibbsdavidl/iatlas.modules
|
394801edc5f0c2aae861463b1fe5128204a2d704
|
42bc8d9d2789eea6229010a5159cd5d8ab390513
|
refs/heads/master
| 2023-02-27T00:16:49.504703
| 2021-01-31T20:27:54
| 2021-01-31T20:27:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,290
|
rd
|
barplot_server.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barplot_server.R
\name{barplot_server}
\alias{barplot_server}
\title{Barplot Server}
\usage{
barplot_server(
id,
plot_data_function,
feature_classes = shiny::reactive(NULL),
barplot_xlab = shiny::reactive(""),
barplot_ylab = shiny::reactive(""),
barplot_title = shiny::reactive(""),
barplot_label = shiny::reactive("Feature"),
drilldown = shiny::reactive(F),
...
)
}
\arguments{
\item{id}{Module ID}
\item{plot_data_function}{A shiny::reactive that returns a function
The function must take an argument called ".feature_class" and return a
dataframe with columns "sample", "feature", "feature_value", "group",
and optionally "group_description"}
\item{feature_classes}{A shiny::reactive that returns a vector of strings.
One of these strings is passed to plot_data_function}
\item{barplot_xlab}{A shiny::reactive that returns a string}
\item{barplot_ylab}{A shiny::reactive that returns a string}
\item{barplot_title}{A shiny::reactive that returns a string}
\item{barplot_label}{A shiny::reactive that returns a string}
\item{drilldown}{A shiny::reactive that returns True or False}
\item{...}{shiny::reactives passed to drilldown_scatterplot_server}
}
\description{
Barplot Server
}
|
d24d99ecf5bc31e371ef67c6b4f336bc8afb5377
|
e5f34fb38da6067679256a8cf3aceebceac58b9c
|
/makeCodebook.R
|
5ba84fa07c37fe16ff94f6a5aa656513de15ae29
|
[] |
no_license
|
artitj/GCDProject
|
4f21b57d9c1c5b969e5ca3f16d8afedbc6418b7e
|
b38dee67479007c60874f961c14d3355aaa1c838
|
refs/heads/master
| 2021-01-02T09:43:47.569988
| 2014-08-24T17:21:15
| 2014-08-24T17:21:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,557
|
r
|
makeCodebook.R
|
# makeCodebook.R
# Creates CodeBook.md describing every variable in `tidyData`.
# NOTE: Assumes this is run immediately after run_analysis.R so that
# `tidyData` exists in the workspace.

# Redirect all cat() output into the codebook file
sink('CodeBook.md')

# Read the static codebook header from codebookHead.md as one string
# (technique from
# http://stackoverflow.com/questions/9068397/import-text-file-as-single-character-string)
headerFile <- 'codebookHead.md'
header <- readChar(headerFile, file.info(headerFile)$size)
cat(header)
cat("\n\n")

featNames <- names(tidyData)

cat("### Variables\n\n")
# BUG FIX: the original sentence omitted the word "rows" and misspelled
# "Column" below, producing garbled codebook text.
cat(paste("This dataset contains ", nrow(tidyData),
          " rows which corresponds to all combinations of ",
          length(unique(tidyData$Subject)), " subjects and ",
          length(unique(tidyData$Activity)), " activities."))
cat(paste("There are ", length(featNames), " columns in the dataset."))
cat(paste("These columns are described below.\n\n"))

for (i in seq_along(featNames)) {
  # Describe the range: numeric columns get [min, max], factors get levels
  if (!is.factor(tidyData[, i])) {
    colMin <- min(tidyData[, i])
    colMax <- max(tidyData[, i])
    rangeStr <- paste("[", colMin, ",", colMax, "]")
  } else {
    rangeStr <- paste(levels(tidyData[, i]), collapse = ",")
  }
  # Emit one markdown section per variable
  cat("---\n")
  cat(paste("Variable name: ", featNames[i], "\n\n"))
  cat(paste("Column position: ", i, "\n\n"))
  cat(paste("R Class: ", class(tidyData[, i]), "\n\n"))
  cat(paste("Range: ", rangeStr, "\n\n"))
}

# BUG FIX: restore normal console output (the original never closed the sink,
# leaving all subsequent output redirected to CodeBook.md)
sink()
|
449e146a4f7a7d320408e576b8e04071217aa2f6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/auRoc/examples/auc.nonpara.kernel.Rd.R
|
ccb5dabbef29099d94c788c772e0130fc1320380
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
auc.nonpara.kernel.Rd.R
|
# Example code extracted from the help page of auRoc::auc.nonpara.kernel.
library(auRoc)
### Name: auc.nonpara.kernel
### Title: AUC by Kernel Methods
### Aliases: auc.nonpara.kernel
### Keywords: htest

### ** Examples

#Example 1: kernel-based AUC estimate for FDG uptake, grade 1 vs. grade 2
data(petBrainGlioma)
# drop=TRUE returns the selected column as a plain vector
y <- subset(petBrainGlioma, grade==1, select="FDG", drop=TRUE)
x <- subset(petBrainGlioma, grade==2, select="FDG", drop=TRUE)
auc.nonpara.kernel(x, y)
## Not run: 
##D #Example 2
##D data(petBrainGlioma)
##D y <- subset(petBrainGlioma, grade==1, select="ACE", drop=TRUE)
##D x <- subset(petBrainGlioma, grade==2, select="ACE", drop=TRUE)
##D auc.nonpara.kernel(x, y, integration="TRUE",
##D bw="sj", method="bootstrapBCa", nboot=999)
##D 
## End(Not run)
|
531cef17429f877ac81744ffe21ce01b57ff0cb3
|
64d290b9be9794104272362e2e8a368fceb5052c
|
/plot4.R
|
6014d68014bac650de50719a71fdc301839fd818
|
[] |
no_license
|
meblesa/ExploratoryDataWk4
|
49c6a40626fff6c0c6b9db3ece3e336fe67a07f0
|
64f2d1d24c2e8d775473fe6d076d98ff874e8d2f
|
refs/heads/main
| 2023-06-24T21:02:35.790441
| 2021-07-29T06:02:36
| 2021-07-29T06:02:36
| 390,198,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
plot4.R
|
# plot4.R
# Question: Across the United States, how have emissions from coal
# combustion-related sources changed from 1999-2008?

# 1. Download and unzip the NEI data into the project folder
filen <- "Data.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileURL, filen, method = "curl")
unzip(filen)

# BUG FIX: readRDS() is part of base R; the original installed and loaded the
# unrelated "RDS" package (respondent-driven sampling), which is not needed.

## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Explore the source-classification table; the SCC code is the key shared
# between NEI and SCC
str(SCC)

# Merge emissions with source classifications on the SCC code
M1 <- merge(NEI, SCC, by = "SCC")
# Keep only sources whose Short.Name mentions "Coal"
M2 <- dplyr::filter(M1, grepl("Coal", Short.Name))
# Total emissions per year
M3 <- aggregate(M2$Emissions, by = list(M2$year), sum)
colnames(M3) <- c("year", "Emissions")

# Plot emissions (y-axis) by year (x-axis) and save as PNG
png("plot4.png")
# BUG FIX: the original passed M3$year as barplot()'s second positional
# argument, which is `width` (bar widths); the years are only needed as
# bar labels via names.arg.
barplot(M3$Emissions, names.arg = M3$year, col = "orange",
        main = "Total Coal emissions in the United States",
        xlab = "Year", ylab = "Emissions (tons)")
dev.off()
|
06710cb883491154796037fcc615cf8883f4c397
|
e9c07ff460f0d1c20e6a5a1789be2108c47a5ee6
|
/plot1.R
|
4111e84f8559186777b25fc6617ab67b67167b31
|
[] |
no_license
|
marcfinot/ExData_Plotting1
|
2753ec93bf867b6c483daf44b50409b355d75b6a
|
0e7c2628053640fbdb447d2b730ff7e86d9540e2
|
refs/heads/master
| 2021-01-09T07:47:53.512532
| 2014-10-10T06:29:48
| 2014-10-10T06:29:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 684
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 .. 2007-02-02
library(data.table)

filename <- "household_power_consumption.txt"  ## file to read
# "?" marks missing values in this dataset
dt <- read.table(filename, sep = ";", header = TRUE, na.strings = "?")

## create a new column with the date/time stamp
dt$DateTime <- strptime(paste(dt$Date, dt$Time), format = "%d/%m/%Y %H:%M:%S")
dt$Date <- as.Date(dt$Date, format = "%d/%m/%Y")

## subset rows between the two dates (inclusive)
startdate <- "2007-02-01"
enddate <- "2007-02-02"
# BUG FIX: the original wrote "< =" (a syntax error) instead of "<="
dts <- dt[(startdate <= dt$Date) & (dt$Date <= enddate), ]

## plot 1: histogram of global active power
hist(dts$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.copy(png, file = "plot1.png")  ## copy the screen plot to a PNG file
dev.off()
|
82adf4dff2089965e2ba7496896b676bbcd74aa4
|
19a6c2cc5a32f71bdc10ae657361ca9b2aeef182
|
/plot1.R
|
32a68bd844005cfe6bb16ae24cb5b0bf9a337ff8
|
[] |
no_license
|
friedoutkombi/ExData_Plotting1
|
f9f3c3907f0ab9b6451186174820ae752855d485
|
5753f27b1e6824eb3421880bb3dfb95d03c8785b
|
refs/heads/master
| 2021-01-20T01:44:30.472710
| 2017-04-25T12:20:17
| 2017-04-25T12:20:17
| 89,322,999
| 1
| 0
| null | 2017-04-25T05:59:51
| 2017-04-25T05:59:51
| null |
UTF-8
|
R
| false
| false
| 1,184
|
r
|
plot1.R
|
# plot1.R -- Plot 1: Global Active Power histogram.
# Steps: load the dataset, subset to the two-day analysis period, convert
# column types, then draw the histogram and export it to PNG.
# NOTE(fix): the original drew the histogram twice (once to the screen device
# before png()) and contained a no-op "convert to kilowatts" self-assignment;
# both have been removed -- the PNG output is unchanged.

# Load in the data
df <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep only the two days relevant to the analysis period
twodaydata <- subset(df, Date %in% c("1/2/2007", "2/2/2007"))

# Convert Date to the appropriate class (Date)
twodaydata$Date <- as.Date(twodaydata$Date, format = "%d/%m/%Y")

# Convert Global Active Power to numeric (values are already in kilowatts,
# so no unit conversion is needed)
twodaydata$Global_active_power <- as.numeric(as.character(twodaydata$Global_active_power))

# Export to png format
png("plot1.png", width = 480, height = 480)
# Create the histogram
hist(twodaydata$Global_active_power, main = "Global Active Power", col = "Red",
     xlab = "Global Active Power (kilowatts)")
# Close the device
dev.off()
|
b09cdbcef9e625bb043558e02d03bb44aa1a3126
|
0323b501e5edb696ac88a15e60d6a3bb9218222c
|
/man/LRC3_TotalListOfLRpairs.Rd
|
9b1aa3fe20b993189b7b8e36f80829253f8cfd49
|
[] |
no_license
|
XiaojuanShen/LRC3
|
a33ebe9faed00db34b1a9695bd138cb0e1547810
|
c3ddc05abface17447f7bcd527965bf4bc4a6c7e
|
refs/heads/master
| 2020-03-26T21:54:04.998724
| 2018-08-21T09:12:43
| 2018-08-21T09:12:43
| 140,725,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,706
|
rd
|
LRC3_TotalListOfLRpairs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LRC3_Alpha.R
\name{LRC3_TotalListOfLRpairs}
\alias{LRC3_TotalListOfLRpairs}
\title{Total list of ligand-receptor pairs.}
\usage{
LRC3_TotalListOfLRpairs(LRC3_list)
}
\arguments{
\item{LRC3_list}{the input data generated by function LRC3_INF.}
}
\value{
ListOfLRpairs a table contains information of ligand-receptor pairs used between pairwised cell types.
In the table, the first two columns are the names of ligand and receptor pairs.
Signal is the mean of the two expression value of ligand and receptor in the pairwised cell types.
L_Signal is the signal for the ligand, which is the factor of mean expression value of the ligand in a cell type divided by the threshold of the ligand.
R_Signal is the signal for the receptor, which is the factor of the mean expression value of the receptor in a cell type divided by the threshold of the receptor.
L_Origin is the mean expression value of the ligand in a cell type.
R_Origin is the mean expression value of the receptor in a cell type.
L_Thresh is the threshold of the ligand.
R_Thresh is the threshold of the receptor.
Celltypes are the two interacting cell types that are using the pathway.
}
\description{
This function gives a table that contains the information of ligand-receptor pairs used between pairwised cell types.
It includes the name of the ligands and receptors, the signal of the pathways,
the original mean expression value of a gene, the threshold for a gene, and cell type names that uses the pathways.
The pairs are ranked by their signal of the pathways, which is the mean of the two expression value of ligand and receptor in the pairwised cell types.
}
|
c9cab89c91ddcc0ee33cf1a9d480e059469d4226
|
eac2ef9a7a0f0d3b8ba4833f8aa0a6a7bee15bf4
|
/icu project/icu_r/icu_r/main.R
|
fa376ebefeda6cf295d17919f493fb9b6eb7781e
|
[] |
no_license
|
yaozhengjie/ICU_SEARCH
|
984d675a64075f0849cc567043432a27ce1386f8
|
15cbd4b4e5ef2b7f689ba642b5240260b8c54904
|
refs/heads/master
| 2020-02-26T17:27:20.796308
| 2016-10-24T05:21:53
| 2016-10-24T05:21:53
| 71,754,027
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
main.R
|
# Exploratory script: C++ interop via Rcpp, loading MIMIC-style ICU tables,
# and timing three ways of running a bootstrap logistic regression.
library(Rcpp)
library(compiler)
# BUG FIX: foreach/%dopar% and icount() were used below without loading their
# packages, so the script could not run at all.
# NOTE(review): %dopar% still needs a registered parallel backend (e.g.
# doParallel::registerDoParallel()) to actually run in parallel; without one
# it warns and executes sequentially.
library(foreach)
library(iterators)

# Naive recursive Fibonacci compiled from C++ (for speed experiments)
cppFunction(
'int fib_cpp_0(int n){
if(n==1||n==2) return 1;
return(fib_cpp_0(n-1)+fib_cpp_0(n-2));
}'
)

# ICU tables from hard-coded local paths; adjust paths for your machine
patient.data <- read.table("F:\\Downloads\\PATIENTS_DATA_TABLE.csv", header = TRUE, sep = ",")
icustay.data <- read.table("F:\\Downloads\\ICUSTAYS.csv", header = TRUE, sep = ",", nrows = 10)
admissions.data <- read.table("F:\\Downloads\\ADMISSIONS.csv", header = TRUE, sep = ",", nrows = 100)
chartevents.data <- read.table("F:\\Downloads\\CHARTEVENTS_DATA_TABLE.csv", header = TRUE, sep = ",", nrows = 100000)
labevents.data <- read.table("F:\\Downloads\\LABEVENTS.csv", header = TRUE, sep = ",", nrows = 1000000)
sepsis.data <- read.table("F:\\Downloads\\sepsis_icu.csv", header = TRUE, sep = ",")

# Map a character code to a number; "2" falls through to the "3" branch,
# unmatched codes return NULL
centre <- function(type) {
  switch(type,
         "2" = , "3" = 4,
         "5" = 5)
}

# Two-class subset of iris: Sepal.Length (predictor) and Species (response).
# (A dead `x = c(1,2,3)` assignment that was immediately overwritten here
# has been removed.)
x <- iris[which(iris[, 5] != "setosa"), c(1, 5)]

trials <- 10000
# Time the same bootstrap of logistic-regression coefficients three ways:
# %dopar% with an iterator, %dopar% with an index, and a plain for loop
system.time(r <- foreach(icount(trials), .combine = cbind) %dopar% {
  ind <- sample(100, 100, replace = TRUE)
  result1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
  coefficients(result1)
})
system.time(r <- foreach(i = 1:trials) %dopar% {
  ind <- sample(100, 100, replace = TRUE)
  result1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
  coefficients(result1)
})
system.time(r <- for (i in 1:trials) {
  ind <- sample(100, 100, replace = TRUE)
  result1 <- glm(x[ind, 2] ~ x[ind, 1], family = binomial(logit))
  coefficients(result1)
})
|
0aef56943dba31c1baa199bc39c13fb5007dedab
|
8b49a8ab0b211b3f7be15acbedce3986484a3696
|
/inst/unitTests/test_intersect.R
|
b4b60002cb3b98eccdd3b8263744ef67e55374ab
|
[] |
no_license
|
lawremi/HelloRanges
|
1740e78dc3d024413b8026ba544f27c2bd9418cd
|
d7e62515dce9e25d7e6c87f7b1741b32ea9c24c2
|
refs/heads/master
| 2022-11-07T08:03:05.027227
| 2022-10-25T02:26:37
| 2022-10-25T02:27:12
| 61,642,306
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,335
|
r
|
test_intersect.R
|
### =========================================================================
### Tests for bedtools intersect command
### -------------------------------------------------------------------------
###
### Based on tests from bedtools (C) 2016 Aaron Quinlan et al.
###
### Each test compiles a bedtools-style command line into an R language
### object with bedtools_intersect(), eval()s it, and compares the result
### against a hand-constructed expectation with checkIdentical().
test_intersect <- function() {
    ## pintersect() adds a 'hit' metadata column the expectations omit
    fixup <- function(x) {
        mcols(x)$hit <- NULL # 'hit' column added by pintersect()
        x
    }
    ## All fixture files (a.bed, b.bed, *.sam, ...) live in the package data
    setwd(system.file("unitTests", "data", "intersect", package="HelloRanges"))

    ## --- BED vs BED: self-intersection and -v (no-overlap) ---
    exp <- GRanges("chr1", IRanges(c(11, 101), c(20, 200)), name=c("a1", "a2"),
                   score=c(1, 2), strand=c("+", "-"))
    r <- bedtools_intersect("-a a.bed -b a.bed")
    checkIdentical(exp, fixup(eval(r)))
    r <- bedtools_intersect("-a a.bed -b a.bed -v")
    checkIdentical(exp[NULL], eval(r))

    ## --- -c (overlap counts), with strandedness and fraction filters ---
    cexp <- exp
    mcols(cexp)$overlap_count <- c(0L, 2L)
    r <- bedtools_intersect("-a a.bed -b b.bed -c")
    checkIdentical(cexp, eval(r))
    mcols(cexp)$overlap_count <- c(0L, 1L)
    r <- bedtools_intersect("-a a.bed -b b.bed -c -s")
    checkIdentical(cexp, eval(r))
    mcols(cexp)$overlap_count <- c(0L, 0L)
    r <- bedtools_intersect("-a a.bed -b b.bed -c -s -f 0.1")
    checkIdentical(cexp, eval(r))

    ## --- default intersection between a.bed and b.bed ---
    exp <- GRanges("chr1", IRanges(c(101, 101), c(101, 110)),
                   name=c("a2", "a2"),
                   score=c(2, 2), strand=c("-", "-"))
    r <- bedtools_intersect("-a a.bed -b b.bed")
    checkIdentical(exp, fixup(eval(r)))

    ## --- -wa / -wb / -wo / -wao reporting modes ---
    exp_a <- import("a.bed")
    exp_b <- import("b.bed")
    r <- bedtools_intersect("-a a.bed -b b.bed -wa")
    checkIdentical(exp_a[c(2, 2)], eval(r))

    exp_a_b <- Pairs(exp_a[c(2, 2)], exp_b[2:3])
    r <- bedtools_intersect("-a a.bed -b b.bed -wa -wb")
    checkIdentical(exp_a_b, eval(r))

    exp_o <- exp_a_b
    mcols(exp_o)$overlap_width <- c(1L, 10L)
    r <- bedtools_intersect("-a a.bed -b b.bed -wo")
    checkIdentical(exp_o, eval(r))

    ## -wao performs a left outer join; unmatched rows pair with an
    ## NA-filled range on the "." seqlevel
    suppressWarnings({
        first <- exp_a[c(1, 2, 2)]
        seqlevels(first) <- c(".", seqlevels(first))
        exp_loj <- Pairs(first, c(HelloRanges:::NAGRanges(exp_b), exp_b[2:3]))
    })
    mcols(exp_loj)$overlap_width <- c(0L, 1L, 10L)
    r <- bedtools_intersect("-a a.bed -b b.bed -wao")
    checkIdentical(exp_loj, eval(r))

    r <- bedtools_intersect("-a a.bed -b b.bed -wo -s")
    checkIdentical(exp_o[1L], eval(r))
    r <- bedtools_intersect("-a a.bed -b b.bed -wao -s")
    checkIdentical(exp_loj[1:2], eval(r))

    ## p <- pipe("cat a.bed | Rscript -e 'library(HelloRanges); export(eval(bedtools_intersect(\"-a stdin -b b.bed\")), stdout(), format=\"bed\")'", "r")
    ## checkIdentical(exp, import(p, format="bed"))
    ## close(p)
    ## p <- pipe("cat b.bed | Rscript -e 'library(HelloRanges); export(eval(bedtools_intersect(\"-a a.bed -b stdin\")), stdout(), format=\"bed\")'", "r")
    ## checkIdentical(exp, import(p, format="bed"))
    ## close(p)

    ## --- BAM inputs: convert the SAM fixtures to BAM first ---
    one_block <- Rsamtools::asBam("one_block.sam", "one_block", overwrite=TRUE)
    two_blocks <- Rsamtools::asBam("two_blocks.sam", "two_blocks",
                                   overwrite=TRUE)
    three_blocks <- Rsamtools::asBam("three_blocks.sam", "three_blocks",
                                     overwrite=TRUE)

    ## -split treats each aligned block (exon) separately
    three_blocks_exp <- GenomicAlignments::readGAlignments(three_blocks)
    r <- bedtools_intersect("-a three_blocks.bam -b three_blocks_nomatch.bed")
    checkIdentical(three_blocks_exp, eval(r))
    r <- bedtools_intersect("-a three_blocks.bam -b three_blocks_nomatch.bed -split")
    checkIdentical(three_blocks_exp[NULL], eval(r))
    r <- bedtools_intersect("-a three_blocks.bam -b three_blocks_match.bed -split")
    checkIdentical(three_blocks_exp, eval(r))
    r <- bedtools_intersect("-a three_blocks.bam -b three_blocks_match.bed -split -s")
    checkIdentical(three_blocks_exp[NULL], eval(r))
    r <- bedtools_intersect("-a three_blocks.bam -b three_blocks_match_1bp.bed -split -f 0.1")
    checkIdentical(three_blocks_exp[NULL], eval(r))

    ## --- -split with -wo, BED and BAM query ---
    three_blocks_match <- import("three_blocks_match.bed")
    d <- import("d.bed")
    p <- Pairs(three_blocks_match, d)
    mcols(p)$overlap_width <- c(three_blocks_match=5L)
    r <- bedtools_intersect("-a three_blocks_match.bed -b d.bed -split -wo")
    checkIdentical(p, eval(r))
    first(p) <- asBED(three_blocks_exp)
    mcols(p)$overlap_width <- unname(mcols(p)$overlap_width)
    r <- bedtools_intersect("-a three_blocks.bam -b d.bed -split -wo -bed")
    checkIdentical(p, eval(r))

    ## --- BAM query with -c / -wo / -wao ---
    one_block_c_exp <- GenomicAlignments::readGAlignments(one_block)
    mcols(one_block_c_exp)$overlap_count <- 1L
    r <- bedtools_intersect("-a one_block.bam -b c.bed -c")
    checkIdentical(one_block_c_exp, eval(r))

    one_block_exp <- GenomicAlignments::readGAlignments(one_block)
    exp_c <- import("c.bed")
    bam_wo_exp <- Pairs(one_block_exp, exp_c)
    mcols(bam_wo_exp)$overlap_width <- 30L
    r <- bedtools_intersect("-a one_block.bam -b c.bed -wo")
    checkIdentical(bam_wo_exp, eval(r))
    seqlevels(first(bam_wo_exp)) <- c(".", seqlevels(first(bam_wo_exp)))
    seqlevels(second(bam_wo_exp)) <- c(".", seqlevels(second(bam_wo_exp)))
    r <- bedtools_intersect("-a one_block.bam -b c.bed -wao")
    checkIdentical(bam_wo_exp, eval(r))

    ## --- -f / -F / -r / -e minimum-overlap-fraction combinations ---
    ### FIXME: these -f tests (from bedtools) are not that great
    x <- import("x.bed")
    y <- import("y.bed")
    f_exp <- pintersect(x, y, ignore.strand=TRUE)
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.2")
    checkIdentical(f_exp, eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.21")
    checkIdentical(f_exp[NULL], eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -F 0.21")
    checkIdentical(f_exp, eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.21 -F 0.21")
    checkIdentical(f_exp[NULL], eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.21 -r")
    checkIdentical(f_exp[NULL], eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.19 -F 0.21")
    checkIdentical(f_exp, eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.19 -F 0.5")
    checkIdentical(f_exp, eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.19 -F 0.51")
    checkIdentical(f_exp[NULL], eval(r))
    r <- bedtools_intersect("-a x.bed -b y.bed -f 0.21 -F 0.21 -e")
    checkIdentical(f_exp, eval(r))
}
|
e3783901e10a4c1f577b235dc4b3aad8e5c18ba5
|
f400e65fec959b0bf45cce6cbbf1fa4d73d69324
|
/Datamining/xgboost_parameter.R
|
78d186798368be6434c707e2ed1f95fe77bae4e2
|
[] |
no_license
|
miniii222/study_in_graduate
|
afd726226d01fa7dceeadaed421d0e2bb862ea3b
|
70d3a2e29cf9497298c7505c883d78eab8d29bd7
|
refs/heads/master
| 2020-04-10T08:28:59.503539
| 2019-06-25T13:41:42
| 2019-06-25T13:41:42
| 160,906,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
xgboost_parameter.R
|
# Tuning XGBoost on the UCI adult census data.
# Tutorial:
# https://www.hackerearth.com/practice/machine-learning/machine-learning-algorithms/beginners-tutorial-on-xgboost-parameter-tuning-r/tutorial/
setwd("C:/Users/wjssm/Desktop/0.graduate/3rd/Datamining")
library(data.table)
library(mlr)
library(readr)

# Column names for the adult dataset (the raw files have no header row)
setcol <- c("age","workclass","fnlwgt","education","education-num",
            "marital-status","occupation","relationship","race",
            "sex","capital-gain","capital-loss","hours-per-week",
            "native-country","target")
train <- read_csv("adult_data.txt", col_names = setcol)
test <- read_csv("adult_test.txt", col_names = setcol)
setDT(train)
setDT(test)

# Check missing values per column.
# BUG FIX: the original called colSums(train[, is.na(train)]), which subsets
# columns by a logical matrix; colSums(is.na(.)) is what counts NAs.
colSums(is.na(train))
colSums(is.na(test))

# Quick data cleaning
library(stringr)
## remove the trailing "." from the test-set target labels
test[, target := substr(target, 1, nchar(target) - 1)]
## remove leading whitespace from all character columns
sapply(test, is.character)
char_col <- colnames(train)[sapply(test, is.character)]
for (i in char_col) set(train, j = i, value = str_trim(train[[i]], side = "left"))
for (i in char_col) set(test, j = i, value = str_trim(test[[i]], side = "left"))

# Recode NAs as an explicit "Missing" category
train[is.na(train)] <- "Missing"
test[is.na(test)] <- "Missing"

# One-hot encode the predictors; keep the target aside
labels <- train$target
ts_label <- test$target
new_tr <- model.matrix(~.+0, data = train[, -c("target"), with = F])
new_ts <- model.matrix(~.+0, data = test[, -c("target"), with = F])

# Convert the target to 0/1 ("<=50K" coded as 1)
table(labels)
table(ts_label)
labels <- ifelse(labels == '<=50K', 1, 0)
ts_label <- ifelse(ts_label == '<=50K', 1, 0)

library(xgboost)
dtrain <- xgb.DMatrix(data = new_tr, label = labels)
dtest <- xgb.DMatrix(data = new_ts, label = ts_label)

# Default-ish starting parameters for a binary classifier
params <- list(booster = 'gbtree', objective = 'binary:logistic',
               eta = 0.3, gamma = 0,
               max_depth = 6, min_child_weight = 1,
               subsample = 1, colsample_bytree = 1)

# 5-fold CV to pick the number of boosting rounds.
# BUG FIX: the argument is early_stopping_rounds; the misspelled
# early_stop_round was silently ignored by xgb.cv.
xgbcv <- xgb.cv(params = params, data = dtrain, nrounds = 100, nfold = 5,
                showsd = TRUE, stratified = TRUE,
                print_every_n = 10, early_stopping_rounds = 20, maximize = FALSE)
names(xgbcv)
xgbcv$evaluation_log[, 'test_error_mean']
min(xgbcv$evaluation_log[, 'test_error_mean'])  # best iteration = 75

# Final model, with early stopping monitored on the held-out test set
xgb1 <- xgb.train(params = params, data = dtrain, nrounds = 75,
                  watchlist = list(val = dtest, train = dtrain),
                  print_every_n = 10, early_stopping_rounds = 10,
                  maximize = FALSE, eval_metric = 'error')
xgbpred <- predict(xgb1, dtest)
|
4e0f162029dc086e20518a398fce22cf1f296e5d
|
aa5a0d32a413a832e2cf6a68d2003185246ee3ae
|
/man/Lynch.distance.Rd
|
5c337ec713625f0f2c21e0a618c50e7946a1eafe
|
[] |
no_license
|
lvclark/polysat
|
62fb4ddcad15db6886d1b1d38e04c6892998dd9f
|
ab6f69af3310e102846faa1c8ea4242eae3e87d8
|
refs/heads/master
| 2022-09-19T04:23:37.375464
| 2022-08-23T12:52:08
| 2022-08-23T12:52:08
| 39,743,630
| 10
| 7
| null | 2018-09-10T15:24:09
| 2015-07-26T21:58:14
|
R
|
UTF-8
|
R
| false
| false
| 1,850
|
rd
|
Lynch.distance.Rd
|
\name{Lynch.distance}
\alias{Lynch.distance}
\title{
Calculate Band-Sharing Dissimilarity Between Genotypes
}
\description{
Given two genotypes in the form of vectors of unique alleles, a
dissimilarity is calculated as:
1 - (number of alleles in common)/(average number of alleles per genotype).
}
\usage{
Lynch.distance(genotype1, genotype2, usatnt = NA, missing = -9)
}
\arguments{
\item{genotype1}{
A vector containing all alleles for a particular sample and locus.
Each allele is only present once in the vector.
}
\item{genotype2}{
A vector of the same form as \code{genotype1}, for another sample at the
same locus.
}
\item{usatnt}{
The microsatellite repeat length for this locus (ignored by the function).
}
\item{missing}{
The symbol used to indicate missing data in either genotype vector.
}
}
\details{
Lynch (1990) defines a simple measure of similarity between DNA
fingerprints. This is 2 times the number of bands that two fingerprints
have in common, divided by the total number of bands that the two genotypes
have. \code{Lynch.distance} returns a dissimilarity, which is 1 minus
the similarity.
}
\value{
If the first element of either or both input genotypes is equal to
\code{missing}, NA is returned.
Otherwise, a numerical value is returned. This is one minus the
similarity. The similarity is calculated as the number of alleles that
the two genotypes have in common divided by the mean length of the two
genotypes.
}
\references{
Lynch, M. (1990) The similarity index and DNA fingerprinting.
\emph{Molecular Biology and Evolution} \bold{7}, 478-484.
}
\author{
Lindsay V. Clark
}
\seealso{
\code{\link{Bruvo.distance}},
\code{\link{meandistance.matrix}}
}
\examples{
Lynch.distance(c(100,102,104), c(100,104,108))
Lynch.distance(-9, c(102,104,110))
Lynch.distance(c(100), c(100,104,106))
}
\keyword{ arith }
|
4dd1fc083d7fa690a4b0708999241e086ce32b33
|
a7f0b0a405fc7f1d4c9e4ebb754529f3877a20dd
|
/RcodeData/vecomp.r
|
832c6b5e31c1e650f708a05d6e203e0444cb2ec1
|
[] |
no_license
|
tmuntianu/advancedstatistics
|
4298c0d0f3a52675d85d67f3aac3b7b11ab5cdc1
|
3885b67da560702df4b6d05b240ada6fde2f727c
|
refs/heads/master
| 2020-12-22T19:03:58.858657
| 2020-01-29T04:08:34
| 2020-01-29T04:08:34
| 236,890,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
vecomp.r
|
# Compare six ways of filling the N x N matrix
#   M[i, j] = 3*x_i^2 - 4*x_i*y_j + 2*y_j^2
# over the grid [-1, 1]^2, then approximate the integral of exp(-M) over the
# unit disc. date() is printed before and after the fill so each strategy can
# be timed by eye.
#
# job: 1 = scalar double loop, 2 = row-wise vectorized loop,
#      3 = matrix algebra, 4 = replicated-vector grid,
#      5 = expand.grid, 6 = outer()
# N:   number of grid points per axis
# Returns the integral approximation.
vecomp <-
function(job=1,N=10000)
{
  grid_x <- seq(-1, 1, length.out = N)
  grid_y <- seq(-1, 1, length.out = N)
  quad <- matrix(nrow = N, ncol = N)
  ones <- rep(1, N)
  print(date())
  if (job == 1) {
    # Scalar double loop (slowest)
    for (row in seq_len(N)) {
      for (col in seq_len(N)) {
        quad[row, col] <- 3 * grid_x[row]^2 - 4 * grid_x[row] * grid_y[col] + 2 * grid_y[col]^2
      }
    }
  }
  if (job == 2) {
    # One vectorized row at a time
    for (row in seq_len(N)) {
      quad[row, ] <- 3 * grid_x[row]^2 - 4 * grid_x[row] * grid_y + 2 * grid_y^2
    }
  }
  if (job == 3) {
    # Rank-one outer products via matrix algebra
    quad <- 3 * grid_x^2 %*% t(ones) - 4 * grid_x %*% t(grid_y) + 2 * ones %*% t(grid_y^2)
  }
  if (job == 4) {
    # Build the full coordinate grid by vector replication
    xs <- rep(grid_x, N)
    ys <- rep(grid_y, each = N)
    quad <- matrix(3 * xs^2 - 4 * xs * ys + 2 * ys^2, nrow = N, ncol = N)
  }
  if (job == 5) {
    # Same grid via expand.grid (first column varies fastest, matching x)
    coords <- expand.grid(grid_x, grid_y)
    quad <- matrix(3 * coords[, 1]^2 - 4 * coords[, 1] * coords[, 2] + 2 * coords[, 2]^2,
                   nrow = N, ncol = N)
  }
  if (job == 6) {
    # outer() with the quadratic form as the kernel
    quad_form <- function(a, b) 3 * a^2 - 4 * a * b + 2 * b^2
    quad <- outer(X = grid_x, Y = grid_y, FUN = quad_form)
  }
  print(date())
  quad <- exp(-quad)
  # Squared radius at every grid point; keep only cells inside the unit disc
  radius_sq <- grid_x^2 %*% t(ones) + ones %*% t(grid_y^2)
  cell_area <- (max(grid_x) - min(grid_x)) * (max(grid_y) - min(grid_y)) / N^2
  sum(quad[radius_sq < 1]) * cell_area
}
|
6c5c35e7783186c745014dc252cc197033f2ede8
|
361041c919cf7013441be2a7fbf2436b4b41e636
|
/files/SLGfiles/kara/IsItNumbers.R
|
87d82af7e99fb71d9ca471e87e2535b4f8d04a2f
|
[
"Apache-2.0"
] |
permissive
|
jbpost2/jbpost2.github.io
|
2ef930469f3881104f5d5f4ccaab562f7e0c82a8
|
91e5c73e4f426c3b5fe268ba250ba5c3427119a9
|
refs/heads/main
| 2023-04-29T16:41:56.476692
| 2023-04-17T03:45:05
| 2023-04-17T03:45:05
| 82,014,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,586
|
r
|
IsItNumbers.R
|
#Anders, S., McCarthy, D., et al. (2013)
# Count-based differential expression analysis
# of RNA sequencing data using R and Bioconductor.
# Nature Protocols,8(9) 1765-1786. doi:10.1038/nprot.2013.099
# This is an example from the above protocol,
# placed in one document for demonstration purposes.
# See original paper for full details.
# NOTE: this script assumes a `samples` data frame (with columns countf,
# shortname, condition, and for the last example time) has been defined
# beforehand, as in the protocol.
#-----------------------------------------------------
# Load the edgeR package and use the utility function,
# readDGE, to read in the count files from a transcript
# quantification software, such as HTSeq-Count
# (biocLite is the legacy Bioconductor installer; newer R uses BiocManager)
source("https://bioconductor.org/biocLite.R")
biocLite("edgeR")
library("edgeR")
counts <- readDGE(samples$countf)$counts
#----------------------------------------------
# Filter weakly expressed features
# (less than 1 read per million in a group)
noint <- rownames(counts) %in%
  c("no_feature", "ambiguous", "too_low_aQual",
    "not_aligned", "alignment_not_unique")
# cpm() (counts per million) is the edgeR function; the original script
# called a non-existent cmp()
cpms <- cpm(counts)
keep <- rowSums(cpms > 1) >= 3 & !noint
counts <- counts[keep, ]
#----------------------------------------------
#Visualize and inspect the count table
colnames(counts) <- samples$shortname
head(counts[, order(samples$condition)], 5)
#----------------------------------------------
#Create a DGEList object
d <- DGEList(counts = counts, group = samples$condition)
#----------------------------------------------
#Estimate normalization factors (TMM)
d <- calcNormFactors(d)
#----------------------------------------------
#Create a design matrix (one column per condition, no intercept)
design <- model.matrix( ~ 0 + group, data = samples)
#estimate dispersion values
# edgeR function names are lowerCamelCase; the original script used an
# incorrect leading capital (EstimateGLMTrendedDispersion)
disperse <- estimateGLMTrendedDispersion(d, design)
#Fit the negative-binomial GLM
model <- glmFit(disperse, design)
# Placeholder: supply a real `coef` or `contrast` argument here
de <- glmLRT(model, "SYNTAX for DESIRED CONTRAST HERE")
#----------------------------------------------
# Chen, Y., McCarthy, D. et al. (2008)
# edgeR: differential expression analysis of
# digital gene expression data user's guide.
# Below is a brief example from the edgeR user's
# guide to get a feel for the syntax involved.
# See user's guide for more examples and
# package details.
#----------------------------------------------
#EXAMPLE 1: (pg. 30)
#Group B vs. Group A
de <- glmLRT(model, contrast = c(-1, 1, 0))
#Group A vs. average of Group B, Group C
de <- glmLRT(model, contrast = c(1, -0.5, -0.5))
#You can do the same thing with coefficients
design2 <- model.matrix( ~ group, data = samples)
#Group B vs. Group A
de2 <- glmLRT(model, coef=2)
#Similar syntax with added covariates
design3 <- model.matrix( ~ group + time + group:time, data=samples)
|
088161d5312b7c77717cd01784743b9cfa7a9501
|
1a0201b8c4c778e9791fe8300cfdef9ba3b303fd
|
/tests/testthat.R
|
cc67730135f63a7a521f08976f7a8b7269aa2038
|
[] |
no_license
|
datocres/coursera-fars
|
3c47ed6c986ad312f40df3f21420f6aa7b3e42cd
|
9ee789a571369f89f49f9c38c503d98988c9e085
|
refs/heads/master
| 2021-01-21T14:08:44.883033
| 2018-04-12T16:16:19
| 2018-04-12T16:16:19
| 91,820,121
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Test runner for the farsdata package.
# Use library() rather than require(): require() merely returns FALSE when a
# package is missing, so a missing dependency would slip through silently;
# library() fails loudly, which is what a test harness should do.
library(testthat)
library(farsdata)

test_check("farsdata")
|
e0efe776209fccc10f273ea787a5136934cc5b4b
|
220fb68383c0c2845aa1f47544108b874fb90261
|
/2013_haier_crawlers/U HE crawler ifeng.R
|
b2de86c14c9945f17ce0ecb4fc80f993684bf524
|
[] |
no_license
|
lipengyu/text_mining
|
b69dda69b908e81c302bf197ec5bffefb4ac734c
|
417795033408e7236422ec5766852815b4b831a6
|
refs/heads/master
| 2020-12-11T07:31:27.110823
| 2014-01-27T08:26:37
| 2014-01-27T08:26:37
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 2,509
|
r
|
U HE crawler ifeng.R
|
### Scrape article data from the iFeng (Phoenix) tech home-appliance channel
# Step 1: collect section ("board") names and URLs from the main navigation bar
library(XML)
url <- "http://tech.ifeng.com/elec/"
url1 <- htmlTreeParse( url, useInternalNodes=TRUE,encoding='utf8' )
nav <- getNodeSet(url1, "//body//div[@class = 'mainNav']/ul/li")
ban <- sapply( nav, xmlValue )
nav <- getNodeSet(url1, "//body//div[@class = 'mainNav']/ul/li/a")
link <- sapply( nav, function(el) xmlGetAttr(el, "href") )
banlist <- as.data.frame( cbind( ban , link ))
rm( ban, link, nav , url , url1)
# Step 2: collect article metadata within each section (first listing page only)
# NOTE(review): sections 2:7 are hard-coded — confirm this still matches the
# navigation layout if the site structure changes.
ttlist <- data.frame()
for (i in 2:7) {
url <- as.character( banlist[i,2] )
url1 <- htmlTreeParse( url, useInternalNodes=TRUE,encoding='utf8' )
a <- getNodeSet( url1, "//body//div[@class = 'newsList']//li/a" )
title <- sapply( a , xmlValue)
link <- sapply( a, function(el) xmlGetAttr(el, "href") )
# Article URLs embed a date as yyyy_mm/dd; extract those 10 characters
b <- regexpr( "20[0-9]{2}_[0-9]{2}\\/[0-9]{2}" , link)
time <- substr( link, b, b+9)
tt <- as.data.frame( cbind( banlist[i,1], title, time, link ))
ttlist <- rbind( ttlist, tt)
}
rm(url, url1, a, b, title, link, time, i, tt)
## Step 3: fetch the full text of one article, following pagination
url <- as.character("http://tech.ifeng.com/elec/home/detail_2012_04/09/13747768_0.shtml")
ifeng1page <- function(url) {
url1 <- htmlTreeParse( url, useInternalNodes=TRUE,encoding='utf8' )
a <- getNodeSet( url1, "//body//div[@class = 'pageNum']")
if ( length(a) >0 ) {
b <- length( xmlElementsByTagName( a[[1]] , "a") ) # count of pagination links (number of article pages)
} else b <- 0
vm <- vector()
# Fetch the full content of a multi-page article
# NOTE(review): the loop over 0:b requests b+1 pages by rewriting the
# "_<i>.shtml" suffix — confirm the pageNum links exclude the current page.
if (b > 0) {
for ( i in 0:b) {
ul <- paste( substr( url, 1, regexpr( ".shtml" , url)-2 ), i, ".shtml", sep="" )
ct <- getNodeSet( htmlTreeParse( ul, useInternalNodes=TRUE,encoding='utf8' ), "//body//div[@id = 'artical_real']")
vm <- paste( vm, sapply( ct, xmlValue) )
} # end of multi-page fetch loop
} else { # single-page article
ct <- getNodeSet( htmlTreeParse( url, useInternalNodes=TRUE,encoding='utf8' ), "//body//div[@id = 'artical_real']")
vm <- sapply( ct, xmlValue)
} # end of else
out <- vm
#browser()
}
# Step 4: fetch content for every article and write the table to the database
for ( i in 1: nrow(ttlist)) {
ttlist$content[i] <- ifeng1page( as.character( ttlist[i,4]) )
}
names(ttlist)
head(ttlist$content)
# Collapse runs of whitespace in the scraped bodies to single spaces
ttlist$content <- gsub( "[[:space:]]+", " ",ttlist$content)
library(RODBC)
con <- odbcConnect("mysql", uid="dingc", pwd="dingc" )
sqlSave(con, ttlist, tablename = "ods_ifeng_tech", append = T, rownames = F, addPK = FALSE)
# NOTE: varchar(255) is too small for article bodies; the MySQL column was
# widened to varchar(65535) by hand.
close(con)
|
016475d93f79a66742cc8a2e66082723ba202f3f
|
fa0f3dce30e511ed185e8456293ee09bfe96b33d
|
/RMark/man/make.mark.model.Rd
|
f7a56ece0b3947e662288b3c5be2a1f5d45b2900
|
[] |
no_license
|
MichaelBBrown/RMark
|
f9b5d40ab03bfe4f54fd0550ed27906fced6f442
|
efc08916decbe8c23cea00fe084b497a5efa1a67
|
refs/heads/master
| 2020-05-01T19:12:13.865385
| 2019-02-05T16:26:31
| 2019-02-05T16:26:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 25,489
|
rd
|
make.mark.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make.mark.model.R
\name{make.mark.model}
\alias{make.mark.model}
\title{Create a MARK model for analysis}
\usage{
make.mark.model(data, ddl, parameters = list(), title = "",
model.name = NULL, initial = NULL, call = NULL, default.fixed = TRUE,
options = NULL, profile.int = FALSE, chat = NULL, simplify = TRUE,
input.links = NULL, parm.specific = FALSE, mlogit0 = FALSE,
hessian = FALSE, accumulate = TRUE, icvalues = NULL, wrap = TRUE,
nodes = 101, useddl = FALSE)
}
\arguments{
\item{data}{Data list resulting from function \code{\link{process.data}}}
\item{ddl}{Design data list from function \code{\link{make.design.data}}}
\item{parameters}{List of parameter formula specifications}
\item{title}{Title for the analysis (optional)}
\item{model.name}{Model name to override default name (optional)}
\item{initial}{Vector of named or unnamed initial values for beta parameters
or previously run model (optional)}
\item{call}{Pass function call when this function is called from another
function (e.g.\code{\link{mark}}) (internal use)}
\item{default.fixed}{if TRUE, real parameters for which the design data have
been deleted are fixed to default values}
\item{options}{character string of options for Proc Estimate statement in
MARK .inp file}
\item{profile.int}{if TRUE, requests MARK to compute profile intervals}
\item{chat}{pre-specified value for chat used by MARK in calculations of
model output}
\item{simplify}{if FALSE, does not simplify PIM structure}
\item{input.links}{specifies set of link functions for parameters with non-simplified structure}
\item{parm.specific}{if TRUE, forces a link to be specified for each parameter}
\item{mlogit0}{if TRUE, any real parameter that is fixed to 0 and has an mlogit link will
have its link changed to logit so it can be simplified}
\item{hessian}{if TRUE specifies to MARK to use hessian rather than second partial matrix}
\item{accumulate}{if TRUE accumulate like data values into frequencies}
\item{icvalues}{numeric vector of individual covariate values for computation of real values}
\item{wrap}{if TRUE, data lines are wrapped to be length 80; if length of a row is not a
problem set to FALSE and it will run faster}
\item{nodes}{number of integration nodes for individual random effects (min 15, max 505, default 101)}
\item{useddl}{if TRUE and no rows of ddl are missing (deleted) then it will use ddl in place of full.ddl that is created internally.}
}
\value{
model: a MARK object except for the elements \code{output} and
\code{results}. See \code{\link{mark}} for a detailed description of the
list contents.
}
\description{
Creates a MARK model object that contains a MARK input file with PIMS and
design matrix specific to the data and model structure and formulae
specified for the model parameters.
}
\details{
This function is called by \code{\link{mark}} to create the model but it can
be called directly to create but not run the model. All of the arguments
have default values except for the first 2 that specify the processed data
list (\code{data}) and the design data list (\code{ddl}). If only these 2
arguments are specified default models are used for the parameters. For
example, following with the example from \code{\link{process.data}} and
\code{\link{make.design.data}}, the default model can be created with:
\code{mymodel=make.mark.model(proc.example.data,ddl)}
The call to \code{make.mark.model} creates a model object but does not do
the analysis. The function returns a list that includes several fields
including a design matrix and the MARK input file that will be used with
\code{MARK.EXE} to run the analysis from function
\code{\link{run.mark.model}}. The following shows the names of the list
elements in mymodel:
\preformatted{ names(mymodel) [1] "data" "model" "title" "model.name"
"links" [6] "mixtures" "call" "parameters" "input" "number.of.groups" [11]
"group.labels" "nocc" "begin.time" "covariates" "fixed" [16] "design.matrix"
"pims" "design.data" "strata.labels" "mlogit.list" [21] "simplify" }
The list is defined to be a mark object which is done by assigning a class
vector to the list. The classes for an R object can be viewed with the
class function as shown below:
\preformatted{ class(mymodel) [1] "mark" "CJS" } Each MARK model has 2 class
values. The first identifies it as a mark object and the second identifies
the type of mark analysis, which is the default "CJS" (recaptures only) in
this case. The use of the class feature has advantages in using generic
functions and identifying types of objects. An object of class \code{mark}
is defined in more detail in function \code{\link{mark}}.
To fit non-trivial models it is necessary to understand the remaining
calling arguments of \code{make.mark.model} and R formula notation. The
model formulae are specified with the calling argument \code{parameters}.
It uses a similar list structure as the \code{parameters} argument in
\code{\link{make.design.data}}. It expects to get a list with elements named
to match the parameters in the particular analysis (e.g., Phi and p in CJS)
and each list element is a list, so it is a list of lists). For each
parameter, the possible list elements are \code{formula, link, fixed,
component, component.name, remove.intercept}. In addition, for closed
capture models and robust design model, the element \code{share} is included
in the list for p (capture probabilities) and GammaDoublePrime
(respectively) to indicate whether the model is shared (share=TRUE) or
not-shared (the default) (share=FALSE) with c (recapture probabilities) and
GammaPrime respectively.
\code{formula} specifies the model for the parameter using R formula
notation. An R formula is denoted with a \code{~} followed by variables in
an equation format possibly using the \code{+} , \code{*}, and \code{:}
operators. For example, \code{~sex+age} is an additive model with the main
effects of \code{sex} and \code{age}. Whereas, \code{~sex*age} includes the
main effects and the interaction and it is equivalent to the formula
specified as \code{~sex+age+sex:age} where \code{sex:age} is the interaction
term. The model \code{~age+sex:age} is slightly different in that the main
effect for \code{sex} is dropped which means that intercept of the
\code{age} effect is common among the sexes but the age pattern could vary
between the sexes. The model \code{~sex*Age} which is equivalent to
\code{~sex + Age + sex:Age} has only 4 parameters and specifies a linear
trend with age and both the intercept and slope vary by sex. One additional
operator that can be useful is \code{I()} which allows computation of
variables on the fly. For example, the addition of the Agesq variable in
the design data (as described above) can be avoided by using the notation
\code{~Age + I(Age^2)} which specifies use of a linear and quadratic effect
for age. Note that specifying the model \code{~age + I(age^2)} would be
incorrect and would create an error because \code{age} is a factor variable
whereas \code{Age} is not.
As an example, consider developing a model in which Phi varies by age and p
follows a linear trend over time. This model could be specified and run as
follows:
\preformatted{ p.Time=list(formula=~Time) Phi.age=list(formula=~age)
Model.p.Time.Phi.age=make.mark.model(proc.example.data,ddl,
parameters=list(Phi=Phi.age,p=p.Time))
Model.p.Time.Phi.age=run.mark.model(Model.p.Time.Phi.age) }
The first 2 commands define the p and Phi models that are used in the
\code{parameter} list in the call to \code{make.mark.model}. This is a good
approach for defining models because it clearly documents the models, the
definitions can then be used in many calls to \code{make.mark.model} and it
will allow a variety of models to be developed quickly by creating different
combinations of the parameter models. Using the notation above with the
period separating the parameter name and the description (eg., p.Time) gives
the further advantage that all possible models can be developed quickly with
the functions \code{\link{create.model.list}} and
\code{\link{mark.wrapper}}.
Model formula can use any field in the design data and any individual
covariates defined in \code{data}. The restrictions on individual
covariates that was present in versions before 1.3 have now been removed.
You can now use interactions of individual covariates with all design data
covariates and products of individual covariates. You can specify
interactions of individual covariates and factor variables in the design
data with the formula notation. For example, \code{~region*x1} describes a
model in which the slope of \code{x1} varies by \code{region}. Also,
\code{~time*x1} describes a model in which the slope for \code{x1} varied by
time; however, there would be only one value of the covariate per animal so
this is not a time varying covariate model. Models with time varying
covariates are now more easily described with the improvements in version
1.3 but there are restrictions on how the time varying individual covariates
are named. An example would be a trap dependence model in which capture
probability on occasion i+1 depends on whether they were captured on
occasion i. If there are n occasions in a CJS model, the 0/1 (not
caught/caught) for occasions 1 to n-1 would be n-1 individual covariates to
describe recapture probability for occasions 2 to n. For times 2 to n, a
design data field could be defined such that the variable timex is 1 if
time==x and 0 otherwise. The time varying covariates must be named with a
time suffix on the base name of the covariate. In this example they would be
named as \code{x2,. . .,xn} and the model could be specified as \code{~time
+ x} for time variation and a constant trap effect or as \code{~time +
time:x} for a trap effect that varied by time. If in the
\code{\link{process.data}} call, the argument \code{begin.time} was set to
the year 1990, then the variables would have to be named x1991,x1992,...
because the first recapture occasion would be 1991. Note that the times are
different for different parameters. For example, survival is labeled based
on the beginning of the time interval which would be 1990 so the variables
should be named appropriately for the parameter model in which they will be
used.
In previous versions to handle time varying covariates with a constant
effect, it was necessary to use the component feature of the parameter
specification to be able to append one or more arbitrary columns to the
design matrix. That is no longer required for individual covariates and the
component feature was removed in v2.0.8.
There are three other elements of the parameter list that can be useful on
occasion. The first is \code{link} which specifies the link function for
transforming between the beta and real parameters. The possible values are
"logit", "log", "identity" and "mlogit(*)" where * is a numeric identifier.
The "sin" link is not allowed because all models are specified using a
design matrix. The typical default values are assigned to each parameter
(eg "logit" for probabilities, "log" for N, and "mlogit" for pent in POPAN),
so in most cases it will not be necessary to specify a link function.
The second is \code{fixed} which allows real parameters to be set at fixed
values. The values for \code{fixed} can be either a single value or a list
with 5 alternate forms for ease in specifying the fixed parameters.
Specifying \code{fixed=value} will set all parameters of that type to the
specified value. For example, \code{Phi=list(fixed=1)} will set all Phi to
1. This can be useful for situations like specifying F in the
Burnham/Barker models to all have the same value of 1. Fixed values can
also be specified as a list in which values are specified for certain
indices, times, ages, cohorts, and groups. The first 3 will be the most
useful. The first list format is the most general and flexible but it
requires an understanding of the PIM structure and index numbers for the
real parameters. For example,
\code{Phi=list(formula=~time, fixed=list(index=c(1,4,7),value=1))}
specifies Phi varying by time, but the real parameters 1,4,7 are set to 1.
The \code{value} field is either a single constant or its length must match
the number of indices. For example,
\code{Phi=list(formula=~time, fixed=list(index=c(1,4,7),value=c(1,0,1)))}
sets real parameters 1 and 7 to 1 and real parameter 4 to 0. Technically,
the index/value format for fixed is not wedded to the parameter type (i.e.,
values for p can be assigned within Phi list), but for the sake of clarity
they should be restricted to fixing real parameters associated with the
particular parameter type. The \code{time} and \code{age} formats for fixed
will probably be the most useful. The format fixed=list(time=x, value=y)
will set all real parameters (of that type) for time x to value y. For
example,
\code{p=list(formula=~time,fixed=list(time=1986,value=1))}
sets up time varying capture probability but all values of p for 1986 are
set to 1. This can be quite useful to set all values to 0 in years with no
sampling (e.g., \preformatted{fixed=list(time=c(1982,1984,1986), value=0)}).
The \code{age}, \code{cohort} and \code{group} formats work in a similar
fashion. It is important to recognize that the value you specify for
\code{time}, \code{age}, \code{cohort} and \code{group} must match the
values in the design data list. This is another reason to add binned fields
for age, time etc with \code{\link{add.design.data}} after creating the
default design data with \code{\link{make.design.data}}. Also note that the
values for \code{time} and \code{cohort} are affected by the
\code{begin.time} argument specified in \code{\link{process.data}}. Had I
not specified \code{begin.time=1980}, to set p in the last occasion (1986),
the specification would be
\code{p=list(formula=~time,fixed=list(time=7,value=1))}
because begin.time defaults to 1. The advantage of the time-, age-, and
cohort- formats over the index-format is that it will work regardless of the
group definition which can easily be changed by changing the \code{groups}
argument in \code{\link{process.data}}. The index-format will be dependent
on the group structure because the indexing of the PIMS will most likely
change with changes in the group structure.
Parameters can also be fixed at default values by deleting the specific rows
of the design data. See \code{\link{make.design.data}} and material below.
The default value for fixing parameters for deleted design data can be
changed with the \code{default=value} in the parameter list.
Another useful element of the parameter list is the
\code{remove.intercept} argument. It is set to TRUE to forcefully remove
the intercept. In R notation this can be done by specifiying the formula
notation ~-1+... but in formula with nested interactions of factor variables
and additive factor variables the -1 notation will not remove the intercept.
It will simply adjust the column definitions but will keep the same number
of columns and the model will be overparameterized. The problem occurs with
nested factor variables like tostratum within stratum for multistrata
designs (see \code{\link{mstrata}}). As shown in that example, you can
build a formula -1+stratum:tostratum to have transitions that are
stratum-specific. If however you also want to add a sex effect and you
specify -1+sex+stratum:tostratum it will add 2 columns for sex labelled M
and F when in fact you only want to add one column because the intercept is
already contained within the stratum:tostratum term. The argument
remove.intercept will forcefully remove the intercept but it needs to be
able to find a column with all 1's. For example,
Psi=list(formula=~sex+stratum:tostratum,remove.intercept=TRUE) will work but
Psi=list(formula=~-1+sex+stratum:tostratum,remove.intercept=TRUE) will not
work. Also, the -1 notation should be used when there is not an added
factor variable because
\preformatted{Psi=list(formula=~stratum:tostratum,remove.intercept=TRUE)}
will not work because while the stratum:tostratum effectively includes an
intercept it is equivalent to using an identity matrix and is not specified
as treatment contrast with one of the columns as all 1's.
The final argument of the parameter list is contrast which can be used to change
the contrast used for model.matrix. It uses the default if none specified. The form is
shown in ?model.matrix.
The argument simplify determines whether the pims are simplified such that
only indices for unique and fixed real parameters are used. For example,
with an all different PIM structure with CJS with K occasions there are
K*(K-1) real parameters for Phi and p. However, if you use simplify=TRUE
with the default model of Phi(.)p(.), the pims are re-indexed to be 1 for
all the Phi's and 2 for all the p's because there are only 2 unique real
parameters for that model. Using simplify can speed analysis markedly and
probably should always be used. This was left as an argument only to test
that the simplification was working and produced the same likelihood and
real parameter estimates with and without simplification. It only adjust the
rows of the design matrix and not the columns. There are some restrictions
for simplification. Real parameters that are given a fixed value are
maintained in the design matrix although it does simplify amongst the fixed
parameters. For example, if there are 50 real parameters all fixed to a
value of 1 and 30 all fixed to a value of 0, they are reduced to 2 real
parameters fixed to 1 and 0. Also, real parameters such as Psi in
Multistrata and pent in POPAN that use multinomial logits are not simplified
because they must maintain the structure created by the multinomial logit
link. All other parameters in those models are simplified. The only
downside of simplification is that the labels for real parameters in the
MARK output are unreliable because there is no longer a single label for the
real parameter because it may represent many different real parameters in
the all-different PIM structure. This is not a problem with the labels in R
because the real parameter estimates are translated back to the
all-different PIM structure with the proper labels.
The argument \code{default.fixed} is related to deletion of design data (see
\code{\link{make.design.data}}). If design data are deleted and
\code{default.fixed=T} the missing real parameters are fixed at a reasonable
default to represent structural "zeros". For example, p is set to 0, Phi is
set to 1, pent is set to 0, etc. For some parameters there are no
reasonable values (e.g., N in POPAN), so not all parameters will have
defaults. If a parameter does not have a default or if
\code{default.fixed=F} then the row in the design matrix for that parameter
is all zeroes and its real value will depend on the link function. For
example, with "logit" link the real parameter value will be 0.5 and for the
log link it will be 1. As long as the inverse link is defined for 0 it will
not matter in those cases in which the real parameter is not used because it
represents data that do not exist. For example, in a "CJS" model if initial
captures (releases) only occur every other occasion, but recaptures
(resightings) occurred every occasion, then every other cohort (row) in the
PIM would have no data. Those rows (cohorts) could be deleted from the
design data and it would not matter if the real parameter was fixed.
However, for the case of a Jolly-Seber type model (eg POPAN or Pradel
models) in which the likelihood includes a probability for the leading
zeroes and first 1 in a capture history (a likelihood component for the
first capture of unmaked animals), and groups represent cohorts that enter
through time, you must fix the real parameters for the unused portion of the
PIM (ie for occasions prior to time of birth for the cohort) such that the
estimated probability of observing the structural 0 is 1. This is easily
done by setting the pent (probability of entry) to 0 or by setting the
probability of capture to 0 or both. In that case if
\code{default.fixed=F}, the probabilities for all those parameters would be
incorrectly set to 0.5 for p and something non-zero but not predetermined
for pent because of the multinomial logit. Now it may be possible that the
model would correctly estimate these as 0 if the real parameters were kept
in the design, but we know what those values are in that case and they need
not be estimated. If it is acceptable to set \code{default.fixed=F}, the
functions such as \code{\link{summary.mark}} recognize the non-estimated
real parameters and they are not shown in the summaries because in essence
they do not exist. If \code{default.fixed=T} the parameters are displayed
with their fixed value and for \code{summary.mark(mymodel,se=TRUE)}, they
are listed as "Fixed".
Missing design data does have implications for specifying formula but only
when interactions are specified. With missing design data various factors
may not be fully crossed. For example, with 2 factors A and B, each with 2
levels, the data are fully crossed if there are data with values A1&B1,
A1&B2, A2&B1 and A2&B2. If data exist for each of the 4 combinations then
you can described the interaction model as ~A*B and it will estimate 4
values. However, if data are missing for one of more of the 4 cells then
the "interaction" formula should be specified as ~-1+A:B or ~-1+B:A or
~-1+A%in%B or ~-1+B%in%A to estimate all of the parameters represented by
the combinations with data. An example of this could be a marking program
with multiple sites which resighted at all occasions but which only marked
at sites on alternating occasions. In that case time is nested within site
and time-site interaction models would be specified as ~-1+time:site.
The argument \code{title} supplies a character string that is used to label
the output. The argument \code{model.name} is a descriptor for the model
used in the analysis. The code constructs a model name from the formula
specified in the call (e.g., \code{Phi(~1)p(~time)}) but on occasion the
name may be too long or verbose, so it can be over-ridden with the
\code{model.name} argument.
The final argument \code{initial} can be used to provide initial estimates
for the beta parameters. It is either 1) a single starting value for each
parameter, 2) an unnamed vector of values (one for each parameter), 3) named
vector of values, or 4) the name of \code{mark} object that has already been
run. For cases 3 and 4, the code only uses appropriate initial beta
estimates in which the column names of the design matrix (for model) or
vector names match the column names in the design matrix of the model to be
run. Any remaining beta parameters without an initial value specified are
assigned a 0 initial value. If case 4 is used the models must have the same
number of rows in the design matrix and thus presumably the same structure.
As long as the vector elements are named (#3), the length of the
\code{initial} vector no longer needs to match the number of parameters in
the new model as long as the elements are named. The names can be retrieved
either from the column names of the design matrix or from
\code{rownames(x$results$beta)} where \code{x} is the name of the
\code{mark} object.
\code{options} is a character string that is tacked onto the \code{Proc
Estimate} statement for the MARK .inp file. It can be used to request
options such as NoStandDM (to not standardize the design matrix) or
SIMANNEAL (to request use of the simulated annealing optimization method) or
any existing or new options that can be set on the estimate proc.
}
\examples{
\donttest{
# This example is excluded from testing to reduce package check time
data(dipper)
#
# Process data
#
dipper.processed=process.data(dipper,groups=("sex"))
#
# Create default design data
#
dipper.ddl=make.design.data(dipper.processed)
#
# Add Flood covariates for Phi and p that have different values
#
dipper.ddl$Phi$Flood=0
dipper.ddl$Phi$Flood[dipper.ddl$Phi$time==2 | dipper.ddl$Phi$time==3]=1
dipper.ddl$p$Flood=0
dipper.ddl$p$Flood[dipper.ddl$p$time==3]=1
#
# Define range of models for Phi
#
Phidot=list(formula=~1)
Phitime=list(formula=~time)
PhiFlood=list(formula=~Flood)
#
# Define range of models for p
#
pdot=list(formula=~1)
ptime=list(formula=~time)
#
# Make assortment of models
#
dipper.phidot.pdot=make.mark.model(dipper.processed,dipper.ddl,
parameters=list(Phi=Phidot,p=pdot))
dipper.phidot.ptime=make.mark.model(dipper.processed,dipper.ddl,
parameters=list(Phi=Phidot,p=ptime))
dipper.phiFlood.pdot=make.mark.model(dipper.processed,dipper.ddl,
parameters=list(Phi=PhiFlood, p=pdot))
}
}
\seealso{
\code{\link{process.data}},\code{\link{make.design.data}},
\code{\link{run.mark.model}} \code{\link{mark}}
}
\author{
Jeff Laake
}
\keyword{model}
|
d3a4a8e75c4b6e8eda39c5d19bcea47c045fe6da
|
d1c5e83fc6ac0895f944d9abdda05214fa8b6dbc
|
/MRSP Master.R
|
06b4f930ae7ace0933da9e58034b5a44cd29aca8
|
[] |
no_license
|
EpidemiologyDVM/MRSP-Rnets
|
fd0a354cef389097297858ee8d0fe93492f70fe8
|
f485cef4c553d2ca3a5f2c6bc504f40f29024d9d
|
refs/heads/master
| 2021-08-08T07:08:43.732504
| 2017-11-09T21:16:53
| 2017-11-09T21:16:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
MRSP Master.R
|
# Master script for the MRSP analysis: loads packages, opens the staph
# database connection, then sources the import and analysis steps.
# LOAD PACKAGES
library(RMySQL)
library(rrcov)
# CONNECT TO STAPH DATABASE
# SECURITY(review): the database host, user name, and password are hard-coded
# in source and exposed to anyone with access to this file/repository.
# Move them to environment variables or a config file kept out of version
# control, and rotate the leaked credentials.
STAPH_DB_CXN <- dbConnect(RMySQL::MySQL(),
host = 'lanzas-lab.cmaytakjco4n.us-east-2.rds.amazonaws.com',
user = "LanzasNCSU", pass = "Lan2as_Lab",
dbname = "copos_staph",
port = 3306
)
# Run the import and analysis steps (paths relative to the working directory).
# Presumably both scripts use the open connection STAPH_DB_CXN -- verify.
source('IMPORT MRSP Data.R')
source('ANALYSIS MRSP MANOVA.R')
|
1789c011a43336889e209076bd4a07277079e23a
|
b022e68f0139455784d95133deb4cf8f487142ce
|
/demo/Silverman.R
|
f01e120d113c29d535b3ccfe7071b1dec2c47991
|
[] |
no_license
|
cran/REBayes
|
1e311610a28f37509d2da28f81385b17e84b4bbf
|
e26237baf78f2dc4bb776ae29a2ddfce68963435
|
refs/heads/master
| 2022-05-13T04:13:53.603760
| 2022-03-22T17:20:02
| 2022-03-22T17:20:02
| 17,681,954
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
Silverman.R
|
# Test of Silverman log-spline estimator in medde
# Silverman BW (1982). On the Estimation of a Probability Density Function
# by the Maximum Penalized Likelihood Method. Annals of Statistics, 10, 795-810
# Demo: overlay two penalized density estimates on a histogram of normal data.
require(REBayes)  # NOTE(review): library() would fail loudly if REBayes is missing
# Draw a standard-normal sample. No set.seed(), so the figure differs run-to-run.
n <- 500
x <- rnorm(n)
main = "Histogram and two Silverman Estimates"
hist(x, 70, freq = FALSE, main = main, col = grey(.9))
# Two fits differing only in the smoothing parameter lambda (smaller lambda =
# weaker penalty = wigglier estimate); see ?medde for Dorder/mass semantics.
f <- medde(x, Dorder = 2, lambda = 1, verb = 5, mass = 1)
lines(f, col = "red")
f <- medde(x, Dorder = 2, lambda = 0.05, verb = 5, mass = 1)
lines(f, col = "blue")
|
7a1a70c05fc76f43f059f9cdb9e2584f92774736
|
8bf5221174e52cbd489f81e75759014f22b7657c
|
/R/303_calculo_liquidaciones_HO.R
|
059d696c7f105ba733580fa7f21ba522aeb3fba5
|
[] |
no_license
|
pabloalban/IESS_C.D.625_Art25
|
bf2bcf425b82997313e9d8b12e69c97cab2a0e72
|
8f2d1edb0686c042b9587500b444aefedc059fca
|
refs/heads/main
| 2023-05-24T09:15:27.170559
| 2021-06-07T16:17:28
| 2021-06-07T16:17:28
| 368,037,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,679
|
r
|
303_calculo_liquidaciones_HO.R
|
# Compute pension settlement ("reliquidación") amounts for HOLCIM-case
# beneficiaries: build a per-person, per-year payment grid between a start
# and end date, apply simple interest, and save the results.
# (Runtime messages are intentionally left in Spanish.)
message(paste(rep("-", 100), collapse = ""))
message("\tCargando base de beneficiarios del Caso HOLCIM")
load(paste0(parametros$RData, "IESS_listado_beneficiarios.Rdata"))
load(paste0( parametros$RData, 'IESS_canasta_basica_familiar.RData'))
load(paste0( parametros$RData, 'IESS_ultimo_sueldo.RData'))
message("\tCalculando reliquidaciones de jubilados de los cementeros")
# Upper payment bound ------------------------------------------------------------------------------
# fecha_fin = date of death, defaulting to and capped at 2010-09-30;
# fecha_inicio = the later of 2000-03-01 and the last payroll date (ultima_planilla).
beneficios <- beneficiarios %>%
mutate(fecha_fin = as.character(fecha_fallecimiento) ) %>%
mutate(fecha_fin = ifelse(is.na(fecha_fallecimiento), ("2010-09-30"),fecha_fin )) %>%
mutate(fecha_fin = as.Date(fecha_fin,"%Y-%m-%d")) %>%
mutate(fecha_fin = if_else(fecha_fin > as.Date("2010-09-30","%Y-%m-%d"),as.Date("2010-09-30","%Y-%m-%d"), fecha_fin )) %>%
mutate(fecha_inicio = as.Date("2000-03-01","%Y-%m-%d") ) %>%
mutate(fecha_inicio = if_else(ultima_planilla > as.Date("2000-03-01","%Y-%m-%d"),ultima_planilla, fecha_inicio )) %>%
mutate(dif_meses = interval(fecha_inicio,fecha_fin) %/% months(1)) %>%
mutate (id_ben = 1:n())
# Attach pensions -----------------------------------------------------------------------------------
beneficios <- beneficios %>%
left_join(., pension, by='cedula')
# Attach interest rate ------------------------------------------------------------------------------
# Flat 5.31% rate (presumably annual -- confirm against the actuarial spec).
beneficios <- beneficios %>%
mutate( i_p = 5.31/100 )
# Generate the annual grid --------------------------------------------------------------------------
# One row per beneficiary per calendar year in [anio_ini, anio_fin];
# meses_cal = payable months within that year, i = years elapsed until 2010.
# NOTE(review): the meses_cal corrections below are order-dependent and mix
# if_else/ifelse on year comparisons against string literals -- behavior relies
# on numeric/character coercion; do not reorder.
beneficios_anual <- beneficios %>%
mutate( anio_ini = year(fecha_inicio), anio_fin = year(fecha_fin)) %>%
mutate( dif_anios = anio_fin - anio_ini + 1 ) %>%
slice(rep(1:n(),dif_anios)) %>%
group_by(cedula) %>%
mutate(contador = 1:n()) %>%
mutate(anio_cal = contador + anio_ini - 1) %>%
ungroup() %>%
mutate( i = 2010 - anio_cal) %>%
arrange(id_ben, anio_cal ) %>%
# mutate( meses_cal = ifelse( anio_cal == '2000', 12 - month(fecha_inicio) + 1, 12 ) ) %>%
# mutate( meses_cal = ifelse( anio_cal == '2010', month(fecha_fin), 12 ) ) %>%
mutate( meses_cal = if_else( anio_cal == anio_ini, 12 - month(fecha_inicio) + 1, 12 ) ) %>%
mutate( meses_cal = if_else( anio_cal == anio_ini & anio_ini == year(fecha_fallecimiento) , month(fecha_fallecimiento) - month(fecha_inicio) + 1, meses_cal ) ) %>%
mutate( meses_cal = if_else( anio_cal == anio_fin & anio_fin >= 2001, month(fecha_fin), meses_cal ) ) %>%
mutate( meses_cal = if_else( anio_cal == anio_fin & anio_fin == year(fecha_fallecimiento) & anio_fin >= 2001, month(fecha_fallecimiento), meses_cal ) ) %>%
mutate( meses_cal = ifelse( anio_cal == '2000' & is.na(meses_cal), 12 - month(fecha_inicio) + 1, meses_cal ) ) %>%
mutate( meses_cal = ifelse( anio_cal == '2010' & is.na(meses_cal), month(fecha_fin), meses_cal ) ) %>%
mutate( meses_cal = ifelse( anio_cal == '2007' & is.na(meses_cal), month(fecha_fin), meses_cal ) ) %>%
mutate( meses_cal = ifelse( anio_cal == '2006' & is.na(meses_cal), month(fecha_fin), meses_cal ) ) %>%
mutate( meses_cal = if_else( anio_cal == anio_ini & anio_cal > 2000, 12 - month(fecha_inicio) + 1, meses_cal ) ) %>%
# Annual pension, simple (non-compound) interest, and settlement total.
mutate( pension_anual = meses_cal * pension ) %>%
mutate( interes = pension_anual * ( i_p * i ) ) %>%
mutate( liquidacion = pension_anual + interes) %>%
select( id_ben, cedula, apellidos_y_nombres, fecha_fallecimiento , fecha_inicio, fecha_fin, ultimo_sueldo_nominal,
anio_cal, meses_cal, i, pension, pension_anual,
interes, liquidacion)
# sum(beneficios_anual$pension_anual)
# sum(beneficios_anual$interes)
# sum(beneficios_anual$liquidacion)
# Individual settlement list ------------------------------------------------------------------------
# Collapse the annual grid to one per-person row with lifetime totals.
lista_ben <- beneficios_anual %>%
group_by(cedula) %>%
mutate( pension_anual = sum(pension_anual, na.rm = TRUE),
interes = sum(interes, na.rm = TRUE ),
liquidacion = sum( liquidacion, na.rm = TRUE ) ) %>%
ungroup() %>%
distinct(cedula, .keep_all = TRUE) %>%
select( id_ben, cedula, apellidos_y_nombres, fecha_inicio, fecha_fin, fecha_fallecimiento,
ultimo_sueldo_nominal, pension, pension_anual, interes, liquidacion)
# Save to an RData file -----------------------------------------------------------------------------
message( '\tGuardando liquidaciones a jubilados' )
save( beneficios_anual,
lista_ben,
file = paste0( parametros$RData, 'IESS_liquidaciones.RData' ) )
# Remove remaining objects (pipeline convention: keep only `parametros`) ----------------------------
message(paste(rep("-", 100), collapse = ""))
rm(list = ls()[!(ls() %in% c("parametros"))])
gc()
|
78921e2bc7792a390fe58d17c05a4a4a37f34125
|
8198315b087ea397d1ff65e2f0be81761fb99fdf
|
/lab08/lab8_rozwiazania.R
|
9008fb13ed3568d5bc7f817696d34162387c4a4a
|
[] |
no_license
|
mmazepa/InteligencjaObliczeniowa
|
a7fa9a2d5a9ef375a8b5fef777d39f2d88c6d5df
|
67f8bd057920189d8e37931bf31f17ddc82982ea
|
refs/heads/master
| 2020-03-31T10:47:06.723886
| 2019-01-22T13:32:43
| 2019-01-22T13:32:43
| 152,149,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,350
|
r
|
lab8_rozwiazania.R
|
# LAB 8 [20.11.2018]  (original Polish header: "LABORATORIUM 8")
# ------------------------------------------------------------------------
# ___ TASK 1 _____________________________________________________________
# Hand-computed support and confidence for four association rules over a
# 10-transaction dataset: confidence = support / P(antecedent).
a2.support <- 4/10
a2.confidence <- a2.support / (9/10)
a3.support <- 5/10
a3.confidence <- a3.support / (7/10)
a4.support <- 5/10
a4.confidence <- a4.support / (5/10)
a5.support <- 2/10
a5.confidence <- a5.support / (2/10)
# ___ TASK 2 _____________________________________________________________
# NOTE(review): titanic.raw is neither created nor loaded in this file --
# it must already exist in the workspace (e.g. from course materials).
str(titanic.raw)
#install.packages("arules")
#library(arules)
# NOTE(review): apriori()/inspect() require the arules package to be attached;
# the library() call above is commented out, so this fails in a fresh session.
rules <- apriori(titanic.raw)
inspect(rules)
# Mine only survival rules (rhs restricted to Survived=No/Yes),
# min rule length 2, support 0.005, confidence 0.8.
# Style note: prefer TRUE/FALSE over T/F (T and F are reassignable).
rules <- apriori(titanic.raw,
parameter = list(minlen=2, supp=0.005, conf=0.8),
appearance = list(rhs=c("Survived=No", "Survived=Yes"), default="lhs"),
control = list(verbose=F))
rules.sorted <- sort(rules, by="lift")
inspect(rules.sorted)
# Prune redundancy: after sorting by lift, zero the lower triangle so only
# comparisons against earlier (higher-lift) rules remain; rule j is redundant
# when some earlier rule is a subset of it.
subset.matrix <- is.subset(rules.sorted, rules.sorted)
subset.matrix[lower.tri(subset.matrix, diag=T)] <- FALSE
redundant <- colSums(subset.matrix, na.rm=T) >= 1
which(redundant)
rules.pruned <- rules.sorted[!redundant]
inspect(rules.pruned)
#install.packages("arulesViz")
#library(arulesViz)
# Visualize the rules three ways: scatter, item graph, parallel coordinates
# (requires arulesViz, whose library() call is also commented out).
plot(rules)
plot(rules, method="graph", control=list(type="items"))
plot(rules, method="paracoord", control=list(reorder=TRUE))
# ------------------------------------------------------------------------
|
189f6cc54149bc6d1850e20705ad4950557cca51
|
b696679287e97a6622cc5a257eab12ad15cb5a0a
|
/cachematrix.R
|
92877eb9c0e21a198572acdd0cef2ca9612ad297
|
[] |
no_license
|
ccaudillo/ProgrammingAssignment2
|
caaf140e811bde4cb6c3a0fe875f9d12bd10335d
|
4d24d678614e97113a914ef734e50d68f3699b56
|
refs/heads/master
| 2021-01-17T07:55:22.901377
| 2015-01-25T23:20:42
| 2015-01-25T23:20:42
| 29,512,990
| 0
| 0
| null | 2015-01-20T05:11:09
| 2015-01-20T05:11:09
| null |
UTF-8
|
R
| false
| false
| 1,522
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
# These functions calculate the inverse of a matrix. If the inverse has
# been cached before, the cached inverse of the matrix is returned rather
# than being computed again.
## Write a short comment describing this function
# This function 'makeCacheMatrix' creates a special matrix, which is
# a list containing a function to do several things
# * Sets the value of the matrix
# * Gets the value of the matrix
# * Sets the inverse of the matrix
# * Gets the inverse of the matrix
# Create a cache-enabled matrix wrapper.
#
# Returns a list of four accessor closures sharing the matrix `x` and its
# (lazily supplied) inverse:
#   set(y)         -- store a new matrix and invalidate the cached inverse
#   get()          -- return the stored matrix
#   setinverse(iv) -- store a computed inverse in the cache
#   getinverse()   -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the old inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
# This function 'cacheSolve' computes the inverse of the special matrix
# created with the function above. First it checks if the inverse of the
# matrix has already been calculated. If this is the case, it returns the
# inverse matrix from the cache and skips the computation. Otherwise,
# it calculates the inverse of the matrix and stores the result in the
# cache by means of the 'setinverse' function.
# Return the inverse of the special "matrix" produced by makeCacheMatrix().
#
# If the inverse is already cached it is returned immediately (after a
# "getting cached data" message); otherwise it is computed with solve(),
# stored in the cache, and returned. Extra arguments in `...` are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)  # early exit: cache hit
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
c4f0e910b2f82620c0a7d959779aa26bd7648bd7
|
1a372979ef9fd1e1c0f238b3d7e562985d4144a1
|
/bee_analysis_site_chem.R
|
04318b8ced046c8f4e6b7cd31a4eb692d71cf69b
|
[] |
no_license
|
YDAckerman/EPA-Bee
|
9d6b4981c61b5be1ab59a40f1aabda2a6b6ed41d
|
7e3309e51064d22e0b28c938e4141ac2dab95b2a
|
refs/heads/master
| 2021-05-04T10:45:16.429799
| 2018-01-19T17:39:07
| 2018-01-19T17:39:07
| 43,094,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,875
|
r
|
bee_analysis_site_chem.R
|
## site - chemical analysis/visualization
## Flat analysis script: loads PUR/bee data, pesticide toxicity (LD50), and
## crop attractiveness tables, then builds 2013 heat maps and usage rankings.
library(dplyr)
library(ggplot2)
library(plyr)
# NOTE(review): attaching plyr after dplyr masks dplyr verbs such as
# summarise/mutate; the explicit dplyr:: calls later in this script suggest
# the authors worked around exactly that.
library(reshape)
library(wesanderson)
library(stringdist)
## set working directory
## NOTE(review): hard-coded setwd() ties the script to one machine; prefer
## project-relative paths.
setwd("~/Documents/ZhangLab/")
## load up site_chem
load("BeePUR/YA_bee_analysis.rda")
## get some helper functions (env hf) just in case
source("R/helper_functions.R") ## local machine
## load LD50 for pesticides
tox <- read.csv("BeePUR/AI-BeeToxicity.csv") ## local machine
tox$chem_code <- as.character(tox$chem_code)  # codes as strings, presumably for joining -- verify
## load crop attractiveness to bees:
load("BeePUR/cropBeeAttr.rda")
### visualize site - chemical relationship FOR 2013 ###
## now all site_chem joining is being done on abound. All
## that's needed to be done here is filter by year and
## rename/clean some variables.
## add "total LD"
## total_LD = number of single-bee lethal doses applied:
## pounds -> grams (453.592 g/lb) divided by LD50 converted from ug/bee to g/bee.
site_chem <- site_chem %>%
mutate(total_LD = 453.592 * total_lbs / (LD50..ug.bee.1. / (10^6)))
## site_chem really needs to be looked at one year
## and one buffer at a time, that way we can get some
## orders on site_code and chem_code:
## Keep only 2013, 1-mile buffer, and rows where the crop is attractive to
## honey bees via pollen or nectar ("+"/"++").
site_chem_2013 <- site_chem %>%
filter(year == 2013 & buff_size == 1) %>%
mutate(
chemname_w_LD50 = paste(chemname, LD50..ug.bee.1., sep = " - ")
) %>% filter(
HB.Pol %in% c("++", "+") | HB.Nec %in% c("++", "+")
)
## order site_name by attractiveness to bees and by frequency of
## the site (see site_chem contruction above,
## in particular the construction for r [rank])
## todo: make this work without killing off things
## NOTE(review): frequencies come from buff_size == 5 while the data above
## used buff_size == 1 -- confirm the mismatch is intentional.
r <- site_freq %>%
filter(year == 2013 & buff_size == 5) %>%
select(site_name, perc_freq) %>%
distinct(site_name, perc_freq)
site_chem_2013 <- left_join(site_chem_2013, r, by = "site_name")
# Factor levels ordered by decreasing co-occurrence frequency (plot x-axis order).
order1 <- site_chem_2013 %>%
arrange(desc(perc_freq)) %>%
select(site_name) %>%
distinct(site_name)
site_chem_2013$site_name <- factor(site_chem_2013$site_name,
levels = order1$site_name)
## ordered chemname by LD50 (plot y-axis order)
order2 <- site_chem_2013 %>%
arrange(LD50..ug.bee.1.) %>%
distinct(chemname, chemname_w_LD50)
site_chem_2013$chemname_w_LD50 <- factor(site_chem_2013$chemname_w_LD50,
levels = order2$chemname_w_LD50)
site_chem_2013$chemname <- factor(site_chem_2013$chemname,
levels = order2$chemname)
### visualize the relationship between site and crop
### by number of total lbs applied
## this can either be visualized in num
## bin the results:
## NOTE(review): the bins are not contiguous (e.g. values in (100, 101) fall
## in no bin); such values print and yield integer(0), which would break the
## unlist() alignment -- verify against the data.
bins <- c(list(c(0.0001, 100)),list(c(101,200)), list(c(201,2000)), list(c(2001,20000)),
list(c(20001,60500)))
# Map each total_lbs value to the index of the bin containing it.
site_chem_2013$binned_applications <-
unlist(llply(site_chem_2013$total_lbs,
function(x){
if(x > 1){ x <- floor(x) }
i <- unlist(llply(bins, function(bin){
x <= max(bin) & x >= min(bin)
}))
if(length(which(i)) == 0){print(x)}  # debug: value fell in no bin
which(i)
}, .inform = TRUE))
# Human-readable legend labels, one per bin.
labels <- unlist(llply(bins, function(bin){
paste0(min(bin), " - ",max(bin))
}))
pal <- wes_palette("Zissou", length(labels), type = "continuous")
# Heat map: crop x active ingredient, colored by binned total pounds applied.
ggplot(site_chem_2013, aes(x = site_name,
y = chemname_w_LD50,
fill = as.factor(binned_applications))) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, size = 9)) +
theme(axis.text.y = element_text(angle = 0, hjust = 0, size = 8)) +
scale_fill_manual(name = "Total Lbs Applied",
values = pal,
breaks = seq_along(labels),
labels = labels) +
xlab(label = "Crop \n (ordered by decresing frequency of co-occurence with almonds)") +
ylab(label = "Active Ingredient & LD50 \n (ordered by increasing LD50)") +
ggtitle("Total Lbs Active Ingredient Use \n Bee-Attractive Crops 2013")
### Do the same but with lethal doses (!?)
bins <- c(list(c(400, 4000000)), list(c(4000001, 30000000)),
list(c(30000001, 100000000)),
list(c(100000001, 500000000)),
list(c(500000001, 1000000000)),
list(c(1000000001, 10000000000)),
list(c(10000000001, 300000000000)))
# Same binning, now over median lethal doses per acre planted; NA passes through.
site_chem_2013$binned_applications <-
unlist(llply(site_chem_2013$med_LD_acre_planted,
function(x){
is.na(x) && return(NA)
x <- floor(x)
i <- unlist(llply(bins, function(bin){
x <= max(bin) & x >= min(bin, na.rm = TRUE)
}))
if(length(which(i)) == 0){print(x)}  # debug: value fell in no bin
which(i)
}, .inform = TRUE))
labels <- unlist(llply(bins, function(bin){
paste0(signif(min(bin),2), " - ",signif(max(bin),2))
}))
pal <- wes_palette("Rushmore", length(bins), type = "continuous")
# Heat map: crop x active ingredient, colored by binned lethal doses per acre.
ggplot(site_chem_2013, aes(x = site_name,
y = chemname_w_LD50,
fill = as.factor(binned_applications))) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, size = 9)) +
theme(axis.text.y = element_text(angle = 0, hjust = 0, size = 8)) +
scale_fill_manual(name = "Median lethal doses per Acre Planted",
values = pal,
breaks = seq_along(labels),
labels = labels) +
xlab(label = "Crop \n (ordered by decresing frequency of co-occurence with almonds)") +
ylab(label = "Active Ingredient & LD50 \n (ordered by increasing LD50)") +
ggtitle("Average Lethal Doses per Acre Planted Active Ingredient Use \n Bee-Attractive Crops 2013")
### The same, but with products
## Repeat the heat-map analysis at the product (formulation) level.
site_prod_2013 <- site_prod %>%
filter(year == 2013)
## order site_name by attractiveness to bees and by frequency of
## the site (see site_chem contruction above,
## in particular the construction for r [rank])
r_prod <- site_freq %>%
filter(year == 2013 & buff_size == 5) %>%
dplyr::select(site_name, site_code, perc_freq) %>%
distinct(site_name, site_code, perc_freq) %>%
dplyr::mutate(site_code = as.numeric(site_code))  # align key type for the join below
site_prod_2013 <- left_join(site_prod_2013, r_prod, by = "site_code")
# x-axis order: decreasing co-occurrence frequency.
order1 <- site_prod_2013 %>%
arrange(desc(perc_freq)) %>%
select(site_name) %>%
distinct(site_name)
site_prod_2013$site_name <- factor(site_prod_2013$site_name,
levels = order1$site_name)
## ordered chemname by LD50
## NOTE(review): comment inherited from above -- products are actually
## ordered by total_lbs here, not LD50.
order2 <- site_prod_2013 %>%
arrange(total_lbs) %>%
distinct(product_name)
site_prod_2013$product_name <- factor(site_prod_2013$product_name,
levels = order2$product_name)
bins <- c(list(c(0.0001,100)), list(c(100.1, 5000)), list(c(5001, 17000)),
list(c(17001, 80000)), list(c(80001, 100000)), list(c(100001, 170000)))
# Bin total pounds per product; NA passes through.
site_prod_2013$binned_applications <-
unlist(llply(site_prod_2013$total_lbs,
function(x){
is.na(x) && return(NA)
i <- unlist(llply(bins, function(bin){
x <= max(bin) & x >= min(bin, na.rm = TRUE)
}))
if(length(which(i)) == 0){print(x)}  # debug: value fell in no bin
which(i)
}, .inform = TRUE))
labels <- unlist(llply(bins, function(bin){
paste0(signif(min(bin),2), " - ",signif(max(bin),2))
}))
pal <- wes_palette("Rushmore", length(bins), type = "continuous")
# Heat map restricted to heavily used products (>= 5000 lbs).
ggplot(site_prod_2013 %>% filter(total_lbs >= 5000), aes(x = site_name,
y = product_name,
fill = as.factor(binned_applications))) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, size = 9)) +
theme(axis.text.y = element_text(angle = 0, hjust = 0, size = 8)) +
scale_fill_manual(name = "Total Lbs Applied",
values = pal,
breaks = seq_along(labels),
labels = labels) +
xlab(label = "Crop \n (ordered by decresing frequency of co-occurence with almonds)") +
ylab(label = "Product Name") +
ggtitle("Product Use by Site 2013")
# Per-product yearly usage summary across all crops.
prod_summary <- site_prod %>%
group_by(year, prodno, product_name) %>%
dplyr::summarise(total_use = sum(total_lbs, na.rm = TRUE),
median_use_per_crop = median(total_lbs, na.rm = TRUE),
average_use_per_crop = mean(total_lbs, na.rm = TRUE))
ggplot(prod_summary, aes(product_name, y = total_use, group = year, color = year)) +
geom_point() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, size = 5))
### rank bee crops by usage:
## Per-crop totals (2013, 5-mile buffer, combined aerial+ground) of pounds,
## lethal doses, and application counts, restricted to bee-attractive crops.
site_summ_chem <- site_chem %>%
filter(year == 2013 &
(HB.Nec %in% c("++","+") |
HB.Pol %in% c("++","+")) &
buff_size == 5 &
aer_gnd_ind == "tot") %>%
dplyr::mutate(total_LD = total_lbs * 453.592 / (LD50..ug.bee.1. / (10^6))) %>%
group_by(year, site_code, site_name) %>%
dplyr::summarise(total_use_chem = sum(total_lbs, na.rm = TRUE),
total_LD_chem= sum(total_LD, na.rm = TRUE),
num_applications_chem = sum(num_applications, na.rm = TRUE)
)
# Grand totals over the same filtered subset (denominators for the shares below).
site_summ_tot <- site_chem %>%
filter(year == 2013 &
(HB.Nec %in% c("++","+") |
HB.Pol %in% c("++","+")) &
buff_size == 5 &
aer_gnd_ind == "tot") %>%
dplyr::mutate(total_LD = total_lbs * 453.592 / (LD50..ug.bee.1. / (10^6))) %>%
group_by(year) %>%
dplyr::summarise(total_use = sum(total_lbs, na.rm = TRUE),
total_LD = sum(total_LD, na.rm = TRUE),
num_applications = sum(num_applications, na.rm = TRUE)
)
site_summ <- left_join(site_summ_chem, site_summ_tot, by = "year")
# Each crop's share of total use / lethal doses / applications.
site_summ <- site_summ %>%
ungroup() %>%
dplyr::mutate(perc_use = round(total_use_chem / total_use, 3),
perc_LD = round(total_LD_chem / total_LD, 3),
perc_applications = round(num_applications_chem / num_applications, 3))
### quantify ranks of usage within bee crops:
## Same summaries, but grouped by active ingredient instead of crop.
chem_summ_chem <- site_chem %>%
filter(year == 2013 &
(HB.Nec %in% c("++","+") |
HB.Pol %in% c("++","+")) &
buff_size == 5 &
aer_gnd_ind == "tot") %>%
dplyr::mutate(total_LD = total_lbs * 453.592 / (LD50..ug.bee.1. / (10^6))) %>%
group_by(year, chem_code, chemname) %>%
dplyr::summarise(total_use_chem = sum(total_lbs, na.rm = TRUE),
total_LD_chem= sum(total_LD, na.rm = TRUE),
num_applications_chem = sum(num_applications, na.rm = TRUE)
)
chem_summ_tot <- site_chem %>%
filter(year == 2013 &
(HB.Nec %in% c("++","+") |
HB.Pol %in% c("++","+")) &
buff_size == 5 &
aer_gnd_ind == "tot") %>%
dplyr::mutate(total_LD = total_lbs * 453.592 / (LD50..ug.bee.1. / (10^6))) %>%
group_by(year) %>%
dplyr::summarise(total_use = sum(total_lbs, na.rm = TRUE),
total_LD = sum(total_LD, na.rm = TRUE),
num_applications = sum(num_applications, na.rm = TRUE)
)
chem_summ <- left_join(chem_summ_chem, chem_summ_tot, by = "year")
chem_summ <- chem_summ %>%
ungroup() %>%
dplyr::mutate(perc_use = round(total_use_chem / total_use, 3),
perc_LD = round(total_LD_chem / total_LD, 3),
perc_applications = round(num_applications_chem / num_applications, 3))
chem_summ <- as.data.frame(chem_summ)
## chem_summ tables
## Top-10 active ingredients by each share measure (printed interactively).
chem_summ %>%
arrange(desc(perc_use)) %>%
select(chemname, fraction_total_use = perc_use,
fraction_total_LD = perc_LD,
fraction_all_applications = perc_applications) %>%
head(10)
chem_summ %>%
arrange(desc(perc_LD)) %>%
select(chemname, fraction_total_LD = perc_LD) %>%
head(10)
chem_summ %>%
arrange(desc(perc_applications)) %>%
select(chemname, fraction_all_applications = perc_applications) %>%
head(10)
# Long format for plotting all share measures per chemical.
tmp <- melt(chem_summ, id = "chemname")
ggplot(tmp, aes(x = chemname, y = value, color = variable)) +
geom_point() +
theme(axis.text.x = element_text(angle = 90, hjust = 0, size = 9))
# Top-10 bee-attractive crops by total lethal doses (2013, 5-mile buffer).
# NOTE(review): the leading FALSE | in the filter is a no-op -- likely a
# leftover toggle for including all crops.
rank_by_use <- site_chem %>%
filter( FALSE | HB.Nec %in% c("++", "+") |
HB.Pol %in% c("++", "+")) %>%
filter( aer_gnd_ind == "tot" ) %>%
group_by(year, buff_size, site_code, site_name) %>%
dplyr::mutate(total_lds = 453.592 * total_lbs /
(LD50..ug.bee.1. / (10^6))) %>%
dplyr::summarise(total_lbs = sum(total_lbs),
total_lds = sum(total_lds)) %>%
ungroup() %>%
filter(year == 2013 & buff_size == 5) %>%
select(site_name, total_lds) %>%
top_n(10)
|
d1fd24ed3d50899ae0da25cd54b06caa2715ed21
|
776b2f5c961e243b53f209591e36428e6019289e
|
/man/buildGLMMF.Rd
|
3ff46e7be65cd6b814f474f462681445fb3d5111
|
[] |
no_license
|
lnsongxf/GLMMF
|
587e60fd398651e9729709b319da6a58bc438d41
|
3f5e6ac53cf65eeabfbbfa073e9620f615d88ce6
|
refs/heads/master
| 2020-12-01T12:05:24.190440
| 2015-03-19T17:59:43
| 2015-03-19T17:59:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,596
|
rd
|
buildGLMMF.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{buildGLMMF}
\alias{buildGLMMF}
\title{Build Generalized Linear Mixed-Effects Model with Latent Factors}
\usage{
buildGLMMF(group, response, common.fixed, distinct.fixed, random,
nfactors = 0, data, distribution = c("gaussian", "poisson", "binomial",
"gamma", "negative binomial"), u, random.cov, factors,
tol = .Machine$double.eps^0.5)
}
\arguments{
\item{group}{Name of the grouping variable in data.
Only one grouping variable is allowed and the group sizes must be equal. In case of unequal group sizes,
patch missing rows with NA's.}
\item{response}{Name of the response variable in data.}
\item{common.fixed}{formula for common fixed effects. LHS of the formula is ignored if present.}
\item{distinct.fixed}{Formula for distinct fixed effects i.e. each group has separate regression
coefficient. This formula cannot contain variables which are already present in
\code{common.fixed} or \code{random} formulas, as in that case the model would not be identifiable.
LHS of the formula is ignored if present.}
\item{random}{Formula for random effects. LHS of the formula is ignored if present.}
\item{data}{Data frame containing the variables in the model. Must contain all variables used formulas and
variables defined in \code{group} and \code{response}}
\item{distribution}{Character. Possible values are "gaussian", "poisson",
"binomial", "negative binomial" and "gamma". Default is "gaussian".}
}
\description{
Function \code{buildGLMMF} builds GLMM with latent factors using methods based on state space modelling.
}
|
0cbff1c9c3e89b332098c28331bff9ef278ddd37
|
990b0c449377c7afa108b5d182df4dc306be19ce
|
/Rlib/man/db_info.Rd
|
0658a6bf8b145c42b55204928f33a1b7cf13b965
|
[] |
no_license
|
oliver4444/DublinRTPI
|
292b75399a6aa05a68d3f9e15fbf23282fe538e7
|
ef089791a41346eeb8674ec34b6a95fd17cedd96
|
refs/heads/master
| 2022-04-09T06:19:55.862443
| 2020-03-29T15:37:10
| 2020-03-29T15:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 717
|
rd
|
db_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_info.R
\name{db_info}
\alias{db_info}
\title{Real time info for multiple bus stops (API)}
\usage{
db_info(
stop_numbers,
base_url = "https://data.smartdublin.ie",
api_path = "/cgi-bin/rtpi/realtimebusinformation"
)
}
\arguments{
\item{stop_numbers}{A vector of bus stop numbers.}
\item{base_url}{Base URL for dublin bus API}
\item{api_path}{path for API endpoint}
}
\value{
A data frame containing the times until the next buses at the selected stops.
}
\description{
This function uses the Irish transport API to retrieve the real-time information about bus stops.
}
\examples{
\dontrun{db_info(c(334, 336))}
}
|
84720ee1bfccc19f98c5618abd6b08892bd782c4
|
5ed44176b4e3716a44565d118283223c07b791a3
|
/man/TSP-package.Rd
|
0ad0a873a4570fd846979cbbda8eefdb35fd3f73
|
[] |
no_license
|
mhahsler/TSP
|
d94ead22a9d3e3b44829477ff474ce458b857623
|
f274bf7098570943674f0e7ef3a281cda78a040e
|
refs/heads/master
| 2023-08-09T03:27:48.714780
| 2023-07-21T14:32:25
| 2023-07-21T14:32:25
| 43,990,993
| 66
| 17
| null | 2020-01-23T18:53:26
| 2015-10-10T02:54:22
|
R
|
UTF-8
|
R
| false
| true
| 945
|
rd
|
TSP-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AAA_TSP-package.R
\docType{package}
\name{TSP-package}
\alias{TSP-package}
\title{TSP: Traveling Salesperson Problem (TSP)}
\description{
Basic infrastructure and some algorithms for the traveling salesperson problem (also traveling salesman problem; TSP). The package provides some simple algorithms and an interface to the Concorde TSP solver and its implementation of the Chained-Lin-Kernighan heuristic. The code for \href{https://www.math.uwaterloo.ca/tsp/concorde/}{Concorde} itself is not included in the package and has to be obtained separately.
}
\section{Key functions}{
\itemize{
\item \code{\link[=solve_TSP]{solve_TSP()}}
}
}
\references{
Michael Hahsler and Kurt Hornik. TSP -- Infrastructure for the traveling salesperson problem. Journal of Statistical Software, 23(2):1--21, December 2007. \doi{10.18637/jss.v023.i02}
}
\author{
Michael Hahsler
}
|
ed381636a881f64b1fcd4cd5cabc801f5da97561
|
19fc514eed402cb1391dc5f2a1953136183ca5d2
|
/4_summary/src/summarise_data.R
|
9fed71ffa14f84b22a5183cb978b5e55b68888b1
|
[] |
no_license
|
cstillwellusgs/ds-pipelines-targets-1
|
1c86bc3b9b820df939ec101ca56a8babd16a42ee
|
619fe765ec0c221278706f11d85df2be75d9bdb4
|
refs/heads/main
| 2023-08-25T05:46:57.171091
| 2021-10-28T18:56:50
| 2021-10-28T18:56:50
| 412,181,962
| 0
| 0
| null | 2021-09-30T19:43:06
| 2021-09-30T18:37:28
|
R
|
UTF-8
|
R
| false
| false
| 1,301
|
r
|
summarise_data.R
|
# Write a prose summary of model RMSE results to a plain-text file.
#
# Args:
#   summary_data:     path to a CSV of model metrics, read with
#                     readr::read_csv() using column types "icddd";
#                     column 3 holds the RMSE values used below.
#   summary_out_dir:  directory in which to write the summary file.
#   summary_out_file: name of the file to write.
#
# Returns:
#   The path of the file that was written.
#
# NOTE(review): the row indices (17, 15, 16, ...) are hard-coded and assume a
# fixed row ordering in the input CSV -- confirm against the upstream pipeline.
summarise_data <- function(summary_data, summary_out_dir, summary_out_file) {
  dest <- file.path(summary_out_dir, summary_out_file)
  data <- read_csv(summary_data, col_types = "icddd")

  # RMSE from row `i`, column 3, rounded to three decimal places.
  rmse <- function(i) round(data[[i, 3]], 3)

  summary_text <- paste0(
    "resulted in mean RMSEs (means calculated as average of RMSEs from the five dataset iterations) of ",
    rmse(17), ", ", rmse(15), ", and ", rmse(16),
    "°C for the PGDL, DL, and PB models, respectively. The relative performance of DL vs PB depended on the amount of training data. The accuracy of Lake Mendota temperature predictions from the DL was better than PB when trained on 500 profiles (",
    rmse(12), " and ", rmse(13),
    "°C, respectively) or more, but worse than PB when training was reduced to 100 profiles (",
    rmse(9), " and ", rmse(10),
    "°C respectively) or fewer. The PGDL prediction accuracy was more robust compared to PB when only two profiles were provided for training (",
    rmse(2), " and ", rmse(1),
    "°C, respectively).")

  cat(summary_text, file = dest)
  dest
}
|
0d54709c660226768f692fb409abd27ed5c56850
|
e2cf95573135143abda85710adc33336bf41adef
|
/tests/test_convert_from_rmd_to_rnw.R
|
e91fdd70f61221ab038c5418c48cda170bdb62d6
|
[] |
no_license
|
kwojdalski/rpm2
|
41d3cac7f1ef08585ed5e5a847f86c665accf285
|
a6b0ea44832210b0601deed5691eaac664752d6c
|
refs/heads/master
| 2021-08-15T21:43:13.871194
| 2017-11-18T10:22:15
| 2017-11-18T10:22:15
| 110,359,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
test_convert_from_rmd_to_rnw.R
|
# Interactive driver: clears previous Rnw output, converts Rmd sources to Rnw,
# then renders the thesis document.
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail loudly and is usually preferable here.
require(rpm2)
require(assertthat)
require(magrittr)
require(stringr)
require(plyr)
# Step into the internal helpers on their next call (interactive debugging;
# remove before running unattended).
debugonce(rpm2:::.swap)
debugonce(rpm2:::.swapCitations)
debugonce(convertDocs)
# Remove any previously generated Rnw files before re-converting.
file.remove(list.files('./data/Rnw', recursive = T, full.names = T))
# Convert every Rmd under data/Rmd2 into Rnw under data/Rnw.
convertDocs(type = 'Rmd', path_folder = './data/Rmd2/', new_path_folder = './data/Rnw', recursive = T)
# Render the thesis, keeping intermediates in a dedicated directory.
rmarkdown::render('./data/Rmd/masters_thesis.Rmd', intermediates_dir = 'intermediates_dir')
|
8498a9c874d557c1e446843d1c1eb599d998fbb8
|
e72c0d6f1691d326b0b7768baec7beabd93076ef
|
/data/DataPrep/old_stuff.R
|
f017dccb290e200297221aff4aed6f17393a08b8
|
[
"MIT"
] |
permissive
|
wellermatt/iri
|
d613394450e334786ddc7860a899144dc31452b8
|
73ba32142ceb8d52e651d8a8b29b5089cb554fa9
|
refs/heads/master
| 2016-08-07T19:39:53.958351
| 2015-02-02T08:43:38
| 2015-02-02T08:43:38
| 30,034,112
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,521
|
r
|
old_stuff.R
|
# NOTE(review): stray interactive inspection calls -- they print the first
# rows of dat.item and sp, which must already exist in the workspace.
head(dat.item)
head(sp)
f_data.aggregate.save.old = function(dat.item, promo.flags.agg.methods = "wmean")
{
##############################################################################
### PREPARE WEEKLY RAW DATA AND AGGREGATE ALL VARIABLES TO CHAIN/UPC LEVELS
##############################################################################
# initially we need to get a data.table containing every SKU (fc.item at the lowest level) for each week in the set
fi1 = data.table(expand.grid(fc.item = fc.item[lvl==1, fc.item],
WEEK= min(dat.item$WEEK):max(dat.item$WEEK)),key="fc.item,WEEK")
### Firstly aggregate Sales and Price Data (UNITS, DOLLARS, PR, PRICE)
sales.pos = dat.item[,list(fc.item,WEEK,UNITS,DOLLARS,PR)]
#sales.pos$PR = as.integer(sales.pos$PR)-1
sales.pos$PRICE = sales.pos$DOLLARS/sales.pos$UNITS
sales.pos = merge(fi1,
sales.pos,
all.x=TRUE,)
sales.pos = merge(sales.pos,
fc.item[lvl==1,list(fc.item,UPC,chain)],
by="fc.item")
###AGGREGATE THE SALES DATA TO NEXT LEVEL
# note that weighted mean is used on the PR flag here as opposed to mean
DT = sales.pos[, list(UNITS = sum(UNITS,na.rm=TRUE),
DOLLARS = sum(DOLLARS, na.rm = TRUE),
PR = weighted.mean(PR,w=UNITS,na.rm=TRUE) ), ######### weighted mean or mean?
by="UPC,chain,WEEK"]
DT$PRICE = DT$DOLLARS/DT$UNITS
DT$fc.item = with(DT,paste(UPC,chain,sep="/"))
setcolorder(DT, names(DT)[c(length(DT),3:(length(DT)-1),1:2)])
DT$UPC = NULL ; DT$chain = NULL
sales.chain = DT ; DT = NULL
head(sales.chain,100)
## Aggregate sales to the ITEM level
DT = sales.pos[, list(UNITS = sum(UNITS,na.rm=TRUE),
DOLLARS = sum(DOLLARS, na.rm = TRUE),
PR = weighted.mean(PR,w=UNITS,na.rm=TRUE)), ############ weighted mean or mean?
by="UPC,WEEK"]
DT$PRICE = DT$DOLLARS/DT$UNITS
DT$fc.item = as.character(DT$UPC)
setcolorder(DT, names(DT)[c(length(DT),2:(length(DT)-1),1)])
DT$UPC = NULL
sales.upc = DT ; DT = NULL
sales.pos$UPC = NULL ; sales.pos$chain=NULL # clear up the extra columns
sales = rbindlist(list(sales.pos,sales.chain, sales.upc))
keycols = c("fc.item","WEEK")
setkeyv(sales,keycols)
# set UNITS and DOLLARS to NA from zero
sales[UNITS ==0,c("UNITS","DOLLARS"):= NA]
##################### PROMOTIONS FLAG TRANSFORMATION
# FEATURE
promos.pos.feat = data.table(cast(dat.item, fc.item + WEEK ~ F, value="counter", sum))
setnames(promos.pos.feat ,
old=names(promos.pos.feat),
new= c("fc.item","WEEK",paste("FEAT_",names(promos.pos.feat)[3:length(promos.pos.feat)],sep="")))
promos.pos.feat$FEAT_ANY = abs(promos.pos.feat$FEAT_NONE-1)
keycols = c("fc.item","WEEK")
setkeyv(promos.pos.feat,keycols)
promos.pos.feat = merge(fi1, #data.table(expand.grid(fc.item=fc.item[lvl==1,fc.item],WEEK= 1:313),key="fc.item,WEEK")
promos.pos.feat,all.x=TRUE)
#promos.pos.feat = merge(fc.item.weeks.all,promos.pos.feat,all.x=TRUE)
# DISPLAY
promos.pos.disp = data.table(cast(dat.item, fc.item + WEEK ~ D, value = "counter", sum))
setnames(promos.pos.disp ,
old=names(promos.pos.disp),
new= c("fc.item","WEEK",paste("DISP_",names(promos.pos.disp)[3:length(promos.pos.disp)],sep="")))
keycols = c("fc.item","WEEK")
setkeyv(promos.pos.disp,keycols)
promos.pos.disp = merge(fi1,
promos.pos.disp,
all.x=TRUE)
# this is the key table
promos.pos = merge(promos.pos.feat, promos.pos.disp)
###### aggregate chain level promos data
promos.pos.2 = merge(fc.item, promos.pos, by="fc.item")
promos.pos.2 = merge(promos.pos.2, dat.item[,list(fc.item,WEEK,UNITS)], by=c("fc.item","WEEK"),all.x=TRUE)
DT = promos.pos.2
if (promo.flags.agg.methods == "wmean"){
# using weighted mean
DT = DT[, lapply(.SD, weighted.mean, w=UNITS, na.rm=TRUE), by="UPC,chain,WEEK", .SDcols=7:(length(DT))]
} else {
# using mean
DT = DT[, lapply(.SD, mean, na.rm=TRUE), by="UPC,chain,WEEK", .SDcols=7:(length(DT)-1)]
}
DT$fc.item = with(DT,paste(UPC,chain,sep="/"))
setcolorder(DT, names(DT)[c(length(DT),3:(length(DT)-1),1:2)])
DT$UPC = NULL ; DT$chain = NULL ; DT$UNITS = NULL
promos.chain = DT ; DT = NULL
###### aggregate upc level promos data
DT=promos.pos.2
if (promo.flags.agg.methods == "wmean"){
DT = DT[, lapply(.SD, mean, na.rm=TRUE), by="UPC,WEEK", .SDcols=7:(length(promos.pos.2)-1)]
} else {
DT = DT[, lapply(.SD, weighted.mean, w=UNITS, na.rm=TRUE), by="UPC,WEEK", .SDcols=7:(length(promos.pos.2)-1)] }
DT$fc.item = as.character(DT$UPC) #with(DT,paste(UPC,chain,sep="/"))
setcolorder(DT, names(DT)[c(length(DT),2:(length(DT)-1),1)])
DT$UPC = NULL #; DT$UNITS = NULL
promos.upc = DT ; DT = NULL
promos = rbind(promos.pos,promos.chain,promos.upc)
keycols = c("fc.item","WEEK")
setkeyv(promos,keycols)
sales.promos = merge(sales,promos)
getwd()
#sapply(promos.pos,class)
#sapply(promos.upc,class)
#sapply(promos,class)
#sapply(sales,class)
setwd("./iri category subsets/reformatted")
#saveRDS(promos,"beer.top.upc.promos.rds")
#saveRDS(sales,"beer.top.upc.sales.rds")
saveRDS(sales.promos,"beer.top.upc.sales.promos.rds")
#write.csv(sales.promos, file="beer.top.upc.sales.promos2.csv")
sales.promos
}
# NOTE(review): clears the entire workspace — avoid rm(list = ls()) in shared scripts.
rm(list=ls())
# Scratch exploration: combine an ad-hoc aggregate (sum of v1) with per-column
# means over .SDcols, grouped by `grp`.
dt = data.table(grp = sample(letters[1:3],100, replace = TRUE),
                v1 = rnorm(100),
                v2 = rnorm(100),
                v3 = rnorm(100))
sd.cols = c("v1","v2", "v3")
# list(sum(v1), lapply(.SD, mean)) keeps lapply()'s result nested inside the
# output; compare with the c(...) form below, which splices the per-column
# means out into separate columns.
dt.out = dt[, list(sum(v1), lapply(.SD,mean)), by = grp, .SDcols = sd.cols]
dt = data.table(grp = sample(letters[1:3],100, replace = TRUE),
                v1 = rnorm(100),
                v2 = rnorm(100),
                v3 = rnorm(100))
sd.cols = c("v2", "v3")
# c(sum(v1), lapply(.SD, mean)) flattens into one row per group:
# V1 plus one column per .SDcols entry.
dt.out = dt[, c(sum(v1), lapply(.SD,mean)), by = grp, .SDcols = sd.cols]
# Equivalent two-step approach: aggregate separately, then merge on the key.
dt.out1 = dt[, sum(v1), by = grp]
dt.out2 = dt[, lapply(.SD,mean), by = grp, .SDcols = sd.cols]
dt.out = merge(dt.out1, dt.out2, by = "grp")
#dupes = DT[duplicated(DT$IRI_KEY)]
#dupes[order(-rank(IRI_KEY)),]
#stores.old = stores
#DT = data.table(read.csv("./iri reference data/stores.csv",header=TRUE,stringsAsFactors=FALSE))
#DT = unique(DT)
#if (sum(duplicated(DT)) > 0) {
#dupes = unique(DT[duplicated(DT$IRI_KEY)])[,IRI_KEY]
#stores = DT[!DT$IRI_KEY %in% dupes]
#} else {
#stores = DT
#}
#droplevels(stores)
## testing performance of cast versus model.matrix
#ptm <- proc.time()
#promos.pos.feat = data.table(cast(dat.item, fc.item + WEEK ~ F, value="counter", sum))
#setnames(promos.pos.feat ,
#old=names(promos.pos.feat),
#new= c("fc.item","WEEK",paste("FEAT_",names(promos.pos.feat)[3:length(promos.pos.feat)],sep="")))
#promos.pos.feat$FEAT_ANY = abs(promos.pos.feat$FEAT_NONE-1)
#keycols = c("fc.item","WEEK")
#setkeyv(promos.pos.feat,keycols)
#proc.time() - ptm
#
#?model.matrix
#library(caret)
#
#
#feat =
#td = dat.item
#ptm <- proc.time()
#promos.pos.feat = data.table(cast(td, fc.item + WEEK ~ F, value="counter", sum))
#proc.time() - ptm
#ptm <- proc.time()
#promos.pos.feat = data.table(cast(td, fc.item + WEEK ~ F, length))
#proc.time() - ptm
#ptm <- proc.time()
#promos.pos.all = data.table(model.matrix(~F+D-1,data = td))
#names(promos.pos.feat)
#proc.time() - ptm
#ptm = proc.time()
#dat.item$D = factor(dat.item$D,labels = c("NONE", "MINOR", "MAJOR"))
#dat.item$F = factor(dat.item$F)
#setnames(dat.item,old = c("D","F"),new = c("DISP_","FEAT_"))
#dummies = dummyVars(UNITS~D+F, data=dat.item)
#dummies = data.table(head(predict(dummies,newdata=dat.item)))
#proc.time()-ptm
#
#?model.matrix
#
|
1d64ea5cd75bac75cc1ec723a512fb17291cc633
|
afd493c50b4565ed483c5fc9095f464f36b78d55
|
/man/pearson.Rd
|
130e0f8fc36c38e751526a07061cbd25e8a0b212
|
[] |
no_license
|
jigten/coorbest
|
54e8035f56884a8d372491d8f7638d084183e3d6
|
05250316e6279696d70cbbe84231acfba60cdfa9
|
refs/heads/master
| 2021-01-07T18:47:20.337588
| 2020-03-18T05:01:28
| 2020-03-18T05:01:28
| 241,783,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 273
|
rd
|
pearson.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bstrapcor.R
\name{pearson}
\alias{pearson}
\title{Pearson Correlation Coefficient}
\usage{
pearson(d, i = c(1:n))
}
\description{
Helper function to compute the Pearson correlation coefficient
}
|
f2d99b2254f8c6bb3792d1d2f90fafdadd3018b3
|
9bbf41c9336c219119d2322c9972d53b0aaede33
|
/CCMv2/app.R
|
603bb2e04d723cc605e931de6d6ebda9799be073
|
[] |
no_license
|
princesslililu/CCM_modelling
|
5ebb8d2d65a01e1d5aa91009238247ff75bc4f8f
|
80eb41596dbbe00645d12c9344593440dd826fcf
|
refs/heads/master
| 2023-08-18T11:26:40.862113
| 2021-10-06T20:57:19
| 2021-10-06T20:57:19
| 280,728,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,570
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI: parameter sliders plus the model-output display for the CCM app
ui <- fluidPage(
    # Application title
    titlePanel("Parameters"),
    # Sidebar with sliders for the two model parameters
    sidebarLayout(
        sidebarPanel(
            h4('Set Models Parameter Below'),
            img(src='CCM_eqn.png'),
            # Carbon influx rate (F1), used as the denominator of F3/F1
            sliderInput("F1", "F1: carbon influx",
                        min = 1, max = 100, value = 50, step = 1),
            # BUG FIX: this slider feeds input$F3 (carbon efflux) but was
            # labelled "F1: carbon eflux", duplicating the influx label.
            sliderInput("F3", "F3: carbon efflux",
                        min = 1, max = 100, value = 50, step = 1)
        ),
        # Main panel: model figure and the computed 13-C discrimination
        mainPanel(
            tabsetPanel(
                tabPanel("Model",
                         h3('Simple of model of Algal CCM by Sharkey & Berry, 1985'),
                         img(src='CCM_fig.png', height = 300, width = 350),
                         h2('Model output'),
                         span(textOutput("CCM"), style="font-size:30px")
                ),
                tabPanel("Explanation")
            )
        )
    )
)
# Define server logic: compute the model's 13-C discrimination from the sliders
server <- function(input, output) {
    output$CCM <- renderText({
        # Discrimination formula of the simple CCM model: -7.9 + 27 * (F3/F1)
        discrimination <- -7.9 + 27 * (input$F3 / input$F1)
        paste(HTML("13-C discrimination:"), discrimination)
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
78a5576f49ea4d38fae758eaefbada4128fd60bf
|
c7a98fbc1d2acad22e232b4c4f55e02ade39634d
|
/man/fars_summarize_years.Rd
|
bf5e0f8da36b84c9a382f18d29b5b834cd11134f
|
[] |
no_license
|
ynren1020/fars
|
2102eaf4664e3f1ecf4e9f1fdfb8d997958206ff
|
325400d5e4b25a8b1680956b452e481b5e373c24
|
refs/heads/master
| 2022-11-15T08:15:51.511282
| 2020-07-07T17:21:40
| 2020-07-07T17:21:40
| 277,640,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 620
|
rd
|
fars_summarize_years.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Sum of accidents by year and month}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{A list of numeric values which are years}
}
\value{
A data frame with counts of accidents for each month each year
}
\description{
Takes a list of years and summarizes how many accidents happened in each month
of each year. Returns a data frame with years as columns and months as rows;
each cell is the number of accidents that happened in that month (row) of that year (column).
}
|
bd5139bf3157e8c9374d1e03235005bda7c068f2
|
436fdfdedb2f0abcf6bcdc990934414f97374a52
|
/Presentations and templets/templets/Quant_1(.rmd, from YUE HU)/slides_gh-master/slides_gh-master/convert2PDF.R
|
c33ba3623c45789478f5e1e2e1a6936146789599
|
[] |
no_license
|
bi-wei/THU_Courses
|
da507ca86ec571531f184acee83e6da597e869b2
|
b6eb81734b854441da964af088e3cff5d2c7f56d
|
refs/heads/main
| 2023-05-11T15:27:57.669390
| 2021-05-25T03:59:47
| 2021-05-25T03:59:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 791
|
r
|
convert2PDF.R
|
# Convert rendered HTML slide decks to PDF.
library(pagedown)
# chrome_print() prints the rendered slides to PDF via Chrome.
pagedown::chrome_print("slides/conference/2019-ACPS/migrant_language.Rmd")
library(webshot)
# install_phantomjs()  # one-time setup required by webshot
## For Tsinghua Political Conference
# Build a file:// URL so webshot can load the local HTML deck.
file_name <- paste0("file:///", normalizePath("slides/conference/2018-MPSA/proficiencyTrust.html", winslash = "/"))
# webshot(file_name, "./slides/conference/2018-MPSA/proficiencyTrust.pdf", zoom = 0.28)
# vwidth/vheight set the capture viewport; delay gives the page time to render.
webshot(file_name, "./slides/conference/2018-MPSA/proficiencyTrust.pdf", vwidth = 272, vheight = 204, delay = 2)
file_name <- paste0("file:///", normalizePath("slides/teachingDemo/languagePolicy.html", winslash = "/"))
# webshot(file_name, "./slides/conference/2018-MPSA/proficiencyTrust.pdf", zoom = 0.28)
webshot(file_name, "./slides/teachingDemo/languagePolicy.pdf", vwidth = 272, vheight = 204, delay = 2)
|
92815a8ed7877d8242390a0d296e9ff499e12f91
|
55fe7eeb9397100fcd544c2b8bcecfcbcabdbb06
|
/REPSI_Tool_02.00_Mesaurement_Data/Query_99999_YYYY-MM-DD_HH-MI-SS.HS.R/Query_80501_2007-01-18_18-29-20.781.R
|
120f47a2b256f52d99f4474af7e1f430f468ec34
|
[
"Apache-2.0"
] |
permissive
|
walter-weinmann/repsi-tool
|
d5e7b71838dc92d61c1a06a2c7f2541a0c807b32
|
5677cdf1db38672eff7f1abcf6dca677eb93a89c
|
refs/heads/master
| 2021-01-10T01:34:55.746282
| 2016-01-26T05:31:17
| 2016-01-26T05:31:17
| 49,252,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
Query_80501_2007-01-18_18-29-20.781.R
|
# Auto-generated measurement script: clear any stale series before reloading.
if (exists("A_A")) remove("A_A")
if (exists("A_U")) remove("A_U")
if (exists("C_A")) remove("C_A")
if (exists("C_U")) remove("C_U")
# Measurement samples (units not stated in this script — presumably timings;
# TODO confirm against the generating tool).
A_U<-c(93268,101637,68510,61709,61641,61113,61610,61598,61607,62212,60639,61281,61040,67599,61458,60900,61396,61337,60574,62265,62823,61118,60756,61102,61758,66182,61513,60485,61130,60733,61000,63195,61021,60594,60647,61002,62622,62328,61092,61367,61522,61176,60883,61680,60779,61050,60638,62399,62376,61435)
A_A<-c(44715,39734,39842,18756,17903,18343,18947,18852,18164,18365,17969,18613,18210,19131,19853,19243,17875,17829,17886,18470,22609,19753,19986,17960,17940,17862,18132,17852,18825,18061,17698,18042,17593,17725,18046,17757,17942,17859,21726,18110,17648,17613,17744,17425,18412,18439,18087,18367,17579,18454)
# Boxplot statistics (median, hinges, whiskers, outliers) for each series present.
if (exists("A_U")) boxplot.stats(A_U)
if (exists("A_A")) boxplot.stats(A_A)
if (exists("C_U")) boxplot.stats(C_U)
if (exists("C_A")) boxplot.stats(C_A)
# Five-number summaries for each series present.
if (exists("A_U")) summary(A_U)
if (exists("A_A")) summary(A_A)
if (exists("C_U")) summary(C_U)
if (exists("C_A")) summary(C_A)
# Side-by-side horizontal notched boxplots comparing each A/C pair.
# NOTE(review): `match` is not a boxplot() argument — it is silently absorbed
# by `...`; also the guard tests only the *_U series while *_A is used too.
if (exists("A_U")) boxplot(A_A,A_U,col="lightblue",horizontal=TRUE,match=TRUE,names=c("(A_A)","(A_U)"),notch=TRUE)
if (exists("C_U")) boxplot(C_A,C_U,col="lightblue",horizontal=TRUE,match=TRUE,names=c("(C_A)","(C_U)"),notch=TRUE)
|
915b1bf235af693571ddbd051d56baa3a6786870
|
ba65d8b42dfce42e1a4594d5a58a815194082112
|
/R/filterByVAF.R
|
39e88c6ebc7ced920cc4f398eeab680973ec4d32
|
[
"MIT"
] |
permissive
|
acc-bioinfo/TMBleR
|
b4ac594173ecc2ead98fd19696136f0d235065a3
|
f3ded88b111b8db0867222aaa8be4bcd9fe8e80d
|
refs/heads/main
| 2023-06-19T22:21:49.508537
| 2021-07-16T16:42:20
| 2021-07-16T16:42:20
| 378,995,779
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
filterByVAF.R
|
#' Filter variants according to variant allele frequency
#'
#' This function removes from the input variants those with variant allele
#' frequency (VAF) inferior to the cutoff value provided. The AF field is
#' required in the input vcf file to apply this function.
#'
#' @param vcf a \code{CollapsedVCF} object containing somatic variants
#' @param vaf.cutoff minimum value of variant allele frequency accepted
#'     (numeric; integer values are accepted too)
#'
#' @return Returns a \code{CollapsedVCF} object containing only variants with
#' variant allele frequency above the cutoff
#'
#' @author Laura Fancello
#'
filterByVAF <- function(vcf, vaf.cutoff){
    # Sanity Checks -----------------------------------------------------------
    ## Check the input arguments
    # Use the two-argument form of methods::is() instead of comparing against
    # is(vcf)[1]: it also accepts subclasses of CollapsedVCF and avoids
    # depending on the order of the class vector.
    if (!methods::is(vcf, "CollapsedVCF")) {
        stop("No valid vcf provided.")
    }
    if (is.null(vaf.cutoff)) {
        stop("argument 'vaf.cutoff' is missing, with no default")
    }
    # is.numeric() accepts both double and integer cutoffs; the previous exact
    # class comparison rejected integer values such as 0L.
    if (!is.numeric(vaf.cutoff)) {
        stop("No valid vaf.cutoff provided: please indicate a numeric value")
    }
    # Filter by VAF -----------------------------------------------------------
    ## Remove variants not passing VAF filter
    if (!(is.null(VariantAnnotation::geno(vcf)$AF))) { # Check if allele frequency field (AF) is present
        # make sure that the metafield "number" in the input vcf is set to "A"
        # ##FORMAT=<ID=DP,Number=A
        # Take the first AF value of each record (multi-allelic records carry
        # one AF per alternate allele).
        vaf <- unname(rapply(VariantAnnotation::geno(vcf)$AF[,1], function(x) x[1], how="unlist"))
        # NOTE(review): records whose AF is NA give NA in the comparison below
        # and are dropped — confirm that is the intended policy.
        vcf_filtered <- vcf[vaf > vaf.cutoff]
    } else {stop("No AF (Allele Frequency) field found in vcf file")}
    return(vcf_filtered)
}
|
51f991423d73c9193976494ee49531657035acc5
|
7dda7d663a26f5923a592677795fa551e6ee9c76
|
/scripts/tema 3 graficos script ejemplos.R
|
18b9032a1bea95b4738ef1b001f39af7238873d5
|
[] |
no_license
|
wrondon23/baseball_R
|
c89b7711d2b8f60913e49007f21f09bcd9224027
|
b307ffbff5cd480d790d171800cc07546245a9b8
|
refs/heads/master
| 2020-09-19T02:57:30.692285
| 2019-12-10T11:40:20
| 2019-12-10T11:40:20
| 224,190,924
| 0
| 0
| null | 2019-11-26T12:47:35
| 2019-11-26T12:47:34
| null |
UTF-8
|
R
| false
| false
| 1,760
|
r
|
tema 3 graficos script ejemplos.R
|
# Load the batting dataset of Hall of Fame members
hof <- read.csv("./data/hofbatting.csv")
# Add the MidCareer variable: the midpoint of each player's career
hof$MidCareer <- with(hof, (From + To)/2)
# Bin mid-career years into labelled historical eras (factor)
hof$Era <- cut(hof$MidCareer,
               breaks=c(1800, 1900, 1919, 1941, 1960, 1976, 1993, 2050),
               labels=c("19th Century", "Dead Ball","Lively Ball",
                        "Integration", "Expansion", "Free Agency",
                        "Long Ball"))
T.Era <- table(hof$Era)
# Plots for factor (categorical) variables
barplot(table(hof$Era), xlab = "Era", ylab = "Frecuency",
        main = "Era of the Nonpitching Hall of Famers")
plot(table(hof$Era))
pie(table(hof$Era))
#****************************************************************
# Numeric variables
# NOTE(review): windows() only exists on Windows; dev.new() is portable.
windows(width=7, height = 3.5)
stripchart(hof$MidCareer, method = "jitter", pch = 1,
           xlab = "Mid Career"
           )
# BUG FIX: breaks was seq(1880, 200, by = 20), which errors with
# "wrong sign in 'by' argument"; the upper bound was presumably meant
# to be 2000 to span the mid-career years.
hist(hof$MidCareer, xlab = "Mid Career", main ="",
     breaks = seq(1880, 2000, by = 20))
#************************************************************
# OPS against mid-career year with a lowess smoother;
# identify() labels 4 points chosen interactively by the user.
with(hof, plot(MidCareer, OPS))
with(hof, lines(lowess(MidCareer,OPS, f=0.3)))
with(hof, identify(MidCareer, OPS, Player, n=4))
#***********************************************************
# On-base percentage vs slugging percentage scatterplot
with(hof, plot(OBP, SLG, xlim = c(0.25,0.50),
               ylim = c(0.28,0.75), pch=19,
               xlab = "On-Base Percentage",
               ylab = "Slugging Percentaje"
               ))
with(hof, identify(OBP, SLG, Player, n=6))
#******************************************************
# Compare home-run rate across eras
hof$HR.Rate <- with(hof, HR/AB)
# TODO: stripchart of HR.Rate by Era
|
5d5ad71324f4172ad8cefd2eadbb3b3714a03f2b
|
b58787ca244bec6e3788d2c875958b04a27fcd22
|
/regression3_NonNormalityAssumption/PlottingDensityHist.R
|
53e6c680f87f0124c398645a5bd9d18961407db9
|
[] |
no_license
|
anhnguyendepocen/CSE845_R_tutorials
|
917d4a479ea3d29afae7507c17710fdd2b0359a4
|
1c5a872de767f23a1ce9f0ff7802f522df98e1b0
|
refs/heads/master
| 2020-03-14T21:21:37.232080
| 2013-05-27T01:14:01
| 2013-05-27T01:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 754
|
r
|
PlottingDensityHist.R
|
# Compare histogram bin counts and kernel-density bandwidths on a
# N(20, 4) sample of size 100, then repeat with size 1000.
x <- rnorm(100, 20, 4)
par(mfrow = c(2, 2))
hist(x, main = "default breaks")
for (b in c(4, 8, 20)) {
  hist(x, breaks = b, main = paste(b, "breaks"))
}
par(mfrow = c(2, 2))
plot(density(x), main = "default bw")
for (bw in c(0.5, 1, 2)) {
  plot(density(x, bw = bw), main = paste("bw =", bw))
}
# repeat with sample size = 1000
x <- rnorm(1000, 20, 4)
par(mfrow = c(2, 2))
hist(x, main = "default breaks")
for (b in c(4, 8, 20)) {
  hist(x, breaks = b, main = paste(b, "breaks"))
}
par(mfrow = c(2, 2))
plot(density(x), main = "default bw")
for (bw in c(0.5, 1, 2)) {
  plot(density(x, bw = bw), main = paste("bw =", bw))
}
|
52ffdfa7467c092df8424683c6ebf1e4f46ce18d
|
b6fd60488d5ce74432cec8dea776ab22de6b4e00
|
/tokenize.R
|
493c6f895f335279ff098cdbe4c80e0e0bb623bb
|
[] |
no_license
|
sarafy23/CapstoneProject
|
302dd8cf7cd1309b9880c588ed4bbfdcfaf2fa41
|
b7942a8a0910ef1f6c9f6c0c16e83e987e10ab1f
|
refs/heads/master
| 2023-05-28T23:32:54.764056
| 2021-06-15T15:17:18
| 2021-06-15T15:17:18
| 377,179,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,466
|
r
|
tokenize.R
|
library(dplyr) #need for operator
library(tidyr) #need for separate
library(tokenizers) #need for tokenize_ngrams
#create ngrams
#create function that tokenizes, tabluates (freq count), turns it to dataframe, then ranks them based on freq count
ngramTokenizer <- function(theCorpus, ngramCount) {
    # Tokenize every document into n-grams of exactly ngramCount words.
    tokens <- tokenize_ngrams(sapply(theCorpus, as.character),
                              n = ngramCount, n_min = ngramCount)
    # Tabulate occurrences across all documents and return a data frame
    # ranked by descending frequency.
    freq_table <- data.frame(table(unlist(tokens)))
    freq_table[order(freq_table$Freq, decreasing = TRUE), ]
}
# Create n-gram frequency tables using the function above
#unigram<-ngramTokenizer(finalCorpus,1)
ngramTwo <-ngramTokenizer(finalCorpusDF3,2) #same as if you had done ngramTwo2 <-ngramTokenizer(finalCorpus,2) where finalCorpus is file that was not converted to DF
ngramThree <-ngramTokenizer(finalCorpusDF3,3)
ngramFour<-ngramTokenizer(finalCorpusDF3,4)
#ngramFive<-ngramTokenizer(finalCorpusDF3,5)
#' ## Separate words
# Split each n-gram string (column Var1) into one column per word.
ngramTwos <- ngramTwo %>% separate(Var1 , c("word1", "word2"), sep = " ")
ngramThrees <- ngramThree %>% separate(Var1 , c("word1", "word2", "word3"), sep = " ")
ngramFours<- ngramFour %>% separate(Var1 , c("word1", "word2", "word3", "word4"), sep = " ")
# NOTE(review): the commented line below separates ngramFour into five words —
# presumably it should use ngramFive; verify before re-enabling.
#ngramFives<- ngramFour %>% separate(Var1 , c("word1", "word2", "word3", "word4","word5"), sep = " ")
|
90131da213818b37599ceb1100b93277970b0f1d
|
f1ab6e90d0e38653d4f20906798b188caa6b5b9e
|
/code/distance_sim_functions.R
|
be741f0e547e1f736246454a8a134069561fe135
|
[] |
no_license
|
raikens1/PilotMatch
|
73f497a0b5b3c408d2d0cf6efd883dc17cd1db9a
|
313872482f81d39e114396b499bdcff60d4867dc
|
refs/heads/master
| 2021-07-07T05:57:03.323388
| 2020-08-17T23:17:17
| 2020-08-17T23:17:17
| 169,315,951
| 1
| 1
| null | 2020-03-24T21:11:08
| 2019-02-05T21:29:35
|
HTML
|
UTF-8
|
R
| false
| false
| 7,233
|
r
|
distance_sim_functions.R
|
require("optmatch", quietly = T)
require(dplyr, quietly = T)
require(magrittr, quietly = T)
require(ggplot2, quietly = T)
require(rlang, quietly = T)
require(tidyr, quietly = T)
require(sensitivitymw, quietly = T)
require(tidyselect, quietly = T)
require(bindrcpp, quietly = T)
require(sensitivityfull, quietly = T)
source("../code/basic_sim_functions.R")
#' @title Simulate for distances
#' @description perform matchings like simulate, but with fixed k, and return distances between matches
#' rather than effect estimate and gamma
#' @param df, a data.frame from generate_data
#' @param prop_model, the propensity score model
#' @param prog_model, the prognostic score model
#' @param k, the number of controls to match to each treated
#' @param true_rho the actual value of rho
#' @param gamma boolean for whether or not to compute gamma (takes longer if TRUE)
#' @return a data.frame with results from propensity, mahalanobis, and buffalo matching
simulate_for_distances <- function(df,
                      prop_model = formula(t ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10),
                      prog_model = formula(y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10),
                      verbose = FALSE,
                      k = 3,
                      true_rho = 0,
                      gamma = FALSE){
  # if not enough controls, do less than 1:k, but print an error
  # NOTE(review): the guard uses a fixed factor of 5 rather than k — confirm
  # this threshold is intentional.
  if(sum(df$t)*5 > nrow(df)){
    kmax = floor(nrow(df)/sum(df$t))-2
    k = min(k, kmax)
    message(paste0("Insufficient controls. Doing 1:", k, " matching instead"))
  }
  # propensity score matching for k = 1:10
  propensity <- glm(prop_model, family = binomial(), data = df)
  prop_match <- pairmatch(propensity, controls = k, df)
  # 1:2 mahalanobis matching to select data to use for prognostic model
  mahal_dist <- match_on(prop_model, method = "mahalanobis", data = df)
  mahal_match <- pairmatch(mahal_dist, controls = 2, df)
  # perform prognostic score matching for k
  # (params_only = TRUE returns the fitted prognostic model and matched data)
  prog_list <- prognostic_match(df, propensity, mahal_match, prog_model, k, params_only = TRUE)
  # mahalanobis matching for k
  m_match <- pairmatch(mahal_dist, controls = k, df)
  # One result row per matching method: true/empirical matched distances,
  # the ATT estimate, and (optionally) the gamma sensitivity value.
  prop_df <- data_frame(method = "propensity",
                        k = k,
                        mean_dist = true_mean_dist(df, prop_match, true_rho),
                        var_dist = true_var_dist(df, prop_match, true_rho),
                        emp_mean_dist = emp_mean_dist(df, prop_match, propensity, prog_list$prognostic),
                        prog_dist = prog_dist(df, prop_match, true_rho),
                        prop_dist = prop_dist(df, prop_match),
                        estimate = att_estimate(reformat(df, prop_match, k)),
                        gamma = ifelse(gamma, gamma_sensitivity(reformat(df, prop_match, k)), NA))
  prog_df <- data_frame(method = "prognostic",
                        k = k,
                        mean_dist = true_mean_dist(prog_list$df, prog_list$prog_match, true_rho),
                        var_dist = true_var_dist(prog_list$df, prog_list$prog_match, true_rho),
                        emp_mean_dist = emp_mean_dist(prog_list$df, prog_list$prog_match, propensity, prog_list$prognostic),
                        prog_dist = prog_dist(prog_list$df, prog_list$prog_match, true_rho),
                        prop_dist = prop_dist(prog_list$df, prog_list$prog_match),
                        estimate = att_estimate(reformat(prog_list$df, prog_list$prog_match, k)),
                        gamma = ifelse(gamma, gamma_sensitivity(reformat(prog_list$df, prog_list$prog_match, k)), NA))
  mahal_df <- data_frame(method = "mahalanobis",
                        k = k,
                        mean_dist = true_mean_dist(df, m_match, true_rho),
                        var_dist = true_var_dist(df, m_match, true_rho),
                        emp_mean_dist = emp_mean_dist(df, m_match, propensity, prog_list$prognostic),
                        prog_dist = prog_dist(df, m_match, true_rho),
                        prop_dist = prop_dist(df, m_match),
                        estimate = att_estimate(reformat(df, m_match, k)),
                        gamma = ifelse(gamma, gamma_sensitivity(reformat(df, m_match, k)), NA))
  if (verbose){
    message("Completed One Simulation")
  }
  # return results for prop, prog, and mahal
  return(bind_rows(prop_df, prog_df, mahal_df))
}
#' @title Get True Distances
#' @description Mean Mahalanobis distance between matched treated and control
#'   individuals, computed on the true scores (psi, mu)
#' @param df data.frame of all individuals
#' @param match the result of a call to fullmatch or pairmatch
#' @param true_rho (float) the true value of rho
#' @return (float) mean Mahalanobis distance across matched pairs
true_mean_dist <- function(df, match, true_rho){
  with_psi <- df %>%
    mutate(psi = true_rho * X1 + sqrt(1 - true_rho^2) * X2)
  pair_dists <- matched.distances(match, match_on(t ~ psi + mu, data = with_psi))
  mean(unlist(pair_dists))
}
#' @title Get variance in True Distances
#' @description Variance of the Mahalanobis distance between matched treated
#'   and control individuals, computed on the true scores (psi, mu)
#' @param df data.frame of all individuals
#' @param match the result of a call to fullmatch or pairmatch
#' @param true_rho (float) the true value of rho
#' @return (float) variance of the matched Mahalanobis distances
true_var_dist <- function(df, match, true_rho){
  with_psi <- df %>%
    mutate(psi = true_rho * X1 + sqrt(1 - true_rho^2) * X2)
  pair_dists <- matched.distances(match, match_on(t ~ psi + mu, data = with_psi))
  var(unlist(pair_dists))
}
#' @title Get Empirical Distances
#' @description Mean Mahalanobis distance between matched treated and control
#'   individuals, computed on the estimated scores
#' @param df data.frame of all individuals
#' @param match the result of a call to fullmatch or pairmatch
#' @param propensity fitted propensity score model
#' @param prognosis fitted prognostic score model
#' @return (float) mean Mahalanobis distance across matched pairs
emp_mean_dist <- function(df, match, propensity, prognosis){
  scored <- df %>%
    mutate(prog = predict(prognosis, df), prop = predict(propensity, df))
  pair_dists <- matched.distances(match, match_on(t ~ prog + prop, data = scored))
  mean(unlist(pair_dists))
}
# Mean absolute difference in true prognostic score (psi) between each matched
# control and its treated counterpart.
prog_dist <- function(df, match, true_rho){
  labelled <- df %>%
    mutate(psi = true_rho * X1 + sqrt(1 - true_rho^2) * X2,
           m = as.character(match))
  treated <- labelled %>%
    filter(t == 1, !is.na(m)) %>%
    mutate(psi_treat = psi) %>%
    select(c(psi_treat, m))
  controls <- labelled %>%
    filter(t == 0, !is.na(m)) %>%
    full_join(treated, by = "m") %>%
    mutate(prog_dist = abs(psi - psi_treat))
  mean(controls$prog_dist)
}
# Mean absolute difference in true propensity score (mu) between each matched
# control and its treated counterpart.
#
# ROBUSTNESS: `true_rho` is never used in the body, yet previously had no
# default — the call site in simulate_for_distances passes only two arguments
# and worked solely through lazy evaluation. A NULL default makes the
# signature match actual usage while staying backward compatible.
prop_dist <- function(df, match, true_rho = NULL){
  ndf <- df %>% mutate(m = as.character(match))
  treat_df <- ndf %>% filter(t == 1, !is.na(m)) %>% mutate(mu_treat = mu) %>% select(c(mu_treat, m))
  control_df <- ndf %>% filter(t == 0, !is.na(m)) %>% full_join(treat_df, by = "m") %>% mutate(prop_dist = abs(mu-mu_treat))
  return(mean(control_df$prop_dist))
}
|
e37296cbb2d00b2919c2390d8a3127eb2506703f
|
5137d2b74fa00d70152f5a18d0a24d1b4785cff0
|
/SingleCellUtils/removeFromEachListField.R
|
adebfdfd55535a2d172f9ddc9badce83938e7aa5
|
[] |
no_license
|
noemiandor/Utils
|
02462f322102d602cb22d028fb7ac3ff4084489e
|
9de792c32e46dbef7485e34400b4e4153cd41c4d
|
refs/heads/master
| 2022-05-02T08:00:38.590908
| 2022-04-04T16:06:42
| 2022-04-04T16:06:42
| 192,091,982
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,615
|
r
|
removeFromEachListField.R
|
#' Remove the given row/column indices from every matrix-valued field of a
#' (possibly nested) list, then drop the matching elements from vector fields
#' whose length equals the pre-removal row/column count of the matrices.
#'
#' @param X a list of matrices, vectors and sub-lists
#' @param rowIdx indices of rows to remove (NULL = keep all rows)
#' @param colIdx indices of columns to remove (NULL = keep all columns)
#' @param ignoreInconsistentDim if TRUE, matrix fields whose dimensions
#'   disagree with the previously seen ones are skipped instead of erroring
#' @return X with the requested rows/columns/elements removed
removeFromEachListField<-function(X,rowIdx=NULL, colIdx=NULL, ignoreInconsistentDim=FALSE){
  ##First remove from matrices & retrieve dimensions
  nrows=NULL; ncols=NULL;
  for(f in names(X)){
    x=X[[f]]
    print(paste("Changing list-field ",f))
    # BUG FIX: class(x)=="list" is a vector comparison that errors under
    # R >= 4.2 when x carries more than one class; identical() preserves the
    # original single-class semantics safely.
    if(identical(class(x), "list")){
      ##Apply recursively:
      x=removeFromEachListField(x,rowIdx=rowIdx, colIdx=colIdx, ignoreInconsistentDim = ignoreInconsistentDim )
    }else if(is.null(dim(x))){
      next;
    }else{
      ##Record dimensions and check consistency
      if( (!is.null(rowIdx) && !is.null(nrows) && nrows!=nrow(x)) || (!is.null(colIdx) && !is.null(ncols) && ncols!=ncol(x)) ){
        if(ignoreInconsistentDim){
          print(paste("Field ignored due to inconsistent dimensions"))
          next
        }else{
          stop(paste("Field ",f,"contains matrix with inconsistent dimensions"))
        }
      }
      # Dimensions are recorded BEFORE removal so vector fields below are
      # matched against the original matrix size.
      nrows=nrow(x); ncols=ncol(x);
      if(!is.null(rowIdx)){
        x=x[-rowIdx,,drop=FALSE]
      }
      if(!is.null(colIdx)){
        x=x[,-colIdx,drop=FALSE]
      }
    }
    X[[f]]=x
  }
  ##Next remove from vectors
  # NOTE(review): sub-lists also lack dim() and are revisited here; a sub-list
  # whose length equals nrows/ncols would lose elements — confirm intended.
  for(f in names(X)){
    x=X[[f]]
    if(!is.null(dim(x))){
      next;
    }
    if(!is.null(nrows) && length(x)==nrows && !is.null(rowIdx)){
      x=x[-rowIdx]
    }
    if(!is.null(ncols) && length(x)==ncols && !is.null(colIdx)){
      x=x[-colIdx]
    }
    X[[f]]=x
  }
  return(X)
}
|
fc87103088602013c177bfeaf40a359408e0efb0
|
d457217ad738c0125a92d526bee19a63e920bf92
|
/r/spotify.R
|
8d64b5f97452867688e326e4cd4c4c13b4ffd0f6
|
[] |
no_license
|
Dantesean/cm
|
9b17a6f58ece72520be3754c81652e2c2da93b7e
|
54eff1375dbb43d60c95803e14e66c5a707737b8
|
refs/heads/master
| 2020-04-25T16:28:11.796198
| 2019-03-31T20:03:37
| 2019-03-31T20:03:37
| 172,912,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,752
|
r
|
spotify.R
|
#----- SPOTIFY CREDENTIAL FOR SPOTIFYR -----#
# SECURITY NOTE(review): client credentials are hard-coded and committed to
# the repository. They should be rotated and loaded from an untracked
# .Renviron file instead. The guards below only set them when not already
# present in the environment.
if (Sys.getenv('SPOTIFY_CLIENT_ID') == '')
    Sys.setenv(SPOTIFY_CLIENT_ID = '557ece0477ae4ea5807f7a809a85a597')
if (Sys.getenv('SPOTIFY_CLIENT_SECRET') == '')
    Sys.setenv(SPOTIFY_CLIENT_SECRET = '24684514c966448b87e8809c1c5a4156')
#' Normalise vectors for Computational Musicology.
#'
#' Applies one of several vector-normalisation strategies, selected by name
#' (partial matching is supported, e.g. "euc" for "euclidean"). Several
#' aliases map to the same strategy (L1/manhattan, L2/euclidean,
#' maximum/chebyshev, clr/aitchison).
compmus_normalise <- compmus_normalize <- function(v, method = "euclidean")
{
    ## Normalisation strategies, shared across their aliases
    manhattan_fn <- function(v) v / sum(abs(v))
    euclidean_fn <- function(v) v / sqrt(sum(v^2))
    chebyshev_fn <- function(v) v / max(abs(v))
    clr_fn       <- function(v) {lv <- log(v); lv - mean(lv)}
    norm_fns <-
        list(
            harmonic  = function(v) v * sum(1 / abs(v)),
            manhattan = manhattan_fn,
            L1        = manhattan_fn,
            euclidean = euclidean_fn,
            L2        = euclidean_fn,
            chebyshev = chebyshev_fn,
            maximum   = chebyshev_fn,
            aitchison = clr_fn,
            clr       = clr_fn)
    ## Resolve the method name (pmatch keeps partial-matching behaviour)
    idx <- pmatch(method, names(norm_fns))
    if (is.na(idx))
        stop('The method name is ambiguous or the method is unsupported.')
    norm_fns[[idx]](v)
}
#' Compute pairwise distances for Computational Musicology in long format.
#'
#' We use a number of distance measures in Computational Musicology.
#' This function brings them together into one place, along with common
#' alternative names. It is designed for convenience, not speed.
#'
#' Returns one row per (x segment, y segment) pair with the segments' start
#' times/durations and the distance `d` between their feature vectors.
compmus_long_distance <- function(xdat, ydat, feature, method = "euclidean")
{
    # Capture the feature column unevaluated for tidy evaluation below
    feature <- enquo(feature)
    ## Supported functions
    manhattan <- function(x, y) sum(abs(x - y))
    euclidean <- function(x, y) sqrt(sum((x - y) ^ 2))
    chebyshev <- function(x, y) max(abs(x - y))
    pearson   <- function(x, y) 1 - cor(x, y)
    cosine    <- function(x, y)
    {
        1 - sum(compmus_normalise(x, "euc") * compmus_normalise(y, "euc"))
    }
    angular   <- function(x, y) 2 * acos(1 - cosine(x, y)) / pi
    aitchison <- function(x, y)
    {
        euclidean(compmus_normalise(x, "clr"), compmus_normalise(y, "clr"))
    }
    ## Method aliases
    METHODS <-
        list(
            manhattan   = manhattan,
            cityblock   = manhattan,
            taxicab     = manhattan,
            L1          = manhattan,
            totvar      = manhattan,
            euclidean   = euclidean,
            L2          = euclidean,
            chebyshev   = chebyshev,
            maximum     = chebyshev,
            pearson     = pearson,
            correlation = pearson,
            cosine      = cosine,
            angular     = angular,
            aitchison   = aitchison)
    ## Function selection (pmatch allows partial method names)
    if (!is.na(i <- pmatch(method, names(METHODS))))
        # Cross every x segment with every y segment, then compute the
        # distance for each pair of feature vectors with map2_dbl.
        bind_cols(
            crossing(
                xdat %>% select(xstart = start, xduration = duration),
                ydat %>% select(ystart = start, yduration = duration)),
            xdat %>% select(x = !!feature) %>%
                crossing(ydat %>% select(y = !!feature)) %>%
                transmute(d = map2_dbl(x, y, METHODS[[i]])))
    else
        stop('The method name is ambiguous or the method is unsupported.')
}
#' Gathers chroma vectors into long format.
#'
#' Gathers chroma vectors into long format for Computational Musicology.
#' Each pitch-class column (C through B) becomes a (pitch_class, value)
#' pair in long format.
compmus_gather_chroma <- function(data)
{
    data %>%
    # Expand the nested `pitches` list-column into one column per pitch class
    mutate(pitches = map(pitches, bind_rows)) %>% unnest(pitches) %>%
    gather("pitch_class", "value", C:B) %>%
    # fct_shift rotates the factor levels by 3 — presumably to put the
    # pitch classes in the intended plotting order; verify against plots.
    mutate(pitch_class = fct_shift(factor(pitch_class), 3))
}
# Tonal Template
circshift <- function(v, n) {if (n == 0) v else c(tail(v, n), head(v, -n))}
# Pitch-class templates, indexed C C# D Eb E F F# G Ab A Bb B.
# The chord templates are binary indicators of the chord tones rooted on C;
# other roots are produced by rotating them with circshift().
# C  C# D  Eb E  F  F# G  Ab A  Bb B
major_chord <-
    c(1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0)   # C, E, G
minor_chord <-
    c(1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0)   # C, Eb, G
seventh_chord <-
    c(1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0)   # C, E, G, Bb (dominant 7th)
# Key profiles — these weights match the Krumhansl–Kessler probe-tone
# profiles (major/minor); TODO confirm the intended citation.
major_key <-
    c(6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88)
minor_key <-
    c(6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17)
# Chord templates for every root around the circle of fifths, built by
# rotating the C-rooted major/minor/seventh templates with circshift().
# NOTE(review): the trailing comma after the 'D#:min' row is tolerated by
# recent tibble versions but errors on older ones — consider removing it.
chord_templates <-
    tribble(
        ~name  , ~template,
        'Gb:7'  , circshift(seventh_chord,  6),
        'Gb:maj', circshift(major_chord,    6),
        'Bb:min', circshift(minor_chord,   10),
        'Db:maj', circshift(major_chord,    1),
        'F:min' , circshift(minor_chord,    5),
        'Ab:7'  , circshift(seventh_chord,  8),
        'Ab:maj', circshift(major_chord,    8),
        'C:min' , circshift(minor_chord,    0),
        'Eb:7'  , circshift(seventh_chord,  3),
        'Eb:maj', circshift(major_chord,    3),
        'G:min' , circshift(minor_chord,    7),
        'Bb:7'  , circshift(seventh_chord, 10),
        'Bb:maj', circshift(major_chord,   10),
        'D:min' , circshift(minor_chord,    2),
        'F:7'   , circshift(seventh_chord,  5),
        'F:maj' , circshift(major_chord,    5),
        'A:min' , circshift(minor_chord,    9),
        'C:7'   , circshift(seventh_chord,  0),
        'C:maj' , circshift(major_chord,    0),
        'E:min' , circshift(minor_chord,    4),
        'G:7'   , circshift(seventh_chord,  7),
        'G:maj' , circshift(major_chord,    7),
        'B:min' , circshift(minor_chord,   11),
        'D:7'   , circshift(seventh_chord,  2),
        'D:maj' , circshift(major_chord,    2),
        'F#:min', circshift(minor_chord,    6),
        'A:7'   , circshift(seventh_chord,  9),
        'A:maj' , circshift(major_chord,    9),
        'C#:min', circshift(minor_chord,    1),
        'E:7'   , circshift(seventh_chord,  4),
        'E:maj' , circshift(major_chord,    4),
        'G#:min', circshift(minor_chord,    8),
        'B:7'   , circshift(seventh_chord, 11),
        'B:maj' , circshift(major_chord,   11),
        'D#:min', circshift(minor_chord,    3),
    )
# Key templates (major/minor key profiles for every tonic), built by
# rotating the C-rooted key profiles with circshift().
key_templates <-
    tribble(
        ~name  , ~template,
        'Gb:maj', circshift(major_key,  6),
        'Bb:min', circshift(minor_key, 10),
        'Db:maj', circshift(major_key,  1),
        'F:min' , circshift(minor_key,  5),
        'Ab:maj', circshift(major_key,  8),
        'C:min' , circshift(minor_key,  0),
        'Eb:maj', circshift(major_key,  3),
        'G:min' , circshift(minor_key,  7),
        'Bb:maj', circshift(major_key, 10),
        'D:min' , circshift(minor_key,  2),
        'F:maj' , circshift(major_key,  5),
        'A:min' , circshift(minor_key,  9),
        'C:maj' , circshift(major_key,  0),
        'E:min' , circshift(minor_key,  4),
        'G:maj' , circshift(major_key,  7),
        'B:min' , circshift(minor_key, 11),
        'D:maj' , circshift(major_key,  2),
        'F#:min', circshift(minor_key,  6),
        'A:maj' , circshift(major_key,  9),
        'C#:min', circshift(minor_key,  1),
        'E:maj' , circshift(major_key,  4),
        'G#:min', circshift(minor_key,  8),
        'B:maj' , circshift(major_key, 11),
        'D#:min', circshift(minor_key,  3))
|
d563c6d6beb8896ea50c2052c5fd0f6432d58e5f
|
08bd5a7e355670ae7677e7676dc216e0a14c2a5a
|
/BatchCorrection_and_QC/pca_analysis.R
|
9d54123afc508a03506a8fe6a87c62b236b41f0f
|
[] |
no_license
|
zmx21/GSK_NI
|
1b79f1933e245a1deab902eebf83370f8c6af4ea
|
3e89f15e4d906d704193346fa0ea6b914e89a881
|
refs/heads/master
| 2020-03-18T08:09:21.607329
| 2018-09-02T03:15:19
| 2018-09-02T03:15:19
| 134,493,420
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,225
|
r
|
pca_analysis.R
|
##########################################################################################################
#PCA based on gene expressions. Imports metadata, which can be visualized as colors on the PCA plot.
##########################################################################################################
#Import required Data
##########################################################################################################
#For Galatro et al, returns mapping between GSM, SRR and title.
##########################################################################################################
# Build the SRR -> GSM -> sample-title mapping for the Galatro dataset.
#
# Reads the SRA run table (SRR/GSM pairs) and the GEO series matrix
# (GSM/title pairs) from their fixed paths and merges them on GSM.
#
# Returns a data frame with columns GSM, SRR and title.
GetSampleMapping <- function(){
  # SRR-to-GSM mapping, downloaded from SRA.
  runTable <- read.table(file = '/local/data/public/zmx21/zmx21_private/GSK/Galatro/SraRunTable.txt',
                         header = TRUE, sep = '\t')
  SRRToGSM <- data.frame(GSM = runTable$Sample_Name, SRR = runTable$Run)
  # GEO series matrix: header lines hold tab-separated, double-quoted fields.
  matrixFile <- readLines(con = "/local/data/public/zmx21/zmx21_private/GSK/Galatro/GSE99074-GPL16791_series_matrix.txt")
  # Helper: find the header line matching `pattern`, split it on tabs,
  # drop the leading row label, and strip the surrounding quotes from
  # each remaining field.
  parseMatrixLine <- function(pattern) {
    line <- matrixFile[grepl(pattern, matrixFile)]
    fields <- unlist(strsplit(line, '\t'))
    fields <- fields[2:length(fields)]
    substring(fields, 2, nchar(fields) - 1)
  }
  sampleTitles <- parseMatrixLine('Sample_title')
  sampleGSM <- parseMatrixLine('Sample_geo_accession')
  # Merge the two mappings: SRR -> GSM -> title.
  titleToGSM <- data.frame(title = sampleTitles, GSM = sampleGSM)
  mapping <- merge(SRRToGSM, titleToGSM)
  return(mapping)
}
#Collections metadata, from a count matrix (where colnames are sample names)
#also need the tables, which is a list of tables with run info, alignment info, and read distribution info.
# Assemble a per-sample metadata table for every column (sample) of
# `inputMatrix`, joining SRA run metadata, Salmon alignment statistics and
# RSeQC read-distribution QC across the Galatro, Gosselin and Olah datasets.
#
# inputMatrix - expression matrix; colnames are the sample identifiers
# full        - if TRUE, the returned data frame also carries the full
#               (transposed) expression values alongside the metadata
#
# Returns a data frame with one row per sample; rownames are short labels
# (GAL.., GOS.., OLA_..) derived from fixed positions of the sample names.
CollectMetadata <- function(inputMatrix,full=F){
  library(dplyr)
  # Get metadata (downloaded from SRA), alignment info, and read distribution.
  runTable_Galatro <- read.table(file = '../../Galatro/SraRunTable.txt',header = T,sep = '\t',stringsAsFactors = F) %>%
    dplyr::select(Sample_Name,AvgSpotLen,gender,age) %>%
    {.[!duplicated(.),]} # Each sample has two runs, so remove duplicated rows
  runTable_Galatro_Brain <- read.table(file = '../../Galatro_Brain/SraRunTable.txt',header = T,sep = '\t',stringsAsFactors = F) %>%
    dplyr::select(Sample_Name,AvgSpotLen,gender,age) %>%
    {.[!duplicated(.),]} # Each sample has two runs, so remove duplicated rows
  # Flag bulk-brain samples (1) vs the others (0), then pool the two
  # Galatro run tables.
  runTable_Galatro$BulkBrain <- rep(0,nrow(runTable_Galatro))
  runTable_Galatro_Brain$BulkBrain <- rep(1,nrow(runTable_Galatro_Brain))
  runTable_Galatro <- rbind(runTable_Galatro,runTable_Galatro_Brain)
  alignmentTable_Galatro <- rbind(read.table(file='../../Galatro/Salmon_aligned_merged/multiqc_Salmon_merged/multiqc_general_stats.txt',header = T,stringsAsFactors = F))
  readDist_Galatro <- rbind(read.table(file='../../Galatro/STAR_aligned_merged/multiqc_data/multiqc_rseqc_read_distribution.txt',header=T,stringsAsFactors = F))
  # Strip the file-extension suffix from the RSeQC sample names.
  readDist_Galatro$Sample <- sapply(readDist_Galatro$Sample,function(x) unlist(strsplit(x = x,split = '[.]'))[1])
  runTable_Gosselin <- read.table(file = '../../Gosselin/SraRunTable_Parsed.txt',header = T,sep = '\t',stringsAsFactors = F) %>%
    dplyr::select(Sample_Name,AvgSpotLen,gender,age) %>% dplyr::mutate(BulkBrain = 0)
  alignmentTable_Gosselin <- read.table(file='../../Gosselin/multiqc_general_stats.txt',header = T,stringsAsFactors = F)
  # Gosselin/Olah read-distribution tables lack the intergenic columns;
  # add zero-filled columns so the three tables can be rbind-ed together.
  readDist_Gosselin <- read.table(file='../../Gosselin/multiqc_rseqc_read_distribution.txt',header=T,stringsAsFactors = F) %>%
    dplyr::mutate(other_intergenic_tag_count=0,other_intergenic_tag_pct=0)
  alignmentTable_Olah <- read.table(file='../../Olah/multiqc_general_stats.txt',header = T,stringsAsFactors = F)
  readDist_Olah <- read.table(file='../../Olah/multiqc_rseqc_read_distribution.txt',header=T,stringsAsFactors = F) %>%
    dplyr::mutate(other_intergenic_tag_count=0,other_intergenic_tag_pct=0)
  # Pooled lookup tables.  NOTE(review): runTable has no Olah entries, so
  # Olah samples get NA run metadata after the join below — confirm intended.
  tables <- list(runTable=rbind(runTable_Galatro,runTable_Gosselin),
                 alignmentTable=rbind(alignmentTable_Galatro,alignmentTable_Gosselin,alignmentTable_Olah),
                 readDist=rbind(readDist_Galatro,readDist_Gosselin,readDist_Olah))
  allSamples <- colnames(inputMatrix)
  if(full){
    # Also carry the (transposed) expression values in the output.
    df <- as.data.frame(t(inputMatrix),row.names = NULL)
    df$Sample_Name <- allSamples
  }else{
    df <- data.frame(Sample_Name = allSamples)
  }
  # Infer the source dataset from the sample-name prefix.
  dataset <- sapply(allSamples,function(x) ifelse(grepl('GSM',x),'Galatro',ifelse(grepl('SRR',x),'Gosselin','Olah')))
  df$dataset <- dataset
  # Join all metadata tables, keeping only the columns needed downstream.
  dfMetadata <- dplyr::left_join(data.frame(Sample_Name = df$Sample_Name,stringsAsFactors = F),
                                 dplyr::select(tables$runTable,readLength=AvgSpotLen,gender=gender,age=age,Sample_Name,BulkBrain),by=c('Sample_Name' = 'Sample_Name')) %>%
    dplyr::left_join(dplyr::select(tables$alignmentTable,Sample_Name = Sample,
                                   numReads=Salmon_num_mapped,mappingRate=Salmon_percent_mapped),
                     by = c('Sample_Name'='Sample_Name')) %>%
    dplyr::left_join(dplyr::select(tables$readDist,Sample_Name=Sample,exonTags = cds_exons_tag_count,
                                   intronTags = introns_tag_count,totalTags = total_tags,intergenicTags=other_intergenic_tag_count),by=c('Sample_Name'='Sample_Name'))
  df <- cbind(df,dfMetadata%>% dplyr::select(-Sample_Name))
  # Add expType (ExVivo/InVitro, from the library name) and instrument for
  # the Gosselin samples; other datasets get NA for these columns.
  GosselinAddtlData <- read.table(file = '../../Gosselin/SraRunTable_Parsed.txt',header = T,sep = '\t',stringsAsFactors = F) %>%
    dplyr::select(Sample_Name,Instrument,Library_Name) %>%
    dplyr::mutate(expType = ifelse(grepl('ExVivo',Library_Name),'ExVivo','InVitro')) %>%
    dplyr::select(-Library_Name)
  df <- cbind(df,left_join(data.frame(Sample_Name=df$Sample_Name,stringsAsFactors = F),GosselinAddtlData,by=c('Sample_Name' = 'Sample_Name')) %>% dplyr::select(-Sample_Name))
  # Short, human-readable rownames built from fixed character positions of
  # the sample names: OLA_<..>_<..> / GAL<..> / GOS<..>.
  rownames(df) <- sapply(1:nrow(df),function(i) ifelse(df$dataset[i]=="Olah",
                                                       paste0('OLA_',substr(df$Sample_Name[i],11,12),'_',
                                                              substr(df$Sample_Name[i],20,22)),
                                                       ifelse(df$dataset[i]=='Galatro',
                                                              paste0('GAL',substr(df$Sample_Name[i],9,10)),
                                                              paste0('GOS',substr(df$Sample_Name[i],9,10)))))
  return(df)
}
# Run PCA on the gene/transcript expression columns of the sample table
# produced by CollectMetadata (metadata columns are excluded).
#
# countMatrix - expression matrix; rownames are gene/transcript ids,
#               colnames are sample ids
#
# Returns a list with the prcomp fit (PCA) and the full sample table (Df).
CalcPCA <- function(countMatrix){
  sampleTable <- CollectMetadata(countMatrix, full = T)
  # Only columns whose names match a gene/transcript id enter the PCA.
  expressionCols <- colnames(sampleTable) %in% rownames(countMatrix)
  pcaFit <- prcomp(sampleTable[, which(expressionCols)])
  list(PCA = pcaFit, Df = sampleTable)
}
#Calc PCA and plot results.
# results <- CalcPCA(SalmonTPM_Gene)
# SalmonGeneLevelDf <- results$Df
# SalmonGeneLevelPCA <- results$PCA
library(ggplot2)
library(ggfortify)
# autoplot(results$PCA, data = results$Df, colour = 'readLength',size=4,shape=F) + ggtitle('PCA of Read Length')
# scale_colour_gradientn(colours = rainbow(7))
# autoplot(SalmonGeneLevelPCA, data = SalmonGeneLevelDf, colour = 'sd',size=4,shape=F) + ggtitle('PCA of SD') +
# scale_colour_gradientn(colours = rainbow(7))
# autoplot(SalmonGeneLevelPCA, data = SalmonGeneLevelDf, colour = 'mappingRate',size=4,shape=F) + ggtitle('PCA of Mapping Rate') +
# scale_colour_gradientn(colours = rainbow(7))
# autoplot(SalmonGeneLevelPCA, data = SalmonGeneLevelDf, colour = 'exonReads',size=4,shape=F) + ggtitle('PCA of Mapping Rate') +
# scale_colour_gradientn(colours = rainbow(7))
|
0a2f6959e8090e628f48bb03c99ef8001ae98f77
|
d70a7c8dd603fd9564fa6fbbfc6db1440d3f2c27
|
/man/filter_common_genes.Rd
|
bed140939c42eb5755487ecaadaf446e27b82901
|
[] |
no_license
|
cran/tidyestimate
|
47cf411175cb1e43d4d295cade60ec0bb57f3362
|
2b67ce554714c972c38307d8eda5f42d1b5cbe78
|
refs/heads/master
| 2023-09-03T00:06:39.309616
| 2023-08-21T03:50:02
| 2023-08-21T04:30:34
| 379,601,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,536
|
rd
|
filter_common_genes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_common_genes.R
\name{filter_common_genes}
\alias{filter_common_genes}
\title{Remove non-common genes from data frame}
\usage{
filter_common_genes(
df,
id = c("entrezgene_id", "hgnc_symbol"),
tidy = FALSE,
tell_missing = TRUE,
find_alias = FALSE
)
}
\arguments{
\item{df}{a \code{data.frame} of RNA expression values, with columns corresponding
to samples, and rows corresponding to genes. Either rownames or the first
column can contain gene IDs (see \code{tidy})}
\item{id}{either \code{"entrezgene_id"} or \code{"hgnc_symbol"}, whichever
\code{df} contains.}
\item{tidy}{logical. If rownames contain gene identifier, set \code{FALSE}.
If first column contains gene identifier, set \code{TRUE}}
\item{tell_missing}{logical. If \code{TRUE}, prints message of genes in
common gene set that are not in supplied data frame.}
\item{find_alias}{logical. If \code{TRUE} and \code{id = "hgnc_symbol"}, will
attempt to find if genes missing from \code{common_genes} are going under
an alias. See details for more information.}
}
\value{
A \code{tibble}, with gene identifiers as the first column
}
\description{
As ESTIMATE score calculation is sensitive to the number of genes used, a set
of common genes used between six platforms has been established (see
\code{?tidyestimate::common_genes}). This function will filter for only those
genes.
}
\details{
The \code{find_aliases} argument will attempt to find aliases for HGNC
symbols in \code{tidyestimate::common_genes} but missing from the provided
dataset. This will only run if \code{find_aliases = TRUE} and \code{id =
"hgnc_symbol"}.
This algorithm is very conservative: It will only make a match if the gene
from the common genes has only one alias that matches with only one gene from
the provided dataset, \emph{and} the gene from the provided dataset with
which it matches only matches with a single gene from the list of common
genes. (Note that a single gene may have many aliases). Once a match has been
made, the gene in the provided dataset is updated to the gene name in the
common gene list.
While this method is fairly accurate, it is also a heuristic. Therefore, it is
disabled by default. Users should check which genes are becoming reassigned
to ensure accuracy.
The method of generation of these aliases can be found at
\code{?tidyestimate::common_genes}
}
\examples{
filter_common_genes(ov, id = "hgnc_symbol", tidy = FALSE, tell_missing = TRUE, find_alias = FALSE)
}
|
aacf85b4393ff5fa9de2b9a873e25f96e2edaf5a
|
6ce63673dd382556a6c1d3f8ab978c704ede84d0
|
/英国国王死亡年龄/英国国王死亡时间.R
|
a1c0685ff1829193948e129bba415b8699610564
|
[] |
no_license
|
zxyzjh233333/-
|
a7c33d9bc58a8954965707eca9f91cfa117a4880
|
51b8449c944313930d9814ae57cc625c2f8b7575
|
refs/heads/main
| 2023-06-25T18:14:10.013667
| 2021-05-04T08:50:37
| 2021-05-04T08:50:37
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 7,147
|
r
|
英国国王死亡时间.R
|
## Case study: ages at death of 42 successive kings of England
## (a non-seasonal time series), fitting and forecasting with ARIMA.

# 1. Load required packages, installing them first if missing.
#    (Fix: the original called install.packages() with no arguments,
#    which is an error in a non-interactive session.)
for (pkg in c("TTR", "forecast")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library("TTR")
library("forecast")

# 2. Read the raw data (skip the 3 header lines).
kings <- scan("http://robjhyndman.com/tsdldata/misc/kings.dat",skip=3)
kings

# 3. Store the data in a time-series object so R's time-series
#    functions can operate on it.
kingstimeseries <- ts(kings)
kingstimeseries

# 4. Plot the raw series.
plot.ts(kingstimeseries)

# 5. Smooth with a simple moving average (SMA) of order 3.  The series is
#    non-seasonal with roughly constant random fluctuation, so an additive
#    decomposition is appropriate.
kingstimeseriesSMA3 <- SMA(kingstimeseries,n=3)
plot.ts(kingstimeseriesSMA3)

# 6. Order 3 still leaves a lot of random variation; an order-8 SMA shows
#    the trend component more clearly (ages fall from ~55 to ~38, then
#    rise to ~73 towards the end of the series).
kingstimeseriesSMA8 <- SMA(kingstimeseries,n=8)
plot.ts(kingstimeseriesSMA8)

# 7. The raw series is not mean-stationary.  First-differencing removes
#    the trend, leaving the irregular component; the differenced series
#    looks stationary in mean and variance, so an ARIMA(p,1,q) model is a
#    candidate for the original series.
kingtimeseriesdiff1 <- diff(kingstimeseries, differences=1)
plot.ts(kingtimeseriesdiff1)

# 8. Correlogram (ACF) of the differenced series, lags 1-20.
acf(kingtimeseriesdiff1, lag.max=20)              # plot the ACF
acf(kingtimeseriesdiff1, lag.max=20, plot=FALSE)  # print the coefficients

# 9. Only the lag-1 autocorrelation (-0.360) exceeds the significance
#    bounds.  Partial correlogram (PACF), lags 1-20:
pacf(kingtimeseriesdiff1, lag.max=20)             # plot the PACF
pacf(kingtimeseriesdiff1, lag.max=20, plot=FALSE) # print the coefficients
# Lags 1-3 of the PACF are significant, negative, and tail off slowly
# (-0.360, -0.335, -0.321), dropping to zero after lag 3.  ARMA(3,0),
# ARMA(0,1) or a mixed ARMA(p,q) would all be plausible; by parsimony
# the ARMA(0,1) model is chosen.

# 10. Cross-check the order with automatic model selection; auto.arima()
#     searches over candidate orders and reports the best model.
auto.arima(kings)
# ARMA(0,1) on the first differences corresponds to ARIMA(0,1,1)
# (p = 0, d = 1, q = 1) on the original series.

# 11. Fit ARIMA(0,1,1) to the original series.
kingstimeseriesarima <- arima(kingstimeseries, order=c(0,1,1))
kingstimeseriesarima
# The reported ma1 coefficient (-0.7218) is the moving-average parameter.

# 12. Forecast the ages at death of the next five kings (43-47), with
#     80% and 95% prediction intervals.
kingstimeseriesforecasts <- forecast(kingstimeseriesarima, h=5)
kingstimeseriesforecasts
plot(kingstimeseriesforecasts)

# 13. Diagnostics: forecast errors should be uncorrelated (Ljung-Box test:
#     assume white noise; reject if p < 0.05) and look like zero-mean,
#     constant-variance noise.
acf(kingstimeseriesforecasts$residuals, lag.max=20)
Box.test(kingstimeseriesforecasts$residuals, lag=20, type="Ljung-Box")
plot.ts(kingstimeseriesforecasts$residuals) # plot the forecast-error series
# 14. Helper: compare the distribution of forecast errors with a normal
#     distribution having the same standard deviation.
plotForecastErrors <- function(forecasterrors){
  # Density-scale histogram of the observed forecast errors (red).
  hist(forecasterrors, col = "red", freq = F)
  # Overlay the binned density of a simulated N(0, sd(errors)) sample (blue).
  errorSd <- sd(forecasterrors)
  referenceSample <- rnorm(10000, mean = 0, sd = errorSd)
  referenceHist <- hist(referenceSample, plot = F)
  points(referenceHist$mids, referenceHist$density, type = "l", col = "blue", lwd = 2)
}
# 15. Plot the forecast-error histogram against the reference normal curve.
plotForecastErrors(kingstimeseriesforecasts$residuals) # draw the histogram
|
c8634967ddc89ac8637a9781a163c933ad7a3d58
|
d8f952e726878f922fe5c7a2644974eb0a7d6a67
|
/Documents/Glove/Codes/FANROI_Visual_visualizatiion_code.R
|
9ccb0b20ea4ca14e3cff6955a2e4b50aec46eef4
|
[] |
no_license
|
jisulee819/project_Glove
|
13e3725c164643ff37b6581104b1d119c2a95210
|
f27c684b09b8da82b6f448db242e5e03c27567e6
|
refs/heads/master
| 2020-07-26T16:51:51.423047
| 2019-09-16T04:29:01
| 2019-09-16T04:29:01
| 208,709,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,105
|
r
|
FANROI_Visual_visualizatiion_code.R
|
################
### 190906 DMN visualization ###
############
library(ggplot2)
library(reshape2)
library(FSA)
# Decoding accuracies (LDA) per subject / ROI / condition.
lda <- read.csv("/Users/jisu/Documents//Glove/20190911_FAN_VISUAL_lda.csv")
#svc <- read.csv("/Users/jisu/Documents//Glove/20190906_DMN_svc_all.csv")
# Fan-atlas cluster/network lookup table.
data <- readxl::read_xlsx("/Volumes/clmnlab/GA/fmri_data/masks/Fan/fan_cluster_net_GA.xlsx")
# ROI index lists for the default-mode, motor and visual networks.
DMNlist <- c(data$idx[data$cluster_16 == "Default"])
motorlist <- c(data$idx[data$clmnlab_GA == "motor"])
visuallist <- c(data$idx[data$clmnlab_GA == "visual"])
# Shift every index by one (presumably 0-based atlas indices to 1-based
# R indices — TODO confirm against the atlas file).
DMNlist <- DMNlist + 1
motorlist <- motorlist[!is.na(motorlist)] + 1
visuallist <- visuallist[!is.na(visuallist)] + 1
# Print the visual ROI indices for inspection.
for ( i in 1:length(visuallist) ){
  print(paste0(visuallist[i]))
}
# Short ROI names matching each index list, NA entries dropped.
DMNnames <- c(data$name_short[data$cluster_16 == "Default"])
motornames <- c(data$name_short[data$clmnlab_GA == "motor"])
motornames <- motornames[!is.na(motornames)]
visualnames <- c(data$name_short[data$clmnlab_GA == "visual"])
visualnames <- visualnames[!is.na(visualnames)]
###################
# This run of the script analyses the visual ROIs.
ROInames = visualnames
###################
# Arrange multiple ggplot objects on a single page in a grid.
#
# ...      - ggplot objects passed individually
# plotlist - optional list of additional ggplot objects
# file     - accepted but never used in the body
#            (NOTE(review): looks vestigial — confirm no caller relies on it)
# cols     - number of grid columns when `layout` is not supplied
# layout   - optional matrix of plot indices; cell (i, j) holds the index
#            of the plot drawn there, overriding `cols`
multiplot <- function(..., plotlist=NULL, file, cols=4, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel; seq() fills the matrix column-wise.
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    # Single plot: no grid machinery needed.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Work on the LDA accuracies; tag each row with the scanning period
# inferred from the subject-ID prefix (GA = early session, GB = late).
data <- lda
dim(data)
data$period <- ifelse(substr(data$subject, 1, 2) == "GA", "early",
                      ifelse(substr(data$subject, 1, 2) == "GB", "late", "NA"))
# Keep only position-decoding results.
subdata = data[data$target == "pos",]
# Mean and standard error of accuracy for every
# practiced/unpracticed x ROI x period cell.
# (se() — presumably FSA's standard-error helper loaded above; confirm.)
visudata.m = tapply(subdata$accu, list(subdata$practiced, subdata$roi, subdata$period), mean)
visudata.se = tapply(subdata$accu, list(subdata$practiced, subdata$roi, subdata$period), se)
# Reshape the two 3-d arrays to long format and combine them.
visu.m <- melt(visudata.m)
visu.se <- melt(visudata.se)
visudata <- cbind(visu.m, visu.se$value)
visudata <- data.frame(visudata)
names(visudata) <- c("mapping", "roi", "period", "mean", "se")
# One accuracy-by-period line plot per ROI (with error bars), stored as
# p1, p2, ... in the global environment via assign().
for (i in 1:length(ROInames)){
  p <- ggplot(data = visudata[visudata$roi == i,], aes(x = period, y = mean)) +
    scale_y_continuous(name="mean_acc") +
    scale_x_discrete(name = paste0(ROInames[i]," (LDA)")) +
    geom_point(aes(col = mapping)) +
    geom_line(aes(group=mapping, col=mapping)) +
    geom_errorbar(aes(x = period, ymin = mean - se, ymax = mean + se, col=mapping, width = .2)) +
    theme(legend.title = element_text(size=8, color = "salmon", face="bold"),
          legend.justification=c(1,0),
          legend.position=c(0.3, 0.05),
          legend.background = element_blank(),
          legend.key = element_blank())
  assign(paste0("p", i), p)
}
#data <- svc
#dim(data)
#data$period <- ifelse(substr(data$subject, 1, 2) == "GA", "early",
# ifelse(substr(data$subject, 1, 2) == "GB", "late", "NA"))
#
#subdata = data[data$target == "pos",]
## 1. Early - Late 2. Prac - Unprac 3.roi
#visudata.m = tapply(subdata$accu, list(subdata$practiced, subdata$roi, subdata$period), mean)
#visudata.se = tapply(subdata$accu, list(subdata$practiced, subdata$roi, subdata$period), se)
#
#visu.m <- melt(visudata.m)
#visu.se <- melt(visudata.se)
#visudata <- cbind(visu.m, visu.se$value)
#visudata <- data.frame(visudata)
#names(visudata) <- c("mapping", "roi", "period", "mean", "se")
#
#ROInames = c("Core", "DMsub", "MTLsub")
#
#for (i in 1:3){
#p0 <- ggplot(data = visudata[visudata$roi == 1,], aes(x = period, y = mean)) +
# scale_y_continuous(name="mean_acc") +
# scale_x_discrete(name = paste0(ROInames[i]," (SVC)")) +
# geom_point(aes(col = mapping)) +
# geom_line(aes(group=mapping, col=mapping)) +
# geom_errorbar(aes(x = period, ymin = mean - se, ymax = mean + se, col=mapping, width = .2)) +
# theme(legend.title = element_text(size=8, color = "salmon", face="bold"),
# legend.justification=c(1,0),
# legend.position=c(1, 0.05),
# legend.background = element_blank(),
# legend.key = element_blank())
#
#assign(paste0("p0", i), p0)
#}
#png(paste0("/Users/jisulee/Documents/CLMN LAB/Glove Project/20190906_DMN_LDA_SVC.png"), width = 3000, height = 3000, res=220)
#multiplot(p1, p2, p3, p01, p02, p03)
#dev.off()
### Correlation w/ learning amount
# Per-subject behavioural learning amounts, hard-coded for 30 subjects.
# NOTE(review): assumed to be in the same subject order as the decoding
# data frame — confirm against the behavioural source data.
learning.amount <- c(0.417418981481482,
                     0.360081018518519,
                     0.346666666666667,
                     0.412719907407407,
                     0.212870370370370,
                     0.475243055555556,
                     0.345543981481481,
                     0.329375000000000,
                     0.545196759259259,
                     0.419328703703704,
                     0.219363425925926,
                     0.317615740740741,
                     0.264768518518519,
                     0.539016203703704,
                     0.164085648148148,
                     0.526053240740741,
                     0.292233796296296,
                     0.297141203703704,
                     0.474976851851852,
                     0.150625000000000,
                     0.221759259259259,
                     0.165706018518519,
                     0.687083333333333,
                     0.402210648148148,
                     0.373993055555556,
                     0.411678240740741,
                     0.529895833333333,
                     0.473599537037037,
                     0.413530092592593,
                     0.442511574074074)
# Practiced, position-decoding rows only; split into early (GA) and late
# (GB) halves so per-subject accuracy differences can be formed row-wise.
subdata <- data[data$practiced == "practiced" & data$target == "pos",]
GAsub <- subdata[substr(subdata$subject, 1, 2) == "GA",]
GBsub <- subdata[substr(subdata$subject, 1, 2) == "GB",]
# accu.diff = late minus early accuracy; learn repeats each subject's
# learning amount once per ROI (rows presumably ordered subject-major,
# then ROI — confirm).
diffsub <- cbind(GAsub, GBsub$accu, "accu.diff" = (GBsub$accu - GAsub$accu), "learn" = rep(learning.amount, each=length(ROInames)))
#diffsub.Core <- diffsub[diffsub$roi == 1,]
#diffsub.DMsub <- diffsub[diffsub$roi == 2,]
#diffsub.MTLsub <- diffsub[diffsub$roi == 3,]
# One scatter plot per ROI (accuracy change vs learning amount) with the
# Pearson r annotated; stored as c1, c2, ... via assign().
for (i in 1:length(ROInames)){
  cor.value = cor(diffsub$learn[diffsub$roi == i], diffsub$accu.diff[diffsub$roi == i])
  # NOTE(review): "fonface" looks like a typo for gpar's "fontface" — confirm.
  grob = grobTree(textGrob(paste0("r = ",round(cor.value, 2)), x=0.05, y=0.90, hjust=0), gp=gpar(fontsize=20, fonface="Bold"))
  c <- ggplot(data = diffsub[diffsub$roi == i,], aes(x=accu.diff, y=learn)) +
    scale_y_continuous(name = "reward difference (GB-GA)") +
    scale_x_continuous(name = paste0(ROInames[i]," (prac_late-early)")) +
    #geom_text(x=0.05, y=0.7, label = paste0("r = ",round(cor.value, 2))) +
    geom_point() + # Use hollow circles
    geom_smooth(method=lm) + # Add linear regression line
    annotation_custom(grob)
  # Plot
  assign(paste0("c", i), c)
}
#png(paste0("/Users/jisu/Documents//Glove/20190906_DMN_LDA_corr_clusters.png"), width = 2000, height = 2000, res=220)
#multiplot(p1, p6, p16,
#          c1, c6, c16)
#dev.off()
# Write all accuracy plots (p*) and correlation plots (c*) to a single PNG.
png(paste0("/Users/jisu/Documents/Glove/20190911_FAN_VISUAL_LDA.png"), width = 7000/6 * 4, height = 11500/18 * 13, res=220)
multiplot(
  p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13,
  c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13,
  p14, p15, p16, p17, p18, p19, p20, p21, p22, p23, p24, p25, p26,
  c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26
)
dev.off()
# Print plot-object names c1..c54 (scratch/debugging output).
for (i in 1:54){
  print(paste0("c",i))
}
|
ca9a35903802ae6ed0c1971328677bbe5abc5f53
|
bc313a5325487f27b6f39339f4f555301ec4b2c4
|
/generic_sim_fns/store_results.R
|
27db3b374e9a5d8bd777fa9e5e845fb902c3adb5
|
[] |
no_license
|
dill/ridgeds
|
cf58a81b996ad6d4595112b53129af0e89e0456b
|
d0afc2af431697056535a8e8cd8e0a7356bfb971
|
refs/heads/master
| 2020-12-31T06:22:57.989643
| 2014-06-27T15:59:51
| 2014-06-27T15:59:51
| 19,791,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,475
|
r
|
store_results.R
|
# Collate one model fit into a long-format results matrix.
#
# mod      - fitted model object (the fitting call was wrapped in try()),
#            or NULL if fitting failed.  Expected to carry $model.name and,
#            on success, a $ddf detection-function component.
#            NOTE(review): a bare try-error object would fail at the
#            mod$model.name access below — confirm callers attach
#            model.name to a list wrapper.
# pop.size - population size
# sim      - simulation id
# corr     - covariate correlation (default 0)
# Ncov     - number of covariates
#
# Returns a character matrix with one row per stored quantity
# (p, n, b0, cov1, cov2, aic, varp, varN), each prefixed by the
# simulation identifiers.
store_results <- function(mod, pop.size, sim, corr=0, Ncov){
  # Defaults: everything is NA until the fit is known to have succeeded.
  # (Fix: varN was previously uninitialised, so any failed fit raised
  # "object 'varN' not found" when building the result below.)
  p <- NA
  varp <- NA
  varN <- NA
  cov1 <- NA
  cov2 <- NA
  b0 <- NA
  aic <- NA
  n <- NA
  # identifiers shared by every output row
  other.vars <- c(sim, pop.size, mod$model.name, corr, Ncov)
  # if fitting went okay, then store some values
  if (!is.null(mod)) {
    if (!inherits(mod, "try-error")) {
      if (!is.null(mod$ddf)) {
        sm <- summary(mod$ddf)    # hoist: summary() was computed three times
        # average p is n/Nhat
        p <- sm$average.p
        # standard errors of average p and of Nhat
        varp <- sm$average.p.se
        varN <- sm$Nhat.se
        # pull out parameters; intercept first, covariates by name if present
        b0 <- mod$ddf$par[1]
        if (!is.null(mod$ddf$par["cov1"])) {
          cov1 <- mod$ddf$par["cov1"]
        }
        if (!is.null(mod$ddf$par["cov2"])) {
          cov2 <- mod$ddf$par["cov2"]
        }
        aic <- mod$ddf$criterion
        n <- sm$n
      }
    }
  }
  res <- rbind(
    c(other.vars, "p", p),
    c(other.vars, "n", n),
    c(other.vars, "b0", b0),
    c(other.vars, "cov1", cov1),
    c(other.vars, "cov2", cov2),
    c(other.vars, "aic", aic),
    c(other.vars, "varp", varp),
    c(other.vars, "varN", varN))
  return(res)
}
|
4a501f290d01c9c5b07e7e1d68dd9abebf1700e1
|
a4c6332c239ea98e1e5cef105ba2c3befcfcf310
|
/WangzhenTools/man/WangzhenTools.Rd
|
9cd282e4137f7bbfd4da2b19ff8eaddf0dcce278
|
[] |
no_license
|
maxx0290/myfirstpackage
|
8f75f15159e15c7be70393b955349a4d1ece1cb4
|
168b76faedd741f89e3c7b7ab66b8a2f0f28d5c9
|
refs/heads/master
| 2021-04-28T02:27:08.931955
| 2018-03-10T01:22:58
| 2018-03-10T01:22:58
| 122,114,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 247
|
rd
|
WangzhenTools.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WangzhenTools-package.r
\docType{package}
\name{WangzhenTools}
\alias{WangzhenTools}
\alias{WangzhenTools-package}
\title{WangzhenTools.}
\description{
WangzhenTools.
}
|
31993c30230dadd7a7e2189f16f67d403687a85d
|
f0563cf527cb1374ff9d3ae3e0f4c3aa0fa137a8
|
/man/readIDATs.Rd
|
d4da5b4fe7758826982fa0e1b086517e579a453a
|
[] |
no_license
|
yilinxu/sesame
|
5cd6e14a4c7ed85dd8676a44895f5ff8b35a2575
|
1a098702a9e91b5cb43fc61bfc4a9a09b71ba67d
|
refs/heads/master
| 2020-03-10T20:41:23.047311
| 2018-04-10T05:02:25
| 2018-04-10T05:02:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,146
|
rd
|
readIDATs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sesame.R
\name{readIDATs}
\alias{readIDATs}
\title{Import IDATs from a list of samples}
\usage{
readIDATs(sample.names, base.dir = NULL, raw = FALSE, num.processes = 1)
}
\arguments{
\item{sample.names}{a sample list}
\item{base.dir}{base directory}
\item{raw}{to return raw data without mapping to signal}
\item{num.processes}{number of parallele processes, serial if 1}
}
\value{
a list of \code{SigSet}s or a list of matrices if `raw=TRUE`
}
\description{
If `raw = FALSE` (default), a list of \code{SigSet}s are returned. Each
\code{SigSet} contains all the signal measurement on the platform. If
`raw = TRUE`, a list is returned with each element of the returned list
containing a matrix having signal intensity indexed by chip address.
}
\details{
Sample.names is a vector of common prefixes between the *_Grn.idat and
*_Red.idat. `num.processes` controls the number of parallel workers. It
is default to 1 which means serial.
}
\examples{
ssets <- readIDATs(sub('_Grn.idat','',system.file(
"extdata", "4207113116_A_Grn.idat", package = "sesameData")))
}
|
f651b036a247a268010fd8a246e7901a5858d1bc
|
64257a0e57cf928b0ae7676a108a3688001181bd
|
/man/count_events.Rd
|
0759e8bf18c77a2a147d073fc885fb3688cec1a9
|
[
"BSD-3-Clause"
] |
permissive
|
marcpaterno/artsupport
|
9842a678c8070468dd93a258810b84067fe22f32
|
803310561741c4aa54bdd44e393da9ae8551bfa0
|
refs/heads/master
| 2020-06-30T06:49:04.780687
| 2020-04-20T23:14:48
| 2020-04-20T23:14:48
| 74,387,093
| 0
| 1
|
NOASSERTION
| 2019-02-07T07:11:33
| 2016-11-21T17:15:33
|
R
|
UTF-8
|
R
| false
| true
| 558
|
rd
|
count_events.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/public_utils.R
\name{count_events}
\alias{count_events}
\title{Count the distinct events in a data.frame.}
\usage{
count_events(data)
}
\arguments{
\item{data}{a data.frame}
}
\value{
the number of distinct events represented in the data.frame
}
\description{
This function expects the data.frame to contain columns named 'Run',
'SubRun', and 'Event', carrying the appropriate integer values. Events with
different labels (if the data.frame is labeled) are counted as distinct.
}
|
59cf948b394b3a730a9d3d1031e59c8caa082100
|
5f754675e68444db1fa7f077eff5dc6ee622a0be
|
/man/hcrConstantCatch.Rd
|
743d2944f5cb0253018c515a1e856b02e70d618d
|
[] |
no_license
|
flr/mydas
|
893c3e8f93957109377329682fc99eee4518cbc0
|
726126972e5bca4d6a99ba774f05ea63d60f7b87
|
refs/heads/master
| 2023-06-22T22:43:10.246612
| 2023-06-16T05:07:31
| 2023-06-16T05:07:31
| 221,472,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 587
|
rd
|
hcrConstantCatch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hcrConstantCatch.R
\docType{methods}
\name{hcrConstantCatch}
\alias{hcrConstantCatch}
\title{hcrConstantCatch}
\usage{
hcrConstantCatch(yrs, catch, ...)
}
\arguments{
\item{yrs}{\code{numeric} target years to take catch over}
\item{catch}{an \code{FLQuant} with catch to average over by iter}
\item{...}{any additional arguments}
}
\description{
Sets a constant catch
}
\details{
hcrConstantCatch
}
\examples{
\dontrun{
data(ple4)
hcrConstantCatch(2010:2012,catch(ple4))
}
}
\author{
Laurence Kell, Sea++
}
|
7ea956ac2ce6598e4a72f60c71c8e3f7d24b1580
|
836a779c4e3405bb6ffe79117964b6b516cb3bc2
|
/R/632.BayesFactors_x.R
|
a8b4d8059c43a98f5fbb9afe9df0e681d2b7c240
|
[] |
no_license
|
RajeswaranV/proportion
|
657fe69c4ad62c00ee95077d40683d4452d42c66
|
c0e0a60d43113004c0366c71a2b2dac36bf1fc86
|
refs/heads/master
| 2022-06-21T13:51:47.915249
| 2022-06-11T06:31:18
| 2022-06-11T06:31:18
| 46,557,957
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,804
|
r
|
632.BayesFactors_x.R
|
#' Bayesian Hypothesis testing given x: Hypothesis 1: Theta = Theta0 Vs Theta <> Theta0
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th0 - Hypothetical parameter for H0
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p = p0} Vs \eqn{p \ne p0}{p not equal to p0} from the given number of
#' trials \code{n} and and for given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th0=0.75; a1=3; b1=3
#' hypotestBAF1x(x,n,th0,a1,b1)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 1: Theta = Theta0 Vs Theta <> Theta0
hypotestBAF1x<-function(x,n,th0,a1,b1)
{
  # --- validate inputs -----------------------------------------------------
  if (missing(x)) stop("'x' is missing")
  if (missing(n)) stop("'n' is missing")
  if (missing(th0)) stop("'th0' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  # is.numeric() covers both integer and double; the original
  # class()-comparison idiom mixes elementwise & with || and breaks on
  # objects with multi-element class vectors (e.g. matrices in R >= 4).
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th0) || length(th0) > 1 || th0 > 1 || th0 <= 0) stop("'th0' has to be between 0 and 1")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")
  # --- Bayes factor BF01 ---------------------------------------------------
  # Marginal likelihood ratio of H0: p = th0 against H1: p ~ Beta(a1, b1)
  # under a binomial likelihood with x successes in n trials.
  BaFa01 <- (beta(a1, b1) / beta(x + a1, n - x + b1)) * (th0^x) * ((1 - th0)^(n - x))
  ndf <- data.frame(x, BaFa01)
  # --- interpretation ------------------------------------------------------
  # Guideline bands (left-closed, right-open), replacing the original eight
  # subset()/rbind()/reorder passes with a single cut(); band boundaries
  # are identical to the original (1/150, 1/20, 1/3, 1, 3, 20, 150).
  breaks <- c(-Inf, 1/150, 1/20, 1/3, 1, 3, 20, 150, Inf)
  labels <- c("Evidence against H0 is very strong",
              "Evidence against H0 is strong",
              "Evidence against H0 is positive",
              "Evidence against H0 is not worth more than a bare mention",
              "Evidence against H1 is not worth more than a bare mention",
              "Evidence against H1 is positive",
              "Evidence against H1 is strong",
              "Evidence against H1 is very strong")
  ndf$Interpretation <- as.character(cut(ndf$BaFa01, breaks = breaks, labels = labels, right = FALSE))
  return(ndf)
}
######################################################################################################
#' Bayesian Hypothesis testing given x: Hypothesis 2: Theta = Theta0 Vs Theta > Theta0
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th0 - Hypothetical parameter for H0
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p = p0} Vs \eqn{p > p0} from the given number of
#' trials \code{n} and for the given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th0=0.75; a1=3; b1=3
#' hypotestBAF2x(x,n,th0,a1,b1)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 2: Theta = Theta0 Vs Theta > Theta0
hypotestBAF2x <- function(x, n, th0, a1, b1)
{
  # --- argument checks (error messages kept identical to the original API) ---
  if (missing(x)) stop("'x' is missing")
  if (missing(n)) stop("'n' is missing")
  if (missing(th0)) stop("'th0' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th0) || length(th0) > 1 || th0 > 1 || th0 <= 0) stop("'th0' has to be between 0 and 1")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")

  # Prior Beta(a1, b1) and posterior Beta(x + a1, n - x + b1) densities for
  # theta under H1; both are integrated over (th0, 1), the H1 region.
  prior_dens <- function(p) stats::dbeta(p, shape1 = a1, shape2 = b1)
  post_dens  <- function(p) stats::dbeta(p, shape1 = x + a1, shape2 = n - x + b1)
  t1 <- stats::integrate(prior_dens, th0, 1)$value
  t2 <- stats::integrate(post_dens, th0, 1)$value

  # Bayes factor BF01 for H0: theta = th0 vs H1: theta > th0
  # (likelihood at th0 times prior/posterior tail-mass ratio).
  BaFa01 <- (t1 / t2) * (th0^x) * ((1 - th0)^(n - x))

  # Map BF01 onto the evidence categories documented above.  The original
  # built eight subsets of a one-row data frame and rbind-ed them together;
  # since 'x' is scalar, a single findInterval() lookup over the same
  # left-closed cutpoints is equivalent.
  cutpoints <- c(1 / 150, 1 / 20, 1 / 3, 1, 3, 20, 150)
  labels <- c(
    "Evidence against H0 is very strong",
    "Evidence against H0 is strong",
    "Evidence against H0 is positive",
    "Evidence against H0 is not worth more than a bare mention",
    "Evidence against H1 is not worth more than a bare mention",
    "Evidence against H1 is positive",
    "Evidence against H1 is strong",
    "Evidence against H1 is very strong"
  )
  Interpretation <- labels[findInterval(BaFa01, cutpoints) + 1]
  data.frame(x, BaFa01, Interpretation, stringsAsFactors = FALSE)
}
######################################################################################################
#' Bayesian Hypothesis testing given x: Hypothesis 3: Theta = Theta0 Vs Theta < Theta0
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th0 - Hypothetical parameter for H0
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p = p0} Vs \eqn{p < p0} from the given number of
#' trials \code{n} and for the given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th0=0.75; a1=3; b1=3
#' hypotestBAF3x(x,n,th0,a1,b1)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 3: Theta = Theta0 Vs Theta < Theta0
hypotestBAF3x <- function(x, n, th0, a1, b1)
{
  # --- argument checks (error messages kept identical to the original API) ---
  if (missing(x)) stop("'x' is missing")
  if (missing(n)) stop("'n' is missing")
  if (missing(th0)) stop("'th0' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th0) || length(th0) > 1 || th0 > 1 || th0 <= 0) stop("'th0' has to be between 0 and 1")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")

  # Prior Beta(a1, b1) and posterior Beta(x + a1, n - x + b1) densities for
  # theta under H1; both are integrated over (0, th0), the H1 region.
  prior_dens <- function(p) stats::dbeta(p, shape1 = a1, shape2 = b1)
  post_dens  <- function(p) stats::dbeta(p, shape1 = x + a1, shape2 = n - x + b1)
  t1 <- stats::integrate(prior_dens, 0, th0)$value
  t2 <- stats::integrate(post_dens, 0, th0)$value

  # Bayes factor BF01 for H0: theta = th0 vs H1: theta < th0.
  BaFa01 <- (t1 / t2) * (th0^x) * ((1 - th0)^(n - x))

  # Map BF01 onto the evidence categories documented above.  Single
  # findInterval() lookup replaces the original eight-subset/rbind
  # construction, which is equivalent for a scalar 'x' (left-closed bins).
  cutpoints <- c(1 / 150, 1 / 20, 1 / 3, 1, 3, 20, 150)
  labels <- c(
    "Evidence against H0 is very strong",
    "Evidence against H0 is strong",
    "Evidence against H0 is positive",
    "Evidence against H0 is not worth more than a bare mention",
    "Evidence against H1 is not worth more than a bare mention",
    "Evidence against H1 is positive",
    "Evidence against H1 is strong",
    "Evidence against H1 is very strong"
  )
  Interpretation <- labels[findInterval(BaFa01, cutpoints) + 1]
  data.frame(x, BaFa01, Interpretation, stringsAsFactors = FALSE)
}
######################################################################################################
#' Bayesian Hypothesis testing given x: Hypothesis 4: Theta <= Theta0 Vs Theta > Theta0
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th0 - Hypothetical parameter for H0
#' @param a0 - Priors for hypothesis H0
#' @param b0 - Priors for hypothesis H0
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p <= p0} Vs \eqn{p > p0} from the given number of trials \code{n} and
#' for given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th0=0.75;a0=0.5; b0=0.5; a1=3; b1=3
#' hypotestBAF4x(x,n,th0,a0,b0,a1,b1)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 4: Theta <= Theta0 Vs Theta > Theta0
hypotestBAF4x <- function(x, n, th0, a0, b0, a1, b1)
{
  # --- argument checks (error messages kept identical to the original API) ---
  if (missing(x)) stop("'x' is missing")
  if (missing(n)) stop("'n' is missing")
  if (missing(th0)) stop("'th0' is missing")
  if (missing(a0)) stop("'a0' is missing")
  if (missing(b0)) stop("'b0' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th0) || length(th0) > 1 || th0 > 1 || th0 <= 0) stop("'th0' has to be between 0 and 1")
  if (!is.numeric(a0) || length(a0) > 1 || a0 <= 0) stop("'a0' has to be greater than 0")
  if (!is.numeric(b0) || length(b0) > 1 || b0 <= 0) stop("'b0' has to be greater than 0")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")

  # Prior and posterior densities under H0 (Beta(a0, b0)) and H1 (Beta(a1, b1)).
  prior0 <- function(p) stats::dbeta(p, shape1 = a0, shape2 = b0)
  post0  <- function(p) stats::dbeta(p, shape1 = x + a0, shape2 = n - x + b0)  # null posterior
  prior1 <- function(p) stats::dbeta(p, shape1 = a1, shape2 = b1)
  post1  <- function(p) stats::dbeta(p, shape1 = x + a1, shape2 = n - x + b1)  # alternate posterior

  # H0 region is (0, th0]; H1 region is (th0, 1).
  # BF01 = (posterior odds of H0) / (prior odds of H0).
  t0  <- stats::integrate(prior0, 0, th0)$value
  t1  <- stats::integrate(prior1, th0, 1)$value
  t01 <- stats::integrate(post0, 0, th0)$value
  t11 <- stats::integrate(post1, th0, 1)$value
  BaFa01 <- t01 * t1 / (t0 * t11)

  # Map BF01 onto the evidence categories documented above; a single
  # findInterval() lookup replaces the original eight-subset/rbind machinery
  # (equivalent for scalar 'x'; bins are left-closed).
  cutpoints <- c(1 / 150, 1 / 20, 1 / 3, 1, 3, 20, 150)
  labels <- c(
    "Evidence against H0 is very strong",
    "Evidence against H0 is strong",
    "Evidence against H0 is positive",
    "Evidence against H0 is not worth more than a bare mention",
    "Evidence against H1 is not worth more than a bare mention",
    "Evidence against H1 is positive",
    "Evidence against H1 is strong",
    "Evidence against H1 is very strong"
  )
  Interpretation <- labels[findInterval(BaFa01, cutpoints) + 1]
  data.frame(x, BaFa01, Interpretation, stringsAsFactors = FALSE)
}
######################################################################################################
#' Bayesian Hypothesis testing given x: Hypothesis 5: Theta >= Theta0 Vs Theta < Theta0
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th0 - Hypothetical parameter for H0
#' @param a0 - Priors for hypothesis H0
#' @param b0 - Priors for hypothesis H0
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p >= p0} Vs \eqn{p < p0} from the given number of
#' trials \code{n} and for the given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th0=0.75; a0=0.5; b0= 0.5;a1=1; b1=1
#' hypotestBAF5x(x,n,th0,a0,b0,a1,b1)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 5: Theta >= Theta0 Vs Theta < Theta0
hypotestBAF5x <- function(x, n, th0, a0, b0, a1, b1)
{
  # --- argument checks (error messages kept identical to the original API) ---
  if (missing(x)) stop("'x' is missing")
  # Fix: the original omitted the missing-'n' check that every sibling
  # function (hypotestBAF1x..4x, 6x) performs.
  if (missing(n)) stop("'n' is missing")
  if (missing(th0)) stop("'th0' is missing")
  if (missing(a0)) stop("'a0' is missing")
  if (missing(b0)) stop("'b0' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th0) || length(th0) > 1 || th0 > 1 || th0 <= 0) stop("'th0' has to be between 0 and 1")
  if (!is.numeric(a0) || length(a0) > 1 || a0 <= 0) stop("'a0' has to be greater than 0")
  if (!is.numeric(b0) || length(b0) > 1 || b0 <= 0) stop("'b0' has to be greater than 0")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")

  # Prior and posterior densities under H0 (Beta(a0, b0)) and H1 (Beta(a1, b1)).
  prior0 <- function(p) stats::dbeta(p, shape1 = a0, shape2 = b0)
  post0  <- function(p) stats::dbeta(p, shape1 = x + a0, shape2 = n - x + b0)  # null posterior
  prior1 <- function(p) stats::dbeta(p, shape1 = a1, shape2 = b1)
  post1  <- function(p) stats::dbeta(p, shape1 = x + a1, shape2 = n - x + b1)  # alternate posterior

  # H0 region is [th0, 1); H1 region is (0, th0).
  # BF01 = (posterior odds of H0) / (prior odds of H0).
  t0  <- stats::integrate(prior0, th0, 1)$value
  t1  <- stats::integrate(prior1, 0, th0)$value
  t01 <- stats::integrate(post0, th0, 1)$value
  t11 <- stats::integrate(post1, 0, th0)$value
  BaFa01 <- t01 * t1 / (t0 * t11)

  # Map BF01 onto the evidence categories documented above; a single
  # findInterval() lookup replaces the original eight-subset/rbind machinery
  # (equivalent for scalar 'x'; bins are left-closed).
  cutpoints <- c(1 / 150, 1 / 20, 1 / 3, 1, 3, 20, 150)
  labels <- c(
    "Evidence against H0 is very strong",
    "Evidence against H0 is strong",
    "Evidence against H0 is positive",
    "Evidence against H0 is not worth more than a bare mention",
    "Evidence against H1 is not worth more than a bare mention",
    "Evidence against H1 is positive",
    "Evidence against H1 is strong",
    "Evidence against H1 is very strong"
  )
  Interpretation <- labels[findInterval(BaFa01, cutpoints) + 1]
  data.frame(x, BaFa01, Interpretation, stringsAsFactors = FALSE)
}
######################################################################################################
#' Bayesian Hypothesis testing given x: Hypothesis 6: Theta < Theta1 Vs Theta > Theta2
#' @param x - Number of success
#' @param n - Number of trials from data
#' @param th1 - Hypothetical parameter for H1
#' @param a1 - Priors for hypothesis H1
#' @param b1 - Priors for hypothesis H1
#' @param th2 - Hypothetical parameter for H2
#' @param a2 - Priors for hypothesis H2
#' @param b2 - Priors for hypothesis H2
#' @details Computes Bayes factor under Beta-Binomial model for the
#' model: \eqn{p < p1} Vs \eqn{p > p2} from the given number of
#' trials \code{n} and for the given number
#' of successes \eqn{x = 0, 1, 2......n }
#' We use the following guideline for reporting the results:
#' \itemize{
#' \item 1/3 <= BaFa01 < 1: Evidence against H0 is not worth more than a bare mention.
#' \item 1/20 <= BaFa01 < 1/3: Evidence against H0 is positive.
#' \item 1/150 <= BaFa01 < 1/20: Evidence against H0 is strong.
#' \item BaFa10 < 1/150: Evidence against H0 is very strong.
#' \item 1 <= BaFa01 < 3: Evidence against H1 is not worth more than a bare mention.
#' \item 3 <= BaFa01 < 20: Evidence against H1 is positive.
#' \item 20 <= BaFa01 < 150: Evidence against H1 is strong.
#' \item 150 <= BaFa01: Evidence against H1 is very strong.}
#' @return A dataframe with
#' \item{x}{ Number of successes}
#' \item{BaFa01}{ Bayesian Factor}
#' @family Hypothesis testing
#' @examples
#' x=682; n=925; th1=0.5; a1=1; b1=1; th2=0.9; a2=0.5; b2=0.5
#' hypotestBAF6x(x,n,th1,a1,b1,th2,a2,b2)
#' @references
#' [1] 2006 Ghosh M, Delampady M and Samanta T.
#' An introduction to Bayesian analysis: Theory and Methods.
#' Springer, New York
#'
#' [2] 2014 Sakthivel S, Subbiah M and Ramakrishnan R
#' Default prior approach for Bayesian testing of hypotheses involving single binomial proportion
#' International Journal of Statistics and Analysis, 4 (2), 139 - 153
#' @export
#####Hypothesis 6: Theta < Theta1 Vs Theta > Theta2
hypotestBAF6x <- function(x, n, th1, a1, b1, th2, a2, b2)
{
  # --- argument checks (error messages kept identical to the original API;
  # note th1/th2 are only checked for non-negativity, as in the original) ---
  if (missing(x)) stop("'x' is missing")
  if (missing(n)) stop("'n' is missing")
  if (missing(th1)) stop("'th1' is missing")
  if (missing(a1)) stop("'a1' is missing")
  if (missing(b1)) stop("'b1' is missing")
  if (missing(th2)) stop("'th2' is missing")
  if (missing(a2)) stop("'a2' is missing")
  if (missing(b2)) stop("'b2' is missing")
  if (!is.numeric(x) || length(x) > 1 || x > n || x < 0) stop("'x' has to be between 0 and n")
  if (!is.numeric(n) || length(n) > 1 || n < 0) stop("'n' has to be greater or equal to 0")
  if (!is.numeric(th1) || length(th1) > 1 || th1 < 0) stop("'th1' has to be greater than 0")
  if (!is.numeric(a1) || length(a1) > 1 || a1 <= 0) stop("'a1' has to be greater than 0")
  if (!is.numeric(b1) || length(b1) > 1 || b1 <= 0) stop("'b1' has to be greater than 0")
  if (!is.numeric(th2) || length(th2) > 1 || th2 < 0) stop("'th2' has to be greater than 0")
  if (!is.numeric(a2) || length(a2) > 1 || a2 <= 0) stop("'a2' has to be greater than 0")
  if (!is.numeric(b2) || length(b2) > 1 || b2 <= 0) stop("'b2' has to be greater than 0")

  # Posterior densities under the two hypotheses' priors:
  # H1 uses Beta(a1, b1), H2 uses Beta(a2, b2).
  post1 <- function(p) stats::dbeta(p, shape1 = x + a1, shape2 = n - x + b1)
  post2 <- function(p) stats::dbeta(p, shape1 = x + a2, shape2 = n - x + b2)

  # BF12 = posterior mass of (theta < th1) under H1 over
  #        posterior mass of (theta > th2) under H2.
  t1 <- stats::integrate(post1, 0, th1)$value
  t2 <- stats::integrate(post2, th2, 1)$value
  BaFa01 <- t1 / t2

  # Map the Bayes factor onto the evidence categories documented above; a
  # single findInterval() lookup replaces the original eight-subset/rbind
  # machinery (equivalent for scalar 'x'; bins are left-closed).
  cutpoints <- c(1 / 150, 1 / 20, 1 / 3, 1, 3, 20, 150)
  labels <- c(
    "Evidence against H0 is very strong",
    "Evidence against H0 is strong",
    "Evidence against H0 is positive",
    "Evidence against H0 is not worth more than a bare mention",
    "Evidence against H1 is not worth more than a bare mention",
    "Evidence against H1 is positive",
    "Evidence against H1 is strong",
    "Evidence against H1 is very strong"
  )
  Interpretation <- labels[findInterval(BaFa01, cutpoints) + 1]
  data.frame(x, BaFa01, Interpretation, stringsAsFactors = FALSE)
}
######################################################################################################
|
84f3cbdc1879174bce66bfaff77f009b4e2ee697
|
67f31a9f56d85ede80920358fe40462c2cb710ed
|
/man/scrapeLEI.Rd
|
2460fbead17061a06d6cbb01d127df1bf3503e2a
|
[] |
no_license
|
vh-d/VHtools
|
ff95b01424c210b3451f4ee63d5aaa016e553c2e
|
a7907e8ba370523ca92985fb73f734a3284896b8
|
refs/heads/master
| 2020-04-12T06:25:18.169942
| 2019-04-09T20:09:34
| 2019-04-09T20:09:34
| 60,918,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
scrapeLEI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leis.R
\name{scrapeLEI}
\alias{scrapeLEI}
\title{Scrape attributes of entities from lei-lookup.com given a list of LEI codes}
\usage{
scrapeLEI(lei_codes, token = "", proxy = paste(getIEProxy(), collapse =
":"), showMissing = F)
}
\arguments{
\item{lei_codes}{character vector of LEI codes}
\item{token}{token for access to the lei-lookup.com REST API}
\item{proxy}{proxy settings}
\item{showMissing}{logical flag; defaults to \code{FALSE}}
}
\value{
list of legal entities with all attributes
}
\description{
Scrape attributes of entities from lei-lookup.com given a list of LEI codes
}
\examples{
lei_vec <- c("259400DZXF7UJKK2AY35", "some misspelled LEI", "529900LN3S50JPU47S06", "some other LEI")
scrapeLEI(lei_vec)
scrapeLEI(lei_vec, showMissing = T)
convertEntityList2df(scrapeLEI(lei_vec))
convertEntityList2df(scrapeLEI(lei_vec), wide = F)
}
|
0e2b839c2949447bdff5194284fe8568775e829a
|
17317b5c7c0e50dd167d881b65a65677584329b0
|
/R/time_spread.R
|
045bfe027748ec03c626427bd0833e6cdd150dbb
|
[] |
no_license
|
Tazinho/timedim
|
073a09559c1e0350e30195402894ebe1b881d8ee
|
a5a3377f12ba6a0a462f5a704e51fbaeecde8df3
|
refs/heads/master
| 2020-11-30T00:31:55.821415
| 2017-06-30T23:01:28
| 2017-06-30T23:01:28
| 95,925,651
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 337
|
r
|
time_spread.R
|
#' Summarise the time span per id
#'
#' Collapses a table of (id, dates) rows to one row per id holding the
#' earliest and latest date observed.  (The original description said
#' "gather time dimension", but the function summarises, not gathers.)
#'
#' @param .data a tibble (or data frame) with columns \code{id} and \code{dates}
#'
#' @return a data frame with one row per \code{id} and columns
#'   \code{d_min} and \code{d_max}
#'
#' @importFrom dplyr "%>%"
#'
#' @export
time_spread <- function(.data){
  # dplyr:: qualification added: only %>% is imported into the package
  # namespace, so bare group_by()/summarise() would fail at run time
  # unless dplyr happened to be attached.
  .data %>%
    dplyr::group_by(id) %>%
    dplyr::summarise(d_min = min(dates), d_max = max(dates))
}
|
f0b1d0c4eeddfae23948d0f2f60ba961bbafa829
|
e4be7f8bf65953c73b3b699a81a7d8db749e3e60
|
/R/set_up_model.R
|
57e8e73675d1b847d81fd844fcfaea944ddb8cf7
|
[
"MIT"
] |
permissive
|
FLARE-forecast/FLAREr
|
0833470f9b8e744b4a0782c1a1b50a6a403534d1
|
0c1215688ea80eb7886f3ffe2d847b766b3fb6d0
|
refs/heads/master
| 2023-08-04T23:43:47.057220
| 2023-08-04T13:34:43
| 2023-08-04T13:34:43
| 292,376,842
| 4
| 8
|
MIT
| 2023-04-25T13:32:56
| 2020-09-02T19:32:21
|
R
|
UTF-8
|
R
| false
| false
| 2,646
|
r
|
set_up_model.R
|
#' Set up model for FLARE simulation
#'
#' @param config list of configurations
#' @param ens_working_directory full path of directory where ensemble member is executed
#' @param state_names vector of state names
#' @param inflow_file_names matrix or vector of inflow file names
#' @param outflow_file_names matrix or vector of outflow file names
#'
#' @return
#' @noRd
set_up_model <- function(config,
                         ens_working_directory,
                         state_names,
                         inflow_file_names,
                         outflow_file_names){

  # Seed the working directory with the base GLM namelist from the
  # configuration directory.
  file.copy(from = file.path(config$file_path$configuration_directory, config$model_settings$base_GLM_nml),
            to = file.path(ens_working_directory, "glm3.nml"), overwrite = TRUE)

  # Water-quality state variables are everything other than temp/salt.
  non_temp_names <- state_names[which(!(state_names %in% c("temp", "salt")))]

  update_var(length(non_temp_names), "num_wq_vars", ens_working_directory, "glm3.nml") #GLM SPECIFIC

  if(length(non_temp_names) > 1) {
    update_var(non_temp_names, "wq_names", ens_working_directory, "glm3.nml")
  }

  # inflow_file_names may be a vector (ncol NULL) or a matrix; the inflow /
  # outflow counts and variable lists are only written for the matrix case.
  if(!is.null(ncol(inflow_file_names))) {
    update_var(ncol(inflow_file_names), "num_inflows", ens_working_directory, "glm3.nml")
    update_var(ncol(outflow_file_names), "num_outlet", ens_working_directory, "glm3.nml")

    inflow_var_names <- c("FLOW","TEMP","SALT", non_temp_names)
    update_var(inflow_var_names, "inflow_vars", ens_working_directory, "glm3.nml")
    update_var(length(inflow_var_names), "inflow_varnum", ens_working_directory, "glm3.nml")
  }

  if(config$model_settings$model_name == "glm_aed"){
    # AED (water-quality) runs additionally need the AED namelist and the
    # phytoplankton / zooplankton parameter files alongside glm3.nml.
    file.copy(from = file.path(config$file_path$configuration_directory, config$model_settings$base_AED_nml),
              to = file.path(ens_working_directory, "aed2.nml"), overwrite = TRUE)
    file.copy(from = file.path(config$file_path$configuration_directory, config$model_settings$base_AED_phyto_pars_nml),
              to = file.path(ens_working_directory, "aed_phyto_pars.csv"), overwrite = TRUE)
    file.copy(from = file.path(config$file_path$configuration_directory, config$model_settings$base_AED_zoop_pars_nml),
              to = file.path(ens_working_directory, "aed2_zoop_pars.nml"), overwrite = TRUE)
  }

  update_var(length(config$modeled_depths), "num_depths", ens_working_directory, "glm3.nml") #GLM SPECIFIC

  # (A dead, unused re-assignment of inflow_var_names was removed here.)

  # Create a copy of the NML to record starting initial conditions.
  invisible(file.copy(from = paste0(ens_working_directory, "/", "glm3.nml"), #GLM SPECIFIC
                      to = paste0(ens_working_directory, "/", "glm3_initial.nml"), overwrite = TRUE)) #GLM SPECIFIC
}
|
314ece9874f9b535846f5cacdb719f0c73781397
|
6dcd0671066ae1b920de5ec1e7c602660f449741
|
/Datacamp - R - Data Analyst/7 - cleaning data/cleaning-data.R
|
b84dc857e6f0b8b037bd1eff9f9ffd916849bb69
|
[] |
no_license
|
Kallil12/anotacoes-cursos-online
|
581fc738f6f43f6a7bf53a9379e024ab4b005212
|
1569b2c6a3e97279dd2eb4e5a382849e252d4d31
|
refs/heads/master
| 2023-05-02T03:18:56.337772
| 2023-04-20T15:27:15
| 2023-04-20T15:27:15
| 259,153,006
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,072
|
r
|
cleaning-data.R
|
# ---------------------------------------------------------------------------
# DataCamp "Cleaning Data in R" exercise transcript.
# The data objects used below (bmi, bmi_cc, census, census_long,
# census_long3) are supplied by the course environment and are NOT created
# here, so this script does not run stand-alone.
# NOTE(review): gather()/spread() are superseded by tidyr's pivot_longer()/
# pivot_wider(); kept as written to match the course material.
# ---------------------------------------------------------------------------
# ---- Basic structure of bmi ----
# Check the class of bmi
class(bmi)
# Check the dimensions of bmi
dim(bmi)
# View the column names of bmi
names(bmi)
# ---- Structure: base R vs dplyr ----
# Check the structure of bmi
str(bmi)
# Load dplyr
library(dplyr)
# Check the structure of bmi, the dplyr way
glimpse(bmi)
# View a summary of bmi
summary(bmi)
# ---- Viewing rows ----
# Print bmi to the console
print(bmi)
# View the first 6 rows
head(bmi)
# View the first 15 rows
head(bmi,15)
# View the last 6 rows
tail(bmi)
# View the last 10 rows
tail(bmi, n = 10)
# ---- Quick visual checks ----
# Histogram of BMIs from 2008
hist(bmi$Y2008)
# Scatter plot of BMIs against those from 2008
# NOTE(review): the original comment said 1980, but this first attempt
# actually plots Y1998 on the x axis.
plot(bmi$Y1998, bmi$Y2008)
# ---- Same checks with the intended 1980 column ----
# Histogram of BMIs from 2008
hist(bmi$Y2008)
# Scatter plot comparing BMIs from 1980 to those from 2008
plot(bmi$Y1980,bmi$Y2008)
# ---- Wide -> long with gather() ----
# Apply gather() to bmi and save the result as bmi_long
bmi_long <- gather(bmi, year, bmi_val, -Country)
# View the first 20 rows of the result
head(bmi_long,20)
# ---- Long -> wide with spread() (reverses the gather above) ----
# Apply spread() to bmi_long
bmi_wide <- spread(bmi_long, year, bmi_val)
# View the head of bmi_wide
head(bmi_wide)
# ---- Splitting one column into two with separate() ----
# Apply separate() to bmi_cc: "Country/ISO" -> Country, ISO
bmi_cc_clean <- separate(bmi_cc, col = Country_ISO, into = c("Country", "ISO"), sep = "/")
# Print the head of the result
head(bmi_cc_clean)
# ---- Re-combining columns with unite() (note the new "-" separator) ----
# Apply unite() to bmi_cc_clean
bmi_cc <- unite(bmi_cc_clean, Country_ISO, Country, ISO, sep = "-")
# View the head of the result
head(bmi_cc)
# ---- Tidying the census data ----
# View the head of census
head(census)
# Gather the month columns
census2 <- gather(census, month, amount, -YEAR)
# Arrange rows by YEAR using dplyr's arrange
census2_arr <- arrange(census2,YEAR)
# View first 20 rows of census2_arr
head(census2_arr,20)
# ---- Spreading the 'type' column ----
# View first 50 rows of census_long
head(census_long, 50)
# Spread the type column
census_long2 <- spread(census_long, type, amount)
# View first 20 rows of census_long2
head(census_long2, 20)
# ---- Separating a yr_month column ----
# View the head of census_long3
head(census_long3)
# Separate the yr_month column into two
census_long4 <- separate(census_long3, col = yr_month, into = c("year", "month"))
# View the first 6 rows of the result
head(census_long4)
|
3e61b4f5d5099ae5529eb40b41158af5b6dba4d5
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/1,2,4-triazole.R
|
b45129ccef9e5fcef9c783f7983e54ad01f5f72b
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240
|
r
|
1,2,4-triazole.R
|
# Build script for the 1,2,4-triazole glossary page: renders
# 1,2,4-triazole.Rmd straight to HTML via knitr.
library("knitr")
# rgl is attached but never called directly in this script; presumably the
# .Rmd uses it for 3-D rendering -- TODO confirm.
library("rgl")
# Earlier two-step pipeline (knit -> markdownToHTML, optional pandoc to PDF)
# kept for reference:
#knit("1,2,4-triazole.Rmd")
#markdownToHTML('1,2,4-triazole.md', '1,2,4-triazole.html', options=c("use_xhml"))
#system("pandoc -s 1,2,4-triazole.html -o 1,2,4-triazole.pdf")
# knit2html() knits the Rmd and converts the resulting markdown to HTML in
# one call, replacing the commented-out pipeline above.
knit2html('1,2,4-triazole.Rmd')
|
9aca76f5b72ea518ef05fa5c8d2ad64986d278da
|
53ea4aedc6c286c0ccafb6034119b7897ca58252
|
/code/R/trace.R
|
21c27aa9e187d3567ee746ddb0d6ddf1f547a20a
|
[] |
no_license
|
ZiqianZhuang/Integrated-Population-Modeling
|
5b4931c6bafb51fa0c99fe06cdeb9c2b5a2ce6f5
|
0d576ca6f66f4f5a9568d728998ff3316586a055
|
refs/heads/master
| 2020-11-28T04:35:37.579503
| 2019-12-23T08:13:21
| 2019-12-23T08:13:21
| 229,704,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
trace.R
|
trace <- function(mcmc, name, pop.id, scenario) {
  # Save trace plots of an MCMC object as numbered PNG files under
  # output/scenario<scenario>/trace/<name><pop.id>-plotNN.png.
  #
  # Args:
  #   mcmc:     object with a plot() method (e.g. a coda mcmc object --
  #             TODO confirm against callers).
  #   name:     file-name prefix for the plots.
  #   pop.id:   identifier appended to the prefix.
  #   scenario: scenario number used to build the output directory.
  # Returns: nothing; called for its side effect of writing PNG files.
  dir <- paste0("output/scenario", scenario, "/trace")
  # recursive = TRUE so missing parent directories are created as well
  if (!dir.exists(dir)) dir.create(dir, recursive = TRUE)
  png(paste0(dir, "/", name, pop.id, "-plot%02d.png"))
  # Close the device even if plot() errors (the original leaked the device
  # on error because dev.off() was only reached on success).
  on.exit(dev.off(), add = TRUE)
  plot(mcmc)
}
|
ecb9957906bc7ab4b5da98c8c9812f09c514e4ad
|
df1ffc1772542e47bd7c4c0d2169ed386de30d7f
|
/R/accessors.R
|
1910e1db93e5bfadc36a4cca319102644ce922bb
|
[] |
no_license
|
philipmgoddard/glmPG
|
04be0f3f9fcc5fbcfb5a1cf775d56a91829b1957
|
9fa612358bba7e62b1c15febe08f78dc8a6dd395
|
refs/heads/master
| 2021-01-10T05:35:56.902009
| 2016-01-03T16:04:06
| 2016-01-03T16:04:06
| 48,944,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
accessors.R
|
#' @include class-definition.R
NULL
#' Set the generic for accessor (getter) for coefficients
#'
#' Generic accessor for the fitted coefficients stored in a GLMP object.
#' @param object object of class GLMP
#' @export
setGeneric("getCoef",
function(object){
standardGeneric("getCoef")
})
#' @describeIn getCoef Extract the first column of the \code{modelCoef}
#'   slot (the coefficient estimates) as a vector.
#' @export
setMethod("getCoef",
signature = "GLMP",
function(object){
# modelCoef's first column holds the values returned here; the meaning of
# any remaining columns is defined in class-definition.R -- presumably
# standard errors / statistics (TODO confirm).
out <- object@modelCoef[, 1]
return(out)
})
|
8e1a6139cfc5be37029d523dc1eb8d56f0a16623
|
0117665a818f1a2e4dd7a86e1ea0918f8c26395a
|
/kaggle-House_Prices/storico/missing_categorical_imputation.R
|
e5c7d62da8e186690a0278359232aa9d0bb1bdf0
|
[] |
no_license
|
Nabuz/My_work
|
a233440953bad75a1871c35d8a09a05767d3c1f8
|
0d13930a4060d9db154942a5fdd3627eced31165
|
refs/heads/master
| 2023-01-22T00:58:03.279680
| 2020-12-01T06:36:39
| 2020-12-01T06:36:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,032
|
r
|
missing_categorical_imputation.R
|
# Setup for kNN-based imputation of missing categorical/numeric values
# (House Prices data): load packages and pull the target column.
library(tidyverse)
library(caret)
library(skimr)
library(fastDummies)
library(EnsCat)
# NOTE: loading proxy masks stats::dist, which the distance_matrix()
# function below relies on for non-base metrics such as "jaccard".
library(proxy)
library(class)
library(FastKNN)
# Clear the console (form feed) and wipe the workspace.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern -- it only
# clears the global environment and surprises anyone sourcing this file.
cat("\014")
rm(list = ls())
# Training/test data; read.csv2 with sep="," (decimal mark stays ",").
trainset<-read.csv2("./input/train.csv", sep =",")
testset<-read.csv2("./input/test.csv", sep =",")
# Variable to impute (contains NAs) and the attributes used for distances.
target <- trainset %>% pull(LotFrontage)
attributes <- trainset[,c(1:6)]
k_neighbors = 5
# Scratch checks kept for reference (now handled inside knn_impute()):
#possible_aggregation_method = list("mean", "median", "mode")
#number_observations = length(target)
#is_target_numeric = sum(apply(as.matrix(target), FUN = is.numeric, MARGIN = 1)) > 0
#nrow(attributes) != number_observations
# Get the distance matrix and check whether no error was triggered when computing it
#distances = distance_matrix(attributes, "euclidean", "hamming")
# Create the function.
# Return the most frequent value of v; ties are broken in favour of the
# value that appears first among unique(v).
getmode <- function(v) {
  distinct_vals <- unique(v)
  counts <- tabulate(match(v, distinct_vals))
  distinct_vals[which.max(counts)]
}
weighted_hamming <- function(data) {
  # Weighted hamming distance between the rows of a data frame of
  # categorical variables.
  #
  # Per variable, the distance between two rows is 1 when their values
  # differ; when they agree it is the relative frequency of that shared
  # value in the column (agreeing on a rare value => small distance;
  # agreeing on a very common value => distance close to 1).
  # The per-variable distances are combined with their harmonic mean,
  # p / sum(1/d) -- the original docstring says "harmonic mean up to a
  # constant factor", so this is one faithful reading of an unimplemented
  # stub; confirm against the intended reference implementation.
  #
  # Args:
  #   data: data frame of categorical (factor/character) columns.
  # Returns:
  #   symmetric numeric matrix of pairwise row distances (diagonal = 0).
  n_rows <- nrow(data)
  n_vars <- ncol(data)
  # Relative frequency of every level, per column.
  rel_freq <- lapply(data, function(col) table(col) / length(col))
  dist_mat <- matrix(0, nrow = n_rows, ncol = n_rows)
  for (i in seq_len(n_rows - 1)) {
    for (j in seq(i + 1, n_rows)) {
      d <- numeric(n_vars)
      for (k in seq_len(n_vars)) {
        vi <- as.character(data[i, k])
        vj <- as.character(data[j, k])
        d[k] <- if (vi != vj) 1 else rel_freq[[k]][[vi]]
      }
      # Harmonic mean of the per-variable distances.
      dist_mat[i, j] <- dist_mat[j, i] <- n_vars / sum(1 / d)
    }
  }
  dist_mat
}
distance_matrix <- function(dataset_temp, numeric_distance = "euclidean", categorical_distance = "jaccard") {
  # Pairwise distance matrix that handles numeric, categorical and mixed
  # data frames, attribute by attribute.
  #
  # - All-numeric input: dist() with `numeric_distance`.
  # - All-categorical input: "hamming" is computed directly;
  #   "weighted-hamming" delegates to weighted_hamming(); any other metric
  #   is applied to a dummy-coded copy of the data (NOTE: "jaccard" relies
  #   on proxy::dist masking stats::dist in the calling script).
  # - Mixed input: numeric columns are scaled, each part's distance matrix
  #   is computed separately, and a Gower-like weighted sum is returned.
  # Missing-value handling promised by the original docstring (mean/mode
  # fill) is NOT implemented here -- inputs are assumed complete.
  #
  # Args:
  #   dataset_temp: data frame to compute distances on.
  #   numeric_distance: metric for continuous columns ("euclidean",
  #     "maximum", "manhattan", "canberra", "minkowski").
  #   categorical_distance: metric for categorical columns ("euclidean",
  #     "jaccard", "hamming", "weighted-hamming").
  # Returns:
  #   an n x n matrix with NaN on the diagonal, or NULL when an
  #   unsupported metric is requested.
  possible_continuous_distances <- c("euclidean", "maximum", "manhattan", "canberra", "minkowski")
  possible_binary_distances <- c("euclidean", "jaccard", "hamming", "weighted-hamming")
  number_of_variables <- ncol(dataset_temp)

  # Plain hamming distance (count of differing attributes) between rows.
  hamming_matrix <- function(df) {
    n <- nrow(df)
    m <- matrix(nrow = n, ncol = n)
    for (i in seq_len(n - 1))
      for (j in seq(i, n))
        m[j, i] <- m[i, j] <- sum(df[i, ] != df[j, ])
    m
  }

  # Column types: TRUE for factor (categorical) columns.
  is_factor_col <- vapply(dataset_temp, is.factor, logical(1))
  is_all_categorical <- sum(is_factor_col) == number_of_variables
  is_all_numerical <- sum(is_factor_col) == 0
  is_mixed_type <- !is_all_categorical && !is_all_numerical

  # Validate the requested metrics.
  if (!(numeric_distance %in% possible_continuous_distances)) {
    print(paste("The continuous distance", numeric_distance, "is not supported."))
    return(NULL)
  } else if (!(categorical_distance %in% possible_binary_distances)) {
    print(paste("The binary distance", categorical_distance, "is not supported."))
    return(NULL)
  }

  # For mixed data: split the frame and normalise the numeric part.
  if (is_mixed_type) {
    number_of_categorical_var <- sum(is_factor_col)
    number_of_numerical_var <- number_of_variables - number_of_categorical_var
    data_numeric <- scale(dataset_temp[, !is_factor_col])
    # drop = FALSE so a single categorical column stays a data frame
    data_categorical <- dataset_temp[, is_factor_col, drop = FALSE]
  }

  # Dummy-code categorical columns for euclidean/jaccard; for hamming,
  # recode factors to integer codes so row-wise != comparisons work.
  if (!is_all_numerical && !(categorical_distance %in% c("hamming", "weighted-hamming"))) {
    if (is_mixed_type) {
      data_categorical <- fastDummies::dummy_cols(data_categorical, remove_selected_columns = TRUE)
    } else {
      dataset_temp <- fastDummies::dummy_cols(dataset_temp, remove_selected_columns = TRUE)
    }
  } else if (!is_all_numerical && categorical_distance == "hamming") {
    if (is_mixed_type) {
      data_categorical[] <- lapply(data_categorical, function(col) as.numeric(factor(col)))
    } else {
      dataset_temp[] <- lapply(dataset_temp, function(col) as.numeric(factor(col)))
    }
  }

  if (is_all_numerical) {
    # p is only used by the minkowski metric; harmless otherwise.
    result_matrix <- as.matrix(dist(dataset_temp, method = numeric_distance, p = 3))
  } else if (is_all_categorical) {
    if (categorical_distance == "weighted-hamming") {
      # BUG FIX: the original called weighted_hamming(data), which resolved
      # to base R's data() function instead of the input data frame.
      result_matrix <- weighted_hamming(dataset_temp)
    } else if (categorical_distance == "hamming") {
      result_matrix <- hamming_matrix(dataset_temp)
    } else {
      result_matrix <- as.matrix(dist(dataset_temp, method = categorical_distance))
    }
  } else {
    result_numeric <- as.matrix(dist(data_numeric, method = numeric_distance, p = 3))
    if (categorical_distance == "weighted-hamming") {
      result_categorical <- weighted_hamming(data_categorical)
    } else if (categorical_distance == "hamming") {
      result_categorical <- hamming_matrix(data_categorical)
    } else {
      result_categorical <- as.matrix(dist(data_categorical, method = categorical_distance))
    }
    # Gower-like combination: weight each part by its number of variables.
    result_matrix <- result_numeric * number_of_numerical_var +
      result_categorical * number_of_categorical_var
  }

  # Self-distances are meaningless for neighbour search.
  diag(result_matrix) <- NaN
  result_matrix
}
knn_impute <- function(target, attributes, k_neighbors, aggregation_method="mean", numeric_distance="euclidean",
                       categorical_distance="jaccard", missing_neighbors_threshold = 0.5)
{
  # Replace missing values of `target` using its k nearest neighbours,
  # where neighbours are found via distance_matrix() on `attributes`.
  # A missing value is left untouched when too many of its k neighbours
  # are themselves missing (>= missing_neighbors_threshold * k_neighbors).
  #
  # Args:
  #   target: vector (length >= 3) whose NAs should be imputed.
  #   attributes: data frame with one row per element of `target`.
  #   k_neighbors: number of neighbours to aggregate, in [1, length(target)].
  #   aggregation_method: "mean", "median" or "mode"; non-numeric targets
  #     only support "mode".
  #   numeric_distance, categorical_distance: forwarded to distance_matrix().
  #   missing_neighbors_threshold: fraction of missing neighbours at/above
  #     which the value stays NA.
  # Returns:
  #   `target` with (some) NAs replaced, or NULL on invalid input.

  # ---- input validation ---------------------------------------------------
  possible_aggregation_method <- c("mean", "median", "mode")
  number_observations <- length(target)
  is_target_numeric <- is.numeric(target)
  if (number_observations < 3)
  {
    print("Not enough observations.")
    return(NULL)
  }
  if (nrow(attributes) != number_observations)
  {
    print("The number of observations in the attributes variable is not matching the target variable length.")
    return(NULL)
  }
  if ((k_neighbors > number_observations) || (k_neighbors < 1))
  {
    print("The range of the number of neighbors is incorrect.")
    return(NULL)
  }
  if (!(aggregation_method %in% possible_aggregation_method))
  {
    print("The aggregation method is incorrect.")
    return(NULL)
  }
  if (!is_target_numeric && aggregation_method != "mode")
  {
    print("The only method allowed for categorical target variable is the mode.")
    return(NULL)
  }

  # ---- distances ----------------------------------------------------------
  # Get the distance matrix and check whether no error was triggered when
  # computing it.
  distances <- distance_matrix(attributes, numeric_distance, categorical_distance)
  if (is.null(distances))
  {
    print("null distance")
    return(NULL)
  }

  # ---- imputation ---------------------------------------------------------
  for (i in seq_along(target))
  {
    if (is.na(target[i]))
    {
      # Indices of the k closest rows (FastKNN::k.nearest.neighbors).
      closest_to_target <- k.nearest.neighbors(i, distances, k = k_neighbors)
      neighbors <- target[closest_to_target]
      n_missing <- sum(is.na(neighbors))
      # Skip imputation when too many neighbours are also missing.
      if (n_missing >= missing_neighbors_threshold * k_neighbors)
      {
        next
      }
      else if (aggregation_method == "mean")
      {
        target[i] <- mean(na.omit(neighbors))
      }
      else if (aggregation_method == "median")
      {
        target[i] <- median(na.omit(neighbors))
      }
      else
      {
        target[i] <- getmode(na.omit(neighbors))
      }
    }
  }
  return(target)
}
# Impute LotFrontage: maximum distance for the numeric attributes,
# hamming distance for the categorical ones.
tmp <- knn_impute(target, attributes, k_neighbors,
                  aggregation_method = "mean",
                  numeric_distance = "maximum",
                  categorical_distance = "hamming",
                  missing_neighbors_threshold = 0.5)
|
580b3fbccefc8188e20bd0e6d209f3e4fb80540e
|
1dfec10ac8e51f63947f35ae42bbfead6ed8ce4c
|
/Walmart_Trip_Type/scripts/ClusterFeatures.R
|
d067ade6ad070223d640898fcb2191c823a21726
|
[] |
no_license
|
jothiprakash/Kaggle_Challenges
|
6f84109e5523d2afbe7eca678565be80985fb8e1
|
e5d5f815b0b52ecd5b3157596387d2184613dd8f
|
refs/heads/master
| 2021-08-23T02:22:21.383158
| 2017-12-02T13:40:39
| 2017-12-02T13:40:39
| 110,996,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,354
|
r
|
ClusterFeatures.R
|
# Walmart Trip Type: build cluster features from Fineline numbers and
# department descriptions. Helper functions (generatefineCluster,
# generatedeptCluster) come from the sourced definitions file below.
library(Matrix)
library(data.table)
library(plyr)
library(entropy)
library(stringr)
source('Scripts/new_fDefinitions.R')
#Loading the train and test data (Upc kept as character to avoid precision loss)
train<-fread("Data/train.csv",colClasses = c(Upc='character'))
test<-fread("Data/test.csv",colClasses = c(Upc='character'))
#Aligning the target label
#Preparing the target variable
y=as.numeric(train$TripType)
#How to fill y properly : Xgboost requirement ,label must be in [0, num_class)
# Remap each TripType to its 1-based rank among the sorted unique values.
sortedTripType<-sort(unique(y))
target<-c(1:length(y))
for (i in 1:length(y))
{
target[i]<-which(sortedTripType==y[i])
}
#target<-target-1 #label must be in [0, num_class)
train$TripType<-target
# Replace missing FinelineNumber with the sentinel 10000 (train and test).
na_index<-which(is.na(train$FinelineNumber))
train$FinelineNumber[na_index]<-10000
na_index<-which(is.na(test$FinelineNumber))
test$FinelineNumber[na_index]<-10000
# Replace empty Upc strings with the sentinel "0000".
na_upc_index<-which(train$Upc=="")
train$Upc[na_upc_index]<-"0000"
na_upc_index<-which(test$Upc=="")
test$Upc[na_upc_index]<-"0000"
#remove "HEALTH AND BEAUTY AIDS" as it is not present in the test
train<-train[train$DepartmentDescription!="HEALTH AND BEAUTY AIDS",]
#for feature engineering
deptlist<-sort(unique(train$DepartmentDescription))
#forming the department neighbourlist (disabled)
#source('Scripts/deptCorrelation.R')
#print("Completed the smoothing part")
# Frequency of each (remapped) TripType among the rows of `x`, returned as
# a length-38 vector: index = trip-type label, value = count, absent
# types stay 0.
getClassdistribution <- function(x)
{
  class_counts <- rep(0, 38)
  freq_tbl <- count(x, vars = "TripType")
  for (row_idx in seq_len(nrow(freq_tbl))) {
    class_counts[as.integer(freq_tbl[row_idx, 1])] <- freq_tbl[row_idx, "freq"]
  }
  class_counts
}
# Per-FinelineNumber summary: total rows, 38-way class distribution and
# number of distinct trip types.
fineLinedata<-ddply(train,c('FinelineNumber'),
function(x) c(totalCount=nrow(x),
classdist=getClassdistribution(x),
uniqueTriptype=length(unique(x$TripType))
)
)
# Empirical prior of each Fineline number.
fineLinedata$prior<-fineLinedata$totalCount/sum(fineLinedata$totalCount)
# K-means on the 38 class-distribution columns (cols 3:40), 180 clusters.
Obj<-kmeans(fineLinedata[,c(3:40)],
centers=180,
iter.max = 1000,
nstart = 100,
algorithm = c("Lloyd"),
trace=FALSE)
# Map each Fineline number to its cluster id.
fineCluster<-data.frame(fineLinedata$FinelineNumber,Obj$cluster)
fineLinelist<-unique(fineCluster$fineLinedata.FinelineNumber)
# Per-visit cluster features (generatefineCluster comes from the sourced
# Scripts/new_fDefinitions.R).
testCluster<-ddply(test,c('VisitNumber'),
function(x) c(cluster=generatefineCluster(x)
),.progress="text"
)
trainCluster<-ddply(train,c('VisitNumber'),
function(x) c(cluster=generatefineCluster(x)
),.progress = "text"
)
write.csv(file="featureVectors/trainCluster.csv",
trainCluster,
row.names=FALSE)
write.csv(file="featureVectors/testCluster.csv",
testCluster,
row.names=FALSE)
# Free memory before the department pass.
rm(trainCluster)
rm(testCluster)
# Same pipeline per DepartmentDescription: summarise, cluster (7 centers),
# then build per-visit department-cluster features.
departmentdata<-ddply(train,c('DepartmentDescription'),
function(x) c(totalCount=nrow(x),
classdist=getClassdistribution(x),
uniqueTriptype=length(unique(x$TripType))
),.progress = "text"
)
departmentdata$prior<-departmentdata$totalCount/sum(departmentdata$totalCount)
#performing kmeans on the class-distribution columns
deptObj<-kmeans(departmentdata[, c(3:40)],
centers = 7,
iter.max = 1000,
nstart = 100,
algorithm = c("Lloyd"),
trace = FALSE)
deptCluster<-data.frame(departmentdata$DepartmentDescription,
deptObj$cluster)
names(deptCluster)<-c("deptID","clusterID")
# Per-visit department-cluster features (generatedeptCluster also comes
# from the sourced definitions file).
deptestCluster<-ddply(test,c('VisitNumber'),
function(x) c(cluster=generatedeptCluster(x)
),.progress="text"
)
deptrainCluster<-ddply(train,c('VisitNumber'),
function(x) c(cluster=generatedeptCluster(x)
),.progress = "text"
)
write.csv(file="featureVectors/deptrainCluster.csv",
deptrainCluster,
row.names=FALSE)
write.csv(file="featureVectors/deptestCluster.csv",
deptestCluster,
row.names=FALSE)
rm(deptrainCluster)
rm(deptestCluster)
|
08e52d2a77876862bcb6d3e0b80b05ddbd93f815
|
58a6b7a2a650c11bb380e8e14aeb470fb8aeee59
|
/R/errorModelSelection.R
|
d7fb177ab22168792f88461842fd2bea8c7f6302
|
[] |
no_license
|
ayaadel9190/Rsmlx
|
a40df71daebd39fea01b9fe99fa76f22bfc6e3a7
|
7933b03a186d9b9cf9285b6fd60caf838b53e54f
|
refs/heads/master
| 2020-04-28T22:45:20.709635
| 2018-10-25T21:55:44
| 2018-10-25T21:55:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,582
|
r
|
errorModelSelection.R
|
# Select the best residual-error model(s) for each continuous output of a
# project, ranked by an information criterion via computeBIC().
#
# Args:
#   project: optional project to load first (loadProject() and the get*()
#     helpers below presumably come from the Monolix API / lixoftConnectors
#     -- confirm in the package imports).
#   criterion: "BIC", "AIC", or a numeric penalty, forwarded to computeBIC().
#   nb.model: number of candidate error models to report per output.
# Returns:
#   when nb.model == 1, a named character vector with the winning model
#   name per continuous output; otherwise a named list of ranked data
#   frames (one per output).
errorModelSelection <- function(project=NULL, criterion="BIC", nb.model=1) {
if (!is.null(project))
loadProject(project)
# Observation model and data description; keep continuous outputs only.
obs.model <- getContinuousObservationModel()
obs.info <- getData()
i.contObs <- which(obs.info$observationTypes=="continuous")
i.contModel <- which(names(obs.model$prediction) %in% obs.info$observationNames[i.contObs])
n.out <- length(i.contModel)
# Simulated predictions and observed values for every output.
pred <- getSimulatedPredictions()
d <- getObservationInformation()
if (nb.model==1) {
# Single candidate requested: collect only the winning model name.
res.errorModel <- NULL
for (i.out in (1:n.out)) {
# Prediction name and observation name for this output.
name.predi <- obs.model$prediction[[i.contModel[i.out]]]
name.obsi <- names(obs.model$prediction[i.contModel[i.out]])
y.obsi <- d[[name.obsi]][[name.obsi]]
# resi <- computeBIC(y.obs=y.obsi,y.pred=y.pred[[i.out]], nrep=nrep, criterion=criterion, nb.model=nb.model)
y.predi <- pred[[name.predi]][[name.predi]]
resi <- computeBIC(y.obs=y.obsi,y.pred=y.predi, criterion=criterion, nb.model=nb.model)
res.errorModel <- c(res.errorModel, as.character(resi[['error.model']]))
names(res.errorModel)[i.out] <- name.obsi
}
} else {
# Several candidates requested: keep the full ranked data frame per output.
res.errorModel <- list()
for (i.out in (1:n.out)) {
name.predi <- obs.model$prediction[[i.contModel[i.out]]]
name.obsi <- names(obs.model$prediction[i.contModel[i.out]])
y.obsi <- d[[name.obsi]][[name.obsi]]
y.predi <- pred[[name.predi]][[name.predi]]
res.errorModel[[i.out]] <- computeBIC(y.obs=y.obsi,y.pred=y.predi, criterion=criterion, nb.model=nb.model)
# res.errorModel[[i.out]] <- computeBIC(y.obs=y.obsi,y.pred=y.pred[[i.out]], criterion=criterion, nb.model=nb.model)
names(res.errorModel)[i.out] <- name.obsi
}
}
return(res.errorModel)
}
# Objective for the "combined1" error model, sd = a + b*prediction with
# x = c(a, b): sum of squared standardised residuals plus log-variances,
# i.e. -2*log-likelihood up to an additive constant. Minimised via nlm().
e.min1 <- function(x, y.pred, y.obs) {
  sd.lin <- x[1] + x[2] * y.pred
  var.lin <- sd.lin^2
  sq.resid <- (y.obs - y.pred)^2
  sum(sq.resid / var.lin) + sum(log(var.lin))
}
# Objective for the "combined2" error model, variance = a^2 + (b*pred)^2
# with x = c(a, b): sum of squared standardised residuals plus
# log-variances (-2*log-likelihood up to a constant). Minimised via nlm().
e.min2 <- function(x, y.pred, y.obs) {
  var.comb <- x[1]^2 + (x[2] * y.pred)^2
  sq.resid <- (y.obs - y.pred)^2
  sum(sq.resid / var.comb) + sum(log(var.comb))
}
computeBIC <- function(y.obs, y.pred, criterion, nb.model) {
  # Fit five residual-error models (constant, proportional, combined1,
  # combined2, exponential) to observed vs simulated predictions and rank
  # them by an information criterion.
  #
  # Args:
  #   y.obs: observed values; y.pred: simulated predictions, whose length
  #     is an integer multiple (nrep replicates) of length(y.obs).
  #   criterion: "BIC" (penalty log(N)), "AIC" (penalty 2), or a numeric
  #     penalty used directly.
  #   nb.model: number of best models to return.
  # Returns:
  #   data frame (error.model, ll, df, criterion) with the nb.model best
  #   models, best first.
  nrep <- length(y.pred) / length(y.obs)
  y.obs <- rep(y.obs, nrep)
  # Keep only strictly positive pairs (the exponential model needs logs).
  iy <- (y.pred > 0 & y.obs > 0)
  y.obs <- y.obs[iy]
  y.pred <- y.pred[iy]
  n <- length(y.obs)
  N <- n / nrep
  # Per-parameter penalty.
  if (criterion == "BIC")
    pen.bic <- log(N)
  else if (criterion == "AIC")
    pen.bic <- 2
  else
    pen.bic <- criterion
  # Closed-form estimates for the one-parameter models...
  a.cons <- sqrt(mean((y.obs - y.pred)^2))
  b.prop <- sqrt(mean(((y.obs / y.pred - 1)^2)))
  # ...and numeric optimisation (nlm on e.min2 / e.min1, defined alongside
  # this function) for the two combined models; combined2 seeds combined1.
  x.min <- nlm(e.min2, c(a.cons, 0.2), y.pred, y.obs)
  a.comb2 <- abs(x.min$estimate[1])
  b.comb2 <- abs(x.min$estimate[2])
  x.min <- nlm(e.min1, c(a.comb2, b.comb2), y.pred, y.obs)
  a.comb1 <- x.min$estimate[1]
  b.comb1 <- x.min$estimate[2]
  a.expo <- sqrt(mean((log(y.obs) - log(y.pred))^2))
  error.model <- c("constant", "proportional", "combined1", "combined2", "exponential")
  # Per-model variance at each prediction; columns follow error.model.
  sigma2 <- cbind(a.cons^2, (b.prop * y.pred)^2, (a.comb1 + b.comb1 * y.pred)^2,
                  a.comb2^2 + (b.comb2 * y.pred)^2, a.expo^2)
  # Number of free parameters per model.
  df <- c(1, 1, 2, 2, 1)
  # Gaussian log-likelihood for the first four models (dead duplicate
  # `ll <- NULL` assignment removed).
  ll <- NULL
  for (k in 1:4)
    ll <- c(ll, -0.5 * sum((y.obs - y.pred)^2 / sigma2[, k] + log(2 * pi * sigma2[, k])) / nrep)
  # Exponential model: gaussian on the log scale; 2*log(y.obs) is the
  # Jacobian of the log transform.
  ll <- c(ll, -0.5 * sum((log(y.obs) - log(y.pred))^2 / (a.expo^2) + log(2 * pi * (a.expo^2)) + 2 * log(y.obs)) / nrep)
  pen <- pen.bic * df
  bic <- -2 * ll + pen
  # Reuse error.model instead of repeating the literal vector.
  E <- data.frame(error.model = error.model, ll = ll, df = df, criterion = bic)
  # Keep the nb.model best models, best first.
  nb.model <- min(nb.model, length(bic))
  E <- E[order(bic)[1:nb.model], ]
  row.names(E) <- 1:nrow(E)
  return(E)
}
|
0f0d03bbe27cc0c8fd09318cc440c070309cf893
|
7eb128f9b7899c33d4854009edbd38dd566cba72
|
/R Tutorials/Book spuRs/scripts/newton_gamma.r
|
fae1cc776bb846003c89982b4f666d1c12185026
|
[] |
no_license
|
chengjun/Research
|
1149add090ec563f544c4b5a886c01b1392a25d4
|
c01e3d2eac2bca74671abb9cd63e1b06e5566fc8
|
refs/heads/master
| 2021-06-15T16:35:15.005107
| 2019-08-06T09:03:29
| 2019-08-06T09:03:29
| 11,498,113
| 4
| 6
| null | 2021-04-15T09:27:11
| 2013-07-18T08:36:12
|
Mathematica
|
UTF-8
|
R
| false
| false
| 724
|
r
|
newton_gamma.r
|
# Code spuRs/resources/scripts/newton_gamma.r
newton <- function(f3, x0, tol = 1e-9, n.max = 100) {
  # Newton's method for optimisation, starting at x0.
  #
  # Args:
  #   f3: function that, given x, returns the vector
  #       (f(x), f'(x), f''(x)) for some target f.
  #   x0: starting point.
  #   tol: convergence tolerance on |f'(x)|.
  #   n.max: iteration cap.
  # Returns: a stationary point of f (|f'| <= tol); on failure prints a
  #   message and returns NULL invisibly.
  x <- x0
  f3.x <- f3(x)
  n <- 0
  while ((abs(f3.x[2]) > tol) && (n < n.max)) {
    x <- x - f3.x[2]/f3.x[3]  # Newton step on f'
    f3.x <- f3(x)
    n <- n + 1
  }
  # Bug fix: test convergence itself rather than the iteration count --
  # the old check (n == n.max) mis-reported failure when the method
  # converged on exactly the last allowed iteration.
  if (abs(f3.x[2]) > tol) {
    cat('newton failed to converge\n')
  } else {
    return(x)
  }
}
gamma.2.3 <- function(x) {
  # gamma(2,3) density f(x) = 4 x^2 exp(-2x), returned together with its
  # first and second derivatives as c(f, f', f''). Zero below the support;
  # the second derivative is undefined (NaN) at x = 0.
  if (x < 0) {
    return(c(0, 0, 0))
  }
  if (x == 0) {
    return(c(0, 0, NaN))
  }
  decay <- exp(-2 * x)
  f <- 4 * x^2 * decay
  df <- 8 * x * (1 - x) * decay
  d2f <- 8 * (1 - 2 * x^2) * decay
  c(f, df, d2f)
}
|
2f134344540af0d863a5bce26194293970462721
|
10cd4a108478b8b5ec91722d2058c85bafe0488f
|
/functionExercise.R
|
c32e3c3614e95592ae7e2a5df387edd92c215e7e
|
[] |
no_license
|
Hyeongrok-Oh/inc02b
|
aaab8667b775f262545a0af13f404f7aeaa0492b
|
8eba74f007d6f84c49228a1de0e52f0b800d964c
|
refs/heads/main
| 2023-04-11T10:22:40.065077
| 2021-04-17T08:22:30
| 2021-04-17T08:22:30
| 358,799,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 177
|
r
|
functionExercise.R
|
# Convert a distance in light-years to kilometres, using c = 299792 km/s
# and a mean tropical year of 365.2422 days.
lightyear <- function(ly) {
  km_per_second <- 299792
  km_per_minute <- km_per_second * 60
  km_per_hour <- km_per_minute * 60
  km_per_day <- km_per_hour * 24
  km_per_year <- km_per_day * 365.2422
  ly * km_per_year
}
|
333f6d5117c7c6aeeff0a4bee844eebad4e5d776
|
21352b887bc324c9ce82f0bc3652115e6e89b0c7
|
/HR_vpc.R
|
21af1a36676197cbab0891c422d7cf4543bcddb5
|
[] |
no_license
|
GiselleB33/TTEmanuscript
|
636e6400efbe0b0be28b1e4767bbefd755378423
|
01a6140be9b3acf03afabe9a18eee5c7bd96d043
|
refs/heads/master
| 2021-05-14T13:13:46.629110
| 2018-01-04T22:24:00
| 2018-01-04T22:24:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,020
|
r
|
HR_vpc.R
|
# VPC-style check of the hazard ratios from a time-to-event analysis:
# refit a Cox model on each simulated replicate and compare the simulated
# log-HR distributions with the observed estimates.
# NOTE(review): setwd() to a personal absolute path makes the script
# non-portable -- consider relative paths or here::here().
setwd('~/Documents/My Papers/2016-TTE Diagnostics/TTEmanuscript')
library(readr)
library(ggplot2)
library(dplyr)
library(survival)
library(purrr)
# Observed data: keep only rows with STIME == 0.
odat <- read_csv('final_mod/run6/TTEdat_v11.csv') %>%
filter(STIME==0)
# Observed Cox fit: dose (as factor) plus covariate x.
ofit <- coxph(Surv(TIME,DV)~as.factor(dose)+x, data=odat)
simdat <- read_csv('final_mod/simulatedTimesForVPC.csv')
# One Cox fit per simulation replicate (split on irep); keep coefficients.
sim_coef <- simdat %>%
split(.$irep) %>%
map( ~ coef(coxph(Surv(simTIME,simDV)~as.factor(DOSE)+X,data=.)))
# Long table: one row per (dose level, replicate) pairing each simulated
# log-HR with the corresponding observed coefficient (first three coefs
# are the dose-factor contrasts for doses 1/3/10).
sim_coef_tall <- data.frame(dose=rep(c(1,3,10),each=length(sim_coef)),
obs_log_hr = rep(coef(ofit)[1:3],each=length(sim_coef)),
sim_loghr = as.vector(t(bind_rows(sim_coef))[,1:3]))
# Histogram of the simulated log-HRs per dose, observed value in red.
log_hr_plot = sim_coef_tall %>%
ggplot(aes(x=sim_loghr)) +
geom_histogram(fill='white',col='black',bins=30) +
geom_vline(aes(xintercept=obs_log_hr),col='red') +
facet_grid(dose~., labeller='label_both') +
theme_bw() +
labs(x='log hazard ratio')
ggsave(log_hr_plot,height=8,width=6,filename = 'logHRplot_model1.png')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.