blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55daadf25c726c067d242b88bcea19ff148786da
|
0597d697bfd9062630ade080a81cbac05755f725
|
/src/08_ts_decomposition.R
|
e362e47ba3aef55c4815761c44ed49f7f8d31345
|
[] |
no_license
|
ian-flores/suicidesPR
|
6a0e9c4f08b4a6df713b79328ddfc83c82ef297d
|
6a466aa3906b9361372fee68385f844b3a2bf801
|
refs/heads/master
| 2020-04-18T23:28:50.892880
| 2019-02-18T02:40:38
| 2019-02-18T02:40:38
| 167,822,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,077
|
r
|
08_ts_decomposition.R
|
# Time-series decomposition of monthly suicide counts in Puerto Rico.
# Reads the raw mortality CSVs (2000-2008), builds a monthly count series,
# plots it, and shows an STL (LOESS) decomposition.
library(fs)
library(lubridate)
library(tidyverse)
library(ggfortify)
# All files under the mortality directory; read as character columns so no
# type guessing mangles the codes.
data_files <- dir_ls('data/mortality_data_2000_2008/', type = 'file')
mortality <- map_df(data_files, read_csv, col_types = cols(.default = "c"))
# Monthly counts of deaths with typedeath == '2'
# (presumably the suicide code in this dataset -- TODO confirm against codebook).
ts_mortality <- mortality %>%
filter(typedeath == '2') %>%
select(yeardeath, monthdeath) %>%
group_by(yeardeath, monthdeath) %>%
count() %>%
ungroup() %>%
# First-of-month date per (year, month); unparseable rows become NA and are dropped.
mutate(date = ymd(paste(yeardeath, monthdeath, '01', sep = '-'))) %>%
filter(!is.na(date), date < ymd('2010-01-01')) %>%
arrange(date) %>%
select(date, n)
# Quick look at the raw monthly series.
ts_mortality %>%
ggplot(aes(x = date, y =n)) +
geom_line()
# Convert to a monthly ts object starting January 2000; note this reuses
# (overwrites) the `ts_mortality` name.
ts_mortality <- ts(ts_mortality$n, start = c(2000, 1), frequency = 12)
# STL decomposition with a periodic (fixed) seasonal component.
loess_decomp <- stl(ts_mortality, s.window = 'periodic')
loess_decomp %>%
autoplot(colour = 'brown') +
theme_light() +
labs(y = 'Suicide cases',
x = 'Date',
title = 'How do suicide cases vary by time in Puerto Rico?',
subtitle = 'LOESS Decomposition from 2000 to 2008',
caption = 'Graph prepared by Ian Flores Siaca')
|
95c6d406efa2a53f1965991c597721619f3b1b2d
|
289b70ac6d95d7f4585b1ac61439dfefd786fc77
|
/man/fitch.Rd
|
17157977d0407aa69f931db31bd1b6576bac4a14
|
[] |
no_license
|
syerramilli/R-sysid
|
f8ede18883a691e363b5ca3110c2583a5d7a426c
|
be2928b20b5f3e1230f292ea45166ae95cc71a23
|
refs/heads/master
| 2023-06-08T17:55:07.929065
| 2023-06-07T03:29:14
| 2023-06-07T03:29:14
| 29,390,663
| 3
| 2
| null | 2023-06-07T03:29:15
| 2015-01-17T12:38:48
|
R
|
UTF-8
|
R
| false
| true
| 829
|
rd
|
fitch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estpoly.R
\name{fitch}
\alias{fitch}
\title{Fit Characteristics}
\usage{
fitch(x)
}
\arguments{
\item{x}{the estimated model}
}
\value{
A list containing the following elements
\item{MSE}{Mean Square Error measure of how well the response of the model fits
the estimation data}
\item{FPE}{Final Prediction Error}
\item{FitPer}{Normalized root mean squared error (NRMSE) measure of how well the
response of the model fits the estimation data, expressed as a percentage.}
\item{AIC}{Raw Akaike Information Criteria (AIC) measure of model quality}
\item{AICc}{Small sample-size corrected AIC}
\item{nAIC}{Normalized AIC}
\item{BIC}{Bayesian Information Criteria (BIC)}
}
\description{
Returns quantitative assessment of the estimated model as a list
}
|
062e3af004febdc7ce2c05f0ca15cf4fe536cc6c
|
a8300d09f99711d3f21e4d648ad84b2b84e188e1
|
/man/surveySurvival.Rd
|
7213cf9e5babefeac337c57263a5a9e35a34ba0a
|
[] |
no_license
|
dougkinzey/Grym
|
aee89081fc67ace0881ab8c4760bdb6c4d516330
|
e960444a4edd7d29388e2f10e7a5a78effc71a54
|
refs/heads/master
| 2023-02-28T00:32:22.981848
| 2020-11-04T14:56:52
| 2020-11-04T14:56:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,779
|
rd
|
surveySurvival.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Grym.R
\name{surveySurvival}
\alias{surveySurvival}
\title{Survival to a survey period.}
\usage{
surveySurvival(yr, cls, s1, s2, Ms, M, Fs = 0, F = 0, rcls = 1)
}
\arguments{
\item{yr}{vector of survey year}
\item{cls}{vector of survey age class}
\item{s1}{vector of the first time step in the survey}
\item{s2}{vector of the final time step in the survey}
\item{Ms}{matrix of \emph{unscaled} integrated natural mortality}
\item{M}{vector of annual natural mortalities}
\item{Fs}{matrix of \emph{unscaled} integrated fishing mortality}
\item{F}{vector of annual fishing mortalities}
\item{rcls}{the reference age class to adjust to}
}
\value{
Returns a vector of the mean survival from time of
recruitment to the survey period.
}
\description{
Compute the scalings required to adjust surveyed age class
abundances to initial abundances at a reference age.
}
\details{
Given the age class, the year and the time steps within the year
at which the age class was surveyed, this function computes the
total survival from the start of the year in which the cohort were
in the reference age class to the survey period. If the surveyed
age class is younger than the reference class, the reciprocal of
the total survival from the survey period to the start of the year
that the cohort will be in the reference class is computed.
If there is inter-annual variability in natural or fishing
mortality, the survey years must be labelled so that \code{yr==1}
corresponds to the first element of the vector of \code{M} and/or \code{F},
and it is not possible to compute the survival for cohorts that
recruit before year 1.
If there is no inter-annual variability in natural or fishing
mortality, the survey year is irrelevant.
}
\examples{
## Daily time steps and 7 age classes
nsteps <- 365
Ages <- 2:8
Days <- seq(from=0, to=1, length=nsteps+1)
h <- 1/nsteps
## Constant intra-annual natural mortality
ms <- matrix(data=1, nrow=nsteps+1, ncol=length(x=Ages))
ms <- ms/mean(x=trapz(fs=ms, h=h))
Ms <- ctrapz(fs=ms, h=h)
## Survey year, period and age classes
svy <- data.frame(yr=3:5, s1=c(190, 220, 150), s2=c(201, 231, 161))
svy <- cbind(svy[rep(x=1:3, each=7), ], cls=1:7)
head(svy)
## Constant mortality
M <- 0.2
## Survival to the survey period from age class 1
surveySurvival(yr=svy$yr, cls=svy$cls, s1=svy$s1, s2=svy$s2, Ms=Ms, M=M)
## Survival to the survey period from age class 3
surveySurvival(yr=svy$yr, cls=svy$cls, s1=svy$s1, s2=svy$s2, Ms=Ms, M=M, rcls=3)
## Variable mortality
M <- rgamma(n=10, shape=20, rate=100)
M
## Survival cannot be projected outside the period for which mortality
## is specified.
surveySurvival(yr=svy$yr, cls=svy$cls, s1=svy$s1, s2=svy$s2, Ms=Ms, M=M)
}
|
2ff2506ae581e3d17abd8c9dd734fbc8d1d2304a
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query30_query05_1344n/query30_query05_1344n.R
|
7be70544863458f898645e69753eea2d5de78acb
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query30_query05_1344n.R
|
f7646fd9a761932cc38b7c4791c991dc query30_query05_1344n.qdimacs 179 363
|
5f70b00b3fc8c38f4b50bddc16aebac71f3b71b5
|
cee5203605e8a913f8c6316398b80a88e9cb1395
|
/lecture_3/data_table_exercise_filled.r
|
7bf5ff3657d341382dfa1e50baef9913bd456b3e
|
[] |
no_license
|
bertozzivill/infx572_fall16
|
241a6e3cefd72a852ca3f9971f902ba3b1ed867e
|
63ec645fc29dfcbe4c9d286d73054a063b2f10b7
|
refs/heads/master
| 2020-12-02T03:20:39.788879
| 2017-01-02T22:05:58
| 2017-01-02T22:05:58
| 67,311,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,533
|
r
|
data_table_exercise_filled.r
|
##############################################################################
## data.table exercise
## Author: Amelia Bertozzi-Villa
## Answer key for a data.table tutorial: each section shows the idiomatic
## DT[i, j, by] form (and, where useful, an equivalent spelling).
##############################################################################
library(data.table)
## Creating a data.table --------------------------------------------------------------
## Create a data.table named DT that is ten elements long, with the following columns:
## V1: the integers 1-10
## V2: the letters A and B, repeating
## V3: the integers 1-5, repeating
## V4: the following vector: c(4.55, 2.6, 90.9, 21.2, 4.81, 77.1, 4.4, 8.43, 5.09, 2.33)
## Note: V2 (length 2) and V3 (length 5) are recycled to length 10 by data.table().
custom_vector <- c(4.55, 2.6, 90.9, 21.2, 4.81, 77.1, 4.4, 8.43, 5.09, 2.33)
DT <- data.table(V1=1:10,
V2=c("A", "B"),
V3=1:5,
V4=custom_vector)
## Subsetting on rows (i) --------------------------------------------------------------
## Select all rows in which V3 is equal to 4
DT[V3==4]
## Select all rows in which V3 is equal to 3 or 4
DT[V3 %in% 3:4]
DT[V3==3 | V3==4] # equivalent
## Select all rows in which V2 is equal to A
DT[V2=="A"]
## Note: can also put a comma at the end of each of these statements (e.g. DT[V3==4,])
## Subsetting on columns (j) --------------------------------------------------------------
## Select column V1
DT[, V1]
## Select columns V1 and V4
DT[, .(V1, V4)]
DT[, list(V1, V4)] # equivalent
## Take the sum of column V4
DT[, sum(V4)]
## Return the sum of column V4 (named 'sum') and the maximum value of column V1 (named 'max')
DT[, .(sum=sum(V4), max=max(V1))]
## Doing (j) *by* group --------------------------------------------------------------
## Take the sum of column V4, by column V2
DT[, .(sum(V4)), by=V2]
## Do the same as above, but name the summed column "sum_V4"
DT[, .(sum_V4=sum(V4)), by=V2]
## Do the same as above, but take the sum by column V2 and V3
DT[, .(sum_V4=sum(V4)), by=.(V2, V3)]
## Adding/Updating columns --------------------------------------------------------------
## Note: `:=` modifies DT in place (by reference); no reassignment needed.
## Create a new column, V5, equal to the minimum value of V4.
DT[, V5:=min(V4)]
## Do the same thing, but grouping by column V2.
DT[, V5:=min(V4), by=V2]
## Delete column V5.
DT[, V5:= NULL]
## Create a new column, V6, equal to the standard deviation of V4,
## AND a new column, V7, equal to the sum of V3, grouped by V2.
## Note: do this in a single command.
DT[, c("V6", "V7") := .(sd(V4), sum(V3)), by=V2]
## or :
DT[, ':=' (V6=sd(V4), V7=sum(V3)), by=V2]
## Delete column V6 and V7.
DT[, c("V6", "V7") := NULL]
|
30ba26c88bde5564cdf0f5841a0c899b0d5ff491
|
f5f4b24c2588379493f6383181853ab0fe11121b
|
/scripts/skills_extraction.R
|
229c8574725d3c3d04164d9a2666903b28ac80f2
|
[] |
no_license
|
PPPeck313/team_tidy
|
6889ab9e886197de9dcead6866be304e0afa8705
|
e50c9c806a111d3397f77d6ec1cc59d2dba6e36a
|
refs/heads/main
| 2023-08-19T13:32:36.978534
| 2021-10-21T02:03:35
| 2021-10-21T02:03:35
| 416,798,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,518
|
r
|
skills_extraction.R
|
library(httr)
library(jsonlite)
library(tidyverse)
library(stringr)
# Exchange Emsi API client credentials for an OAuth2 access token.
#
# client_id, secret -- Emsi client credentials (interpolated into the
#                      form-encoded request body; avoid logging `payload`,
#                      it contains the secret in clear text)
# scope             -- requested API scope string
#
# Returns the `access_token` string parsed from the JSON token response.
# NOTE(review): no HTTP error handling -- a failed request will surface as a
# fromJSON() parse error or a NULL token; confirm desired behavior.
get_token <- function(client_id, secret, scope){
url <- "https://auth.emsicloud.com/connect/token"
payload <- str_interp("client_id=${client_id}&client_secret=${secret}&grant_type=client_credentials&scope=${scope}")
encode <- "form"
response <- VERB("POST",
url,
body = payload,
add_headers(Content_Type = 'application/x-www-form-urlencoded'),
content_type("application/x-www-form-urlencoded"),
encode = encode)
token_text <- content(response, "text")
token_json <- fromJSON(token_text)
access_token <- token_json$access_token
return(access_token)
}
# Call the Emsi skills-extraction endpoint on one job description.
#
# description          -- free-text job description (newlines stripped before
#                         being embedded in the JSON payload)
# confidence_threshold -- numeric threshold forwarded in the request body
# access_token         -- OAuth bearer token from get_token()
#
# Returns a tibble with columns `skill` (name) and `type` (skill type name),
# one row per extracted skill.
# NOTE(review): the JSON body is assembled by string concatenation; a
# description containing `"` or `\` would break the payload -- consider
# jsonlite::toJSON. Confirm upstream data is already sanitized.
get_skills <- function(description, confidence_threshold, access_token){
url <- "https://emsiservices.com/skills/versions/latest/extract"
clean_description <- description %>% str_replace_all("\n","") %>% str_replace_all("\r","")
payload <- str_c("{ \"text\": \"... ", clean_description, " ...\", \"confidenceThreshold\": ", confidence_threshold, " }")
token_string <- str_interp('authorization: Bearer ${access_token}')
encode <- "json"
response <- VERB("POST",
url,
body = payload,
add_headers(
Authorization = token_string,
Content_Type = 'application/json'),
content_type("application/json"),
encode=encode
)
response_text <- content(response, "text")
response_json <- fromJSON(response_text)
skill_type <- response_json$data$skill$type$name
skill_name <- response_json$data$skill$name
skill_df <- as_tibble(skill_name)
colnames(skill_df) <- c("skill")
skill_df <- skill_df %>%
mutate(
type = skill_type
)
return(skill_df)
}
# Build the per-job skills tibble: extract skills for one posting and attach
# the posting's metadata (job_title, company_name, state, description).
#
# Returns a tibble with columns job_title, company_name, state, description,
# skill, type. On extraction failure, returns `base_df` (empty, same schema).
create_skills_df <- function(job_title, company_name, state, description, confidence_threshold, access_token){
base_df <- tibble(
job_title = character(),
company_name = character(),
state = character(),
description = character(),
skill = character(),
type = character()
)
skills_df <- get_skills(description, confidence_threshold, access_token)
# NOTE(review): length() of a data frame is its COLUMN count, not its row
# count, so this guard only fires when get_skills() returns a 0-column
# object; nrow(skills_df) == 0 was likely intended -- confirm.
if (length(skills_df)==0){
print("error with job")
return(base_df)
}
skills_df <- skills_df %>%
mutate(
job_title = job_title,
company_name = company_name,
state = state,
description = description
) %>%
select(job_title, company_name, state,
description, skill, type)
return(skills_df)
}
# Extract skills for every job posting (row) in `df` and stack the per-job
# results into a single tibble.
#
# df                   -- data frame/tibble with columns job_title,
#                         company_name, state, description
# confidence_threshold -- forwarded to create_skills_df()
# access_token         -- OAuth bearer token, forwarded to create_skills_df()
#
# Returns a tibble with columns job_title, company_name, state, description,
# skill, type. If `df` has zero rows, returns an empty tibble with that schema.
get_dataset_skills <- function(df, confidence_threshold, access_token){
  base_df <- tibble(
    job_title = character(),
    company_name = character(),
    state = character(),
    description = character(),
    skill = character(),
    type = character()
  )
  # Collect per-row results in a preallocated list and bind once at the end:
  # the original called bind_rows() inside the loop, which is quadratic in
  # the number of postings. base_df stays as the first element so the column
  # schema survives even for an empty input (the original `1:nrow(df)` loop
  # evaluated 1:0 and failed on a zero-row data frame).
  results <- vector("list", nrow(df) + 1L)
  results[[1L]] <- base_df
  for (row in seq_len(nrow(df))) {
    job_title <- df[row, "job_title"][[1]]
    company_name <- df[row, "company_name"][[1]]
    state <- df[row, "state"][[1]]
    description <- df[row, "description"][[1]]
    # Progress log (one line per posting).
    print(c(job_title, company_name, state, description))
    results[[row + 1L]] <- create_skills_df(job_title,
                                            company_name,
                                            state,
                                            description,
                                            confidence_threshold,
                                            access_token)
  }
  return(bind_rows(results))
}
|
369a8828e31dffbedcc508d8a37e3bfb6d7b8a48
|
6e747b34010f0613b82b292887e883f5fd4ed912
|
/plot1.R
|
43dd13717e9cdea066440b235ad46ae89d3ee739
|
[] |
no_license
|
DDHGITHUB/ExData_Plotting1
|
d3de5e4227374bfc4f00102cb4d75df0d85d6dfe
|
72f5c72480f84dfab7978ef0768aa4285902dcaf
|
refs/heads/master
| 2021-01-15T10:06:27.081234
| 2014-05-08T01:44:19
| 2014-05-08T01:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 594
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 / 2007-02-02.
## Read in table (';'-separated household power consumption data).
## Fix: `as.is = T` used the reassignable alias T; spell out TRUE.
DF <- read.table("household_power_consumption.txt", header=TRUE, as.is = TRUE, sep=";")
## Convert Date field to Date format (stored as dd/mm/yyyy)
DF$DD<-as.Date(DF$Date,"%d/%m/%Y")
## Filter only 2 days needed
DF.select = DF[(DF$DD == "2007-02-01" | DF$DD == "2007-02-02"),]
## Make GAP field a number ("?" placeholders in the raw data become NA here)
DF.select$Global_active_power<-as.numeric(DF.select$Global_active_power)
## Make the histogram on the screen device
hist(DF.select$Global_active_power,col="Red", main = "Global Active Power", xlab="Global Active Power (kilowatts)")
## Copy the screen plot to a PNG file.
## NOTE(review): dev.copy() mirrors the interactive device; opening png()
## before plotting would avoid device-size differences -- kept as-is.
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
|
c47979e77c8aa583368eb820eb3cae401e8cd9c4
|
02dd84f04aa2c568440848c418b4a621c22bd07c
|
/data-raw/corn_110110.R
|
016ec854cf9c1a16a77385d83e23d64f2d437cca
|
[] |
no_license
|
BrunoProgramming/BBOToolkit
|
ba9aca6fa5c27c1daa912bf1816c1d9f021b8bb9
|
0029821f27c479231e7051ff3c28b5423022b6e0
|
refs/heads/master
| 2021-06-04T03:33:27.220592
| 2016-04-07T16:21:09
| 2016-04-07T16:21:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
corn_110110.R
|
# Read the CBOT corn futures tick file (fixed-width format) and save it as an
# .rda data object for the package.
# NOTE(review): read_fwf/fwf_widths/cols are readr functions but readr is not
# loaded in this script -- presumably attached by the caller or a data-raw
# driver script; confirm.
corn_110110 <- read_fwf('data-raw/XCBT_C_FUT_110110.TXT', fwf_widths(c(8,6,8,1,3,1,4,5,7,1,7,1,1,1,1,1,1,2,1,1,1,1,1,6 ),
col_names = c("TradeDate", "TradeTime", "TradeSeq#", "SessionInd",
"TickerSym", "FOIInd", "DeliveryDate", "TrQuantity",
"Strike Price", "StrikePrDecLoc", "TrPrice", "TRPRDecLoc", "ASKBID",
"IndicativeQuote", "MarketQuote", "CloseOpen",
"OpenException", "PostClose", "CancelCode", "InsertedPrice",
"FastLast", "Cabinet", "Book", "EntryDate")),
col_types = cols("i", "c", "i", "c",
"c", "c", "i", "i",
"i", "i", "i", "i", "c",
"c", "c", "c",
"c", "c", "c", "c",
"c", "c", "c", "i"))
save(corn_110110, file = "data/corn_110110.rda")
# Path to 2012-2013 in larger file format. Two years in one file.
#'C:/Users/mallorym/BBOCORNDATA/2012Jan-2013Nov_txt/BBO_CBT_20120102-20131130_9552_00.txt'
|
08fb5c8d99d89bce9a525b8da7d3a6151379b23b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nivm/tests/nicqTestChecks.R
|
96e470f589814da0f8fe886cdb251adecd5b620e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
nicqTestChecks.R
|
# Manual sanity checks for nivm::nicqTest. All calls are intentionally
# commented out; uncomment individual blocks to re-run a check.
library(nivm)
## following gives diff in prop outside CI.
## This is because ic!=ceiling(nc*q)
## Now gives warning
#x<-nicqTest(20,g=nimDiffOR,delta0=.1,q=.2,nc=200,nt=300,
#  ic=round(600*.2),conf.int=TRUE)
#x
## check that it works without specifying ic
#x<-nicqTest(20,g=nimDiffOR,delta0=.1,q=.2,nc=200,
#   nt=300,conf.int=TRUE)
#x
## check that alternative="greater" works
## x=114 barely rejects at 0.025 level
#x<-nicqTest(114,g=nimDiffOR,delta0=.1,q=.2,nc=200,
#   nt=300,conf.int=TRUE,alternative="greater")
## x=113 barely fails to reject at 0.025 level
#x<-nicqTest(113,g=nimDiffOR,delta0=.1,q=.2,nc=200,
#   nt=300,conf.int=TRUE,alternative="greater")
|
0e5f7ad496b1c2e34345c6349abe37d5d64465a9
|
b7f4e0760240e4d5030734ae7831808fdaa55367
|
/plot4.R
|
337dcdf2469d96ac3a58574e42494dd01bf8ddc3
|
[] |
no_license
|
henriqueineves/Data-Science-Coursera
|
9556f273899d6da851ad7e5977bd956281c6387f
|
3ed07ebcfd200299caf47071a70da801adde9029
|
refs/heads/master
| 2023-04-06T04:03:56.144180
| 2021-03-29T19:51:24
| 2021-03-29T19:51:24
| 274,762,850
| 0
| 0
| null | 2021-01-02T23:29:16
| 2020-06-24T20:26:55
|
R
|
UTF-8
|
R
| false
| false
| 1,207
|
r
|
plot4.R
|
##Code for the second Peer Review assigments of course 4 Exploratory data analysis
## Plot 4: total PM2.5 emissions from coal-related sources per year (barplot).
#Loaging the packages:
# NOTE(review): unzip() below is base utils::unzip, so library(zip) may be
# unnecessary; ggplot2 is loaded but only base barplot() is used -- confirm.
library(zip); library(ggplot2)
#Download, unzip the file, openning the data:
if (!file.exists("NEI_data.zip")){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
destfile = "NEI_data.zip", method = "curl")
}else{
print("File already exists")
}
unzip("NEI_data.zip", exdir = "./NEI_Data")
#Since both archives are in RDS format, to open them:
summary_data <- readRDS("./NEI_Data/summarySCC_PM25.rds")
source_code <- readRDS("./NEI_Data/Source_Classification_Code.rds")
#Transform the year in a factor:
summary_data <- transform(summary_data, year = as.factor(year))
summary_data <- transform(summary_data, SCC = as.factor(SCC))
#Plot4:
#Getting all the data involved in coal:
# SCC codes whose EI.Sector mentions "coal"/"Coal", then the matching emission rows.
coal <- source_code$SCC[grep("[Cc]oal", source_code$EI.Sector)]
sub_coal <- summary_data[summary_data$SCC %in% coal, ]
# Total coal emissions per year.
sum_coal <- aggregate(sub_coal$Emissions, by = list(Year = sub_coal$year), FUN = sum)
#Plotting
png("plot4.png", width = 480, height = 480)
barplot(sum_coal$x, names.arg = sum_coal$Year, ylab = "PM25 Emission by Coal", xlab = "Year")
dev.off()
|
87064421a783caa20e32808b6559e54a239418a1
|
0d4afcc61512d15237ba9b509150326686e89ab0
|
/R/write_xts.R
|
28baceb06123458949f70a6f3851ae42cbe8a376
|
[] |
no_license
|
dleutnant/tsconvert
|
4b86475a0c182bed145969806bcb9e4503ded55b
|
3fcfba99a08de4f45517140ff961a98e0316b1a3
|
refs/heads/master
| 2021-07-07T05:14:52.089773
| 2016-09-22T14:58:20
| 2016-09-22T14:58:20
| 39,088,458
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,038
|
r
|
write_xts.R
|
#' Write an xts object to a delimited text file
#' @title Write xts objects to file
#' @param xts The xts object to write.
#' @param file A connection, or a character string naming the file to write to.
#' If file is "", print to the standard output connection.
#' @param format The time format used to render the index column.
#' @param sep The field separator string placed between values within a row.
#' @param dec The string used for decimal points in numeric or complex
#' columns: must be a single character.
#' @rdname write_xts
#' @export
#' @seealso \code{\link[xts]{xts}}.
write_xts <- function(xts, file = "", format = "%Y-%m-%d %H:%M:%S", sep = ";", dec = ".") {
  # Render the time index as formatted strings, prepend it as an "Index"
  # column in front of the data columns, then stream the table out
  # unquoted and without row names.
  stamped_index <- format.POSIXct(zoo::index(xts), format = format)
  out <- data.frame(Index = stamped_index, zoo::coredata(xts))
  utils::write.table(out,
                     file = file,
                     dec = dec,
                     sep = sep,
                     row.names = FALSE,
                     quote = FALSE)
}
|
40ceb2dbeda8235201bbd839feb8c3b47356a05c
|
b58997475db8fa11755a77ba1a927309bbbf4f7e
|
/SIS.R
|
5486254bf67c67f815d45d76f3b5e597d43a0a82
|
[] |
no_license
|
cnguyen351/chem160project2
|
19626382e9fb8091ff97eaa165ff174e3584ff94
|
3aae11e1d7966e3ffbb3114f1a85a2ec145c23a9
|
refs/heads/main
| 2023-01-25T00:53:21.212594
| 2020-11-11T23:17:08
| 2020-11-11T23:17:08
| 312,113,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,299
|
r
|
SIS.R
|
# Discrete-time, sex-structured SIS epidemic model, simulated over 2000 days.
# The four delta.* values are all computed from the CURRENT state before any
# pool is updated -- that ordering is load-bearing; do not interleave.
alp.m <- 0.000006 #transmission rate:male person-1 day-1
alp.f <- 0.0000009 #transmission rate:female person-1 day-1
gam.m <- 0.05 #recovery rate:male day-1
gam.f <- 0.007 #recovery rate:female day-1
Sm <- 14000 #susceptible males
Sf <- 9000 #susceptible females
Im <- 1000 #infected males
If <- 1000 #infected females
Sm.hist <- c() #Initialize vectors to hold pop. size as time goes by
Sf.hist <- c()
Im.hist <- c()
If.hist <- c()
for (day in 1:2000) { #2000 day time period
Sm.hist[day] <- Sm #Each time step will update current value of pop. sizes
Sf.hist[day] <- Sf
Im.hist[day] <- Im
If.hist[day] <-If
# Transmission is cross-sex: males are infected by infected females and
# vice versa (alp.m*Sm*If, alp.f*Sf*Im).
delta.Sm <- (gam.m*Im-alp.m*Sm*If) #Equations for change in number of susceptible
delta.Sf <- (gam.f*If-alp.f*Sf*Im)
delta.Im <- (alp.m*Sm*If-gam.m*Im) #Equations for change in number of infected
delta.If <- (alp.f*Sf*Im-gam.f*If)
Sm <- Sm + delta.Sm #Update population sizes
Sf <- Sf + delta.Sf
Im <- Im + delta.Im
If <- If + delta.If
Sm <- max(Sm,0) #Make sure population sizes stay in the positive
Sf <- max(Sf,0)
Im <- max(Im,0)
If <- max(If,0)
}
plot(Sm.hist, type="l", ylim=c(0,14000), xlab="Time (days)", ylab="Number of individuals") #Plot each pop. pool
lines(Sf.hist,col=2)
lines(Im.hist,col=3)
lines(If.hist,col=4)
|
badff4d8fe708a7c4348861818bd6eb7f83f647f
|
96b13e6429f1177dab9628da532b687e91fc1c25
|
/dfsf.R
|
9cecdd3a1877b9df315c1cc7853e731d2139f43e
|
[
"MIT"
] |
permissive
|
V-Yash/AIML_Lab
|
13073bc5862ed241fa1fa65e14242777abe1a58b
|
4c162fe5d8a67aef45a10a4623a55652fad7334d
|
refs/heads/main
| 2023-02-10T19:47:56.191988
| 2021-01-10T07:11:07
| 2021-01-10T07:11:07
| 328,324,414
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
dfsf.R
|
# Collection of small R control-flow exercises: membership tests, if/else,
# repeat/while/for loops, and simple accumulators.
# Fixes: `for (2 in x)` was a parse error (the loop variable must be a name);
# `=` assignments replaced with idiomatic `<-`.

# `%in%` coerces "8" against the numeric vector, so this prints "Yes".
x <- c(3, 5, 8, 4, 1)
if ("8" %in% x) {
  print("Yes")
}
if ("2" %in% x) {
  print("yes")
} else {
  print("no")
}

# Even/odd check.
num <- 8
if ((num %% 2) == 0) {
  print(paste(num, "is Even"))
} else {
  print(paste(num, "is Odd"))
}

# repeat loop: print the name five times, then break.
x <- c("Yash Verma")
i <- 1
repeat {
  print(x)
  i <- i + 1
  if (i > 5) {
    break
  }
}

# while loop: print the name four times.
x <- c("Yash Verma")
i <- 1
while (i < 5) {
  print(x)
  i <- i + 1
}

# while loop: print 1..10.
i <- 1
while (i < 11) {
  print(i)
  i <- i + 1
}

# Sum of 1..num via a while loop (guarding against negative input).
num <- 10
if (num < 0) {
  print("Enter a positive number")
} else {
  sum <- 0
  while (num > 0) {
    sum <- sum + num
    num <- num - 1
  }
  print(paste("The sum is", sum))
}

# BUG FIX: the original read `for (2 in x)`, which R refuses to parse --
# the loop variable must be a name. Print "yes" once per element.
x <- c(1, 2, 3, 4, 5)
for (val in x) {
  print("yes")
}

# Count the even values in x.
x <- c(2,5,3,9,8,11,6)
count <- 0
for (val in x) {
  if (val %% 2 == 0) count <- count + 1
}
print(count)

# for loop: print 1..10.
i <- 1
for (i in 1:10) {
  print(i)
}

# Accumulate the sum of 1..10.
i <- 1
sum <- 0
for (i in 1:10) {
  sum <- sum + i
}
print(sum)

# Iterate over a character vector.
fruit <- c('Apple', 'Orange', 'Passion fruit', 'Banana')
for (i in fruit) {
  print(i)
}
|
b3f14c17ac269dee6e663d29f3c63e7b3e84b0eb
|
9df772af4027f13cfc76fc212d65fa03d96e6b67
|
/code/figures/Hummingbird Range Graph.R
|
2c62f3529d23f398e9db1675acfe52db684bd9b5
|
[] |
no_license
|
austinspence/hbird_transplant
|
cc4c2d82024f739a8b2ca1af6dcdc283eb64053e
|
32fc6d9c61d385ba81f3a499f3eaedf38381a816
|
refs/heads/master
| 2020-04-06T07:32:55.711493
| 2018-11-12T21:06:22
| 2018-11-12T21:06:22
| 157,276,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,242
|
r
|
Hummingbird Range Graph.R
|
#### Experimental Design Figure --------------------
# Austin Spence
# October 12th, 2017
# Draws schematic elevation-range figures (mountain cross-sections with
# species range polygons) for the hummingbird transplant experiment.
# NOTE(review): par(mfrow=c(1, 2)) gives a 2-panel layout but five plot()
# calls follow, so later plots start a new page/overwrite -- presumably only
# a subset is kept per figure; confirm which panels are intended together.
par(mfrow=c(1, 2))
#par(mfrow=c(1, 1))
### Range Plot
plot(c(1:5), 1:5, type = "n", ylab = "Elevation (m)", xlab = NA,
axes = FALSE,
main="Hummingbird Range
Along Sierra Nevada Mountain")
Axis(side=2, at = 1:5, labels=c("0", "1000", "2000",
"3000", "4000"), tick = TRUE)
polygon(1:5, c(1, 3, 5, 3, 1)) #mountain
polygon(1:5, c(1, 3, 3, 3, 1), density = c(10, 20)) #Anna's Range
legend("topright", title="Species",
c("Anna","Calliope"), cex = 0.8, density = c(30, 0) )
### Historic Range Plot
plot(c(1:5), 1:5, type = "n", ylab = "Elevation (m)", xlab = NA,
axes = FALSE,
main="Hummingbird Range
Along Sierra Nevada Mountain")
Axis(side=2, at = 1:5, labels=c("0", "1000", "2000",
"3000", "4000"), tick = TRUE)
polygon(1:5, c(1, 3, 5, 3, 1)) #mountain
polygon(c(1, 1.5, 2.5, 4.5, 5), c(1, 2, 2, 2, 1),
density = c(10, 20)) #Anna's Range
legend("topright", title="Species",
c("Anna","Calliope"), cex = 0.8, density = c(30, 0) )
## Experiment Plot
# Timeline-style schematic: capture elevation, acclimatization step, end point.
plot(c(1:5), 1:5, type = "n", ylab = "Elevation (m)", xlab = NA,
axes = FALSE,
main="Acclimatization Experiment")
Axis(side=1, at = c(1,3,5), labels=c("Capture",
"Acclimatization", "End"), tick = TRUE)
Axis(side=2, at = 1:5, labels=c("0", "1000", "2000",
"3000", "4000"), tick = TRUE)
# Red path: stay low then jump high late; black path: jump high early.
segments(x0 = 1, y0 = 2, x1 = 4.5, y1 = 2, col = "red")
segments(x0 = 4.5, y0 = 2, x1 = 4.5, y1 = 4.98, col = "red")
segments(x0 = 4.5, y0 = 4.98, x1 = 5, y1 = 4.98, col = "red")
segments(x0 = 1, y0 = 2.01, x1 = 1.5, y1 = 2.01, col = "black")
segments(x0 = 1.5, y0 = 2.01, x1 = 1.5, y1 = 5, col = "black")
segments(x0 = 1.5, y0 = 5, x1 = 5, y1 = 5, col = "black")
points(x = 1.3, y = 2, pch = 15)
points(x = 4.7, y = 5, pch = 15)
points(x = 1.7, y = 5, pch = 15)
points(x = 5, y = 5, pch = 16)
text(1.1, 5, "B")
### Current Range Plot with Overlapping Ranges
plot(c(1:5), 1:5, type = "n", ylab = "Elevation (m)", xlab = NA,
axes = FALSE,
main=
"Anna's and Calliope Hummingbird Range
Along Sierra Nevada Mountain")
Axis(side=2, at = 1:5, labels=c("0", "1000", "2000",
"3000", "4000"), tick = TRUE)
polygon(1:5, c(1, 3, 5, 3, 1), col = "darkviolet", border = NA) #mountain
polygon(c(1, 2.25, 3, 3.76, 5),
c(1, 3.5, 3.5, 3.5, 1), col = "pink",
border = NA) #Anna's Range
# Hatched violet band marks the overlap zone between the two ranges.
polygon(c(1.5, 2.24, 3, 3.76, 4.5),
c(2, 3.5, 3.5, 3.5, 2), col = "darkviolet",
border = NA, density = (3.5), lwd = 5)
legend("topright", title="Species",
c("Calliope", "Anna"), cex = .8,
fill = c("darkviolet", "pink"))
text(1.1, 5, "A")
### Current Range Plot with Anna's Only
plot(c(1:5), 1:5, type = "n", ylab = "Elevation (m)", xlab = NA,
axes = FALSE,
main=
"Anna's Hummingbird Range
Along Sierra Nevada Mountain")
Axis(side=2, at = 1:5, labels=c("0", "1000", "2000",
"3000", "4000"), tick = TRUE)
polygon(1:5, c(1, 3, 5, 3, 1)) #mountain
polygon(c(1, 2.25, 3, 3.76, 5),
c(1, 3.5, 3.5, 3.5, 1), col = "pink") #Anna's Range
|
3f854dd9281c9d06ea3c6d4600590bcaeb97d55e
|
ec9745615f10cf8aa8edd135d8d73d5fb8ba943d
|
/R/data_gen.R
|
852ba8cd0ffb1896ef146695c0e2f899dc180083
|
[] |
no_license
|
liangyuanhu/CIMTx
|
8720511ab8e9d492ee97ab4fb0cbb14e5256ded3
|
138ac444cb9c34b953015f0db44ef71b66859d8d
|
refs/heads/master
| 2022-07-01T00:47:39.928182
| 2022-06-16T14:46:52
| 2022-06-16T14:46:52
| 252,886,194
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,328
|
r
|
data_gen.R
|
#' Data generation function
#'
#' This function generates data to test different causal inference methods.
#' @param n total number of units for simulation
#' @param scenario simulation scenario, either 1 or 2
#' @param ratio ratio of units in the treatment groups (used by scenario 1)
#' @param overlap levels of covariate overlap: Please select: weak, strong,
#'   moderate (used by scenario 2)
#' @param all_confounder TRUE or FALSE. overlap is lacking for a variable that is not predictive of the outcome (all_confounder equals to TRUE) or situations when it is lacking for a true confounder (all_confounder equals to FALSE). Used by scenario 2; scenario 1 always runs with all_confounder = FALSE.
#'
#' @return list with the 5 elements. Nested within each list, it contains
#' \item{n:}{Number of units for simulation}
#' \item{trt_ind:}{A data frame with number of rows equals to n and 11 columns}
#' \item{Y:}{Observed binary outcome for 3 treatments}
#' \item{Yobs:}{Observed binary outcome}
#' \item{Est:}{True ATE/ATT for RD/RR/OR}
#' @export data_gen
#'
#' @examples
#' library(CIMTx)
#' set.seed(3242019)
#' idata = data_gen(n = 120, ratio = 1, scenario = 1)
data_gen <- function(n, scenario, ratio, overlap, all_confounder){
  if (scenario == 1) {
    # Scenario 1 forces all_confounder = FALSE regardless of the supplied
    # argument -- NOTE(review): confirm this override is deliberate.
    data_gen_result <- data_gen_p1(n, ratio, all_confounder = FALSE)
  } else if (scenario == 2) {
    # NOTE(review): `overlap` is passed positionally in the slot scenario 1
    # fills with `ratio`; verify against the data_gen_p1() signature.
    data_gen_result <- data_gen_p1(n, overlap, all_confounder)
  } else {
    # Previously an unrecognized scenario fell through and failed with the
    # cryptic "object 'data_gen_result' not found"; fail fast instead.
    stop("`scenario` must be 1 or 2.", call. = FALSE)
  }
  return(data_gen_result)
}
|
f272958fae02bff87288ae263120f895543b9904
|
c36ef613cd20d36130b4a1ff351fcb5d76f6d63f
|
/R/PTMscape_main_functions.R
|
4f2c8f0d176b9c06050c66e3df09759150da2419
|
[] |
no_license
|
ginnyintifa/PTMscape
|
24b8913683f5a89769e939b455d0347186d5b87f
|
de86a471e92bbb954e30dedda71aa1395d908db0
|
refs/heads/master
| 2021-11-24T13:28:44.458778
| 2021-11-05T16:06:28
| 2021-11-05T16:06:28
| 117,210,690
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60,812
|
r
|
PTMscape_main_functions.R
|
### do every thing without decoy
# pssm_generation ---------------------------------------------------------
# Build a position-specific scoring matrix (PSSM) from the positively
# labelled windows in a candidate Rds file and write it out both as an Rds
# object and as a TSV with one row per amino-acid code.
#
# candidate_Rds_name : path to an Rds data frame with columns `label`
#                      ("positive"/"negative") and `window` (AA strings)
# center_position    : 1-based index of the modified residue in each window;
#                      must equal (window length + 1) / 2
# output_label       : prefix for "<label>_pssm.Rds" / "<label>_pssm.tsv"
pssm_generation = function(candidate_Rds_name,
center_position,
output_label)
{
candidate = readRDS(candidate_Rds_name)
pos_window <- candidate %>% dplyr::filter(label == "positive") %>%
dplyr::select(window)
### to check if size and center position aligns
if(center_position == (nchar(pos_window$window[1])+1)/2)
{
# Strip the (constant) center residue before computing frequencies.
noc_pos_window = pos_window$window
str_sub(noc_pos_window, center_position, center_position) <- ""
}else{
# NOTE(review): on a size mismatch only a warning is raised and execution
# continues with `noc_pos_window` undefined, so the next line errors --
# confirm whether a hard stop() was intended.
warning("window size error!")
}
#neg_windows = noc_candi_window
pos_windows = noc_pos_window
positive_base_matrix = get_base_freq_matrix(pos_windows)
pssm=positive_base_matrix
# Zero out infinite entries -- presumably from log of zero frequencies
# inside get_base_freq_matrix(); confirm against that helper.
pssm[is.infinite(pssm)]=0
saveRDS(pssm, file = paste0(output_label,"_pssm.Rds"))
# 20 standard amino acids plus X (unknown) and U (selenocysteine) rows.
pssm_df = data.frame(AA =c("A","R","N","D","C","Q","E","G","H","I",
"L","K","M","F","P","S","T","W","Y","V","X","U"), pssm)
write.table(pssm_df, paste0(output_label,"_pssm.tsv"),
quote = F, sep = "\t", row.names = F)
}
# window_formation --------------------------------------------------------
# Build candidate windows (with positive-site labels) for one modification
# site and persist them both as an Rds object and a tab-separated table.
window_formation = function(mod_site, flanking_size,
                            prot_seqs, prot_ids, positive_info,
                            output_label)
{
  candidate_windows = get_all_candidate(mod_site, flanking_size, prot_seqs, prot_ids, positive_info)
  rds_file = paste0(output_label, "_candidate.Rds")
  tsv_file = paste0(output_label, "_candidate.tsv")
  saveRDS(candidate_windows, file = rds_file)
  write.table(candidate_windows, tsv_file,
              row.names = FALSE, quote = FALSE, sep = "\t")
}
# Same as window_formation() but for input without positive-site labels:
# build the candidate windows and write them as Rds plus TSV.
window_formation_no_positive = function(mod_site, flanking_size,
                                        prot_seqs, prot_ids,
                                        output_label)
{
  candidate_windows = get_all_candidate_no_positive(mod_site, flanking_size, prot_seqs, prot_ids)
  rds_file = paste0(output_label, "_candidate.Rds")
  tsv_file = paste0(output_label, "_candidate.tsv")
  saveRDS(candidate_windows, file = rds_file)
  write.table(candidate_windows, tsv_file,
              row.names = FALSE, quote = FALSE, sep = "\t")
}
# pssm_feature_extraction -------------------------------------------------
## there is no way to get the protein mean of pssm features
# Score every candidate window against a previously built PSSM and save the
# per-window feature matrices: one for the negative ("candi") windows,
# and -- when present -- one for the positive windows.
#
# candidate_Rds_name : Rds data frame with columns `label` and `window`
# pssm_Rds_name      : Rds PSSM matrix produced by pssm_generation()
# center_position    : 1-based center index; must equal (window length + 1)/2
# output_label       : prefix for the "*_pssm_matrix.Rds" outputs
pssm_feature_extraction = function(candidate_Rds_name,
pssm_Rds_name,
center_position,
output_label)
{
candidate = readRDS(candidate_Rds_name)
pssm = readRDS(pssm_Rds_name)
candi_window <- candidate %>% dplyr::filter(label == "negative") %>%
dplyr::select(window)
# cat("candi size: ",nrow(candi_window),"\n")
### to check if size and center position aligns
if(center_position == (nchar(candi_window$window[1])+1)/2)
{
### remove the center site
noc_candi_window = candi_window$window
str_sub(noc_candi_window, center_position, center_position) <- ""
}else{
# NOTE(review): only warns; `noc_candi_window` is then undefined below.
warning("window size error!")
}
# One feature row per window; t() turns the sapply result into
# windows-by-features orientation.
# NOTE(review): 1:length(...) breaks if the window vector is empty;
# seq_along() would be safer -- confirm inputs are always non-empty.
noc_candi_pssm = t(sapply(1:length(noc_candi_window), function(x) {
get_pssm_feature(pssm,noc_candi_window[x])
}))
saveRDS(noc_candi_pssm, file = paste0(output_label, "_noc_candi_pssm_matrix.Rds"))
pos_window <- candidate %>% dplyr::filter(label == "positive") %>%
dplyr::select(window)
#cat("pos size: ",nrow(pos_window),"\n")
# Positive windows are optional (e.g. prediction-only input has none).
if(nrow(pos_window)>0)
{
if(center_position == (nchar(pos_window$window[1])+1)/2)
{
### remove the center site
noc_pos_window = pos_window$window
str_sub(noc_pos_window, center_position, center_position) <- ""
}else{
warning("window size error!")
}
noc_pos_pssm = t(sapply(1:length(noc_pos_window), function(x) {
get_pssm_feature(pssm,noc_pos_window[x])
}))
saveRDS(noc_pos_pssm, file = paste0(output_label, "_noc_pos_pssm_matrix.Rds"))
}
}
# aaindex_feature_extraction ----------------------------------------------
### input: Rdata for candidate and decoy, the file of aaindex properties
### output: Rdata of the aaindex matrix
# Extract AAindex physicochemical features for every candidate window.
#
# Mirrors pssm_feature_extraction(): strips the center residue from each
# window and computes features via get_matrix_all() (defined elsewhere)
# using the supplied aaindex_property table. Outputs:
#   <output_label>_noc_candi_cluster_matrix.Rds  (negatives, always)
#   <output_label>_noc_pos_cluster_matrix.Rds    (positives, if any)
#
# Args:
#   aaindex_property: AAindex property table passed to get_matrix_all().
#   candidate_Rds_name: path to the candidate data frame Rds.
#   center_position: 1-based center of the (odd-length) window.
#   output_label: prefix for output files.
aaindex_feature_extraction = function( aaindex_property, candidate_Rds_name,
                                       center_position,
                                       output_label)
{
  ### NOTE(review): this comment predates the code — no per-protein mean
  ### is computed here; features are per-window only.
  candidate = readRDS(candidate_Rds_name)
  candi = candidate%>%dplyr::filter(label == "negative")
  candi_window <- candidate %>% dplyr::filter(label == "negative") %>%
    dplyr::select(window)
  pos = candidate%>%dplyr::filter(label == "positive")
  pos_window <- candidate %>% dplyr::filter(label == "positive") %>%
    dplyr::select(window)
  rm(candidate)
  ### sanity check: center_position must be the middle of the window
  if(center_position == (nchar(candi_window$window[1])+1)/2)
  {
    ### delete the center residue ("noc" = no center), then featurize
    noc_candi_window = candi_window$window
    str_sub(noc_candi_window, center_position, center_position) <- ""
    noc_candi_cluster = get_matrix_all(noc_candi_window,
                                       aaindex_property)
    saveRDS(noc_candi_cluster,file = paste0(output_label,"_noc_candi_cluster_matrix.Rds"))
    # cat("get window aaindex","\n")
  }else{
    warning("window size error!")
  }
  ### positives are optional (prediction-only candidate sets have none)
  if(nrow(pos_window)>0)
  {
    if(center_position == (nchar(pos_window$window[1])+1)/2)
    {
      noc_pos_window = pos_window$window
      str_sub(noc_pos_window, center_position, center_position) <- ""
      noc_pos_cluster = get_matrix_all(noc_pos_window,
                                       aaindex_property)
      saveRDS(noc_pos_cluster,file = paste0(output_label,"_noc_pos_cluster_matrix.Rds"))
      #cat("get window aaindex","\n")
    }else{
      warning("window size error!")
    }
  }
}
# spider_feature_extraction -----------------------------------------------
# Join per-residue SPIDER structural features onto candidate windows,
# drop the 4 feature columns belonging to the modified (center) residue,
# and save positive / negative structure matrices separately:
#   <output_label>_noc_pos_structure_matrix.Rds
#   <output_label>_noc_candi_structure_matrix.Rds
#
# Args:
#   candidate_Rds_name: Rds with the candidate data frame (protID, pos, label, ...).
#   extracted_spider_Rds_name: Rds with site-specific SPIDER features keyed
#     by protID + pos.
#   spider_which_retain, spider_which_logit: kept for interface
#     compatibility; not used in this function.
#   center_position: 1-based window position of the modified residue.
#   output_label: prefix for the output Rds files.
spider_feature_joining_without_mean = function(candidate_Rds_name,
                                               extracted_spider_Rds_name,
                                               spider_which_retain,
                                               spider_which_logit,
                                               center_position,
                                               output_label)
{
  protID_pos_spider_site_specific = readRDS(extracted_spider_Rds_name)
  ### each window position contributes 4 SPIDER properties, so the center
  ### residue occupies 4 consecutive columns
  center_start = 4*(center_position-1)+1
  spider_which_center = c(center_start:(center_start+3))
  ### total spider columns for a window of 2*(center_position-1)+1 residues.
  ### Generalized from the previous hard-coded 100, which only held for
  ### center_position == 13 (a 25-residue window); identical for that case.
  n_spider_cols = 4 * (2 * (center_position - 1) + 1)
  candidate = readRDS(candidate_Rds_name)
  pos = candidate%>%dplyr::filter(label == "positive")
  candi = candidate%>%dplyr::filter(label == "negative")
  #### positives: join features by site
  pos_site_specific = dplyr::left_join(pos,protID_pos_spider_site_specific,by = c("protID", "pos"))
  rm(pos)
  #### the spider feature columns are the LAST n_spider_cols columns of the join
  last_col = ncol(pos_site_specific)
  get_col = c((last_col-n_spider_cols+1):last_col)
  pos_site_specific_matrix = as.matrix(pos_site_specific[,get_col])
  rm(pos_site_specific)
  noc_pos_structure = pos_site_specific_matrix[, -spider_which_center]
  saveRDS(noc_pos_structure, file = paste0(output_label, "_noc_pos_structure_matrix.Rds"))
  rm(noc_pos_structure)
  #### candidates (negatives): same treatment
  candi_site_specific = dplyr::left_join(candi,protID_pos_spider_site_specific,by = c("protID", "pos"))
  rm(candi)
  candi_site_specific_matrix = as.matrix(candi_site_specific[,get_col])
  rm(candi_site_specific)
  noc_candi_structure = candi_site_specific_matrix[, -spider_which_center]
  saveRDS(noc_candi_structure, file = paste0(output_label, "_noc_candi_structure_matrix.Rds"))
  rm(noc_candi_structure)
}
# feature_combining -------------------------------------------------------
# Column-bind the AAindex, SPIDER and PSSM feature matrices into one
# feature matrix per class, impute remaining NAs with column means, and
# save the result:
#   <output_label>_noc_not_na_candi_feature.Rds  (negatives, always)
#   <output_label>_noc_not_na_pos_feature.Rds    (positives, if pos_aaindex given)
#
# Args: paths to the per-feature Rds matrices produced by the extraction
# steps; pos_* arguments default to NULL for prediction-only runs.
combine_all_features = function(pos_aaindex = NULL,
                                candi_aaindex,
                                pos_spider = NULL,
                                candi_spider,
                                pos_pssm = NULL,
                                candi_pssm,
                                output_label)
{
  ## Replace every NA cell with its column mean. Vectorized via arr.ind
  ## indexing; the previous per-column loop made ncol() passes over the
  ## matrix. Columns that are entirely NA become NaN, as before.
  impute_col_means = function(mat)
  {
    col_mean = colMeans(mat, na.rm = T)
    na_cells = which(is.na(mat), arr.ind = TRUE)
    if (nrow(na_cells) > 0)
      mat[na_cells] = col_mean[na_cells[, 2]]
    mat
  }
  noc_not_na_candi_feature = cbind(readRDS(candi_aaindex),
                                   readRDS(candi_spider),
                                   readRDS(candi_pssm))
  cat("Take care of NA","\n")
  noc_not_na_candi_feature = impute_col_means(noc_not_na_candi_feature)
  saveRDS(noc_not_na_candi_feature, file = paste0(output_label, "_noc_not_na_candi_feature.Rds"))
  rm(noc_not_na_candi_feature)
  if(!is.null(pos_aaindex))
  {
    noc_not_na_pos_feature = cbind(readRDS(pos_aaindex),
                                   readRDS(pos_spider),
                                   readRDS(pos_pssm))
    noc_not_na_pos_feature = impute_col_means(noc_not_na_pos_feature)
    saveRDS(noc_not_na_pos_feature, file = paste0(output_label, "_noc_not_na_pos_feature.Rds"))
  }
}
# Variant of combine_all_features() for pipelines without SPIDER
# structural features: column-binds only AAindex + PSSM matrices,
# imputes NAs with column means, and saves:
#   <output_label>_noc_not_na_candi_feature.Rds  (negatives, always)
#   <output_label>_noc_not_na_pos_feature.Rds    (positives, if pos_aaindex given)
combine_all_features_no_spider = function(pos_aaindex = NULL,
                                          candi_aaindex,
                                          pos_pssm = NULL,
                                          candi_pssm,
                                          output_label)
{
  ## Replace every NA cell with its column mean (vectorized; replaces the
  ## previous per-column loop). All-NA columns become NaN, as before.
  impute_col_means = function(mat)
  {
    col_mean = colMeans(mat, na.rm = T)
    na_cells = which(is.na(mat), arr.ind = TRUE)
    if (nrow(na_cells) > 0)
      mat[na_cells] = col_mean[na_cells[, 2]]
    mat
  }
  noc_not_na_candi_feature = cbind(readRDS(candi_aaindex),
                                   readRDS(candi_pssm))
  noc_not_na_candi_feature = impute_col_means(noc_not_na_candi_feature)
  saveRDS(noc_not_na_candi_feature, file = paste0(output_label, "_noc_not_na_candi_feature.Rds"))
  rm(noc_not_na_candi_feature)
  if(!is.null(pos_aaindex))
  {
    noc_not_na_pos_feature = cbind(readRDS(pos_aaindex),
                                   readRDS(pos_pssm))
    noc_not_na_pos_feature = impute_col_means(noc_not_na_pos_feature)
    saveRDS(noc_not_na_pos_feature, file = paste0(output_label, "_noc_not_na_pos_feature.Rds"))
  }
}
### Further processing of the negative set (which is either a decoy set or the candidate set).
### Two forms are currently supported: (1) KNN cleaning, (2) random sampling down to the positive-set size.
# negative_selection ------------------------------------------------------
# Downsample the negative feature matrix to the positive row count.
#
# Draws a reproducible (seed 123) random subset of negative rows of the
# same size as the positive matrix and saves it as
# <output_label>_balanced_neg_feature.Rds.
balanced_size_sampling = function(pos_matrix_Rds_name, neg_matrix_Rds_name,
                                  output_label)
{
  n_pos = nrow(readRDS(pos_matrix_Rds_name))
  neg_matrix = readRDS(neg_matrix_Rds_name)
  set.seed(123)
  keep_rows = sample(nrow(neg_matrix), n_pos)
  saveRDS(neg_matrix[keep_rows, ],
          file = paste0(output_label, "_balanced_neg_feature.Rds"))
}
# Balance an already-combined (positives + negatives) feature matrix.
#
# Splits rows by their 0/1 label, subsamples negatives down to the
# positive count when there are more negatives (reproducibly, seed 123),
# stacks positives on top of the kept negatives, and writes:
#   <output_label>_balanced_feature_matrix.Rds
#   <output_label>_balanced_feature_label.Rds
#   <output_label>_balanced_feature_label.tsv
balanced_size_sampling_after_combining_pos_neg = function(feature_matrix_Rds_name,
                                                          feature_label_Rds_name,
                                                          output_label)
{
  feature_matrix = readRDS(feature_matrix_Rds_name)
  feature_label = readRDS(feature_label_Rds_name)
  pos_rows = feature_matrix[which(feature_label == 1), ]
  neg_rows = feature_matrix[which(feature_label == 0), ]
  rm(feature_matrix)
  n_pos = nrow(pos_rows)
  if (n_pos < nrow(neg_rows))
  {
    set.seed(123)
    neg_rows = neg_rows[sample(nrow(neg_rows), n_pos), ]
  }
  balanced_feature_matrix = rbind(pos_rows, neg_rows)
  balanced_feature_label = c(rep(1, n_pos), rep(0, nrow(neg_rows)))
  saveRDS(balanced_feature_matrix, file = paste0(output_label, "_balanced_feature_matrix.Rds"))
  saveRDS(balanced_feature_label, file = paste0(output_label, "_balanced_feature_label.Rds"))
  write.table(balanced_feature_label, paste0(output_label, "_balanced_feature_label.tsv"), quote = F,
              sep = "\t", row.names = F)
}
# pos_neg_combination -----------------------------------------------------
# Stack the positive feature matrix on top of the negative one, impute
# NAs with column means (computed over the combined matrix), and write
# both the matrix and its 1/0 label vector as Rds and headerless tsv:
#   <output_label>_feature_matrix.Rds / .tsv
#   <output_label>_feature_label.Rds  / .tsv
combine_pos_neg = function(pos_feature_matrix_Rds_name, neg_feature_matrix_Rds_name,
                           output_label)
{
  pos_feature_matrix = readRDS(pos_feature_matrix_Rds_name)
  neg_feature_matrix = readRDS(neg_feature_matrix_Rds_name)
  ### positives first, then negatives — label order below depends on this
  pos_neg_feature_matrix = rbind(pos_feature_matrix, neg_feature_matrix)
  ## Vectorized NA imputation with column means (replaces the previous
  ## per-column loop; all-NA columns become NaN, as before).
  col_mean = colMeans(pos_neg_feature_matrix, na.rm = T)
  na_cells = which(is.na(pos_neg_feature_matrix), arr.ind = TRUE)
  if (nrow(na_cells) > 0)
    pos_neg_feature_matrix[na_cells] = col_mean[na_cells[, 2]]
  saveRDS(pos_neg_feature_matrix, file = paste0(output_label, "_feature_matrix.Rds"))
  write.table(pos_neg_feature_matrix, paste0(output_label, "_feature_matrix.tsv"),
              row.names = F, col.names = F, sep = "\t", quote = F)
  rm(pos_neg_feature_matrix)
  pos_neg_feature_label = c(rep(1, nrow(pos_feature_matrix)), rep(0, nrow(neg_feature_matrix)))
  saveRDS(pos_neg_feature_label, file = paste0(output_label, "_feature_label.Rds"))
  write.table(pos_neg_feature_label, paste0(output_label, "_feature_label.tsv"),
              row.names = F, col.names = F, sep = "\t", quote = F)
  rm(pos_neg_feature_label)
}
# n_fold_cv_without_decoy -------------------------------------------------
# Build n-fold cross-validation splits, stratified by class.
#
# Positives and negatives (candidates) are folded SEPARATELY with
# caret::createFolds (both seeded with 123) so that every fold keeps
# roughly the original class ratio. The input matrix is assumed to hold
# all positive rows first, then all negative rows — the layout produced
# by combine_pos_neg().
#
# For each fold i in 1..n this writes (prefix = output_label):
#   _test_pos_ind_i.Rds / _test_candi_ind_i.Rds   row indices (per class)
#   _train_pos_ind_i.Rds / _train_candi_ind_i.Rds
#   _test_feature_i.Rds / _train_feature_i.Rds    feature sub-matrices
#   _test_label_i.Rds/.tsv, _train_label_i.Rds/.tsv  1/0 labels
construct_n_fold_cv_without_decoy = function(pos_candi_feature_matrix_Rds_name,pos_candi_feature_label_Rds_name,
                                             n, output_label)
{
  pos_candi_feature_matrix = readRDS(pos_candi_feature_matrix_Rds_name)
  pos_candi_feature_label = readRDS(pos_candi_feature_label_Rds_name)
  ### split the matrix back into its positive block (first pos_size rows)
  ### and candidate block (remaining rows)
  pos_size = length(which(pos_candi_feature_label==1))
  pos_feature_matrix = pos_candi_feature_matrix[1:pos_size,]
  pos_seq = c(1:pos_size)
  candi_size = length(which(pos_candi_feature_label==0))
  candi_feature_matrix = pos_candi_feature_matrix[((pos_size+1):(pos_size+candi_size)),]
  candi_seq = c(1:candi_size)
  ### createFolds(..., FALSE) returns a fold-number vector per row
  set.seed(123)
  pos_folds = caret::createFolds(pos_seq, n, FALSE )
  set.seed(123)
  candi_folds = caret::createFolds(candi_seq, n, FALSE)
  for(i in 1:n)
  {
    ### fold i is the test set; everything else is training
    test_pos_ind = which(pos_folds==i)
    test_candi_ind = which(candi_folds==i)
    saveRDS(test_pos_ind, file = paste0(output_label,"_test_pos_ind_",i,".Rds"))
    saveRDS(test_candi_ind, file = paste0(output_label,"_test_candi_ind_",i,".Rds"))
    #cat(test_pos_ind,"\n")
    #cat(test_candi_ind,"\n")
    train_pos_ind = pos_seq[-test_pos_ind]
    train_candi_ind = candi_seq[-test_candi_ind]
    saveRDS(train_pos_ind, file = paste0(output_label,"_train_pos_ind_",i,".Rds"))
    saveRDS(train_candi_ind, file = paste0(output_label,"_train_candi_ind_",i,".Rds"))
    #cat(train_pos_ind,"\n")
    #cat(train_candi_ind,"\n")
    ### features: positives stacked above candidates, matching the labels below
    test_feature = rbind(pos_feature_matrix[test_pos_ind,],candi_feature_matrix[test_candi_ind,])
    # write.table(test_feature, paste0(output_label,"_test_feature_" ,i,".tsv"),
    #             row.names = F, col.names = F, sep = "\t", quote = F)
    saveRDS(test_feature, file = paste0(output_label,"_test_feature_" ,i,".Rds"))
    rm(test_feature)
    train_feature = rbind(pos_feature_matrix[train_pos_ind,],candi_feature_matrix[train_candi_ind,])
    #write.table(train_feature, paste0(output_label,"_train_feature_" ,i,".tsv"),
    #            row.names = F, col.names = F, sep = "\t", quote = F)
    saveRDS(train_feature, file = paste0(output_label,"_train_feature_" ,i,".Rds"))
    rm(train_feature)
    test_label = c(rep(1,length(test_pos_ind)),
                   rep(0, length(test_candi_ind)))
    write.table(test_label, paste0(output_label,"_test_label_" ,i,".tsv"),
                row.names = F, col.names = F, sep = "\t", quote = F)
    saveRDS(test_label, file = paste0(output_label,"_test_label_" ,i,".Rds"))
    train_label = c(rep(1,length(train_pos_ind)),
                    rep(0, length(train_candi_ind)))
    write.table(train_label, paste0(output_label,"_train_label_" ,i,".tsv"),
                row.names = F, col.names = F, sep = "\t", quote = F)
    saveRDS(train_label, file = paste0(output_label,"_train_label_" ,i,".Rds"))
  }
}
# scale the data ----------------------------------------------------------
# Scale every CV fold's train/test matrix pair into [lower_bound, upper_bound].
#
# For fold i the matrices are read from <name>_i.Rds; the scaling range is
# learned from the training matrix by scale_train() and then applied to the
# test matrix by scale_test() (both defined elsewhere in this file).
scale_train_test = function(feature_train_name, feature_test_name,
                            n_fold,
                            upper_bound, lower_bound,
                            output_label_fortrain, output_label_fortest)
{
  for (fold in seq_len(n_fold))
  {
    train_mat = readRDS(paste0(feature_train_name, "_", fold, ".Rds"))
    test_mat = readRDS(paste0(feature_test_name, "_", fold, ".Rds"))
    train_range = scale_train(train_mat, upper_bound, lower_bound,
                              paste0(output_label_fortrain, "_", fold))
    scale_test(test_mat, train_range, upper_bound, lower_bound,
               paste0(output_label_fortest, "_", fold))
  }
}
# Scale a single train/test matrix pair into [lower_bound, upper_bound].
# The range is learned from the training matrix (scale_train) and then
# applied unchanged to the test matrix (scale_test).
scale_train_test_single = function(feature_train_name, feature_test_name,
                                   upper_bound, lower_bound,
                                   output_label_fortrain, output_label_fortest)
{
  train_mat = readRDS(feature_train_name)
  test_mat = readRDS(feature_test_name)
  train_range = scale_train(train_mat, upper_bound, lower_bound,
                            output_label_fortrain)
  scale_test(test_mat, train_range, upper_bound, lower_bound,
             output_label_fortest)
}
# Scale a single training matrix into [lower_bound, upper_bound] via
# scale_train(), which writes its output under output_label_fortrain.
# scale_train() is called for its side effects; its returned range is not
# needed here (the previously unused `it` binding was removed), but it is
# still returned invisibly to preserve the original return value.
scale_train_single = function(feature_train_name,
                              upper_bound, lower_bound,
                              output_label_fortrain)
{
  feature_train_matrix = readRDS(feature_train_name)
  invisible(scale_train(feature_train_matrix, upper_bound, lower_bound,
                        output_label_fortrain))
}
# libsvm formating --------------------------------------------------------
# Convert one train/test pair of feature + label files into libsvm format
# via libsvm_formating() (defined elsewhere). For each split the Rds and
# tsv output labels are intentionally the same string
# (<output_label>_train_feature / <output_label>_test_feature);
# libsvm_formating() is expected to append its own extensions.
libsvm_formating_single = function(train_feature_name,
                                   train_label_name,
                                   test_feature_name,
                                   test_label_name,
                                   output_label)
{
  train_out = paste0(output_label, "_train_feature")
  test_out = paste0(output_label, "_test_feature")
  libsvm_formating(train_feature_name, train_label_name,
                   train_out, train_out)
  libsvm_formating(test_feature_name, test_label_name,
                   test_out, test_out)
}
# PCA plots ---------------------------------------------------------------
# PLS-DA plot of positive vs candidate feature rows.
#
# Positives are capped at 5000 rows (reproducible subsample, seed 123);
# candidates are then subsampled down to the positive count whenever they
# have at least as many rows. The plot is written by
# get_plsda_pos_candi() (defined elsewhere) to
# <output_label>_plsda_plot_two.pdf.
plot_plsda_for_two_types = function(pos_feature_Rds_name,
                                    candi_feature_Rds_name,
                                    output_label)
{
  pos_feature = readRDS(pos_feature_Rds_name)
  candi_feature = readRDS(candi_feature_Rds_name)
  if (nrow(pos_feature) > 5000)
  {
    set.seed(123)
    sel_pos = pos_feature[sample(nrow(pos_feature), 5000), ]
  } else {
    sel_pos = pos_feature
  }
  n_keep = nrow(sel_pos)
  ### note: ">=" means candidates are re-sampled (and thus shuffled) even
  ### when the counts are already equal — kept from the original behavior
  if (nrow(candi_feature) >= n_keep)
  {
    set.seed(123)
    sel_candi = candi_feature[sample(nrow(candi_feature), n_keep), ]
  } else {
    sel_candi = candi_feature
  }
  combined = rbind(sel_pos, sel_candi)
  group = as.factor(c(rep("positive", nrow(sel_pos)),
                      rep("negative", nrow(sel_candi))))
  get_plsda_pos_candi(combined, group, paste0(output_label, "_plsda_plot_two.pdf"))
}
# PLS-DA plot of positive vs candidate sites, with candidates additionally
# marked by whether their prediction score passes candi_score_threshold.
# Writes <output_label>_plsda_plot_two_score.pdf via
# get_plsda_pos_candi_with_score_selection() (defined elsewhere).
plot_plsda_for_two_types_with_score_selection= function(pos_feature_Rds_name,
                                                        candi_feature_Rds_name,
                                                        pos_score_Rds_name,
                                                        candi_score_Rds_name,
                                                        candi_score_threshold,
                                                        output_label)
{
  pos_feature = readRDS(pos_feature_Rds_name)
  candi_feature = readRDS(candi_feature_Rds_name)
  pos_score = readRDS(pos_score_Rds_name)
  candi_score = readRDS(candi_score_Rds_name)
  pos_size = nrow(pos_feature)
  candi_size = nrow(candi_feature)
  ### limit pos size to 5000 rows (reproducible subsample, seed 123)
  ### NOTE(review): the `[ind,]` indexing assumes the score objects are
  ### two-dimensional (matrix/data frame) — confirm against the producers,
  ### which appear to save plain vectors elsewhere in this file.
  if(pos_size>5000)
  {
    set.seed(123)
    sel_pos_ind = sample(pos_size,5000)
    sel_pos = pos_feature[sel_pos_ind,]
    sel_pos_score = pos_score[sel_pos_ind,]
  }else{
    sel_pos = pos_feature
    sel_pos_score = pos_score
  }
  pos_size = nrow(sel_pos)
  ### downsample candidates (with their scores) to the positive count
  if(candi_size>=pos_size)
  {
    set.seed(123)
    sel_candi_ind = sample(candi_size,pos_size)
    sel_candi = candi_feature[sel_candi_ind,]
    sel_candi_score = candi_score[sel_candi_ind,]
  }else{
    sel_candi = candi_feature
    sel_candi_score = candi_score
  }
  sel_feature = rbind(sel_pos, sel_candi)
  candi_score_label = rep("not_selected",nrow(sel_candi))
  ### bug fix: threshold the SUBSAMPLED scores (sel_candi_score). The
  ### previous code compared against the full candi_score object, whose
  ### indices do not line up with candi_score_label after subsampling.
  candi_score_label[which(sel_candi_score >= candi_score_threshold)] = "selected"
  score_label = as.factor(c(rep("selected", nrow(sel_pos)), candi_score_label))
  type_label = as.factor(c(rep("positive",nrow(sel_pos)),
                           rep("negative", nrow(sel_candi))))
  get_plsda_pos_candi_with_score_selection(sel_feature, type_label, score_label, paste0(output_label,"_plsda_plot_two_score.pdf"))
}
# AUC_calculation ---------------------------------------------------------
# MCC calculation for prediction score threshold --------------------------
### TODO: modify this so that it can handle k-fold rather than 2-fold only
# get_score_threshold_for_whole_proteome_mode -----------------------------
# Derive a prediction-score cutoff from pooled cross-validation output.
#
# Reads per-fold liblinear prediction files and matching true-label files,
# pools all folds, then:
#   * saves the pooled candidate / positive score vectors
#     (<output_label>_candi_score.Rds, _positive_score.Rds),
#   * scans cutoffs 0.001..0.999, recording MCC and specificity,
#   * prints the cutoff closest to specificity_level and the best-MCC cutoff,
#   * writes score histograms and a density plot.
#
# Args:
#   prediction_score_file_names: Rds holding a character vector of per-fold
#     prediction-score file paths (liblinear output with header columns
#     "labels", "1", "0").
#   test_label_file_names: Rds holding the matching per-fold label file
#     paths (headerless, same fold order and row order as the predictions).
#   specificity_level: target specificity, e.g. 0.99.
#   output_label: prefix for all saved files and plots.
# Returns: the score cutoff whose specificity is closest to specificity_level.
get_score_threshold = function(prediction_score_file_names,
                               test_label_file_names,
                               specificity_level,
                               output_label)
{
  score_files = readRDS(prediction_score_file_names)
  label_files = readRDS(test_label_file_names)
  ### pool every fold into one (pos_score, true_label) data frame;
  ### columns are renamed by MATCHING header names, not by position,
  ### because liblinear's column order can vary
  all_pred_df = data.frame(rbindlist(lapply(1:length(score_files), function(i) {
    this_predict = data.table::fread(score_files[i], stringsAsFactors = F, header = T)
    this_label = data.table::fread(label_files[i], stringsAsFactors = F, header = F)
    pred_df = cbind(this_predict, this_label)
    old_cn = colnames(pred_df)
    new_cn = c("pred_label","pos_score","neg_score","true_label")
    new_cn[which(old_cn == "labels")] = "pred_label"
    new_cn[which(old_cn == "1")] = "pos_score"
    new_cn[which(old_cn == "0")] = "neg_score"
    colnames(pred_df) = new_cn
    return(pred_df %>% dplyr::select(pos_score, true_label))
  })), stringsAsFactors = F)
  candi_score = all_pred_df %>%
    dplyr::filter(true_label == 0) %>%
    dplyr::select(pos_score)
  positive_score = all_pred_df %>%
    dplyr::filter(true_label == 1) %>%
    dplyr::select(pos_score)
  saveRDS(candi_score$pos_score, file = paste0(output_label, "_candi_score.Rds"))
  saveRDS(positive_score$pos_score, file = paste0(output_label, "_positive_score.Rds"))
  total_label = all_pred_df$true_label
  total_score = all_pred_df$pos_score
  # roc_test = roc(total_label, total_score)
  # cat("AUC", roc_test$auc,"\n")
  #
  ### scan 999 cutoffs (0.001 .. 0.999) and record MCC / specificity
  record_mcc = rep(0,999)
  record_spec = rep(0,999)
  for(i in 1:999)
  {
    cutoff = i/1000
    tp = sum(total_label==1 & total_score>cutoff)
    tn = sum(total_label==0 & total_score<=cutoff)
    fp = sum(total_label==0 & total_score>cutoff)
    fn = sum(total_label==1 & total_score<=cutoff)
    # cat(cutoff, tp, tn, fp, fn, "\n")
    this_spec = tn/(tn+fp)
    this_mcc = calculate_MCC(tp,fp, fn, tn)
    # cat(this_mcc, "\n")
    record_mcc[i] = this_mcc
    record_spec[i] = this_spec
  }
  max_mcc = max(record_mcc, na.rm = T)
  score_max_mcc = which.max(record_mcc)/1000
  ### cutoff whose specificity is closest to the requested level
  get_spec = abs(record_spec-specificity_level)
  score_which_spec = which.min(get_spec)/1000
  cat("at specificity level wanted, score cutoff is: ",
      score_which_spec,"\n")
  #cat("at specificity level wanted, how many sites are predicted? ",
  #    length(which(candi_score$pos_score>score_which_spec)), "\n")
  cat("threshold at best MCC ", score_max_mcc, "\n" )
  cat("best MCC", max_mcc,"\n")
  ### confusion counts at the best-MCC cutoff (currently only computed,
  ### the report lines below are commented out)
  btp = sum(total_label==1 & total_score>score_max_mcc)
  btn = sum(total_label==0 & total_score<=score_max_mcc)
  bfp = sum(total_label==0 & total_score>score_max_mcc)
  bfn = sum(total_label==1 & total_score<=score_max_mcc)
  sens = btp/(btp+bfn)
  spec = btn/(btn+bfp)
  #cat("sens and spec at best MCC ", sens,"\t", spec, "\n" )
  #cat("how many candidate predicted: ", length(which(candi_score>score_max_mcc)), "\n")
  # pdf(paste0(output_label,"_roc.pdf"), useDingbats = F)
  # plot(roc_test)
  # dev.off()
  #
  ### diagnostic plots: score histograms per class + overlaid densities
  pdf(paste0(output_label,"_candidate_score_hist.pdf"), useDingbats = F)
  hist(candi_score$pos_score,breaks = 50, main = "predicted_score_on_candidate_sites")
  dev.off()
  pdf(paste0(output_label,"_positive_score_hist.pdf"), useDingbats = F)
  hist(positive_score$pos_score,breaks = 50, main = "predicted_score_on_positive_sites")
  dev.off()
  pdf(paste0(output_label,"_both_score_dens.pdf"), useDingbats = F)
  pd = density(positive_score$pos_score)
  cd = density(candi_score$pos_score)
  ymax = max(c(pd$y, cd$y))
  plot(cd, ylim = c(0,ymax),
       main = "candidate_pos_score", col = "blue")
  lines(pd, col = "red")
  dev.off()
  return(score_which_spec)
}
# Re-attach cross-validation prediction scores to their candidate windows.
#
# The CV step shuffled rows fold by fold; this function reconstructs the
# row order by concatenating the per-fold test-index Rds files (in the
# same fold order the scores were produced in) and pairing them with the
# pooled positive / candidate score vectors. It then labels each site
# against score_threshold, joins in gene names, and writes
# <output_label>_window_score_df.tsv / .Rds.
#
# Args:
#   candidate_df_Rds: Rds of the full candidate data frame (protID, pos,
#     window, label, ...).
#   positive_index_file_names / candi_index_file_names: Rds each holding a
#     character vector of per-fold test-index Rds paths. The fold order
#     here MUST match the order in which the score vectors were assembled.
#   positive_score_Rds / candi_score_Rds: pooled score vectors saved by
#     get_score_threshold().
#   score_threshold: cutoff for calling a site "positive".
#   id_convert: two-column data frame mapping protID -> gene name.
#   output_label: output file prefix.
assemble_window_score_cv = function(candidate_df_Rds,
                                    positive_index_file_names,
                                    candi_index_file_names,
                                    positive_score_Rds,
                                    candi_score_Rds,
                                    score_threshold,
                                    id_convert,
                                    output_label)
{
  candidate_df = readRDS(candidate_df_Rds)
  positive_score = readRDS(positive_score_Rds)
  candi_score = readRDS(candi_score_Rds)
  ### split into the two classes; scores were pooled per class
  positive_df = candidate_df%>%dplyr::filter(label == "positive")
  candi_df = candidate_df%>%dplyr::filter(label == "negative")
  ### rebuild the row order in which the scores were produced: fold 1's
  ### test indices, then fold 2's, etc.
  all_positive_ind = readRDS(positive_index_file_names)
  all_candi_ind = readRDS(candi_index_file_names)
  tidy_positive_ind = data.frame(rbindlist(
    lapply( 1:length(all_positive_ind), function(i) {
      this_positive_ind = readRDS(all_positive_ind[i])
      return(data.frame(ind = this_positive_ind, stringsAsFactors = F))
    })), stringsAsFactors = F)
  positive_df_order = positive_df[tidy_positive_ind$ind,]
  positive_df_order_score = data.frame(positive_df_order, pred_score = positive_score)
  rm(positive_df_order)
  tidy_candi_ind = data.frame(rbindlist(
    lapply( 1:length(all_candi_ind), function(i) {
      this_candi_ind = readRDS(all_candi_ind[i])
      return(data.frame(ind = this_candi_ind, stringsAsFactors = F))
    })), stringsAsFactors = F)
  candi_df_order = candi_df[tidy_candi_ind$ind,]
  candi_df_order_score = data.frame(candi_df_order, pred_score = candi_score)
  rm(candi_df_order)
  df_score = rbind(positive_df_order_score, candi_df_order_score)
  rm(positive_df_order_score, candi_df_order_score)
  colnames(id_convert) = c("protID","gene_name")
  ### prediction_label: purely score-based call;
  ### combined_label: score-based call forced to "positive" for known sites
  df_score_label = df_score %>%
    dplyr::mutate(pred_label = "negative") %>%
    dplyr::mutate(pred_label = replace(pred_label, pred_score >= score_threshold, "positive")) %>%
    dplyr::mutate(prediction_label = pred_label) %>%
    dplyr::mutate(combined_label = replace(pred_label, label == "positive", "positive")) %>%
    dplyr::mutate(known_label = label)%>%
    dplyr::mutate(threshold = score_threshold)%>%
    dplyr::left_join(id_convert) %>%
    dplyr::arrange(protID, pos) %>%
    dplyr::select(protID, gene_name, pos, window, pred_score, threshold,prediction_label, known_label, combined_label)
  rm(df_score)
  write.table(df_score_label,paste0(output_label, "_window_score_df.tsv"),
              sep = "\t", quote = F, row.names = F, na = "")
  saveRDS(df_score_label, file = paste0(output_label, "_window_score_df.Rds"))
}
# Attach prediction scores to candidate windows for a prediction-only run.
#
# Unlike the CV variant, rows here were never shuffled, so the score file
# and the candidate data frame are simply aligned row by row. Each site is
# labeled against score_threshold, gene names are joined in, and the
# result is written to <output_label>_window_score_df.tsv / .Rds.
#
# Args:
#   prediction_score_file: liblinear prediction output with header columns
#     "labels", "1", "0" (column order may vary; matched by name).
#   predict_candidate_df_Rds: Rds of the candidate data frame, in the same
#     row order the predictions were produced in.
#   id_convert: two-column data frame mapping protID -> gene name.
#   score_threshold: cutoff for calling a site "positive".
#   output_label: output file prefix.
assemble_window_score_target = function(prediction_score_file,
                                        predict_candidate_df_Rds,
                                        id_convert,
                                        score_threshold,
                                        output_label)
{
  pred_score_df = data.table::fread(prediction_score_file, stringsAsFactors = F, header = T)
  ### rename columns by MATCHING header names rather than by position,
  ### since liblinear's column order can vary
  old_cn = colnames(pred_score_df)
  new_cn = c("pred_label","pos_score","neg_score")
  new_cn[which(old_cn == "labels")] = "pred_label"
  new_cn[which(old_cn == "1")] = "pos_score"
  new_cn[which(old_cn == "0")] = "neg_score"
  colnames(pred_score_df) = new_cn
  pred_df = readRDS(predict_candidate_df_Rds)
  colnames(id_convert) = c("protID","gene_name")
  ### no known positives in target mode, so combined_label is just the
  ### prediction; known_label carries whatever `label` column came in
  pred_df_score_label = pred_df %>%
    dplyr::mutate(pred_score = pred_score_df$pos_score) %>%
    dplyr::mutate(pred_label = "negative") %>%
    dplyr::mutate(pred_label = replace(pred_label, pred_score >= score_threshold, "positive")) %>%
    dplyr::mutate(prediction_label = pred_label) %>%
    dplyr::mutate(combined_label = prediction_label)%>%
    dplyr::mutate(known_label = label)%>%
    dplyr::mutate(threshold = score_threshold) %>%
    dplyr::left_join(id_convert) %>%
    dplyr::arrange(protID, pos) %>%
    dplyr::select(protID, gene_name, pos, window, pred_score, threshold, prediction_label, known_label, combined_label)
  write.table(pred_df_score_label,paste0(output_label, "_window_score_df.tsv"),
              sep = "\t", quote = F, row.names = F, na = "")
  saveRDS(pred_df_score_label, file = paste0(output_label, "_window_score_df.Rds"))
}
# select_before_feature_extraction ----------------------------------------
# Balance a candidate table BEFORE feature extraction: keep all positives
# and a reproducible (seed 123) random subset of negatives of the same
# size. Writes <output_label>_candidate.tsv / .Rds with positives first.
select_equal_size_candidate_decoy = function(candidate_Rds_name, output_label)
{
  candidate_df = readRDS(candidate_Rds_name)
  neg_part <- candidate_df %>% dplyr::filter(label == "negative")
  pos_part <- candidate_df %>% dplyr::filter(label == "positive")
  set.seed(123)
  sampled_neg = neg_part[sample(nrow(neg_part), nrow(pos_part)), ]
  balanced_df = rbind(pos_part, sampled_neg)
  write.table(balanced_df, paste0(output_label, "_candidate.tsv"),
              quote = F, row.names = F, sep = "\t")
  saveRDS(balanced_df, paste0(output_label, "_candidate.Rds"))
}
### use R to control the terminal
# Select a subset of per-residue feature columns from a wide feature table
# and logit-transform some of them.
#
# The input table is laid out as consecutive per-position groups: position
# k's features occupy columns (k-1)*10 + 1 .. k*10, i.e. 10 features per
# residue position (grounded in the stride-10 seq() below — TODO confirm
# against the producer). For a window of 2*(center_position-1)+1 positions
# this keeps, for every position, the feature indices in `to_extract`
# (1..10 within each group) and applies a clamped logit to those listed in
# `to_logit`.
#
# Args:
#   feature_data: data.table of raw features (indexed with `with = F`).
#   to_extract: within-group feature indices (1-based) to keep per position.
#   to_logit: subset of to_extract to logit-transform (assumed to hold
#     probabilities; values are clamped to [0.001, 0.999] first).
#   center_position: 1-based center of the window; determines window width.
# Returns: a plain matrix of the extracted (and partly logit-ed) features.
extract_site_specific_features_new=function(feature_data, to_extract, to_logit, center_position)
{
  logit_from_extract = which(to_extract %in% to_logit)
  ##### step 1: column indices to extract, one stride-10 block per position
  total_feature_length = 2*(center_position-1)+1
  extractbs = matrix(rep(seq(0, 10*(total_feature_length-1),10),length(to_extract)),
                     nrow=total_feature_length,
                     ncol=length(to_extract))
  add_extractbs = sapply(1:length(to_extract), function(x) extractbs[,x]+to_extract[x])
  vec_add_extractbs = sort(as.vector(add_extractbs))
  extract_feature_data = feature_data[,vec_add_extractbs, with = F]
  rm(feature_data)
  ##### step 2: logit-transform the chosen columns. After extraction each
  ##### position contributes length(to_extract) columns, so the stride is
  ##### now `ef` instead of 10.
  ef = length(to_extract)
  logbs = matrix(rep(seq(0, ef*(total_feature_length-1),ef),length(logit_from_extract)),
                 nrow=total_feature_length,
                 ncol=length(logit_from_extract))
  add_logbs = sapply(1:length(logit_from_extract), function(x) logbs[,x]+logit_from_extract[x])
  vec_add_logbs = sort(as.vector(add_logbs))
  ### work on a plain matrix from here on
  extract_feature_data = as.matrix(extract_feature_data)
  tar = extract_feature_data[, vec_add_logbs]
  ### clamp away 0/1 so the logit stays finite
  tar[which(tar<0.001)]=0.001
  tar[which(tar>0.999)]=0.999
  logitit = function(p1){return(log(p1/(1-p1)))}
  logit_tar = logitit(tar)
  ### write the transformed columns back in place
  extract_feature_data[,vec_add_logbs] = logit_tar
  return(extract_feature_data)
}
# window score assembly ----------------------------------------------
#### ok the following two functions are still in the test procedure
# prediction_annotation ---------------------------------------------------
# Annotate positive and candidate window-score tables with protein-domain
# and subcellular-location columns (via map_domain() and
# map_subcellular_location(), defined elsewhere) and write each table as
# <output_label>_pos_info_df.{Rds,tsv} / <output_label>_candi_info_df.{Rds,tsv}.
domain_subcellular_mapping = function(pos_window_score_Rds,
                                      candi_window_score_Rds,
                                      dm_df_Rds,
                                      sc_df_Rds,
                                      output_label)
{
  dm_df = readRDS(dm_df_Rds)
  sc_df = readRDS(sc_df_Rds)
  ## Annotate one window-score table and write it out under the given tag.
  annotate_and_save = function(window_score, tag)
  {
    info_df = data.frame(window_score,
                         domain = map_domain(dm_df, window_score),
                         subcellular = map_subcellular_location(sc_df, window_score),
                         stringsAsFactors = F)
    saveRDS(info_df, file = paste0(output_label, "_", tag, "_info_df.Rds"))
    write.table(info_df, paste0(output_label, "_", tag, "_info_df.tsv"),
                quote = F, row.names = F, sep = "\t")
  }
  annotate_and_save(readRDS(pos_window_score_Rds), "pos")
  annotate_and_save(readRDS(candi_window_score_Rds), "candi")
}
# Collect sites in a given domain across several modification types.
#
# For each mod in mod_names, reads its annotated positive and candidate
# info tables (<mod>_pred_pos_info_df.Rds / <mod>_pred_candi_info_df.Rds),
# retrieves the domain's sites via retrieve_for_each_mod() using the
# per-mod score cutoffs, row-binds everything, sorts by protID/pos, and
# writes <output_label>_<domain_name>_retrieve.{Rds,tsv}.
retrieve_domain_all_mod = function(mod_names, mod_pos_score, mod_candi_score, domain_name,
                                   output_label)
{
  per_mod = lapply(seq_along(mod_names), function(i) {
    this_mod = mod_names[i]
    retrieve_for_each_mod(readRDS(paste0(this_mod, "_pred_pos_info_df.Rds")),
                          readRDS(paste0(this_mod, "_pred_candi_info_df.Rds")),
                          mod_pos_score[i],
                          mod_candi_score[i],
                          this_mod, domain_name)
  })
  all_retrieve_df = data.frame(rbindlist(per_mod)) %>%
    dplyr::arrange(protID, pos)
  saveRDS(all_retrieve_df, file = paste0(output_label, "_",domain_name,"_retrieve.Rds"))
  write.table(all_retrieve_df, paste0(output_label, "_",domain_name,"_retrieve.tsv"),
              quote = F, row.names= F, sep = "\t")
}
# Sequence-logo plot of the positive windows (center residue removed),
# saved to <output_label>_logoPlot.pdf.
#
# NOTE(review): the substr() calls hard-code a 25-character window with
# the center at position 13 — this will silently truncate or misalign for
# any other window size; confirm before reusing with different widths.
# `ggfortify()` here is gglogo's frequency-table builder, not the
# ggfortify package.
create_gglogo_plot = function(candidate_df_Rds,
                              output_label)
{
  candidate_df = readRDS(candidate_df_Rds)
  pos_windows = candidate_df %>%
    dplyr::filter(label == "positive") %>%
    dplyr::select(window)
  ### drop the modified center residue (position 13 of 25)
  delete_center = paste0(substr(pos_windows$window,1,12),substr(pos_windows$window,14,25))
  pos_windows$window = delete_center
  # pdf(paste0(output_label,"_logoPlot.pdf"), useDingbats = F)
  ### NOTE(review): this ggplot object is neither assigned nor printed;
  ### ggsave() below relies on last_plot() — verify the intended plot is
  ### the one that gets saved.
  ggplot(data = ggfortify(pos_windows, "window", method = "frequency")) +
    geom_logo(aes(x=position, y=info, group=element,
                  label=element, fill=interaction(Water, Polarity)),
              alpha = 0.6) +
    scale_fill_brewer(palette="Paired") +
    theme(legend.position = "bottom")
  #dev.off()
  ggsave(filename = paste0(output_label,"_logoPlot.pdf"),device = "pdf",
         width = 10, height = 8)
}
# Average the feature weights of two trained liblinear model files and
# write them, ordered by arrange_feature, to
# <output_label>_arrange_name_coeff_df.tsv.
#
# The first 6 lines of each model file are treated as header; one numeric
# weight per line follows, in the same order as full_feature.
get_average_weights_of_two = function(first_train_model_file, second_train_model_file,
                                      full_feature, arrange_feature, output_label)
{
  ## Read the per-feature weight vector from one model file.
  read_model_weights = function(model_file)
  {
    model_lines = readLines(model_file)
    as.numeric(model_lines[7:length(model_lines)])
  }
  ave_weights = rowMeans(cbind(read_model_weights(first_train_model_file),
                               read_model_weights(second_train_model_file)))
  ws_df = data.frame(name = full_feature,
                     weights = ave_weights,
                     abs_weights = abs(ave_weights), stringsAsFactors = F)
  name_arr_ws_df = arrange_feature %>%
    dplyr::left_join(ws_df, by = c("arrange_feature" ="name"))
  write.table(name_arr_ws_df,paste0(output_label,"_arrange_name_coeff_df.tsv"),
              sep = "\t", row.names = F, quote = F)
}
# Bar plot of model coefficients, colored by feature family, saved to
# <plot_name>_weights_bar.pdf.
#
# NOTE(review): the color-block sizes hard-code a specific feature layout
# (8 hydrophobicity + 45 aaindex + 24 each of ASA/HSE/pC/pH + 24 pssm =
# 173 features, in that order) and the y-axis is fixed to [-0.55, 0.55];
# both must match the coefficient file being plotted — confirm before
# reusing with a different feature set.
plot_weights = function(coef_file, plot_name)
{
  coef_df = data.table::fread(coef_file, header = T, stringsAsFactors = F)
  ### one color per feature family, in file order
  col_hydrophobicity = rep("skyblue", 8)
  col_aaindex = rep("purple", 45)
  col_ASA = rep("green",24 )
  col_HSE = rep("yellow",24 )
  col_pC = rep("red",24 )
  col_pH = rep("orange",24 )
  col_pssm = rep("pink",24)
  cols = c(col_hydrophobicity, col_aaindex, col_ASA, col_HSE, col_pC, col_pH, col_pssm)
  # max_y = max(abs(coef_df$weights))
  pdf(paste0(plot_name,"_weights_bar.pdf"), useDingbats = F)
  barplot(coef_df$weights, main = plot_name, col = cols, ylim = c(-0.55, 0.55))
  # abline (h = 0, col = "green", lty = 2)
  # abline(v= 8, col = "red", lty = 2)
  # abline(v= 53, col = "red", lty = 2)
  # abline(v= 101, col = "red", lty = 2)
  # abline(v= 149, col = "red", lty = 2)
  #
  dev.off()
}
# Per-domain PTM "crosstalk" analysis between an anchor modification and a
# cross modification on separate site tables, followed by a one-sided
# (alternative = "greater") Fisher's exact test per domain.
#
# A site pair counts as a potential crosstalk pair when both sites fall in the
# same protein, the same Pfam domain, and are within `distance` residues of
# each other (distance 0, i.e. the same position, is excluded).
#
# Arguments:
#   anchor_mod_site, cross_mod_site : present in the signature but not
#       referenced in this body -- NOTE(review): confirm they can be dropped.
#   distance : maximum residue separation for a pair to be counted.
#   anchor_mod, cross_mod : modification names; the "<mod>_label" column of
#       each merged table is renamed to anchor_label / cross_label.
#   anchor_new_merge_Rds, cross_new_merge_Rds : RDS paths of per-site tables
#       with at least protID, pos, domain and the label column.
#   output_label : prefix for the files written as side effects:
#       <label>_tbt_table.{Rds,tsv}, <label>_domain_prot_match.{Rds,tsv},
#       <label>_test.tsv.
#
# 2x2 cell meanings per domain:
#   a = anchor positive & cross positive, b = anchor negative & cross positive,
#   c = anchor positive & cross negative, d = both negative.
calculate_seq_pairs = function(anchor_mod_site, cross_mod_site, distance,
                               anchor_mod, cross_mod,
                               anchor_new_merge_Rds, cross_new_merge_Rds,
                               output_label)
{
  anchor_new_merge = readRDS(anchor_new_merge_Rds)
  cross_new_merge = readRDS(cross_new_merge_Rds)
  # Rename the modification-specific label columns to fixed names so the
  # rest of the function is modification-agnostic.
  cn_anchor = colnames(anchor_new_merge)
  cn_anchor[which(grepl(paste0(anchor_mod, "_label"), cn_anchor))] = "anchor_label"
  colnames(anchor_new_merge) = cn_anchor
  cn_cross = colnames(cross_new_merge)
  cn_cross[which(grepl(paste0(cross_mod, "_label"), cn_cross))] = "cross_label"
  colnames(cross_new_merge) = cn_cross
  # The `domain` column can hold several space-separated domain names per
  # site, so split before taking the unique set.
  anchor_domain_list = unique(anchor_new_merge$domain)
  anchor_domain_list = unique(unlist(strsplit(anchor_domain_list, split = " ")))
  cross_domain_list = unique(cross_new_merge$domain)
  cross_domain_list = unique(unlist(strsplit(cross_domain_list, split = " ")))
  # Only domains seen in both tables can contain crosstalk pairs.
  common_domain = intersect(anchor_domain_list, cross_domain_list)
  common_domain = common_domain[!is.na(common_domain)]
  output_df = data.frame(domain_name = common_domain, a = 0, b = 0, c = 0, d = 0)
  domain_prot_df = data.frame(domain_name = common_domain,
                              protIDs = character(length(common_domain)),
                              stringsAsFactors = F)
  for (i in 1:length(common_domain))
  {
    if (i %% 100 == 0)
      cat(i, "\n")  # progress indicator
    domain_prot_df$protIDs[i] = NA
    a = 0; b = 0; c = 0; d = 0
    # \b word boundaries avoid partial matches inside space-separated
    # multi-domain strings.
    this_domain = common_domain[i]
    this_anchor_domain = anchor_new_merge %>%
      dplyr::filter(grepl(paste0("\\b", this_domain, "\\b"), domain))
    this_cross_domain = cross_new_merge %>%
      dplyr::filter(grepl(paste0("\\b", this_domain, "\\b"), domain))
    if (nrow(this_anchor_domain) > 0 & nrow(this_cross_domain) > 0)
    {
      # Crosstalk is only counted within the same protein and domain.
      this_anchor_proteins = unique(this_anchor_domain$protID)
      this_cross_proteins = unique(this_cross_domain$protID)
      common_proteins = intersect(this_anchor_proteins, this_cross_proteins)
      if (length(common_proteins) > 0)
      {
        domain_crosstalk_in_prot = c("")
        for (j in 1:length(common_proteins))
        {
          this_prot_anchor_domain = this_anchor_domain %>%
            filter(protID == common_proteins[j])
          this_prot_cross_domain = this_cross_domain %>%
            filter(protID == common_proteins[j])
          # Collect all anchor/cross position pairs within `distance`
          # residues (excluding identical positions).
          anchor_cross_pair_pos =
            data.frame(anchor_position = numeric(), cross_position = numeric())
          anchor_pos = this_prot_anchor_domain %>%
            dplyr::select(pos)
          anchor_pos = anchor_pos$pos
          cross_pos = this_prot_cross_domain %>%
            dplyr::select(pos)
          cross_pos = cross_pos$pos
          for (p in 1:length(anchor_pos))
          {
            dis = abs(anchor_pos[p] - cross_pos)
            find_cross = which(dis > 0 & dis <= distance)
            if (length(find_cross) > 0)
            {
              anchor_position = rep(anchor_pos[p], length(find_cross))
              cross_position = cross_pos[find_cross]
              pos_pair = cbind(anchor_position, cross_position)
              anchor_cross_pair_pos = rbind(anchor_cross_pair_pos, pos_pair)
            }
          }
          if (nrow(anchor_cross_pair_pos) > 0)
          {
            # Look up the label of each paired site and tally the 2x2 cells.
            get_anchor_match = match(anchor_cross_pair_pos$anchor_position,
                                     this_prot_anchor_domain$pos)
            get_cross_match = match(anchor_cross_pair_pos$cross_position,
                                    this_prot_cross_domain$pos)
            anchor_match_label = this_prot_anchor_domain$anchor_label[get_anchor_match]
            cross_match_label = this_prot_cross_domain$cross_label[get_cross_match]
            add_to_a = which(anchor_match_label == T & cross_match_label == T)
            add_to_b = which(anchor_match_label == F & cross_match_label == T)
            add_to_c = which(anchor_match_label == T & cross_match_label == F)
            add_to_d = which(anchor_match_label == F & cross_match_label == F)
            a = a + length(add_to_a)
            b = b + length(add_to_b)
            c = c + length(add_to_c)
            d = d + length(add_to_d)
            # Remember proteins that contributed at least one double-positive
            # pair for this domain.
            if (length(add_to_a) > 0)
            {
              domain_crosstalk_in_prot = c(domain_crosstalk_in_prot, common_proteins[j])
              domain_prot_df$protIDs[i] = paste(domain_crosstalk_in_prot, collapse = " ")
            }
          }
        }
      }
    }
    output_df$a[i] = a
    output_df$b[i] = b
    output_df$c[i] = c
    output_df$d[i] = d
  }
  saveRDS(output_df, file = paste0(output_label, "_tbt_table.Rds"))
  write.table(output_df, paste0(output_label, "_tbt_table.tsv"), sep = "\t",
              quote = F, row.names = F)
  saveRDS(domain_prot_df, file = paste0(output_label, "_domain_prot_match.Rds"))
  write.table(domain_prot_df, paste0(output_label, "_domain_prot_match.tsv"), sep = "\t",
              quote = F, row.names = F)
  # Aggregate counts per domain name and rename to modification-specific
  # column headers for the output table.
  get_tbt = output_df %>%
    dplyr::group_by(domain_name) %>%
    dplyr::summarise(both_positive = sum(a), cross_positive = sum(b), anchor_positive = sum(c), both_negative = sum(d))
  colnames(get_tbt) = c("domain", "both_positive", paste0(cross_mod, "_positive"),
                        paste0(anchor_mod, "_positive"), "both_negative")
  # One-sided Fisher's exact test (enrichment of double positives) per domain.
  fisher_p = rep(0, nrow(get_tbt))
  or = rep(0, nrow(get_tbt))
  for (i in 1:nrow(get_tbt))
  {
    fm = matrix(as.numeric(get_tbt[i, c(2:5)]), nrow = 2, ncol = 2, byrow = T)
    ft = fisher.test(fm, alternative = "g")
    fisher_p[i] = ft$p.value
    or[i] = ft$estimate
  }
  tbt_p = get_tbt %>%
    dplyr::mutate(fisher_pvalue = fisher_p) %>%
    dplyr::mutate(odds_ratio = or) %>%
    dplyr::arrange(fisher_pvalue)
  write.table(tbt_p, paste0(output_label, "_test.tsv"),
              quote = F, row.names = F, sep = "\t")
}
# Same per-domain 2x2 / Fisher's-exact analysis as calculate_seq_pairs(), but
# for the case where both modifications are recorded on the SAME merged site
# table (i.e. both labels describe the same residue position), so no pairwise
# distance search is needed -- each row already carries both labels.
#
# Arguments:
#   compete_mod_site : present in the signature but not referenced in this
#       body -- NOTE(review): confirm it can be dropped.
#   anchor_mod, cross_mod : modification names; "<mod>_label" columns are
#       renamed to anchor_label / cross_label.
#   new_merge_Rds : RDS path of the merged per-site table (protID, pos,
#       domain, and both label columns).
#   output_label : prefix for <label>_domain_prot_match.{Rds,tsv},
#       <label>_tbt_table.{Rds,tsv} and <label>_test.tsv.
calculate_seq_pairs_negative = function(compete_mod_site,
                                        anchor_mod, cross_mod,
                                        new_merge_Rds,
                                        output_label)
{
  new_merge = readRDS(new_merge_Rds)
  # Normalise the two label columns to fixed names.
  cn_merge = colnames(new_merge)
  cn_merge[which(grepl(paste0(anchor_mod, "_label"), cn_merge))] = "anchor_label"
  cn_merge[which(grepl(paste0(cross_mod, "_label"), cn_merge))] = "cross_label"
  colnames(new_merge) = cn_merge
  # Domain strings can contain several space-separated names per site.
  domain_list = unique(new_merge$domain)
  domain_list = unique(unlist(strsplit(domain_list, split = " ")))
  common_domain = domain_list[!is.na(domain_list)]
  output_df = data.frame(domain_name = common_domain, a = 0, b = 0, c = 0, d = 0)
  domain_prot_df = data.frame(domain_name = common_domain,
                              protIDs = character(length(common_domain)),
                              stringsAsFactors = F)
  for (i in 1:length(common_domain))
  {
    if (i %% 100 == 0)
      cat(i, "\n")  # progress indicator
    domain_prot_df$protIDs[i] = NA
    a = 0; b = 0; c = 0; d = 0
    this_domain = common_domain[i]
    # Word-boundary match to avoid partial hits in multi-domain strings.
    this_domain_df = new_merge %>%
      dplyr::filter(grepl(paste0("\\b", this_domain, "\\b"), domain))
    if (nrow(this_domain_df) > 0)
    {
      # Tally the 2x2 cells directly from the per-row label pair:
      # a = both positive, b = cross only, c = anchor only, d = neither.
      anchor_match_label = this_domain_df$anchor_label
      cross_match_label = this_domain_df$cross_label
      a = length(which(anchor_match_label == T & cross_match_label == T))
      b = length(which(anchor_match_label == F & cross_match_label == T))
      c = length(which(anchor_match_label == T & cross_match_label == F))
      d = length(which(anchor_match_label == F & cross_match_label == F))
      # Record proteins containing at least one double-positive site.
      have_cross = this_domain_df %>%
        dplyr::filter(anchor_label == T, cross_label == T) %>%
        dplyr::select(protID)
      if (nrow(have_cross) > 0)
        domain_prot_df$protIDs[i] = paste(unique(have_cross$protID),
                                          collapse = " ")
    }
    output_df$a[i] = a
    output_df$b[i] = b
    output_df$c[i] = c
    output_df$d[i] = d
  }
  saveRDS(domain_prot_df, file = paste0(output_label, "_domain_prot_match.Rds"))
  write.table(domain_prot_df, paste0(output_label, "_domain_prot_match.tsv"), sep = "\t",
              quote = F, row.names = F)
  saveRDS(output_df, file = paste0(output_label, "_tbt_table.Rds"))
  write.table(output_df, paste0(output_label, "_tbt_table.tsv"), sep = "\t",
              quote = F, row.names = F)
  get_tbt = output_df %>%
    dplyr::group_by(domain_name) %>%
    dplyr::summarise(both_positive = sum(a), cross_positive = sum(b), anchor_positive = sum(c), both_negative = sum(d))
  colnames(get_tbt) = c("domain", "both_positive", paste0(cross_mod, "_positive"),
                        paste0(anchor_mod, "_positive"), "both_negative")
  # One-sided Fisher's exact test (enrichment of double positives) per domain.
  fisher_p = rep(0, nrow(get_tbt))
  or = rep(0, nrow(get_tbt))
  for (i in 1:nrow(get_tbt))
  {
    fm = matrix(as.numeric(get_tbt[i, c(2:5)]), nrow = 2, ncol = 2, byrow = T)
    ft = fisher.test(fm, alternative = "g")
    fisher_p[i] = ft$p.value
    or[i] = ft$estimate
  }
  tbt_p = get_tbt %>%
    dplyr::mutate(fisher_pvalue = fisher_p) %>%
    dplyr::mutate(odds_ratio = or) %>%
    dplyr::arrange(fisher_pvalue)
  write.table(tbt_p, paste0(output_label, "_test.tsv"),
              quote = F, row.names = F, sep = "\t")
}
# Per-domain enrichment analysis of a single PTM: for every domain, build a
# 2x2 table of (in-domain / out-of-domain) x (positive / negative) sites and
# run a simulated-p-value chi-squared test.
#
# mod_type      : modification name; its "<mod_type>_label" column is renamed
#                 to anchor_label.
# new_merge_Rds : RDS path of the per-site table (protID, domain, label, ...).
# output_label  : prefix for <label>_domain_prot_match.{Rds,tsv},
#                 <label>_tbt_table.{Rds,tsv} and <label>_test.tsv.
calculate_individual_ptm = function(mod_type,
                                    new_merge_Rds,
                                    output_label)
{
  new_merge = readRDS(new_merge_Rds)
  cn_merge = colnames(new_merge)
  cn_merge[which(grepl(paste0(mod_type, "_label"), cn_merge))] = "anchor_label"
  colnames(new_merge) = cn_merge
  # Total number of positive sites across the whole table (fixed margin of
  # every per-domain 2x2 table below).
  num_positive_ptm = sum(new_merge$anchor_label)
  # Split space-separated multi-domain strings before de-duplicating.
  domain_list = unique(new_merge$domain)
  domain_list = domain_list[!is.na(domain_list)]
  domain_list = unique(unlist(strsplit(domain_list, split = " ")))
  output_df = data.frame(domain = domain_list, a = 0, b = 0, c = 0, d = 0)
  domain_prot_df = data.frame(domain_name = domain_list,
                              protIDs = character(length(domain_list)),
                              stringsAsFactors = F)
  for (i in 1:length(domain_list))
  {
    if (i %% 1000 == 0)
      cat(i, "\n")  # progress indicator
    domain_prot_df$protIDs[i] = NA
    # Word-boundary match against the (possibly multi-name) domain column.
    this_domain_rows = new_merge %>%
      dplyr::filter(grepl(paste0("\\b", domain_list[i], "\\b"), domain))
    # Proteins that carry a positive site inside this domain.
    have_ptm_domain = this_domain_rows %>%
      dplyr::filter(anchor_label == T)
    if (nrow(have_ptm_domain) > 0)
      domain_prot_df$protIDs[i] = paste(unique(have_ptm_domain$protID),
                                        collapse = " ")
    # 2x2 cells: a = in-domain positive, b = out-of-domain positive,
    # c = in-domain negative, d = out-of-domain negative.
    a = sum(this_domain_rows$anchor_label)
    b = num_positive_ptm - a
    c = nrow(this_domain_rows) - a
    d = nrow(new_merge) - a - b - c
    output_df$a[i] = a
    output_df$b[i] = b
    output_df$c[i] = c
    output_df$d[i] = d
  }
  saveRDS(domain_prot_df, file = paste0(output_label, "_domain_prot_match.Rds"))
  write.table(domain_prot_df, paste0(output_label, "_domain_prot_match.tsv"), sep = "\t",
              quote = F, row.names = F)
  colnames(output_df) = c("domain", "InDomain_positive", "OutDomain_positive",
                          "InDomain_negative", "OutDomain_negative")
  saveRDS(output_df, file = paste0(output_label, "_tbt_table.Rds"))
  write.table(output_df, paste0(output_label, "_tbt_table.tsv"), sep = "\t",
              quote = F, row.names = F)
  # Chi-squared test with simulated p-values (robust to small cell counts);
  # the odds ratio is computed directly as ad/bc.
  chisq_p = rep(0, nrow(output_df))
  or = rep(0, nrow(output_df))
  for (i in 1:nrow(output_df))
  {
    if (i %% 100 == 0)
      cat(i, "\n")  # progress indicator
    fm = matrix(as.numeric(output_df[i, c(2:5)]), nrow = 2, ncol = 2, byrow = T)
    ft = chisq.test(fm, simulate.p.value = T)
    chisq_p[i] = ft$p.value
    or[i] = output_df[i, 2] * output_df[i, 5] / output_df[i, 3] / output_df[i, 4]
  }
  tbt_p = output_df %>%
    dplyr::mutate(chisq_pvalue = chisq_p) %>%
    dplyr::mutate(odds_ratio = or) %>%
    dplyr::arrange(chisq_pvalue)
  write.table(tbt_p, paste0(output_label, "_test.tsv"),
              quote = F, row.names = F, sep = "\t")
}
# Annotate a window/score/label table with per-site domain and subcellular-
# location columns, using the sibling helpers map_domain() and
# map_subcellular_location() (defined elsewhere in this file/package), then
# write the combined table to "<output_label>_mapped_df.{Rds,tsv}".
#
# window_score_label_Rds : RDS path of the per-site prediction table.
# dm_df_Rds              : RDS path of the domain annotation table.
# sc_df_Rds              : RDS path of the subcellular-location table.
# output_label           : output-file prefix.
map_domain_subcellular = function(window_score_label_Rds,
                                  dm_df_Rds,
                                  sc_df_Rds,
                                  output_label)
{
  dm_df = readRDS(dm_df_Rds)
  sc_df = readRDS(sc_df_Rds)
  window_score_label = readRDS(window_score_label_Rds)
  # One annotation value per row of window_score_label -- presumably aligned
  # by position; see the helper implementations to confirm.
  each_domain = map_domain(dm_df, window_score_label)
  each_subcellular = map_subcellular_location(sc_df, window_score_label)
  info_df = data.frame(window_score_label,
                       domain = each_domain,
                       subcellular = each_subcellular,
                       stringsAsFactors = F)
  saveRDS(info_df, file = paste0(output_label, "_mapped_df.Rds"))
  # na = "" writes missing annotations as empty fields in the TSV.
  write.table(info_df, paste0(output_label, "_mapped_df.tsv"),
              quote = F, row.names = F, sep = "\t", na = "")
}
# chisq_test_for_single_ptm -----------------------------------------------
# Per-domain enrichment of a single PTM from a mapped prediction table
# (output of map_domain_subcellular): builds an in/out-of-domain x
# positive/negative 2x2 table per domain and runs a simulated-p-value
# chi-squared test, reporting raw and Haldane-corrected (+0.5) odds ratios.
#
# mapped_window_score_label_Rds : RDS path; the table is assumed to have 11
#     columns in the order hard-coded below (protID ... subcellular), with
#     combined_label coded as "positive"/"negative".
# output_label : prefix for <label>_domain_prot_match.{Rds,tsv},
#     <label>_tbt_table.{Rds,tsv} and <label>_test.tsv.
calculate_tbt_single_ptm = function(mapped_window_score_label_Rds,
                                    output_label)
{
  mapped_window_score_label = readRDS(mapped_window_score_label_Rds)
  # Impose the expected column layout, then rename combined_label to
  # anchor_label for uniform handling.
  colnames(mapped_window_score_label) = c("protID", "gene_name", "pos", "window", "pred_score", "threshold",
                                          "prediction_label", "known_label", "combined_label",
                                          "domain", "subcellular")
  cn_merge = colnames(mapped_window_score_label)
  cn_merge[which(grepl("combined_label", cn_merge))] = "anchor_label"
  colnames(mapped_window_score_label) = cn_merge
  # Recode "positive"/"negative" strings to logical TRUE/FALSE.
  mapped_window_score_label = mapped_window_score_label %>%
    dplyr::mutate(anchor_label = replace(anchor_label, anchor_label == "positive", TRUE)) %>%
    dplyr::mutate(anchor_label = as.logical(replace(anchor_label, anchor_label == "negative", FALSE)))
  # Total positives across the table (fixed margin of each 2x2 below).
  num_positive_ptm = sum(mapped_window_score_label$anchor_label)
  # Split space-separated multi-domain strings before de-duplicating.
  domain_list = unique(mapped_window_score_label$domain)
  domain_list = domain_list[!is.na(domain_list)]
  domain_list = unique(unlist(strsplit(domain_list, split = " ")))
  output_df = data.frame(domain = domain_list, a = 0, b = 0, c = 0, d = 0)
  domain_prot_df = data.frame(domain_name = domain_list,
                              protIDs = character(length(domain_list)),
                              stringsAsFactors = F)
  for (i in 1:length(domain_list))
  {
    domain_prot_df$protIDs[i] = NA
    # Word-boundary match against the (possibly multi-name) domain column.
    this_domain_rows = mapped_window_score_label %>%
      dplyr::filter(grepl(paste0("\\b", domain_list[i], "\\b"), domain))
    # Proteins carrying a positive site inside this domain.
    have_ptm_domain = this_domain_rows %>%
      dplyr::filter(anchor_label == T)
    if (nrow(have_ptm_domain) > 0)
      domain_prot_df$protIDs[i] = paste(unique(have_ptm_domain$protID),
                                        collapse = " ")
    # 2x2 cells: a = in-domain positive, b = out-of-domain positive,
    # c = in-domain negative, d = out-of-domain negative.
    a = sum(this_domain_rows$anchor_label)
    b = num_positive_ptm - a
    c = nrow(this_domain_rows) - a
    d = nrow(mapped_window_score_label) - a - b - c
    output_df$a[i] = a
    output_df$b[i] = b
    output_df$c[i] = c
    output_df$d[i] = d
  }
  saveRDS(domain_prot_df, file = paste0(output_label, "_domain_prot_match.Rds"))
  write.table(domain_prot_df, paste0(output_label, "_domain_prot_match.tsv"), sep = "\t",
              quote = F, row.names = F)
  colnames(output_df) = c("domain", "InDomain_positive", "OutDomain_positive",
                          "InDomain_negative", "OutDomain_negative")
  saveRDS(output_df, file = paste0(output_label, "_tbt_table.Rds"))
  write.table(output_df, paste0(output_label, "_tbt_table.tsv"), sep = "\t",
              quote = F, row.names = F)
  chisq_p = rep(0, nrow(output_df))
  or = rep(0, nrow(output_df))
  adjusted_or = rep(0, nrow(output_df))
  for (i in 1:nrow(output_df))
  {
    fm = matrix(as.numeric(output_df[i, c(2:5)]), nrow = 2, ncol = 2, byrow = T)
    # Fixed seed makes the simulated chi-squared p-values reproducible.
    set.seed(123)
    ft = chisq.test(fm, simulate.p.value = T)
    chisq_p[i] = ft$p.value
    or[i] = output_df[i, 2] * output_df[i, 5] / output_df[i, 3] / output_df[i, 4]
    # Haldane-Anscombe +0.5 correction keeps the OR finite with zero cells.
    adjusted_or[i] = (output_df[i, 2] + 0.5) * (output_df[i, 5] + 0.5) / (output_df[i, 3] + 0.5) / (output_df[i, 4] + 0.5)
  }
  tbt_p = output_df %>%
    dplyr::mutate(chisq_pvalue = chisq_p) %>%
    dplyr::mutate(odds_ratio = or) %>%
    dplyr::mutate(adjusted_odds_ratio = adjusted_or) %>%
    dplyr::arrange(chisq_pvalue)
  write.table(tbt_p, paste0(output_label, "_test.tsv"),
              quote = F, row.names = F, sep = "\t")
}
|
aa51f752f17a377dd65c831ae4a56faa7b0d655a
|
0c91fa27c912ee29fac64e4a10fb34374ecef3a7
|
/R/airfoil.R
|
6b46a2a33646bfbca533e4b0b27dc22414c6a98e
|
[] |
no_license
|
xmengju/RRBoost
|
d2069c3cbebe98455d29c2da18abb04daeaeb1b4
|
b6e479ecd706fcb775916b367ad319e2429e17c2
|
refs/heads/master
| 2021-08-08T09:28:37.106335
| 2020-08-26T20:08:26
| 2020-08-26T20:08:26
| 215,244,704
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
airfoil.R
|
#' Airfoil self-noise data
#'
#' NASA data from aerodynamic and acoustic tests of airfoil blade sections
#' (Brooks, Pope and Marcolini, 1989), distributed via the UCI Machine
#' Learning Repository.
#'
#' There are 1503 observations and 6 variables:
#' \code{y} (the response; scaled sound pressure level in the UCI source),
#' \code{frequency}, \code{angle} (angle of attack), \code{chord_length},
#' \code{velocity} (free-stream velocity), and \code{thickness}
#' (suction-side displacement thickness).
#'
#' @docType data
#'
#' @usage data(airfoil)
#'
#' @format An object of class \code{"data.frame"}.
#'
#' @references Brooks, T. F., Pope, D. S., and Marcolini, M. A. (1989). Airfoil self-noise and prediction. NASA Reference Publication-1218, document id: 9890016302.
#'
#' @source The UCI Archive https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise,
#'
#' @examples
#' data(airfoil)
"airfoil"
|
cc491932ca6592e488a3f81d1fbd63d1445a7863
|
78c179dd7e008050b2e4a25468ee87fde776760f
|
/Tutorials/week6/code/6.R
|
9b0aacb412c940ba1a1760c107b71215c0824e5e
|
[] |
no_license
|
shonil24/Applied-Analytics
|
f2d03aea5ba34f4b95d8f575f6f75caffba0c8ed
|
700d9f2d696d613de6239e1e678524b6d0efd90b
|
refs/heads/master
| 2023-06-02T05:35:03.564689
| 2021-06-21T18:28:32
| 2021-06-21T18:28:32
| 307,153,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,154
|
r
|
6.R
|
library(readr)
library(dplyr)
library(epitools)
# Tutorial week 6: confidence intervals for means and proportions.
ames <- read_csv("F:/MS/Sem 1/AA/week6/ames.csv")
population <- ames$area
# NOTE: this binding shadows base::sample for the rest of the script
# (function calls still resolve to base::sample). The draw is not seeded.
sample <- sample(population, 60)
# Q1
summary(sample)
hist(sample)
# Every student will have different values and mean (the draw is random).
# Q2
# t-based confidence interval for the mean of a numeric vector.
# Returns c(lower, upper) at confidence level `conf` (default 95%);
# matches the interval reported by t.test(x, conf.level = conf).
mean.ci <- function(x, conf = 0.95) {
  n <- length(x)
  std_err <- sd(x) / sqrt(n)
  crit <- qt(1 - (1 - conf) / 2, df = n - 1)
  c(mean(x) - crit * std_err, mean(x) + crit * std_err)
}
# mean.ci() defined above computes a hand-rolled t-based CI for the mean.
# Q3
mean.ci(sample)
# Q4
mean.ci(sample, conf = 0.9)
# (A long pasted discussion about alpha vs confidence level was condensed:
# p-value > alpha simply means the data are compatible with H0 at that
# significance level; it says nothing about a fraction of the sample.)
# Q5
# `conf` partially matches t.test's `conf.level` argument.
t.test(sample, conf = 0.95)
t.test(sample, conf = 0.9)
# Draw 100 repeated samples of size 60 and record each sample's mean, sd and
# confidence interval, to visualise CI coverage with plot_ci() below.
samp_mean <- rep(NA, 100)
samp_sd <- rep(NA, 100)
lower_vector <- rep(NA, 100)
upper_vector <- rep(NA, 100)
n <- 60
for(i in 1:100){
  samp <- sample(population, n)
  samp_mean[i] <- mean(samp)
  samp_sd[i] <- sd(samp)
  ci <- mean.ci(samp)
  lower_vector[i] <- ci[1]
  upper_vector[i] <- ci[2]
}
# Plot a stack of confidence intervals (one horizontal segment per interval)
# around the true mean m. Intervals that miss m are highlighted in red/bold;
# intervals covering m are drawn in black.
#
# lo, hi : vectors of interval lower/upper bounds (same length).
# m      : the true population mean, drawn as a dashed vertical line.
plot_ci <- function(lo, hi, m) {
  par(mar=c(2, 1, 1, 1), mgp=c(2.7, 0.7, 0))
  k <- length(lo)
  # Widest interval determines the x-axis range around m.
  ci.max <- max(rowSums(matrix(c(-1*lo,hi),ncol=2)))
  xR <- m + ci.max*c(-1, 1)
  yR <- c(0, 41*k/40)
  plot(xR, yR, type='n', xlab='', ylab='', axes=FALSE)
  abline(v=m, lty=2, col='#00000088')
  axis(1, at=m, paste("mu = ",round(m,4)), cex.axis=1.15)
  for(i in 1:k){
    x <- mean(c(hi[i],lo[i]))  # interval midpoint
    ci <- c(lo[i],hi[i])
    # Red highlight when the interval does NOT contain the true mean.
    if((m < hi[i] & m > lo[i])==FALSE){
      col <- "#F05133"
      points(x, i, cex=1.4, col=col)
      lines(ci, rep(i, 2), col=col, lwd=5)
    } else{
      col <- 1
      points(x, i, pch=20, cex=1.2, col=col)
      lines(ci, rep(i, 2), col=col)
    }
  }
}
# Visualise which of the 100 simulated intervals cover the true mean.
plot_ci(lower_vector, upper_vector, mean(population))
cdc <- read_csv("F:/MS/Sem 1/AA/week6/cdc.csv")
# Q7: recode exercise indicator (1/0) to a labelled factor, then show the
# sample proportions.
cdc$exerany <- factor(cdc$exerany, levels = c(1,0), labels = c("Yes", "No"))
cdc$exerany %>% table() %>% prop.table()
# Q8
# Wald (normal-approximation) confidence interval for a proportion.
# x = number of successes, n = number of trials.
# Returns c(lower, upper) at confidence level `conf` (default 95%).
prop.ci <- function(x, n, conf = 0.95) {
  p_hat <- x / n
  std_err <- sqrt(p_hat * (1 - p_hat) / n)
  z <- qnorm(1 - (1 - conf) / 2)
  c(p_hat - z * std_err, p_hat + z * std_err)
}
# Q9
# p = x/n with x = 14914 successes and n = 14914 + 5086 total respondents.
# (Values are rounded at the end by the CI helpers.)
prop.ci(14914, 14914+5086, conf = 0.95)
# Q10: cross-check with epitools' normal-approximation binomial CI.
binom.approx(14914, 14914+5086, conf.level = 0.95)
|
a2346a7fa80110c06bf6845ab03acf54dc191b71
|
74de6acd13236646d5837771ced81315cd8c8f21
|
/SVM.R
|
1d99202f8b3e42634af5806495b510d0b4e1f873
|
[] |
no_license
|
amitshyamsukha/Machine-Learning
|
8482d1a08db4e618fca95d9d11b5f263cde705f1
|
7543c9cd75a13025de36ae11d191ca19faa750ed
|
refs/heads/master
| 2020-03-27T09:55:41.639376
| 2018-08-28T03:24:48
| 2018-08-28T03:24:48
| 146,383,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,720
|
r
|
SVM.R
|
library(kernlab)
library(readr)
library(caret)
# MNIST digit classification with SVMs (linear and RBF kernels).
setwd("C:/Amit/Upgrad/SVM_dataset/SVM Dataset1")
## Read the training data set: column V1 is the digit label, V2..V785 are
## the 28x28 pixel intensities (0-255).
mnist_dataset <- read.csv("mnist_train.csv" , header = F)
### Structure of the dataset
str(mnist_dataset)
## Examine few records
head(mnist_dataset)
# Exploring the data
summary(mnist_dataset)
## Check if there is any missing value in data set
column_null_check <- sapply (mnist_dataset , function(x) sum(is.na(x) ))
which(column_null_check >0 ) ## No null value
## Range checks: pixel values must lie in [0, 255].
column_min_value <- sapply (mnist_dataset , function(x) min(x) )
which(column_min_value < 0) ## None
which(column_min_value > 0) ## None
length(which(column_min_value == 0)) ## 785. All min value is 0
column_max_value <- sapply (mnist_dataset , function(x) max(x) )
which(column_max_value > 255) ## None
## So all the values are within range of 0 and 255. This is within permissible limit. So no outlier is there
## Check if there is any duplicate row in the data set
which(duplicated(mnist_dataset)) ## 0
# Reverse the row order of a matrix (rev applied down each column) so that
# digit images render right side up in image().
# NOTE: the parameter name shadows base::matrix inside this function body.
flip <- function(matrix){
  apply(matrix, 2, rev)
}
# Visual sanity check: render a few 28x28 digits from different parts of the
# file (drop column 1 = label; column 786 index is also dropped).
par(mfrow=c(3,3))
for (i in 20:28){
  digit <- flip(matrix(rev(as.numeric(mnist_dataset[i,-c(1, 786)])), nrow = 28)) #look at one digit
  image(digit, col = grey.colors(255))
}
par(mfrow=c(3,3))
for (i in 5000:5008){
  digit <- flip(matrix(rev(as.numeric(mnist_dataset[i,-c(1, 786)])), nrow = 28)) #look at one digit
  image(digit, col = grey.colors(255))
}
par(mfrow=c(3,3))
for (i in 10000:10008){
  digit <- flip(matrix(rev(as.numeric(mnist_dataset[i,-c(1, 786)])), nrow = 28)) #look at one digit
  image(digit, col = grey.colors(255))
}
# Changing output variable "V1" (the digit label) to factor type
mnist_dataset$V1<-factor(mnist_dataset$V1)
## The data set is huge, so take a 10% sample to train the model.
## NOTE: the sampling is not seeded, so results vary between runs.
train.indices = sample(1:nrow(mnist_dataset), 0.1*nrow(mnist_dataset))
train = mnist_dataset[train.indices, ]
## Make sure we have enough representation of each digit in the sample taken
table ( train$V1)
## Load the test data set
mnist_test_dataset <- read.csv("mnist_test.csv" , header = F)
## Take 50% of test data set for model test
test.indices = sample(1:nrow(mnist_test_dataset), 0.5*nrow(mnist_test_dataset))
test = mnist_test_dataset[test.indices, ]
test$V1<-factor(test$V1)
# Constructing Model
# Using Linear Kernel (vanilladot)
Model_linear <- ksvm(V1~ ., data = train, scale = FALSE, kernel = "vanilladot")
## Predict the output on the held-out test sample
Eval_linear<- predict(Model_linear, test)
confusionMatrix(Eval_linear,test$V1)
## Overall Accuracy : 0.915. But sensitivity is too low for few digits e.g. 3, 5 and 8 (close to 80%).
### So we need to move from linear to RBF
# Using RBF (Gaussian) Kernel
Model_RBF <- ksvm(V1~ ., data = train, scale = FALSE, kernel = "rbfdot")
Eval_RBF<- predict(Model_RBF, test)
# Confusion matrix - RBF Kernel
confusionMatrix(Eval_RBF,test$V1)
### Overall Accuracy increased to .95. Accuracy for all digits increased to more than 90% now.
## So RBF kernel is better than linear one.
#####################################################################
# Hyperparameter tuning and Cross Validation - Non-Linear - SVM
######################################################################
# caret::train performs the cross-validated grid search.
# trainControl sets the resampling scheme: method = "cv" means k-fold
# cross-validation; number = 5 gives 5 folds.
trainControl <- trainControl(method="cv", number=5)
# Metric <- "Accuracy" implies our evaluation metric is Accuracy.
metric <- "Accuracy"
# expand.grid enumerates the (sigma, C) hyperparameter combinations to try.
set.seed(8)
grid <- expand.grid(.sigma=c(0.50e-07, 1.50e-07, 2.50e-07), .C=c(1,2) )
# Performing 5-fold cross validation
fit.svm_radial <- train(V1~., data=train, method="svmRadial", metric=metric,
                        tuneGrid=grid, trControl=trainControl)
# Printing cross validation result
print(fit.svm_radial)
# Best tune at sigma = 2.5e-07 and C = 2 for maximum accuracy
## Since sigma is very low, the decision boundary is close to linear.
# Plotting model results
plot(fit.svm_radial)
######################################################################
# Checking overfitting - Non-Linear - SVM
######################################################################
# Validating the tuned model on the held-out test data
evaluate_non_linear<- predict(fit.svm_radial, test)
confusionMatrix(evaluate_non_linear, test$V1)
# Accuracy - 97.4%
# Sensitivity - Around 95% for all digits
# Specificity - Around 99% for all digits
|
b60a0b3aba8da9219b0a88e734d4cd63a4007439
|
c1d60db29ef427d263ff86ad5609deec4871da3e
|
/tests/testthat/testthat.R
|
1bc609c26c39f5f84570331cb8e41b1346bb1439
|
[] |
no_license
|
dabrowskia/dspace
|
46d15b978f88b680132b94ca88352c684ddf809d
|
94ed23bc1221f4384a223d1bdce0f687dcd1508c
|
refs/heads/master
| 2021-07-10T03:20:26.771048
| 2020-06-29T14:42:23
| 2020-06-29T14:42:23
| 139,340,321
| 4
| 1
| null | 2020-06-29T14:42:24
| 2018-07-01T15:29:04
|
R
|
UTF-8
|
R
| false
| false
| 1,435
|
r
|
testthat.R
|
# Regression tests for the dspace regionalization functions.
# NOTE(review): the expected class vectors are hard-coded snapshots of a
# previous run; if the underlying algorithm involves randomness they depend
# on the session RNG state -- confirm a seed is set inside the functions.
context("ds_polygon")
test_that("regionalization of polygon data (ds_polygon)",
          {
            data("socioGrid")
            socioGrid$class <- ds_polygon(socioGrid, k = 7,
                                          disjoint = TRUE, plot = TRUE, explain = FALSE)
            # First six cluster assignments must match the recorded snapshot.
            expect_equal(
              head(socioGrid$class),
              c(6, 5, 3, 1, 2, 6))
          })
context("ds_points")
test_that("regionalization of point data (ds_points)",
          {
            data("realEstate")
            realEstate$class <- ds_points(realEstate, k = 5, explain = FALSE)
            expect_equal(
              head(realEstate$class),
              c(1, 5, 4, 2, 4, 4))
          })
# regionalize() is the generic front end; it should reproduce the results of
# the point- and polygon-specific functions above.
context("regionalize")
test_that("regionalization of point data (regionalize)",
          {
            data("realEstate")
            realEstate$class <- regionalize(realEstate, k = 5, explain = FALSE)
            expect_equal(
              head(realEstate$class),
              c(1, 5, 4, 2, 4, 4))
          })
test_that("regionalization of polygon data (regionalize)",
          {
            data("socioGrid")
            socioGrid$class <- regionalize(socioGrid, k = 7,
                                           disjoint = TRUE, plot = TRUE,
                                           explain = FALSE)
            expect_equal(
              head(socioGrid$class),
              c(6, 5, 3, 1, 2, 6))
          })
# TODO: add accuracy tests (cluster quality), not just snapshot equality.
|
60e844fdf7e98bc0c8e3cb3d19f70959550e9608
|
351a143adc1d7f9c5f424c0bf520667a41f5507d
|
/man/predict.coco.Rd
|
853a731ed539fbdcc081c5e88fa09e91c6310222
|
[
"MIT"
] |
permissive
|
GreenwoodLab/BDcocolasso
|
ade5a59890410338e063fc971e2bedc2eef3aba8
|
29b3860ac5172737bc40d97b0091e129628ddb9e
|
refs/heads/master
| 2021-11-11T02:26:15.690312
| 2021-11-01T00:10:27
| 2021-11-01T00:10:27
| 228,453,442
| 0
| 0
|
NOASSERTION
| 2020-04-10T03:32:39
| 2019-12-16T18:50:43
| null |
UTF-8
|
R
| false
| true
| 1,597
|
rd
|
predict.coco.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{predict.coco}
\alias{predict.coco}
\title{Make predictions from a coco object}
\usage{
\method{predict}{coco}(
object,
newx,
s = NULL,
lambda.pred = NULL,
type = c("response", "coefficients"),
...
)
}
\arguments{
\item{object}{Fitted \code{coco} model object}
\item{newx}{matrix of new values for \code{x} at which predictions are to be
made. Do not include the intercept (this function takes care of that). Must
be a matrix. This argument is not used for \code{type=c("coefficients")}. This
matrix must have the same number of columns originally supplied to the \code{coco}
fitting function.}
\item{s}{Value(s) of the penalty parameter \code{lambda} at which predictions
are required. Default is the entire sequence used to create the model.}
\item{lambda.pred}{Value(s) of the penalty parameter \code{lambda} at which coefficients
are extracted to calculate the response based on the matrix of new values \code{newx}.
Default is \code{lambda.sd}.}
\item{type}{Type of prediction required. Type \code{"coefficients"} computes the coefficients
at the requested values for \code{s}. Type \code{response} computes the response based
on the covariates values in \code{newx}, for coefficients corresponding to \code{lambda} value
in \code{lambda.pred}}
\item{...}{currently ignored}
}
\value{
The object returned depends on type.
}
\description{
Similar to other predict methods, this function predicts fitted
values, logits, coefficients and more from a fitted \code{coco} object.
}
|
22ae995e0d334103c97b6e8cb8f1696f2a7259c8
|
b375db95fc50eee5368d5e8c6694d65658aaa88c
|
/ui.R
|
c12fa88b6f48be4f621fc9ccc2bbc7fa84b540bb
|
[] |
no_license
|
rcomyn/Moneyball
|
4167cca11c0a23cb792d77b7221390ef2681c604
|
d3577841f580b32219df1473ac113b74b6a5a264
|
refs/heads/master
| 2021-01-10T05:01:22.674794
| 2015-09-24T00:34:14
| 2015-09-24T00:34:14
| 43,035,021
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,839
|
r
|
ui.R
|
library(shiny)
# Shiny UI for the "Moneyball" app: sliders for a team's offensive statistics
# feed a regression model (server side) that estimates runs scored plus a 95%
# confidence interval.
fluidPage(
  tags$head(
    # Style the text outputs (estimate and CI) in bold navy.
    tags$style(HTML("
                    .shiny-text-output {
                    color:navy;
                    font-size:large;
                    font-weight:bold;
                    }
                    "))
  ),
  headerPanel(
    h1("MONEYBALL", style='color:navy')
  ),
  h3("Estimating Runs Scored by a Baseball Team", style='color:navy'),
  p(paste("This application uses a regression model built with data",
          "from 2000 - 2008 for every major-league baseball team.",
          "It applies this model, which predicts the number",
          "of runs scored by a team. The application also gives",
          "the 95% confidence interval of the prediction.")
  ),
  p(paste("Use the sliders to adjust the inputs, such as singles,",
          "doubles, etc. Notice the effects on the number of runs scored",
          "by the team and the resulting confidence interval for the prediction.")
  ),
  sidebarLayout(
    sidebarPanel(
      # Two columns of sliders; default values are a plausible season line.
      fluidRow(
        column(6,
               sliderInput("singles", "Singles:",
                           min = 0, max = 5000, value = 995, step = 1),
               sliderInput("doubles", "Doubles:",
                           min = 0, max = 1500, value = 309, step = 1),
               sliderInput("triples", "Triples:",
                           min = 0, max = 100, value = 34, step = 1),
               sliderInput("homeruns", "Home Runs:",
                           min = 0, max = 600, value = 236, step = 1)
        ),
        column(6,
               sliderInput("walks", "Walks:",
                           min = 0, max = 2000, value = 608, step = 1),
               sliderInput("hitbypitch", "Hit by Pitch:",
                           min = 0, max = 300, value = 93, step = 1),
               sliderInput("sacrificeflies", "Sacrifice Flies:",
                           min = 0, max = 150, value = 52, step = 1),
               sliderInput("stolenbases", "Stolen Bases:",
                           min = 0, max = 150, value = 47, step = 1),
               sliderInput("caughtstealing", "Caught Stealing:",
                           min = 0, max = 150, value = 43, step = 1)
        )
      ), width = 8
    ),
    mainPanel(
      # Outputs rendered by the server: point estimate and its 95% CI.
      fluidRow(
        column(12,
               h4('Estimated runs scored based on your input:'),
               textOutput('estimatedRuns'),
               br(),
               h4('95% confidence interval:'),
               textOutput('ci')
        )
      ), width=4
    )),
  br(),
  p(style='color:purple',"Data and data model from ",
    a("R in a Nutshell",
      href="http://web.udl.es/Biomath/Bioestadistica/R/Manuals/r_in_a_nutshell.pdf"),
    "by Joseph Adler, 2010.")
)
|
95556f769c9c346b28c25c393432ccc524e0a1a2
|
3e5c13f5544298f28d78e7d6251d39410e70165e
|
/man/pca.reg2.Rd
|
29c5df613f3ee2ae7dc60be5bf2d90c43a889b68
|
[] |
no_license
|
branchlizard/ordiR
|
1288f45b3a2a461a1c2b6f058892e64fd33d6718
|
1aef23e72f33af1aece18a2b0e601f36e8f95be5
|
refs/heads/master
| 2020-06-10T09:41:45.914977
| 2015-08-27T20:58:48
| 2015-08-27T20:58:48
| 75,973,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,378
|
rd
|
pca.reg2.Rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/pca.reg2.R
\name{pca.reg2}
\alias{pca.reg2}
\title{Linear Regression on PCA objects 2}
\usage{
pca.reg2(pca.object, sp.name, group, plot = TRUE)
}
\arguments{
\item{pca.object}{PCA object from rda function (vegan).}
\item{sp.name}{Name of species vector of interest.}
\item{group}{Name of vector to be used to specify groups within the data.}
\item{plot}{Plot the regression lines Defaults to TRUE.}
}
\description{
This function allows one to perform a linear regression on groups of
PCA points to determine the trajectory or trend of points within a specified group.
The regression lines for each group are then used to determine the correlation values between these lines and a specified species vector.
The output is a data frame containing correlation values for each group when compared to the specified species vector.
}
\examples{
data(ffg.cast)
ffg.hel <- decostand(ffg.cast[,-1:-4], 'hellinger')
pca.hel <- rda(ffg.hel)
pca.hel.scores <- scores(pca.hel, display='sites', scaling=1)
pca.hel.plot <- pca.plot(pca.hel, ffg.cast$CU, ffg.cast$CU, ffg.hel, cex=1.5)
text(pca.hel.scores, labels=ffg.cast$YR.POSTREST, pos=3)
lines(pca.hel.scores[-4:-6,], col='red')
lines(pca.hel.scores[-1:-3,], col='green')
pca.reg2(pca.hel, 'SHREDDER', ffg.cast$CU, plot=TRUE)
}
\keyword{vegan}
|
7a8ee6273b4f79a4e1a5b5d2e052a7957088ea53
|
56a98c60765e9c2df99061666760285d1a492c29
|
/srs-cran/src/models/arima/ArimaModelPrices.R
|
f96b7a3deadf2e9f6002e45d5113aef04361b1cf
|
[] |
no_license
|
ajinkya-github/stocksimulation
|
52263a7ab03031b9f426751e05871f70cdb0b871
|
1ffc092495534c58e42f3338e05fb31f58a611f2
|
refs/heads/master
| 2021-05-05T15:41:25.320326
| 2018-01-14T10:01:20
| 2018-01-14T10:01:20
| 117,318,030
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,198
|
r
|
ArimaModelPrices.R
|
# Predict per-day price-change values by combining a sliding-window ARIMA
# forecast of daily returns with classifier signals (buy/hold/sell).
#
# Author: ajinkya
library(forecast)

# ret               : data frame with a `daily.returns` column (one row per day).
# predictionsignals : character vector of "buy"/"hold"/"sell", one per row of `ret`.
#
# For every row a 31-observation window of returns (rows start..start+30) is
# fit with auto.arima() and a one-step-ahead forecast is recorded, giving five
# columns: lower 80/95 bounds, upper 80/95 bounds, and the point forecast.
# The classifier signal then picks which part of the forecast range to use:
#   buy  -> midpoint of the two upper bounds  (optimistic)
#   hold -> midpoint of lower-95 and upper-80 (central)
#   sell -> midpoint of the two lower bounds  (pessimistic)
# Returns a numeric vector (one value per row); NA where the signal is not
# one of buy/hold/sell.
arimaPricePrediction <- function(ret, predictionsignals)
{
  winsize <- 30
  pred <- NULL
  start <- 1
  # One forecast per row of `ret`. (The original loop header was
  # `1:nrow(ret)+1`, which parses as 2:(n+1) but still runs n times since
  # `i` is unused; seq_len makes the iteration count explicit.)
  # NOTE(review): windows near the end of the series index past nrow(ret)
  # and pick up NA rows, as in the original -- confirm whether trailing
  # windows should be truncated instead.
  for (i in seq_len(nrow(ret)))
  {
    index <- start + winsize
    window <- ret[start:index, ]
    x <- as.ts(window$daily.returns)
    fit <- auto.arima(x)
    f <- forecast(fit, h = 1)
    # Columns: 1 = lower80, 2 = lower95, 3 = upper80, 4 = upper95, 5 = mean.
    predictedrange <- cbind(f$lower, f$upper, f$mean)
    pred <- rbind(pred, predictedrange)
    start <- start + 1
  }
  ret1 <- as.data.frame(cbind(pred, ret))
  # Round the four interval-bound columns.
  for (i in 1:4)
  {
    ret1[, i] <- round(ret1[, i])
  }
  # NA (rather than a leftover scalar) marks rows whose signal is unmatched.
  predicted.price.percent <- rep(NA_real_, nrow(ret1))
  # Blend the ARIMA forecast interval with the classifier signal.
  for (i in seq_len(nrow(ret1)))
  {
    if (predictionsignals[i] == "buy")
    {
      predicted.price.percent[i] <- (ret1[i, 3] + ret1[i, 4]) / 2
    }
    if (predictionsignals[i] == "hold")
    {
      predicted.price.percent[i] <- (ret1[i, 2] + ret1[i, 3]) / 2
    }
    if (predictionsignals[i] == "sell")
    {
      predicted.price.percent[i] <- (ret1[i, 1] + ret1[i, 2]) / 2
    }
  }
  return(predicted.price.percent)
}
|
cf7f805d295b60c79298314ae2a144c16c1cd906
|
786974da78a3df3cf58149a006153e5a22682fbb
|
/R/make_f_alldata.R
|
7ec1455b77f8bb8447321d810a25f7408804ff91
|
[] |
no_license
|
emjosephs/qxtools
|
9954405144ad03685f97ffc17312119d21953d1e
|
96e7f94980d62efdbc401a1e0dd1670c21ab4d02
|
refs/heads/master
| 2021-04-28T08:14:36.354221
| 2018-03-06T23:04:21
| 2018-03-06T23:04:21
| 122,244,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 689
|
r
|
make_f_alldata.R
|
## Builds a kinship (F) matrix from complete allele-frequency data: rows are
## individuals, columns are loci, values are frequencies (not allele counts).
## Mean-centring costs one degree of freedom, so the centring matrix drops
## one individual (output is (n-1) x (n-1)). Monomorphic loci would get an
## infinite 1/(p(1-p)) weight and are zeroed out instead.
make_f_alldata <- function(myG){
  p_bar <- colMeans(myG)
  w <- 1 / (p_bar * (1 - p_bar))
  w <- replace(w, w == Inf, 0)          # silence monomorphic loci
  n_loci <- ncol(myG)
  n_ind <- nrow(myG)
  # Diagonal locus-weight matrix.
  S <- matrix(0, nrow = n_loci, ncol = n_loci)
  diag(S) <- w
  # Centring matrix that simultaneously drops the last individual.
  Tm <- matrix(data = -1 / n_ind, nrow = n_ind - 1, ncol = n_ind)
  diag(Tm) <- (n_ind - 1) / n_ind
  (1 / (n_loci - 1)) * Tm %*% myG %*% S %*% t(myG) %*% t(Tm)
}
|
8820b349a9da925173bc6e7b160c5c74b9de0b78
|
c2e9d1608fcd5257a0600e1fb0ce5a9fd928ef64
|
/Chapter5_knn_cv.R
|
ab62209ffe69890026ce628b2c2ea9bc7931a19a
|
[] |
no_license
|
chandrabanerjee/StatisticalLearning
|
d7ecdaf2dc9c1049b3649c13349f00e78eb0010b
|
07490f4abbc7289ba542783884714dc4a788ff5d
|
refs/heads/master
| 2021-06-14T23:28:49.948307
| 2017-03-01T06:27:50
| 2017-03-01T06:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,218
|
r
|
Chapter5_knn_cv.R
|
## This script uses the caret package to estimate a cross validated KNN classifier using the Auto dataset.
## It tunes k three ways -- repeated k-fold CV, plain k-fold CV and LOOCV --
## and reports test-set confusion matrices and error rates.
library("caret")
library("ISLR")
library("ggplot2")
library("MASS")
library("class")
library("gridExtra")
library("doMC")
# Parallel backend used by caret's resampling loops.
registerDoMC(cores = 4)
data(Auto)
View(Auto)
# Binary response: mpg01 = 1 if mpg is above the median, 0 if below.
# NOTE(review): rows where mpg exactly equals the median match neither
# condition and stay NA -- confirm no such rows exist in Auto.
Auto$mpg01[Auto$mpg < median(Auto$mpg)] = 0
Auto$mpg01[Auto$mpg > median(Auto$mpg)] = 1
Auto$mpg01 = as.factor(Auto$mpg01)
Auto$cylinders = as.factor(Auto$cylinders)
Auto$year = as.factor(Auto$year)
# Stratified 75/25 train/test split on the response.
set.seed(107)
train_ind = createDataPartition(y = Auto$mpg01, p = 0.75, list = FALSE)
Auto.train = Auto[train_ind, ]
Auto.test = Auto[-train_ind, ]
# Drop columns 1, 8 and 9 (mpg and, presumably, origin and name in ISLR's
# Auto -- verify) so only the predictors plus mpg01 remain.
Auto.train = Auto.train[, -c(1,8,9)]
Auto.test = Auto.test[, -c(1,8,9)]
# knn using caret: repeated k-fold CV (3 repeats), 20 candidate k values,
# predictors centred and scaled inside each resample.
set.seed(400)
ptm = proc.time()
ctrl = trainControl(method = "repeatedcv", repeats = 3)
knnfit = train(mpg01 ~ ., data = Auto.train, method = "knn", trControl = ctrl, preProcess = c("center", "scale"), tuneLength = 20)
proc.time() - ptm   # elapsed tuning time
knnfit
plot(knnfit)
knnpred = predict(knnfit, newdata = Auto.test)
confusionMatrix(knnpred, Auto.test$mpg01)
TeER.knn = mean(knnpred != Auto.test$mpg01)   # test error rate
# Choose own k values (k = 1..10) via an explicit tuning grid.
ptm = proc.time()
ctrl = trainControl(method = "repeatedcv", repeats = 3)
knngrid = expand.grid(k = 1:10)
knnfit = train(mpg01 ~ ., data = Auto.train, method = "knn", trControl = ctrl, preProcess = c("center", "scale"), tuneGrid= knngrid)
proc.time() - ptm
knnfit
plot(knnfit)
knnpred = predict(knnfit, newdata = Auto.test)
confusionMatrix(knnpred, Auto.test$mpg01)
TeER.knn = mean(knnpred != Auto.test$mpg01)
#Experiment with different cross validation methods:
#Just k-fold cross validation (single pass, no repeats)
ptm = proc.time()
ctrl = trainControl(method = "cv")
knnfit = train(mpg01 ~ ., data = Auto.train, method = "knn", trControl = ctrl, preProcess = c("center", "scale"), tuneLength = 20)
proc.time() - ptm
knnfit
plot(knnfit)
#LOOCV (leave-one-out; slowest of the three schemes)
ptm = proc.time()
ctrl = trainControl(method = "LOOCV")
knnfit = train(mpg01 ~ ., data = Auto.train, method = "knn", trControl = ctrl, preProcess = c("center", "scale"), tuneLength = 20)
proc.time() - ptm
knnfit
plot(knnfit)
knnpred = predict(knnfit, newdata = Auto.test)
confusionMatrix(knnpred, Auto.test$mpg01)
TeER.knn = mean(knnpred != Auto.test$mpg01)
|
de9d45968450602bc747e965fc2d5cfaf9af2cf4
|
6c57bfb022993ec61d0af3593ce117a1efff6022
|
/R/plot_functions.R
|
70712ec8f4fdc56c26aca13301fb988dae3c1424
|
[] |
no_license
|
anuj-kapil/driveralertness1
|
b090f3f4f1c087463b7159d3d1e85809b84e62ca
|
a6dab9b48e328d4d5413296eeed97dd22ae09889
|
refs/heads/master
| 2020-09-06T13:37:38.585699
| 2019-11-08T11:22:55
| 2019-11-08T11:22:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
plot_functions.R
|
#' Histogram and box plot of one variable
#'
#' Draws a histogram and then a box plot of column `x` of `dat` on the
#' current graphics device (the caller controls panel layout, e.g. via
#' `par(mfrow = ...)`).
#'
#' @param dat A data frame.
#' @param x Name of the column to plot.
#'
#' @return The value returned by `boxplot()`.
#' @export
#'
#' @examples
#' # hist_box_plots(mtcars, "mpg")
hist_box_plots <- function(dat, x){
  values <- dat[, x]
  hist(values, main = paste0("Histogram of ", x), xlab = x,
       ylab = "Frequency", col = "blue")
  boxplot(values, main = paste0("Boxplot of ", x), xlab = x, col = "blue")
}
#' Overlaid histograms of one variable from two datasets
#'
#' Draws the histogram of column `x` from `dat_a`, overlays the histogram of
#' the same column from `dat_b`, and adds a legend labelling the two groups
#' "Alert" and "Not Alert".
#'
#' @param dat_a A data frame supplying the first (blue) histogram.
#' @param dat_b A data frame supplying the overlaid (red) histogram.
#' @param x Name of the column to plot.
#'
#' @return nothing useful; called for its plotting side effect.
#' @export
#'
#' @examples
#' # stacked_bar_plots(subset(mtcars, am == 0), subset(mtcars, am == 1), "mpg")
stacked_bar_plots <- function(dat_a, dat_b, x){
  # Semi-transparent colours so the overlap region stays visible.
  col_a <- rgb(.5, .8, 1, 0.5)
  col_b <- rgb(1, .5, .4, .5)
  hist(dat_a[, x], main = paste0("Stacked Histogram of ", x), xlab = x,
       ylab = "Frequency", col = col_a)
  hist(dat_b[, x], col = col_b, add = TRUE)
  legend("topright", c("Alert", "Not Alert"), col = c(col_a, col_b), lwd = 10)
  box()
}
|
9516bc1f9a0eb44cc748d4d105d1f61118c60f62
|
35dd79ae3daa40a05be19f14f26e0884dc639b36
|
/manuscript/SuperExactTest/SuperExactTest.r
|
6b57111f1c99a9d1a708f865f8c131517f0c8483
|
[] |
no_license
|
fazekasda/HSF1base
|
816569b18e106324ff46f63ccc214a3251900dc4
|
76f558637a95a66308ec0ec62b347001378ad35f
|
refs/heads/master
| 2021-06-22T10:48:24.273300
| 2020-12-18T11:43:19
| 2020-12-18T11:43:19
| 205,836,167
| 0
| 0
| null | 2021-03-26T00:45:53
| 2019-09-02T10:51:10
|
HTML
|
UTF-8
|
R
| false
| false
| 1,222
|
r
|
SuperExactTest.r
|
#install.packages("SuperExactTest")
#install.packages("openxlsx", dependencies = TRUE)
#install.packages("dplyr")
#install.packages("tidyverse")
library(SuperExactTest)
library(dplyr)
library(openxlsx)   # was require(); library() fails loudly when missing
library(tidyverse)

# Run a SuperExactTest multi-set intersection analysis for one workbook.
#
# Reads sheet 1 of `xlsx_file` (no header; column X1 = element id, column
# X2 = set label), tests all set intersections against a background of
# `background_n` elements, and writes a plain-text report to `out_file`.
# Output is identical to the original copy-pasted stanzas.
run_supertest_report <- function(xlsx_file, out_file, background_n) {
  sheet <- read.xlsx(xlsx_file, sheet = 1, startRow = 1, colNames = FALSE)
  set_labels <- unique(sheet$X2)
  # One element vector per set label (the unused second lambda argument of
  # the original lapply has been dropped).
  sets <- lapply(set_labels, function(lab) sheet[sheet$X2 == lab, ]$X1)
  result <- supertest(sets, n = background_n)
  sink(out_file)
  on.exit(sink(file = NULL), add = TRUE)  # restore console even on error
  print(xlsx_file)
  print("Sets:")
  print(set_labels)
  print("Result:")
  print(summary(result))
  invisible(result)
}

run_supertest_report("Fig1_Venn_David.xlsx", "Fig1_Venn_David.txt", 19608)
run_supertest_report("Fig2_Venn_David.xlsx", "Fig2_Venn_David.txt", 20993)
run_supertest_report("Fig3_David_Venn.xlsx", "Fig3_David_Venn.txt", 19608)
|
7a683c009e43456ffb72435de1b44d161875ae4d
|
9121a31d34a3ea5ec930a7ccf87b17f3591488d9
|
/docs/articles/advanced_features.R
|
68d6b5bc99926389265eb8c0e293173ba01800c0
|
[
"MIT"
] |
permissive
|
amirmasoudabdol/DeclareDesign
|
eafe1d2adf0b11083e52170e735d8378b48f2cfb
|
239249f06687f092d291753349c0eb451783ef42
|
refs/heads/master
| 2020-03-13T13:15:57.346368
| 2018-04-21T19:13:14
| 2018-04-21T19:13:14
| 131,135,067
| 0
| 0
| null | 2018-04-26T09:48:22
| 2018-04-26T09:48:21
| null |
UTF-8
|
R
| false
| false
| 3,646
|
r
|
advanced_features.R
|
# Purled R code from a DeclareDesign vignette; each "## ----" marker below
# delimits one knitr chunk of the source Rmd.
## ----echo=FALSE, warning=FALSE, message=FALSE----------------------------
set.seed(42)
library(DeclareDesign)
options(digits=2)
# Declare a 1000-unit population with income and age covariates, then step
# through each declaration by hand: draw the population, add potential
# outcomes, sample 250 units, assign 25 to treatment, and reveal Y.
my_population <-
  declare_population(N = 1000,
                     income = rnorm(N),
                     age = sample(18:95, N, replace = TRUE))
pop <- my_population()
my_potential_outcomes <- declare_potential_outcomes(
  formula = Y ~ .25 * Z + .01 * age * Z)
pop_pos <- my_potential_outcomes(pop)
my_sampling <- declare_sampling(n = 250)
smp <- my_sampling(pop_pos)
my_assignment <- declare_assignment(m = 25)
smp <- my_assignment(smp)
# Estimand: the average treatment effect over potential outcomes.
my_estimand <- declare_estimand(ATE = mean(Y_Z_1 - Y_Z_0))
smp <- reveal_outcomes(smp)
## ----echo=TRUE, results="hide"-------------------------------------------
# Design "template": the whole m-arm trial wrapped in a function of the
# population size, so variants can be instantiated with fill_out().
m_arm_trial <- function(numb){
  my_population <- declare_population(
    N = numb, income = rnorm(N), age = sample(18:95, N, replace = T))
  my_potential_outcomes <- declare_potential_outcomes(
    formula = Y ~ .25 * Z + .01 * age * Z)
  my_sampling <- declare_sampling(n = 250)
  my_assignment <- declare_assignment(m = 25)
  my_estimand <- declare_estimand(ATE = mean(Y_Z_1 - Y_Z_0))
  my_estimator_dim <- declare_estimator(Y ~ Z, estimand = my_estimand)
  my_design <- declare_design(my_population,
                              my_potential_outcomes,
                              my_estimand,
                              my_sampling,
                              my_assignment,
                              reveal_outcomes,
                              my_estimator_dim)
  return(my_design)
}
# Instantiate the template at N = 1000 (fill_out is DeclareDesign API).
my_1000_design <- fill_out(template = m_arm_trial, numb = 1000)
head(draw_data(my_1000_design))
## ----echo=FALSE----------------------------------------------------------
knitr::kable(head(draw_data(my_1000_design)))
## ----echo=TRUE, results="hide"-------------------------------------------
# Continuous treatment: potential outcomes over doses 0, 0.1, ..., 1 and a
# custom assignment handler that samples a dose uniformly for each unit.
my_potential_outcomes_continuous <- declare_potential_outcomes(
  formula = Y ~ .25 * Z + .01 * age * Z, conditions = seq(0, 1, by = .1))
continuous_treatment_function <- function(data){
  data$Z <- sample(seq(0, 1, by = .1), size = nrow(data), replace = TRUE)
  data
}
my_assignment_continuous <- declare_assignment(handler = continuous_treatment_function)
my_design <- declare_design(my_population(),
                            my_potential_outcomes_continuous,
                            my_assignment_continuous,
                            reveal_outcomes)
head(draw_data(my_design))
## ----echo=FALSE----------------------------------------------------------
knitr::kable(head(draw_data(my_design)))
## ----echo=TRUE, results="hide"-------------------------------------------
# Attrition: R is a response indicator whose probability depends on the
# untreated outcome; Y is then revealed only where R permits.
my_potential_outcomes_attrition <- declare_potential_outcomes(
  formula = R ~ rbinom(n = N, size = 1, prob = pnorm(Y_Z_0)))
my_design <- declare_design(my_population(),
                            my_potential_outcomes,
                            my_potential_outcomes_attrition,
                            my_assignment,
                            reveal_outcomes(outcome_variables = "R"),
                            reveal_outcomes(attrition_variables = "R"))
head(draw_data(my_design)[, c("ID", "Y_Z_0", "Y_Z_1", "R_Z_0", "R_Z_1", "Z", "R", "Y")])
## ----echo=FALSE----------------------------------------------------------
knitr::kable(head(draw_data(my_design)[, c("ID", "Y_Z_0", "Y_Z_1", "R_Z_0", "R_Z_1", "Z", "R", "Y")]))
## ----echo=TRUE, results="hide"-------------------------------------------
# Stochastic population size: N itself is drawn anew on every call, so three
# draws give three different row counts.
stochastic_population <- declare_population(
  N = sample(500:1000, 1), income = rnorm(N), age = sample(18:95, N, replace = TRUE))
c(nrow(stochastic_population()),
  nrow(stochastic_population()),
  nrow(stochastic_population()))
|
369d86b0153bcebb0aabd64e9ff7b237cec2d85c
|
06616d126b280a447a75b4ceb300d61e1986170b
|
/R/bioclimvars.R
|
083f146d8c2c9a5f9b2ff8ba58dc390502d433c2
|
[] |
no_license
|
ilyamaclean/climvars
|
8cdfd691e5b3e498f6f1742dea8679cf4999c0f8
|
de73840be2e731758526955b97244678ebc0260e
|
refs/heads/master
| 2021-07-07T17:00:33.740062
| 2019-06-17T19:26:53
| 2019-06-17T19:26:53
| 178,940,000
| 1
| 5
| null | 2020-09-12T08:08:50
| 2019-04-01T20:15:23
|
R
|
UTF-8
|
R
| false
| false
| 49,171
|
r
|
bioclimvars.R
|
#' bio1: Calculates mean annual temperature
#'
#' @description `bio1` calculates mean annual temperature from a vector of
#' temperature records.
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' Ignored if method unspecified.
#' @param method an optional character string describing the method used to
#' calculate mean annual temperature. Options are "anuclim", "dailymaxmin" or unspecified
#' (see details).
#' @return a single numeric value of mean annual temperature.
#' @export
#'
#' @details With method "anuclim", temperatures are aggregated to monthly
#' means and spline-interpolated to weekly values before averaging,
#' replicating the method used by http://www.worldclim.org/. With method
#' "dailymaxmin", daily means are taken as (daily maximum + daily minimum) / 2
#' and then averaged. Otherwise the plain mean of `temps` is returned. Data
#' spanning more than one year trigger a warning and are pooled: "anuclim"
#' aggregates by unique month irrespective of year, while the other methods
#' use all values.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio1(temps, tme)
#' bio1(temps, tme, method = "anuclim")
#' bio1(temps, tme, method = "dailymaxmin")
#'
bio1 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(temps, by = list(tme$mon), mean, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    mean(weekly, na.rm = TRUE)
  } else if (method == "dailymaxmin") {
    if (length(unique(tme$year)) > 1) warnb()
    day_max <- aggregate(temps, by = list(tme$yday), max, na.rm = TRUE)$x
    day_min <- aggregate(temps, by = list(tme$yday), min, na.rm = TRUE)$x
    mean((day_max + day_min) / 2, na.rm = TRUE)
  } else {
    if (length(unique(tme$year)) > 1) warna()
    mean(temps, na.rm = TRUE)
  }
}
#' bio2: Calculates mean annual diurnal temperature range
#'
#' @description `bio2` calculates the mean annual diurnal temperature range
#' (average of daily maximum minus daily minimum).
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method an optional character string describing the method used to
#' calculate mean annual diurnal temperature range. Options are "anuclim" or
#' unspecified (see details).
#'
#' @return a single numeric value of mean diurnal temperature range.
#' @export
#'
#' @details With method "anuclim", monthly maxima and minima are spline
#' interpolated to weekly values and the mean weekly range returned,
#' replicating the method used by http://www.worldclim.org/; data spanning
#' more than one year are aggregated by unique month irrespective of year.
#' If unspecified and the time interval is at most daily, the mean of the
#' daily (maximum - minimum) values is returned. Data coarser than daily are
#' automatically switched to the "anuclim" method with a warning.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio2(temps, tme)
#' bio2(temps, tme, method = "anuclim")
#'
bio2 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  # A sampling interval coarser than daily cannot resolve a diurnal range
  # directly, so fall back to the anuclim approximation.
  step_secs <- as.numeric(tme[2]) - as.numeric(tme[1])
  if (step_secs > 86400 & method != "anuclim") {
    warning ("time interval > daily. Using anuclim method")
    method <- "anuclim"
  }
  if (method == "anuclim") {
    if (length(unique(tme$year)) > 1) warna()
    month_max <- aggregate(temps, by = list(tme$mon), max, na.rm = TRUE)$x
    month_min <- aggregate(temps, by = list(tme$mon), min, na.rm = TRUE)$x
    week_max <- spline(month_max, n = length(month_max) / 12 * 52)$y
    week_min <- spline(month_min, n = length(month_min) / 12 * 52)$y
    mean((week_max - week_min), na.rm = TRUE)
  } else {
    if (length(unique(tme$year)) > 1) warnb()
    day_max <- aggregate(temps, by = list(tme$yday), max, na.rm = TRUE)$x
    day_min <- aggregate(temps, by = list(tme$yday), min, na.rm = TRUE)$x
    mean(day_max - day_min, na.rm = TRUE)
  }
}
#' bio4: Calculates temperature seasonality
#'
#' @description `bio4` calculates temperature seasonality as the coefficient
#' of variation of temperature over the year.
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method an optional character string describing the method used to
#' calculate temperature seasonality. Options are "anuclim" or unspecified
#' (see details).
#'
#' @return a single numeric value representing annual temperature seasonality.
#' @export
#'
#' @details With method "anuclim", monthly mean temperatures are spline
#' interpolated to weekly values and the standard deviation of the weekly
#' means is expressed as a percentage of their mean; data spanning more than
#' one year are aggregated by unique month irrespective of year. Otherwise
#' the standard deviation of all `temps` values as a percentage of their mean
#' is returned, pooled across years when more than one year is supplied.
#' Note: values are used exactly as supplied; no Kelvin conversion is
#' applied here.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio4(temps, tme)
#' bio4(temps, tme, method = "anuclim")
bio4 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(temps, by = list(tme$mon), mean, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    sd(weekly, na.rm = TRUE) / mean(weekly, na.rm = TRUE) * 100
  } else {
    if (length(unique(tme$year)) > 1) warnb()
    sd(temps, na.rm = TRUE) / mean(temps, na.rm = TRUE) * 100
  }
}
#' bio5: Calculates maximum temperature of the warmest period of the year
#'
#' @description `bio5` calculates the maximum weekly or monthly temperature
#' in each year.
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method an optional character string describing the method to calculate
#' maximum temperature in the warmest period. Options are "anuclimmean",
#' "anuclimmax" or unspecified (see details).
#'
#' @return a single numeric value of maximum temperature (weekly or monthly) for each year.
#' @export
#'
#' @details With method "anuclimmean", monthly mean temperatures are spline
#' interpolated to weekly values and the maximum weekly value returned. With
#' method "anuclimmax", monthly maxima are interpolated instead. If
#' unspecified, the maximum of all `temps` values is returned. Data spanning
#' more than one year trigger a warning and are pooled.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio5(temps, tme)
#' bio5(temps, tme, method = "anuclimmean")
#' bio5(temps, tme, method = "anuclimmax")
#'
bio5 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method %in% c("anuclimmean", "anuclimmax")) {
    if (length(unique(tme$year)) > 1) warna()
    # Both anuclim variants differ only in the monthly aggregation function.
    agg_fun <- if (method == "anuclimmean") mean else max
    monthly <- aggregate(temps, by = list(tme$mon), agg_fun, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    max(weekly, na.rm = TRUE)
  } else {
    if (length(unique(tme$year)) > 1) warnb()
    max(temps, na.rm = TRUE)
  }
}
#' bio6: Calculates minimum temperature of the coldest period of the year
#'
#' @description `bio6` calculates the minimum temperature value across the
#' year.
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method an optional character string describing the method to calculate
#' minimum temperature. Options are "anuclimmean", "anuclimmin" or unspecified
#' (see details).
#'
#' @return a single numeric value of minimum temperature for the defined period.
#' @export
#'
#' @details With method "anuclimmean", monthly mean temperatures are spline
#' interpolated to weekly values and the minimum weekly value returned. With
#' method "anuclimmin", monthly minima are interpolated instead. If
#' unspecified, the minimum of all `temps` values is returned. Data spanning
#' more than one year trigger a warning and are pooled.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio6(temps, tme)
#' bio6(temps, tme, method = "anuclimmean")
#' bio6(temps, tme, method = "anuclimmin")
bio6 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method %in% c("anuclimmean", "anuclimmin")) {
    if (length(unique(tme$year)) > 1) warna()
    # Both anuclim variants differ only in the monthly aggregation function.
    agg_fun <- if (method == "anuclimmean") mean else min
    monthly <- aggregate(temps, by = list(tme$mon), agg_fun, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    min(weekly, na.rm = TRUE)
  } else {
    if (length(unique(tme$year)) > 1) warnb()
    min(temps, na.rm = TRUE)
  }
}
#' bio7: Annual temperature range
#'
#' @description `bio7` is used to calculate the annual range in temperature.
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method An optional character string describing the method used to
#' calculate maximum and minimum temperatures. Options are "anuclimmean",
#' "anuclimmaxmin" or unspecified (see details).
#'
#' @return a single numeric value of annual temperature range (maximum minus
#' minimum temperature).
#' @export
#'
#' @details If method is "anuclimmean", mean monthly temperatures are spline
#' interpolated to a weekly time period and the range calculated from the
#' maximum and minimum weekly values. If method is "anuclimmaxmin", maximum
#' and minimum monthly temperatures are spline interpolated to a weekly time
#' period and the range calculated as (maximum weekly maximum) - (minimum
#' weekly minimum). If left unspecified, the range is the difference between
#' the maximum and minimum of all `temps` values. With matching methods this
#' equals bio5(temps, tme) - bio6(temps, tme).
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio7(temps, tme)
#' bio7(temps, tme, method = "anuclimmean")
#' bio7(temps, tme, method = "anuclimmaxmin")
#'
bio7 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE)))
    tanr <- NA
  else {
    if (method == "anuclimmean") {
      if (length(unique(tme$year)) > 1) warna()
      tmth <- aggregate(temps, by = list(tme$mon), mean, na.rm = TRUE)$x
      twk <- spline(tmth, n = length(tmth) / 12 * 52)$y
      tanr <- max(twk, na.rm = TRUE) - min(twk, na.rm = TRUE)
    } else if (method == "anuclimmaxmin") {
      if (length(unique(tme$year)) > 1) warna()
      tmthmx <- aggregate(temps, by = list(tme$mon), max, na.rm = TRUE)$x
      tmthmn <- aggregate(temps, by = list(tme$mon), min, na.rm = TRUE)$x
      # BUG FIX: the spline lengths previously referenced `tmth`, which is
      # undefined in this branch and caused an object-not-found error.
      twkmx <- spline(tmthmx, n = length(tmthmx) / 12 * 52)$y
      twkmn <- spline(tmthmn, n = length(tmthmn) / 12 * 52)$y
      tanr <- max(twkmx, na.rm = TRUE) - min(twkmn, na.rm = TRUE)
    }
    else {
      # NOTE(review): sibling functions call warnb() in their default branch;
      # warna() retained here pending confirmation of the intended warning.
      if (length(unique(tme$year)) > 1) warna()
      tanr <- max(temps, na.rm = TRUE) - min(temps, na.rm = TRUE)
    }
  }
  tanr
}
#' bio3: Calculates isothermality
#'
#' @description `bio3` is used to calculate isothermality (day-to-night
#' temperature oscillations relative to annual oscillations).
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method An optional character string describing the method used to
#' calculate isothermality. Options include "anuclimmean", "anuclimmaxmin" or
#' unspecified (see details).
#'
#' @return a single numeric value representing isothermality for each year.
#' @export
#'
#' @details Isothermality is the ratio of the mean diurnal temperature range
#' ([bio2()]) to the annual temperature range ([bio7()]).
#'
#' If method is "anuclimmean", bio2 is computed with its "anuclim" method and
#' bio7 with its "anuclimmean" method. If method is "anuclimmaxmin", bio2 is
#' computed with its "anuclim" method and bio7 with its "anuclimmaxmin"
#' method. With either anuclim-style method, data spanning more than one year
#' are aggregated by unique month irrespective of year and one value
#' returned.
#'
#' If method is left unspecified, bio2 is calculated from daily maxima and
#' minima and bio7 from the overall maximum and minimum; data spanning more
#' than one year are pooled and a single value returned.
#'
#' @seealso [bio2()] and [bio7()] for calculating diurnal and annual temperature ranges.
#' [tmecreate()] for creating a 'POSIXlt' object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 24)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio3(temps, tme)
#' bio3(temps, tme, method = "anuclimmean")
#' bio3(temps, tme, method = "anuclimmaxmin")
#'
bio3 <- function(temps, tme, method = "") {
  # sd() is NA for all-NA input or fewer than two values: nothing to compute.
  if (is.na(sd(temps, na.rm = TRUE)))
    tiso <- NA
  else {
    if (method == "anuclimmean") {
      if (length(unique(tme$year)) > 1) warna()
      # BUG FIX: bio7 was previously called with the misspelled method
      # "anuclimean", silently falling through to bio7's default branch.
      tiso <- bio2(temps, tme, "anuclim") / bio7(temps, tme, "anuclimmean")
    } else if (method == "anuclimmaxmin") {
      # BUG FIX: this branch previously tested for "anuclimmmaxmin" (triple
      # "m"), so the documented "anuclimmaxmin" option never matched.
      if (length(unique(tme$year)) > 1) warna()
      tiso <- bio2(temps, tme, "anuclim") / bio7(temps, tme, "anuclimmaxmin")
    }
    else {
      if (length(unique(tme$year)) > 1) warnb()
      tiso <- bio2(temps, tme) / bio7(temps, tme)
    }
  }
  tiso
}
#' bio8: Calculates mean temperature of Wettest quarter
#'
#' @description `bio8` is used to calculate the mean temperature in the wettest
#' quarter of the year
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme1 a `POSIXlt` object representing the date and time of each `temps` value.
#' @param tme2 a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method An optional character string describing the methods used to
#' calculate mean temperature in the wettest quarter. Options are "anuclim" or unspecified
#' (see details).
#'
#' @return a single numeric value of mean temperature of the wettest quarter of
#' the year.
#' @export
#'
#' @details If method is "anuclim", mean monthly temperature and total monthly
#' precipitation is calculated and then spline interpolated to a weekly time
#' period. Precipitation is calculated for each 13-week period and the mean
#' temperature for the wettest period returned. If data spans more than one
#' year, data are aggregated by unique month irrespective of year and one value
#' returned. If method is unspecified, the mean temperature of the wettest
#' quarter is calculated using all `temps` values and precipitation per quarter
#' is calculated using the time interval for measurements. If data span more
#' than one year, calculations are performed on all data and a single value
#' returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme1 <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme1), type = "l", xlab = "Month", ylab = "Temperature")
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme2 <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme2), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio8(temps, prec, tme1, tme2)
#' bio8(temps, prec, tme1, tme2, method = "anuclim")
#'
bio8 <- function(temps, prec, tme1, tme2, method = "") {
  # Either series being all-NA (or too short for sd) makes the statistic
  # undefined.
  if (is.na(sd(prec, na.rm = TRUE)) | is.na(sd(temps, na.rm = TRUE)))
    twet <- NA
  else {
    if (method == "anuclim") {
      if (length(unique(tme1$year)) > 1) warna()
      if (length(unique(tme2$year)) > 1) warna()
      # Monthly mean temperature, spline-interpolated to 52 weekly values.
      tmth <- aggregate(temps, by = list(tme1$mon), mean, na.rm = TRUE)$x
      twk <- spline(tmth, n = length(tmth) / 12 * 52)$y
      # Monthly precipitation totals interpolated to weekly values; the
      # * 12 / 52 factor rescales monthly sums to weekly sums.
      pmth <- aggregate(prec, by = list(tme2$mon), sum, na.rm = TRUE)$x
      pwk <- spline(pmth, n = length(pmth) / 12 * 52)$y * 12 / 52
      pwk[which(pwk < 0)] <- 0  # spline overshoot can produce negative rain
      # Rolling 13-week precipitation total, wrapping around the year end.
      qtr <- function(i) {
        ptw <- c(pwk, pwk)
        psu <- sum(ptw[i: (i + 12)], na.rm = TRUE)
        psu
      }
      wq <- sapply(c(1:length(pwk)), qtr)
      # Start week of the wettest quarter (first one in case of ties).
      i <- which(wq == max(wq, na.rm = TRUE))[1]
      twk2 <- c(twk, twk)
      twet <- mean(twk2[i:(i + 12)], na.rm = TRUE)
    }
    else {
      if (length(unique(tme1$year)) > 1) warnb()
      # Rolling precipitation total over `int` samples (~91 days), wrapping
      # around the end of the series.
      qtr <- function(i, int) {
        prec2 <- c(prec, prec)
        psu <- sum(prec2[i: (i + int)], na.rm = TRUE)
        psu
      }
      # id: sampling interval of `prec` in days.
      id <- (as.numeric(tme2[2]) - as.numeric(tme2[1])) / 86400
      # NOTE(review): 24/(24/(1/id)) reduces algebraically to 1/id, i.e.
      # samples per day -- presumably intentional; confirm.
      dd1 <- 24/(24/(1/id))
      int <- 91 / id
      wq <- sapply(c(1:length(prec)), qtr, int)
      i <- which(wq == max(wq, na.rm = TRUE))[1]
      # tid: sampling interval of `temps` in days.
      tid <-(as.numeric(tme1[2]) - as.numeric(tme1[1])) / 86400
      dd2 <- 24/(24/(1/tid))
      tint <- 91 / tid
      # Map the precipitation-series start index onto the temperature series.
      # NOTE(review): ti = i * dd1 * dd2 only lands on the matching time for
      # particular combinations of sampling rates (e.g. daily prec with
      # sub-daily temps); for equal rates it appears to overshoot -- verify.
      ti <- i*(dd1*dd2)
      tte <- c(temps, temps)
      twet <- mean(tte[ti:(ti + tint)], na.rm = TRUE)
    }
  }
  twet
}
#' bio9: Calculates mean temperature of the driest quarter
#'
#' @description `bio9` is used to calculate the mean temperature of the driest
#' quarter of the year
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme1 a `POSIXlt` object representing the date and time of each `temps` value.
#' @param tme2 a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method character string describing the method used to calculate mean temperature
#' of the driest quarter. Options are "anuclim" or unspecified (see details).
#'
#' @return a single numeric value of mean temperature of the driest quarter of
#' the year.
#' @export
#'
#' @details If method is "anuclim", mean monthly temperature values are
#' calculated and spline interpolated to a weekly time period. Precipitation
#' values are summed for all months and then spline interpolated to a weekly
#' time period. Mean temeprature of the driest 13-week period is returned.
#' Otherwise, annual precipitation values are used to calculate precipitation
#' in the driest three-month period and mean temperature in this period
#' returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme1 <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme1), type = "l", xlab = "Month", ylab = "Temperature")
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme2 <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme2), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio9(temps, prec, tme1, tme2)
#' bio9(temps, prec, tme1, tme2, method = "anuclim")
#'
bio9 <- function(temps, prec, tme1, tme2, method = "") {
  # Mean temperature of the driest quarter of the year.
  # Returns NA when either input series has no usable variance (e.g. all NA).
  if (is.na(sd(prec, na.rm = TRUE)) | is.na(sd(temps, na.rm = TRUE)))
    tdry <- NA
  else {
    if (method == "anuclim") {
      # ANUCLIM method: aggregate to months, spline-interpolate to weeks,
      # then find the driest 13-week window (windows may wrap the year end).
      if (length(unique(tme1$year)) > 1) warna()
      if (length(unique(tme2$year)) > 1) warna()
      tmth <- aggregate(temps, by = list(tme1$mon), mean, na.rm = TRUE)$x
      twk <- spline(tmth, n = length(tmth) / 12 * 52)$y
      pmth <- aggregate(prec, by = list(tme2$mon), sum, na.rm = TRUE)$x
      # Rescale interpolated monthly totals to weekly totals.
      pwk <- spline(pmth, n = length(pmth) / 12 * 52)$y * 12 / 52
      pwk[which(pwk < 0)] <- 0  # spline overshoot cannot be negative rain
      qtr <- function(i) {
        ptw <- c(pwk, pwk)  # duplicate so windows can wrap past week 52
        sum(ptw[i:(i + 12)], na.rm = TRUE)
      }
      dq <- sapply(seq_along(pwk), qtr)
      i <- which(dq == min(dq, na.rm = TRUE))[1]
      twk2 <- c(twk, twk)
      tdry <- mean(twk2[i:(i + 12)], na.rm = TRUE)
    }
    else {
      # Direct method: rolling 91-day windows on the raw series.
      if (length(unique(tme1$year)) > 1) warnb()
      qtr <- function(i, int) {
        prec2 <- c(prec, prec)  # allow windows to wrap around the year end
        sum(prec2[i:(i + int)], na.rm = TRUE)
      }
      id <- (as.numeric(tme2[2]) - as.numeric(tme2[1])) / 86400   # prec time step (days)
      int <- 91 / id                                              # prec steps per quarter
      dq <- sapply(seq_along(prec), qtr, int)
      i <- which(dq == min(dq, na.rm = TRUE))[1]
      tid <- (as.numeric(tme1[2]) - as.numeric(tme1[1])) / 86400  # temps time step (days)
      tint <- 91 / tid                                            # temps steps per quarter
      # Map the driest-quarter start index from the prec series onto the
      # temps series using the ratio of time steps, as in bio8/bio18/bio19.
      # (Fixed: the original wrote `24/(24/1/id)`, which collapses to `id`
      # rather than `1/id` — every sibling function uses `24/(24/(1/id))` —
      # so the mapping factor became id*tid and mis-placed the window
      # whenever either series was sampled sub-daily.)
      ti <- i * (id / tid)
      tte <- c(temps, temps)
      tdry <- mean(tte[ti:(ti + tint)], na.rm = TRUE)
    }
  }
  tdry
}
#' bio10: Calculates mean temperature of the Warmest quarter
#'
#' @description `bio10` is used to calculate the mean temperature of the warmest
#' quarter (three months) of the year
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method An optional character string describing the method used to
#' calculate mean temperature of the warmest quarter. Options are "anuclim" or
#' unspecified (see details).
#'
#' @return a single numeric value of mean temperature in the warmest quarter of
#' the year.
#' @export
#'
#' @details If method is "anuclim", the warmest quarter is determined to the
#' nearest week: mean monthly temperature values are calculated and spline
#' interpolated to a weekly time period, and the mean of the warmest 13-week
#' window is returned. Otherwise, the mean temperature of the warmest 3-month
#' period is calculated from annual values.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio10(temps, tme)
#' bio10(temps, tme, method = "anuclim")
#'
bio10 <- function(temps, tme, method = "") {
  # Mean temperature of the warmest quarter (13 weeks / 91 days) of the year.
  # Returns NA when the temperature series has no usable variance.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    # ANUCLIM method: monthly means spline-interpolated to weeks, then the
    # warmest 13-week window (windows may wrap around the year end).
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(temps, by = list(tme$mon), mean, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    wrapped <- c(weekly, weekly)
    window_means <- sapply(seq_along(weekly), function(k) {
      mean(wrapped[k:(k + 12)], na.rm = TRUE)
    })
    start <- which(window_means == max(window_means, na.rm = TRUE))[1]
    thot <- mean(wrapped[start:(start + 12)], na.rm = TRUE)
  } else {
    # Direct method: rolling 91-day windows on the raw series.
    if (length(unique(tme$year)) > 1) warnb()
    step_days <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
    span <- 91 / step_days
    wrapped <- c(temps, temps)
    window_means <- sapply(seq_along(temps), function(k) {
      mean(wrapped[k:(k + span)], na.rm = TRUE)
    })
    start <- which(window_means == max(window_means, na.rm = TRUE))[1]
    thot <- mean(wrapped[start:(start + span)], na.rm = TRUE)
  }
  thot
}
#' bio11: Calculates mean temperature of the coldest quarter
#'
#' @description `bio11` is used to calculate the mean temperature of the coldest
#' quarter (three months) of the year
#'
#' @param temps a vector of temperatures, normally for one year (see details).
#' @param tme a `POSIXlt` object representing the date and time of each `temps` value.
#' @param method An optional character string describing the method used to calculate
#' the mean temperature of the coldest quarter. Options are "anuclim" or unspecified (see
#' details).
#'
#' @return a single numeric value of mean temperature of the coldest quarter of
#' the year.
#' @export
#'
#' @details If method is "anuclim", mean monthly temperature values are
#' calculated and spline interpolated to a weekly time period. Mean temperature of the coldest 13-week period is
#' determined. If method is left unspecified, mean temperature of the coldest 3-month (91-day) period is
#' calculated from annual temperature values.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme <- tmecreate(2010, 6)
#' plot(temps~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Temperature")
#' bio11(temps, tme)
#' bio11(temps, tme, method = "anuclim")
#'
bio11 <- function(temps, tme, method = "") {
  # Mean temperature of the coldest quarter (13 weeks / 91 days) of the year.
  # Returns NA when the temperature series has no usable variance.
  if (is.na(sd(temps, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    # ANUCLIM method: monthly means spline-interpolated to weeks, then the
    # coldest 13-week window (windows may wrap around the year end).
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(temps, by = list(tme$mon), mean, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y
    wrapped <- c(weekly, weekly)
    window_means <- sapply(seq_along(weekly), function(k) {
      mean(wrapped[k:(k + 12)], na.rm = TRUE)
    })
    start <- which(window_means == min(window_means, na.rm = TRUE))[1]
    tcold <- mean(wrapped[start:(start + 12)], na.rm = TRUE)
  } else {
    # Direct method: rolling 91-day windows on the raw series.
    if (length(unique(tme$year)) > 1) warnb()
    step_days <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
    span <- 91 / step_days
    wrapped <- c(temps, temps)
    window_means <- sapply(seq_along(temps), function(k) {
      mean(wrapped[k:(k + span)], na.rm = TRUE)
    })
    start <- which(window_means == min(window_means, na.rm = TRUE))[1]
    tcold <- mean(wrapped[start:(start + span)], na.rm = TRUE)
  }
  tcold
}
#' bio12: Calculates total annual precipitation
#' @description `bio12` is used to calculate total precipitation in the year
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method An optional character string describing the method used to
#' calculate total annual precipitation. Options are "anuclim" or unspecified (see
#' details).
#'
#' @return a single numeric value of total annual precipitation.
#' @export
#'
#' @details If method is "anuclim", monthly precipitation values are spline
#' interpolated to a weekly time period and the total for each year returned.
#' Otherwise, all precipitation values for each year are summed.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio12(prec, tme)
#' bio12(prec, tme, method="anuclim")
#'
bio12 <- function(prec, tme, method = "") {
  # Total annual precipitation.
  # Returns NA when `prec` has no usable variance (e.g. all NA).
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  n_years <- length(unique(tme$year))
  if (method == "anuclim") {
    # ANUCLIM method: monthly totals spline-interpolated to weekly totals.
    if (n_years > 1) warna()
    monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    weekly[which(weekly < 0)] <- 0  # clamp spline overshoot
    map <- sum(weekly, na.rm = TRUE) / n_years
  } else {
    if (n_years > 1) warnb()
    map <- sum(prec, na.rm = TRUE) / n_years
  }
  map
}
#' bio13: Calculates precipitation of the wettest period
#'
#' @description `bio13` is used to calculate the precipitation of the wettest
#' week or month of the year, depending on the time step.
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method An optional character string describing how the maximum weekly or
#' monthly precipitation is calculated. Options are "week" and "month" (see
#' details).
#'
#' @return a single numeric value of total precipitation in the wettest week or month of the year.
#' @export
#'
#' @details
#' If method is "week", monthly precipitation values are spline interpolated to a
#' weekly time period and the maximum weekly precipitation is returned. If method
#' is "month", monthly precipitation values are summed and the maximum monthly
#' precipitation is returned.
#'
#' If data span more than one year, data are aggregated by unique month
#' irrespective of year and one value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio13(prec, tme, method="week")
#' bio13(prec, tme, method="month")
#' bio13(prec, tme)
#'
bio13 <- function(prec, tme, method = "week") {
  # Precipitation of the wettest period (week or month, depending on `method`).
  # Returns NA when `prec` has no usable variance.
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
  multi_year <- length(unique(tme$year)) > 1
  if (method == "month") {
    if (multi_year) warna()
    monthly[which(monthly < 0)] <- 0
    wp <- max(monthly, na.rm = TRUE)
  } else if (method == "week") {
    if (multi_year) warna()
    # Spline monthly totals down to weekly totals, then take the wettest week.
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    weekly[which(weekly < 0)] <- 0
    wp <- max(weekly, na.rm = TRUE)
  } else {
    # Any other method string: wettest calendar month, without clamping.
    if (multi_year) warnb()
    wp <- max(monthly, na.rm = TRUE)
  }
  wp
}
#' bio14: Calculates precipitation of the driest period
#'
#' @description `bio14` is used to calculate the precipitation in the driest
#' period of the year
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing how the minimum weekly
#' or monthly precipitation is calculated. Options include"week", "month" or unspecified
#' (see details).
#'
#' @return a single numeric value of total precipitation in the driest week or
#' month of the year.
#' @export
#'
#' @details If method is "week" or left unspecified, monthly precipitation values
#' are spline interpolated to a weekly time period and the minimum weekly
#' precipitation is returned. If method is "month", the minimum monthly
#' precipitation is returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio14(prec, tme)
#' bio14(prec, tme, method="week")
#' bio14(prec, tme, method="month")
#'
bio14 <- function(prec, tme, method = "week") {
  # Precipitation of the driest period (week or month, depending on `method`).
  # Returns NA when `prec` has no usable variance.
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
  multi_year <- length(unique(tme$year)) > 1
  if (method == "month") {
    if (multi_year) warna()
    monthly[which(monthly < 0)] <- 0
    dp <- min(monthly, na.rm = TRUE)
  } else if (method == "week") {
    if (multi_year) warna()
    # Spline monthly totals down to weekly totals, then take the driest week.
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    weekly[which(weekly < 0)] <- 0
    dp <- min(weekly, na.rm = TRUE)
  } else {
    # Any other method string: driest calendar month, without clamping.
    if (multi_year) warnb()
    dp <- min(monthly, na.rm = TRUE)
  }
  dp
}
#' bio15: Calculates precipitation seasonality
#'
#' @description `bio15` is used to calculate precipitation seasonality, which is
#' the standard deviation of weekly or monthly precipitation values as a
#' percentage of the mean of those values.
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing the method used to
#' calculate precipitation seasonality. Options include "anuclim" or unspecified (see
#' details).
#'
#' @return a single numeric value representing precipitation seasonality.
#' @export
#'
#' @details
#' If method is "anuclim", monthly precipitation is spline interpolated to a
#' weekly time period and precipitation seasonality calculated using these
#' values, replicating the method used by http://www.worldclim.org/. Otherwise,
#' precipitation seasonality is calculated using yearly values.
#'
#' If using `anuclim` method and data span more than one year, data are
#' aggregated by unique month irrespective of year and one value returned. If
#' method is left unspecified and data span more than one year, calculations
#' will be performed on all data and a single value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio15(prec, tme)
#' bio15(prec, tme, method = "")
#'
#'
bio15 <- function(prec, tme, method = "anuclim") {
  # Precipitation seasonality: standard deviation of precipitation values as
  # a percentage of their mean. Returns NA when `prec` has no usable variance.
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    # ANUCLIM method: monthly totals spline-interpolated to weekly totals.
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    # Non-positive interpolated values are floored at 1, as in the original
    # formulation, keeping the mean in the denominator away from zero.
    weekly[which(weekly <= 0)] <- 1
    cvp <- (sd(weekly, na.rm = TRUE) / mean(weekly, na.rm = TRUE)) * 100
  } else {
    # Unspecified method: coefficient of variation of the raw series.
    if (length(unique(tme$year)) > 1) warnb()
    cvp <- (sd(prec, na.rm = TRUE) / mean(prec, na.rm = TRUE)) * 100
  }
  cvp
}
#' bio16: Calculates precipitation of the wettest quarter
#'
#' @description `bio16` is used to calculate the total precipitation of the
#' wettest quarter of the year
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing how precipitation of the wettest
#' quarter is calculated. Options include "anuclim" or unspecified (see details).
#'
#' @return a single numeric value for precipitation in the wettest quarter of the year.
#' @export
#'
#' @details If method is "anuclim", monthly precipitation is spline interpolated
#' to a weekly time period. Precipitation for each 13-week period is calculated
#' and total precipitation in the wettest quarter returned. If data span more
#' than one year, data are aggregated by unique month irrespective of year and
#' one value returned. Otherwise, precipitation for each three-month (91-day)
#' period is calculated and total precipitation in the wettest quarter
#' returned. If data span more than one year, calculations are performed on all
#' data and a single value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio16(prec, tme)
#' bio16(prec, tme, method="anuclim")
#'
bio16 <- function(prec, tme, method = "") {
  # Total precipitation of the wettest quarter of the year.
  # Returns NA when `prec` has no usable variance.
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    # ANUCLIM method: monthly totals -> weekly totals by spline, then the
    # wettest 13-week window (windows may wrap around the year end).
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    weekly[which(weekly < 0)] <- 0  # clamp spline overshoot
    wrapped <- c(weekly, weekly)
    window_sums <- sapply(seq_along(weekly), function(k) {
      sum(wrapped[k:(k + 12)], na.rm = TRUE)
    })
    start <- which(window_sums == max(window_sums, na.rm = TRUE))[1]
    pwet <- sum(wrapped[start:(start + 12)], na.rm = TRUE)
  } else {
    # Direct method: rolling 91-day windows on the raw series.
    if (length(unique(tme$year)) > 1) warnb()
    step_days <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
    span <- 91 / step_days
    wrapped <- c(prec, prec)
    window_sums <- sapply(seq_along(prec), function(k) {
      sum(wrapped[k:(k + span)], na.rm = TRUE)
    })
    start <- which(window_sums == max(window_sums, na.rm = TRUE))[1]
    pwet <- sum(wrapped[start:(start + span)], na.rm = TRUE)
  }
  pwet
}
#' bio17: Calculates precipitation of the driest quarter
#'
#' @description `bio17` is used to calculate the precipitation in the driest
#' quarter of the year
#'
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing how quarterly
#' precipitation is calculated. Options include "anuclim" or unspecified (see details).
#'
#' @return a single numeric value of precipitation of the driest quarter.
#' @export
#'
#' @details If method is "anuclim", monthly precipitation is spline interpolated
#' to a weekly time period and precipitation of each 13-week period is
#' calculated. The precipitation in the driest quarter is then found. If data
#' spans more than one year, data are aggregated by unique month irrespective
#' of year and one value returned. Otherwise, precipitation in each three-month
#' period is calculated and total precipitation in the driest quarter
#' returned. If data span more than one year, calculations are performed on all
#' data and single value returned.
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' tme <- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio17(prec, tme)
#' bio17(prec, tme, method="anuclim")
#'
bio17 <- function(prec, tme, method = "") {
  # Total precipitation of the driest quarter of the year.
  # Returns NA when `prec` has no usable variance.
  if (is.na(sd(prec, na.rm = TRUE))) {
    return(NA)
  }
  if (method == "anuclim") {
    # ANUCLIM method: monthly totals -> weekly totals by spline, then the
    # driest 13-week window (windows may wrap around the year end).
    if (length(unique(tme$year)) > 1) warna()
    monthly <- aggregate(prec, by = list(tme$mon), sum, na.rm = TRUE)$x
    weekly <- spline(monthly, n = length(monthly) / 12 * 52)$y * 12 / 52
    weekly[which(weekly < 0)] <- 0  # clamp spline overshoot
    wrapped <- c(weekly, weekly)
    window_sums <- sapply(seq_along(weekly), function(k) {
      sum(wrapped[k:(k + 12)], na.rm = TRUE)
    })
    start <- which(window_sums == min(window_sums, na.rm = TRUE))[1]
    pdry <- sum(wrapped[start:(start + 12)], na.rm = TRUE)
  } else {
    # Direct method: rolling 91-day windows on the raw series.
    if (length(unique(tme$year)) > 1) warnb()
    step_days <- (as.numeric(tme[2]) - as.numeric(tme[1])) / 86400
    span <- 91 / step_days
    wrapped <- c(prec, prec)
    window_sums <- sapply(seq_along(prec), function(k) {
      sum(wrapped[k:(k + span)], na.rm = TRUE)
    })
    start <- which(window_sums == min(window_sums, na.rm = TRUE))[1]
    pdry <- sum(wrapped[start:(start + span)], na.rm = TRUE)
  }
  pdry
}
#' bio18: Precipitation of the warmest quarter
#'
#' @description `bio18` is used to calculate the precipitation in the warmest
#' quarter of the year
#'
#' @param temps a vector of temperature values, normally for one year (see
#' details).
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme1 a `POSIXlt` object representing the date and time of each `temps` value.
#' @param tme2 a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing how quarterly
#' temperature and precipitation are calculated. Options are "anuclim" or unspecified
#' (see details).
#'
#' @return a single numeric value of precipitation in the warmest quarter of the
#' year.
#' @export
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @details If method is "anuclim", monthly mean temperature and total monthly
#' precipitation are interpolated to a weekly time period before calculating
#' mean temperature of each 13-week period. The precipitation in the warmest
#' quarter is then found. If data span more than one year, data are aggregated
#' by unique month irrespective of year and one value returned. If method is
#' unspecified, the mean temperature in each three-month period is calculated
#' and precipitation in the coldest quarter returned. If data span more than
#' one year, calculations are performed on all data and single value returned.
#'
#' @examples
#' prec <- (10 * sin(c(0:364) * (pi / -360)) + rnorm(365) + 12)
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme1 <- tmecreate(2010, 6)
#' tme2<- tmecreate(2010, 24)
#' plot(temps~as.POSIXct(tme1), type = "l", xlab = "Month", ylab = "Temperature")
#' bio18(temps, prec, tme1, tme2)
#' bio18(temps, prec, tme1, tme2, method="anuclim")
bio18 <- function(temps, prec, tme1, tme2, method = "") {
  # Precipitation of the warmest quarter of the year.
  # Returns NA when either input series has no usable variance (e.g. all NA).
  if (is.na(sd(prec, na.rm = TRUE)) | is.na(sd(temps, na.rm = TRUE)))
    pwarm <- NA
  else {
    if (method == "anuclim") {
      # ANUCLIM method: aggregate both series to months, spline-interpolate
      # to weeks, locate the warmest 13-week temperature window, then sum the
      # interpolated precipitation over that same window.
      if (length(unique(tme1$year)) > 1) warna()
      if (length(unique(tme2$year)) > 1) warna()
      tmth <- aggregate(temps, by = list(tme1$mon), mean, na.rm = TRUE)$x
      twk <- spline(tmth, n = length(tmth) / 12 * 52)$y
      pmth <- aggregate(prec, by = list(tme2$mon), sum, na.rm = TRUE)$x
      # Rescale interpolated monthly totals to weekly totals.
      pwk <- spline(pmth, n = length(pmth) / 12 * 52)$y * 12 / 52
      pwk[which(pwk < 0)] <- 0  # spline overshoot cannot be negative rain
      qtr <- function(i) {
        # Mean temperature of the 13-week window starting at week i; the
        # series is duplicated so windows can wrap past the end of the year.
        tw <- c(twk, twk)
        me <- mean(tw[i: (i + 12)], na.rm = TRUE)
        me
      }
      wq <- sapply(c(1:length(twk)), qtr)
      i <- which(wq == max(wq, na.rm = TRUE))[1]
      pwk2 <- c(pwk, pwk)
      pwarm <- sum(pwk2[i:(i + 12)], na.rm = TRUE)
    }
    else {
      # Direct method: rolling 91-day windows on the raw series.
      if (length(unique(tme1$year)) > 1) warnb()
      if (length(unique(tme2$year)) > 1) warnb()
      qtr <- function(i, int) {
        # Mean temperature of the `int`-step window starting at step i.
        tw <- c(temps, temps)
        me <- mean(tw[i: (i + int)], na.rm = TRUE)
        me
      }
      id <- (as.numeric(tme1[2]) - as.numeric(tme1[1])) / 86400  # temps step (days)
      dd1 <- 24/(24/(1/id))  # algebraically 1/id (temps steps per day)
      int <- 91 / id         # temps steps in a 91-day quarter
      wq <- sapply(c(1:length(temps)), qtr, int)
      i <- which(wq == max(wq, na.rm = TRUE))[1]
      pid <-(as.numeric(tme2[2]) - as.numeric(tme2[1])) / 86400  # prec step (days)
      dd2 <- 24/(24/(1/pid)) # algebraically 1/pid (prec steps per day)
      pint <- 91 / pid       # prec steps in a 91-day quarter
      # Convert the warmest-quarter start index from the temps series to the
      # prec series: dd2/dd1 == id/pid, the ratio of the two time steps.
      # NOTE(review): `pi` shadows base::pi locally, and non-integer indices
      # are truncated by R subsetting below — confirm that is acceptable.
      pi <- i*(dd2/dd1)
      pre2 <- c(prec, prec)
      pwarm <- sum(pre2[pi:(pi + pint)], na.rm = TRUE)
    }
  }
  pwarm
}
#' bio19: Precipitation of the coldest quarter
#'
#' @description `bio19` is used to calculate the precipitation in the coldest
#' quarter of the year.
#'
#' @param temps a vector of temperature values, normally for one year (see
#' details)
#' @param prec a vector of precipitation values, normally for one year (see
#' details).
#' @param tme1 a `POSIXlt` object representing the date and time of each `temps` value.
#' @param tme2 a `POSIXlt` object representing the date and time of each `prec` value.
#' @param method an optional character string describing how quarterly mean
#' temperature and precipitation are calculated. Options are "anuclim" or unspecified
#' (see details).
#'
#' @return a single numeric value of precipitation in the coldest quarter of the
#' year.
#' @export
#'
#' @seealso the [tmecreate()] function can be used to create a POSIXlt object.
#'
#' @details If method is "anuclim", monthly mean temeprature and total monthly
#' precipitation are interpolated to weekly time period before calculating mean
#' temperature for each 13-week period. The precipitation in the coldest
#' quarter is then calculated. If data spans more than one year, data are
#' aggregated by unique month irrespective of year and one value returned If
#' method is left unspecified, the mean temperature in each three-month period
#' is calculated and precipitation in the coldest quarter returned. If data
#' spans more than one year, calculations are performed on all data and single
#' value returned.
#'
#' @examples
#'
#' prec <- 10 * sin(c(0:364) * (pi / -360)) + (rnorm(365) + 12)
#' temps <- 10 * sin(c(0:1459) / (pi * 150)) + rnorm(1460)
#' tme1 <- tmecreate(2010, 6)
#' tme2<- tmecreate(2010, 24)
#' plot(prec~as.POSIXct(tme2), type = "l", xlab = "Month", ylab = "Precipitation")
#' bio19(temps, prec, tme1, tme2)
#' bio19(temps, prec, tme1, tme2, method="anuclim")
bio19 <- function(temps, prec, tme1, tme2, method = "") {
  # Precipitation of the coldest quarter of the year.
  # Returns NA when either input series has no usable variance (e.g. all NA).
  if (is.na(sd(prec, na.rm = TRUE)) | is.na(sd(temps, na.rm = TRUE)))
    pcld <- NA
  else {
    if (method == "anuclim") {
      # ANUCLIM method: aggregate both series to months, spline-interpolate
      # to weeks, locate the coldest 13-week temperature window, then sum the
      # interpolated precipitation over that same window.
      if (length(unique(tme1$year)) > 1) warna()
      if (length(unique(tme2$year)) > 1) warna()
      tmth <- aggregate(temps, by = list(tme1$mon), mean, na.rm = TRUE)$x
      twk <- spline(tmth, n = length(tmth) / 12 * 52)$y
      pmth <- aggregate(prec, by = list(tme2$mon), sum, na.rm = TRUE)$x
      # Rescale interpolated monthly totals to weekly totals.
      pwk <- spline(pmth, n = length(pmth) / 12 * 52)$y * 12 / 52
      pwk[which(pwk < 0)] <- 0  # spline overshoot cannot be negative rain
      qtr <- function(i) {
        # Mean temperature of the 13-week window starting at week i; the
        # series is duplicated so windows can wrap past the end of the year.
        tw <- c(twk, twk)
        me <- mean(tw[i: (i + 12)], na.rm = TRUE)
        me
      }
      cq <- sapply(c(1:length(twk)), qtr)
      i <- which(cq == min(cq, na.rm = TRUE))[1]
      pwk2 <- c(pwk, pwk)
      pcld <- sum(pwk2[i:(i + 12)], na.rm = TRUE)
    }
    else {
      # Direct method: rolling 91-day windows on the raw series.
      if (length(unique(tme1$year)) > 1) warnb()
      if (length(unique(tme2$year)) > 1) warnb()
      qtr <- function(i, int) {
        # Mean temperature of the `int`-step window starting at step i.
        tw <- c(temps, temps)
        me <- mean(tw[i: (i + int)], na.rm = TRUE)
        me
      }
      id <- (as.numeric(tme1[2]) - as.numeric(tme1[1])) / 86400  # temps step (days)
      dd1 <- 24/(24/(1/id))  # algebraically 1/id (temps steps per day)
      int <- 91 / id         # temps steps in a 91-day quarter
      cq <- sapply(c(1:length(temps)), qtr, int)
      i <- which(cq == min(cq, na.rm = TRUE))[1]
      pid <-(as.numeric(tme2[2]) - as.numeric(tme2[1])) / 86400  # prec step (days)
      dd2 <- 24/(24/(1/pid)) # algebraically 1/pid (prec steps per day)
      pint <- 91 / pid       # prec steps in a 91-day quarter
      # Convert the coldest-quarter start index from the temps series to the
      # prec series: dd2/dd1 == id/pid, the ratio of the two time steps.
      # NOTE(review): `pi` shadows base::pi locally, and non-integer indices
      # are truncated by R subsetting below — confirm that is acceptable.
      pi <- i*(dd2/dd1)
      pre2 <- c(prec, prec)
      pcld <- sum(pre2[pi:(pi + pint)], na.rm = TRUE)
    }
  }
  pcld
}
|
62ff3aaedf96a2a30242e306b48f5ff774cc11d7
|
ab79177ad95b0e89d70210a3478b91f98cdb6b30
|
/man/event_term.Rd
|
6573de626c3089bcdf1afa59366a2efe0d7fa418
|
[] |
no_license
|
bbuchsbaum/fmrireg
|
93e69866fe8afb655596aa23c6f9e3ca4004a81c
|
2dd004018b3b7997e70759fc1652c8d51e0398d7
|
refs/heads/master
| 2023-05-10T17:01:56.484913
| 2023-05-09T14:38:24
| 2023-05-09T14:38:24
| 18,412,463
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,649
|
rd
|
event_term.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/event_vector.R
\name{event_term}
\alias{event_term}
\title{Create an event model term from a named list of variables.}
\usage{
event_term(evlist, onsets, blockids, durations = 1, subset = NULL)
}
\arguments{
\item{evlist}{A list of named variables.}
\item{onsets}{A vector of onset times for the experimental events in seconds.}
\item{blockids}{A vector of block numbers associated with each onset.}
\item{durations}{A vector of event durations (default is 1).}
\item{subset}{A logical vector indicating the subset of onsets to retain (default is NULL).}
}
\value{
A list containing the following components:
\itemize{
\item varname: A character string representing the variable names, concatenated with colons.
\item events: A list of event variables.
\item subset: A logical vector indicating the retained onsets.
\item event_table: A tibble containing event information.
\item onsets: A vector of onset times.
\item blockids: A vector of block numbers.
\item durations: A vector of event durations.
}
}
\description{
This function generates an event model term from a list of named variables,
along with their onsets, block IDs, and durations. Optionally, a subset of
onsets can be retained.
}
\examples{
x1 <- factor(rep(letters[1:3], 10))
x2 <- factor(rep(1:3, each=10))
eterm <- event_term(list(x1=x1,x2=x2), onsets=seq(1,100,length.out=30),
blockids=rep(1,30))
x1 <- rnorm(30)
x2 <- factor(rep(1:3, each=10))
eterm <- event_term(list(x1=x1,x2=x2), onsets=seq(1,100,length.out=30),
blockids=rep(1,30), subset=x1>0)
}
|
1375468a42425fa2cae8e645ddb2a29484c69486
|
a574a2feb28729d20606f3ebf5c0414fa7ebdd49
|
/man/as_search.Rd
|
5c3a45f27c1fb350b61506fb6a0a158bb787ff74
|
[
"MIT"
] |
permissive
|
gvegayon/rtimes
|
feb8a2043388380eab432c62825fa13e80b47c45
|
0286a27bb3eb25952b1f566261479f0e86168056
|
refs/heads/master
| 2021-01-16T22:42:10.438650
| 2015-08-11T17:23:12
| 2015-08-11T17:23:12
| 41,016,002
| 1
| 0
| null | 2015-08-19T05:54:37
| 2015-08-19T05:54:37
| null |
UTF-8
|
R
| false
| false
| 3,107
|
rd
|
as_search.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/as_search.R
\name{as_search}
\alias{as_search}
\title{Search articles}
\usage{
as_search(q, fq = NULL, sort = NULL, begin_date = NULL, end_date = NULL,
key = NULL, fl = NULL, hl = FALSE, page = 0, facet_field = NULL,
facet_filter = NULL, ..., callopts = list())
}
\arguments{
\item{q}{Search query term. Search is performed on the article body, headline
and byline.}
\item{fq}{Filtered search query using standard Lucene syntax. The filter query
can be specified with or without a limiting field: label. See fq_fields for the
filter query fields.}
\item{sort}{(character) Default NULL. One of newest or oldest . By default, search
results are sorted by their relevance to the query term (q). Use the sort parameter
to sort by pub_date.}
\item{begin_date}{Begin date - Restricts responses to results with publication
dates of the date specified or later. In the form YYYYMMDD}
\item{end_date}{End date - Restricts responses to results with publication dates
of the date specified or earlier. In the form YYYYMMDD}
\item{key}{your New York Times API key; pass in, or loads from .Rprofile as
\code{nytimes_as_key}, or from .Renviron as \code{NYTIMES_AS_KEY}}
\item{fl}{Fields to get back, as vector. See Details for the available fields.}
\item{hl}{(logical) Highlight or not, default is FALSE}
\item{page}{Page number. The value of page corresponds to a set of 10 results
(it does not indicate the starting number of the result set). For example, page=0
corresponds to records 0-9. To return records 10-19, set page to 1, not 10.}
\item{facet_field}{(character) Specifies the sets of facet values to include in the
facets array at the end of response, which collects the facet values from all the
search results. By default no facet fields will be returned. See details for options.}
\item{facet_filter}{(logical) Fields to facet on, as vector. When set to true,
facet counts will respect any applied filters (fq, date range, etc.) in addition to
the main query term. To filter facet counts, specifying at least one facet_field
is required.}
\item{...}{Further args passed into the query}
\item{callopts}{Curl options (debugging tools mostly) passed to \code{\link[httr]{GET}}}
}
\description{
Search articles
}
\details{
fl parameter options are: web_url, snippet, lead_paragraph, abstract, print_page,
blog, source, multimedia, headline, keywords, pub_date, document_type, news_desk,
byline, type_of_material, _id, word_count.
facet_field param options are: section_name, document_type, type_of_material,
source, day_of_week
}
\examples{
\dontrun{
as_search(q="bailout", begin_date = "20081001", end_date = '20081201')
as_search(q="bailout", facet_field = 'section_name', begin_date = "20081001",
end_date = '20081201', fl = 'word_count')
as_search(q="money", fq = 'The New York Times')
as_search(q="money", fq = 'news_desk:("Sports" "Foreign")')
as_search(q="bailout", hl = TRUE)
library('httr')
as_search("iowa caucus", callopts = verbose())
}
}
\references{
\url{http://developer.nytimes.com/docs/article_search_api/}
}
|
d50d8189a561339b73a5234a8c3247cbd8b975ad
|
423aa882d6d4385c7163eaf28025a3966c8a03a6
|
/avx2/simple/papi/plot.r
|
88f174dc99a237e267014631423c5f6f062378f4
|
[] |
no_license
|
mittmann/streaming_loads
|
e1b05ac6d9483ae06cbc2ee0e49f7236159df50d
|
399b093bf617b246cf63274af658f51ab10b5300
|
refs/heads/master
| 2021-04-02T23:07:17.373647
| 2020-07-24T18:46:48
| 2020-07-24T18:46:48
| 248,334,404
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
plot.r
|
# Summarise PAPI/RAPL counter measurements (mean, sd, standard error and
# 99% confidence interval per counter/size/memtype/temp/opt group), write the
# summary to pp.csv, then plot PAPI_TOT_CYC for the "huge" problem size with
# error bars, faceted by opt and memtype.
library(dplyr)
library(ggplot2)

df <- read.csv("saidas.11.05.2020/papiallrapl")

# Group summary: 2.576 is the z-value for a 99% confidence interval.
k <- df %>%
  select(counter, size, memtype, temp, opt, value) %>%
  group_by(counter, size, memtype, temp, opt) %>%
  summarise(mean = mean(value), n = n(), sd = sd(value),
            se = sd / sqrt(n), ic = 2.576 * se)
write.csv(k, "pp.csv")
# FIX: the original script had a bare `exit` here, which is not valid R —
# it raised "object 'exit' not found" and aborted before any plotting ran.

# Keep only the total-cycle counter for the "huge" problem size.
k <- k[k$size == "huge", ]
k <- k[k$counter == "PAPI_TOT_CYC", ]

ggplot(k, aes(x = temp, y = mean)) +
  geom_point() +
  geom_errorbar(aes(ymin = mean - ic, ymax = mean + ic), color = "grey") +
  xlab("Versão") + ylab(k$counter) +
  #scale_y_continuous(expand = c(0,0)) +
  scale_y_continuous(trans = "log10") +
  scale_fill_grey() +
  facet_wrap(~opt + memtype) +
  #theme_bw() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
|
472f8543bdf6d58aca5880090cecffd1d7b5b3f6
|
9c438d7bd98ddaacd14705c10c8d3bb195c925a4
|
/code/daejeon_linear.R
|
3802144e981745d413599b31e249add6ba79f272
|
[] |
no_license
|
Hmiiing/daejeon_Call
|
f51ab8562c4902475be612259e792b1f598f934a
|
c5594ce2fb5d91bc0794b882aa65e88d7b4c2f8d
|
refs/heads/master
| 2022-11-14T06:34:28.479321
| 2020-06-20T15:50:37
| 2020-06-20T15:50:37
| 273,735,354
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,326
|
r
|
daejeon_linear.R
|
# daejeon_linear.R -----------------------------------------------------------
# Linear-regression models of Daejeon call counts (new_n).
# Because predictions must stay positive, models are fit on log(new_n) and
# fitted/predicted values are floored at log(5) (counts below 5 reported as 5).
#
# Side effects per model: one coefficient table (lm_<label>_summary.csv) and
# one observed-vs-predicted scatter plot (pred_lm_<label>.jpg).
#
# Expects data frames `train`/`test` and `train_<district>`/`test_<district>`
# (daedeok, dong, seo, yuseong, jung) to exist in the calling environment.

library(Metrics)
library(ggplot2)

set.seed(2019)

## Baseline: intercept-only model on the full data ---------------------------
## RMSE on the log scale; recorded value from earlier runs: 0.764992.
rmse(predict(lm(mean(log(new_n)) ~ 1., data = train), newdata = test),
     log(test$new_n))
summary(train)

## Fit, evaluate, and plot one linear model ----------------------------------
## train_data / test_data: data frames containing new_n and the predictors.
## label: suffix used in output file names and in the plot title.
## Returns (invisibly) the fitted model plus clamped train/test RMSEs.
run_lm <- function(train_data, test_data, label) {
  fit <- lm(log(new_n) ~ ., data = train_data)
  write.csv(summary(fit)$coefficients, paste0("lm_", label, "_summary.csv"))

  floor_log <- log(5)  # predictions of fewer than 5 calls are reported as 5

  # Train error with fitted values clamped at log(5).
  fitted_clamped <- ifelse(fit$fitted.values <= floor_log, floor_log,
                           fit$fitted.values)
  train_rmse <- rmse(fitted_clamped, log(train_data$new_n))

  # Test error with predictions clamped at log(5).
  pred <- predict(fit, test_data)
  pred <- ifelse(pred <= floor_log, floor_log, pred)
  test_rmse <- rmse(log(test_data$new_n), pred)

  # Observed vs predicted, shown on the original (count) scale.
  obs_pred <- data.frame(real = log(test_data$new_n), pred = pred)
  p <- ggplot(data = exp(obs_pred)) +
    geom_point(mapping = aes(x = real, y = pred)) +
    geom_abline(intercept = 0, slope = 1, color = "blue", size = 1.5) +
    ggtitle(paste0("daejeon_lm_", label)) +
    theme(plot.title = element_text(family = "serif", face = "bold",
                                    hjust = 0.5, size = 25)) +
    xlab("obs") + ylab("pred")
  ggsave(paste0("pred_lm_", label, ".jpg"), plot = p)

  invisible(list(model = fit, train_rmse = train_rmse, test_rmse = test_rmse))
}

## Full data and per-district models -----------------------------------------
## Historical RMSEs (train / test, log scale) from earlier runs in comments.
results <- list(
  all     = run_lm(train,         test,         "all"),      # ~0.434 / ~0.523
  daedeok = run_lm(train_daedeok, test_daedeok, "daedeok"),  # ~0.465 / ~0.60
  dong    = run_lm(train_dong,    test_dong,    "dong"),     # ~0.425 / ~0.410
  seo     = run_lm(train_seo,     test_seo,     "seo"),      # ~0.390 / ~0.482
  yuseong = run_lm(train_yuseong, test_yuseong, "yuseong"),  # ~0.395 / ~0.430
  jung    = run_lm(train_jung,    test_jung,    "jung")      # ~0.403 / ~0.458
)

## Report the clamped RMSEs for every model.
for (label in names(results)) {
  cat(sprintf("%s: train RMSE = %.4f, test RMSE = %.4f\n",
              label, results[[label]]$train_rmse, results[[label]]$test_rmse))
}
|
43e90b9b1dbff39d85a21888e6fb830b40f8c56a
|
d7ff71e8ffb07419aad458fb2114a752c5bf562c
|
/tests/testthat/serialize_tests/k2-another-in_file-out.R
|
9f77f05f3d2d231cd74a597ac708afa36a1902af
|
[
"MIT"
] |
permissive
|
r-lib/styler
|
50dcfe2a0039bae686518959d14fa2d8a3c2a50b
|
ca400ad869c6bc69aacb2f18ec0ffae8a195f811
|
refs/heads/main
| 2023-08-24T20:27:37.511727
| 2023-08-22T13:27:51
| 2023-08-22T13:27:51
| 81,366,413
| 634
| 79
|
NOASSERTION
| 2023-09-11T08:24:43
| 2017-02-08T19:16:37
|
R
|
UTF-8
|
R
| false
| false
| 56
|
r
|
k2-another-in_file-out.R
|
call(1,
call2(call(3, 1, 2),
4))
|
afdf51b04cdaf73a5f6946e9a95cc67cd4c38fc0
|
0853134802bde59234f5b0bd49735b9b39042cfb
|
/Rsite/source/api/man/mx.symbol.gamma.Rd
|
26a692c73e7f32acec8063ac24c36887b71c0a7b
|
[] |
no_license
|
mli/new-docs
|
2e19847787cc84ced61319d36e9d72ba5e811e8a
|
5230b9c951fad5122e8f5219c4187ba18bfaf28f
|
refs/heads/master
| 2020-04-02T03:10:47.474992
| 2019-06-27T00:59:05
| 2019-06-27T00:59:05
| 153,949,703
| 13
| 15
| null | 2019-07-25T21:33:13
| 2018-10-20T21:24:57
|
R
|
UTF-8
|
R
| false
| true
| 539
|
rd
|
mx.symbol.gamma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.symbol.gamma}
\alias{mx.symbol.gamma}
\title{gamma:Returns the gamma function (extension of the factorial function \
to the reals), computed element-wise on the input array.}
\usage{
mx.symbol.gamma(...)
}
\arguments{
\item{data}{NDArray-or-Symbol
The input array.}
\item{name}{string, optional
Name of the resulting symbol.}
}
\value{
out The result mx.symbol
}
\description{
The storage type of ``gamma`` output is always dense
}
|
b209d16e221d7e3c85c21d2fd7b78529cb9b9acb
|
7217693dc00b148a48c6503f6fe4ec1d478f52e8
|
/mr/process_mr_results.R
|
46fcff5b8eda7f50a38e0786ec00ccb4d0d3d50b
|
[] |
no_license
|
Eugene77777777/biomarkers
|
8ac6250e1726c9233b43b393f42076b573a2e854
|
9e8dc2876f8e6785b509e0fce30f6e215421f45b
|
refs/heads/master
| 2023-07-25T10:52:30.343209
| 2021-09-08T18:12:45
| 2021-09-08T18:12:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,375
|
r
|
process_mr_results.R
|
# Our pipeline is largely based on
# https://academic.oup.com/ije/article/47/4/1264/5046668, Box 3
# Aka the Rücker model-selection framework
setwd("~/Desktop/rivaslab/biomarkers/resubmission1/")
library(data.table)
# read the trait name info
trait_names = fread("./mr_rg_traits.txt",
data.table = F,stringsAsFactors = F,
header= F)
rnames = trait_names[,1]
trait_names = trait_names[,-1]
names(trait_names) = rnames
# read the results
# Map a raw trait identifier to a human-readable display name.
#
# `trait_names` is a character vector named by raw identifiers (built above
# from mr_rg_traits.txt). If `s` is one of those identifiers, return the
# mapped name; otherwise treat `s` as a path / comma-delimited string and
# return its last component.
#
# Fix: the original tested is.element(s, rownames(trait_names)); rownames()
# of a named vector is NULL, so the lookup branch could never fire.
simplify_name <- function(s, trait_names) {
  if (s %in% names(trait_names)) {
    return(trait_names[[s]])
  }
  parts <- strsplit(s, split = "/|,")[[1]]
  parts[length(parts)]
}
raw_results = fread("./all.results.restructured.txt",stringsAsFactors = F,data.table = F)
pleio_res = fread("./all.intercepts.restructured.txt",stringsAsFactors = F,data.table = F)
rownames(pleio_res) = paste(pleio_res$exposure,pleio_res$outcome,sep=",")
pleio_res$exposure = sapply(pleio_res$exposure,simplify_name,trait_names=trait_names)
pleio_res$outcome = sapply(pleio_res$outcome,simplify_name,trait_names=trait_names)
het_res = fread("./all.heterogeneity.restructured.txt",stringsAsFactors = F,data.table = F)
het_res$exposure = sapply(het_res$exposure,simplify_name,trait_names=trait_names)
het_res$outcome = sapply(het_res$outcome,simplify_name,trait_names=trait_names)
# # HDL vs. CAD
# check_HDL_res<-function(x){
# inds1 = grepl("HDL",x[,"exposure"])
# inds2 = grepl("CAD",x[,"outcome"]) | grepl("cardio",x[,"outcome"])
# inds = inds1 & inds2
# return(x[inds,])
# }
# check_HDL_res(raw_results)
# check_HDL_res(het_res)
# # Diabetes
# check_diab_res<-function(x){
# inds1 = grepl("T2D",x[,"outcome"])
# inds2 = grepl("gluc|hdl|gly",x[,"exposure"],ignore.case = T)
# inds = inds1 & inds2
# x = x[inds,c("exposure","method","b","pval","nsnp")]
# x[,1] = sapply(x[,1], function(x){
# arr = strsplit(x,split='\\/')[[1]];
# arr[length(arr)]
# }
# )
# x[,2] = gsub("Inverse variance weighted","IVW",x[,2])
# x[,2] = gsub("multiplicative random effects","mRE",x[,2])
# x[,2] = gsub("bootstrap","b",x[,2])
# x[,2] = gsub("fixed effects","FE",x[,2])
# return(x)
# }
# check_diab_res(raw_results)
# Exclude some unwanted pairs before the analysis
# Drop exposure/outcome pairs that must not enter the MR analysis:
# ApoB without statin adjustment, the CKDGen eGFR-decline GWAS, telomere
# length, and the EPIC study (the last matched case-sensitively).
exclude_pairs <- function(x) {
  # Collapse each row into one searchable "exposure,outcome,..." key.
  row_keys <- apply(x, 1, paste, collapse = ",")
  drop_row <-
    grepl("Apolipoprotein_B_white_british", row_keys, ignore.case = TRUE) |
    grepl("CKDGen_eGFRdecline", row_keys, ignore.case = TRUE) |
    grepl("Telomere", row_keys, ignore.case = TRUE) |
    grepl("EPIC", row_keys)  # deliberately case-sensitive
  x[!drop_row, ]
}
raw_results = exclude_pairs(raw_results)
pleio_res = exclude_pairs(pleio_res)
het_res = exclude_pairs(het_res)
# Define the thresolds
Q_p_thr = 0.01
FDR_level = 0.05
# p_het_thr = 0.01
# separate the main results by method
method2res = list()
for(method in unique(raw_results$method)){
method2res[[method]] = raw_results[raw_results$method==method,]
}
names(method2res) = gsub("Inverse variance weighted","IVW",names(method2res))
names(method2res) = gsub("random effects","RE",names(method2res))
names(method2res) = gsub("fixed effects","FE",names(method2res))
method2res = lapply(method2res,
function(x){rownames(x) = paste(x$exposure,x$outcome,sep=",");
x$exposure = sapply(x$exposure,simplify_name,trait_names=trait_names);
x$outcome = sapply(x$outcome,simplify_name,trait_names=trait_names);x})
# Add the FDR to the methods
for(mname in names(method2res)){
q_values = p.adjust(method2res[[mname]]$pval,method="fdr")
method2res[[mname]] = cbind(method2res[[mname]],q_values)
}
########################################################################
########################################################################
# Method comparison:
# plot the raw p-values
par(mfrow=c(2,3))
for(method in names(method2res)){
mname = strsplit(method,split="\\(|\\)")[[1]]
mname = paste(mname,collapse="\n")
hist(method2res[[method]]$pval,
main=mname,xlab="P-value",cex.main=1)
}
# Method similarity
shared_pairs = rownames(method2res[[1]])
for(method in names(method2res)){
shared_pairs = intersect(shared_pairs,rownames(method2res[[method]]))
}
pval_mat = sapply(method2res,function(x,y)x[y,"pval"],y=shared_pairs)
pval_mat = cbind(pval_mat,pleio_res[shared_pairs,]$pval)
colnames(pval_mat)[ncol(pval_mat)] = "Egger_pleio_p"
scaled_b_mat = sapply(method2res,function(x,y)x[y,"scaled.b"],y=shared_pairs)
library(ggcorrplot)
pval_corrs = cor(pval_mat,method="spearman")
print(ggcorrplot(t(pval_corrs),lab=T,lab_size=2.5,hc.order = F) +
ggtitle("P-value, spearman") +
theme(plot.title = element_text(hjust = 0.5,size=20)))
# remove rows with NAs
scaled_b_mat = scaled_b_mat[!apply(is.na(scaled_b_mat),1,any),]
b_corrs = cor(scaled_b_mat,method="spearman")
print(ggcorrplot(t(b_corrs),lab=T,lab_size=2.5,hc.order = F) +
ggtitle("Scaled beta, spearman") +
theme(plot.title = element_text(hjust = 0.5,size=20)))
dev.off()
########################################################################
# Adjust for FDR - get the number of results per method
# Keep only rows of `x` whose Benjamini-Hochberg (FDR) adjusted p-value
# (from column `pval`) is below the significance level `sig`.
get_adjusted_results <- function(x, sig = 0.01) {
  fdr_q <- p.adjust(x$pval, method = "fdr")
  keep <- fdr_q < sig
  x[keep, ]
}
method2adjres = lapply(method2res,get_adjusted_results)
par(mar=c(5,10,5,5))
barplot(sapply(method2adjres,nrow),las=2,horiz = T,xlab="Number of pairs (0.01 FDR)")
dev.off()
########################################################################
# Scheme for selecting the models and their results based on
# the Rücker model-selection framework
# In our case (comparisons above) we used Egger with bootstrap to increas power.
# We then adapt the analysis of the heterogeneity as follows:
# For inignificant IVW Q scores, use the beta and p-value from IVW+FE.
# For significant Q scores use the beta valuse use IVW+ME.
# If the difference between the Egger Q and the IVW+FE Q is significant, we use the
# beta and p-value from Egger.
# IVW
ivw_fe_results = method2res$`IVW (FE)`
ivw_fe_q_results = het_res[het_res$method == "Inverse variance weighted",]
ivw_me_results = method2res$IVW
rownames(ivw_fe_q_results) = paste(ivw_fe_q_results$exposure,ivw_fe_q_results$outcome,sep=",")
rownames(ivw_fe_results) = paste(ivw_fe_results$exposure,ivw_fe_results$outcome,sep=",")
rownames(ivw_me_results) = paste(ivw_me_results$exposure,ivw_me_results$outcome,sep=",")
ivw_shared_pairs = intersect(rownames(ivw_fe_q_results),rownames(ivw_fe_results))
ivw_fe_results = ivw_fe_results[ivw_shared_pairs,]
ivw_fe_q_results = ivw_fe_q_results[ivw_shared_pairs,]
ivw_me_results = ivw_me_results[ivw_shared_pairs,]
is_ivw_fe_q_significant = ivw_fe_q_results$Q_pval < Q_p_thr
names(is_ivw_fe_q_significant) = rownames(ivw_fe_q_results)
ivw_merged_results = rbind(
ivw_fe_results[!is_ivw_fe_q_significant,],
ivw_me_results[is_ivw_fe_q_significant,]
)
# Egger
# make sure that Egger and EggerB are ordered correctly: (sanity check)
all(method2res$`MR Egger`[,1:4] == method2res$`MR Egger (bootstrap)`[,1:4])
egger_results = method2res$`MR Egger`
egger_b_inds = egger_results$nsnp > 30
egger_results[egger_b_inds,] = method2res$`MR Egger (bootstrap)`[egger_b_inds,]
egger_q_results = het_res[het_res$method == "MR Egger",]
rownames(egger_results) = paste(egger_results$exposure,egger_results$outcome,sep=",")
rownames(egger_q_results) = paste(egger_q_results$exposure,egger_q_results$outcome,sep=",")
# Methods' rownames do not perfectly fit but all Egger pairs are in the IVW pairs
egger_q_diffs = ivw_fe_q_results[rownames(egger_q_results),"Q"] - egger_q_results$Q
egger_q_diffs_pval = pchisq(egger_q_diffs,1,lower.tail = F)
table(egger_q_diffs_pval > Q_p_thr)
egger_pairs = rownames(egger_q_results)[egger_q_diffs_pval < Q_p_thr]
egger_pairs = egger_pairs[egger_q_results[egger_pairs,"Q_pval"] > 1e-100]
ivw_egger_merged_results = ivw_merged_results
ivw_egger_merged_results[egger_pairs,] = egger_results[egger_pairs,]
# Get the current significant results
selected_merged_results = p.adjust(ivw_egger_merged_results$pval,method="fdr") < FDR_level
table(selected_merged_results)
selected_merged_results = ivw_egger_merged_results[selected_merged_results,]
rownames(selected_merged_results)
selected_merged_results = selected_merged_results[,-c(1:2)]
# Fix some of the names
for(j in 1:2){
selected_merged_results[,j] = gsub("_all$","",selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("^int_","",selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("noncancer_illness_code_","",
selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("diagnosed_by_doctor_","",
selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("vascularheart","",
selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("^\\s","",selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("\\s$","",selected_merged_results[,j],ignore.case = T)
selected_merged_results[,j] = gsub("^_","",selected_merged_results[,j],ignore.case = T)
# transform "_" to " "
selected_merged_results[,j] = gsub("_"," ",selected_merged_results[,j],ignore.case = T)
# remove white british.1cm
selected_merged_results[,j] = gsub(" white british.1cm","",
selected_merged_results[,j],ignore.case = T)
# Extra formatting
selected_merged_results[grepl("diabetes",selected_merged_results[,j],ignore.case = T),j] = "Diabetes"
selected_merged_results[grepl("MEGASTROKE",selected_merged_results[,j],ignore.case = T),j] = "Stroke"
selected_merged_results[grepl("stroke",selected_merged_results[,j],ignore.case = T),j] = "Stroke"
selected_merged_results[grepl("T2D",selected_merged_results[,j],ignore.case = T),j] = "Diabetes"
selected_merged_results[,1] = gsub("\\s+adjstatins","",selected_merged_results[,1],ignore.case = T)
selected_merged_results[,j] = gsub(" diagnosed by doctor","",selected_merged_results[,j],ignore.case = T)
selected_merged_results[grepl("HYPOTHYROIDISM",selected_merged_results[,j],ignore.case = T),j] = "Hypothyroidism"
# Fix some of the nodes (after manual inspection)
selected_merged_results[selected_merged_results[,j]=="Fracture bones",j] = "Fractured bones"
}
unique_pairs = unique(selected_merged_results[,2:1])
rownames(unique_pairs) = NULL
write.table(unique_pairs,sep="\t",quote=F,row.names = F)
# Add alternative beta scores
selected_merged_results[["abs(b)"]] = abs(selected_merged_results$scaled.b)
selected_merged_results[["log(b^2)"]] = log(selected_merged_results$scaled.b^2,base=10)
v = as.numeric(selected_merged_results$b>0)
v[v==0]=-1
selected_merged_results[["Effect_sign"]] = v
# Node attr for cytoscape
allnodes = union(selected_merged_results[,1],selected_merged_results[,2])
m = cbind(allnodes,is.element(allnodes,set=selected_merged_results$outcome))
colnames(m) = c("node","is_outcome")
write.table(m,file="node_info.txt"
,sep="\t",row.names = F,col.names = T,quote = F)
write.table(selected_merged_results,
file="selected_results_fdr0.05_Q0.01.txt"
,sep="\t",row.names = F,col.names = T,quote = F)
# ##########################################################################
# ##########################################################################
# ##########################################################################
# # Old analysis that focuses on diseases
# # Filter the original network using "disease" regex
# disease_reg = c("angina","disease","cancer","bone","diabetes","alz","asthma",
# "gout","hypothr","hypothyroidism","multiple","pain","lupus",
# "stroke","CAD","celiac","amd","oma","degeneration","scz",
# "microalbuminuria","eczema","vascular pro")
# disease_reg = paste(disease_reg,collapse = "|")
# selected_merged_results_disease = selected_merged_results[grepl(
# disease_reg,selected_merged_results$outcome,ignore.case=T),]
# unique(selected_merged_results_disease$outcome)
# unique(selected_merged_results$outcome)
#
# # Print all the files (for the paper)
#
# # Disease, FDR 1%
# # Add two more columns
# selected_merged_results_disease$Effect_sign =
# as.numeric(selected_merged_results_disease$scaled.b > 0)
# selected_merged_results_disease$type_and_sign =
# paste(selected_merged_results_disease$Effect_sign,
# selected_merged_results_disease$edge_type,sep="")
# # remove UKB
# selected_merged_results_disease = selected_merged_results_disease[
# !grepl("ukb",selected_merged_results_disease$path,ignore.case = T),
# ]
# # remove not adjusted for statins
# nonadj_to_rem = c("Apolipoprotein B","Cholesterol","LDL direct")
# selected_merged_results_disease = selected_merged_results_disease[
# ! selected_merged_results_disease$exposure %in% nonadj_to_rem,
# ]
# # keep ischemic stroke only
# selected_merged_results_disease = selected_merged_results_disease[
# selected_merged_results_disease$title !="Any stroke",
# ]
# write.table(selected_merged_results_disease,
# file="selected_results_disease_fdr0.01_pleio0.01.txt"
# ,sep="\t",row.names = F,col.names = T,quote = F)
|
ffd940a4b9e041bed8a3774f124eafecf3e2c3e3
|
76a593d829b0d61806e3c5b5e144adcd6a1ab3e7
|
/biobank_vs_loo_effects.R
|
bd72d61a73af3a4353da9cdb511af8de2fd601ed
|
[] |
no_license
|
ktsuo/globalbiobankmeta-Asthma
|
a97a3e993780c263945512aacc18afdead1d53ee
|
4682c4122eaf9aa833de1e05dca09324185ecaff
|
refs/heads/main
| 2023-04-07T00:17:17.003412
| 2022-09-30T17:51:12
| 2022-09-30T17:51:12
| 543,720,743
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,817
|
r
|
biobank_vs_loo_effects.R
|
##############################################################
##### Biobanks VS. Leave-that-biobank-out meta-analyses ######
##############################################################
library(data.table)
library(ggplot2)
library(dplyr)
library(broom)
library(readxl)
library(ggrepel)
library(writexl)
library(tidyverse)
library(purrr)
# set working directory
output_dir=""
setwd(output_dir)
######################
###### Load data #####
######################
results_dir = ""
# load biobank results
biobank_files <- list.files(results_dir,
pattern=glob2rx("*BBONLY.txt"), full.names=TRUE)
list.biobanks <- lapply(biobank_files,function(x) fread(x, select=c('SNP_ID', 'CHR', 'POS', 'Allele1', 'Allele2', 'biobank_BETA', 'biobank_SE', 'biobank_p.value')))
list.biobanks.names <- list('BBJ', 'CKB', 'DECODE', 'ESTBB', 'FinnGen', 'GNH', 'GS', 'HUNT', 'Lifelines', 'MGB', 'QSKIN', 'TWB')
names(list.biobanks) <- list.biobanks.names
biobank_meta_files <- list.files(results_dir, pattern=glob2rx("*BBMETA.txt"), full.names=TRUE)
list.bb_meta <- lapply(biobank_meta_files, function(x) fread(x, select=c(1:8)))
list.bb_meta.names <- list('BioMe', 'BioVU', 'CCPM', 'MGI', 'UCLA', 'UKBB')
names(list.bb_meta) <- list.bb_meta.names
# load LOO meta-analyses
loo_files <- list.files(results_dir, pattern=glob2rx("*LOO.txt"), full.names=TRUE)
list.loo <- lapply(loo_files,function(x) fread(x, select=c('SNP_ID','CHR', 'POS', 'REF', 'ALT', 'inv_var_meta_beta', 'inv_var_meta_sebeta',
'inv_var_meta_p')))
list.loo.names <- list('BBJ', 'BIOME', 'BIOVU', 'CCPM', 'CKB', 'DECODE', 'ESTBB', 'FG', 'GNH', 'GS', 'HUNT', 'LIFELINES', 'MGB', 'MGI', 'QSKIN', 'TWB', 'UCLA', 'UKBB')
names(list.loo) <- list.loo.names
# load top hits
top_hits <- read_excel("")
top_hits <- top_hits %>%
select(SNP, CHR, POS, REF, ALT, all_inv_var_meta_p)
# load sample sizes
sample_sizes <- read_excel("biobanks_sampling_prevalence.xlsx")
########################
###### Format data #####
########################
# combine all biobank-specific meta-analyses into one list
list.all.biobanks <- c(list.bb_meta, list.biobanks)
list.all.biobanks <- lapply(list.all.biobanks, setNames, nm = c('SNP_ID','CHR', 'POS', 'REF', 'ALT', 'biobank_beta', 'biobank_sebeta', 'biobank_p'))
list.all.biobanks <- list.all.biobanks[order(names(list.all.biobanks))]
list.loo <- list.loo[order(names(list.loo))]
# align to LOO risk allele
# Add the risk allele (the allele with a positive effect) and its
# positive-signed effect size to a summary-statistics table.
#
# beta_col / ALT_col / REF_col are unquoted column names. They are embraced
# with {{ }} so callers can pass bare column names; the original forwarded
# them into plain ifelse() calls, where they were evaluated in the calling
# environment (not inside `x`) and raised "object not found" errors.
add_risk_allele <- function(x, beta_col, ALT_col, REF_col){
  x %>% mutate(
    risk_allele      = ifelse({{ beta_col }} > 0, {{ ALT_col }}, {{ REF_col }}),
    risk_allele_beta = ifelse({{ beta_col }} > 0, {{ beta_col }}, -({{ beta_col }}))
  )
}
list.loo <- lapply(list.loo, add_risk_allele, beta_col=inv_var_meta_beta, ALT_col=ALT, REF_col=REF)
list.loo <- lapply(list.loo, setNames, nm = c('SNP_ID', 'CHR', 'POS', 'REF', 'ALT', 'inv_var_meta_beta', 'inv_var_meta_sebeta', 'inv_var_meta_p ', 'risk_allele_LOO', 'risk_allele_beta_LOO'))
# Join a biobank's summary statistics onto the LOO meta-analysis (by SNP_ID)
# and re-sign the biobank effect to the LOO risk allele: if the biobank ALT
# allele equals the LOO risk allele the beta is kept, otherwise it is flipped.
#
# beta_col / ALT_col / REF_col are unquoted (post-join) column names,
# embraced with {{ }} so they resolve inside the merged data frame; the
# original evaluated them as ordinary arguments in the caller's environment,
# which fails (see add_risk_allele).
align_alleles <- function(loo, biobank, beta_col, ALT_col, REF_col){
  merged <- right_join(loo, biobank, by = "SNP_ID")
  merged %>% mutate(
    matched_allele_BIOBANK      = ifelse({{ ALT_col }} == risk_allele_LOO,
                                         {{ ALT_col }}, {{ REF_col }}),
    matched_allele_beta_BIOBANK = ifelse({{ ALT_col }} == risk_allele_LOO,
                                         {{ beta_col }}, -({{ beta_col }}))
  )
}
list.aligned <- Map(align_alleles, list.loo, list.all.biobanks, beta_col=biobank_beta, ALT_col=ALT.y, REF_col=REF.y)
# add column with biobank names
biobank_names <- c(names(list.all.biobanks)[order(names(list.all.biobanks))])
list.aligned <- Map(function(x,y){x <- x %>% mutate(biobank = y)}, list.aligned, biobank_names)
df <- Reduce('rbind',list.aligned)
###################################################
##### Plot ratios of biobank vs. LOO effects ######
###################################################
# compute beta ratios
df <- df %>% mutate(beta_ratio = matched_allele_beta_BIOBANK / risk_allele_beta_LOO)
averages <- df %>% group_by(biobank) %>% summarise(average = mean(beta_ratio), n=n())
# plot average beta ratios
output_name='.jpeg'
text_size=25
barplot_ratios <- ggplot(averages, aes(x=biobank, y=average)) +
geom_bar(stat="identity", fill="steelblue3", width=0.5) +
geom_hline(yintercept = 1.0, linetype='dashed', size=1) +
ylab(paste0("Average SNP effect in biobank over", "\n",
"leave-that-biobank-out meta-analysis")) +
theme_classic() +
theme(text=element_text(size=text_size),axis.text.x=element_text(angle=65, hjust=1), axis.title.y=element_text(size=20))
ggsave(barplot_ratios, height = 9, width = 12, dpi = 300, filename=output_name)
###################################
##### Plot Deming Regressions #####
###################################
df1 <- lapply(list.aligned, as.data.frame)
# Draw one biobank-vs-LOO effect-size scatter with its Deming-regression line.
#
# Arguments:
#   fit   - a deming::deming fit; coefficient 2 is used as the slope and the
#           drawn line is forced through the origin.
#   df    - data frame carrying the CI bounds (lowH/highH horizontal,
#           lowV/highV vertical) computed by ggplotDemingRegression_input_step1().
#   param - one-row data frame of plot parameters (axis labels, MinVal/MaxVal
#           limits, output file) built by ggplotDemingRegression_input_step2().
#
# Side effect: saves the plot to param$pdffile via ggsave(); the ggplot
# object is not returned.
#
# NOTE(review): aes_string() is deprecated in recent ggplot2; consider
# aes(.data[[...]]). Referencing df$low*/high* inside aes() only works
# because df has the same row order as fit$model - confirm upstream.
ggplotDemingRegression <- function(fit, df, param) {
  require(ggplot2)
  # Slope of the errors-in-variables fit (the model has no intercept term).
  deming_slope = unlist(fit$coefficients[2])
  p=ggplot(fit$model, aes_string(x = names(fit$model)[2], y = names(fit$model)[1])) +
    # vertical and horizontal confidence bars for each SNP
    geom_errorbar(aes(ymin=df$lowV, ymax=df$highV), width=.005, alpha=0.8) +
    geom_errorbarh(aes( xmin=df$lowH,xmax=df$highH), height=.005, alpha=0.8) +
    # fitted Deming slope through the origin
    geom_abline(slope=deming_slope, intercept=0, color = "orangered3") +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5,size=18), axis.text=element_text(size=10),
          axis.title=element_text(size=10,face="bold"),
          panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), axis.line = element_line(colour = "black"),
          legend.position = "none",
          plot.margin=grid::unit(c(5,5,5,5), "mm")) +
    geom_point(size=0.5) +
    # identity line for reference: biobank effect == LOO effect
    geom_abline(intercept = 0, slope=1, col="grey3", show.legend = T, linetype = "dashed") +
    xlab(param$xlabtext) + ylab(param$ylabtext)+
    # square axes over the shared range computed in input_step2
    scale_x_continuous(expand=c(0,0), limits=c(param$MinVal-0.1,param$MaxVal+0.1)) +
    scale_y_continuous(expand=c(0,0), limits=c(param$MinVal-0.1,param$MaxVal+0.1)) +
    coord_cartesian(xlim=c(param$MinVal,param$MaxVal), ylim=c(param$MinVal,param$MaxVal)) +
    labs(title = paste(
      " Slope =",signif(deming_slope, 2)
    )
    )
  ggsave(p,filename=param$pdffile, dpi=300,width = 5, height = 5)
}
# deming regression models for each biobank
# Fit an intercept-free Deming (errors-in-variables) regression of the
# leave-one-out meta-analysis effect sizes on the biobank effect sizes,
# weighting both axes by their respective standard errors.
#
# dat must contain the columns risk_allele_beta_LOO,
# matched_allele_beta_BIOBANK, biobank_sebeta and inv_var_meta_sebeta.
# Returns the deming fit object (slope is coefficient 2).
#
# Fix: the original loaded the package with require() (which only returns
# FALSE on failure, deferring the error) and also attached devtools, which
# this function never uses.
fit_deming <- function(dat){
  if (!requireNamespace("deming", quietly = TRUE)) {
    stop("Package 'deming' is required for fit_deming().", call. = FALSE)
  }
  # `+ 0` drops the intercept: under no systematic bias, the slope alone
  # relates the two estimates. xstd/ystd are evaluated inside `dat`.
  deming::deming(risk_allele_beta_LOO ~ matched_allele_beta_BIOBANK + 0,
                 data = dat, xstd = biobank_sebeta, ystd = inv_var_meta_sebeta)
}
# plot parameters
# Attach 95% confidence bounds for both axes of the Deming scatter plot:
# lowH/highH around the biobank beta, lowV/highV around the LOO beta
# (point estimate +/- 1.96 standard errors).
ggplotDemingRegression_input_step1 <- function(data){
  data$lowH  <- data$matched_allele_beta_BIOBANK - 1.96 * data$biobank_sebeta
  data$highH <- data$matched_allele_beta_BIOBANK + 1.96 * data$biobank_sebeta
  data$lowV  <- data$risk_allele_beta_LOO - 1.96 * data$inv_var_meta_sebeta
  data$highV <- data$risk_allele_beta_LOO + 1.96 * data$inv_var_meta_sebeta
  data
}
# Build the plotting parameters for one biobank's Deming-regression plot:
# axis labels, shared axis limits (confidence bounds padded by 0.01, the
# minimum never above zero), and the output image file name.
# Returns a one-row data frame with columns xlabtext, ylabtext, MaxVal,
# MinVal, pdffile.
ggplotDemingRegression_input_step2 <- function(data, biobank_name){
  x_label <- paste("Effect sizes reported by", biobank_name, sep = " ")
  y_label <- paste("Effect sizes reported by LOO meta-analysis excluding",
                   biobank_name, sep = " ")
  axis_min <- min(0, min(data$lowH) - 0.01, min(data$lowV) - 0.01)
  axis_max <- max(max(data$highH) + 0.01, max(data$highV) + 0.01)
  out_file <- paste0("tophits_bb_loo_effectsize_comparison_DemingRegression_allSNPs",
                     biobank_name, ".jpeg")
  params <- data.frame(
    xlabtext = x_label,
    ylabtext = y_label,
    MaxVal = as.numeric(axis_max),
    MinVal = as.numeric(axis_min),
    pdffile = out_file
  )
  params
}
# plot
list.deming <- lapply(df1, FUN=fit_deming)
df.deming <- lapply(df1, ggplotDemingRegression_input_step1)
plot.param <- Map(ggplotDemingRegression_input_step2, df.deming, biobank_names)
Map(ggplotDemingRegression, fit=list.deming, df=df.deming, param=plot.param)
|
55790f4a5e9708469f88e2bfa6e7791939ecd4e3
|
48c952257ef4d414e822eee952fbd79bfa900d2b
|
/mult.R
|
ed88f6f620fbb1ce9dcacb85bbdd8268930e93d1
|
[] |
no_license
|
michel-briand/emath
|
1c64e3630e1cc98c6ef940996061cc27ada1fe3c
|
896185ed58b325d116762108e09adbe3559e189c
|
refs/heads/master
| 2020-03-29T23:42:53.141295
| 2018-09-26T20:19:13
| 2018-09-26T20:19:13
| 150,485,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,550
|
r
|
mult.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#
# This application is a small workshop for mathematics.
# It is inspired from the Micmaths video (https://youtu.be/-X49VQgi86E).
# It displays a representation of multiplication on the circle.
# Playing with the number and the modulo, one creates flower petals.
#
# (c) 2018 Michel Briand
# Creative Commons Attribution-ShareAlike 4.0 International License
# http://creativecommons.org/licenses/by-sa/4.0/
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Multiplication"),
fluidRow(
column(12,
span(style="font-style: italic;font-size: 1em;",
p(style="margin:0px", "Ce petit atelier mathématique est inspiré par la vidéo de Micmaths :"),
tags$a(href="https://youtu.be/-X49VQgi86E", "La face cachée des tables de multiplication",
target="_blank",rel="noopener noreferrer", style="align: center;"),
p(style="margin:0px", "En choisissant la table de multiplication et le modulo, vous pouvez visualiser des pétales de fleur...")
)
),
column(12, br()) # blank
),
# Sidebar with a slider input for number and modulo
sidebarLayout(
sidebarPanel(
sliderInput("nombre",
"Table de multiplication :",
min = 1,
max = 500,
value = 2),
sliderInput("modulo",
"Modulo :",
min = 0,
max = 500,
value = 10)
),
# Show a plot of the generated circle and lines
mainPanel(
plotOutput("distPlot", width = "50vw", height = "50vw")
)
),
fluidRow(
column(12,
br(),br(),
p("(c) 2018 Michel Briand"),
tags$a(rel="license", href="http://creativecommons.org/licenses/by-sa/4.0/",
"Creative Commons Attribution-ShareAlike 4.0 International License"),
img(alt="Creative Commons License",style="border-width:0",src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png"),
br(),br()
)
)
)
# Define server logic required to draw a histogram
# Shiny server: redraws the multiplication-circle figure whenever the
# multiplication table (input$nombre) or the modulus (input$modulo) changes.
server <- function(input, output) {
  output$distPlot <- renderPlot({
    n <- input$nombre  # multiplication table to visualise
    m <- input$modulo  # number of points placed around the circle
    theta <- 2*pi/m    # angular spacing between consecutive points
    N <- 100           # resolution of the drawn circle outline
    tt <- seq(0, 2*pi, length.out = N)
    i <- seq(0, m-1)   # point indices 0 .. m-1 on the circle
    # Outline of the circle as N (x, y) samples (uses `tt` from above).
    circleFun <- function(center = c(0,0),diameter = 1){
      r = diameter / 2
      xx <- center[1] + r * cos(tt)
      yy <- center[2] + r * sin(tt)
      return(data.frame(x = xx, y = yy))
    }
    # Angle of point a: where its chord starts.
    startFun <- function(a) {
      return(a*theta)
    }
    # Angle of point n*a: where the chord for a ends (cos/sin wrap the
    # angle, giving n*a modulo m).
    endFun <- function(a) {
      return(n*a*theta)
    }
    # Chord endpoints: start/end angles interleaved pairwise so that
    # geom_path draws one segment per point index.
    linesFun <- function(center = c(0,0),diameter = 1){
      r = diameter / 2
      # do not use lapply which returns a list, use this to have a vector:
      s <- sapply(i, startFun, simplify = TRUE)
      e <- sapply(i, endFun, simplify = TRUE)
      tt <- c(rbind(s, e))
      xx <- center[1] + r * cos(tt)
      yy <- center[2] + r * sin(tt)
      return(data.frame(x = xx, y = yy))
    }
    dat <- circleFun(c(0,0), 2)
    dat2 <- linesFun(c(0,0), 2)
    # NOTE(review): ggplot() has no `asp` argument - it is silently absorbed
    # by `...`, so the aspect ratio is NOT fixed; adding `+ coord_fixed()`
    # would make the circle render round. Also, geom_path() over dat2 joins
    # consecutive chords with extra strokes; confirm intended appearance.
    g <- ggplot(asp = 1, dat,aes(x,y)) + geom_path() + geom_path(data=dat2)
    return(g)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
5439e9d3160fc501c674975d8a6a1cd2b7290186
|
015011d242b514c0a4925b859ebb7ae371351837
|
/Rscripts/gff2gtf.R
|
cb839c7c7f7e4bf08d4ca896bba7d81c28adb240
|
[
"MIT"
] |
permissive
|
devxia/NativeRNAseqComplexTranscriptome
|
763f93b014f66b0b7b1b70046594521e3bdeaf00
|
1ef939d7606527283b3db1855be5c775a635089c
|
refs/heads/master
| 2022-02-28T18:59:15.892634
| 2019-10-29T19:13:54
| 2019-10-29T19:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 879
|
r
|
gff2gtf.R
|
# gff2gtf.R - convert a GFF annotation to GTF, dropping over-long features.
# Usage: Rscript gff2gtf.R gff=<input.gff> gtf=<output.gtf>
# Side effects: writes <input>.fixed.gff next to the input, then the GTF.
#
# Parse name=value command-line arguments by evaluating them as R code.
# NOTE(review): eval(parse(text = ...)) executes arbitrary R from the
# command line - tolerable inside a trusted pipeline, but never expose it
# to untrusted input. Also, 1:length(args) iterates over c(1, 0) when no
# arguments are given; seq_along(args) would be safer.
args <- (commandArgs(trailingOnly = TRUE))
for (i in 1:length(args)) {
  eval(parse(text = args[[i]]))
}
# Echo the parsed input/output paths into the pipeline log.
print(gff)
print(gtf)
suppressPackageStartupMessages({
  library(rtracklayer)
  library(readr)
  library(withr)
})
## Filter out exons that can not be handled by gffcompare
# Read the GFF as 9 untyped columns; X4/X5 are the start/end coordinates.
x <- readr::read_tsv(gff, col_names = FALSE, col_types = "cccddcccc")
dim(x)
message("Excluding the following lines:")
# Features spanning >= 30 kb break gffcompare: print them, then drop them.
x[x$X5 - x$X4 >= 30000, ]
x <- x[x$X5 - x$X4 < 30000, ]
dim(x)
# scipen = 100 prevents large coordinates from being written in scientific
# notation, which would corrupt the GFF.
withr::with_options(c(scipen = 100),
                    write.table(x, file = gsub("\\.gff$", ".fixed.gff", gff),
                                quote = FALSE, sep = "\t", row.names = FALSE,
                                col.names = FALSE))
# Re-import the filtered GFF and rename the 'group' attribute to the
# 'transcript_id' attribute expected by GTF consumers.
x <- rtracklayer::import(gsub("\\.gff$", ".fixed.gff", gff))
x$transcript_id <- as.character(x$group)
x$group <- NULL
rtracklayer::export(x, gtf)
# Provenance for the log.
date()
sessionInfo()
|
c699ebdac44df080840ac108a8838f6f115897fa
|
3eb24ba0d0a6b79c441cfd393ef035d0dce36b3f
|
/personal/baby_names.R
|
d638ccd8f425309fe264b1697fe878a90acc6112
|
[] |
no_license
|
mdgbeck/projects
|
4e57c33e5919b4c77bedd27c3dd6a7ddf298a3ac
|
6e8205dcc139a5d2335e35ba667f056e2f569e10
|
refs/heads/master
| 2022-05-30T02:10:34.793217
| 2022-05-18T18:38:41
| 2022-05-18T18:38:41
| 124,141,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,377
|
r
|
baby_names.R
|
# Exploratory analysis of US baby names (babynames package):
# (1) find girls' names whose popularity has been unusually stable, and
# (2) find names whose boy/girl balance shifted the most since 1980.
library(tidyverse)
library(lubridate)
library(babynames)
library(mdgr)    # personal package; provides theme_mdgr() -- TODO confirm
library(scales)

# Girls' names since 1950 with >15 occurrences per year, restricted to
# names present in every year of the window.
names <- babynames %>%
  filter(year >= 1950 &
           sex == "F" &
           n > 15) %>%
  group_by(name) %>%
  mutate(n_years = n_distinct(year),
         prev = lag(prop),                   # previous year's proportion
         change = prop - prev,
         perc_chg = 100 * change / prop) %>% # year-over-year % change
  filter(n_years == max(n_years))

# Per-name summary of the year-over-year changes, sorted so the most
# stable names (smallest sd of % change) come first.
name_var <- names %>%
  group_by(name) %>%
  summarize(n = sum(n),
            yrs = n_distinct(year),
            avg = mean(perc_chg, na.rm = TRUE),
            med = median(perc_chg, na.rm = TRUE),
            sd_perc = sd(perc_chg, na.rm = TRUE)) %>%
  mutate(abs_avg = abs(avg),
         abs_med = abs(med)) %>%
  filter(yrs == n_distinct(names$year)) %>%
  #arrange(desc(n)) %>%
  #arrange(abs(sd_perc))
  arrange(sd_perc)

# All candidate names in grey; the five flattest (|avg| < 2) highlighted.
names %>%
  filter(name %in% name_var$name) %>%
  ggplot(aes(x=year, y = prop)) +
  geom_line(aes(group = name), color = "gray75", size = .5) +
  geom_line(data = filter(names,
                          #name %in% c("abc")),
                          name %in% name_var$name[name_var$abs_avg < 2][1:5]),
            aes(color = name), size = 1.25) +
  #coord_cartesian(ylim = c(0, .02)) +
  theme_mdgr()

# boy girl names: names used for both sexes since 1980 (>50/year each).
boys <- babynames %>%
  filter(year >= 1980 & sex == 'M' & n > 50) %>%
  group_by(name) %>%
  filter(n_distinct(year) == max(n_distinct(year))) %>%
  select(year, name, n_boys = n, prop_boys = prop)

girls <- babynames %>%
  filter(year >= 1980 & sex == 'F' & n > 50) %>%
  group_by(name) %>%
  filter(n_distinct(year) == max(n_distinct(year))) %>%
  select(year, name, n_girls = n, prop_girls = prop)

# Names appearing for both sexes in the same year, with the share of
# girls (perc_girl) and its distance from a 50/50 split (mid).
bg <- boys %>%
  inner_join(girls, by = c('year', 'name')) %>%
  mutate(
    perc_girl = n_girls / (n_boys + n_girls),
    mid = abs(.5 - perc_girl)
  )

# Per-name totals; var_girl measures how much the split moved over time.
bg_names <- bg %>%
  group_by(name) %>%
  summarize(
    n_boys = sum(n_boys),
    n_girls = sum(n_girls),
    n = n_boys + n_girls,
    perc_girl_total = n_girls / n,
    mid_total = abs(.5 - perc_girl_total),
    var_girl = var(perc_girl)
  ) %>%
  arrange(desc(n))

var_names <- bg_names %>%
  arrange(desc(var_girl))

# plot of highest variance names (boy / girl change)
bg %>%
  filter(name %in% bg_names$name[1:1000]) %>%
  ggplot(aes(year, perc_girl)) +
  geom_line(aes(group = name), color = "gray75", size = .5, show.legend = NA) +
  geom_line(data = filter(bg, name %in% var_names$name[1:10]),
            aes(color = name), size = 1.25) +
  scale_y_continuous(label = percent) +
  theme_mdgr()

# overall popularity of name (pink = girls, blue = boys)
plot_data <- babynames %>%
  filter(name %in% var_names$name[1:5] & year > 1980)

ggplot(plot_data, aes(year, prop)) +
  geom_line(data = filter(plot_data, sex == 'F'),
            aes(group = name), color = 'pink') +
  geom_line(data = filter(plot_data, sex == 'M'),
            aes(group = name), color = 'skyblue') +
  geom_point(data = filter(plot_data, sex == 'F'),
             aes(color = name)) +
  geom_point(data = filter(plot_data, sex == 'M'),
             aes(color = name)) +
  scale_y_continuous(label = percent) +
  theme_mdgr()

# Export: both-sex names at least 5% girls, closest to a 50/50 split first.
write_csv(arrange(filter(bg_names, perc_girl_total > .05), mid_total), 'personal/names_bg1.csv', na="")

# Top 1000 girls' names of 2017.
girl2017 <- babynames %>%
  filter(year == 2017 & sex == 'F') %>%
  arrange(desc(n)) %>%
  slice(1:1000) %>%
  write_csv(., 'personal/names_2017.csv')
|
e6c2fe8e8ee0423f282d73de3fc3212b4baa1713
|
f78c451bc1d7892e6684f92413b20515fddc8936
|
/R/add.missing.expression.R
|
d3a089430e0a323e51e13cba11e0b4ecb358fc30
|
[] |
no_license
|
alexjcornish/DiseaseCellTypes
|
3336f0bd93406ddad021c1ee2d6c467cc834080f
|
ea5d62898549eacf653f3c388d86ceb4af05eef0
|
refs/heads/master
| 2021-01-19T08:46:26.242215
| 2015-09-02T09:03:56
| 2015-09-02T09:03:56
| 27,534,586
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
add.missing.expression.R
|
add.missing.expression <- function(
  expression,
  genes,
  score.missing
) {
  # Align the rows of 'expression' with the reference vector 'genes':
  # genes with no measurements are filled in with the constant
  # 'score.missing', and rows not listed in 'genes' are discarded.
  # Row order of the result: measured genes first (in 'genes' order),
  # then the filled-in genes; columns (contexts) are unchanged.
  if (is.null(rownames(expression))) stop("genes not found as rownames in expression")

  contexts   <- colnames(expression)
  n.contexts <- ncol(expression)

  present <- genes[genes %in% rownames(expression)]
  absent  <- genes[!genes %in% rownames(expression)]

  # Measured block: numeric copy of the matching rows of 'expression'.
  measured <- array(
    as.numeric(expression[present, ]),
    dim      = c(length(present), n.contexts),
    dimnames = list(present, contexts)
  )
  # Missing block: constant fill for genes without data.
  filled <- array(
    score.missing,
    dim      = c(length(absent), n.contexts),
    dimnames = list(absent, contexts)
  )
  rbind(measured, filled)
}
|
19dc627e9408e01e2c25d447c0b7133820c677ac
|
f951a642ade060ee0c084b610faf33eddc7901e2
|
/util/copy_high_performing_benchmark.R
|
6f47d577b57daee22ec684a610298ef915774dd5
|
[] |
no_license
|
tadeze/ADMV
|
fc1ac13c7ff5c72f05f6aff3100565938243bd11
|
fc4a0bf011f28355f5910c5d6d1f84cfed51a18c
|
refs/heads/master
| 2022-01-15T12:56:25.767940
| 2019-05-06T23:55:05
| 2019-05-06T23:55:05
| 117,147,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,075
|
r
|
copy_high_performing_benchmark.R
|
# Copy high-performing benchmark datasets (ranked by iforest AUC, rank
# positions 290-300 per benchmark) to a destination directory, and record
# the corresponding AUC scores in dataset_used_summary.csv.
pathdir     <- "/nfs/guille/bugid/adams/meta_analysis/results_summaries/"
destination <- "/nfs/guille/bugid/adams/ifTadesse/kddexperiment/group2/"
ff <- list.files(pathdir)
aucs <- data.frame()
for (bench in ff) {
  # Skip aggregate summary directories and benchmarks we do not use
  # (hoisted out of the inner loop: previously each skipped benchmark's
  # AUC file was still read 11 times before the `next`).
  if (bench == "new_all" || bench == "all") next
  if (bench %in% c("particle", "gas", "yeast", "synthetic", "yearp")) next
  for (idx in 290:300) {
    bench_name <- paste0(pathdir, bench, "/auc_", bench, ".csv")
    res <- read.csv(bench_name, TRUE)
    res <- res[order(res$iforest, decreasing = TRUE), ]
    # abalone has extra rows; shift the selection window down by 40.
    if (bench == "abalone") {
      idx <- idx + 40
    }
    cat(bench, "_", res$bench.id[idx], res$iforest[idx], "\n")
    filename <- paste0("/nfs/guille/bugid/adams/meta_analysis/benchmarks/",
                       bench, "/", res$bench.id[idx], ".csv")
    xx <- read.csv(filename)  # sanity read: fails fast if the file is missing
    #print(nrow(xx))
    aucs <- rbind(aucs, data.frame(res$bench.id[idx], res$iforest[idx],
                                   res$loda[idx], res$egmm[idx]))
    file.copy(filename, destination)
  }
}
# Fixed: was `writet.table`, an undefined function that aborted the
# script right here, so the summary file was never written.
write.table(aucs, "dataset_used_summary.csv", row.names = FALSE, quote = FALSE)
#"/nfs/guille/bugid/adams/meta_analysis/results_summaries/abalone/auc_abalone.csv"
|
a9a6954c347eed7cd7bf38c53b7410a5b14e4490
|
0a9d14249e04d4daeb7ef2df3afb2db5e47d4551
|
/man/custom_distance.Rd
|
4299717257636d26a83fc24e4d9b693be7c569c6
|
[] |
no_license
|
vda-lab/stad
|
5f61154db5e7be4527d221e52fd5a88f908f41a5
|
f7ab25e6492c0360e2093e6f73bf005d087d56db
|
refs/heads/master
| 2020-04-25T20:53:38.508275
| 2020-03-22T13:32:05
| 2020-03-22T13:32:05
| 173,063,712
| 5
| 2
| null | 2020-03-22T12:49:05
| 2019-02-28T07:31:07
|
R
|
UTF-8
|
R
| false
| true
| 590
|
rd
|
custom_distance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stad_with_lens.R
\name{custom_distance}
\alias{custom_distance}
\title{Custom distance}
\usage{
custom_distance(x, metric)
}
\arguments{
\item{x}{array variable. Dimension of the lens.}
\item{metric}{string or array defining the metrics supported ("polar" or "euclidean").}
}
\value{
Returns a \code{dist} object with the distance of the filter.
}
\description{
Internal distance matrix for bivariate_split. Uses a polar or Euclidean metric. Returns a distance matrix computed as the sum over the two independent dimensions.
}
|
9f0c47952e7f03bc6b122c904c6846c6b11948d9
|
f374f8e079698141cde195c14717f6e56a48087b
|
/4_split.r
|
ea57e4b2c01aacaccb56bb330f0049dbd3c76c9e
|
[] |
no_license
|
DavidMoranPomes/census-profiling-and-income-prediction
|
d42601376c80569ec6444fbf475ed282d90dafdc
|
68cd49e4fdffd550a54c845d91b45dfa6d60af69
|
refs/heads/master
| 2020-06-17T20:45:01.995050
| 2019-07-09T17:13:26
| 2019-07-09T17:13:26
| 196,048,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 226
|
r
|
4_split.r
|
# Split the adult data set into equal train/test halves, labelled via a
# new factor column `Set`, and save the result.
# NOTE(review): load() expects an .RData-format file; despite the .rds
# extension this file was presumably written with save() rather than
# saveRDS() -- confirm, otherwise switch to readRDS()/saveRDS().
load('data/adult.rds')
set.seed(1234)  # reproducible split
train <- sample(nrow(adult), 0.5*nrow(adult))  # row indices of the training half
set <- logical(nrow(adult))  # FALSE for every row...
set[train] <- TRUE           # ...then TRUE for the sampled training rows
adult$Set <- factor(ifelse(set, 'Train', 'Test'))
# Written with save(), so the output is .RData format despite the extension.
save(adult, file='data/adult_split.rds')
|
76b441761aa261758a9e3e376c77ee4130b7076e
|
95d43b808610f161c3e28b6e985ca2f5c319b754
|
/Using R/Text Mining/2/2.R
|
0a1ec624a6dbc57e7917790a9d9c92a189b0543b
|
[] |
no_license
|
dharmesh-coder/Data-Science
|
824b53a9b867e4eb268b18a5d03b1c8991491f83
|
913e6e1e2503cd2be94f458ce6add3afa2a63605
|
refs/heads/master
| 2023-04-19T20:58:39.355216
| 2021-05-15T17:00:51
| 2021-05-15T17:00:51
| 367,411,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
2.R
|
# Scrape Martin Luther King's "I Have a Dream" speech and build a
# document-term matrix, then run basic frequency/association analyses.
library(dplyr)
library(rvest)
library(NLP)
library(tm)

url <- "http://www.analytictech.com/mb021/mlk.htm"
data <- read_html(url)
# One document per <p> paragraph of the page.
text <- data %>% html_nodes("p") %>% html_text()
text

doc <- Corpus(VectorSource(text))
inspect(doc)

# Clean-up. Two fixes versus the original:
#  * base-R functions must be wrapped in content_transformer(), otherwise
#    tm_map() corrupts the corpus structure in current tm versions;
#  * lower-case BEFORE removing stopwords so capitalised words
#    ("The", "And", ...) are matched by the stopword list.
doc <- tm_map(doc, removeNumbers)
doc <- tm_map(doc, content_transformer(tolower))
doc <- tm_map(doc, removeWords, stopwords("english"))
doc <- tm_map(doc, stripWhitespace)

dtm <- DocumentTermMatrix(doc)

# Top-20 most frequent terms.
freq <- colSums(as.matrix(dtm))
ord <- order(freq, decreasing = TRUE)
freq[head(ord, n = 20)]

findFreqTerms(dtm, lowfreq = 5)                  # terms appearing >= 5 times
findAssocs(dtm, terms = 'life', corlimit = 0.7)  # terms correlated with "life"
removeSparseTerms(dtm, 0.3)                      # drop very sparse terms
|
879c01adf16e609b5216fd5a331a90621b662569
|
405c68ad20a0a48272b7fe53d85a841146fdc488
|
/R/dvinesim.R
|
44d292b98225ca9491b5deb82f35fa37cdfffd5d
|
[] |
no_license
|
cran/CopulaREMADA
|
1d65cac1c329c311f1133422b3f0748f618c85e6
|
2d861b35e51a117c2c8ec0407619b4b5da8f9d2d
|
refs/heads/master
| 2022-08-20T08:58:43.777187
| 2022-08-07T15:10:04
| 2022-08-07T15:10:04
| 31,186,515
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 830
|
r
|
dvinesim.R
|
# Simulate from a 4-dimensional D-vine copula (variable path 1-2-3-4).
#
# Arguments:
#   nsim             number of samples to draw
#   param            length-6 vector of Kendall's taus, in the order
#                    tau12, tau23, tau34, tau13.2, tau24.3, tau14.23
#                    (tree-1 edges first, then the conditional edges)
#   qcond1, pcond1   conditional quantile / cdf functions of the bivariate
#                    copula family used for the (1,2) and (3,4) edges
#   tau2par1         maps Kendall's tau to that family's parameter
#   qcond2, pcond2   same, for the (2,3) edge
#   tau2par2         tau -> parameter map for the (2,3) family
#
# Edges in trees 2 and 3 use the bivariate normal copula through
# tau2par.bvn / qcondbvn / pcondbvn (defined elsewhere in this package --
# presumably CopulaREMADA internals; confirm before reuse).
#
# Value: an nsim x 4 matrix of U(0,1) margins (columns u1..u4), generated
# by the standard sequential-inversion algorithm for D-vines.
dvinesim=function(nsim,param,qcond1,pcond1,tau2par1,qcond2,pcond2,tau2par2)
{ tau12=param[1]
tau23=param[2]
tau34=param[3]
tau13.2=param[4]
tau24.3=param[5]
tau14.23=param[6]
# Independent uniforms driving the simulation, one column per margin.
p = matrix(runif(nsim * 4), nsim, 4)
# th holds the copula parameter of each vine edge, converted from tau
# (upper-triangular storage; indexing scheme follows the original code).
th=matrix(0,4,4)
th[1,2]=tau2par1(tau12)
th[1,3]=tau2par2(tau23)
th[1,4]=tau2par1(tau34)
th[2,3]=tau2par.bvn(tau13.2)
th[2,4]=tau2par.bvn(tau24.3)
th[3,4]=tau2par.bvn(tau14.23)
# u1 is uniform; each later margin is obtained by inverting a chain of
# conditional distributions. qXY are intermediate conditional quantiles,
# vXY the corresponding conditional cdf values passed up the vine.
u1=p[,1]
q11=p[,1]
q22=p[,2]
u2=qcond1(p[,2],p[,1],th[1,2])
q12=u2
v12=pcond1(u1,u2,th[1,2])
q33=p[,3]
q23=qcondbvn(q33,v12,th[2,3])
q13=qcond2(q23,u2,th[1,3])
u3=q13
v13=pcond2(u2,u3,th[1,3])
v23=pcondbvn(v12,q23,th[2,3])
q44=p[,4]
q34=qcondbvn(q44,v23,th[3,4])
q24=qcondbvn(q34,v13,th[2,4])
q14=qcond1(q24,u3,th[1,4])
u4=q14
# nsim x 4 matrix of uniform margins.
cbind(u1,u2,u3,u4)
}
|
08fec4879d5acdbbee49e87581985bd461da7ea3
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2016-April/11-Optimization/3d-visulizations.R
|
6794b8abb5418e00cb401e851e01be90d07b1e36
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996
| 2017-08-01T04:39:07
| 2017-08-01T04:39:07
| 101,746,310
| 1
| 0
| null | 2017-08-29T09:53:49
| 2017-08-29T09:53:49
| null |
UTF-8
|
R
| false
| false
| 579
|
r
|
3d-visulizations.R
|
# Demo: 3-D perspective plots of the plane f(x, y) = x + y with plot3D,
# rendered in three graphics windows with increasing styling, plus two
# red points overlaid on the final surface.
library(plot3D)

# Surface to visualise: a simple tilted plane.
plane <- function(u, v) {
  u + v
}

grid_x  <- seq(-2, 4, 0.5)
grid_y  <- seq(-2, 4, 0.5)
surface <- outer(grid_x, grid_y, plane)

# Plain perspective plot in a Windows graphics device.
windows(width = 50, height = 60)
persp3D(grid_x, grid_y, surface, xlab = "x", ylab = "y", zlab = "f",
        theta = 30, phi = 10)

# Heat-coloured version without a colour key, in an X11 device.
X11()
persp3D(grid_x, grid_y, surface, xlab = "x", ylab = "y", zlab = "f",
        color.palette = heat.colors, theta = 30, phi = 10, colkey = FALSE)

# Same palette with grey facet borders and detailed axis ticks.
X11()
persp3D(grid_x, grid_y, surface, xlab = "x", ylab = "y", zlab = "f",
        color.palette = heat.colors, border = "#808080", theta = 30,
        phi = 10, colkey = FALSE, ticktype = "detailed")

# Overlay two red points on the current (last) plot.
x <- c(-1, 1)
y <- c(2, 0)
z <- c(3, 3)
points3D(x, y, z, pch = 20, col = 'red', add = TRUE)
|
94fcfdb992e5a10002efadc6f0e0248bc1ec24f4
|
2e42b670fdffe5bf4844cb73aec4ed4943aa1e36
|
/man/shift_index.Rd
|
82efd4e1fab0cbf8311a4d15dacc7a3ef5d776c0
|
[] |
no_license
|
pawelru/sudoku_r_game
|
9f8ba3c4b4862b1a77dccef696439afba55c422a
|
fbe9a7c8f3ab7b42415e130a4a0faea8c99ebd94
|
refs/heads/master
| 2020-04-02T04:42:35.292097
| 2018-10-21T17:05:57
| 2018-10-21T17:05:57
| 154,030,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 362
|
rd
|
shift_index.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix.R
\name{shift_index}
\alias{shift_index}
\title{Move elements from tail to the head of vector}
\usage{
shift_index(x, n)
}
\arguments{
\item{x}{vector to be shifted}
\item{n}{shift value}
}
\value{
shifted vector
}
\description{
Move elements from tail to the head of vector
}
|
69938ceac993f32f32276cab8aa94d8a6f588e16
|
cbdc14ffeac8a3ea94cb4e27be81f97c121801ef
|
/Normal Contaminada - Poder Sinal.R
|
4f98b31e6ad026179e10aa1617832efd4086b6f5
|
[] |
no_license
|
ArthurCarneiroLeao/Monte-Carlo-Sinal
|
c373e0f405253504062fa5d28413d58102e696e1
|
28c61e9c38fc8b7ad9d8db6f3915550a3a1ddb78
|
refs/heads/master
| 2020-04-02T16:16:37.471734
| 2018-11-20T19:59:02
| 2018-11-20T19:59:02
| 154,605,992
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,747
|
r
|
Normal Contaminada - Poder Sinal.R
|
# Contaminated normal - power of the sign test (5% significance level).
# Monte-Carlo power curves of the one-sample sign test (SIGN.test,
# H0: median = 0) under a "contaminated" normal model, for sample sizes
# n = 5, 10, 30, 80 over a grid of true means theta.
#
# NOTE(review): the generator
#   x <- (1 - delta)*rnorm(j, theta, 1) + (delta*rnorm(j, theta, 1))/sqrt(lambda)
# is a weighted SUM of two independent normals, not the usual
# contaminated-normal MIXTURE (which draws each observation from the
# narrow component with probability delta). Confirm this is the intended
# model before reusing these results.

### contamination level 5% and lambda = 3
library(BSDA)
library(tidyverse)
delta <- 0.05
lambda <- 3
r<-1000                        # Monte-Carlo replications per theta
theta.test<-c(seq(-2,2,0.05))  # grid of true means
M <- length(theta.test)
power <- numeric(M)
nobs<-c(5, 10, 30, 80) # vector of different sample sizes
power_nobs <- matrix(0,length(nobs),M) # storage matrix: one row per sample size
cont <- 1
for (j in nobs){
  for (i in 1:M) {
    theta<-theta.test[i]
    # r replications: simulate a sample, run the sign test, keep p-value.
    p_value <- replicate(r, expr = {
      x <- (1 - delta)*rnorm(j, theta, 1) + (delta*rnorm(j, theta, 1))/sqrt(lambda)
      SinalTest<-SIGN.test(x,mu=0)
      SinalTest$p.value })
    power[i] <- mean(p_value <= 0.05)  # empirical rejection rate
  }
  power_nobs[cont,] <- power
  cont = cont+1
}
# One power curve per sample size ("Poder" = power).
x11()
par(mfrow=c(2,2))
plot(theta.test, power_nobs[1,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 5")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[2,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 10")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[3,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 30")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[4,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 80")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)

### contamination level 10% and lambda = 3 (same procedure as above)
delta <- 0.10
lambda <- 3
r<-1000
theta.test<-c(seq(-2,2,0.05))
M <- length(theta.test)
power <- numeric(M)
nobs<-c(5, 10, 30, 80) # vector of different sample sizes
power_nobs <- matrix(0,length(nobs),M) # storage matrix: one row per sample size
cont <- 1
for (j in nobs){
  for (i in 1:M) {
    theta<-theta.test[i]
    p_value <- replicate(r, expr = {
      x <- (1 - delta)*rnorm(j, theta, 1) + (delta*rnorm(j, theta, 1))/sqrt(lambda)
      SinalTest<-SIGN.test(x,mu=0)
      SinalTest$p.value })
    power[i] <- mean(p_value <= 0.05)
  }
  power_nobs[cont,] <- power
  cont = cont+1
}
x11()
par(mfrow=c(2,2))
plot(theta.test, power_nobs[1,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 5")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[2,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 10")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[3,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 30")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
plot(theta.test, power_nobs[4,], type = "l", xlab = bquote(theta), ylab = "Poder", main = "n = 80")
abline(v = 0.0, lwd = 2, col = "grey80", lty = 2)
|
c55376daa9ff2fee7e891353fc4109f16d550e3a
|
84a34111f811cc0aa836707d1c22aaece01f2ead
|
/Programming Assignment 3/rankhospital.R
|
31b42453725e9960be5dc9d70dcb9c4e9d9fd3e7
|
[] |
no_license
|
JaMedina/DS_R_Programming
|
8912cf2bfe30e9a32a3db887c5b4dbb7f32b9832
|
5220028e3efcdb213d0a108deaa18df4e9d09069
|
refs/heads/master
| 2020-05-14T11:28:50.485432
| 2014-04-30T18:44:19
| 2014-04-30T18:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
  # Return the name of the hospital in `state` with the `num`-th lowest
  # 30-day mortality rate for `outcome`.
  #
  # Args:
  #   state   - two-letter state abbreviation present in the data file
  #   outcome - one of "heart attack", "heart failure", "pneumonia"
  #   num     - rank to return: "best", "worst", or a numeric rank
  #
  # Returns the hospital name (character), or NA when `num` exceeds the
  # number of hospitals with data. Ties are broken alphabetically.
  # Reads "outcome-of-care-measures.csv" from the working directory.
  outcome_measures <- read.csv("outcome-of-care-measures.csv",
                               colClasses = "character")
  if (!state %in% outcome_measures$State) {
    stop("Invalid State.")
  }
  outcome_measures <- outcome_measures[outcome_measures$State == state, ]

  # Column indices of the 30-day mortality rates in the raw file.
  col <- switch(outcome,
                "heart attack"  = 11,
                "heart failure" = 17,
                "pneumonia"     = 23)
  if (is.null(col)) {
    stop("Invalid outcome")  # fixed typo: message was "Invalud outcome"
  }
  number_of_deaths <- as.numeric(outcome_measures[, col])

  # Number of hospitals that actually have data for this outcome.
  n_ranked <- sum(!is.na(number_of_deaths))
  if (num == "best") {
    ranking <- 1
  } else if (num == "worst") {
    ranking <- n_ranked
  } else if (num <= n_ranked) {
    ranking <- num
  } else {
    return(NA)
  }
  # Order by mortality with ties broken by hospital name; NAs sort last,
  # so any index <= n_ranked lands on a hospital with data.
  outcome_measures$Hospital.Name[
    order(number_of_deaths, outcome_measures$Hospital.Name)[ranking]]
}
|
785c2b046bf33ad64145c987ab3a41fe63bb0f06
|
fd0e2346e6d3002ef95eb0f826b35cd6260aea10
|
/man/adjust_Rsq.Rd
|
4f56c03055ae11b08eeff2d429674976b504ded7
|
[] |
no_license
|
cran/configural
|
91f24339a1c235245a8a7c2657db3ca13766944f
|
c192d23b7db98216dc4c07b9a282bb4f9a9d4a28
|
refs/heads/master
| 2021-06-19T14:45:02.052286
| 2021-01-18T20:30:03
| 2021-01-18T20:30:03
| 171,525,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,276
|
rd
|
adjust_Rsq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\encoding{UTF-8}
\name{adjust_Rsq}
\alias{adjust_Rsq}
\title{Adjust a regression model R-squared for overfitting}
\usage{
adjust_Rsq(Rsq, n, p, adjust = c("fisher", "pop", "cv"))
}
\arguments{
\item{Rsq}{Observed model R-squared}
\item{n}{Sample size}
\item{p}{Number of predictors}
\item{adjust}{Which adjustment to apply. Options are "fisher" for the Adjusted R-squared method used in \code{\link[stats:lm]{stats::lm()}}, "pop" for the positive-part Pratt estimator of the population R-squared, and "cv" for the Browne/positive-part Pratt estimator of the cross-validity R-squared. Based on Shieh (2008), these are the estimators for the population and cross-validity R-squared values that show the least bias with a minimal increase in computational complexity.}
}
\value{
An adjusted R-squared value.
}
\description{
Estimate shrinkage for regression models
}
\examples{
adjust_Rsq(.55, 100, 6, adjust = "pop")
}
\references{
Shieh, G. (2008).
Improved shrinkage estimation of squared multiple correlation coefficient and squared cross-validity coefficient.
\emph{Organizational Research Methods, 11}(2), 387–407. \doi{10.1177/1094428106292901}
}
|
3ddea2d03c7da8a6815210960033569cc9494899
|
4f9a3ae52cfe45a839a7f293b764ea97e0a4438e
|
/Day2.R
|
0c96c68cc4d674679fda442e7ac9aa84d1836001
|
[] |
no_license
|
eileenschaub/R_in_Git
|
40c113c95a03ea7a889186486f990f1577a2b47d
|
10e53daeac772432c99c4eff1e557247c74f3b11
|
refs/heads/master
| 2020-04-16T05:32:30.483587
| 2019-01-11T21:18:37
| 2019-01-11T21:18:37
| 165,310,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,171
|
r
|
Day2.R
|
# Software Carpentry R Workshop - James's Code
# Conditionals: if / else / else-if, and combining scalar conditions.

number <- 37
if (number > 100) {
  print("greater than 100")
}
# 37 is not greater than 100, so nothing is printed above.

number <- 37
if (number > 100) {
  print("greater than 100")
} else {
  print("less than or equal to 100")
}

# Comparison operators: less than <, equal to ==, not equal to !=
number <- -3
if (number > 0) {
  print(1)
} else if (number < 0) {
  print(-1)
} else {
  print(0)
}

# Combining conditions with & (both operands are single values here).
number1 <- -15
number2 <- 40
if (number1 >= 0 & number2 >= 0) {
  print("both numbers are positive")
} else {
  print("at least one number was negative")
}
# Example application: figuring out the boundaries of stages, e.g.
# finding which sites have no flowers at a certain collection period.
# like finding which sites have no flowers at a certain coll. period
# Loops!
# Automating & doing repetetive tasks.
numbers <- 1:10
# count to ten
# don't forget to actually ctrl+r when you assign objects
# for loop
for(number in numbers) {
print(number)
}
for(i in 1:10) {
print(i)
}
# So R stores whatever number in the loop in 'i'.
print(i)
# It prints the last number that was assigned to 'i'.
letter <- "z"
print(letter)
for(letter in c("a","b","c")) {
print(letter)
}
print(letter)
# 'c' was the last thing stored in the letter variable / the increment variable.
# Socrative: Write a for loop that will calculate the sum of a vector of numbers and print it out at the end... without using 'sum()'.
numbers <- c(4,8,16,23,42)
sum <- 0
for(number in numbers) {
sum <- sum+number
print(sum)
}
# You can define objects, like 'number', within the loop
# It will write over the value assignment each time the loop runs.
# For example, here it is basically doing "number <- 0, number <- 0 + 4,
# number <- 4+8, number <- 12+16 etc etc.
# Now you can do the for loop and have it add the things, and run the print command
# AFTER the loop close, so that you don't have a zillion numbers on the result.
for(number in numbers) {
sum <- sum+number
}
print(sum)
#viz.
# Functions
# To see the source code of a function, run its name with no parentheses.
dim
nrow
# Fahrenheit to Kelvin function
fahr_to_kelvin <- function(temp) {
  kelvin <- ((temp - 32) * (5/9) + 273.15)
  return(kelvin) # this tells the function what to send back when you run it
}
fahr_to_kelvin(32)   # 273.15 (freezing point)
fahr_to_kelvin(212)  # 373.15 (boiling point)
kelvin_to_celsius <- function(temp){
  celsius <- temp - 273.15
  return(celsius)
}
kelvin_to_celsius(0)  # -273.15 (absolute zero)
# Socrative
# Function to convert fahrenheit to kelvin
# NOTE: this redefinition overwrites the fahr_to_kelvin defined above
# (same formula, reusing `temp` instead of a separate variable).
fahr_to_kelvin <- function(temp) {
  temp <- ((temp - 32) * (5 / 9)) + 273.15
  return(temp)
}
# Store the current temperature in F
temp <- 73
# Get the temperature in kelvin
kelvin_temp <- fahr_to_kelvin(temp)
# Print the temperature
# NOTE(review): this prints the Fahrenheit input, not kelvin_temp --
# presumably the deliberate "spot the bug" part of the exercise.
print(temp)
# Socrative: Write a function to convert a temperature in Celsius to Fahrenheit using # the formula: F = C * 9 / 5 + 32
celsius_to_fahr <- function(temp) {
  fahr <- ((temp) * (9/5) + 32)
  return(fahr)
}
celsius_to_fahr(0)    # 32
celsius_to_fahr(100)  # 212
celsius_to_fahr(37)   # 98.6
# Day 2 afternoon: data manipulation with ~*Cera*~ #
# RMarkdown
# Side effect: installs packages into the user library when sourced.
install.packages(c("tidyr","dplyr","knitr","rmarkdown","formatR"))
|
7f748fa95c05e7506f93902546a9c97aab1f0e8f
|
dccbfbe9eae4f0ad67ee5f32b38f6b2caed616ff
|
/plot3.R
|
ffd4c2e7cc27041b3941e23f084a259887a6c36c
|
[] |
no_license
|
andreaslowe/ExData_Plotting1
|
12bc4c35880f6e3887b440d4a6a9e535a7f21f35
|
a74db3f346fcc26cd4045b5d37485ebcb86ec1fd
|
refs/heads/master
| 2021-01-21T10:34:30.523257
| 2017-02-28T22:32:14
| 2017-02-28T22:32:14
| 83,455,846
| 0
| 0
| null | 2017-02-28T16:39:47
| 2017-02-28T16:39:47
| null |
UTF-8
|
R
| false
| false
| 876
|
r
|
plot3.R
|
#Exploratory Data Analysis Assignment 1
#working data from plot1.R and plot2.R is used
# Plot 3: the three energy sub-metering series over time, saved to PNG.
# NOTE(review): relies on `workingdata` (with a datetime column and the
# three Sub_metering_* columns) already existing in the workspace,
# created by plot1.R / plot2.R -- run those scripts first.
png(filename = "plot3.png", width = 480, height = 480) #open png device, set w x h in pixels (though 480 is typically default)
par(pty="s") #make plot square
#create plot and add submetering 1 line (default is black so don't need to specify)
plot(workingdata$datetime, workingdata$Sub_metering_1, type = "l", ylab = "Energy sub metering",
     xlab = "")
#add submetering 2 line in red
lines(workingdata$datetime, workingdata$Sub_metering_2, col = "red")
#add submetering 3 line in blue
lines(workingdata$datetime, workingdata$Sub_metering_3, col = "blue")
#add the legend in the top right corner
legend("topright", lty = c(1,1,1), col = c("black", "red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off() #closes device
|
c1df7a26ef050267d3631835756b16bb20043553
|
67319c944a4e8b4da2733c16803dc23b1d94bb2d
|
/Tides and lunar data scrape code.R
|
c87a1777cb19fa7af5d91bb83773112e1d94b906
|
[] |
no_license
|
Plaladin/SnowyPlover_cycles
|
d50e0810c6fccfecb27e6319611e23151978b6dc
|
4d364577a93ab8ee8cb257408d657e431f8b40d1
|
refs/heads/master
| 2021-01-20T14:39:08.988677
| 2018-06-12T15:01:55
| 2018-06-12T15:01:55
| 90,640,121
| 0
| 0
| null | 2017-05-08T14:53:06
| 2017-05-08T14:53:06
| null |
UTF-8
|
R
| false
| false
| 11,200
|
r
|
Tides and lunar data scrape code.R
|
# Scrape tide data (station 3689, yearly calendar pages), 2006-2016.
library(rvest)
library(plyr)  # for ldply() below -- previously called without being loaded

# 2006-2007 (2008 must be loaded separately: its tables carry two extra
# columns, Low.2 and High.3)
tides_06_07 <- lapply(paste0("http://tides.mobilegeographics.com/calendar/year/3689.html?y=", 2006:2007, "&m=4&d=1"),
                      function(url){
                        url %>% read_html() %>%
                          html_nodes("table") %>%
                          html_table()
                      })
tides_06_07_list <- do.call(rbind, tides_06_07)
tides_06_07_df   <- do.call(rbind, tides_06_07_list)

# 2008: scrape the monthly tables, drop the extra columns, then adopt
# the other years' column names so everything can be rbind-ed.
tides_08      <- read_html("http://tides.mobilegeographics.com/calendar/year/3689.html?y=2008&m=4&d=1")
tides_08_tbls <- html_nodes(tides_08, "table")
tides_08_list <- html_table(tides_08_tbls)         # list of the tables
tides_08_df   <- ldply(tides_08_list, data.frame)  # rbind, filling ragged columns
tides_08_df$Low.2  <- NULL
tides_08_df$High.3 <- NULL  # these two extra columns forced the separate scrape
colnames(tides_08_df) <- colnames(tides_06_07_df)  # needed for rbind

# 2009-2016
tides_09_16 <- lapply(paste0("http://tides.mobilegeographics.com/calendar/year/3689.html?y=", 2009:2016, "&m=4&d=1"),
                      function(url){
                        url %>% read_html() %>%
                          html_nodes("table") %>%
                          html_table()
                      })
tides_09_16_list <- do.call(rbind, tides_09_16)
tides_09_16_df   <- do.call(rbind, tides_09_16_list)

# 2006-2016 merged together, one row per day.
# (The original formatted the date to "%Y-%d-%m" and parsed it straight
# back with the same spec -- an identity round-trip, removed here.)
tides_all <- rbind(tides_06_07_df, tides_08_df, tides_09_16_df)
tides_all$date <- seq(as.Date("2006/1/1"), as.Date("2016/12/31"), "days")
tides_all$year <- format(tides_all$date, "%Y")
tides_all <- subset(tides_all, select = -c(Day, Sunrise, Sunset))
tides_all <- tides_all[, c(8, 7, 6, 1, 2, 3, 4, 5)]
colnames(tides_all) <- c("year", "date", "event",
                         "high_1", "low_1", "high_2", "low_2", "high_3")

# Pull the numeric tide height out of each scraped cell (characters
# 15-19 -- position assumed from the site's fixed layout; verify if the
# page format changes) and rescale by 100.
tides_all$high_1 <- substring(tides_all$high_1, 15, 19)
tides_all$low_1  <- substring(tides_all$low_1, 15, 19)
tides_all$high_2 <- substring(tides_all$high_2, 15, 19)
tides_all$low_2  <- substring(tides_all$low_2, 15, 19)
tides_all$high_3 <- substring(tides_all$high_3, 15, 19)
tides_all$high_1 <- as.numeric(tides_all$high_1) * 100
tides_all$high_2 <- as.numeric(tides_all$high_2) * 100
tides_all$high_3 <- as.numeric(tides_all$high_3) * 100
tides_all$low_1  <- as.numeric(tides_all$low_1) * 100
tides_all$low_2  <- as.numeric(tides_all$low_2) * 100

# Abbreviate the lunar events.
tides_all$event <- gsub('First Quarter', 'fq', tides_all$event)
tides_all$event <- gsub('Full Moon', 'fm', tides_all$event)
tides_all$event <- gsub('Last Quarter', 'lq', tides_all$event)
tides_all$event <- gsub('New Moon', 'nm', tides_all$event)

# Highest high tide of each day, on the same (x100) scale as high_1..3.
# Fixed: the previous version multiplied the daily maxima by 100 a
# second time, putting max_tide_height on a different scale.
tides_all$max_tide_height <- pmax(tides_all$high_1, tides_all$high_2,
                                  tides_all$high_3, na.rm = TRUE)
## Scrape the lunar data: meridian-passing time and illumination for
## Mazatlan, one row per calendar day, for each year 2006-2016.
## Refactored: the original repeated the same scrape/clean block once
## per year (11 near-identical copies); the logic now lives in one helper.

# Scrape the 12 monthly moon tables for one year and return a data frame
# with columns: date (Date), time (meridian passing), illumination.
scrape_moon_year <- function(year) {
  monthly <- lapply(paste0("https://www.timeanddate.com/moon/mexico/mazatlan?month=",
                           1:12, "&year=", year),
                    function(url){
                      url %>% read_html() %>%
                        html_nodes("table") %>%
                        html_table()
                    })
  df <- do.call(rbind, do.call(rbind, monthly))
  df <- df[, c(1, 8, 11)]  # day number, meridian-passing time, illumination
  colnames(df) <- c("date", "time", "illumination")
  # The tables also contain header/footer rows; keep only day rows.
  df <- df[df$date %in% 1:31, ]
  # Replace the day numbers with real dates (seq handles leap years).
  df$date <- seq(as.Date(paste0(year, "/1/1")),
                 as.Date(paste0(year, "/12/31")), "days")
  df
}

# Keep the original per-year object names so downstream code
# (moon_all <- rbind(moon_06_df, ..., moon_16_df)) is unaffected.
moon_06_df <- scrape_moon_year(2006)
moon_07_df <- scrape_moon_year(2007)
moon_08_df <- scrape_moon_year(2008)
moon_09_df <- scrape_moon_year(2009)
moon_10_df <- scrape_moon_year(2010)
moon_11_df <- scrape_moon_year(2011)
moon_12_df <- scrape_moon_year(2012)
moon_13_df <- scrape_moon_year(2013)
moon_14_df <- scrape_moon_year(2014)
moon_15_df <- scrape_moon_year(2015)
moon_16_df <- scrape_moon_year(2016)
#2006-2016 merged
# Stack the eleven yearly data frames (built above) into a single table.
moon_all <- rbind (moon_06_df, moon_07_df, moon_08_df , moon_09_df, moon_10_df, moon_11_df, moon_12_df, moon_13_df, moon_14_df, moon_15_df, moon_16_df)
# Days with no meridian passing carry a sentence instead of a value;
# replace it with the literal string "NA" so later parsing yields real NAs.
moon_all$time <- gsub("Moon does not pass the meridian on this day.","NA",moon_all$time )
moon_all$illumination <- gsub("Moon does not pass the meridian on this day.","NA",moon_all$illumination )
## Interpolation for illumination at 12 pm
Sys.setenv(TZ="UTC")
# Combine date + time into POSIXct; the "NA" strings parse to NA here.
moon_all$datetime<- as.POSIXct(paste(moon_all$date, moon_all$time), format="%Y-%m-%d %H:%M")
# "98.3%" / "98,3%" -> 0.983: strip percent sign, dot decimal, scale to [0,1].
moon_all$illumination <- gsub("%","",moon_all$illumination)
moon_all$illumination <- gsub(",",".",moon_all$illumination)
moon_all$illumination <- as.numeric(moon_all$illumination)/100
# Linear interpolator of illumination over time (NA points are dropped by approx).
f <- approxfun(moon_all$datetime,moon_all$illumination)
start <- as.POSIXct("2006-1-01 12:00:00", "%Y-%m-%d %H:%M:%S", tz="UTC")
end <- as.POSIXct("2016-12-31 12:00:00", "%Y-%m-%d %H:%M:%S", tz="UTC")
# One timestamp per day at 12:00 UTC over the full span.
# NOTE(review): the column assignments below assume moon_all has exactly one
# row per day in this range; otherwise they fail or recycle -- verify.
x <- seq(start, end, "days")
moon_all$noon <- x
moon_all$interpolated <- f(x)
moon_all$year <- format(moon_all$date, "%Y")
# Keep/rename: year, meridian-passing datetime, illumination at passing,
# noon timestamp, interpolated illumination at noon.
moon_all <- moon_all[,c(7,4,3,5,6)]
colnames(moon_all) <- c("year","meridian_passing","illumination_mp","noon","illumination_noon")
|
3fa3062766590ed6cf9215c9ab31588af97c30c4
|
169adce5d523299aaf1501d5cd3d45a64044c36e
|
/profile/man/write_rprof.Rd
|
63bc66a366a0519d3c42cad610ac1a085fd1b667
|
[] |
no_license
|
r-prof/_meta
|
2fd2d1872340496635d3cb4dd21618021637b9f7
|
c0d7ebe085f4e88cd72c11bfb05feded8db2df88
|
refs/heads/master
| 2021-09-01T04:37:25.881122
| 2017-12-24T21:30:59
| 2017-12-24T21:30:59
| 115,286,849
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 447
|
rd
|
write_rprof.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rprof-write.R
\name{write_rprof}
\alias{write_rprof}
\title{Write profiler data to an R profiler file}
\usage{
write_rprof(ds, path)
}
\arguments{
\item{ds}{Profiler data, see \code{\link[=validate_profile]{validate_profile()}}}
\item{path}{Target file name}
}
\description{
Use the profvis or proftools R packages to further analyze files created by
this function.
}
|
12fdfbef84fd4d2b36cfeae9c33870858a639c57
|
187fccafa2ac14ca45fadad0d2ca395e34e1b0f3
|
/Test file.R
|
fe39471b71737d1f3b9f8ef711dcc6f34a0347a3
|
[] |
no_license
|
ronmexico7811/Econ4670researchproject
|
82dedd449f602e543d93196c02b834fff6348b1f
|
e801b7f111f4ddf890ed3c517d9cdc6860da0f2d
|
refs/heads/master
| 2020-04-18T03:45:43.334354
| 2019-04-24T16:57:00
| 2019-04-24T16:57:00
| 167,212,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,484
|
r
|
Test file.R
|
# Balanced-sample SVM experiment: predict repeat shelter stays, comparing
# linear, radial and polynomial kernels on the same train/test split.
library(readr)
shelter_skim <- read_csv("Econ4670researchproject/shelter_skim.csv")
# Drop the enrollment-count column before modelling.
shelter_skim$TotalEnrollments = NULL
# Split records by whether the person stayed in a shelter more than once.
shelter_return <- subset(shelter_skim , StayedInShelterMoreThanOnce == 1)
shelter_no_return <- subset(shelter_skim , StayedInShelterMoreThanOnce == 0)
# Keep the first 328 non-returners.
# NOTE(review): 328 is presumably the size of the returner group, chosen to
# balance the classes -- confirm against nrow(shelter_return).
shelter_no <- shelter_no_return[1:328,]
shelter_sample <- merge(shelter_no , shelter_return , all = TRUE)
summary(shelter_sample)
# Shuffle rows before the train/test split.
# NOTE(review): no set.seed(), so the split (and all results) differ each run.
shelter_random <- shelter_sample[sample(nrow(shelter_sample)),]
#Import Library
library(e1071) #Contains the SVM
# 460 training rows / 196 test rows from the shuffled sample.
Train <- shelter_random[1:460,]
Test <- shelter_random[461:656,]
#model linear
model_l <- svm(StayedInShelterMoreThanOnce ~.,data=Train, kernel = "linear", type = "C-classification", gamma=0.2, cost=100)
summary(model_l)
#Predict Output
preds_l <- predict(model_l,Test)
# Confusion matrix: predictions vs. actual labels.
table(preds_l, Test$StayedInShelterMoreThanOnce)
#model radial
model_r <- svm(StayedInShelterMoreThanOnce ~.,data=Train, kernel = "radial", type = "C-classification", gamma=0.2, cost=100)
summary(model_r)
#Predict Output
preds_r <- predict(model_r,Test)
table(preds_r, Test$StayedInShelterMoreThanOnce)
#model polynomial
model_p <- svm(StayedInShelterMoreThanOnce ~.,data=Train, kernel = "polynomial", type = "C-classification", gamma=0.2, cost=100)
summary(model_p)
#Predict Output
preds_p <- predict(model_p,Test)
table(preds_p, Test$StayedInShelterMoreThanOnce)
# Number of distinct response values (sanity check: should be 2).
count <- length(unique(shelter_skim$StayedInShelterMoreThanOnce))
count
model_l$coefs
#plot(model_l , shelter_random , )
|
abe08a9a53ac2aa83692e7106e1c42aa896c817d
|
7e1b2b59a21d58ed8058df89a7b474b9bb4f3731
|
/data/youtube datacleaner.R
|
0a5a4e22f0f9aa72ed19e9227edc45fb5a3bc6ba
|
[] |
no_license
|
yaowser/YoutubeTrending
|
6c1fcec727bd7dad00b47548d90353a10853b192
|
e85fa2355fe7531a56d14a79606faa08cd30b09a
|
refs/heads/master
| 2021-04-15T15:03:05.208470
| 2018-04-26T12:31:40
| 2018-04-26T12:31:40
| 126,577,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,268
|
r
|
youtube datacleaner.R
|
# Setup: clear console, surface warnings immediately, load packages.
cat("\014")  # form feed: clears the RStudio console
options(warn=1)  # print warnings as they occur, not at the end
require(survey)
require(dplyr)
require(lattice)
#read in, remove columns, from https://www.kaggle.com/datasnaek/youtube-new/data
# NOTE(review): hardcoded user-specific working directory -- not portable.
setwd("C:/Users/Yao/Desktop/you")
# Each country file gets a country tag, then the same columns are dropped.
youtubeRawUS <- read.csv(file="USvideos.csv", header=TRUE, sep=",")
youtubeRawUS$country <- rep("US",nrow(youtubeRawUS))
youtubeRawUS <- youtubeRawUS[-c(3:4,7,12:16)]
youtubeRawCA <- read.csv(file="CAvideos.csv", header=TRUE, sep=",")
youtubeRawCA$country <- rep("CA",nrow(youtubeRawCA))
youtubeRawCA <- youtubeRawCA[-c(3:4,7,12:16)]
youtubeRawDE <- read.csv(file="DEvideos.csv", header=TRUE, sep=",")
youtubeRawDE$country <- rep("DE",nrow(youtubeRawDE))
youtubeRawDE <- youtubeRawDE[-c(3:4,7,12:16)]
youtubeRawFR <- read.csv(file="FRvideos.csv", header=TRUE, sep=",")
youtubeRawFR$country <- rep("FR",nrow(youtubeRawFR))
youtubeRawFR <- youtubeRawFR[-c(3:4,7,12:16)]
youtubeRawGB <- read.csv(file="GBvideos.csv", header=TRUE, sep=",")
youtubeRawGB$country <- rep("GB",nrow(youtubeRawGB))
youtubeRawGB <- youtubeRawGB[-c(3:4,7,12:16)]
# Stack all five countries into one data frame.
youtubeRaw<- rbind(youtubeRawUS, youtubeRawCA)
youtubeRaw<- rbind(youtubeRaw, youtubeRawDE)
youtubeRaw<- rbind(youtubeRaw, youtubeRawFR)
youtubeRaw<- rbind(youtubeRaw, youtubeRawGB)
head(youtubeRaw)
youtubeRaw2 <- youtubeRaw
#remove duplicates because they can be trending in multiple months, keep the least views to get on trending
# Sort by id then views ascending so the first occurrence kept by
# !duplicated() is the row with the fewest views.
youtubeRaw2 = youtubeRaw2[order(youtubeRaw2[,'video_id'],youtubeRaw2[,'views']),]
youtubeRaw2 = youtubeRaw2[!duplicated(youtubeRaw2$video_id),]
head(youtubeRaw2)
write.csv(youtubeRaw2, file = "youtubeRaw2.csv")
#replace strata categories into real names
# Map YouTube's numeric category ids to readable labels in a new column;
# ids not listed below remain NA in category_id2.
youtubeRaw2$category_id2[youtubeRaw2$category_id == '1'] <- 'Film & Animation'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '2'] <- 'Autos & Vehicles'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '10'] <- 'Music'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '15'] <- 'Pets & Animals'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '17'] <- 'Sports'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '19'] <- 'Travel & Events'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '20'] <- 'Gaming'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '22'] <- 'People & Blogs'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '23'] <- 'Comedy'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '24'] <- 'Entertainment'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '25'] <- 'News & Politics'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '26'] <- 'Howto & Style'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '27'] <- 'Education'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '28'] <- 'Science & Technology'
youtubeRaw2$category_id2[youtubeRaw2$category_id == '29'] <- 'Nonprofits & Activism'
# id 43 is a second "Science & Technology" id; fold it into id 28 so the
# strata are unique.
youtubeRaw2$category_id2[youtubeRaw2$category_id == '43'] <- 'Science & Technology'
youtubeRaw2$category_id[youtubeRaw2$category_id == '43'] <- '28'
#sanity check, remove column names
head(youtubeRaw2)
rownames(youtubeRaw2) <- c()
# Order rows by stratum label for the exported file.
youtubeRaw3 <- youtubeRaw2[order(youtubeRaw2$category_id2),]
head(youtubeRaw3)
write.csv(youtubeRaw3, file = "youtubeRaw3.csv", row.names=FALSE)
#use sas for more youtubeRaw3.csv dataset stats
#check initial distribution and number of rows
boxplot(youtubeRaw3$views, main="Uncleaned Boxplot Distribution of Videos Views", xlab="Trending Videos", ylab="Number of Views")
nrow(youtubeRaw3)
bwplot(views ~ category_id2 , data = youtubeRaw3, scales=list(x=list(rot=45)), main="Uncleaned Boxplot Distribution of Videos Views", xlab="Trending Videos Categories", ylab="Number of Views")
#solve for view count without cleaning data
# Raw (pre-cleaning) summary statistics of the views column.
sum(as.numeric(youtubeRaw3$views))
max(youtubeRaw3$views)
min(youtubeRaw3$views)
mean(youtubeRaw3$views)
#remove outliers more than 1.5 quant, save into new clean dataset
# Tukey-fence outlier filter: values more than 1.5 * IQR outside the
# quartiles are replaced with NA; everything else is returned unchanged.
#
# Args:
#   x:     numeric vector to filter.
#   na.rm: passed to quantile()/IQR() when computing the fences.
#   ...:   forwarded to quantile() (e.g. type=).
# Returns: a vector the same length as x with outliers set to NA.
remove_outliers <- function(x, na.rm = TRUE, ...) {
  fences <- quantile(x, probs = c(0.25, 0.75), na.rm = na.rm, ...)
  whisker <- 1.5 * IQR(x, na.rm = na.rm)
  lower <- fences[[1]] - whisker
  upper <- fences[[2]] + whisker
  filtered <- x
  filtered[x < lower | x > upper] <- NA
  filtered
}
# Apply the Tukey outlier filter to views, then keep only complete rows.
youtubeClean <- youtubeRaw3
youtubeClean$views <- remove_outliers(youtubeRaw3$views)
#check new distribution, only keep data within distribution
boxplot(youtubeClean$views, main="Cleaned Boxplot Distribution of Videos Views", xlab="Trending Videos", ylab="Number of Views")
bwplot(views ~ category_id2 , data = youtubeClean, scales=list(x=list(rot=45)), main="Cleaned Boxplot Distribution of Videos Views", xlab="Trending Videos Categories", ylab="Number of Views")
youtubeClean2 <- youtubeClean[complete.cases(youtubeClean), ]
write.csv(youtubeClean2, file = "youtubeClean2.csv", row.names=FALSE)
#solve for actual target average view count and number of rows for strata
sum(as.numeric(youtubeClean2$views))
nrow(youtubeClean2)
max(youtubeClean2$views)
min(youtubeClean2$views)
mean(youtubeClean2$views)
sd(youtubeClean2$views)
#average amount of views for the outliers removed
(sum(as.numeric(youtubeRaw3$views)) - sum(as.numeric(youtubeClean2$views))) / (nrow(youtubeRaw3) - nrow(youtubeClean2))
# Differences between raw and cleaned summary statistics.
max(youtubeRaw3$views)-max(youtubeClean2$views)
min(youtubeRaw3$views)-min(youtubeClean2$views)
mean(youtubeRaw3$views)-mean(youtubeClean2$views)
# Fraction of rows kept after cleaning.
nrow(youtubeClean2)/nrow(youtubeRaw3)
#use sas for more youtubeClean2.csv dataset stats
#how do we choose MOE? currently, MOE = 5000 views
#do we remove outliers per strata or for the whole dataset? currently, we remove for whole dataset
#after removing outliers, 88% of the dataset is kept...do we ignore fpc adjustment? currently we ignore b/c more than 10%
# SRS sample size for a +/- 5000-view margin of error at 95% confidence:
# n0 = (z * sd / MOE)^2, rounded up.
n0srs <- ceiling((1.96^2*sd(youtubeClean2$views)^2)/(5000^2))
n0srs
#SRS function
# SrsMeanEstimate: draw a simple random sample of SampSize rows from the
# global `youtubeClean2`, then estimate mean(views) using the survey package.
#
# Args:
#   Seed:        integer seed for a reproducible draw.
#   SampSize:    number of rows to sample.
#   printOutput: if TRUE, print the realized sample size and a per-stratum
#                boxplot of the sampled views.
# Returns: list(mean estimate, standard error, lower 95% CI, upper 95% CI).
# NOTE(review): relies on the globals `youtubeClean2` plus the dplyr, survey
# and lattice packages attached by this script.
SrsMeanEstimate<-function(Seed, SampSize, printOutput= TRUE){
  set.seed(Seed)
  youtubeClean2.SRSSampled = sample_n(youtubeClean2,SampSize)
  if(printOutput == TRUE){
    print(nrow(youtubeClean2.SRSSampled))
    print(bwplot(views ~ category_id2, data = youtubeClean2.SRSSampled, scales=list(x=list(rot=45)), main="SRS Boxplot Distribution of Videos Views", xlab="Trending Videos Categories", ylab="Number of Views"))
  }
  # Unclustered, unstratified design => plain SRS-style variance.
  mydesign <- svydesign(id = ~1, data = youtubeClean2.SRSSampled)
  srsMean = svymean(~views, design = mydesign)
  srsSE = SE(srsMean)
  srsCI = confint(srsMean)
  rm(youtubeClean2.SRSSampled)
  rm(mydesign)
  return(list(as.numeric(srsMean[1]),
              as.numeric(srsSE),
              as.numeric(srsCI[1]),
              as.numeric(srsCI[2])
              )
         )
}
# NOTE(review): the sample size n0srs doubles as the seed here -- confirm
# that coupling is intentional.
srsMean <- SrsMeanEstimate(n0srs, n0srs)
print(paste('The Mean Estimate =', srsMean[[1]]))
print(paste('The Standard Error =', srsMean[[2]]))
mean(youtubeClean2$views)
#Proportional Strata
# PropMeanEstimate: draw a stratified sample from the global `youtubeClean2`,
# allocating the total sample size proportionally to the frequency of each
# `category_id2` stratum, and estimate mean(views) with the survey package.
#
# Args:
#   Seed:        integer seed for a reproducible draw.
#   SampSize:    total sample size to allocate across strata.
#   printOutput: if TRUE, print the allocation table, realized sample size
#                and a per-stratum boxplot of the sampled views.
# Returns: list(mean estimate, standard error, lower 95% CI, upper 95% CI).
# NOTE(review): relies on the globals `youtubeClean2` plus the dplyr, survey
# and lattice packages attached by this script.
PropMeanEstimate <- function(Seed, SampSize, printOutput = TRUE){
  set.seed(Seed)
  # Identify frequency of each category_id2 stratum and its proportional
  # share of the requested sample size.
  PropFreq <- as.data.frame(table(youtubeClean2[, c("category_id2")]))
  names(PropFreq)[1] <- 'category_id2'
  PropFreq$N <- nrow(youtubeClean2)
  PropFreq$p <- PropFreq$Freq / PropFreq$N
  PropFreq$SampSizeh <- (PropFreq$p * SampSize)
  PropFreq$SampSizehRounded <- round(PropFreq$SampSizeh)
  # Draw each stratum's sub-sample and stack them together.
  youtubeClean2.PropSampled <- NULL
  for (i in 1:nrow(PropFreq)){
    youtubeClean2.PropSampled <- rbind(youtubeClean2.PropSampled,
        sample_n(youtubeClean2[(youtubeClean2$category_id2 == PropFreq[i, "category_id2"]), ],
                 PropFreq[i, "SampSizehRounded"]))
  }
  if (printOutput == TRUE){
    print(PropFreq)
    print(nrow(youtubeClean2.PropSampled))
    print(bwplot(views ~ category_id2, data = youtubeClean2.PropSampled, scales = list(x = list(rot = 45)), main = "Prop Boxplot Distribution of Videos Views", xlab = "Trending Videos Categories", ylab = "Number of Views"))
  }
  # Stratified (unclustered) design for design-based mean / SE / CI.
  mydesign <- svydesign(id = ~1, strata = ~category_id2, data = youtubeClean2.PropSampled)
  propMean <- svymean(~views, design = mydesign)
  propSE <- SE(propMean)
  # FIX: confint() was previously called twice (before and after the rm()
  # calls), recomputing the identical interval; compute it once.
  propCI <- confint(propMean)
  rm(youtubeClean2.PropSampled)
  rm(mydesign)
  return(list(as.numeric(propMean[1]),
              as.numeric(propSE),
              as.numeric(propCI[1]),
              as.numeric(propCI[2])
              )
         )
}
#adjusting the sample size calculation?
# Proportional-allocation estimate using the SRS sample size.
propMean <- PropMeanEstimate(n0srs, n0srs)
print(paste('The Mean Estimate =', propMean[[1]]))
print(paste('The Standard Error =', propMean[[2]]))
mean(youtubeClean2$views)
#deff = se_complex/se_srs
# Design effect: ratio of the stratified SE to the SRS SE.
deffProp = as.numeric(propMean[[2]]/srsMean[[2]])
deffProp
# Sample size adjusted by the design effect.
n0prop = ceiling(n0srs*deffProp)
n0prop
#prop adjusted for deff
propMean <- PropMeanEstimate(n0srs, n0prop)
print(paste('The Mean Estimate =', propMean[[1]]))
print(paste('The Standard Error =', propMean[[2]]))
#task 2
# Repeat each estimator over five seeds and collect results row-wise.
# NOTE(review): growing df with rbind inside a loop is O(n^2); fine for
# 15 rows, but preallocate or bind once for larger runs.
SeedList <- c(10000, 20000, 30000, 40000, 50000)
df<- NULL
#SRS Seed Executions
for (seed in SeedList){
  srsEstimate <- SrsMeanEstimate(seed, n0srs, FALSE)
  srsEstimate <- data.frame('SRS', seed, srsEstimate)
  names(srsEstimate) <- c("EstimateType","SeedValue", "MeanEstimate", "SE", "LowerCI", "UpperCI")
  df<- rbind(df,srsEstimate)
}
#Prop Seed Executions
for (seed in SeedList){
  PropEstimate <- PropMeanEstimate(seed, n0srs, FALSE)
  PropEstimate <- data.frame('Prop', seed, PropEstimate)
  names(PropEstimate) <- c("EstimateType","SeedValue", "MeanEstimate", "SE", "LowerCI", "UpperCI")
  df<- rbind(df,PropEstimate)
}
#Prop DE Seed Executions (design-effect-adjusted sample size n0prop)
for (seed in SeedList){
  PropEstimate <- PropMeanEstimate(seed, n0prop, FALSE)
  PropEstimate <- data.frame('Prop DE', seed, PropEstimate)
  names(PropEstimate) <- c("EstimateType","SeedValue", "MeanEstimate", "SE", "LowerCI", "UpperCI")
  df<- rbind(df,PropEstimate)
}
#Add True Mean Value, in-line with estimates
df$TrueMeanValue <- mean(youtubeClean2$views)
#Add Bool Value for whether the Conf Limit contains the True Mean Value
df$WithinConfLimit <- df$LowerCI <= df$TrueMeanValue & df$UpperCI >= df$TrueMeanValue
#Print Results
print(df)
# Average each estimator's numeric columns (MeanEstimate..TrueMeanValue).
winner = aggregate(df[, 3:7], list(df$EstimateType), mean)
winner
#Prop wins slightly
#What is the percentage that the actual value is in the 95% confidence intervals for each design?
#abs(94384.50-93891.7)/2565.645 = 19.20734%
#how do you phrase it? true value is within 19.20734% of standard error for SRS estimation?
# Distance of the averaged estimate from the true mean, in SE units (x100).
winner$PercentFromTrueMean <- abs(winner$TrueMeanValue - winner$MeanEstimate)/winner$SE*100
print(winner)
|
ffbe1a8b6a3123d5e974de0d42554f0876b2ce84
|
39986b2417af8bcdd6d64cc1441de82a7ae47b59
|
/man/fit_nodk.Rd
|
6818bafd337aef0c04831067254374c94fd53c07
|
[
"MIT"
] |
permissive
|
cran/guess
|
3c5a9222e67fb1c32590e807f95a89796acd1589
|
6a4ead9edc2d00ed4d7259f2f799dfc3b0fb343d
|
refs/heads/master
| 2016-08-11T15:21:04.016983
| 2016-02-08T23:44:06
| 2016-02-08T23:44:06
| 54,415,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 877
|
rd
|
fit_nodk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_nodk.R
\name{fit_nodk}
\alias{fit_nodk}
\title{Goodness of fit statistics for data without don't know}
\usage{
fit_nodk(pre_test, pst_test, g, est.param)
}
\arguments{
\item{pre_test}{data.frame carrying pre_test items}
\item{pst_test}{data.frame carrying pst_test items}
\item{g}{estimates of \eqn{\gamma} produced from \code{\link{guesstimate}}}
\item{est.param}{estimated parameters produced from \code{\link{guesstimate}}}
}
\value{
matrix with two rows: top row carrying chi-square value, and bottom row probability of observing that value
}
\description{
For data without Don't Know, chi-square goodness of fit between true and model based multivariate distribution
}
\details{
fit_nodk
}
\examples{
\dontrun{fit_nodk(pre_test, pst_test, g, est.param)}
}
|
2f8e129f9331c1c7072bdfa7e2c728a76259080c
|
ee90b400fc8d344c576198d1e58eafac51e5dc90
|
/code/Exercicio8.R
|
444c99e95a002159bbe0becd42ff751d8437d2a2
|
[] |
no_license
|
lucasfernog/data-science-exercises
|
c2a944fd98d31c818188c4fa77341160a6687ab5
|
b07bde440561980d2c3383e83045ee163eaf3ca6
|
refs/heads/master
| 2020-03-31T14:32:16.507332
| 2018-10-09T18:17:20
| 2018-10-09T18:17:20
| 152,299,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
Exercicio8.R
|
# Exercise 8: frequency table, bar chart and histogram of patient heights
# read from the first sheet ("Plan1") of data/exercicio8.xls.
# FIX: the script previously ran install.packages() unconditionally on every
# source; guard it so the package is only installed when missing.
if (!requireNamespace("gridExtra", quietly = TRUE)) {
  install.packages("gridExtra")
}
library(xlsx)
library(gridExtra)
library(grid)
ex8 <- read.xlsx("data/exercicio8.xls", sheetName = "Plan1")
ex8
# Frequency of each observed height value.
tabela <- table(ex8$Altura.dos.pacientes)
tabela
# Bar chart of the frequency table.
# NOTE(review): ylim is hardcoded to 3 -- presumably the max frequency for
# this data set; confirm, or derive it from max(tabela).
barplot(tabela, ylab = "Frequencia", ylim = c(0,3), main = "'Distribuicao de frequencia'")
hist(ex8$Altura.dos.pacientes, main = "Histograma Ex 8", xlab = "Altura dos Pacientes")
|
9fbde8cbe3b49becd6866607959386d9074d5014
|
fe94391f87c4a5726cf7375155139edfd7b8fe1b
|
/dplyr_code.R
|
c53d1eda7a9280d687049df475d5ef62cb36c7b1
|
[] |
no_license
|
aofcrazy/bootcamp-data-science-learning
|
497f75cefe434d75be4087e38f673dbddb89430f
|
3aa9404bd58349eee8c21e2e5e189e9a69bb5e66
|
refs/heads/master
| 2023-05-02T13:56:43.288869
| 2021-05-20T15:26:34
| 2021-05-20T15:26:34
| 369,093,776
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,562
|
r
|
dplyr_code.R
|
## Data transformation
library(tidyverse)
library(readxl)
# read data into R: one worksheet each for students, addresses, scholarships
student <- read_excel("scholarships.xlsx", 1)
address <- read_excel("scholarships.xlsx",2)
scholarships <- read_excel("scholarships.xlsx",3)
## VLOOKUP (Spreadsheet) == left_join (R)
## Mutating join (left, inner, right, full)
## data pipeline (transformation)
# Students with their address, kept only when they hold a scholarship.
student %>%
  left_join(address, by = "id") %>%
  inner_join(scholarships, by = "id")
## Filtering join (anti_join & semi_join)
## anti join - select students that don't qualify for scholarships
student %>%
  left_join(address, by = "id") %>%
  anti_join(scholarships, by = "id")
# semi join - keep only students that do appear in scholarships
student %>%
  left_join(address, by = "id") %>%
  semi_join(scholarships, by = "id")
## Review dplyr
mtcars <- as_tibble(mtcars)
# Rename/select, keep high-horsepower cars, recode transmission,
# sort, then summarise horsepower.
mtcars %>%
  select(milePerGallon = mpg,
         horsePower = hp, wt, am) %>%
  filter(horsePower > 200) %>%
  mutate(milePerGallon = milePerGallon + 1,
         am = if_else(am == 0, "Auto", "Manual")) %>%
  arrange(am, desc(horsePower)) %>%
  summarise(avg_hp = mean(horsePower),
            sd_hp = sd(horsePower),
            n = n())
## Group By + Summarise
# Horsepower summary per transmission type.
mtcars %>%
  mutate(am = if_else(am == 0, "Auto", "Manual")) %>%
  group_by(am) %>%
  summarise(avg_hp = mean(hp),
            sd_hp = sd(hp),
            n = n())
# Join table: flights on 9 September, counted per carrier, ordered by
# frequency, then labelled with each carrier's full name.
library(nycflights13)
sept_nine <- filter(flights, month == 9 & day == 9)
carrier_counts <- count(sept_nine, carrier)
carrier_counts <- arrange(carrier_counts, desc(n))
carrier_counts <- left_join(carrier_counts, airlines, by = "carrier")
result <- rename(carrier_counts, carrier_name = name)
# Write / export the per-carrier summary as CSV.
write_csv(result, "learn-sql/nyc_summary.csv")
# data wrangling 101: quick tour of base inspection + core dplyr verbs.
mtcars
head(mtcars)
tail(mtcars)
summary(mtcars)
# dplyr
## select columns you want
mtcars %>%
  select(mpg, hp ,wt) %>%
  head(10)
# Columns can be picked by name or by position.
mtcars %>%
  select(mpg, 3, 5, am)
mtcars %>%
  select( starts_with("a"))
mtcars %>%
  select(contains("w"))
# rename columns
# NOTE(review): "housePower" looks like a typo for "horsePower" (used below).
m <- mtcars %>%
  select(milePerGallon = mpg,
         housePower = hp,
         weight = wt) %>%
  head(10)
# filter
mtcars %>%
  select(milePerGallon = mpg,
         horsePower = hp,
         weight = wt) %>%
  filter(horsePower < 100 & weight < 2) # AND
mtcars %>%
  select(milePerGallon = mpg,
         horsePower = hp,
         weight = wt) %>%
  filter(horsePower < 100 | weight < 2) # OR
mtcars %>%
  select(milePerGallon = mpg,
         horsePower = hp,
         weight = wt,
         transmission = am) %>%
  filter(transmission != 0)
# rownames to column
# NOTE: this reassigns the global mtcars copy; later pipelines see the
# extra "model" column.
mtcars <- mtcars %>%
  rownames_to_column() %>%
  rename(model = rowname) %>%
  tibble()
# arrange (sort data)
mtcars %>%
  select(mpg, hp, wt) %>%
  arrange(hp) # asc
mtcars %>%
  select(mpg, hp, wt) %>%
  arrange(desc(hp)) # desc
# mutate create new column
mtcars %>%
  select(mpg, hp, wt, am) %>%
  mutate(hp_edit = hp + 5,
         wt_double = wt * 2,
         am = if_else(am == 0, "Auto", "Manual")) %>%
  filter(am == "Auto")
# summarise data
mtcars %>%
  select(mpg, am) %>%
  mutate(am = if_else(am == 0, "Auto", "Manual")) %>%
  group_by(am) %>%
  summarise(avg_mpg = mean(mpg),
            sum_mpg = sum(mpg),
            sd_mpg = sd(mpg),
            min_mpg = min(mpg),
            max_mpg = max(mpg))
# skimr: compact per-group descriptive statistics.
library(skimr)
mtcars <- mtcars %>%
  mutate(am = if_else(am == 0, "Auto", "Manual"))
View(mtcars)
mtcars %>%
  group_by(am) %>%
  skim()
mtcars %>%
  filter(hp < 150) %>%
  select(mpg, hp, wt, am) %>%
  group_by(am) %>%
  skim()
|
c5391e21e38babcdadb5cb8a18ad744e8eac4718
|
0d054649ad79bad9c5ecfb467a1afa4fd12a0a12
|
/man/rpart_labels.Rd
|
9a0d373a8ce371928b378506a78ff38ea7e14793
|
[] |
no_license
|
joey711/ggdendro
|
101dee4ea46d418e58a59db7b8d3f5e24cfb12c1
|
f18ca86f9370895256e74271b3f74107539747ce
|
refs/heads/master
| 2020-12-25T06:02:54.945578
| 2012-02-02T15:09:40
| 2012-02-02T15:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
rd
|
rpart_labels.Rd
|
\name{rpart_labels}
\alias{rpart_labels}
\title{Extract labels data frame from rpart object for plotting using ggplot.}
\usage{
rpart_labels(model, splits = TRUE, label, FUN = text,
all = FALSE, pretty = NULL,
digits = getOption("digits") - 3, use.n = FALSE,
fancy = FALSE, fwidth = 0.8, fheight = 0.8, ...)
}
\arguments{
\item{model}{object of class "rpart", e.g. the output of
rpart()}
\item{...}{ignored}
}
\value{
a list with two elements: $labels and $leaf_labels
}
\description{
Extract labels data frame from rpart object for plotting
using ggplot.
}
\seealso{
\code{\link{ggdendrogram}}
Other dendro_data methods:
\code{\link{dendro_data.dendrogram}},
\code{\link{dendro_data.rpart}},
\code{\link{dendro_data.tree}},
\code{\link{dendrogram_data}}
Other rpart functions: \code{\link{dendro_data.rpart}},
\code{\link{rpart_segments}}
}
\keyword{internal}
|
b20afabc0f2ce722bbcd9593e09e0e85968d1c07
|
cebf3c6700ff85f87c61de6d7f882880315eddd2
|
/man/kernelFactory.Rd
|
da9aad984ec2226e97929e6b9bef4131d8a9c853
|
[] |
no_license
|
wrathematics/kernelFactory
|
539c3ae50949a6e42ecb595c029055e125b5ed83
|
425303ac7de92ddbc6270c2fa88150bc7aa5b28d
|
refs/heads/master
| 2021-01-10T01:54:09.944169
| 2015-11-11T17:14:00
| 2015-11-11T17:14:00
| 45,994,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,700
|
rd
|
kernelFactory.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/kernelFactory.R
\name{kernelFactory}
\alias{kernelFactory}
\title{Binary classification with Kernel Factory}
\usage{
kernelFactory(x = NULL, y = NULL, cp = 1, rp = round(log(nrow(x), 10)),
method = "burn", ntree = 500, filter = 0.01, popSize = rp * cp * 7,
iters = 80, mutationChance = 1/(rp * cp), elitism = max(1, round((rp *
cp) * 0.05)), oversample = TRUE)
}
\arguments{
\item{x}{A data frame of predictors (numeric, integer or factor). Categorical variables need to be factors. Indicator values should not be too imbalanced because this might produce constants in the subsetting process.}
\item{y}{A factor containing the response vector. Only \{0,1\} is allowed.}
\item{cp}{The number of column partitions.}
\item{rp}{The number of row partitions.}
\item{method}{Can be one of the following: POLynomial kernel function (\code{pol}), LINear kernel function (\code{lin}), Radial Basis kernel Function \code{rbf}), random choice (random={pol, lin, rbf}) (\code{random}), burn- in choice of best function (burn={pol, lin, rbf }) (\code{burn}). Use \code{random} or \code{burn} if you don't know in advance which kernel function is best.}
\item{ntree}{Number of trees in the Random Forest base classifiers.}
\item{filter}{either NULL (deactivate) or a percentage denoting the minimum class size of dummy predictors. This parameter is used to remove near constants. For example if nrow(xTRAIN)=100, and filter=0.01 then all dummy predictors with any class size equal to 1 will be removed. Set this higher (e.g., 0.05 or 0.10) in case of errors.}
\item{popSize}{Population size of the genetic algorithm.}
\item{iters}{Number of generations of the genetic algorithm.}
\item{mutationChance}{Mutationchance of the genetic algorithm.}
\item{elitism}{Elitism parameter of the genetic algorithm.}
\item{oversample}{Oversample the smallest class. This helps avoid problems related to the subsetting procedure (e.g., if rp is too high).}
}
\value{
An object of class \code{kernelFactory}, which is a list with the following elements:
\item{trn}{Training data set.}
\item{trnlst}{List of training partitions.}
\item{rbfstre}{List of used kernel functions.}
\item{rbfmtrX}{List of augmented kernel matrices.}
\item{rsltsKF}{List of models.}
\item{cpr}{Number of column partitions.}
\item{rpr}{Number of row partitions.}
\item{cntr}{Number of partitions.}
\item{wghts}{Weights of the ensemble members.}
\item{nmDtrn}{Vector indicating the numeric (and integer) features.}
\item{rngs}{Ranges of numeric predictors.}
\item{constants}{To exclude from newdata.}
}
\description{
\code{kernelFactory} implements an ensemble method for kernel machines (Ballings and Van den Poel, 2013).
}
\examples{
#Credit Approval data available at UCI Machine Learning Repository
data(Credit)
#take subset (for the purpose of a quick example) and train and test
Credit <- Credit[1:100,]
train.ind <- sample(nrow(Credit),round(0.5*nrow(Credit)))
#Train Kernel Factory on training data
kFmodel <- kernelFactory(x=Credit[train.ind,names(Credit)!= "Response"],
y=Credit[train.ind,"Response"], method="random")
#Deploy Kernel Factory to predict response for test data
#predictedresponse <- predict(kFmodel, newdata=Credit[-train.ind,names(Credit)!= "Response"])
}
\author{
Authors: Michel Ballings and Dirk Van den Poel, Maintainer: \email{Michel.Ballings@GMail.com}
}
\references{
Ballings, M. and Van den Poel, D. (2013), Kernel Factory: An Ensemble of Kernel Machines. Expert Systems With Applications, 40(8), 2904-2913.
}
\seealso{
\code{\link{predict.kernelFactory}}
}
\keyword{classification}
|
0a4834d5a769493ba053acbd44c185f22aa7d662
|
338cfd3efe0cc943d2e6b58becf7432ced163ab2
|
/01R language in action/ch6Data_IO/i0inner_dataset.R
|
5f2d9c997b904a61d3da4d24fe452be05f79c0ce
|
[] |
no_license
|
greatabel/RStudy
|
e1b82574f1a2f1c3b00b12d21f2a50b65386b0db
|
47646c73a51ec9642ade8774c60f5b1b950e2521
|
refs/heads/master
| 2023-08-20T17:07:34.952572
| 2023-08-07T13:22:04
| 2023-08-07T13:22:04
| 112,172,144
| 6
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 331
|
r
|
i0inner_dataset.R
|
# Demo: load a built-in dataset, read a local table, and print a few of its
# properties with coloured labels (crayon).
library(crayon)
data(geyser, package = "MASS")
# quote = "\"" keeps double-quoted fields intact.
data <- read.table("i0car.txt", header = TRUE, quote = "\"")
# Peek at the first two rows.
data[1:2, ]
cat(red$bold$bgGreen("mode(data) is "))
mode(data)
cat(blue$bold$bgGreen("names(data) is "))
names(data)
cat(yellow$bold$bgGreen("dim(data) is "))
dim(data)
cat(red$bold$bgGreen("data$lp100km is "))
data$lp100km
|
ec75e9bd2e8285a60051bddaa9fad3840e350559
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/13157_0/rinput.R
|
ba66e0eb16a21dfcfdce5d7503c515eef5b45130
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 13157_0.txt and write the
# unrooted tree back out in Newick format.
library(ape)
phylo_in <- read.tree("13157_0.txt")
write.tree(unroot(phylo_in), file="13157_0_unrooted.txt")
|
6b680a03b6c166e09c1d04c4f76acb0c4765d4ce
|
0b0e39a4f7fe32aa9a3988d286a3c3a393b218bf
|
/inst/script/requiredLibraries.R
|
69ae273de3c0fa901d480422ec73b841e14095e4
|
[] |
no_license
|
Manuelaio/uncoverappLib
|
e3b95b6419f23b17c4babdfa371c697577b5cc07
|
66df2cbf2bc0637c3bcc0ce20c5dfea9ae83d495
|
refs/heads/master
| 2023-03-04T02:37:02.533719
| 2023-02-13T10:00:41
| 2023-02-13T10:00:41
| 254,597,958
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
requiredLibraries.R
|
# Load every package the app needs at runtime: Shiny UI helpers, data
# wrangling, genome browsers, and annotation/transcript databases.
# NOTE(review): require() only warns and returns FALSE when a package is
# missing; library() would fail fast -- consider switching if silent
# load failures are not intended.
require(shiny)
require(shinyWidgets)
require(shinyBS)
require(shinyjs)
#required libraries
require(markdown)
require(DT)
require(dplyr)
require(Gviz)
require(Homo.sapiens)
require(OrganismDbi)
require(stringr)
require(condformat)
# NOTE(review): shinyjs is already loaded above -- duplicate require.
require(shinyjs)
require(bedr)
require(rlist)
require(Rsamtools)
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
require(TxDb.Hsapiens.UCSC.hg38.knownGene)
#require(BSgenome.Hsapiens.UCSC.hg19)
#require(BSgenome.Hsapiens.UCSC.hg38)
require(EnsDb.Hsapiens.v75)
require(EnsDb.Hsapiens.v86)
require(org.Hs.eg.db)
|
2c6f2e2058934116c414d1829c7d806f297b3230
|
e3c744f368446d33a0836289193c7c2badf39a81
|
/plot1.R
|
b67e003b8e4fd665be306a3a47a1d26c2570033e
|
[] |
no_license
|
mbontrager/ExData_Plotting1
|
bb438676006220b19666c20721575d0622565f40
|
d0a1d3a6cdd67b735484633d94a491a9da0053cb
|
refs/heads/master
| 2021-01-22T15:12:21.274470
| 2015-06-06T14:44:09
| 2015-06-06T14:44:09
| 36,982,940
| 0
| 0
| null | 2015-06-06T14:27:58
| 2015-06-06T14:27:58
| null |
UTF-8
|
R
| false
| false
| 791
|
r
|
plot1.R
|
# Martin Bontrager
# Exploratory Data Analysis
# Project 1 - Plot1
library(data.table)
# Read power consumption data and subset to only two days: 1-2 Feb 2007
input_path <- "data/household_power_consumption.txt"
power <- fread(input_path, sep = ";", na.strings = "?",
               stringsAsFactors = FALSE, header = TRUE)
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
window_start <- as.Date("2007-02-01")
window_end <- as.Date("2007-02-02")
feb_days <- subset(power, subset = (power$Date >= window_start & power$Date <= window_end))
# Generate a histogram of Global Active Power for these dates
png(filename = "plot1.png", width = 480, height = 480)
feb_days$Global_active_power <- as.numeric(feb_days$Global_active_power)
hist(feb_days$Global_active_power, main = "Global Active Power", col = "red",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
28026a3327f13997cac42f9411cc6a29c3ab8a63
|
9cf1abc8ce339d07859eaa12d6143382bee0431a
|
/TPL_QUERY.R
|
07ccc4f4e5db7af4eaec2cf0254b153141b69c16
|
[] |
no_license
|
ccsosa/GIS_ANALYSIS
|
043e4d2d8fc76a8c50ea8e174914bf1b5612a2eb
|
f82a0b75ef67478d058216c84ad3ae78fc83cd59
|
refs/heads/master
| 2021-06-21T06:24:42.182397
| 2017-05-10T22:59:53
| 2017-05-10T22:59:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
TPL_QUERY.R
|
# Look up a list of plant taxon names against The Plant List (tpl/tpldata
# packages) and export the matches.
require(tpl)
require(tpldata)
# NOTE(review): reading from "clipboard" is Windows-only and fragile;
# consider reading the name list from a file instead.
t<-read.table("clipboard",header=F,sep="\t")
# Query every name in the first column.
tpl2<-tpl.get(t[,1])
# "^"-separated output; hardcoded absolute path -- TODO parameterize.
write.table(tpl2,"D:/CWR/TPL_GBIF_9743.csv",sep="^",quote = F,row.names = F)
|
df00f9500b8e035cbfee903d8e1c278df7dc4ebf
|
5ea830cfee38cc02964e2bf8d787824ab9275b26
|
/ConvolutionalNeuralNetwork.R
|
c78a8b9319a351f91d2949968931b123b0c0a0e5
|
[] |
no_license
|
rahul494/Environment-Image-Recognition
|
e01268b50b8d75dd7b8261e9385c9b2981d75d87
|
4006f75a8483bfd9f0eb0bd1ac92bef967de0e33
|
refs/heads/master
| 2021-02-16T08:46:11.164011
| 2020-05-12T23:29:01
| 2020-05-12T23:29:01
| 244,985,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,648
|
r
|
ConvolutionalNeuralNetwork.R
|
#######################################################################
## Project: Image Processing Methods for Environment Classification
## Script purpose: Analyze and classify photos via Convolutional Neural Network
## Date: 2020-03-28
## Author: Rahul Sharma
#######################################################################
library(keras)
library(EBImage)
# Read metadata
pm <- read.csv("C:\\Users\\Rahul\\Downloads\\photoMetaData.csv")
# Binary label: 1 = "outdoor-day" photo, 0 = anything else.
y <- as.numeric(pm$category == "outdoor-day")
# Read Images
# Load the first 800 photos and resize each to 128x128.
pics <- list()
for (i in 1:800){
  pics[[i]] <- readImage(paste0("C:\\Users\\Rahul\\Downloads\\columbiaImages\\", pm$name[i]))
  pics[[i]] <- resize(pics[[i]], 128, 128)
}
# Split indexes such that 80% of the data goes towards training, and remaining pictures for test
# NOTE(review): no set.seed(), so the train/test split changes every run.
train_index <- sample(1:length(pics), 0.8 * length(pics))
test_index <- setdiff(1:length(pics), train_index)
# Split our picture data into test/train sets
x.train <- pics[c(train_index)]
x.test <- pics[c(test_index)]
x.train <- combine(x.train)
x.test <- combine(x.test)
# Reorder dimensions
# Move the image index to the first axis so Keras sees (n, 128, 128, 3).
x.train <- aperm(x.train, c(4, 1, 2, 3))
x.test <- aperm(x.test, c(4, 1, 2, 3))
# Create our reponse variables
# One-hot encode the binary label for the 2-way softmax output layer.
cat.train <- to_categorical(as.numeric(pm$category == "outdoor-day")[c(train_index)])
cat.test <- to_categorical(as.numeric(pm$category == "outdoor-day")[c(test_index)])
#Model
# Two conv/conv/pool stages, dropout, then a small dense head with softmax.
model <- keras_model_sequential() %>%
  layer_conv_2d(filters = 16,
                kernel_size = c(3,3),
                activation = 'relu',
                input_shape = c(128,128,3)) %>%
  layer_conv_2d(filters = 16,
                kernel_size = c(3,3),
                activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_conv_2d(filters = 32,
                kernel_size = c(3,3),
                activation = 'relu') %>%
  layer_conv_2d(filters = 32,
                kernel_size = c(3,3),
                activation = 'relu') %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_flatten() %>%
  layer_dense(units = 10,
              activation = 'relu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 2,
              activation = 'softmax')
model %>% compile(
  loss = loss_categorical_crossentropy,
  optimizer = optimizer_adadelta(),
  metrics = c('accuracy')
)
batch_size <- 64
epochs <- 16
# Train model (20% of the training set held out for validation).
model %>% fit(
  x.train,
  cat.train,
  batch_size = batch_size,
  epochs = epochs,
  validation_split = 0.2
)
# Determine our accuracy on test data
score <- model %>% evaluate(x.test,cat.test)
cat('Test loss: ', score$loss, "\n")
cat('Test accuracy: ', score$acc, "\n")
|
c9e60f521c667eacb7792aa3fca31170e706f622
|
60247e886a6b94b7da90440c35faeb8bda2c55e2
|
/plot1.R
|
17a5c2a2b7318a0e5a3c87de0aff6991cb8906fd
|
[] |
no_license
|
MridullS/exploratory_data_analysis
|
2863b041d530b75900a1a5035d6ac26bf40ffd8f
|
4580a95d2dd431469ea5c157dab9b6d8b6d450e3
|
refs/heads/master
| 2022-09-17T17:13:43.054039
| 2020-05-31T11:47:35
| 2020-05-31T11:47:35
| 268,265,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
plot1.R
|
library(dplyr)
library(lubridate)  # BUG FIX: ymd() and dmy_hms() below come from lubridate, which was never loaded

# Draw plot1.png: histogram of household Global Active Power (kilowatts)
# for the two-day window 2007-02-01 to 2007-02-02 (end date exclusive).
# Reads 'household_power_consumption.txt' from the working directory:
# ';'-separated (read.csv2), '.' decimals, '?' marks missing values.
plot1 <- function() {
  file_read <- read.csv2('household_power_consumption.txt', dec='.', na.strings='?',
                         stringsAsFactors=FALSE)
  start <- ymd('2007-02-01')
  end <- ymd('2007-02-03')
  # Build a proper datetime, keep only the needed columns and date window
  file_read <- file_read %>%
    mutate(DateTime=dmy_hms(paste(Date, Time))) %>%
    select(DateTime, Global_active_power:Sub_metering_3) %>%
    filter(DateTime >= start, DateTime < end)
  png(filename='plot1.png', width=480, height=480, units='px')
  # (the original wrapped this in with() but still used file_read$... inside;
  # the redundant wrapper is removed)
  hist(file_read$Global_active_power, col='red',
       xlab='Global Active Power (kilowatts)',
       main='Global Active Power')
  dev.off()
}
|
69f839de824836b2d8072acd6d792b3499035480
|
7d5d8492c2d88b88bdc57e3c32db038a7e7e7924
|
/SAL_BMU_Amazon/13-CRU_functions.R
|
df97d15fa0f28fb4a22a669bd32f422a43ad9405
|
[] |
no_license
|
CIAT-DAPA/dapa-climate-change
|
80ab6318d660a010efcd4ad942664c57431c8cce
|
2480332e9d61a862fe5aeacf6f82ef0a1febe8d4
|
refs/heads/master
| 2023-08-17T04:14:49.626909
| 2023-08-15T00:39:58
| 2023-08-15T00:39:58
| 39,960,256
| 15
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
13-CRU_functions.R
|
library(raster)  # extent()
library(ncdf)
library(rgdal)
# require(ncdf4)
# source("13-CRU_functions.R")

# Cut CRU-TS 3.21 monthly netCDFs down to a lon/lat box and derive
# multi-year monthly climatologies via external `cdo` calls.
#
# baseDir - folder holding per-variable subfolders with
#           cru_ts3.21.1901.2012.<code>.dat.nc files.
# region  - raster::extent giving the lon/lat box to cut.
# outDir  - output folder (created if missing).
# Requires the `cdo` command-line tool on the system PATH.
CRU_cut <- function(baseDir="T:/gcm/cmip5/isi_mip", region=extent(-80, -66, -16, 5), outDir="Z:/DATA/WP2/03_Future_data/isi_mip_ft_0_5deg") {
  # NOTE(review): changes the working directory for the whole session and
  # never restores it; all paths below are absolute, so it may be removable.
  setwd(baseDir)
  if (!file.exists(outDir)) {dir.create(outDir, recursive = TRUE)}
  # names = CRU variable codes in the source file names,
  # values = short names used for the output files.
  # BUG FIX: the original list named both "tmin" and "tmean" as "tmn", so the
  # tmean pass re-read the tmin file. CRU-TS codes mean temperature as "tmp"
  # (confirm against the actual file names on disk).
  varList <- list("pre"="prec", "tmx"="tmax", "tmn"="tmin", "dtr"="dtr", "tmp"="tmean")
  for (v in seq_along(varList)){
    # Skip variables whose final climatology product already exists
    if(!file.exists(paste0(outDir, "/", varList[[v]], "_1976_2005_climatology.nc"))){
      nc <- paste0(baseDir, "/", names(varList)[v], "/cru_ts3.21.1901.2012.", names(varList)[v], ".dat.nc")
      oNc <- paste0(outDir, "/", varList[[v]], "_1961_2010_monthly.nc")
      ## Cut region and years 1961-2010
      cat("Cut region ", varList[[v]], "\n")
      system(paste("cdo -sellonlatbox,",region@xmin,",",region@xmax,",",region@ymin,",",region@ymax," -selyear,1961/2010 ", nc, " ", oNc, sep=""))
      ## Multi-year monthly means for three reference periods
      cat("Clim calcs ", varList[[v]], "\n")
      system(paste("cdo -ymonavg -selyear,1986/2005 ", oNc, " ", outDir, "/", varList[[v]], "_1986_2005_climatology.nc", sep=""))
      system(paste("cdo -ymonavg -selyear,1971/2000 ", oNc, " ", outDir, "/", varList[[v]], "_1971_2000_climatology.nc", sep=""))
      system(paste("cdo -ymonavg -selyear,1976/2005 ", oNc, " ", outDir, "/", varList[[v]], "_1976_2005_climatology.nc", sep=""))
    }
  }
}
## Cut the raw CRU-TS 3.21 grids down to the study region
cru_base_dir <- "S:/observed/gridded_products/cru-ts-v3-21/raw"
study_region <- extent(-80, -66, -16, 5)
cru_out_dir <- "Z:/DATA/WP2/02_Gridded_data/cru_0_5deg"
otp <- CRU_cut(baseDir = cru_base_dir, region = study_region, outDir = cru_out_dir)
|
8cc41aa1c48d8d5dd38a8fc9535bae2ef8c821bb
|
4901ec89e81d76ea8ee197f49367e0e293e989c4
|
/R/ds-functions.R
|
2d44c4965236f85b9b90d170aea66780b41fb87b
|
[] |
no_license
|
RomeroBarata/IN1165-MCS-EL3
|
19166f4e2bde2ec1a1755c208897317f3a94b030
|
9eef9f9f8d2b6864fd8e3132941471045714ab9f
|
refs/heads/master
| 2021-01-10T22:38:02.196227
| 2016-10-10T23:35:25
| 2016-10-10T23:35:25
| 69,709,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,615
|
r
|
ds-functions.R
|
# Dynamic classifier selection for a bagging ensemble: Overall Local Accuracy
# (OLA). For each test row, find its 5 nearest neighbours in the validation
# set, score every base classifier by its plain accuracy on that
# neighbourhood, and let the single most accurate classifier predict the row.
#
# object  - bagging ensemble (list of base classifiers); predict.bagging is
#           assumed to return an (n_rows x n_classifiers) prediction matrix.
# newdata - test predictors (no class column).
# valdata - validation data; the LAST column is taken as the class label.
# Returns a character vector of predicted class labels for newdata.
predict.bagging_ola <- function(object, newdata, valdata, ...){
  # Standardise the label column name so the kknn formula below works
  names(valdata)[ncol(valdata)] <- "Class"
  val_preds <- predict.bagging(object, valdata[, -ncol(valdata)])
  # $C holds, per test row, the indices of the k nearest validation rows
  knn_idx <- kknn::kknn(Class ~ .,
                        train = valdata,
                        test = newdata,
                        k = 5,
                        kernel = "rectangular")$C
  final_preds <- vector("logical", length = nrow(newdata))
  for (i in seq_len(nrow(newdata))){
    nns_idx <- knn_idx[i, ]
    nns_preds <- val_preds[nns_idx, ]
    nns_class <- unlist(valdata[nns_idx, "Class"], use.names = FALSE)
    # Per-classifier accuracy over the neighbourhood
    overall_acc <- colMeans(nns_preds == nns_class)
    best_classifier <- which.max(overall_acc)
    # NOTE(review): predict(..., type = "class") presumably returns a factor;
    # assigning it into `final_preds` stores the underlying integer level
    # code, which the levels()[...] lookup below depends on -- confirm the
    # base classifiers share the validation data's level ordering.
    final_preds[i] <- predict(object[[best_classifier]], newdata[i, ],
                              type = "class")
  }
  # Map stored level codes back to class label strings
  levels(unlist(valdata[, ncol(valdata)], use.names = FALSE))[final_preds]
}
# Dynamic classifier selection: Local Class Accuracy (LCA). Like OLA, but each
# classifier is scored only on the neighbourhood rows whose TRUE class equals
# that classifier's prediction for the current test row.
#
# object  - bagging ensemble; predict.bagging is assumed to return an
#           (n_rows x n_classifiers) prediction matrix.
# newdata - test predictors (no class column).
# valdata - validation data; the LAST column is taken as the class label.
# Returns a character vector of predicted class labels for newdata.
predict.bagging_lca <- function(object, newdata, valdata, ...){
  # Standardise the label column name so the kknn formula below works
  names(valdata)[ncol(valdata)] <- "Class"
  val_preds <- predict.bagging(object, valdata[, -ncol(valdata)])
  test_preds <- predict.bagging(object, newdata)
  # $C holds, per test row, the indices of the k nearest validation rows
  knn_idx <- kknn::kknn(Class ~ .,
                        train = valdata,
                        test = newdata,
                        k = 5,
                        kernel = "rectangular")$C
  final_preds <- vector("logical", length = nrow(newdata))
  for (i in seq_len(nrow(newdata))){
    nns_idx <- knn_idx[i, ]
    nns_preds <- val_preds[nns_idx, ]
    nns_class <- unlist(valdata[nns_idx, "Class"], use.names = FALSE)
    # Local class accuracy of classifier j: accuracy restricted to the
    # neighbours whose true class matches j's prediction for test row i.
    # Returns 0 when no neighbour has that class.
    f <- function(j){
      current_pred <- test_preds[i, j]
      same_class <- current_pred == nns_class
      if (!any(same_class)) return(0)
      mean(nns_preds[same_class, j] == nns_class[same_class])
    }
    local_acc <- vapply(seq_len(ncol(test_preds)), f, numeric(1))
    best_classifier <- which.max(local_acc)
    # NOTE(review): stores the factor's integer level code (see OLA variant);
    # the levels()[...] lookup below relies on this coercion.
    final_preds[i] <- predict(object[[best_classifier]], newdata[i, ],
                              type = "class")
  }
  # Map stored level codes back to class label strings
  levels(unlist(valdata[, ncol(valdata)], use.names = FALSE))[final_preds]
}
# Dynamic ensemble selection: pick the 5 locally most accurate classifiers
# (LCA criterion), keep the 3 most diverse of them (lowest mean double-fault
# diversity), and majority-vote their predictions for the test row.
#
# object  - bagging ensemble; predict.bagging is assumed to return an
#           (n_rows x n_classifiers) prediction matrix.
# newdata - test predictors (no class column).
# valdata - validation data; the LAST column is taken as the class label.
# Returns a character vector of predicted class labels for newdata.
predict.bagging_dsknn <- function(object, newdata, valdata, ...){
  # Standardise the label column name so the kknn formula below works
  names(valdata)[ncol(valdata)] <- "Class"
  val_preds <- predict.bagging(object, valdata[, -ncol(valdata)])
  test_preds <- predict.bagging(object, newdata)
  # $C holds, per test row, the indices of the k nearest validation rows
  knn_idx <- kknn::kknn(Class ~ .,
                        train = valdata,
                        test = newdata,
                        k = 5,
                        kernel = "rectangular")$C
  final_preds <- vector("logical", length = nrow(newdata))
  for (i in seq_len(nrow(newdata))){
    nns_idx <- knn_idx[i, ]
    nns_preds <- val_preds[nns_idx, ]
    nns_class <- unlist(valdata[nns_idx, "Class"], use.names = FALSE)
    # Local class accuracy of classifier j on the neighbourhood (0 when no
    # neighbour shares j's predicted class) -- same criterion as the LCA variant
    f <- function(j){
      current_pred <- test_preds[i, j]
      same_class <- current_pred == nns_class
      if (!any(same_class)) return(0)
      mean(nns_preds[same_class, j] == nns_class[same_class])
    }
    local_acc <- vapply(seq_len(ncol(test_preds)), f, numeric(1))
    # Highest accuracies: candidate pool of the top-5 classifiers
    best_classifiers <- order(local_acc, decreasing = TRUE)[1:5]
    # diversityMatrix() is a project helper (not visible here) -- it is given
    # the candidates' neighbourhood predictions, the true classes and the
    # "doubleFault" measure; keep the 3 candidates with the lowest mean value.
    diversity_matrix <- diversityMatrix(val_preds[nns_idx, best_classifiers],
                                        valdata[nns_idx, ncol(valdata)],
                                        "doubleFault")
    best_idx <- order(colMeans(diversity_matrix))[1:3]
    best_classifiers <- best_classifiers[best_idx]
    # Majority vote over the selected sub-ensemble (character label);
    # assigning a character coerces final_preds to character on first use
    preds <- predict.bagging(object[best_classifiers], newdata[i, ])
    final_preds[i] <- names(which.max(table(preds)))
  }
  final_preds
}
|
71d540386c42caf3193bedaa473c6d528d95887f
|
3f17ed44ae94cc7570aecd38fe075626e5df84ff
|
/app2020/LakeAssessmentApp_v1/buildAppModules/2018IRversions/buildStationTable.R
|
62ab46f26a5ba824c4094b438c53eae1870e9b8d
|
[] |
no_license
|
EmmaVJones/LakesAssessment2020
|
24c57a343ec7df5b18eada630cc2e11e01c8c83c
|
72f4b6e9721c947e8b7348c9d06e3baf8f2da715
|
refs/heads/master
| 2020-05-18T02:41:57.259296
| 2019-06-20T13:38:55
| 2019-06-20T13:38:55
| 184,124,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,797
|
r
|
buildStationTable.R
|
# Pull all conventionals records for one lake's stations (Claytor Lake
# example) and join on the station-level assessment metadata.
lake_filter <- filter(lakeStations, SIGLAKENAME == 'Claytor Lake')
conventionals_Lake <- filter(conventionals, FDT_STA_ID %in% unique(lake_filter$FDT_STA_ID)) %>%
  left_join(dplyr::select(lakeStations, FDT_STA_ID, SEC, CLASS, SPSTDS,PWS, ID305B_1, ID305B_2, ID305B_3,
                          STATION_TYPE_1, STATION_TYPE_2, STATION_TYPE_3, ID305B, SEC187, SIG_LAKE, USE,
                          SIGLAKENAME, Chlorophyll_A_limit, TPhosphorus_limit, Assess_TYPE), by='FDT_STA_ID')
# Keep records belonging to the selected assessment unit.
# BUG FIX: the third condition originally re-tested ID305B_2, so stations
# matching only on ID305B_3 were silently dropped; this now mirrors the
# three-column lakeAU filter below.
AUData <- filter(conventionals_Lake, ID305B_1 %in% "VAW-N16L_NEW01A02" | #"VAW-N16L_NEW01A02" "VAW-N16L_NEW01B14" "VAW-N17L_PKC01A10" "VAW-N17L_PKC02A10"
                   ID305B_2 %in% "VAW-N16L_NEW01A02" |
                   ID305B_3 %in% "VAW-N16L_NEW01A02") %>%
  left_join(WQSvalues, by = 'CLASS')
stationData <- filter(AUData, FDT_STA_ID %in% "9-NEW087.14") #"9-NEW087.14" "9-NEW089.34"
# Selected station as an sf point (EPSG:4269, geographic coordinates)
point <- dplyr::select(stationData[1,], FDT_STA_ID:FDT_SPG_CODE, STA_LV2_CODE:ID305B_3, Latitude, Longitude ) %>%
  st_as_sf(coords = c("Longitude", "Latitude"),
           remove = F, # don't remove these lat/lon cols from df
           crs = 4269) # add projection, needs to be geographic for now bc entering lat/lng
# Assessment unit polygon(s) matching any of the station's three AU IDs
AU <- filter(lakeAU, ID305B %in% as.character(point$ID305B_1) |
               ID305B %in% as.character(point$ID305B_2) |
               ID305B %in% as.character(point$ID305B_3))
# Interactive map: AU polygon(s) plus the highlighted station
map1 <- mapview(AU,zcol = 'ID305B', label= AU$ID305B, layer.name = 'Assessment Unit (ID305B_1)',
                popup= popupTable(AU, zcol=c("ID305B","Acres","CYCLE","WATER_NAME"))) +
  mapview(point, color = 'yellow', lwd = 5, label= point$FDT_STA_ID, layer.name = c('Selected Station'),
          popup= popupTable(point, zcol=c("FDT_STA_ID","STA_DESC","ID305B_1", "ID305B_2", "ID305B_3")))
map1@map
|
92530d620d756c72623efced711c2646917e070b
|
95989f087d37032cc39739fc2b42449268fc4d69
|
/tests/testthat/tests.R
|
22478b09f24b8c333a01c9b6050e300865ebb87e
|
[] |
no_license
|
CharlesNaylor/walker
|
c3e4986ce3783165222788a25b60c147398ee732
|
95f44692673f32430bd3ee19fbbb90cf5488a52b
|
refs/heads/master
| 2021-06-18T14:44:48.825576
| 2017-06-26T01:02:37
| 2017-06-26T01:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,285
|
r
|
tests.R
|
context("Test walker")
test_that("arguments work as intended", {
library(walker)
expect_error(walker("aa"))
expect_error(walker(rnorm(2) ~ 1:4))
expect_error(walker(rnorm(10) ~ 1))
expect_error(walker(y ~ 1))
expect_error(walker(rnorm(10) ~ 1, beta_prior = 0))
x <- 1:3
expect_identical(c(1,1,1,1:3), c(walker(1:3 ~ x, return_x_reg = TRUE)))
})
test_that("stan side works", {
y <- x <- 1:3
set.seed(1)
expect_warning(fit <- walker(y ~ x, beta_prior = cbind(0, c(2, 2)),
sigma_prior = cbind(0, c(2,2,2)), iter = 10, chains = 1, refresh = 0),NA)
expect_equivalent(structure(c(0.575776440370937, 0.608739297869922, 0.600646410430753,
2.47394156830475, 0.503598122307422), .Dim = 5L, .Dimnames = structure(list(
iterations = NULL), .Names = "iterations")),
extract(fit, pars = "sigma_y")$sigma_y)
set.seed(1)
expect_warning(fit <- walker(y ~ x, naive = TRUE, beta_prior = cbind(0, c(2, 2)),
sigma_prior = cbind(0, c(2,2,2)), iter = 10, chains = 1, refresh = 0),NA)
expect_equivalent(structure(c(1.27535032198269, 1.27535032198269, 1.27535032198269,
1.07187289906723, 1.24949754280559), .Dim = 5L, .Dimnames = structure(list(
iterations = NULL), .Names = "iterations")),
extract(fit, pars = "sigma_y")$sigma_y)
})
|
d1012ea065920eb749df91e58c8d45b7ee8d43a2
|
326da72853050febce950f5aabe89c97d896b7b2
|
/man/ByState.Rd
|
97dcff48cab01350ff78a932bfb1ce699d7dd041
|
[] |
no_license
|
lvjensen/PhysicsEdCoalition
|
f12f37ebe4b05790d7967ed23734b1b9808226cf
|
d74897d4945be3ed2ea4b27e6aff150646e1ac90
|
refs/heads/master
| 2021-06-25T16:41:11.482797
| 2021-03-17T17:14:33
| 2021-03-17T17:14:33
| 213,503,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 462
|
rd
|
ByState.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ByState.R
\name{ByState}
\alias{ByState}
\title{ByState}
\usage{
ByState(Year, with_summary = TRUE)
}
\arguments{
\item{Year}{A four-digit numeric year.}
\item{with_summary}{defaults to TRUE. Adds a summary (total) row and the bottom of the table.}
}
\description{
Creates a table of wrangled data from the Physics Teacher Education Coalition website, organized by state.
}
|
77c4bf90ccab8c984b5e124f9064aed49f73768c
|
d804682583257b6fd029f4b9728e4407624560c8
|
/15-RStudio-essentials/2-Debugging/palindrome.R
|
b589e4ccc71ed40f92524f4cf05a3872766de468
|
[
"CC-BY-4.0"
] |
permissive
|
garrettgman/webinars
|
2f099b06779d73c65d2515660aca3b5af27e84e8
|
a34336aa7ba411695f57c845bd0b58b1c64bd4e7
|
refs/heads/master
| 2022-02-14T21:54:27.447675
| 2019-09-04T19:40:34
| 2019-09-04T19:40:34
| 103,402,448
| 9
| 6
| null | 2017-09-13T13:20:48
| 2017-09-13T13:20:48
| null |
UTF-8
|
R
| false
| false
| 760
|
r
|
palindrome.R
|
# Extract the nth digit (1-based, counting from the RIGHT) of a number.
# e.g. get_digit(1234, 1) == 4 and get_digit(1234, 4) == 1.
get_digit <- function(num, n) {
  # BUG FIX: the original divided by 10^n after taking num %% 10^n, which is
  # always 0. Keep the last n digits, then drop the n-1 digits below the
  # target with integer division.
  (num %% (10 ^ n)) %/% (10 ^ (n - 1))
}
# Indicate whether a positive number is a palindrome: compare the x-th digit
# from each end, working inward; bail out on the first mismatch.
palindrome <- function(num) {
  digits <- floor(log(num, 10)) + 1
  # seq_len() instead of 1:(digits %/% 2): for a single-digit number the
  # original iterated over c(1, 0); seq_len(0) skips the loop entirely
  # (result is TRUE either way, but the 1:0 footgun is gone).
  for (x in seq_len(digits %/% 2)) {
    digit1 <- get_digit(num, x)
    digit2 <- get_digit(num, (digits + 1) - x)
    if (digit1 != digit2)
      return(FALSE)
  }
  TRUE
}
# Find the largest palindrome that is the product of two 3-digit numbers.
# The inner loop starts at `a` so each unordered pair is tried only once.
biggest_palindrome <- function() {
  largest <- 0
  for (a in 100:999) {
    for (b in a:999) {
      product <- a * b
      if (product > largest && palindrome(product)) {
        largest <- product
      }
    }
  }
  largest
}
|
43b788bea9d9bd7b42b9e05c42553a0302b7d863
|
3dfc04ba341ba3f6a17c5f424735ca6d54c982b8
|
/Scripts/MMM2019_Finalists.R
|
269cbc34589d1cfc440fdb90694ea5c1967ca27d
|
[] |
no_license
|
trcsmallwood/MMM2019
|
f77fcf7d39d83dd786746d3fb9757b7d8d2bb340
|
53b87dbb5a8bf8ca56ed65eff4e4bd227c08ea49
|
refs/heads/master
| 2021-06-14T11:27:39.260076
| 2021-03-25T14:50:59
| 2021-03-25T14:50:59
| 173,454,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,323
|
r
|
MMM2019_Finalists.R
|
##MMM2019_Finalists
##Calculate the proportion of brackets that pick each species as finalists and champions
#Load in packages
library(stringr)
library(ggplot2)
library(reshape2)  # BUG FIX: melt() below comes from reshape2, which was never loaded
#Read in dataframe of predictions and results
MMM_df <- read.csv("../Submissions/MMM2019_PredictionsSummary.csv", stringsAsFactors = F)
#Extract finalists: count every species picked in a "Semi Final" row
finalists_df <- data.frame(table(unlist(MMM_df[which(MMM_df$Round == "Semi Final"),-c(1:5)])))
names(finalists_df) <- c("Species", "Finalist")
#Extract champions: count every species picked in a "Final" row
champions_df <- data.frame(table(unlist(MMM_df[which(MMM_df$Round == "Final"),-c(1:5)])))
names(champions_df) <- c("Species", "Champion")
#Merge dataframes including all species
combined_df <- merge(finalists_df, champions_df, by = "Species", all = T)
#Assign zeros for finalists who don't become champions
combined_df[which(is.na(combined_df$Champion) == T),3] <- 0
#Calculate Runners Up
combined_df$RunnerUp <- combined_df$Finalist - combined_df$Champion
#Calculate proportion of brackets which predicted each finalist, champion and runner up
combined_prop_df <- data.frame("Species" = combined_df[,1], combined_df[,c(2:4)]/ apply(combined_df[,c(2:4)],2, sum))
#Reshape for plotting, including only champion and runner up columns
combined_plot_df <- melt(combined_df[,-2],
                         id.vars = "Species", variable.name = "Position", value.name = "Count")
#Create output .pdf in relative dir (A4 landscape)
pdf(file = "../Plots/MMM2019_Finalists.pdf", width=11.69, height=8.27)
#Plot proportion of participants who predicted each species to be a champion or runner up
ggplot(combined_plot_df, aes(x = Species, y = Count, fill = Position)) +
  geom_col(position = position_stack(reverse = T)) +
  theme_bw() + #Simple black and white base theme
  #coord_flip() +
  theme(axis.ticks.length = unit(-0.2, "cm"), #Ticks marks inside
        axis.ticks.x = element_blank(), #No ticks on x axis
        axis.text.x = element_text(size = 8, margin=margin(10,10,0,10,"pt"), angle = 45, hjust =1), #x axis text size and spacing
        axis.text.y = element_text(size = 12, margin=margin(10,10,10,10,"pt")), #y axis text size and spacing
        panel.border = element_blank(), #No border
        axis.line.x = element_line(size = 0.5, color = "black"), axis.line.y = element_line(size = 0.5, color = "black"), #Axes colours and thickness
        axis.title.x = element_text(size = 14, margin=margin(0,0,0,0,"pt")), axis.title.y = element_text(size = 14, margin=margin(5,5,5,5,"pt")), #Axis titles size and space=ing
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(), #No grid lines
        legend.position = "bottom", #Legend postion
        plot.margin = unit(c(0.5,0.2,0.1,0.1), "cm"), #Space around the outside, including space for the ends of the axes
        legend.title = element_text(size = 14), legend.text = element_text(size = 12)) + #Legend title and text size
  scale_y_continuous(name = "Number of Brackets", expand = c(0,0), breaks = c(0,5,10)) + #y axis title and limits
  # BUG FIX: the original referenced combined_prop_plot_df, which is never
  # defined; combined_prop_df has one row per species in merge-sorted order,
  # matching the discrete axis levels -- confirm labels line up as intended.
  scale_x_discrete("", labels = str_wrap(combined_prop_df$Species, width = 20)) + #x axis title and include all rounds, not just those with scores
  scale_fill_manual("", values = c("gold", "grey70"), labels= c("Champion", "Runner Up")) + #Legend title and colour by preferred colours
  NULL
#Close .pdf
dev.off()
|
9f400c3542e4af62526b7733d730bf0e130457cf
|
ce26f322943418a1d729f52ba84726bdbd567f81
|
/plot3.R
|
23cd0cb61db9cfe4ceca44900c009fa6f85d2423
|
[] |
no_license
|
Sarpwus/ExData_Plotting1
|
35c24e428c25b2f5601320488687cb8380e7671a
|
b828d89627236238ed649210f8bea26add4111f4
|
refs/heads/master
| 2021-01-18T10:03:49.766010
| 2014-12-08T00:32:55
| 2014-12-08T00:32:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
plot3.R
|
## plot3.R -- energy sub-metering line chart for the EDA course project.
## Load the dataset from the source directory
## assume we are the location of the file in the working directory
library(sqldf) # I use sqldf package for reading the data
# load_hsepwr.R is expected to define `hsepwr` with the columns used below
# (DateTime, Sub_metering_1..3) -- TODO confirm
source("load_hsepwr.R")
# Transparent 480x480 PNG, per the assignment spec
png(file = "plot3.png", width = 480, height = 480, units = "px", bg = "transparent")
with(hsepwr, plot(DateTime, Sub_metering_1, type = "l", col = "black", xlab = "",
                  ylab = "Energy sub metering"))
# Overlay the other two sub-metering series on the same axes
with(hsepwr, lines(DateTime, Sub_metering_2, col = "red"))
with(hsepwr, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1)
dev.off()
|
5320fb06d883df8bda37b8ece2b4f6e24f12375a
|
7c1f0f97a327331c9a09b5440880b354d94431b9
|
/man/write_mallet_state.Rd
|
e2dcdd08c9ba0807ea2d3580658875a1e0c25c87
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
agoldst/dfrtopics
|
e9cae65bb98283b227187a1ec7b81f8de71458ca
|
b547081f5159d38e24309c439192f48bfd0a2357
|
refs/heads/master
| 2022-07-27T09:34:38.664365
| 2022-07-15T13:37:22
| 2022-07-15T13:37:22
| 18,853,085
| 41
| 13
|
MIT
| 2021-01-25T10:10:58
| 2014-04-16T19:38:02
|
R
|
UTF-8
|
R
| false
| true
| 529
|
rd
|
write_mallet_state.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampling_state.R
\name{write_mallet_state}
\alias{write_mallet_state}
\title{Save the Gibbs sampling state to a file}
\usage{
write_mallet_state(m, outfile = "state.gz")
}
\arguments{
\item{m}{the \code{mallet_model} model object}
\item{outfile}{the output file name}
}
\description{
Saves the MALLET sampling state using MALLET's own state-output routine,
which produces a very large gzip-compressed text file.
}
\seealso{
\code{\link{read_sampling_state}}
}
|
2f5124a9db7bfeb8408b6171fb23d52f288a0d13
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609959260-test.R
|
71a216fec0fd2a8c2f37af85d53ac80a9c8f5a15
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
1609959260-test.R
|
# Fuzzer-derived regression input (valgrind run): a fixed 3x1 double matrix
# containing NaN and extreme magnitudes, fed to bravo's internal
# colSumSq_matrix; str() prints the result for inspection.
testlist <- list(x = structure(c(1.61100627174858e+126, NaN, 1.44202388027275e+135 ), .Dim = c(3L, 1L)))
result <- do.call(bravo:::colSumSq_matrix,testlist)
str(result)
|
f1d5b0825fe0ed160306fda1c9e6346692082ac9
|
bcb329b2dfaa867fbf7a39b7366cfbb80552ad97
|
/CollectDealerInfor/ChevroletDealersLinks.R
|
013b227aa4aee46026800e2945f324f78b8e0803
|
[] |
no_license
|
jpzhangvincent/Dealership-Scraping
|
3c8ecfa72e7692f0f709afcbac840899781d27e2
|
13634892a8098cca260ddf1c4017946b76f0deca
|
refs/heads/master
| 2021-01-12T14:25:19.400164
| 2015-10-06T21:55:17
| 2015-10-06T21:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,435
|
r
|
ChevroletDealersLinks.R
|
#Collect all the nation-wide Chevrolet dealerships with information about name, address, website, inventory link and geo location
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding with requireNamespace() checks.
install.packages("XML", repos = "http://cran.cnr.Berkeley.edu/")
library(XML)
install.packages("plyr", repos = "http://cran.cnr.Berkeley.edu/")
library(plyr)
# zipdata.rdata (working directory) is expected to provide a `zipdata`
# data frame with a `city` column -- TODO confirm
load("zipdata.rdata")
cities = unique(zipdata$city)
#length(cities)
# Strip spaces so city names can be embedded directly in URLs
cities = gsub(' ','',cities)
#eg. url = "http://www.chevydealer.com/SanDiego/dealers"
# One dealer-search page URL per city
allsearchpages = unname(sapply(cities, function(city_str) paste0("http://www.chevydealer.com/", city_str,"/dealers")))
# Scrape one chevydealer.com city search page and return a data frame with
# one row per listed dealer: name, full address, website, zipcode, new-car
# inventory link and latitude/longitude. Returns NULL (via the error handler)
# when the page cannot be fetched or parsed.
getdealerinfo<- function(url){
  # print(url)
  tryCatch({
    doc = htmlParse(url)
    dealerNameNodes = getNodeSet(doc,'//div[@class="dealer-name-and-address"]/a[@class="dealer-name"]/span/text()')
    dealerName = xmlSApply(dealerNameNodes,xmlValue,trim=T)
    roadNodes = getNodeSet(doc,'//div[@class="dealer-name-and-address"]/div[1]/text()')
    # NOTE(review): the first matched node is dropped -- presumably a
    # non-dealer element on the page; confirm against the page structure
    roadName = xmlSApply(roadNodes,xmlValue,trim=T)[-1]
    cityNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[1]/text()')
    cityName = xmlSApply(cityNodes,xmlValue,trim=T)
    stateNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[2]/text()')
    stateName = xmlSApply(stateNodes,xmlValue,trim=T)
    zipcodeNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[3]/text()')
    zipcode = xmlSApply(zipcodeNodes,xmlValue,trim=T)
    # Assemble "road, city, state zip" strings (element-wise paste)
    dealerAddress = paste0(roadName,', ',cityName,', ',stateName,' ',zipcode)
    dealerWebsiteNodes = getNodeSet(doc,"//div[@class='dealer-name-and-address']/a")
    # Drop the first two anchors, then trim each href back to its directory
    dealerWebsite = unlist(lapply(xmlSApply(dealerWebsiteNodes,xmlGetAttr,"href")[-c(1,2)],gsub,patter='(.*/).*',replacement='\\1'))
    dealerIVwebsiteNodes = getNodeSet(doc,"//a[contains(./text(),'View Inventory')]")
    # Strip the referrer query suffix, then force a new-car inventory search
    dealerIVWebsite = unlist(lapply(xmlSApply(dealerIVwebsiteNodes,xmlGetAttr,"href"),gsub,patter='(.*)referrer.*',replacement='\\1'))
    dealerInventoryLink = paste0(dealerIVWebsite,'search=new')
    # Coordinates come from data-* attributes on each listing container
    GeoNodes = getNodeSet(doc,'//div[@class="dealer-listing-item"]')
    Latitude = xmlSApply(GeoNodes,xmlGetAttr,"data-latitude")
    Longitude = xmlSApply(GeoNodes,xmlGetAttr,"data-longitude")
    df <- data.frame(dealerName, dealerAddress, dealerWebsite, zipcode, dealerInventoryLink, Latitude, Longitude, stringsAsFactors=F )
    colnames(df) = c("Dealer","Address","Link","zipcode","IV_link", "Latitude", "Longitude")
    return(df)
  }, error = function(err){
    # Any fetch/parse failure yields NULL so callers can skip the page
    return()
  })
}
#url2 = "http://www.chevydealer.com/NewYork/dealers"
chevroletDealers = ldply(allsearchpages, function(url){
out = try(getdealerinfo(url))
if(class(out)=='try-error') next;
return(out)
}, .progress = "text" )
ChevroletDealers = chevroletDealers[!duplicated(chevroletDealers$Dealer),]
ChevroletDealers = merge(ChevroletDealers, zipdata)
save(ChevroletDealers, file="chevroletDealers.rdata")
head(ChevroletDealers)
# ============================ further cleaning ==============================
# Start a local Selenium server and open a Firefox session for the
# interactive ZIP-code search below.
require(RSelenium)
RSelenium::startServer()
remDr = remoteDriver(browserName = "firefox")
remDr$open(silent = TRUE)
# Parse an already-fetched page source (from RSelenium) into the same dealer
# data frame as getdealerinfo(). Duplicates that function's XPath logic,
# differing only in the input (HTML string vs URL) and in how many leading
# road nodes are dropped.
getDealerInforInRS = function(doc){
  doc = htmlParse(doc)
  dealerNameNodes = getNodeSet(doc,'//div[@class="dealer-name-and-address"]/a[@class="dealer-name"]/span/text()')
  dealerName = xmlSApply(dealerNameNodes,xmlValue,trim=T)
  roadNodes = getNodeSet(doc,'//div[@class="dealer-name-and-address"]/div[1]/text()')
  # NOTE(review): drops the first FOUR nodes here (vs one in getdealerinfo)
  # -- presumably the Selenium-rendered page has extra leading elements;
  # confirm against an actual page.
  roadName = xmlSApply(roadNodes,xmlValue,trim=T)[-c(1,2,3,4)]
  cityNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[1]/text()')
  cityName = xmlSApply(cityNodes,xmlValue,trim=T)
  stateNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[2]/text()')
  stateName = xmlSApply(stateNodes,xmlValue,trim=T)
  zipcodeNodes = getNodeSet(doc,'//div[@class="cityStateZip"]/span[3]/text()')
  zipcode = xmlSApply(zipcodeNodes,xmlValue,trim=T)
  # Assemble "road, city, state zip" strings (element-wise paste)
  dealerAddress = paste0(roadName,', ',cityName,', ',stateName,' ',zipcode)
  dealerWebsiteNodes = getNodeSet(doc,"//div[@class='dealer-name-and-address']/a")
  dealerWebsite = unlist(lapply(xmlSApply(dealerWebsiteNodes,xmlGetAttr,"href")[-c(1,2)],gsub,patter='(.*/).*',replacement='\\1'))
  dealerIVwebsiteNodes = getNodeSet(doc,"//a[contains(./text(),'View Inventory')]")
  dealerIVWebsite = unlist(lapply(xmlSApply(dealerIVwebsiteNodes,xmlGetAttr,"href"),gsub,patter='(.*)referrer.*',replacement='\\1'))
  dealerInventoryLink = paste0(dealerIVWebsite,'search=new')
  # Coordinates come from data-* attributes on each listing container
  GeoNodes = getNodeSet(doc,'//div[@class="dealer-listing-item"]')
  Latitude = xmlSApply(GeoNodes,xmlGetAttr,"data-latitude")
  Longitude = xmlSApply(GeoNodes,xmlGetAttr,"data-longitude")
  df <- data.frame(dealerName, dealerAddress, dealerWebsite, zipcode, dealerInventoryLink, Latitude, Longitude, stringsAsFactors=F )
  colnames(df) = c("Dealer","Address","Link","zipcode","IV_link", "Latitude", "Longitude")
  return(df)
}
# Drive the open Selenium session: for each ZIP code in `aa` (defined
# elsewhere -- TODO confirm), type it into the site's search box, submit,
# and scrape the resulting dealer listing page.
remDr$navigate(url)
ndataset <- list()
for (idx in seq_along(aa)) {
  zip <- aa[[idx]]
  print(zip)
  search_box <- remDr$findElement(using = 'id', value = "zip")
  search_box$sendKeysToElement(list(zip, "\uE007"))  # "\uE007" is the Enter key
  page_html <- remDr$getPageSource()[[1]]
  ndataset[[idx]] <- getDealerInforInRS(page_html)
  search_box$clearElement()
}
|
498c60c4f4db0582e970937ce118e9548a4a43bd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/libamtrack/examples/AT.SPC.spectrum.at.depth.step.Rd.R
|
3f379618ae614cb4deabc6e63e99f834908306c5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
AT.SPC.spectrum.at.depth.step.Rd.R
|
library(libamtrack)
### Name: AT.SPC.spectrum.at.depth.step
### Title: AT.SPC.spectrum.at.depth.step
### Aliases: AT.SPC.spectrum.at.depth.step
### ** Examples
# None yet.
|
88218dc79671a33dfc71ed2aa27acb818eef3e27
|
b827162c6a43fe46b313e8b7736018a34f28e76e
|
/man/Specmodule.Rd
|
743269515dccfc94219d83c290e8ad734e176a22
|
[] |
no_license
|
JasonBason/Mosaic
|
f6fffa851155796d3f3b6838aba11b836130567c
|
5d8e487efbce2e0e95a58ef9b9c4766b45960c57
|
refs/heads/master
| 2020-03-22T20:38:07.682386
| 2018-06-01T03:35:12
| 2018-06-01T03:35:12
| 136,076,296
| 0
| 0
| null | 2018-07-10T19:49:57
| 2018-06-04T20:01:38
|
R
|
UTF-8
|
R
| false
| true
| 690
|
rd
|
Specmodule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interactive_plotting_modules.R
\name{Specmodule}
\alias{Specmodule}
\title{Specmodule}
\usage{
Specmodule(input, output, session, tag, set = list(spec = list(xrange = NULL,
yrange = NULL, maxxrange = NULL, maxyrange = NULL, sel = NULL, mz = NULL, data
= NULL, MS2 = T), layout = list(lw = 1, cex = 1, controls = F, ppm = 5, active
= T, highlights = NULL, height = 550), msdata = NULL), keys)
}
\arguments{
\item{input}{}
\item{output}{}
\item{session}{}
\item{tag}{id to be used in ns()}
\item{set}{Import data from the shiny session}
}
\description{
server module for interactive mass spectrum view
}
|
3f3d962aebcd359fbf1b50b37d81d8a3057e3024
|
19a851f0a04b8fbced83254a0a0589060e9b2035
|
/analyses/snv-callers/scripts/01-calculate_vaf_tmb.R
|
4100f330d0d68991d21e90edeb174e9ecbc72fd1
|
[] |
no_license
|
gonzolgarcia/OpenPBTA-analysis
|
da0118d5edfba585786e297d4d312245ab3643f1
|
7a7b40aadff351599f7dbbdeca85d6bebaafe696
|
refs/heads/master
| 2020-08-07T12:38:00.508549
| 2019-10-07T15:51:30
| 2019-10-07T15:51:30
| 213,451,868
| 0
| 0
| null | 2019-10-07T18:01:55
| 2019-10-07T18:01:55
| null |
UTF-8
|
R
| false
| false
| 11,090
|
r
|
01-calculate_vaf_tmb.R
|
# Run variant caller evaluation for a given MAF file.
#
# C. Savonen for ALSF - CCDL
#
# 2019
#
# Option descriptions
#
# -label : Label to be used for folder and all output. eg. 'strelka2'. Default is 'maf'.
# -output : File path that specifies the folder where the output should go.
# New folder will be created if it doesn't exist. Assumes file path is
# given from top directory of 'OpenPBTA-analysis'.
# --maf : Relative file path to MAF file to be analyzed. Can be .gz compressed.
# Assumes file path is given from top directory of 'OpenPBTA-analysis'.
# --metadata : Relative file path to MAF file to be analyzed. Can be .gz compressed.
# Assumes file path is given from top directory of 'OpenPBTA-analysis'.
# --annot_rds : Relative file path to annotation object RDS file to be analyzed.
# Assumes file path is given from top directory of 'OpenPBTA-analysis'.
# --bed_wgs : File path that specifies the caller-specific BED regions file.
# Assumes from top directory, 'OpenPBTA-analysis'.
# --bed_wxs : File path that specifies the WXS BED regions file. Assumes file path
# is given from top directory of 'OpenPBTA-analysis'
# --overwrite : If specified, will overwrite any files of the same name. Default is FALSE.
#
# Command line example:
#
# Rscript analyses/snv-callers/scripts/01-calculate_vaf_tmb.R \
# --label strelka2 \
# --output analyses/snv-callers/results \
# --maf scratch/snv_dummy_data/strelka2 \
# --metadata data/pbta-histologies.tsv \
# --bed_wgs data/WGS.hg38.mutect2.unpadded.bed \
# --bed_wxs data/WXS.hg38.100bp_padded.bed \
# --annot_rds scratch/hg38_genomic_region_annotation.rds
################################ Initial Set Up ################################
# Establish base dir (repository root, located via the .git folder)
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
# Import special functions (project helpers for the wrangling steps below)
source(file.path(root_dir, "analyses", "snv-callers", "util", "wrangle_functions.R"))
# Magrittr pipe
`%>%` <- dplyr::`%>%`
# Load library:
library(optparse)
################################ Set up options ################################
# Set up optparse options -- each mirrors the usage documented in the header.
# NOTE(review): the --metadata help text says "MAF file"; it looks
# copy-pasted from --maf and presumably should describe the metadata TSV.
option_list <- list(
  make_option(
    opt_str = c("-l", "--label"), type = "character",
    default = "maf", help = "Label to be used for folder and all
    output. eg. 'strelka2'. Default is 'maf'",
    metavar = "character"
  ),
  make_option(
    opt_str = c("-o", "--output"), type = "character", default = "none",
    help = "File path that specifies the folder where the output should
    go. Assumes from top directory, 'OpenPBTA-analysis'. New folder
    will be created if it doesn't exist.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--maf", type = "character", default = "none",
    help = "Relative file path (assuming from top directory of
    'OpenPBTA-analysis') to MAF file to be analyzed. Can be .gz compressed.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--metadata", type = "character", default = "none",
    help = "Relative file path (assuming from top directory of
    'OpenPBTA-analysis') to MAF file to be analyzed. Can be .gz compressed.",
    metavar = "character"
  ),
  make_option(
    opt_str = c("-a", "--annot_rds"), type = "character", default = "none",
    help = "Relative file path (assuming from top directory of
    'OpenPBTA-analysis') to annotation object RDS file to be analyzed.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--bed_wgs", type = "character", default = "none",
    help = "File path that specifies the caller-specific
    BED regions file. Assumes from top directory, 'OpenPBTA-analysis'",
    metavar = "character"
  ),
  make_option(
    opt_str = "--bed_wxs", type = "character", default = "none",
    help = "File path that specifies the WXS BED regions file. Assumes
    from top directory, 'OpenPBTA-analysis'",
    metavar = "character"
  ),
  make_option(
    opt_str = "--overwrite", action = "store_true",
    default = FALSE, help = "If TRUE, will overwrite any files of
    the same name. Default is FALSE",
    metavar = "character"
  )
)
# Parse options
opt <- parse_args(OptionParser(option_list = option_list))
########### Check that the files we need are in the paths specified ############
# Fix: the original also listed opt$cosmic, but no --cosmic option is defined
# above, so it evaluated to NULL and c() silently dropped it -- that "check"
# never ran. Removed to keep the list honest.
needed_files <- c(opt$maf, opt$metadata, opt$bed_wgs, opt$bed_wxs, opt$annot_rds)
# Add root directory to the file paths
needed_files <- file.path(root_dir, needed_files)
# Get list of which files were found
files_found <- file.exists(needed_files)
# Report error if any of them aren't found
if (!all(files_found)) {
  stop(paste("\n Could not find needed file(s):",
    needed_files[which(!files_found)],
    "Check your options and set up.",
    sep = "\n"
  ))
}
################## Create output directories for this caller ##################
# Caller specific results directory path
caller_results_dir <- file.path(root_dir, opt$output)
# Make caller specific results folder (recursive: creates parents as needed)
if (!dir.exists(caller_results_dir)) {
  dir.create(caller_results_dir, recursive = TRUE)
}
####################### File paths for files we will create ####################
vaf_file <- file.path(caller_results_dir, paste0(opt$label, "_vaf.tsv"))
region_annot_file <- file.path(caller_results_dir, paste0(opt$label, "_region.tsv"))
tmb_file <- file.path(caller_results_dir, paste0(opt$label, "_tmb.tsv"))
# Declare metadata file name for this caller
metadata_file <- file.path(
  caller_results_dir,
  paste0(opt$label, "_metadata_filtered.tsv")
)
##################### Check for files if overwrite is FALSE ####################
# If overwrite is set to FALSE, check if these exist before continuing
if (!opt$overwrite) {
  # Make a list of the output files
  output_files <- c(vaf_file, region_annot_file, tmb_file)
  # Find out which of these exist
  existing_files <- file.exists(output_files)
  # If all files exist; stop.
  # NOTE(review): cat() prints its arguments and returns NULL, so stop(cat(...))
  # prints the file list but raises an error with an empty message.
  if (all(existing_files)) {
    stop(cat(
      "Stopping; --overwrite is not being used and all output files already exist: \n",
      vaf_file, "\n",
      region_annot_file, "\n",
      tmb_file
    ))
  }
  # If some files exist, print a warning (same cat() caveat as above):
  if (any(existing_files)) {
    warning(cat(
      "Some output files already exist and will not be overwritten unless you use --overwrite: \n",
      paste0(output_files[which(existing_files)], "\n")
    ))
  }
}
########################### Set up this caller's data ##########################
# Print progress message
message(paste("Reading in", opt$maf, "MAF data..."))
# Read in this MAF, skip = 1 skips the MAF version line at the top of the file
maf_df <- data.table::fread(opt$maf, skip = 1, data.table = FALSE)
# Print progress message
message(paste("Setting up", opt$label, "metadata..."))
# Isolate metadata to only the samples that are present in the MAF, rename the
# ID column to match the MAF, and write the filtered table to disk.
# NOTE(review): dplyr::arrange() with no columns is a no-op -- probably meant
# to sort by the biospecimen ID.
metadata <- readr::read_tsv(opt$metadata) %>%
  dplyr::filter(Kids_First_Biospecimen_ID %in% maf_df$Tumor_Sample_Barcode) %>%
  dplyr::distinct(Kids_First_Biospecimen_ID, .keep_all = TRUE) %>%
  dplyr::arrange() %>%
  dplyr::rename(Tumor_Sample_Barcode = Kids_First_Biospecimen_ID) %>%
  readr::write_tsv(metadata_file)
# Print out completion message (write_tsv returns its input, so `metadata`
# still holds the filtered tibble here)
message(paste("Filtered metadata file saved to: \n", metadata_file))
# Make sure that we have metadata for all these samples.
if (!all(unique(maf_df$Tumor_Sample_Barcode) %in% metadata$Tumor_Sample_Barcode)) {
  stop("There are samples in this MAF file that are not in the metadata.")
}
################## Calculate VAF and set up other variables ####################
# Skip the VAF calculation when the output already exists and --overwrite is off
if (file.exists(vaf_file) && !opt$overwrite) {
  # Warn (not stop) so the remaining sections still run.
  # NOTE(review): when this branch is taken, vaf_df is never created, yet the
  # annotation and TMB sections below use vaf_df -- they will fail unless it
  # exists in the session. Verify the intended skip behavior.
  warning(cat(
    "The VAF file already exists: \n",
    vaf_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ))
} else {
  # Print out warning if this file is going to be overwritten
  if (file.exists(vaf_file)) {
    warning("Overwriting existing VAF file.")
  }
  # Print out progress message
  message(paste("Calculating VAF for", opt$label, "MAF data..."))
  # Use the premade function to calculate VAF; this also merges the metadata
  # (write_tsv returns its input, so vaf_df keeps the computed tibble)
  vaf_df <- set_up_maf(maf_df, metadata) %>%
    readr::write_tsv(vaf_file)
  # Print out completion message
  message(paste("VAF calculations saved to: \n", vaf_file))
}
######################### Annotate genomic regions #############################
# Skip regional annotation when the output already exists and --overwrite is off
if (file.exists(region_annot_file) && !opt$overwrite) {
  # Warn (not stop) so the remaining sections still run
  warning(cat(
    "The regional annotation file already exists: \n",
    region_annot_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ))
} else {
  # Warn if this file is going to be overwritten.
  # (Bug fix: this previously tested vaf_file, so the warning fired or stayed
  # silent based on the VAF file's state rather than this section's output.)
  if (file.exists(region_annot_file)) {
    warning("Overwriting existing regional annotation file.")
  }
  # Print out progress message
  message(paste("Annotating genomic regions for", opt$label, "MAF data..."))
  # Annotate genomic regions; assumes vaf_df was created by the VAF step above
  # (write_tsv returns its input, so maf_annot keeps the annotated tibble)
  maf_annot <- annotr_maf(vaf_df, annotation_file = opt$annot_rds) %>%
    readr::write_tsv(region_annot_file)
  # Print out completion message
  message(paste("Genomic region annotations saved to:", region_annot_file))
}
############################# Calculate TMB ####################################
# Skip TMB calculation when the output already exists and --overwrite is off.
# (Bug fix: this previously tested region_annot_file, so TMB was silently
# skipped whenever the annotation file existed even if the TMB file did not.)
if (file.exists(tmb_file) && !opt$overwrite) {
  warning(cat(
    "The Tumor Mutation Burden file already exists: \n",
    tmb_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ))
} else {
  # Warn if this file is going to be overwritten.
  # (Bug fix: previously tested vaf_file instead of tmb_file.)
  if (file.exists(tmb_file)) {
    warning("Overwriting existing TMB file.")
  }
  # Print out progress message
  message(paste("Calculating TMB for", opt$label, "MAF data..."))
  # Set up BED region files for TMB calculations (3-column BED: chrom, start, end)
  wgs_bed <- readr::read_tsv(opt$bed_wgs, col_names = FALSE)
  wxs_bed <- readr::read_tsv(opt$bed_wxs, col_names = FALSE)
  # Size of genome/exome surveyed = sum of (end - start) over all intervals
  wgs_genome_size <- sum(wgs_bed[, 3] - wgs_bed[, 2])
  wxs_exome_size <- sum(wxs_bed[, 3] - wxs_bed[, 2])
  # Print out these genome sizes
  cat(
    " WGS size in bp:", wgs_genome_size,
    "\n",
    "WXS size in bp:", wxs_exome_size,
    "\n"
  )
  # Only do this step if you have WXS samples
  if (any(metadata$experimental_strategy == "WXS")) {
    # Filter out mutations for WXS that are outside of these BED regions.
    vaf_df <- wxs_bed_filter(vaf_df, wxs_bed_file = opt$bed_wxs)
  }
  # Calculate TMBs and write to TMB file
  tmb_df <- calculate_tmb(vaf_df,
    wgs_size = wgs_genome_size,
    wxs_size = wxs_exome_size
  ) %>%
    readr::write_tsv(tmb_file)
  # Print out completion message
  message(paste("TMB calculations saved to:", tmb_file))
}
|
55f159a2242cce2ec41eedec31b8943c1fa32246
|
0c61299c0bfab751bfb5b5eac3f58ee2eae2e4b0
|
/Nitrogen_Algae/old_code/sim_ode.R
|
2c9522192ab224b0c689982bfd9e3c35b794806f
|
[] |
no_license
|
jwerba14/Species-Traits
|
aa2b383ce0494bc6081dff0be879fc68ed24e9c2
|
242673c2ec6166d4537e8994d00a09477fea3f79
|
refs/heads/master
| 2022-10-13T10:57:54.711688
| 2020-06-12T01:57:21
| 2020-06-12T01:57:21
| 105,941,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,219
|
r
|
sim_ode.R
|
## Simulation-based check of a Stan ODE model:
##  1) simulate data from the model (run_estimation = 0),
##  2) refit the model to the simulated medians (run_estimation = 1),
##  3) compare parameter quantiles between the two runs in a plot.
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Compile the Stan model (sim_mod.stan must be in the working directory)
sim_mod <- stan_model(file = "sim_mod.stan", model_name = "sim_mod", verbose = T)
# NOTE(review): dat_27 is not defined in this file; it must already exist in
# the session (presumably loaded by another script). Columns 8 and 5 are
# passed as the observed series. run_estimation = 0 means the model only
# simulates (no likelihood contribution).
sim_out <- sampling(sim_mod, seed = 2, data = list(N = nrow(dat_27),
                    y = dat_27[c(8,5)],
                    t0 = 0,
                    t_obs= seq(1,11),
                    run_estimation = 0),chains=1,iter=4000,
                    control = list(adapt_delta = 0.99, max_treedepth =18))
# Simulated outcome draws
y_sim <- rstan::extract(sim_out, pars = "y_hat_n")
y_sim <- data.frame(y_sim)
true_param <- rstan::extract(sim_out)
fit_sum_sim<- summary(sim_out)
# Rows 1-9 of the parameter summary, reshaped to long format for plotting.
# NOTE(review): %>%, mutate, gather, filter, ggplot come from tidyverse
# packages that are not loaded in this file -- confirm they are attached.
fsss<-data.frame(fit_sum_sim$summary[c(1:9),])
fsss1 <- fsss %>% mutate(param = rownames(fsss)) %>% gather(-c(n_eff, Rhat, param), key = "quantile", value = "value")
fsss1 <- fsss1 %>% filter(quantile != "mean" & quantile != "se_mean" & quantile != "sd") %>% mutate(trial = "sim")
fsss_med <- fsss1 %>% filter(quantile == "X50.")
fsss_upr <- fsss1 %>% filter(quantile == "X75.")
fsss_lwr <- fsss1 %>% filter(quantile == "X25.")
# Median simulated trajectory, used as "data" for the recapture fit.
# NOTE(review): y_sim was overwritten with a data.frame above, so y_sim$y_hat_n
# likely no longer resolves to the draws array here -- verify this line runs.
simdat <- apply(y_sim$y_hat_n, 2:3,median)
# Refit with run_estimation = 1 (likelihood on) to the simulated data
dgp_recapture_fit <- sampling(sim_mod, data = list(N = nrow(simdat),
                    y = simdat,
                    t0 = 0,
                    t_obs= seq(1,11),
                    run_estimation = 1),chains=1,
                    control = list(adapt_delta = 0.99, max_treedepth =15))
fit_sum_check<- summary(dgp_recapture_fit)
fsso<-data.frame(fit_sum_check$summary[c(1:9),])
fsso1 <- fsso %>% mutate(param = rownames(fsso)) %>% gather(-c(n_eff, Rhat, param), key = "quantile", value = "value")
fsso1 <- fsso1 %>% filter(quantile != "mean" & quantile != "se_mean" & quantile != "sd") %>% mutate(trial = "recap")
# Combine both runs, drop initial-state and noise parameters, and compare
ndat <- rbind(fsso1,fsss1)
ndat <- ndat %>% filter(param != "y0[1]" & param != "y0[2]" & param != "sigma[1]" & param != "sigma[2]")
ggplot(ndat, aes(value, param)) + geom_point(aes(color=quantile, shape = trial))
|
63b90b55b8fb13c127d88f774fba9ee8a47108a6
|
a82713f80e7481cc189255e87a5a9425379045ff
|
/hidrantesmanizales.R
|
3ed52805ebac171b0bc51ef4b43cd40f759cdb91
|
[] |
no_license
|
dechontaduro/DataScienceExamples
|
38714d74a359e8a7bbc1422a35794acf67b447cf
|
0c3cbd6f5f28184670a4a67847a55e1b5b31a43d
|
refs/heads/master
| 2021-01-12T11:03:02.230015
| 2016-11-04T01:21:02
| 2016-11-04T01:21:02
| 72,801,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
hidrantesmanizales.R
|
# Download (if needed) and read a CSV, caching by local file and variable name.
#
# variableName: name of a variable; if an object with this name already exists
#   in the search path, it is returned unchanged instead of re-reading.
# url, filename: remote source URL and local cache path for the CSV.
# ...: extra arguments forwarded to read.csv().
#
# Returns the cached object or the freshly read data frame.
#
# Bug fix: the original returned NULL (invisibly) when `variableName` already
# existed, so the idiom `x <- getData("x", ...)` clobbered the cached object
# with NULL on every run after the first. We now return the existing object.
getData <- function(variableName, url, filename, ...){
  if (exists(variableName)) {
    return(get(variableName))
  }
  if (!file.exists(filename)) {
    download.file(url, filename)
  }
  read.csv(filename, ...)
}
# Geocode a free-text address via the Google Maps Geocoding HTTP API.
# Returns c(longitude, latitude) of the first match, or NA when the API
# reports anything other than status "OK".
# NOTE(review): the function name has a typo ("Adddress") but is kept because
# the script below calls it by this name.
# NOTE(review): this keyless endpoint presumably requires an API key nowadays
# -- verify before relying on it.
geocodeAdddress <- function(address) {
  require(RJSONIO)
  url <- "http://maps.google.com/maps/api/geocode/json?address="
  url <- URLencode(paste(url, address, "&sensor=false", sep = ""))
  x <- fromJSON(url, simplify = FALSE)
  if (x$status == "OK") {
    # First result only; note the order is (lng, lat)
    out <- c(x$results[[1]]$geometry$location$lng,
             x$results[[1]]$geometry$location$lat)
  } else {
    out <- NA
  }
  Sys.sleep(0.2) # API only allows 5 requests per second
  out
}
# NOTE(review): hard-coded, user-specific Windows path; breaks on any other
# machine. Consider a project-relative path.
setwd("C:\\Users\\juanc\\Dropbox\\DataScience\\pruebas\\hidrantes")
#rm(dataHidra)
# Fire-hydrant open data for Manizales, Colombia (datos.gov.co resource)
url <- "https://www.datos.gov.co/resource/ygcd-j498.csv"
dataHidra <- getData("dataHidra", url, "hidrantesmanizales.csv")
str(dataHidra)
# Geocode each hydrant one row at a time; x[2] is presumably the 'direccion'
# (address) column -- TODO confirm. Slow: rate-limited inside geocodeAdddress.
geo <- apply (dataHidra, 1,
              function(x) {
                geocodeAdddress(paste(x[2], ', Manizales, Colombia'))})
# geocodeAdddress returns c(lng, lat); split into separate columns
dataHidra$lat <- sapply(geo, function(x) x[2])
dataHidra$lon <- sapply(geo, function(x) x[1])
View(dataHidra)
library(leaflet)
library(dplyr)
# Interactive clustered marker map; popups show each hydrant's address
leaflet(data = dataHidra) %>% addTiles() %>%
  addMarkers(~lon, ~lat, popup = ~direccion,
             clusterOptions = markerClusterOptions())
|
6e2e556f4e4496c85a514f961a56626e08fc481f
|
8f2e62d1cb1e323639fd30b457331fc9082babc2
|
/BootSU2C-TCGA-HeatmapApp.R
|
0caf1d6796fdd36784d21e4508965691a05cacc8
|
[] |
no_license
|
NateDee/YuLabSU2CHeat
|
b3513c34a2a509d43ce4fe4205e70caf206e4e55
|
6c313d94fe143a361f10f78deca96bf282279345
|
refs/heads/master
| 2021-05-01T20:00:51.485785
| 2018-03-23T15:26:40
| 2018-03-23T15:26:40
| 120,956,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
r
|
BootSU2C-TCGA-HeatmapApp.R
|
# Set working directory to the SU2C script location.
# NOTE(review): hard-coded network-drive path; only works where R: is mapped.
setwd("R:/Medicine/Hematology-Oncology/Yu_Lab/Nate/scripts_and_tools/YuLab-SU2C-TCGA")
installPkges <- function(pkg){
if(!pkg %in% installed.packages()) install.packages(pkg, repos = "https://mirror.las.iastate.edu/CRAN/")
}
# Required packages, "data.table", "gplots", "RColorBrewer", "shiny"
print("Checking for required packages, installing if needed")
installPkges("data.table")
installPkges("gplots")
installPkges("RColorBrewer")
installPkges("shiny")
#Load shiny
library(shiny)
# Launch the heatmap Shiny app from the given app directory
# (blocks until the app window is closed)
runApp("YuLab-SU2C-TCGA-HeatmapApp")
|
33929cb3477708d54a2e5736723fd2e7e22e38fb
|
5d72e421cdf578655997ff1ad1f06ce59d1240db
|
/man/hx_timeline.Rd
|
1e22843ba0834fc8303e5f9760243e9ddf9350da
|
[
"MIT"
] |
permissive
|
news-r/hoaxy
|
104b41014c0a5109c1ca090eac42c2ceae9af5a0
|
dc127acbd78881f72b5c208ec580ff8e37526cb7
|
refs/heads/master
| 2020-06-04T22:18:52.114066
| 2019-06-25T07:48:28
| 2019-06-25T07:48:28
| 192,213,055
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 575
|
rd
|
hx_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/call.R
\name{hx_timeline}
\alias{hx_timeline}
\title{Timeline}
\usage{
hx_timeline(ids, resolution = c("D", "M", "W", "H"))
}
\arguments{
\item{ids}{A list or vector of article ids to query, see \code{\link{hx_articles}}.}
\item{resolution}{The resolution of timeline. \code{H}: hour, \code{D}: day, \code{W}: week, \code{M}: month.}
}
\description{
Return timeline of tweets on given articles.
}
\examples{
\dontrun{
articles <- hx_articles("pizzagate")
tl <- hx_timeline(articles$id[1:5])
}
}
|
8755e909a8f27f7e539694cf82dc806a20857a7a
|
5099820fe4e5a0d72ea9da02d7a65d0056b41ee8
|
/R/calc-mean.R
|
c9803656a7bdc41e558f5e024a9a9d84e5e1ec2f
|
[] |
no_license
|
milanwiedemann/psychdata
|
a10a5b9ed58a90472333ad16a06796b729ccec2e
|
cb1ed4c3e45103d93b4a9307dfa43108b787d128
|
refs/heads/master
| 2021-07-11T08:46:35.646129
| 2020-06-05T08:36:55
| 2020-06-05T08:36:55
| 143,755,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,564
|
r
|
calc-mean.R
|
#' Calculate mean of variables
#' @description Calculate a prorated scale mean per session/timepoint,
#'   addressing item-level missing data: the mean is computed only when at
#'   least \code{n_min} item scores are available (delegated to
#'   \code{psychdata:::calc_mean_n_min}).
#' @param data Wide dataframe.
#' @param id_str String of identifier variable.
#' @param var_str String (item-name stem) of the variable to calculate the mean for.
#' @param session_str Character vector of session identifiers; one mean column
#'   is produced per session.
#' @param n_min Minimum number of available item scores to calculate the mean.
#' @param item_scores Logical; if TRUE, the individual item scores are appended
#'   after each mean column.
#' @param short_var_name Logical; if TRUE the mean column is named
#'   \code{<var>_<timepoint>}, otherwise \code{<var>_mean_<timepoint>}.
#' @param timepoint_str Prefix used when the session label is numeric (default "s").
#' @param sep Separator for variable names.
#' @export
#'
calc_mean <- function(data, id_str, var_str, session_str, n_min, item_scores = FALSE, short_var_name = TRUE, timepoint_str = "s" , sep = "_"){
  session_str <- base::c(session_str)
  # Select the ID plus every column whose name contains the item stem
  data_select_var <- data %>%
    dplyr::select(id_str, contains(var_str))
  # Create empty string for the list of variable names (filled per session below)
  var_names <- ""
  # Tibble with only IDs; joined onto in each iteration and finally returned
  data_join_start_end <- data %>%
    dplyr::select(id_str)
  # Loop over the requested sessions
  for (i in 1:base::length(session_str)) {
    # Select all items from a specific session
    data_select_var_ses <- data_select_var %>%
      dplyr::select(id_str, contains(session_str[i]))
    # Get all item column names for this session (used for the rowwise mean)
    var_names <- data_select_var %>%
      dplyr::select(contains(session_str[i])) %>%
      base::names()
    item_count <- base::length(var_names)
    # Extract session number or label from session_str:
    # if no digit is present use the letters as-is, otherwise prefix the digit
    # with timepoint_str (e.g. "3" -> "s3")
    if (base::is.na(stringr::str_extract(session_str[i], "\\d+")) == TRUE) {
      session_str_var_name <- stringr::str_extract(session_str[i], "[:alpha:]+")
    } else if (base::is.na(stringr::str_extract(session_str[i], "\\d+")) == FALSE) {
      session_str_var_name <- stringr::str_extract(session_str[i], "\\d+")
      session_str_var_name <- base::paste0(timepoint_str, session_str_var_name)
    }
    # Create the name of the new mean column
    if (short_var_name == FALSE) {
      var_str_i <- base::paste0(var_str, sep, "mean", sep, session_str_var_name)
    } else if (short_var_name == TRUE) {
      var_str_i <- base::paste0(var_str, sep, "mean", sep, session_str_var_name)
    }
    # # Calculate number of available scores
    # data_select_var_ses_mean <- data_select_var_ses %>%
    #  mutate(n = sum(is.na(variable)))
    # Calculate the prorated mean over item columns 2..(item_count + 1);
    # calc_mean_n_min presumably returns NA when fewer than n_min items are
    # present -- TODO confirm against its definition
    data_select_var_ses_mean <- data_select_var_ses %>%
      dplyr::mutate(!!var_str_i := psychdata:::calc_mean_n_min(.[ , 2:(item_count + 1)], n_min))
    data_loop <- data_select_var_ses_mean %>%
      dplyr::select(id_str, !!var_str_i)
    # Accumulate results: join this session's column(s) onto the running frame.
    # (Author note kept: left_join per iteration is clunky but works.)
    if (item_scores == FALSE){
      # If item_scores not asked for just add the data_loop (with the session means)
      data_join_start_end <- dplyr::left_join(data_join_start_end, data_loop, by = id_str)
    } else {
      data_join_start_end <- dplyr::left_join(data_join_start_end, data_loop, by = id_str)
      data_join_start_end <- dplyr::left_join(data_join_start_end, data_select_var_ses, by = id_str)
    }
  }
  return(data_join_start_end)
}
|
57efa1e73ab1a3a827e2e095494dcba1f6350261
|
41395c8fbe6fd5c6a5752599b49cb81dd4c70819
|
/man/fit_gamlss1.Rd
|
817510dd658c03446eaf6487e8e1c6620457771e
|
[] |
no_license
|
cran/childsds
|
7d022ff8b4551c735060a1d922d7fdb4e7dbdedf
|
84799b3f488438f4e451722304c2b5e24f2a8c7b
|
refs/heads/master
| 2022-02-21T05:33:26.726171
| 2022-02-10T15:40:02
| 2022-02-10T15:40:02
| 23,198,825
| 0
| 2
| null | 2021-07-15T02:27:17
| 2014-08-21T18:44:07
|
R
|
UTF-8
|
R
| false
| true
| 1,391
|
rd
|
fit_gamlss1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createlms.R
\name{fit_gamlss1}
\alias{fit_gamlss1}
\title{fit_gamlss1}
\usage{
fit_gamlss1(
data,
age.min = 0,
age.max = 80,
age.int = 1/12,
keep.models = F,
dist = "BCCGo",
formula = NULL,
sigma.formula = ~1,
nu.formula = ~1,
tau.formula = ~1,
method.pb = "ML"
)
}
\arguments{
\item{data}{dataframe as return by select_meas()}
\item{age.min}{lower bound of age}
\item{age.max}{upper bound of age}
\item{age.int}{stepwidth of the age variable}
\item{keep.models}{indicator whether or not models in each iteration should be kept}
\item{dist}{distribution used for the fitting process, has to be one of BCCGo, BCPEo, BCTo as they are accepted by lms()}
\item{formula}{formula for the location parameter}
\item{sigma.formula}{formula for the sigma parameter}
\item{nu.formula}{formula for the nu parameter}
\item{tau.formula}{formula for the tau parameter}
\item{method.pb}{GAIC or ML}
}
\value{
list containing a dataframe of the fitted lms parameter at the given age points and the fitted model
}
\description{
fit_gamlss
}
\details{
wrapper around the \code{\link[gamlss]{gamlss}} function from the gamlss package
returns the fitted lms-parameter at given age points
the function is called inside \code{\link{do_iterations}} and may not be called directly
}
\author{
Mandy Vogel
}
|
1417660752a2ed7595bb42463dc2a62996ac101d
|
bc57437c2c1493388add2435693f2d41ad4ca6d7
|
/tests/testthat.R
|
3e59fcb5ee70a990877db342405fec278b625282
|
[
"MIT"
] |
permissive
|
MilesMcBain/capsule
|
374ade3e25f014a1526cbb1722c03b4fe79c1813
|
401d0c98adc329c17d0bb129069c9ec220a26646
|
refs/heads/master
| 2023-06-25T09:00:42.944246
| 2022-08-04T01:11:26
| 2022-08-04T01:11:26
| 215,474,942
| 136
| 8
|
NOASSERTION
| 2023-06-09T15:45:36
| 2019-10-16T06:36:59
|
R
|
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
# Standard testthat entry point: running this file executes every test under
# tests/testthat/ for the 'capsule' package (R CMD check convention).
library(testthat)
library(capsule)
test_check("capsule")
|
85cafcd6dafa5792e0a7704f938eccf29257a727
|
fe0429febc0409adcd5ae19797907c47810779c5
|
/Plot3.R
|
032195c87414bc1a3d23133f864bf94229ab2db9
|
[] |
no_license
|
nikotbg/FourPlots
|
124acaa72cb175e0aa543f582d2bb61a2bfe7c1c
|
dec0b0d86f4cbe2c5d725e16de9f1d563cd27e1b
|
refs/heads/master
| 2021-01-10T06:26:55.785626
| 2015-11-08T20:06:43
| 2015-11-08T20:06:43
| 45,796,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,323
|
r
|
Plot3.R
|
# Ensure the file household_power_consumption.txt is downloaded into your
# working directory, then read it into R (semicolon-delimited, with header)
powerful<-read.table("household_power_consumption.txt",header=TRUE, sep=";")
# Combine Date and Time columns into a single string column
library(dplyr)
verypowerful<-mutate(powerful,datetime=paste(Date,Time, sep=" "))
# Convert the datetime string to POSIXct and add it to the data frame
library(lubridate)
asdate<-dmy_hms(verypowerful$datetime)
verypowerful$DateTime<-asdate
# Convert measurement columns to numeric. as.numeric(paste(...)) coerces via
# character, so non-numeric entries (e.g. "?") become NA with a warning.
verypowerful$Global_Active_Power<-as.numeric(paste(verypowerful$Global_active_power))
verypowerful$Global_Reactive_Power<-as.numeric(paste(verypowerful$Global_reactive_power))
verypowerful$VOLTAGE<-as.numeric(paste(verypowerful$Voltage))
verypowerful$Global_Intensity<-as.numeric(paste(verypowerful$Global_intensity))
verypowerful$Sub_Metering_1<-as.numeric(paste(verypowerful$Sub_metering_1))
verypowerful$Sub_Metering_2<-as.numeric(paste(verypowerful$Sub_metering_2))
verypowerful$Sub_Metering_3<-as.numeric(paste(verypowerful$Sub_metering_3))
# Keep only the 1st and 2nd of February 2007 (grep matches the date prefix
# of the DateTime column after coercion to character)
powerful1<-select(verypowerful,DateTime,Global_Active_Power,Global_Reactive_Power,VOLTAGE,Global_Intensity,Sub_Metering_1,Sub_Metering_2,Sub_Metering_3)
powerful2<-powerful1[grep("2007-02-01",powerful1$DateTime),]
powerful3<-powerful1[grep("2007-02-02",powerful1$DateTime),]
power4<-rbind(powerful2,powerful3)
# Melt the sub-metering columns into long format (one row per reading/meter)
library(reshape2)
melted<- melt(power4, id.vars = c("DateTime", "Global_Active_Power","Global_Reactive_Power","VOLTAGE","Global_Intensity"),
              variable.name = "Sub_Metering_variable",
              value.name = "Sub_Metering_value")
par(mfcol=c(1,1))
par(mar=c(2,4,2,2))
# Empty plot frame, then one line per sub-meter.
# NOTE(review): pch is ignored by lines(); harmless but unnecessary.
with(melted,plot(DateTime,Sub_Metering_value,ylab="Energy sub metering",type="n"))
with(subset(melted,Sub_Metering_variable=="Sub_Metering_1"),lines(DateTime,Sub_Metering_value,pch=".",col="black"))
with(subset(melted,Sub_Metering_variable=="Sub_Metering_2"),lines(DateTime,Sub_Metering_value,pch=".",col="red"))
with(subset(melted,Sub_Metering_variable=="Sub_Metering_3"),lines(DateTime,Sub_Metering_value,pch=".",col="blue"))
legend("topright",cex=0.6,lwd=c(1,1,1),col=c("black","red","blue"),legend=c("Sub_Metering_1","Sub_Metering_2","Sub_Metering_3"))
# Copy the on-screen plot into a PNG file (default device size 480x480)
dev.copy(png,file="Plot3.png")
dev.off()
|
cfc1e93fb2e9dbffb3fe27aa35eb9cc5847ee9b3
|
edd192f33044e894f01091014d481fcb3de64449
|
/transcriptomics_scripts/DESeq2.R
|
3349df4adc6e1bf11a54e5a353407b3d455d3839
|
[] |
no_license
|
karinlag/BioinfTraining
|
d2379e5f387c0e73cb8e86f6969ab8d9534f3d75
|
94e16a7f1f190b132199fbb798affd656c1687af
|
refs/heads/master
| 2022-03-01T13:32:36.435699
| 2019-10-28T10:31:28
| 2019-10-28T10:31:28
| 111,698,637
| 1
| 2
| null | 2017-11-22T15:06:06
| 2017-11-22T15:06:06
| null |
UTF-8
|
R
| false
| false
| 848
|
r
|
DESeq2.R
|
## To install cummeRbund and DESeq2 (do it once)
# source("https://bioconductor.org/biocLite.R")
# biocLite("DESeq2")
getwd()
# NOTE(review): relative setwd depends on where R was launched from; fragile.
setwd('../Desktop/course_data/DESeq2/')
library('DESeq2')
# featureCounts output: the first line is a comment header, hence skip = 1
data <- read.delim('../featureCounts/count_gene', skip=1, sep="\t")
dim(data)
head(data)
colnames(data)
# Columns 7-12 hold the per-sample counts (two conditions x three replicates)
count <- data[7:12]
colnames(count) <- c('Con1_Rep1', 'Con1_Rep2', 'Con1_Rep3', 'Con2_Rep1', 'Con2_Rep2', 'Con2_Rep3')
rownames(count) <- data$Geneid
# Sample sheet: one condition label per count column, in the same order
condition <- data.frame(c('con1', 'con1', 'con1', 'con2', 'con2', 'con2'))
colnames(condition) <- 'group'
rownames(condition) <- colnames(count)
# Differential expression, con2 vs con1
dds <- DESeqDataSetFromMatrix(count, condition, design = ~group)
dds <- DESeq(dds)
res <- results(dds)
summary(res)
summary(res, alpha=0.05)
# Keep genes significant at an adjusted p-value (BH) cutoff of 0.05
res_05 <- subset(res, padj <= 0.05)
write.table(res_05, 'DESeq2.txt', quote=F, row.names=F, sep='\t')
# MA plot: log2 fold change vs mean of normalized counts
plotMA(res)
|
5dc782f7223a69c57d1a6be3f6fc324f405ddf69
|
23cad221b4fd1656e27038880f500eed6695fde0
|
/man/celdaCGMod.Rd
|
83eecb51e4f76ce6fa7d7079f1defa1a8cb12c18
|
[
"GPL-2.0-only",
"MIT"
] |
permissive
|
campbio/celda
|
91f8c64424fe24a74a1359b6dde371ab8ff2aea1
|
92905bda2833c9beda48c6a9404a86a102cd0553
|
refs/heads/master
| 2023-02-17T09:41:27.551599
| 2023-02-15T19:01:52
| 2023-02-15T19:01:52
| 158,611,235
| 134
| 32
|
MIT
| 2023-02-17T01:39:55
| 2018-11-21T22:01:57
|
R
|
UTF-8
|
R
| false
| true
| 333
|
rd
|
celdaCGMod.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{celdaCGMod}
\alias{celdaCGMod}
\title{celdaCGmod}
\format{
A celda_CG object
}
\usage{
celdaCGMod
}
\description{
celda_CG model object generated from \code{celdaCGSim} using
old \code{celda_CG} function.
}
\keyword{datasets}
|
84e4c0882d9f6110232d12c67fbdd33f91f9453d
|
fd0622e97276bba2c04d3c2fcba902cdfb65e214
|
/packages/nimble/inst/classic-bugs/vol1/litters/test1.R
|
9a568e418a048e237a9ad82acfcb3cb8574650d3
|
[
"GPL-2.0-only",
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] |
permissive
|
nimble-dev/nimble
|
7942cccd73815611e348d4c674a73b2bc113967d
|
29f46eb3e7c7091f49b104277502d5c40ce98bf1
|
refs/heads/devel
| 2023-09-01T06:54:39.252714
| 2023-08-21T00:51:40
| 2023-08-21T00:51:40
| 20,771,527
| 147
| 31
|
BSD-3-Clause
| 2023-08-12T13:04:54
| 2014-06-12T14:58:42
|
C++
|
UTF-8
|
R
| false
| false
| 272
|
r
|
test1.R
|
# Regression test for the classic-BUGS 'litters' example (vol 1):
# fit the model with JAGS, then compare samples against stored benchmarks.
source("../../R/Rcheck.R")
d <- read.jagsdata("litters-data.R")
inits <- read.jagsdata("litters-init.R")
m <- jags.model("litters.bug", d, inits, n.chains=2)
# Burn-in
update(m, 5000)
# Thinned posterior samples for the monitored parameters
x <- coda.samples(m, c("mu","theta"), n.iter=50000, thin=50)
# bench-test1.R defines check.fun(), which validates x against the benchmark
source("bench-test1.R")
check.fun()
|
9cb2408444473efcb5a0b4fdcf7e2eb987c2b954
|
0997c835d2706cebf0419cdf50ea8899395f7226
|
/Arcelor/R/Shiny_code/ui.R
|
910caa31f7391d138784f44c3210eee99867b3a4
|
[] |
no_license
|
tdekelver-bd/DRBChack
|
d4a2e54d5fde45cdcc0f5213095bdc7e4eeb7eb0
|
54ef00402513552b1eabcacc006c24c95f5f51ec
|
refs/heads/master
| 2021-03-12T00:15:15.101777
| 2020-03-12T14:12:11
| 2020-03-12T14:12:11
| 246,571,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,677
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(shinydashboardPlus)
# NOTE(review): hard-coded, user-specific path; breaks deployment elsewhere.
setwd('C:/Users/rbelhocine/Desktop/BD/Arcelor/R/')
# Load the Python search-engine/topic-prediction functions into the R session
# (executed at app start-up; the server side presumably calls these)
library(reticulate)
source_python("Search_engine_and_topic_prediction.py")
# Left sidebar: single menu entry for the safety assistant tab
sidebar <- dashboardSidebar(width = 300,
  sidebarMenu(
    menuItem(span("Safety Assistant",style="font-size:18px;"), tabName = "safety", icon = icon("search"))
  )
)
# Main body: question input box plus two result tables (documents and answers)
body <- dashboardBody(
  tabItems(
    tabItem(tabName = "safety",
            hr()
            ,h1("Hello Safe.ly User ! I'm your Safety Assistant of the Future.", align="center")
            ,br()
            , HTML('<center><img src="photo.png" width="400"></center>')
            ,br()
            ,hr()
            ,br()
            ,fluidRow(column(12,tabBox(
              title=tagList(shiny::icon("search"), "Ask me a Question !"), width=12,
              fluidRow(column(12,align="center",textInput("quest", "", value = "You can write your question here.", placeholder = NULL))),
              fluidRow(column(12,align="center",actionButton("go", "Search")))))),
            fluidRow(column(12,tabBox(width=12,
                tabPanel("TopDoc",DT::dataTableOutput("top_documents")),
                tabPanel("TopAnswer", DT::dataTableOutput("top_answer")))))
    )))
# Put them together into a dashboardPage
dashboardPage(skin = "yellow",
              dashboardHeader(title = "Safe.Ly", titleWidth = 300),
              sidebar,
              body
)
|
9646c523d647e2f1f1a2ecf8a06009bca4172c1f
|
51af5871f74d13198b8fa8e3e679f173c8c6ca14
|
/category.R
|
d26350adb63433686ab8c60fe1198836afe1c341
|
[] |
no_license
|
Mira0507/maternity_leave
|
0f96b2d31f441cff99e616b49effcd475b3b0623
|
a5f14587830fd445d3b16533b6cb7e2abc4212f8
|
refs/heads/master
| 2022-06-12T20:17:07.641914
| 2020-05-07T19:17:09
| 2020-05-07T19:17:09
| 261,359,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,571
|
r
|
category.R
|
library(tidyverse)
library(ggplot2)
# Short aliases for interactive inspection.
# NOTE(review): `t <- tail` shadows base::t (matrix transpose); kept as-is
# because later code may rely on the alias.
h <- head
s <- summary
g <- glimpse
t <- tail
# Fix: the CSV starts with a UTF-8 byte-order mark, which previously mangled
# the first column name into the unparseable `?..Country.Name` (mojibake of
# `ï..Country.Name`). fileEncoding = "UTF-8-BOM" strips the BOM so the column
# is read as the clean name "Country.Name".
gen <- read.csv("Gender_StatsData.csv", stringsAsFactors = FALSE,
                fileEncoding = "UTF-8-BOM")
# Unique indicator and country names in the World Bank gender-stats table
indicator <- unique(gen$Indicator.Name)
country <- unique(gen$Country.Name)
gen1 <- gen %>%
  rename(Country = Country.Name)
topics_education <- c(
"GDP (current US$)",
"GDP per capita (Current US$)",
"Government expenditure on education, total (% of GDP)",
"Children out of school, primary, female",
"Children out of school, primary, male",
"Literacy rate, adult female (% of females ages 15 and above)",
"Literacy rate, adult male (% of males ages 15 and above)",
"Educational attainment, at least Bachelor's or equivalent, population 25+, female (%) (cumulative)",
"Educational attainment, at least Bachelor's or equivalent, population 25+, male (%) (cumulative)",
"Educational attainment, at least completed lower secondary, population 25+, female (%) (cumulative)",
"Educational attainment, at least completed lower secondary, population 25+, male (%) (cumulative)",
"Educational attainment, at least completed post-secondary, population 25+, female (%) (cumulative)",
"Educational attainment, at least completed post-secondary, population 25+, male (%) (cumulative)",
"Educational attainment, at least completed primary, population 25+ years, female (%) (cumulative)",
"Educational attainment, at least completed primary, population 25+ years, male (%) (cumulative)",
"Educational attainment, at least completed short-cycle tertiary, population 25+, female (%) (cumulative)",
"Educational attainment, at least completed short-cycle tertiary, population 25+, male (%) (cumulative)",
"Educational attainment, at least completed upper secondary, population 25+, female (%) (cumulative)",
"Educational attainment, at least completed upper secondary, population 25+, male (%) (cumulative)",
"Educational attainment, at least Master's or equivalent, population 25+, female (%) (cumulative)",
"Educational attainment, at least Master's or equivalent, population 25+, male (%) (cumulative)",
"Educational attainment, Doctoral or equivalent, population 25+, female (%) (cumulative)",
"Educational attainment, Doctoral or equivalent, population 25+, male (%) (cumulative)")
topics_employment <- c(
"GDP (current US$)",
"GDP per capita (Current US$)",
"Employment in agriculture, female (% of female employment) (modeled ILO estimate)",
"Employment in agriculture, male (% of male employment) (modeled ILO estimate)",
"Employment in industry, female (% of female employment) (modeled ILO estimate)",
"Employment in industry, male (% of male employment) (modeled ILO estimate)",
"Employment in services, female (% of female employment) (modeled ILO estimate)",
"Employment in services, male (% of male employment) (modeled ILO estimate)",
"Proportion of seats held by women in national parliaments (%)",
"Labor force with advanced education, female (% of female working-age population with advanced education)",
"Labor force with advanced education, male (% of male working-age population with advanced education)",
"Labor force with basic education, female (% of female working-age population with basic education)",
"Labor force with basic education, male (% of male working-age population with basic education)",
"Labor force with intermediate education, female (% of female working-age population with intermediate education)",
"Labor force with intermediate education, male (% of male working-age population with intermediate education)",
"Labor force, female",
"Labor force, female (% of total labor force)",
"Labor force, total",
"Unemployment with advanced education, female (% of female labor force with advanced education)",
"Unemployment with advanced education, male (% of male labor force with advanced education)",
"Unemployment with basic education, female (% of female labor force with basic education)",
"Unemployment with basic education, male (% of male labor force with basic education)",
"Unemployment with intermediate education, female (% of female labor force with intermediate education)",
"Unemployment with intermediate education, male (% of male labor force with intermediate education)",
"Unemployment, female (% of female labor force) (modeled ILO estimate)",
"Unemployment, female (% of female labor force) (national estimate)",
"Unemployment, male (% of male labor force) (modeled ILO estimate)",
"Unemployment, male (% of male labor force) (national estimate)",
"Vulnerable employment, female (% of female employment) (modeled ILO estimate)",
"Vulnerable employment, male (% of male employment) (modeled ILO estimate)",
"Length of paid maternity leave (days)",
"Maternity leave benefits (% of wages paid)",
"Mothers are guaranteed an equivalent position after maternity leave (1=yes; 0=no)")
# Names of the World Bank indicators used for the "life" topic group.
# Built inside local() so the helper groupings don't leak into the
# global environment; the final vector and its order are unchanged.
topics_life <- local({
  economy <- c(
    "GDP (current US$)",
    "GDP per capita (Current US$)"
  )
  vital_statistics <- c(
    "Age at first marriage, female",
    "Age at first marriage, male",
    "Birth rate, crude (per 1,000 people)",
    "Death rate, crude (per 1,000 people)",
    "Completeness of birth registration, female (%)",
    "Completeness of birth registration, male (%)",
    "Suicide mortality rate, female (per 100,000 female population)",
    "Suicide mortality rate, male (per 100,000 male population)"
  )
  fertility <- c(
    "Fertility rate, total (births per woman)",
    "Wanted fertility rate (births per woman)",
    "Sex ratio at birth (male births per female births)"
  )
  time_use_and_alcohol <- c(
    "Proportion of time spent on unpaid domestic and care work, female (% of 24 hour day)",
    "Proportion of time spent on unpaid domestic and care work, male (% of 24 hour day)",
    "Total alcohol consumption per capita, female (liters of pure alcohol, projected estimates, female 15+ years of age)",
    "Total alcohol consumption per capita, male (liters of pure alcohol, projected estimates, male 15+ years of age)"
  )
  c(economy, vital_statistics, fertility, time_use_and_alcohol)
})
|
303d0b906b26c9428345861e6c57d5c10b928ef9
|
14032d4d0a7e0ad6ce1df0fcd72117272e66a0ba
|
/R/parse_dataset.R
|
23f699db733577632d10ad0d4e3d386705494b33
|
[] |
no_license
|
5l1v3r1/dyncli
|
67f5e223dfc7759a9d8e86dcc0321155b129a52d
|
9fbadd904b58ca0f34b1be01bcdc2308a4f41430
|
refs/heads/master
| 2023-04-23T18:52:12.810151
| 2019-09-18T15:32:23
| 2019-09-18T15:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,716
|
r
|
parse_dataset.R
|
#' Parse a dataset from a .loom or .h5 file
#'
#' Recognised layouts: loom, the tool's own h5 format (read back via
#' `dynutils::read_h5_()`), CellRanger v3 h5 and CellRanger v2 h5.
#'
#' @param x Path (a file or symlink) to a `.loom` or `.h5` dataset file.
#' @param loom_expression_layer For loom input only: name of the layer to read
#'   as the expression matrix. If `NULL`, expression is derived from counts.
#' @return A list containing at least `counts` and `expression`
#'   (cells x features, sparse), plus any extra inputs (`parameters`,
#'   `priors`, `seed`) stored in the file.
#'
#' @importFrom fs is_file
#' @importFrom hdf5r H5File h5attr_names
#' @importFrom Matrix sparseMatrix Matrix t
parse_dataset <- function(x, loom_expression_layer = NULL) {
  assert_that(
    is.character(x),
    length(x) == 1,
    fs::is_file(x) || fs::is_link(x),
    msg = "--dataset should contain a pathname of a .loom or .h5 file. Add a '-h' flag for help."
  )

  extra_input <- NULL
  expression <- NULL

  if (grepl("\\.loom$", x)) {
    ##########################
    ###        LOOM        ###
    ##########################
    file_h5 <- H5File$new(x, mode = "r")
    on.exit(file_h5$close_all())

    assert_that(file_h5 %has_names% c("matrix", "row_attrs", "col_attrs", "layers"))

    counts <- file_h5[["matrix"]][,] %>% Matrix(sparse = TRUE)

    # Feature/cell identifiers live under different attribute names depending
    # on the tool that wrote the loom file, so probe the known candidates.
    feature_paths <- paste0("row_attrs/", c("gene_names", "Gene"))
    cell_paths <- paste0("col_attrs/", c("cell_names", "CellID"))
    feature_exists <- map_lgl(feature_paths, file_h5$exists) %>% which()
    cell_exists <- map_lgl(cell_paths, file_h5$exists) %>% which()

    feature_ids <-
      if (length(feature_exists) == 1) {
        file_h5[[feature_paths[[feature_exists]]]][]
      } else {
        # No (unique) match found: fall back to generated names.
        warning("feature IDs could not be found in the loom format!")
        paste("Feature", seq_len(ncol(counts)))
      }
    if (any(duplicated(feature_ids))) {
      stop("duplicated feature IDs found!")
    }

    cell_ids <-
      if (length(cell_exists) == 1) {
        file_h5[[cell_paths[[cell_exists]]]][]
      } else {
        warning("cell IDs could not be found in the loom format!")
        paste("Cell", seq_len(nrow(counts)))
      }
    if (any(duplicated(cell_ids))) {
      stop("duplicated cell IDs found!")
    }

    dimnames(counts) <- list(cell_ids, feature_ids)

    if (!is.null(loom_expression_layer)) {
      expression <- file_h5[[paste0("layers/", loom_expression_layer)]][,] %>% Matrix(sparse = TRUE)
      dimnames(expression) <- list(cell_ids, feature_ids)
    }
  } else if (grepl("\\.h5$", x)) {
    file_h5 <- H5File$new(x, mode = "r")
    on.exit(file_h5$close_all())

    if (file_h5 %has_names% c("data", "names") && "object_class" %in% h5attr_names(file_h5)) {
      ##########################
      ###       OWN H5       ###
      ##########################
      tmp <- dynutils::read_h5_(file_h5)
      counts <- tmp$counts
      expression <- tmp$expression

      extra_input <- list()
      if ("parameters" %in% names(tmp)) {
        extra_input$parameters <- tmp$parameters
      }
      if ("priors" %in% names(tmp)) {
        extra_input$priors <- tmp$priors
      }
      # NOTE: "prior_information" deliberately overrides "priors" when both
      # are present (original behaviour, kept as-is).
      if ("prior_information" %in% names(tmp)) {
        extra_input$priors <- tmp$prior_information
      }
      if ("seed" %in% names(tmp)) {
        extra_input$seed <- tmp$seed
      }

      # add dataset prior if given
      if (any(c("milestone_percentages", "divergence_regions", "milestone_network", "progressions") %in% names(tmp))) {
        extra_input$priors$dataset <- tmp[c("milestone_network", "progressions", "milestone_percentages", "divergence_regions")]
      }
    } else if (file_h5 %has_names% "matrix" && file_h5[["matrix"]] %has_names% c("barcodes", "data", "features", "indices", "indptr", "shape")) {
      ##########################
      ###   CELLRANGER V3    ###
      ##########################
      # BUGFIX: this condition previously read `file[["matrix"]]`, subsetting
      # the base function `file` instead of the open handle `file_h5`, which
      # raised an error for every CellRanger v3 input.
      subfile <- file_h5[["matrix"]]
      counts <-
        Matrix::sparseMatrix(
          i = subfile[["indices"]][],
          p = subfile[["indptr"]][],
          x = subfile[["data"]][],
          dims = subfile[["shape"]][],
          dimnames = list(
            subfile[["features/id"]][],
            subfile[["barcodes"]][]
          ),
          index1 = FALSE
        ) %>%
        Matrix::t()
    } else if (length(names(file_h5)) == 1 && file_h5[[names(file_h5)]] %has_names% c("barcodes", "data", "genes", "indices", "indptr", "shape")) {
      ##########################
      ###   CELLRANGER V2    ###
      ##########################
      # v2 stores the matrix under a single genome-named group.
      subfile <- file_h5[[names(file_h5)]]
      counts <-
        Matrix::sparseMatrix(
          i = subfile[["indices"]][],
          p = subfile[["indptr"]][],
          x = subfile[["data"]][],
          dims = subfile[["shape"]][],
          dimnames = list(
            subfile[["genes"]][],
            subfile[["barcodes"]][]
          ),
          index1 = FALSE
        ) %>%
        Matrix::t()
    }
  }

  # Derive an expression matrix when none was supplied by the file.
  if (is.null(expression)) {
    expression <- normalise(counts)
  }

  out <- lst(counts, expression)
  c(out, extra_input)
}
# Placeholder normalisation: log2(1 + x) applied to the stored (non-zero)
# entries of a sparse matrix. Zeros map to zero under log2(1 + 0), so the
# sparsity pattern is preserved.
# TODO: provide better normalisation :(
# TODO: Also print out warning that better normalisation should be added
normalise <- function(counts) {
  normalised <- counts
  normalised@x <- log2(1 + normalised@x)
  normalised
}
|
5505cb1ebc16e377894d24013696975f2b6a054c
|
9531c36e90445e884c0834d10f3f741263cc54e8
|
/Data exploration/ui.R
|
c956e7898a06d753a32ca92880536d210148fc2b
|
[] |
no_license
|
Ghaith701/Exploring-top-song-charts-in-each-decade
|
880b0cc0f3ff2f6ffa3ea54a6af217c9b0bd3e6b
|
69c5305f64ac1f5bda66dab150f7090c95c1f6fc
|
refs/heads/main
| 2023-02-19T19:09:46.790883
| 2021-01-20T03:46:45
| 2021-01-20T03:46:45
| 331,147,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,466
|
r
|
ui.R
|
# ui.R
# Shiny UI for exploring the top 50 billboard songs per decade.
# Three tabs: About (description/references), Lyrics (word cloud),
# Genre (bubble chart). Fixed several misspellings in the user-facing
# text ("Describtion", "intresting", "dimmensions", "Refrences").
library(plotly)
library(shiny)
library(shinythemes)

shinyUI(fluidPage(
  theme = shinytheme("cosmo"),

  # Application title
  titlePanel("Top 50 Songs Each Decade"),

  navbarPage(title = "My project",
             tabPanel("About",
                      titlePanel("My app"), sidebarLayout(
                        sidebarPanel(
                          h3("Description: "),
                          h5("This application explores the top 50 songs in each decade in relation to the billboard
              songs data. The reason behind this application is to show the diversity and similarity
              in each decade. Two plots were made: a word cloud plot and a bubble chart plot.")
                        ),
                        mainPanel(
                          h3("Plots used: "),
                          h4("Word cloud"),
                          h5("Is a plotting method to show the most used words in a text.", br(), "In this application
             , we explored the words of lyrics and showed the word cloud for each decade."),
                          br(),
                          h4("Bubble chart"),
                          h5("Is an interesting plotting method with many dimensions to control, such as color,
             size, etc.",
                             br(), "In this application, we used the bubble chart to show the popularity of genres
             in each decade."),
                          br(),
                          h3("References: "),
                          h5("Application development by ",
                             HTML("<a href = 'https://shiny.rstudio.com/articles/'>shiny</a>")
                          ),
                          h5("Billboard Weekly Hot 100 singles chart between 8/2/1958 and 12/28/2018",
                             HTML("<a href = 'https://data.world/kcmillersean/billboard-hot-100-1958-2017'>Dataset</a>"), ".")
                        )
                      )
             ),
             tabPanel("Lyrics",
                      # Sidebar with a select input for the decade and a slider
                      # for the number of words in the cloud
                      sidebarLayout(
                        sidebarPanel(
                          selectInput("Decade", "Please select the decade", choices = c("1958 - 1969", "1970 - 1979",
                                                                                       "1980 - 1989", "1990 - 1999",
                                                                                       "2000 - 2009", "2010 - 2018")),
                          br(),
                          sliderInput("words",
                                      "Number of words",
                                      min = 50,
                                      max = 200,
                                      value = 100)
                        ),

                        # Show the word cloud for the selected decade
                        mainPanel(
                          h3("Most used words for decades"),
                          plotOutput("distPlot2", width = "100%", height = "500px"),
                          h5("This plot shows most used words for each decade inside of a word cloud.", align = "center")
                        )
                      )),
             tabPanel("Genre",
                      # Sidebar with a select input for the decade
                      sidebarLayout(
                        sidebarPanel(
                          selectInput("Decade1", "Please select the decade", choices = c("1958 - 1969", "1970 - 1979",
                                                                                        "1980 - 1989", "1990 - 1999",
                                                                                        "2000 - 2009", "2010 - 2018",
                                                                                        "All decades"))
                        ),

                        # Show the genre bubble chart for the selected decade
                        mainPanel(
                          h3("Songs count for decades"),
                          plotlyOutput("distPlot", width = "100%", height = "500px")
                        )
                      )))
))
|
2eff7d8b1fa932955db220ece5f1cc5b400c0e05
|
c3063b1798acc6ac01e74f4b4dcec11826da0ea5
|
/code/random_forest.building_energy.grid2.R
|
163743626f94392a7803def9e452c30155a8d098
|
[
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
DarrenCook/h2o
|
1aa14b97c60bcd7c50bf07d7582953f35c8477c3
|
c077af549ab539b5f71218155f11e5fe3c25042c
|
refs/heads/bk
| 2021-05-04T06:29:50.348447
| 2017-12-22T09:43:01
| 2017-12-22T09:43:01
| 70,473,775
| 95
| 117
|
MIT
| 2018-02-02T01:55:35
| 2016-10-10T09:38:13
|
R
|
UTF-8
|
R
| false
| false
| 575
|
r
|
random_forest.building_energy.grid2.R
|
# Random-discrete hyper-parameter search over an H2O random forest.
# NOTE(review): two stopping layers appear intentional — `search_criteria`
# governs when the *grid* stops sampling models (MSE plateau over 10 models,
# 120 s budget), while the model-level `stopping_*` arguments stop the trees
# of each *individual* model on deviance; confirm against h2o.grid docs.
g <- h2o.grid(
  "randomForest",
  x = x, y = y, training_frame = train,
  nfolds = 5,
  max_depth = 40,
  score_tree_interval = 3,
  stopping_metric = "deviance",
  stopping_rounds = 4,
  stopping_tolerance = 0,
  hyper_params = list(
    ntrees = c(50, 100, 150, 200, 250),
    mtries = c(2, 3, 4, 5),
    sample_rate = c(0.5, 0.632, 0.8, 0.95),
    col_sample_rate_per_tree = c(0.5, 0.9, 1.0)
  ),
  search_criteria = list(
    strategy = "RandomDiscrete",
    stopping_metric = "mse",
    stopping_tolerance = 0.001,
    stopping_rounds = 10,
    max_runtime_secs = 120
  )
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.