blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
189b52507c949ce9e6e1e981ccdfdbd103cb93a6 | f9c6ab2809c85c3eb4bb6cda84f274f2907806a7 | /texttwit.R | dcdcfb12ff2918f2e1392dee6b56f0acdbd56bad | [] | no_license | monicamurugesan/Text-Mining-DS | e00347170a1db464d8811c67a2db66254acaf394 | b0a2e247cae2d79df73dce05727e447672077e21 | refs/heads/master | 2022-12-14T21:52:49.239038 | 2020-08-27T15:01:01 | 2020-08-27T15:01:01 | 290,805,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,417 | r | texttwit.R |
# texttwit.R -- Twitter text mining: authenticate against the Twitter API,
# download tweets, clean the text, then plot word frequencies and a
# positive/negative comparison word cloud.
#devtools::install_github("jrowen/twitteR", ref = "oauth_httr_1_0")
library("twitteR")
#install.packages("ROAuth")
library("ROAuth")
# SECURITY NOTE(review): live-looking API keys, secrets, and access tokens are
# hard-coded below and committed to source control. They should be revoked and
# loaded from environment variables or an untracked config file instead.
cred <- OAuthFactory$new(consumerKey='BagGgBbanzbdpPNNp8Uy6TQBP', # Consumer Key (API Key)
consumerSecret='pFxap1Jzc1fClDQ9psLNU3RKSQ5FvS2PhJz8E2R7ix0cawPKfa', #Consumer Secret (API Secret)
requestURL='https://api.twitter.com/oauth/request_token',
accessURL='https://api.twitter.com/oauth/access_token',
authURL='https://api.twitter.com/oauth/authorize')
# Persist the credential object and immediately reload it (round-trip check).
save(cred, file="twitter authentication.Rdata")
load("twitter authentication.Rdata")
#Access Token Secret
setup_twitter_oauth("BagGgBbanzbdpPNNp8Uy6TQBP", # Consumer Key (API Key)
"pFxap1Jzc1fClDQ9psLNU3RKSQ5FvS2PhJz8E2R7ix0cawPKfa", #Consumer Secret (API Secret)
"1076425245521731584-Ev31ZLB7Cf0idVMqDI8BxiVG2SgRnu", # Access Token
"ZVUw0Z0mFrX7d6sjQxuB08l48JHhmnjmlAm86G2OPG7BS") #Access Token Secret
#registerTwitterOAuth(cred)
# Cache OAuth tokens between sessions.
origop <- options("httr_oauth_cache")
options(httr_oauth_cache = TRUE)
# Download up to 1000 tweets (retweets included) from the 'climate' timeline
# and write them out as CSV.
Tweets <- userTimeline('climate', n = 1000,includeRts = T)
TweetsDF <- twListToDF(Tweets)
dim(TweetsDF)
View(TweetsDF)
setwd('H://RStudio')
write.csv(TweetsDF, "Tweets_Climate.csv",row.names = F)
getwd()
#
# Keyword search; the result is not used below -- the commented block shows
# the post-processing that was originally intended.
handleTweets <- searchTwitter('cyclone', n = 10000)
# handleTweetsDF <- twListToDF(handleTweets)
# dim(handleTweetsDF)
# View(handleTweetsDF)
# #handleTweetsMessages <- unique(handleTweetsDF$text)
# #handleTweetsMessages <- as.data.frame(handleTweetsMessages)
# #write.csv(handleTweetsDF, "TefalHandleTweets.csv")
#
library(rtweet)
# Re-load the saved tweets interactively (opens a file-chooser dialog).
climate <-read.csv(file.choose())
head(climate$text)
?Corpus
library(tm)
# NOTE(review): VectorSource() is given the whole data frame, not
# climate$text, so each column becomes one "document" -- confirm intended.
clim<-Corpus(VectorSource(climate))
inspect(clim[1:5])
climate$stripped_text
# NOTE(review): climate$stripped_text does not exist at this point (the line
# above prints NULL), so the second gsub() below operates on NULL. It looks
# like both assignments were meant to build/refine the same stripped column.
clim$stripped_text <-gsub("http.*","",climate$text)
clim$stripped_text <-gsub("http.*","",climate$stripped_text)
install.packages("tidytext")
library(tidytext)
install.packages(c("mnormt", "psych", "SnowballC", "hunspell",
"broom", "tokenizers", "janeaustenr"))
library(dplyr)
library(ggplot2)
# Tokenize the tweet text into one word per row.
climate_tweets_clean <- climate%>%dplyr::select(text)%>%unnest_tokens(word,text)
# Top-20 word counts before stop-word removal.
climate_tweets_clean %>%
count(word,sort=TRUE) %>%
top_n(20) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
xlab(NULL) +
coord_flip() +
labs(x = "Count",
y = "Unique words",
title = "Count of unique words found in #YouthSDGs tweets")
# Remove stop words, then re-plot the top-20 counts.
data("stop_words")
head(stop_words)
climate_tweets_words <- climate_tweets_clean %>%anti_join(stop_words)
climate_tweets_words %>%
count(word,sort=TRUE) %>%
top_n(20) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(x = word, y = n)) +
geom_col() +
xlab(NULL) +
coord_flip() +
labs(x = "Count",
y = "Unique words",
title = "Count of unique words found in Climate tweets with stop words")
nrow(climate_tweets_clean)
# Comparison word cloud of positive vs negative words (Bing lexicon).
library(wordcloud)
library(reshape2)
climate_tweets_words%>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment,sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("blue","purple"),
max.words = 150)
|
97f3bc00e733ed835752ab34ff9e72f7800e6dd0 | e77fb8b6c6c756c0f392b3d7846688af04ca587b | /hw10/00_DownloadFiles.R | aed258b6179df5518e02673034272bb4ec6b68fe | [] | no_license | gbraich/STAT547-hw-Braich-Gurneet | 2d3f6f9f4df3777b3db854e428280caa92d0b5f1 | ae34ed1776943b4d295f227b905d42c01a1795ce | refs/heads/master | 2021-08-28T10:02:59.798137 | 2017-12-11T23:35:35 | 2017-12-11T23:35:35 | 109,421,418 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,963 | r | 00_DownloadFiles.R | library(rvest)
library(readr)
# Scrape the Mercer quality-of-living ranking plus city population tables
# from several sites and cache each table as a local CSV.
# NOTE(review): html() was deprecated in rvest in favour of read_html();
# confirm the installed rvest version still exports it.
#Let's download some data on cities, specifically on top 50 ranked cities by quality of living. Let's also get their population data
#Download Mercer quality of living rankings (top50) from Wikipedia by scraping using rvest
url <- "https://en.wikipedia.org/wiki/Mercer_Quality_of_Living_Survey"
ranking <- url %>%
html() %>%
html_nodes(xpath='//*[@id="mw-content-text"]/div/table[2]') %>%
html_table()
# html_table() returns a list of data frames; keep the first (only) match.
ranking <- ranking[[1]]
write_csv(ranking, "ranking.csv")
#Download city population data from another website using rvest and webscraping
url2 <- "http://worldpopulationreview.com/world-cities/"
popdata <- url2 %>%
html() %>%
html_nodes(xpath='//*[@id="main-page-content"]/div/div/table') %>%
html_table()
popdata <- popdata[[1]]
write_csv(popdata, "popdata.csv")
#Download population data on missing US cities, Japanese, German and Swiss cities using rvest and webscraping
url3 <- "http://worldpopulationreview.com/us-cities/"
uspopdata <- url3 %>%
html() %>%
html_nodes(xpath='//*[@id="main-page-content"]/div/div/table') %>%
html_table()
uspopdata <- uspopdata[[1]]
write_csv(uspopdata, "uspopdata.csv")
# Japan: the third wikitable on the page holds the city list.
url4 <- "https://en.wikipedia.org/wiki/List_of_cities_in_Japan"
jppopdata <- url4 %>%
html() %>%
html_nodes(xpath='//*[@id="mw-content-text"]/div/table[3]') %>%
html_table()
jppopdata <- jppopdata[[1]]
write_csv(jppopdata, "jppopdata.csv")
# Switzerland.
url5 <- "https://en.wikipedia.org/wiki/List_of_places_in_Switzerland"
swisspopdata <- url5 %>%
html() %>%
html_nodes(xpath='//*[@id="mw-content-text"]/div/dl/dd/table') %>%
html_table()
swisspopdata <- swisspopdata[[1]]
write_csv(swisspopdata, "swisspopdata.csv")
# Germany.
url6 <- "https://en.wikipedia.org/wiki/List_of_cities_in_Germany_by_population"
grpopdata <- url6 %>%
html() %>%
html_nodes(xpath='//*[@id="mw-content-text"]/div/table[1]') %>%
html_table()
grpopdata <- grpopdata[[1]]
write_csv(grpopdata, "grpopdata.csv")
|
38c4353c35456c5d725a751b02a3db417c9189dc | 46c1d2ae7a3e5deaaa56ead7c1e25f672723508b | /data/random/generator.R | cbf52ae6706c36ee1be195ee68493430779d99e8 | [
"MIT"
] | permissive | ACharbonneau/upset | 4e832fef00b7727ee1703f2a6481935224a51784 | effc33d97bb5a2506997c6cfaefe7598855556fe | refs/heads/master | 2022-08-29T11:08:08.889247 | 2022-08-16T19:04:08 | 2022-08-16T19:04:08 | 236,061,707 | 1 | 1 | MIT | 2020-01-24T18:41:59 | 2020-01-24T18:41:58 | null | UTF-8 | R | false | false | 278 | r | generator.R | # Random Set Generator for UpSet
# Decide whether an item belongs to a given set.
#
# Bug fix: the original body was `runif()`, which errors at runtime because
# runif() requires its `n` argument, and the probability `p` was never used.
# Per the script's own comment ("probability of an item to be contained in
# any given set"), this is a Bernoulli(p) draw.
#
# @param p probability in [0, 1] that the item is a member.
# @return a single logical: TRUE with probability p.
membership <- function(p) {
  runif(1) < p
}
# Simulation parameters for the random set matrix.
# number of items to generate
i <- 1000;
# number of sets to generate
s <- 10;
# probability of an item to be contained in any given set
p <- 0.5;
# Pre-allocate the i x s item-by-set membership matrix (all zeros).
# NOTE(review): in the visible code the matrix is never filled in and
# membership() is never called -- the sampling step appears to be missing.
sets <- matrix( 0, nrow=i, ncol=s );
|
b1b3b5861462d0b8d544d9950fa788cd63668917 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/multilevel/examples/sam.cor.Rd.R | 9a103accf22d8a9707781d7afeefbbfce25ce068 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 277 | r | sam.cor.Rd.R | library(multilevel)
### Name: sam.cor
### Title: Generate a Sample that Correlates with a Fixed Set of
### Observations
### Aliases: sam.cor
### Keywords: programming
### ** Examples
# Example extracted from the multilevel package's sam.cor help page:
# generate NEWVAR so that it correlates ~0.30 with bh1996$LEAD, then print
# the realized correlation (it varies around rho because sampling is random).
data(bh1996)
NEWVAR<-sam.cor(x=bh1996$LEAD,rho=.30)
cor(bh1996$LEAD,NEWVAR)
|
e8515bc4ef48d7c196f68781990f69852445b1ab | ebd99d96b39898c5590d4914b3c0a737875ca39b | /tests/testthat/test-occ_download_datasets.R | 551a597d1745faa3b6e8b9e84d88c82336673786 | [
"MIT"
] | permissive | ropensci/rgbif | 6071ec87eb87763757bdfd7c8ffd8f32c0df72a3 | e2853dbd4be02f524dd85c4bbfdb60ef2057c09b | refs/heads/master | 2023-08-31T10:24:08.427742 | 2023-08-31T08:12:11 | 2023-08-31T08:12:11 | 2,273,724 | 127 | 68 | NOASSERTION | 2023-09-11T07:31:43 | 2011-08-26T11:28:18 | R | UTF-8 | R | false | false | 1,122 | r | test-occ_download_datasets.R | context("occ_download_datasets")
# Integration test: list the datasets under a known GBIF download key
# (HTTP traffic replayed from a vcr cassette) and check the shape of the
# returned meta/results tables.
test_that("occ_download_datasets", {
skip_on_cran()
skip_on_ci()
vcr::use_cassette("occ_download_datasets", {
tt <- occ_download_datasets("0003983-140910143529206")
})
# NOTE(review): expect_is() is deprecated in testthat 3e;
# expect_s3_class()/expect_type() are the modern equivalents.
expect_is(tt, "list")
expect_is(tt$meta, "data.frame")
expect_equal(sort(names(tt$meta)),
c("count", "endofrecords", "limit", "offset"))
expect_is(tt$results$downloadKey, "character")
expect_is(tt$results$datasetKey, "character")
expect_type(tt$results$numberRecords, "integer")
expect_equal(NROW(tt$meta), 1)
# NOTE(review): tt$result relies on $ partial matching of tt$results --
# it works on lists but is fragile; consider spelling the name out.
expect_gt(NROW(tt$result), 3)
# An unknown download key should error (replayed from a second cassette).
vcr::use_cassette("occ_download_datasets_error", {
expect_error(occ_download_datasets("foo-bar"))
})
})
# Input-validation tests: these exercise argument checking only, so they run
# without network access and need no vcr cassette.
test_that("occ_download_datasets fails well", {
skip_on_cran()
# no key given
expect_error(occ_download_datasets(), "is missing")
# type checking
expect_error(occ_download_datasets(5),
"key must be of class character")
expect_error(occ_download_datasets("x", "x"),
"limit must be of class integer, numeric")
expect_error(occ_download_datasets("x", 5, "x"),
"start must be of class integer, numeric")
})
|
b80e98333f46fa393c4d3b069dc174b4f3941fff | a86fbd6fd727c20fe94cbf1c3cfa469a460f83b6 | /code/plot/fig7_plot_root_freqs.R | 1dfd5bd346db89492ccac2916f19b7108d5aaed7 | [
"MIT"
] | permissive | mlandis/biome_shift | 040acf5c9644a9da01ed4aa5d041f8edfba63f9c | 2a616f3f0efe725f531a5bfbba0f4163e3e122c8 | refs/heads/master | 2023-03-31T11:54:28.173610 | 2021-03-25T17:35:58 | 2021-03-25T17:35:58 | 233,412,205 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,318 | r | fig7_plot_root_freqs.R | library(HDInterval)
library(ggplot2)
library(reshape2)
library(ggridges)
library(dplyr)
library(bayestestR)
source("biome_shift_util.R")
# fig7: posterior root-state stationary probabilities under three models
# (Paleo / Modern / Null), drawn as mean points with 80%/95% HPD bars for
# each of the 18 biome+region states.
# filesystem
fp = "/Users/mlandis/projects/gh_biome_shift/"
out_fp = paste0(fp, "output/")
plot_fp = paste0(fp, "code/plot/fig/")
plot_fn = paste0(plot_fp, "fig7_root_freqs.pdf")
col_fn = paste0(fp, "code/plot/biome_region_colors.txt")
fn = paste0(out_fp, c("run_1.paleo.model.log", "run_1.modern.model.log", "run_1.null.model.log"))
# get colors and names for biome+region states
dat_col = read.csv(col_fn, stringsAsFactors=F) # color data
n_states = nrow(dat_col)
st_lbl = dat_col$str
st_colors = as.vector(dat_col$color)
# One plot shape per block of six states (18 states total).
st_shape = c( rep(22, 6), rep(21, 6), rep(24, 6) )
names(st_colors) = st_lbl
names(st_shape) = st_lbl
# process files: for each model's MCMC log, compute the posterior mean and
# the 95%/80% HPD intervals of each rf_simplex root-frequency parameter.
model_name = c("Paleo", "Modern", "Null")
df0 = data.frame(rf=NULL, biome=NULL, region=NULL, prob=NULL)
x = list()
for (i in 1:length(fn)) {
xtmp = read.csv(fn[i], sep="\t", stringsAsFactors=F)
x[[ model_name[i] ]] = xtmp
for (j in 1:18) {
# st_lbl entries are "Biome+Region"; split into the two components.
strtok = strsplit( st_lbl[j], split="\\+" )[[1]]
rfj = paste("rf_simplex.",j,".",sep="")
xtmp[[rfj]] = sort( xtmp[[rfj]] )
hpd95 = hdi(xtmp[[rfj]], ci=0.95)
hpd80 = hdi(xtmp[[rfj]], ci=0.80)
df1 = data.frame(Model=model_name[i], Biome=strtok[1], Region=strtok[2], State=st_lbl[j],
Mean=mean(xtmp[[rfj]]),
lower95=hpd95$CI_low, upper95=hpd95$CI_high,
lower80=hpd80$CI_low, upper80=hpd80$CI_high)
# rbind-in-loop is acceptable for 3 x 18 = 54 rows.
df0 = rbind(df0, df1)
}
}
# Order factors for display and compute a fanned-out y position so the 18
# states of each model spread around y = 1 (Null), 2 (Modern), 3 (Paleo).
m = df0
m$State = factor(m$State, ordered=T, levels=rev(st_lbl))
m$Model = factor(m$Model, ordered=T, levels=c("Null","Modern","Paleo"))
m$y = c( rev(sort(rep(3:1,18))) + ((rep(18:1,3)-9.5)/18)*0.7 )
# plot data
p = ggplot(m)
# Dashed reference line at the uniform probability 1/18.
p = p + geom_vline(xintercept = 1/18, linetype=2, color="gray")
# Thick segment = 80% HPD, thin segment = 95% HPD, point = posterior mean.
p = p + geom_segment(data=m, mapping=aes(x=lower80, xend=upper80, y=y, yend=y, color=State), size=1.25, alpha=0.5)
p = p + geom_segment(data=m, mapping=aes(x=lower95, xend=upper95, y=y, yend=y, color=State), size=0.65, alpha=0.5)
p = p + geom_point(data=m, mapping=aes(x=Mean, y=y, color=State),size=2)
p = p + geom_point(data=m, mapping=aes(x=Mean, y=y),size=0.5, color="white")
p = p + ylab("Biome structure")
p = p + xlab(expression(paste("Posterior root stationary probability, ", pi,"(", italic(m)[root] ,")",sep="")))
p = p + scale_color_manual( name="Biome+Region", values=st_colors, breaks=names(st_colors) )
p = p + scale_shape_manual( name="Biome+Region", values=st_shape, breaks=names(st_colors) )
p = p + guides(shape = guide_legend(override.aes = list(size = 0.5)))
p = p + xlim(0.0,0.175)
p = p + scale_y_continuous( breaks=c(1,2,3), labels=c("Null","Modern","Paleo") )
p = p + theme_classic()
p = p + theme(axis.text.y = element_text(angle=90, hjust=0.5, size=10),
legend.position = "top",
legend.key.size = unit(0, "lines"))
# Compact 3 x 6 legend across the top of the figure.
my_guide_legend = guide_legend(title="Biome+Region",
title.position="top", title.hjust=0.5,
nrow=3, ncol=6, byrow =T)
p = p + guides( color=my_guide_legend)
# Write the final figure to PDF.
pdf(plot_fn, height=8, width=6)
print(p)
dev.off()
|
0775b8c708f2309f5d905b610acb8a98d7ed1845 | c31e49a759e372c3be878ee04b42e07c4d720505 | /cl.R | 5c62217f1b4e710505fc7c96117ca5c0eac8c59e | [] | no_license | wangqing207/Rscripts | a5868daca905de60735d5f4d62728d2956b9eaad | 2216b7dcd21ed7d022cef169b331b517c854f894 | refs/heads/master | 2021-07-17T19:35:08.736206 | 2017-10-23T03:26:46 | 2017-10-23T03:26:46 | 107,925,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 102 | r | cl.R | library(parallel)
# Spin up a 12-worker cluster for the dmrs.R analysis.
# NOTE(review): the core count is hard-coded; parallel::detectCores() would
# adapt to the host machine. No stopCluster(cl) appears in the visible code,
# so the workers are never shut down explicitly.
cl.cores <- 12
cl <- makeCluster(cl.cores)
clusterEvalQ(cl,source(file="dmrs.R")) |
03ed692ecb9b43e9532d46c5e756620f465958ae | 91605bb70a092de5e29efdecc92d7fbb9fb745ca | /LinearPolynomialRegression/LinearRegression.R | 9d78c16d4b39991fa502a251fb0c24d380f7abec | [] | no_license | sinderpl/dataMiningRegression | c33e4da98c9838a323f6d5c4d3b6903010b1aca6 | 53dd54e29c389beb3671fcc50956bd4c2a09c2b8 | refs/heads/main | 2023-02-09T07:38:09.918890 | 2020-12-21T09:33:34 | 2020-12-21T09:33:34 | 313,036,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,051 | r | LinearRegression.R | #library(moments)
#Analyses computational power data and attempts to predict the total performance score.
#Uses linear regression
#Read in data and clean unnecessary columns and null values
machine_information <- read.csv("data/machine.data", strip.white=TRUE)
machine_information <- machine_information[complete.cases(machine_information),]
# NOTE(review): "Published.performace" is misspelled, but it is used
# consistently everywhere below, so the code runs as written.
colnames(machine_information)<- c("Company",
"Machine.name",
"Machine.cycle.time",
"Machine.memory.min",
"Machine.memory.max",
"Machine.memory.cache",
"Machine.channels.min",
"Machine.channels.max",
"Published.performace",
"Estimated.performance")
machine_information <- subset(machine_information, select = -c(Company, Machine.name))
#Leave out some data for testing
# NOTE(review): the hold-out is the last 8 rows, not a random sample, so the
# test set may not be representative of the full data.
machine_information_test_data <- machine_information[201:208, ]
machine_information <- machine_information[1:200, ]
#Initial analysis
summary(machine_information)
boxplot(machine_information)
#Quick look at the performance data distribution
hist(machine_information$Published.performace)
#Requires the "moments" library at the top
#kurtosis(machine_information$Published.performace)
#skewness(machine_information$Published.performace)
# Correlation of the response against every (numeric) column.
cor(machine_information$Published.performace,machine_information)
pairs( machine_information, panel=function(x,y){
points(x, y)
model <- lm(x ~ y)
# In case we want to display line of best fit
#abline(model, col='red')
}, cex.labels=1)
#Modelling Linear Regresssion
#This is the final highest scoring model
# In R formulas, `*` adds main effects plus the interaction and `-` removes
# a term; Estimated.performance is excluded as it is itself a prediction.
machine_information_model <- lm(machine_information$Published.performace ~
+Machine.memory.max*Machine.memory.min
-Machine.memory.min
+Machine.memory.cache
+Machine.channels.max*Machine.channels.min
-Estimated.performance #ignore the hardcoded predictions
,data=machine_information)
machine_information_model$coefficients
summary(machine_information_model)
# Second scoring model
machine_information_model2 <- lm(machine_information$Published.performace ~
+Machine.memory.max
+Machine.memory.min
+Machine.memory.cache
+Machine.channels.max
-Machine.channels.min
-Estimated.performance #ignore the hardcoded predictions
,data=machine_information)
machine_information_model2$coefficients
summary(machine_information_model2)
# Third exploratory model
# `:` adds only the interaction term, without the main effects.
machine_information_model3 <- lm(machine_information$Published.performace ~
+Machine.memory.max:Machine.memory.min
+Machine.memory.cache
+Machine.channels.max:Machine.channels.min
-Estimated.performance #ignore the hardcoded predictions
,data=machine_information)
machine_information_model3$coefficients
summary(machine_information_model3)
#Predicitions based on initial model to compare to the baseline
predicted_machine_model <- round(predict(machine_information_model, machine_information_test_data))
machine_information_test_data$Estimated.My.performance = predicted_machine_model
#Evaluation
#Line of best fit again to see the patterns
pairs( machine_information_test_data, panel=function(x,y){
points(x, y)
model <- lm(x ~ y)
abline(model, col='blue')
})
#Residuals
performace_resid = resid(machine_information_model)
# Standard lm diagnostic plots in a 2x2 grid.
par(mfrow = c(2,2))
plot(machine_information_model, which=1)
plot(machine_information_model, which=2)
plot(machine_information_model, which=3)
plot(machine_information_model, which=5) |
fb9a02b1b105ec00943c94357473f72c4f0bb49d | 8b3fa8e3d995a29c13ef24d976f8f1841d849888 | /inst/shiny/ui.R | 09b88c64aff83327e274e6148761adec1f0223d0 | [] | no_license | PolMine/annolite | c9b0d2beade917474378a9e932beec2f1fb3febe | 66f5c7989a0732931ca7899f4b243181c8b96001 | refs/heads/master | 2023-01-04T21:48:40.087221 | 2020-11-02T07:38:40 | 2020-11-02T07:38:40 | 143,249,089 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,523 | r | ui.R | annotatorObjects <- polmineR::getObjects(class = 'Annotator', envir = .GlobalEnv)
# Shiny UI for the annotation app: loads the Annotator.js assets, offers a
# sidebar to pick an Annotator object and restore saved work, and shows the
# fulltext/table views in tabs; the trailing tags$script calls wire up the
# Annotator.js plugins in the browser.
shinyUI(fluidPage(
useShinyjs(),
tags$head(tags$script(src = "jquery.min.js")),
tags$head(tags$script(src = "annotator-full.min.js")),
includeCSS(system.file("js", "annotator.min.css", package = "polmineR.anno")),
tags$head(tags$script(src = "annotator.offline.min.js")),
tags$head(tags$script(src = "annotator.plugin.polmine.js")),
tags$head(tags$script(src = "tags-annotator.min.js")),
includeCSS(system.file("js", "tags-annotator.min.css", package="polmineR.anno")),
# NOTE(review): machine-specific absolute path; this only works on the
# original developer's machine -- should use system.file() like above.
extendShinyjs(script="/Users/blaette/Lab/gitlab/polmineR.anno/inst/shiny/www/shinyjs.interface.js"),
sidebarLayout(
sidebarPanel(
selectInput("object", "object", choices = annotatorObjects),
actionButton("restore", "restore")
),
mainPanel(
tabsetPanel(
id = "tabs",
tabPanel("fulltext", id = "fulltext", uiOutput("fulltext")),
tabPanel("table", id = "table", dataTableOutput("table"))
)
)
),
# Client-side initialisation of Annotator.js and its plugins.
tags$script("var content = $('body').annotator();"),
tags$script("content.annotator('addPlugin', 'Offline');"),
tags$script("content.annotator('addPlugin', 'StoreLogger');"),
tags$script("var optionstags = {tag:'imagery:red,parallelism:blue,sound:green,anaphora:orange'};"),
tags$script("console.log(optionstags);"),
tags$script("content.annotator('addPlugin','HighlightTags', optionstags);")
# tags$script("content.annotator('addPlugin', 'Tags');")
)) |
ce488819f42205aefa02b9b2a7692224f2252770 | f1af4d31ae62962fe1880a184143d47543dab569 | /JHU-PML-FA/FInalFile.R | 2bcda09afd65ae39a933d8e7e85a10d0e8b88cf4 | [] | no_license | Alex0141/testing | 987c969bd35d446a7e55f84a288c43114c075453 | 1e9e87c9e31ae7266f5f22a452efc7cfb8707b28 | refs/heads/master | 2021-05-19T06:54:25.375657 | 2020-06-04T07:17:27 | 2020-06-04T07:17:27 | 251,574,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,656 | r | FInalFile.R | PROBLEM STATEMENT
“Using devices such as Jawbone Up, Nike FuelBand, and Fitbit it is now possible to collect a large amount of data about personal activity relatively inexpensively. These type of devices are part of the quantified self movement ??? a group of enthusiasts who take measurements about themselves regularly to improve their health, to find patterns in their behavior, or because they are tech geeks. One thing that people regularly do is quantify how much of a particular activity they do, but they rarely quantify how well they do it. In this project, your goal will be to use data from accelerometers on the belt, forearm, arm, and dumbell of 6 participants. They were asked to perform barbell lifts correctly and incorrectly in 5 different ways.”
What should be submitted.
The goal of your project is to predict the manner in which they did the exercise. This is the "classe" variable in the training set. You may use any of the other variables to predict with. You should create a report describing how you built your model, how you used cross validation, what you think the expected out of sample error is, and why you made the choices you did. You will also use your prediction model to predict 20 different test cases.
Data
The training data for this project are available here:
https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv
The test data are available here:
https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv
SOLUTION
Open the R tool and install the following packages by typing
Install.packages(“caret”)
Install.packages(“randomForest”)
Install.packages(“e1071”)
Load the Give Libraries using the following command.
Library(caret)
library(randomForest)
Library(e1071)
The training and Testing data set is available as links online.You can store the CSV Files as per the following command.
Urltrain = "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
training = read.csv(url(Urltrain), na.strings=c("NA","#DIV/0!",""))
Urltest = "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
testing11 <- read.csv(url(Urltest), na.strings=c("NA","#DIV/0!",""))
Addtionally,you can manually download the CSV Files from the given link by using the:-
Getwd()
And setting to the Location where you have stored the .CSV Files.
There are many Columns which show high Variance, We get rid of them first by using the command:-
training.first <- training[ , colSums(is.na(training)) == 0]
remove = c('X', 'user_name', 'raw_timestamp_part_1', 'raw_timestamp_part_2', 'cvtd_timestamp', 'new_window', 'num_window')
training.second <- training.first[, -which(names(training.first) %in% remove)]
We check the dimensions of the first and second row:-
> nrow(training.first)
[1] 19622
> nrow(training.second)
[1] 19622
There are many Columns which show high Variance,We get rid of them first by using the command:-
> zV= nearZeroVar(training.dere[sapply(training.second, is.numeric)], saveMetrics = TRUE)
> training.nonzerovar = training.second[,zeroVar[, 'nzv']==0]
> dim(training.nonzerovar)
[1] 19622 53
> cM <- cor(na.omit(training.nonzerovar[sapply(training.nonzerovar, is.numeric)]))
> dim(cM)
[1] 52 52
cDF <- expand.grid(row = 1:52, col = 1:52)
cDF$correlation <- as.vector(cM)
> cDF <- expand.grid(row = 1:52, col = 1:52)
> cDF$correlation <- as.vector(cM)
¬ levelplot(correlation ~ row+ col, cDF)
> rcor = findCorrelation(corrMatrix, cutoff = .87, verbose = TRUE)
Compare row 10 and column 1 with corr 0.992
Means: 0.27 vs 0.168 so flagging column 10
Compare row 1 and column 9 with corr 0.925
Means: 0.25 vs 0.164 so flagging column 1
Compare row 9 and column 4 with corr 0.928
Means: 0.233 vs 0.161 so flagging column 9
Compare row 8 and column 2 with corr 0.966
Means: 0.245 vs 0.157 so flagging column 8
Compare row 2 and column 11 with corr 0.884
Means: 0.228 vs 0.154 so flagging column 2
Compare row 19 and column 18 with corr 0.918
Means: 0.09 vs 0.154 so flagging column 18
Compare row 46 and column 31 with corr 0.914
Means: 0.101 vs 0.158 so flagging column 31
Compare row 46 and column 33 with corr 0.933
Means: 0.082 vs 0.161 so flagging column 33
All correlations <= 0.87
> training.decor = training.nonzerovar[,-rcor]
> dim(training.decor)
[1] 19622 45
We now split our Training Data into 2 Sets with a 65% split to our modified training set and the remaining 35% which stays in the testing set.
> inTrain <- createDataPartition(y=training.decor$classe, p=0.65, list=FALSE)
> train <- training.decor[inTrain,];
> test <- training.decor[-inTrain,]
> dim(training);
[1] 13737 46
> dim(testing)
[1] 5885 46
set.seed(999)
rf.training=randomForest(classe~.,data=train,ntree=100, importance=TRUE)
rf.training
y=varImpPlot(rf.training,)
> y=varImpPlot(rf.training,)
> y
MeanDecreaseAccuracy MeanDecreaseGini
yaw_belt 32.029426 724.83447
total_accel_belt 12.604981 255.56995
gyros_belt_x 15.593406 86.91657
gyros_belt_y 8.756169 101.48180
gyros_belt_z 19.055566 340.55547
magnet_belt_x 17.916008 215.04822
magnet_belt_y 16.679845 408.57591
magnet_belt_z 15.244762 328.98098
roll_arm 20.024840 236.10620
pitch_arm 10.497100 144.78818
yaw_arm 15.096575 204.52550
total_accel_arm 11.077107 80.05923
gyros_arm_y 20.526429 123.59521
gyros_arm_z 15.786235 61.49579
accel_arm_x 10.504261 184.80547
accel_arm_y 13.453446 135.70542
accel_arm_z 13.766164 109.37582
magnet_arm_x 8.604656 188.95669
magnet_arm_y 9.299032 168.88421
magnet_arm_z 15.245923 147.37576
roll_dumbbell 15.598563 326.60878
pitch_dumbbell 8.315829 135.90273
yaw_dumbbell 13.627680 194.76769
total_accel_dumbbell 12.948539 205.95248
gyros_dumbbell_y 13.828084 223.95488
accel_dumbbell_x 13.039806 189.44484
accel_dumbbell_y 17.225338 286.71295
accel_dumbbell_z 16.209837 244.10784
magnet_dumbbell_x 14.924324 337.38854
magnet_dumbbell_y 20.257029 493.11831
magnet_dumbbell_z 28.686235 572.07015
roll_forearm 14.688614 425.69150
pitch_forearm 19.962761 566.22108
yaw_forearm 14.481442 142.72084
total_accel_forearm 14.186970 88.99483
gyros_forearm_x 15.437910 70.97954
gyros_forearm_y 20.587655 115.92990
gyros_forearm_z 17.605637 77.98011
accel_forearm_x 15.020910 226.22867
accel_forearm_y 12.898990 119.38238
accel_forearm_z 17.083710 211.92138
magnet_forearm_x 10.898452 164.74667
magnet_forearm_y 13.269938 189.39069
magnet_forearm_z 23.045708 227.39173
tree.pred=predict(rf.training,test,type="class")
predMatrix = with(testing,table(tree.pred,classe))
> confusionMatrix(tree.pred,test$classe)
Confusion Matrix and Statistics
Reference
Prediction A B C D E
A 1949 5 0 0 0
B 4 1317 10 0 0
C 0 6 1186 18 0
D 0 0 1 1104 2
E 0 0 0 3 1260
Overall Statistics
Accuracy : 0.9929
95% CI : (0.9906, 0.9947)
No Information Rate : 0.2845
P-Value [Acc > NIR] : < 2.2e-16
Kappa : 0.991
Mcnemar's Test P-Value : NA
Statistics by Class:
Class: A Class: B Class: C Class: D Class: E
Sensitivity 0.9980 0.9917 0.9908 0.9813 0.9984
Specificity 0.9990 0.9975 0.9958 0.9995 0.9995
Pos Pred Value 0.9974 0.9895 0.9802 0.9973 0.9976
Neg Pred Value 0.9992 0.9980 0.9981 0.9964 0.9996
PrevalPROBLEM STATEMENT
“Using devices such as Jawbone Up, Nike FuelBand, and Fitbit it is now possible to collect a large amount of data about personal activity relatively inexpensively. These type of devices are part of the quantified self movement ??? a group of enthusiasts who take measurements about themselves regularly to improve their health, to find patterns in their behavior, or because they are tech geeks. One thing that people regularly do is quantify how much of a particular activity they do, but they rarely quantify how well they do it. In this project, your goal will be to use data from accelerometers on the belt, forearm, arm, and dumbell of 6 participants. They were asked to perform barbell lifts correctly and incorrectly in 5 different ways.”
What should be submitted.
The goal of your project is to predict the manner in which they did the exercise. This is the "classe" variable in the training set. You may use any of the other variables to predict with. You should create a report describing how you built your model, how you used cross validation, what you think the expected out of sample error is, and why you made the choices you did. You will also use your prediction model to predict 20 different test cases.
Data
The training data for this project are available here:
https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv
The test data are available here:
https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv
SOLUTION
Open the R tool and install the following packages by typing
Install.packages(“caret”)
Install.packages(“randomForest”)
Install.packages(“e1071”)
Load the Give Libraries using the following command.
Library(caret)
library(randomForest)
Library(e1071)
The training and Testing data set is available as links online.You can store the CSV Files as per the following command.
Urltrain = "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
training = read.csv(url(Urltrain), na.strings=c("NA","#DIV/0!",""))
Urltest = "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
testing11 <- read.csv(url(Urltest), na.strings=c("NA","#DIV/0!",""))
Addtionally,you can manually download the CSV Files from the given link by using the:-
Getwd()
And setting to the Location where you have stored the .CSV Files.
There are many Columns which show high Variance, We get rid of them first by using the command:-
training.first <- training[ , colSums(is.na(training)) == 0]
remove = c('X', 'user_name', 'raw_timestamp_part_1', 'raw_timestamp_part_2', 'cvtd_timestamp', 'new_window', 'num_window')
training.second <- training.first[, -which(names(training.first) %in% remove)]
We check the dimensions of the first and second row:-
> nrow(training.first)
[1] 19622
> nrow(training.second)
[1] 19622
There are many Columns which show high Variance,We get rid of them first by using the command:-
> zV= nearZeroVar(training.dere[sapply(training.second, is.numeric)], saveMetrics = TRUE)
> training.nonzerovar = training.second[,zeroVar[, 'nzv']==0]
> dim(training.nonzerovar)
[1] 19622 53
> cM <- cor(na.omit(training.nonzerovar[sapply(training.nonzerovar, is.numeric)]))
> dim(cM)
[1] 52 52
cDF <- expand.grid(row = 1:52, col = 1:52)
cDF$correlation <- as.vector(cM)
> cDF <- expand.grid(row = 1:52, col = 1:52)
> cDF$correlation <- as.vector(cM)
¬ levelplot(correlation ~ row+ col, cDF)
> rcor = findCorrelation(corrMatrix, cutoff = .87, verbose = TRUE)
Compare row 10 and column 1 with corr 0.992
Means: 0.27 vs 0.168 so flagging column 10
Compare row 1 and column 9 with corr 0.925
Means: 0.25 vs 0.164 so flagging column 1
Compare row 9 and column 4 with corr 0.928
Means: 0.233 vs 0.161 so flagging column 9
Compare row 8 and column 2 with corr 0.966
Means: 0.245 vs 0.157 so flagging column 8
Compare row 2 and column 11 with corr 0.884
Means: 0.228 vs 0.154 so flagging column 2
Compare row 19 and column 18 with corr 0.918
Means: 0.09 vs 0.154 so flagging column 18
Compare row 46 and column 31 with corr 0.914
Means: 0.101 vs 0.158 so flagging column 31
Compare row 46 and column 33 with corr 0.933
Means: 0.082 vs 0.161 so flagging column 33
All correlations <= 0.87
> training.decor = training.nonzerovar[,-rcor]
> dim(training.decor)
[1] 19622 45
We now split our Training Data into 2 Sets with a 65% split to our modified training set and the remaining 35% which stays in the testing set.
> inTrain <- createDataPartition(y=training.decor$classe, p=0.65, list=FALSE)
> train <- training.decor[inTrain,];
> test <- training.decor[-inTrain,]
> dim(training);
[1] 13737 46
> dim(testing)
[1] 5885 46
set.seed(999)
rf.training=randomForest(classe~.,data=train,ntree=100, importance=TRUE)
rf.training
y=varImpPlot(rf.training,)
> y=varImpPlot(rf.training,)
> y
MeanDecreaseAccuracy MeanDecreaseGini
yaw_belt 32.029426 724.83447
total_accel_belt 12.604981 255.56995
gyros_belt_x 15.593406 86.91657
gyros_belt_y 8.756169 101.48180
gyros_belt_z 19.055566 340.55547
magnet_belt_x 17.916008 215.04822
magnet_belt_y 16.679845 408.57591
magnet_belt_z 15.244762 328.98098
roll_arm 20.024840 236.10620
pitch_arm 10.497100 144.78818
yaw_arm 15.096575 204.52550
total_accel_arm 11.077107 80.05923
gyros_arm_y 20.526429 123.59521
gyros_arm_z 15.786235 61.49579
accel_arm_x 10.504261 184.80547
accel_arm_y 13.453446 135.70542
accel_arm_z 13.766164 109.37582
magnet_arm_x 8.604656 188.95669
magnet_arm_y 9.299032 168.88421
magnet_arm_z 15.245923 147.37576
roll_dumbbell 15.598563 326.60878
pitch_dumbbell 8.315829 135.90273
yaw_dumbbell 13.627680 194.76769
total_accel_dumbbell 12.948539 205.95248
gyros_dumbbell_y 13.828084 223.95488
accel_dumbbell_x 13.039806 189.44484
accel_dumbbell_y 17.225338 286.71295
accel_dumbbell_z 16.209837 244.10784
magnet_dumbbell_x 14.924324 337.38854
magnet_dumbbell_y 20.257029 493.11831
magnet_dumbbell_z 28.686235 572.07015
roll_forearm 14.688614 425.69150
pitch_forearm 19.962761 566.22108
yaw_forearm 14.481442 142.72084
total_accel_forearm 14.186970 88.99483
gyros_forearm_x 15.437910 70.97954
gyros_forearm_y 20.587655 115.92990
gyros_forearm_z 17.605637 77.98011
accel_forearm_x 15.020910 226.22867
accel_forearm_y 12.898990 119.38238
accel_forearm_z 17.083710 211.92138
magnet_forearm_x 10.898452 164.74667
magnet_forearm_y 13.269938 189.39069
magnet_forearm_z 23.045708 227.39173
tree.pred=predict(rf.training,test,type="class")
predMatrix = with(testing,table(tree.pred,classe))
> confusionMatrix(tree.pred,test$classe)
Confusion Matrix and Statistics
Reference
Prediction A B C D E
A 1949 5 0 0 0
B 4 1317 10 0 0
C 0 6 1186 18 0
D 0 0 1 1104 2
E 0 0 0 3 1260
Overall Statistics
Accuracy : 0.9929
95% CI : (0.9906, 0.9947)
No Information Rate : 0.2845
P-Value [Acc > NIR] : < 2.2e-16
Kappa : 0.991
Mcnemar's Test P-Value : NA
Statistics by Class:
Class: A Class: B Class: C Class: D Class: E
Sensitivity 0.9980 0.9917 0.9908 0.9813 0.9984
Specificity 0.9990 0.9975 0.9958 0.9995 0.9995
Pos Pred Value 0.9974 0.9895 0.9802 0.9973 0.9976
Neg Pred Value 0.9992 0.9980 0.9981 0.9964 0.9996
Prevalence 0.2845 0.1934 0.1744 0.1639 0.1838
Detection Rate 0.2839 0.1918 0.1728 0.1608 0.1835
Detection Prevalence 0.2846 0.1939 0.1763 0.1613 0.1840
Balanced Accuracy 0.9985 0.9946 0.9933 0.9904 0.9989
Random forests give us highly accurate results. Hence, we choose this model to predict on our test data.
Now,we use our Random Forest Model to test our Test set predictors.
TESTING DATA PREDICTIONS
> Testsetpredictors <- predict(rf.training, testing11)
> Testsetpredictors
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
B A B A A E D B A A B C B A E E A B B B
Levels: A B C D E
ence 0.2845 0.1934 0.1744 0.1639 0.1838
Detection Rate 0.2839 0.1918 0.1728 0.1608 0.1835
Detection Prevalence 0.2846 0.1939 0.1763 0.1613 0.1840
Balanced Accuracy 0.9985 0.9946 0.9933 0.9904 0.9989
Random Forests give us Highly Accurate results.Hence,we go in for this for testing our Test Data.
Now,we use our Random Forest Model to test our Test set predictors.
TESTING DATA PREDICTIONS
> Testsetpredictors <- predict(rf.training, testing11)
> Testsetpredictors
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
B A B A A E D B A A B C B A E E A B B B
Levels: A B C D E
|
72fcf085a27ee8583e04e9c9f4cb3a9990446397 | 3a18590b29c93037cd42b8b15a704f99afe8bf7b | /Genome Analysis/GWAS & Linkage Analysis/GWAS Pediatric/R/GWAS.R | 0dad7f1e9a2787f57cbff8be6e2d95be5419af14 | [] | no_license | jperilla/Bioinformatics | a4575ed9eb48e5cfb8c387f3cd87dbde8fd9f25e | 53d367b8c519e6bec3b5be482cc9f4400893837c | refs/heads/master | 2023-06-24T05:05:03.873432 | 2023-06-09T16:39:55 | 2023-06-09T16:39:55 | 131,012,606 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,878 | r | GWAS.R | #read in Fisher's test results from plink output
# Read Fisher's exact test association results produced by PLINK
# and take a quick look at the table structure.
data = read.table("C:\\TEMP\\datasets\\plink.assoc.fisher", header=T)
dim(data)
data[1:4,]
colnames(data)
# plot
# Manhattan-style plot: -log10(p-value) per marker.  An empty frame is drawn
# first (type="n"), then vertical lines are added with lines(type="h").
par(mar=c(8,5,5,5))
plot(-log10(data$P), type="n",
     xaxt="n", xlab="", ylab="-log10(p-value)",
     main="Distribution of p-values from Fisher's Test",
     col = "black")
# label every 166th marker on the x axis with its base-pair position
xtick<-seq(1, 1668, by=166)
axis(side=1,at=xtick,labels=data$BP[xtick], las=2)
lines(-log10(data$P),
      type = "h", col = "black")
# reference line at -log10(p) = 2, i.e. p = 0.01
abline(2.0,0,col="red",lty="dashed")
mtext("Position", side=1, line=6)
# count markers below the two nominal significance thresholds
plessthan01 <- data[data$P < 0.01,]
dim(plessthan01)
plessthan05 <- data[data$P < 0.05,]
dim(plessthan05)
#read MDS results from plink and plot
mds = read.table("C:\\TEMP\\datasets\\plink.mds", header=T)
colnames(mds)
mds
# scatterplot of the first two MDS components (population-structure check)
plot.df <- data.frame(pc1=mds$C1, pc2=mds$C2)
plot(plot.df, col=c(2,4), xlab="Eigenvector 1",
     ylab="Eigenvector 2", main="MDS eigenvector 1 vs. eigenvector 2")
legend(0.1, -0.1, c("group 1", "group 2"), col = c(2,4),pch = c(1,1))
# write columns 1,2,4,5 out as a covariate file for PLINK
# (presumably FID, IID, C1, C2 — verify against the plink.mds header)
mycov <- mds[,c(1,2,4,5)]
write.table(mycov,file="C:\\TEMP\\datasets\\mycov.txt", row.names=FALSE)
# Logistic-regression association results (with covariates); keep only the
# additive-model rows (TEST == "ADD").
covar = read.table("C:\\TEMP\\datasets\\plink.assoc.logistic", header=T)
dim(covar)
colnames(covar)
covar.add <- covar[covar$TEST=="ADD",]
dim(covar.add)
# same Manhattan-style plot for the regression p-values
par(mar=c(8,5,5,5))
plot(-log10(covar.add$P), type="n",
     xaxt="n", xlab="", ylab="-log10(p-value)",
     main="Distribution of p-values from Linear Regression",
     col = "black")
xtick<-seq(1, 1668, by=166)
axis(side=1,at=xtick,labels=covar.add$BP[xtick], las=2)
lines(-log10(covar.add$P),
      type = "h", col = "black")
abline(2.0,0,col="red",lty="dashed")
mtext("Position", side=1, line=6)
plessthan01.covar <- covar.add[covar.add$P < 0.01,]
dim(plessthan01.covar)
plessthan05.covar <- covar.add[covar.add$P < 0.05,]
dim(plessthan05.covar)
|
c6d32dd32e81f0b24015e4b3d9a625df5aa9c2b7 | c5aefe7be5406eadde1c009896b4704ed820695a | /man/tile_coords.Rd | c31d84aa11b560e65894e77dcff8092d83800b11 | [
"MIT"
] | permissive | cran/rtrek | d48bf4713e9b3ba75259399bc11d62ce2b72c86a | 12d9c47f15292a5a0ee962e019caee4117d25101 | refs/heads/master | 2021-06-07T00:13:51.143895 | 2021-06-01T14:50:06 | 2021-06-01T14:50:06 | 136,319,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,515 | rd | tile_coords.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tiles.R
\name{tile_coords}
\alias{tile_coords}
\title{Simple CRS coordinates}
\usage{
tile_coords(data, id)
}
\arguments{
\item{data}{a data frame containing columns named \code{col} and \code{row}. These contain column-row number pairs defining matrix cells in tile set \code{id}. See details.}
\item{id}{character, name of map tile set ID. See \code{\link{stTiles}}.}
}
\value{
a data frame.
}
\description{
Convert \code{(column, row)} numbers to \code{(x, y)} coordinates for a given tile set.
}
\details{
This function converts column and row indices for an available map tile set matrix to coordinates that can be used in a Leaflet map. See \code{\link{stTiles}} for available tile sets.
\code{data} cannot contain columns named \code{x} or \code{y}, which are reserved for the column-appended output data frame.
Each tile set has a simple/non-geographical coordinate reference system (CRS). Respective coordinates are based on the dimensions of the source image used to generate each tile set.
The same column and row pair will yield different map coordinates for different tile sets. Typical for matrices, columns are numbered increasing from left to right and rows increasing from top to bottom.
The output of \code{tile_coords} is a typical Cartesian coordinate system, increasing from left to right and bottom to top.
}
\examples{
d <- data.frame(row = c(0, 3222, 6445), col = c(0, 4000, 8000))
tile_coords(d, "galaxy1")
}
|
f0742afa103094afd5e887763233df0a08d92e6c | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /accelerometry/man/unidata.Rd | 9b15217e0ebde04264b7105f5a9d4466ed1c40f8 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 449 | rd | unidata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accelerometry-data.R
\docType{data}
\name{unidata}
\alias{unidata}
\title{Uniaxial Sample Data}
\source{
\url{https://wwwn.cdc.gov/nchs/nhanes/search/datapage.aspx?Component=Examination&CycleBeginYear=2003}
}
\description{
Accelerometer data for the first 5 participants in the National Health and
Nutrition Examination Survey (NHANES) 2003-2004 dataset.
}
|
ced0e01aa8d0e081553c8fdf3685322aac13a257 | 7f3bc9e70bd966acc41bb2c430c86840d59c808d | /Scripts/SibSp_Survived.R | e33f9504541cef4cdc2310362f9f4fd3b133b81b | [] | no_license | ArnabBir/Kaggle_Titanic | 7daf2d1aeeffdd3f1a3964de86adfe66468f0b86 | f8ad78f84420f6004e675486ca1a29129b0b9cf7 | refs/heads/master | 2021-06-11T16:51:17.135824 | 2017-03-20T08:46:19 | 2017-03-20T08:46:19 | 69,182,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 329 | r | SibSp_Survived.R | data <- read.csv("D://Github//Kaggle_Titanic//train.csv")
# Drop passengers travelling with 0 or 1 siblings/spouses so the plot
# focuses on the larger family-size groups.
data <- data[!(data$SibSp == 0),]
data <- data[!(data$SibSp == 1),]
# Cross-tabulate survival (rows) by number of siblings/spouses (columns).
counts <- table(data$Survived, data$SibSp)
# Stacked bar chart: one bar per SibSp value, split by survival outcome
# (legend labels come from the table's row names, i.e. 0/1 survival codes).
barplot(counts, main="SibSp vs Survived Plot",
        xlab= "SibSp",ylab = "Number of People", col=c("darkblue","red"),
        legend = c(rownames(counts)))
|
1d5c29a81db65a1a0564cb0be8395e012611c0ec | f1c8bb430a2ca29f8e99cac27eaed4fc58c33090 | /scripts/functions.R | e8be4d124839debef61e70cab001e8f6caacbdf0 | [] | no_license | fernandoprudencio/MOD11A2_MONITORING | 61e26ded231179ee38ddc677b1e7758c3ef058de | 501766fe69bc8c88b27f2170a108f902f7cd3e4e | refs/heads/master | 2022-12-23T09:16:37.072964 | 2020-08-28T18:17:56 | 2020-08-28T18:17:56 | 290,056,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,118 | r | functions.R | #' INSTALL PACKAGES
# Packages this script depends on; install any that are missing.
pkg <- c("tidyverse", "raster", "DescTools")
# lapply + invisible(): this loop is run purely for its side effect
# (installation), so do not auto-print a list of NULLs at the top level.
invisible(lapply(
  pkg,
  function(x) {
    # installed.packages() row names list everything currently installed
    if (!x %in% rownames(installed.packages())) {
      install.packages(x)
    }
  }
))
#' LOAD LIBRARIES
library(tidyverse)
library(raster)
library(DescTools)
#' change months from english language to spanish language
# Lookup table: lower-case English month name -> Spanish month name.
english.months <- c(
  "january", "february", "march", "april", "may", "june", "july", "august",
  "september", "october", "november", "december"
)
spanish.months <- c(
  "Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto",
  "Septiembre", "Octubre", "Noviembre", "Diciembre"
)
to.spanish <- spanish.months
names(to.spanish) <- english.months

#' Translate English month names inside date strings to Spanish.
#'
#' @param date character vector of date strings containing English month names.
#' @param output.lang target language; only "es" triggers translation.
#' @return translated string(s) when output.lang = "es"; otherwise the input
#'   unchanged.
translate.date <- function(date, output.lang = "es") {
  if (output.lang == "es") {
    # lower-case first so the english.months keys match regardless of case;
    # str_replace_all() with a named vector replaces each name by its value
    str_replace_all(tolower(date), to.spanish)
  } else {
    # BUG FIX: previously any other language returned invisible NULL,
    # silently dropping the date; pass it through unchanged instead.
    date
  }
}
#' this function filters MODIS dataset by quality band
#' this is the order of 8 bits of the quality band
# (07)(06)(05)(04)(03)(02)(01)(00) - MODIS NOMENCLATURE
# (01)(02)(03)(04)(05)(06)(07)(08) - R NOMENCLATURE
#'
# Mask out low-quality pixels in `band` using the matching QA band `qaband`.
# `filter` is a list of four character vectors of 2-bit patterns (strings, in
# the R nomenclature above) selecting QA bit combinations to REMOVE:
#   [[1]] Mandatory QA, [[2]] data quality, [[3]] emissivity error,
#   [[4]] LST error.
# NOTE(review): a pixel is dropped if ANY field matches (`|`); confirm OR
# (not AND) is the intended logic.  Also, only type == "mxd11a2" is handled;
# any other `type` leaves `df.bin` undefined and the function errors below.
qaFilter <- function(band, qaband, type, filter) {
  if (type == "mxd11a2") {
    # all possible 8-bit QA values as 8-character binary strings (1..255)
    dataBIN <- sprintf("%08d", DecToBin(1:255) %>% as.numeric())
    # decimal QA values whose bit fields match any requested filter pattern
    df.bin <- tibble(bin = dataBIN) %>%
      mutate(dec = 1:n()) %>%
      filter(
        str_sub(bin, 7, 8) %in% filter[[1]] | # Mandatory QA flags
          str_sub(bin, 5, 6) %in% filter[[2]] | # Data quality flag
          str_sub(bin, 3, 4) %in% filter[[3]] | # Emiss Error flag
          str_sub(bin, 1, 2) %in% filter[[4]] # LST Error flag
      )
  }
  #' changing the values of the quality band to NA and 1
  # 256 is outside the 8-bit range, so NA QA cells survive the %in% test
  qaband[is.na(qaband)] <- 256
  qaband[qaband %in% df.bin$dec] <- NA
  qaband[!is.na(qaband)] <- 1
  # multiply: good pixels keep their value, flagged pixels become NA
  return(band * qaband)
}
#' Mean of all raster cell values that fall inside the polygon `st`.
#' Cells outside the polygon are masked to NA and excluded from the mean.
extract_data <- function(file, st) {
  masked_vals <- getValues(mask(file, st))
  mean(masked_vals, na.rm = TRUE)
}
#' Flag outliers with the 1.5 * IQR (Tukey) rule.
#'
#' Returns a logical vector, TRUE where a value lies outside
#' [Q1 - 1.5*IQR, Q3 + 1.5*IQR].  Quartiles are computed once (the original
#' called quantile()/IQR() twice each) and with na.rm = TRUE, so NA values
#' no longer make the whole call error; they map to NA in the result.
is_outlier <- function(x) {
  qs <- quantile(x, c(0.25, 0.75), na.rm = TRUE)
  iqr <- qs[[2]] - qs[[1]]
  x < qs[[1]] - 1.5 * iqr | x > qs[[2]] + 1.5 * iqr
}
1c04159fb18eed6bc92a2514ab3bb2e5c28ab494 | e0494fbf13e6caaf532682f41959f50e28120f54 | /cert_class/Lesson_3/command_line_R.r | 8ba9e9514d026141566171da93c82ec3696ae539 | [] | no_license | lagerratrobe/data_science | 3f9c263d8de2c83acef984a782fc60833d138887 | dca487c12763432dfeb0af60da6b4206e83ecfa7 | refs/heads/master | 2021-09-13T11:01:46.848341 | 2018-03-23T22:45:02 | 2018-03-23T22:45:02 | 95,619,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | command_line_R.r | library(tidyverse)
### creating data frame
music <- c("Blues", "Hip-hop", "Jazz", "Metal", "Rock")
number <- c(8, 7, 4, 6, 11)
df.music <- data.frame(music, number)
colnames(df.music) <- c("Music", "Amount")
### Create the plot
# BUG FIX: aes() must reference the *renamed* columns (Music/Amount).
# The original aes(x=music, y=number) only worked because ggplot fell back
# to the global vectors after failing to find those names in df.music.
myplot <- ggplot(data=df.music, aes(x=Music, y=Amount)) +
  geom_bar(stat="identity") +
  xlab(colnames(df.music)[1]) +
  ylab(colnames(df.music)[2]) +
  ylim(c(0,11)) +
  ggtitle("Ulubiony typ muzyki ród studentów")
# write the plot to a 5x5 inch PDF
pdf("Myplot.pdf", width=5, height=5)
plot(myplot)
dev.off()
|
2768826c748bd0a3d8a524d2afaee6d53c4de0a3 | 1545ba82ade1e54622b4da2673bd70298c29d962 | /inst/testScripts/devel/C1C2/31.PairedPSCBS,DP,deShear.R | 8ad450258d175a252ec22f5907f26fab6bd116a0 | [] | no_license | HenrikBengtsson/aroma.cn | 536d1a3352f4d4bc054f33e2cd9802616f65cd32 | 4e56fc240804f5aa2bde92226153416f692ae768 | refs/heads/master | 2022-08-15T11:37:51.600802 | 2022-07-20T19:07:46 | 2022-07-20T19:07:46 | 20,846,363 | 1 | 1 | null | 2018-03-29T01:10:59 | 2014-06-15T02:23:38 | R | UTF-8 | R | false | false | 5,170 | r | 31.PairedPSCBS,DP,deShear.R | library("aroma.cn");
library("PSCBS");
library("R.devices");
library("R.menu");
# Verbosity object used for logging throughout the script.
verbose <- Arguments$getVerbose(-10);
# Local functions
# use the 2012-09-22 version of the de-shearing routine (defined elsewhere)
deShearC1C2 <- deShearC1C2_20120922;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Local functions
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# doPlots(): produce the standard per-sample figure set for a PairedPSCBS
# fit: a (C1,C2) scatter with segment lines, and a tcn/c1/c2 track panel.
# NOTE(review): annotates figures with the globals `dataSet` and `chipType`,
# which must exist in the calling environment.
setMethodS3("doPlots", "PairedPSCBS", function(fit, sampleName=NULL, tags=NULL, ...) {
  # Argument 'sampleName':
  # default to the sample name stored in the fit object
  if (is.null(sampleName)) {
    sampleName <- sampleName(fit);
  }
  stopifnot(!is.null(sampleName));
  # tag figures with the number of change points in this fit
  nCPsTag <- sprintf("#CPs=%d", nbrOfChangePoints(fit));
  # (C1,C2) scatter with connecting segment lines
  toPNG(sampleName, tags=c("(C1,C2)", nCPsTag, tags), width=800, {
    plotC1C2Grid(fit);
    linesC1C2(fit);
    stext(side=3, pos=0, sampleName);
    stext(side=3, pos=1, nCPsTag);
    stext(side=4, pos=0, dataSet, cex=0.7);
    stext(side=4, pos=1, chipType, cex=0.7);
  });
  # genome-wide tracks for total CN plus the C1 and C2 components
  toPNG(sampleName, tags=c("tracks", nCPsTag, tags), width=1200, aspectRatio=0.25, {
    plotTracks(fit, tracks="tcn,c1,c2");
    stext(side=4, pos=0, sampleName);
    stext(side=4, pos=1, nCPsTag);
  });
}) # doPlots()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setup Paired PSCBS segmentation data set
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Root directory holding the Paired PSCBS segmentation data sets.
rootPath <- "pscbsData";
path <- Arguments$getReadablePath(rootPath);
# Pick a data set (interactive menu when more than one is available).
dataSets <- list.files(rootPath);
if (length(dataSets) > 1) {
  dataSet <- textMenu(dataSets, value=TRUE);
} else {
  dataSet <- dataSets[1];
}
path <- file.path(rootPath, dataSet);
path <- Arguments$getReadablePath(path);
# Pick a chip type within the chosen data set (same interactive pattern).
chipTypes <- list.files(path);
if (length(chipTypes) > 1) {
  chipType <- textMenu(chipTypes, value=TRUE);
} else {
  chipType <- chipTypes[1];
}
ds <- PairedPSCBSFileSet$byName(dataSet, chipType=chipType);
print(ds);
dsName <- getName(ds);
if (length(ds) == 0) {
  throw("No PairedPSCBS data file found.")
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Select tumor-normal pair
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Pick one tumor-normal pair (interactively if several are available).
if (length(ds) > 1) {
  ii <- textMenu(getNames(ds));
} else {
  ii <- 1L;
}
# Load the segmentation fit only if it is not already in the workspace;
# when (re)loading, clear cached downstream results (segList/fitList).
if (!exists("fit") || !inherits(fit, "PairedPSCBS")) {
  df <- getFile(ds, ii);
  fit <- loadObject(df);
  sampleName <- getName(df);
  rm(segList, fitList);
}
# keep an untouched copy of the original fit
fit0 <- fit;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Configure report
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# All figures produced via toPNG()/devEval go under figures/<dataSet>/.
figPath <- file.path("figures", dataSet);
options("devEval/args/path"=figPath);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot (C1,C2)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
doPlots(fit);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prune change points using dynamic programming
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# segList is cached in the workspace so reruns skip the expensive DP step
# (NOTE: it is indexed as segList[[nCPs + 1L]] in the loop below).
if (!exists("segList", mode="list")) {
  segList <- seqOfSegmentsByDP(fit, verbose=-10);
  modelFit <- attr(segList, "modelFit");
  modelFit$seqOfSegmentsByDP <- NULL;
  str(modelFit);
}
# Residual squared error as a function of the number of change points.
toPNG(sampleName, tags=c("DP", "RSEvsCPs"), width=800, aspectRatio=0.7, {
  plot(modelFit$nbrOfChangePoints, modelFit$rse,
       xlab="Number of change points", ylab="RSE");
  stext(side=3, pos=0, sampleName);
  stext(side=4, pos=0, dataSet, cex=0.7);
  stext(side=4, pos=1, chipType, cex=0.7);
});
# Change-point counts to evaluate (only the first two are used here).
nbrOfCPs <- c(100, 50, 25)[1:2];
if (!exists("fitList", mode="list")) {
  fitList <- list();
}
# For each target change-point count: re-segment using the DP-pruned
# segments, plot, then apply the (C1,C2) de-shearing and plot again.
for (kk in seq_along(nbrOfCPs)) {
  key <- sprintf("nbrOfCPs=%d", nbrOfCPs[kk]);
  verbose && enter(verbose, sprintf("Change point set #%d ('%s') of %d", kk, key, length(nbrOfCPs)));
  verbose && cat(verbose, "Number of change points: ", nbrOfCPs[kk]);
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Pruning CPs via dynamic programming
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # reuse a cached re-segmentation for this CP count when available
  fitT <- fitList[[key]];
  if (is.null(fitT)) {
    verbose && enter(verbose, "Resegmenting");
    knownSegments <- segList[[nbrOfCPs[kk]+1L]];
    fitT <- resegment(fit, knownSegments=knownSegments, undoTCN=+Inf, undoDH=+Inf);
    fitList[[key]] <- fitT;
    verbose && exit(verbose);
  }
  # carry the sample name over to the re-segmented fit
  sampleName(fitT) <- sampleName(fit);
  fitDP <- fitT;
  doPlots(fitDP);
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Deshear
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  fitD <- deShearC1C2(fitDP);
  doPlots(fitD, tags="deShear");
  nCPsTag <- sprintf("#CPs=%d", nbrOfChangePoints(fitD));
  # density of change-point angles with expected and called peaks annotated
  toPNG(sampleName, tags=c("cpCallDensity", nCPsTag, "deShear"), width=800, aspectRatio=0.5, {
    debug <- fitD$modelFit$debug;
    d <- debug$cpAngleDensity;
    pfp <- debug$pfp;
    expected <- attr(pfp, "expected");
    par(mar=c(5,4,2,2));
    plot(d, lwd=2, main="");
    abline(v=expected);
    text(x=expected, y=par("usr")[4], names(expected), adj=c(0.5,-0.5), cex=1.5, xpd=TRUE);
    # Annotate called peaks
    idxs <- match(pfp$call, expected);
    text(x=pfp$x, y=pfp$density, names(expected)[idxs], adj=c(0.5,-0.5), cex=1.5, col="blue");
    stext(side=4, pos=0, sampleName);
    stext(side=4, pos=1, nCPsTag);
  });
  verbose && exit(verbose);
} # for (kk ...)
|
f4d0f3f9c4c0ac9ec673665f9c72dc832f4d9841 | cfd6ba53782490abe9e80615d9ba92df612c0a85 | /admbtools/R/bounding_functions.R | c99f3e071391ef519cb0dcf52f2137cac5c40fb3 | [] | no_license | colemonnahan/admb_guide | c4cb5474f7508553e3420bd9c0d14b928afc72ab | b341198ce093aaef8a7106ae5a6a4a1d980bceba | refs/heads/master | 2020-12-24T13:16:49.958119 | 2015-02-17T03:50:44 | 2015-02-17T03:50:44 | 21,586,562 | 3 | 0 | null | 2015-02-17T03:50:44 | 2014-07-07T20:59:42 | R | UTF-8 | R | false | false | 1,224 | r | bounding_functions.R | #' The bounding function.
#'
#' @template bounding_template
boundp <- function(x, minb, maxb, hbf=0){
  ## Map an unbounded parameter x into (minb, maxb) using the same
  ## transformation ADMB applies, selected by the Hybrid_bounded_flag (hbf).
  width <- maxb - minb
  if (hbf == 1) {
    ## logistic transform
    minb + width / (1 + exp(-x))
  } else if (hbf == 0) {
    ## sine transform
    minb + width * (0.5 * sin(x * pi / 2) + 0.5)
  } else {
    stop("Invalid hbf value, should be 0 or 1")
  }
}
#' Inverse bounding transformation function used by ADMB.
#'
#' @template bounding_template
boundpin <- function(x, minb, maxb, hbf) {
  ## Inverse of boundp(): recover the unbounded parameter from a bounded value.
  if (hbf == 1) {
    ## inverse logistic (logit of the rescaled value)
    -log((maxb - x) / (x - minb))
  } else if (hbf == 0) {
    ## inverse sine transform
    scaled <- 2 * (x - minb) / (maxb - minb) - 1
    asin(scaled) / (pi / 2)
  } else {
    stop("Invalid hbf value, should be 0 or 1")
  }
}
#' Derivative of the bounding transformation function used by ADMB.
#'
#' @template bounding_template
ndfboundp <- function(x, minb, maxb, hbf) {
  ## Derivative of boundp() with respect to x, used to find the "scales".
  width <- maxb - minb
  if (hbf == 1) {
    ## d/dx of the logistic transform
    width * exp(-x) / (1 + exp(-x))^2
  } else if (hbf == 0) {
    ## d/dx of the sine transform
    width * 0.5 * pi / 2 * cos(x * pi / 2)
  } else {
    stop("Invalid hbf value, should be 0 or 1")
  }
}
|
c587ac2163cdd6a4c17c3918a8a4efe292f09c95 | 4dde346f604373c813ceda011ea3658a5fed28f6 | /webinar_figures/webinar_figures.R | da8a11d997a30f609c6be3ce6d1fd23b0486f787 | [] | no_license | matthewkling/climclust | 89a2f1eb160c310099ca964ea12dcb59c805193a | 47a7d39520e7e2bc44acdf5d4e95b1885cee49f2 | refs/heads/master | 2020-12-01T04:38:05.437266 | 2016-10-03T18:36:47 | 2016-10-03T18:36:47 | 67,259,802 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,109 | r | webinar_figures.R |
library(raster)
library(dplyr)
library(tidyr)
library(fastcluster)
library(FNN)
library(colormap)
library(ggplot2)
library(rgdal)
setwd("C:/Lab_projects/2016_climate_classification/climclust/webinar_figures")
##### data setup ######
# load bcm layers
files <- list.files("C:/Lab_projects/2016_Phylomodelling/Data/Climate/BCM_normals/Normals_30years",
pattern="HST", full.names=T)
r <- lapply(files[2:5], readRDS) %>%
do.call("stack", .)
# log-transform ppt
r[[4]] <- log(r[[4]])
names(r) <- c("cwd", "djf", "jja", "ppt")
# convert raster to matrix
v <- na.omit(cbind(coordinates(r), scale(values(r))))
colnames(v) <- c("x", "y", "cwd", "djf", "jja", "ppt")
# subsample pixels for speed
px <- sample(nrow(v), 200000) # change this to 100k for production run
# find sampled pixel most similar to each non-sampled pixel
nn <- get.knnx(v[px,3:6], v[,3:6], k=1)
# pca for colorspace
pc <- prcomp(v[,3:6])$x[,1:3]
col3d <- colors3d(pc) %>%
col2rgb() %>%
t()
# continuous color plot
p <- ggplot(as.data.frame(v), aes(x, y)) +
geom_raster(fill=rgb(col3d, maxColorValue=255)) +
ggmap::theme_nothing() +
coord_fixed()
png(paste0("continuous.png"), width=5, height=6, units="in", res=1000)
plot(p)
dev.off()
# perservation ranch bounary shapefile
pr <- readOGR("preservation_ranch", "PreservationRanch_boundary")
prd <- broom::tidy(pr)
# coastal conservancy acquisitions shapefile
cc <- readOGR("Acquisitions", "projects_scc_2016_07_13_10_40_28")
ccd <- broom::tidy(cc)
# coastal jusrisdiction shapefile
cj <- readOGR("SCCJurisdiction2015", "SCCJurisdiction2015_Dissolve")
cj <- spTransform(cj, crs(cc))
cj <- crop(cj, r)
cjd <- broom::tidy(cj)
###### build state-level hclust tree ##########
tree <- hclust.vector(v[px,3:6], method="ward")
###### figure 1 #######
# histogram of percent land area per type, for state vs coastal conservancy, at k=20
# cut tree into clusters and transfer to rasters
clust <- cutree(tree, 20)
cluster <- clust[nn$nn.index]
kr <- r[[1]]
kr[!is.na(values(kr))] <- cluster
# rasterize shapefiles and stack with clusters
ccr <- rasterize(cc, r[[1]]) %>% reclassify(c(-1, Inf, 1))
cjr <- rasterize(cj, r[[1]]) %>% reclassify(c(-1, Inf, 1))
kr <- stack(kr, ccr, cjr)
names(kr) <- c("cluster", "conservancy", "coastal")
kr <- stack(kr, r)
# create conservancy vs all partitions, by double-adding conservancy lands
cd1 <- as.data.frame(rasterToPoints(kr)) %>%
filter(!is.na(cluster))
ccdd <- filter(cd1, !is.na(conservancy))
cd1$conservancy <- 0
cd <- rbind(cd1, ccdd)
cdh <- group_by(cd, conservancy, cluster) %>%
filter(!is.na(coastal)) %>%
summarize(n=n(), coastal=length(na.omit(coastal))) %>%
group_by(conservancy) %>%
mutate(p=n/sum(n))# %>%
#filter(coastal > 0) # exclude climate types entirely outside the coastal region
coastal_types <- unique(cdh$cluster[cdh$coastal!=0])
cdo <- cd %>%
group_by(cluster) %>%
summarize(jja=mean(jja)) %>%
arrange(jja)
cdh$cluster <- factor(cdh$cluster, levels=cdo$cluster)
cdh <- arrange(cdh, cluster, conservancy)
# expand
cdh <- expand.grid(cluster=unique(cdh$cluster),
conservancy=unique(cdh$conservancy)) %>%
left_join(cdh)
cdh$cluster <- factor(cdh$cluster, levels=cdo$cluster)
# reference map
p <- ggplot() +
geom_raster(data=as.data.frame(rasterToPoints(r[[1]])),
aes(x,y), fill="gray85") +
geom_polygon(data=cjd, aes(long, lat, group=group),
fill="darkseagreen", color=NA) +
geom_polygon(data=ccd, aes(long, lat, group=group),
fill="darkgreen", color="darkgreen") +
ggmap::theme_nothing() +
coord_fixed() +
xlim(extent(r)[c(1,2)]) +
ylim(extent(r)[c(3,4)]) +
annotate(geom="text", label=c("SCC Jurisdiction", "SCC Acquisitions"),
x=150000, y=c(250000, 200000), color=c("darkseagreen", "darkgreen"),
size=6, hjust=0, fontface="bold")
ggsave("reference_map.png", p, width=6, height=9, units="in")
# histogram
p <- ggplot(filter(cdh, cluster %in% coastal_types), aes(cluster, p, group=conservancy,
fill=factor(conservancy, labels=c("state", "conservancy")))) +
geom_bar(stat="identity", position="dodge", width=.9) +
scale_fill_manual(values=c("gray", "darkgreen")) +
theme_minimal() +
scale_y_continuous(breaks=seq(0, 1, .1)) +
labs(y="proportion of of total land within domain",
fill="domain",
x="climate type (coastal types only, sorted by ascending JJA)") +
theme(legend.position=c(.5,.9))
ggsave("histogram.png", p, width=9, height=6, units="in")
ggsave("histogram_tall.png", p, width=9, height=16, units="in")
# coastal cluster map
#clrs <- distant_colors(length(unique(cdh$cluster)))
clrs <- distant_colors(length(unique(cd$cluster)))
eb <- element_blank()
p <- ggplot(cd) +
geom_raster(aes(x, y, fill=factor(cluster, levels=cdo$cluster))) +
geom_polygon(data=cjd, aes(long, lat, group=group),
fill=NA, color="black") +
theme(panel.background=eb, panel.grid=eb,
axis.text=eb, axis.title=eb, axis.ticks=eb) +
scale_fill_manual(values=clrs) +
labs(fill="climate\ntype")
ggsave("coastal_cluster_map.png", p, width=6, height=6, units="in")
# histogram colored to match map
p <- ggplot() +
geom_bar(data=cdh,
aes(cluster, p, fill=cluster,
group=conservancy),
stat="identity", position="dodge", width=.9,
color=NA) +
geom_bar(data=cdh,
aes(cluster, p,
alpha=factor(conservancy, labels=c("state", "conservancy")),
group=conservancy),
stat="identity", position="dodge", width=.9,
fill="black", color=NA) +
scale_fill_manual(values=clrs[unique(cd$cluster) %in% coastal_types],
guide=F) +
scale_alpha_manual(values=c(0, 1)) +
theme_minimal() +
scale_y_continuous(breaks=seq(0, 1, .1)) +
labs(y="proportion of of total land within domain",
alpha="domain",
x="climate type (coastal types only, sorted by ascending JJA)") +
theme(legend.position=c(.5,.9))
ggsave("histogram_colored.png", p, width=9, height=6, units="in")
ggsave("histogram_colored_tall.png", p, width=9, height=16, units="in")
###### figure 2 #######
# statewide and preservation ranch cluster maps
for(k in c(20, 50, 100, 1000)){
clust <- cutree(tree, k)
cluster <- clust[nn$nn.index]
kr <- r[[1]]
kr[!is.na(values(kr))] <- cluster
palette <- distant_colors(k)
clrs <- palette[cluster]
hclrs <- as.data.frame(cbind(cluster, col3d)) %>%
group_by(cluster) %>%
mutate_each(funs(mean)) %>%
mutate(hex=rgb(red, green, blue, maxColorValue=255))
kd <- kr %>%
rasterToPoints() %>%
as.data.frame() %>%
mutate(color=palette[layer.1])
p <- ggplot(kd, aes(x, y)) +
geom_raster(fill=kd$color) +
geom_polygon(data=prd, aes(long, lat, group=group), fill=NA, color="black") +
ggmap::theme_nothing() +
coord_fixed()
ggsave(paste0("statewide_", k, ".png"), p, width=6, height=9, units="in")
prkd <- crop(kr, pr) %>%
rasterToPoints() %>%
as.data.frame() %>%
mutate(color=palette[layer.1])
p <- ggplot(prkd, aes(x, y)) +
geom_raster(fill=prkd$color) +
geom_polygon(data=prd, aes(long, lat, group=group), fill=NA, color="black") +
ggmap::theme_nothing() +
coord_fixed()
ggsave(paste0("pr_", k, ".png"), p, width=6, height=6, units="in")
}
|
940af61b35c90e957173213c48dc0ae8721c5c0d | 7f00f9804e7de68e135162157f746363455109f8 | /R/rs_get_selection.R | 8dc1ef77b3fe27b77127e2f64ef860cbf99c8f5a | [
"MIT"
] | permissive | GegznaV/addin.tools | ab3022f3266e3d3f811b524a88362b400630d61f | 377fad8756fc33a7db7d1357692febbc524de07d | refs/heads/master | 2023-08-18T09:40:51.034110 | 2023-08-08T12:34:59 | 2023-08-08T12:34:59 | 122,939,940 | 1 | 0 | MIT | 2023-08-08T12:21:37 | 2018-02-26T08:44:38 | R | UTF-8 | R | false | false | 3,128 | r | rs_get_selection.R | #' Get selection text.
#'
#' Get the text in either the first selection or all selections.
#'
#' @inheritParams rs_get_index
#' @param as_list (logical)
#' Flag, if result should be a list, if `selection` is either
#' `"first"` or `"last"`.
#' @return A character vector.
#' @export
rs_get_selection_text <- function(selection = c("all", "first", "last"),
                                  as_list = FALSE,
                                  context = rs_get_context()) {
  selection <- match.arg(selection)
  # Pull the text of the requested selection(s) out of the editor context.
  if (selection == "all") {
    txt <- purrr::map_chr(context$selection, "text")
  } else if (selection == "first") {
    txt <- rstudioapi::selectionGet(id = context$id)$value
  } else {
    txt <- context$selection[[rs_get_n_selections(context = context)]]$text
  }
  # Optionally wrap the character vector in a list.
  if (isTRUE(as_list)) {
    txt <- as.list(txt)
  }
  txt
}
#' Get length of selection.
#'
#' Calculate number of characters in each selection.
#'
#' @inheritParams rs_get_index
#'
#' @return An integer vector with number of characters in each selection.
#' @export
rs_get_selection_length <- function(selection = c("all", "first", "last"),
                                    context = rs_get_context()) {
  # Character count of each selected text chunk.
  sel_text <- rs_get_selection_text(selection = selection, context = context)
  nchar(sel_text)
}
#' Get lengths of selected rows.
#'
#' Calculate number of characters in each selected row.
#'
#' @inheritParams rs_get_index
#' @param row (numeric) \cr
#' Index of the first row of interest of a vector of row indices.
#' @param end_row (numeric | `NULL`) \cr
#' Index of the last row of interest or `NULL`.
#'
#' @return An integer vector with number of characters in each selection.
#' @export
rs_get_row_lengths <- function(row, end_row = NULL, context = rs_get_context()) {
  # Fetch the text of the requested row(s), then count characters per row.
  row_text <- rs_get_text(row = row, end_row = end_row, context = context)
  nchar(row_text)
}
#' Get number of selections.
#'
#' @inheritParams rs_get_index
#'
#' @return Number of selections.
#' @export
rs_get_n_selections <- function(context = rs_get_context()) {
  # One entry in context$selection per editor selection/cursor.
  sels <- context$selection
  length(sels)
}
#' Get range of selection.
#'
#' Get the range of the first/each selection.
#'
#' @inheritParams rs_get_index
#' @param as_list (locical) \cr
#' Indicates if output sould be returned as a list.
#'
#' @return Either a "document_range" object, if `selection` is "first" or
#' "last", and `as_list = TRUE`, or a list of those objects otherwise.
#' @export
rs_get_selection_range <- function(selection = c("all", "first", "last"),
                                   as_list = FALSE, # TODO: default to as_list = TRUE
                                   context = rs_get_context()) {
  selection <- match.arg(selection)
  sels <- context$selection
  if (selection == "all") {
    # already a list: one "document_range" per selection
    out <- purrr::map(sels, "range")
  } else if (selection == "first") {
    out <- sels[[1]]$range
  } else {
    out <- sels[[rs_get_n_selections(context = context)]]$range
  }
  # For a single range, optionally wrap it so the return type matches "all".
  if (isTRUE(as_list) && selection != "all") {
    out <- list(out)
  }
  out
}
|
0711b6f26aa20fdfb6af6772a73807ed79cebb3d | 34dcb8ba6b7f1965b2a8a891c6509dc87518f52e | /script.R | 2005a327d8e021c84e97279287095790f1d9eb17 | [] | no_license | UgyenNorbu/fatal_police_shootings | 0227f184912a88d542e6411de262ce00d0dd7ec0 | 5c702ecccff9d138d807765c5b234af4e2f4b6cd | refs/heads/master | 2022-11-19T12:31:46.617423 | 2020-07-24T07:45:16 | 2020-07-24T07:45:16 | 282,145,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 293 | r | script.R | library(tidyverse)
library(ggplot2)
library(lubridate)
data <- read_csv("fatal-police-shootings-data.csv")
data <- data %>%
select(-id)
str(data)
data %>%
group_by(month_year=floor_date(date, "month")) %>%
tally() %>%
ggplot(aes(x = month_year, y = n)) +
geom_line()
|
cabe21163c0c08f74da1924b14c050d2d49074f2 | 9fbd34dd260879468ee3710dc80f1a96478d39f9 | /R/manuscript/analyses/old/map.future.scenarios.R | 6fc8cc50d640c44477196273f08163c464f50799 | [] | no_license | Kah5/bimodality | 2fa26842ba50cdceff22a2f9eb335fc73bcec496 | 2b53dd59777292f84666ac2fcbd7031eda8ddb71 | refs/heads/master | 2023-02-01T05:48:49.361038 | 2023-01-21T01:25:00 | 2023-01-21T01:25:00 | 49,456,870 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 10,189 | r | map.future.scenarios.R | # this script contins functions to determine places projected to be bimodal under future climate
# and will map out these in space:
#right now there are separate functions for the modern and the pls veg-environment relationships
# function for the FIA Vegetation-environment relationships
## Classify each future-climate bin as bimodal vs unimodal for the FIA
## (modern) vegetation-environment relationships and map the result.
##
## Args:
##   data    data.frame of grid cells; must contain the columns named by
##           `binby`, `binby2`, `density`, x/y coordinates, and a
##           "rcp<rcp>NA" column flagging out-of-sample cells.
##   binby   (string) column used to bin cells before testing each bin's
##           density distribution for bimodality.
##   density (string) column holding the tree-density values tested.
##   binby2  (string) column of `data` the per-bin statistics are merged
##           back onto (the future-climate bin).
##   rcp     RCP scenario suffix used to build the "rcp<rcp>NA" column name.
##
## Returns: a ggplot map coloring cells Unimodal / out-of-sample / Bimodal.
## Relies on globals `mapdata` and helpers `bimodality_coefficient`/`find_modes`.
bimodal.future.rNA <- function(data, binby, density, binby2, rcp){
  bins <- as.character(unique(data[,binby]))
  ## One row per bin: bimodality coefficient, dip-test p, first two modes.
  coeffs <- matrix(NA, length(bins), 4)
  for (i in seq_along(bins)){
    coeffs[i,1] <- bimodality_coefficient(na.omit(data[data[,binby] %in% bins[i], c(density)])) # bimodality coefficient
    coeffs[i,2] <- diptest::dip.test(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y))$p # dip-test p-value
    peaks <- find_modes(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y)) # modes (peaks) of the smoothed density
    ## If there is more than one peak, keep the first two; otherwise zero
    ## both out so the mode criteria below cannot flag the bin as bimodal.
    ## BUGFIX: was `if(length(peaks > 1))`, which is TRUE whenever any peak
    ## exists and left mode2 as NA for single-peak bins.
    if (length(peaks) > 1) {
      coeffs[i,3] <- peaks[1]
      coeffs[i,4] <- peaks[2]
    }else{
      coeffs[i,3] <- 0
      coeffs[i,4] <- 0
    }
  }
  coeffs[is.na(coeffs)]<- 0 # replace NA/NaN with 0 so the numeric comparisons below work
  coef.bins <- data.frame(cbind(coeffs, bins))
  colnames(coef.bins) <- c("BC", "dipP", "mode1", "mode2", "bins") # rename columns
  ## cbind() with the character `bins` coerced everything to character;
  ## convert the statistics back to numeric.
  coef.bins$BC <- as.numeric(as.character(coef.bins$BC))
  coef.bins$dipP <- as.numeric(as.character(coef.bins$dipP))
  coef.bins$mode1 <- as.numeric(as.character(coef.bins$mode1))
  coef.bins$mode2 <- as.numeric(as.character(coef.bins$mode2))
  # merge bins with the second binby -> here it is future climate
  merged <- merge(coef.bins, data, by.x = "bins", by.y = binby2)
  # define bimodality
  merged$bimodal <- "Unimodal"
  ## Criteria: high bimodality coefficient, significant dip test, and one
  ## mode on each side of 99.
  ## NOTE(review): na.omit() inside ifelse() would shorten the mode vectors
  ## and recycle if any NAs remained; after the zero-fill above there should
  ## be none -- confirm.
  bimodal<- ifelse(merged$BC >= 0.55 & merged$dipP <= 0.05 & na.omit(merged$mode1) <= 99 & na.omit(merged$mode2) >=99,
                   "Bimodal", "Unimodal")
  merged$bimodal <- bimodal
  ## Cells outside the observed covariate space for this RCP are masked out.
  merged[merged[,c(paste0("rcp",rcp,"NA"))] %in% 'out-of-sample',]$bimodal <- "out-of-sample"
  #define bimodal savanna/forest and not bimodal savanna & forest
  #merged
  ggplot()+geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), color = 'black', fill = 'white')+
    geom_raster(data = merged, aes(x = x, y = y, fill = bimodal))+ scale_fill_manual(values = c(
      '#2c7bb6',
      'black',
      '#d7191c'
    ), limits = c('Unimodal',"out-of-sample",'Bimodal') )+geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), color = 'black', fill = 'NA')+
    theme_bw()+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
                      axis.text.y=element_blank(),axis.ticks=element_blank(),
                      axis.title.x=element_blank(),
                      axis.title.y=element_blank())+
    xlab("easting") + ylab("northing") +coord_equal() + ggtitle(binby2)
}
# for PLS veg-envrionment relationships:
## PLS (pre-settlement) version of bimodal.future.rNA: classify each
## future-climate bin as bimodal vs unimodal and map the result.
##
## Args mirror bimodal.future.rNA; `binby2` names the column of the global
## `dens.pr` used for the merge.
## NOTE(review): this function merges onto the global `dens.pr` rather than
## the `data` argument -- presumably intentional since callers pass dens.pr,
## but confirm before reusing with other inputs.
bimodal.future.NA <- function(data, binby, density, binby2, rcp){
  bins <- as.character(unique(data[,binby]))
  ## One row per bin: bimodality coefficient, dip-test p, first two modes.
  coeffs <- matrix(NA, length(bins), 4)
  for (i in seq_along(bins)){
    coeffs[i,1] <- bimodality_coefficient(na.omit(data[data[,binby] %in% bins[i], c(density)])) # bimodality coefficient
    coeffs[i,2] <- diptest::dip.test(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y))$p # dip-test p-value
    peaks <- find_modes(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y)) # modes (peaks) of the smoothed density
    ## Keep the first two modes only when the distribution truly has more
    ## than one peak.
    ## BUGFIX: was `if(length(peaks > 1))` -- TRUE for any non-empty `peaks`,
    ## leaving mode2 as NA for single-peak bins.
    if (length(peaks) > 1) {
      coeffs[i,3] <- peaks[1]
      coeffs[i,4] <- peaks[2]
    }else{
      coeffs[i,3] <- 0
      coeffs[i,4] <- 0
    }
  }
  coeffs[is.na(coeffs)]<- 0 # replace NA/NaN with 0 so the numeric comparisons below work
  coef.bins <- data.frame(cbind(coeffs, bins))
  colnames(coef.bins) <- c("BC", "dipP", "mode1", "mode2", "bins") # rename columns
  ## cbind() with the character `bins` coerced everything to character;
  ## convert the statistics back to numeric.
  coef.bins$BC <- as.numeric(as.character(coef.bins$BC))
  coef.bins$dipP <- as.numeric(as.character(coef.bins$dipP))
  coef.bins$mode1 <- as.numeric(as.character(coef.bins$mode1))
  coef.bins$mode2 <- as.numeric(as.character(coef.bins$mode2))
  #merge bins with the second binby -> here it is future climate
  merged <- merge(coef.bins, dens.pr, by.x = "bins", by.y = binby2)
  #define bimodality
  merged$bimodal <- "Unimodal"
  ## Criteria: high bimodality coefficient, significant dip test, and one
  ## mode on each side of 99.
  merged[merged$BC >= 0.55 & merged$dipP <= 0.05 & na.omit(merged$mode1) <= 99 & na.omit(merged$mode2) >=99, ]$bimodal <- "Bimodal"
  ## Cells outside the observed covariate space for this RCP are masked out.
  merged[merged[,c(paste0("rcp",rcp,"NA"))] %in% 'out-of-sample',]$bimodal <- "out-of-sample"
  #define bimodal savanna/forest and not bimodal savanna & forest
  #merged
  ggplot()+geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), color = 'black', fill = 'white')+
    geom_raster(data = merged, aes(x = x, y = y, fill = bimodal))+ scale_fill_manual(values = c(
      '#2c7bb6',
      'black',
      '#d7191c'
    ), limits = c('Unimodal',"out-of-sample",'Bimodal') )+
    theme_bw()+ theme(axis.line=element_blank(),axis.text.x=element_blank(),
                      axis.text.y=element_blank(),axis.ticks=element_blank(),
                      axis.title.x=element_blank(),
                      axis.title.y=element_blank())+
    xlab("easting") + ylab("northing") +coord_equal() + ggtitle(binby2)
}
## Classify bins as bimodal vs unimodal (two-color map, no out-of-sample
## masking). Skips bins with fewer than two usable rows.
##
## Args: as in bimodal.df (note the argument order differs from the
## *.NA variants: binby2 precedes density here).
## NOTE(review): merges onto the global `dens.pr`, not `data` -- confirm.
bimodal.future <- function(data, binby,binby2, density){
  bins <- as.character(unique(data[,binby]))
  coeffs <- matrix(NA, length(bins), 4)
  for (i in seq_along(bins)){
    if(nrow(na.omit(data[data[,binby] %in% bins[i],])) > 1){
      coeffs[i,1] <- bimodality_coefficient(na.omit(data[data[,binby] %in% bins[i], c(density)])) # bimodality coefficient
      coeffs[i,2] <- diptest::dip.test(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y))$p # dip-test p-value
      peaks <- find_modes(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y)) # modes (peaks) of the smoothed density
      ## Keep the first two modes only when there really is more than one
      ## peak.
      ## BUGFIX: was `if(length(peaks > 1))` -- TRUE for any non-empty
      ## `peaks`, leaving mode2 as NA for single-peak bins.
      if (length(peaks) > 1) {
        coeffs[i,3] <- peaks[1]
        coeffs[i,4] <- peaks[2]
      }else{
        coeffs[i,3] <- 0
        coeffs[i,4] <- 0
      }
    }else{
      ## NOTE(review): assigning the string "NA" coerces the whole numeric
      ## matrix to character; the as.numeric(as.character(...)) below turns
      ## these back into real NAs (with a coercion warning).
      coeffs[i,1] <- "NA"
      coeffs[i,2] <- "NA"
    }
  }
  coeffs[is.nan(coeffs)]<- 0 # replace NaN with 0 (a no-op once the matrix is character; see note above)
  coef.bins <- data.frame(cbind(coeffs, bins))
  colnames(coef.bins) <- c("BC", "dipP", "mode1", "mode2", "bins") # rename columns
  coef.bins$BC <- as.numeric(as.character(coef.bins$BC))
  coef.bins$dipP <- as.numeric(as.character(coef.bins$dipP))
  coef.bins$mode1 <- as.numeric(as.character(coef.bins$mode1))
  coef.bins$mode2 <- as.numeric(as.character(coef.bins$mode2))
  #merge bins with the second binby -> here it is future climate
  merged <- merge(coef.bins, dens.pr, by.x = "bins", by.y = binby2)
  #define bimodality
  merged$bimodal <- "Unimodal"
  ## Criteria: high bimodality coefficient, significant dip test, and one
  ## mode on each side of 99.
  bimodal<- ifelse(merged$BC >= 0.55 & merged$dipP <= 0.05 & na.omit(merged$mode1) <= 99 & na.omit(merged$mode2) >=99,
                   "Bimodal", "Unimodal")
  merged$bimodal <- bimodal
  #define bimodal savanna/forest and not bimodal savanna & forest
  ggplot()+geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), color = 'black', fill = 'white')+
    geom_raster(data = merged, aes(x = x, y = y, fill = bimodal))+ scale_fill_manual(values = c(
      '#d7191c','#2c7bb6'
      #'black',
    ), limits = c('Bimodal',"Unimodal") )+
    geom_polygon(data = mapdata, aes(group = group,x=long, y =lat), color = 'black', fill = 'NA')+theme_classic()+ xlim(-150000, 1150000)+
    xlab("easting") + ylab("northing")+coord_equal()+xlim(-150000, 1150000)
}
# bimodal.df function outputs the dataframe of bimodal/not bimodal
bimodal.df <- function(data, binby, density, binby2){
bins <- as.character(unique(data[,binby]))
coeffs <- matrix(NA, length(bins), 4)
for (i in 1:length(bins)){
if(nrow(na.omit(data[data[,binby] %in% bins[i],])) > 1){
coeffs[i,1] <- bimodality_coefficient(na.omit(data[data[,binby] %in% bins[i], c(density)])) # calculation bimoality coefficient
coeffs[i,2] <- diptest::dip.test(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y))$p # calculate p-value for hte diptest
peaks <- find_modes(na.omit(density(data[data[,binby] %in% bins[i], c(density)])$y)) # calculate the modes or peaks of the distribution
# if there is more than one peak, list the first 2 peaks
if(length(peaks > 1)) {
coeffs[i,3] <- peaks[1]
coeffs[i,4] <- peaks[2]
}else{
coeffs[i,3] <- 0
coeffs[i,4] <- 0
}
}else{
coeffs[i,1] <- "NA"
coeffs[i,2] <- "NA"
}
}
coeffs[is.nan(coeffs)]<- 0 # replace NANs with 0 values here
coef.bins <- data.frame(cbind(coeffs, bins))
colnames(coef.bins) <- c("BC", "dipP", "mode1", "mode2", "bins") # rename columns
coef.bins$BC <- as.numeric(as.character(coef.bins$BC))
coef.bins$dipP <- as.numeric(as.character(coef.bins$dipP))
coef.bins$mode1 <- as.numeric(as.character(coef.bins$mode1))
coef.bins$mode2 <- as.numeric(as.character(coef.bins$mode2))
#merge bins iwth the second binby -> here is is future climate
merged <- merge(coef.bins, dens.pr, by.x = "bins", by.y = binby2)
#define bimodality
merged$bimodal <- "Unimodal"
#criteria for bimodality
bimodal<- ifelse(merged$BC >= 0.55 & merged$dipP <= 0.05 & na.omit(merged$mode1) <= 99 & na.omit(merged$mode2) >=99,
"Bimodal", "Unimodal")
merged$bimodal <- bimodal
#define bimodal savanna/forest and not bimodal savanna & forest
if(density == "PLSdensity"){
merged$classification <- "test"
merged$classification <- paste(merged$bimodal, merged$ecotype)
merged[merged$classification %in% 'Bimodal prairie',]$classification <- "Prairie"
merged[merged$classification %in% 'Unimodal prairie',]$classification <- "Prairie"
}else{
merged$classification <- "test"
merged$classification <- paste(merged$bimodal, merged$fiaecotype)
}
merged
}
|
dcb48a83ba0ec761f0730f8619e27df3c544c751 | 201398772b3822744c6fb77529880ca974a795fb | /man/coloc-package.Rd | 238011fbe4c67b2fd8020af07559e5f6288660d4 | [] | no_license | Hui-Guo/coloc | bbcfe99c2a38015f47e8bd537279af638b12b790 | 4904d3b2a9fb674759ba7ab7b54395698ecbbd58 | refs/heads/master | 2020-12-11T03:25:56.651519 | 2013-12-09T11:34:09 | 2013-12-09T11:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 732 | rd | coloc-package.Rd | \docType{package}
\name{coloc-package}
\alias{coloc-package}
\title{Colocalisation tests of two genetic traits}
\description{
Performs the colocalisation tests described in Plagnol et
al (2009) and Wallace et al (in preparation) and draws
some plots.
}
\details{
\code{coloc.test()} tests for colocalisation and returns
an object of class \code{coloc}.
}
\author{
Chris Wallace <chris.wallace@cimr.cam.ac.uk>
}
\references{
Plagnol et al (2009). Statistical independence of the
colocalized association signals for type 1 diabetes and
RPS26 gene expression on chromosome 12q13. Biostatistics
10:327-34.
\url{http://www.ncbi.nlm.nih.gov/pubmed/19039033}
Wallace et al (in preparation).
}
\keyword{package}
|
0beed1b1b19d3b504df532a900c665185d92c167 | 8eb63410d10cac89d443593ab2db3d3e614bc8e8 | /CS412/HW1/R/startercode.R | 1181317fad0a2720fe002b5b976c5d5944946a17 | [] | no_license | tsodapop/UIC_Courses | 5198c6c9b6dd2dd12433979a8e10bf4a3ef63973 | 37de906302080c40eadbefbbce68c4b632e19331 | refs/heads/master | 2023-08-05T02:55:22.252610 | 2020-02-21T05:11:27 | 2020-02-21T05:11:27 | 150,374,870 | 3 | 1 | null | 2023-07-06T21:36:20 | 2018-09-26T05:51:05 | Jupyter Notebook | UTF-8 | R | false | false | 1,891 | r | startercode.R | #Installing packages ONLY RUN THIS ONCE!
#Make sure that you have the up to date GCC and R
install.packages("caret",dependencies=TRUE)
install.packages("e1071")
install.packages("dplyr")
install.packages("ggplot2")
#Linking the packages
library(caret)
library(e1071)
library(dplyr)
library(ggplot2)
# Read data
# If you have trouble loading the data,
# Select "Source File Location" under Session -> Set Working Directory
data = read.csv("data.csv",sep=" ",header=FALSE,col.names=append("Digit",seq(1,257,by=1)))
data$X257 = NULL
#selecting
data = filter(data,Digit==1|Digit==5)
#force R to treat 1,5 as categories, not numerics
data$Digit = as.factor(data$Digit)
#Partitioning the data
set.seed(100)
index = createDataPartition(data$Digit, p = 0.2, list = F )
train = data[index,]
test = data[-index,]
#pick the first and second pixel and plot them
#https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
graph <- ggplot(train,
aes(x=X72,X88))+# this sets the axes CHOOSE BETTER AXES THAN THIS
geom_point(aes(color=Digit)) #this tells the plot to make a scatter plot and color them based on digit
graph #this will display the graph
#Set the level of cross-validation
trControl <- trainControl(method = "cv",
number = 10)
#this will build the model
model1 <- train(Digit~. , # the . character means use all other variables
data = train,
trControl = trControl,
method = "knn",
tuneGrid = expand.grid(k = 1:49)) #modeling 1s and 5s for 256 dimensions
plot(model1)
model2 <- train(Digit~X72+X88 , #these are the predictive variables
data = train,
method = "knn",
trControl = trControl,
tuneGrid = expand.grid(k = 1:49)) #modeling 1s and 5s for 256 dimensions
plot(model2)
m
|
f280abf4a29ba63244d84bdcd2cc6130724390f3 | adcfb06e4b8fec803bb177887028ad55058b7f7e | /Day02_homework_Petra.R | 43fb09db708f5157e6639a787d1d261fba665b26 | [] | no_license | petrabradley/Petra_Day02_R_Homework | 42a860c4891ca452a2b8d7072d41c0292af69b41 | f76d80016406888ad5c6afc551b5ba2e803d1323 | refs/heads/master | 2021-01-11T23:29:56.786643 | 2017-01-11T01:27:03 | 2017-01-11T01:27:03 | 78,590,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,876 | r | Day02_homework_Petra.R | # Complete all of the items below
# Use comments where you're having trouble or questions
# 1. Read your data set into R
?read.table
adaption.innovation <- read.csv("Adaption_Innovation_Analysis.csv")
# 2. Peek at the top few rows
head(adaption.innovation, 5)
# 3. Peek at the top few rows for only a few columns
adaption.innovation[1:5, 1:3]
# 4. How many rows does your data have?
nrow(adaption.innovation)
# 5. Get a summary for every column
summary(adaption.innovation)
# 6. Get a summary for one column
summary(adaption.innovation[ ,1])
# 7. Are any of the columns giving you unexpected values?
#Yes - the first row below the header is variable information and I think it's reading it in incorrectly.
#Also, I think I need to rename my variables becasue it's doing odd things with the spaces.
# 8. Select a few key columns, make a vector of the column names
row.identifiers.for.first.test <- c(1, 2, 5, 9)
chosen.columns <- names(adaption.innovation[, row.identifiers.for.first.test])
names(adaption.innovation[, row.identifiers.for.first.test])
# 9. Create a new data.frame with just that subset of columns
subset.columns <- adaption.innovation[, row.identifiers.for.first.test]
# 10. Create a new data.frame that is just the first 10 rows
# and the last 10 rows of the data from the previous step
first.ten.rows <- head(subset.columns, 10)
last.ten.rows <- tail(subset.columns, 10)
head(subset.columns, 10)
tail(subset.columns, 10)
first.and.last <- (c(first.ten.rows, last.ten.rows))
objects(first.and.last) #this doesn't work. I don't know why. I've spent well over an hour
#trying different things. I give up.
# 11. Create a new data.frame that is a random sample of half of the rows.
# HINT: ?sample
half.adaption.innovation <- (nrow(adaption.innovation)/2)
new.data.frame <- sample(adaption.innovation, half.adaption.innovation)
# 12. Find a comparison in your data that is interesting to make
# (comparing two sets of numbers)
# - run a t.test for that comparison
comp.difficulty.noMT <- adaption.innovation[ ,9]
comp.difficulty.staticMT <- adaption.innovation[ ,32]
summary(comp.difficulty.noMT)
summary(comp.difficulty.staticMT)
t.test(comp.difficulty.noMT, comp.difficulty.staticMT)
t.test.results1 <- t.test(comp.difficulty.noMT, comp.difficulty.staticMT)
names(adaption.innovation)
# - decide whether you need a non-default testt.test.results1
# (e.g., Student's, paired)
# Condition (noMT or staticMT) was manipulated within subjects, so a paired t-test is appropriate
# - run the t.test with BOTH the formula and "vector"
t.test(comp.difficulty.noMT, comp.difficulty.staticMT, paired = TRUE)
paired.t.test.results1 <- t.test(comp.difficulty.noMT, comp.difficulty.staticMT, paired = TRUE)
# formats, if possible
# - if one is NOT possible, say why you can't do it
#I think I did a vector t-test, but I don't think a forumla test, at least like the one we did in
# class would work because my other factor is not a grouping variable.
# 13. Repeat #12 for TWO more comparisons
# - ALTERNATIVELY, if correlations are more interesting,
# do those instead of t-tests (and try both Spearman and
# Pearson correlations)
# - Tip: it's okay if the comparisons are kind of nonsensical, this is
# just a programming exercise
trans.difficulty.noMT <- adaption.innovation[ ,3]
trans.difficulty.staticMT <- adaption.innovation[ ,15]
summary(trans.difficulty.noMT)
summary(trans.difficulty.staticMT)
t.test(trans.difficulty.noMT, trans.difficulty.staticMT)
t.test.results2 <- t.test(trans.difficulty.noMT, trans.difficulty.staticMT)
names(adaption.innovation)
t.test(trans.difficulty.noMT, trans.difficulty.staticMT, paired = TRUE)
paired.t.test.results2 <- t.test(comp.difficulty.noMT, comp.difficulty.staticMT, paired = TRUE)
trans.confidence.accuracy.noMT <- adaption.innovation[ ,4]
trans.confidence.fidelity.noMT <- adaption.innovation[ ,5]
cor(trans.confidence.accuracy.noMT, trans.confidence.fidelity.noMT)
correlation.result <- cor(trans.confidence.accuracy.noMT, trans.confidence.fidelity.noMT)
#I'm really lost as to why this isn't working. I think there is non-numeric data in the column
#that it doesn't know what to do with, but I have no idea how to fix that.
# 14. Save all results from #12 and #13 in an .RData file
save(t.test.results1, paired.t.test.results1, t.test.results2, paired.t.test.results2, correlation.result, file = "Petras_day2_homework_results.RData")
# 15. Email me your version of this script, PLUS the .RData
# file from #14
# - ALTERNATIVELY, push your version of this script and your .RData results
# to a repo on GitHub, and send me the link
|
e3346affa6d3c46790802a0802171638e14016ef | 623226a7f14a26b755c107ba087076b7300186e0 | /cachematrix.R | cf3f990be975a4d4bd61cccbcb5bf15728c9292e | [] | no_license | GTolen/ProgrammingAssignment2 | f3a7ddc58abc7788326a3c0bdaa4b872dd745e6c | 551d4eebe21f0b7e17fa32bc518035c28fbff24d | refs/heads/master | 2021-01-17T23:06:46.363590 | 2016-02-07T04:38:03 | 2016-02-07T04:38:03 | 51,188,857 | 0 | 0 | null | 2016-02-06T04:06:48 | 2016-02-06T04:06:48 | null | UTF-8 | R | false | false | 1,391 | r | cachematrix.R | ## Assignment 2: Lexical Scoping
##
## makeCacheMatrix function creates a special matrix
## object that can cache its inverse
## This returns a list of function:
## 1) set: set the value of the matrix
## 2) get: get the value of the matrix
## 3) setInverseMat: set the value of the inverse of the matrix
## 4) getInverseMat: get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
test <- NULL
set <- function(y) {
x <<- y
test <<- NULL
}
get <- function() x
setInverseMat <- function(InvMat) test<<- InvMat
getInverseMat <- function() test
list(set = set,
get = get,
setInverseMat = setInverseMat,
getInverseMat = getInverseMat)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. On the first call the inverse is computed with solve()
## and cached; subsequent calls (with the matrix unchanged) return the
## cached value instead of recomputing it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverseMat()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverseMat(inverse)
  inverse
}
|
4630343d1047b6ee6d823bba609ef716ec104516 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2017/nonfatal_code/clinical_team/cf_models/02_cf2_stgpr_run.R | 8ccf379b82ad3a4197eddd4c5d153692d91182cc | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,987 | r | 02_cf2_stgpr_run.R |
## CF2 (inpatient correction factor) ST-GPR stage-1 script for one bundle:
## loads prepped CF2 data, fits a stage-1 model, and (optionally) draws.
## NOTE(review): rm(list = ls()) wipes the calling environment -- typical of
## this pipeline's cluster jobs, but unsafe to source interactively.
rm(list = ls())
## Maybe fix it not getting to pdf?
closeAllConnections()
library(ggplot2)
library(data.table)
library(lme4)
library(htmlwidgets, lib = 'filepath')
library(merTools, lib = 'filepath')
library(boot)
library(RMySQL)
library(slackr)
library(mortcore, lib = "filepath")
library(parallel)
library(magrittr)
library(readr)
library(nlme)
## Shared pipeline helpers (paths scrubbed to 'filepath' in this release);
## these supply get_location_metadata() and related functions.
source('filepath')
source('filepath')
source('filepath')
source('filepath')
## Cluster job arguments: positional arg 5 = bundle id, arg 6 = make_draws flag.
print(commandArgs(trailingOnly = T))
bundle <- commandArgs()[5]
make_draws <- as.logical(commandArgs()[6])
print(make_draws)
## For vetting
## Input: one row per location/age/sex/bundle with the raw CF2 ratio.
prep_data <- fread('filepath')
prep_data[age_group_id == 28, age_end := 1]  # force age_end = 1 for age_group_id 28
prep_data[cf2 == '.', cf2 := NA]             # '.' is a missing-value sentinel in the input
prep_data$cf2 <- as.numeric(prep_data$cf2)
## Shift CF2 down by 1 and floor exact zeros at 1e-5 -- presumably so log
## transforms of cf2_adjust stay finite; confirm against the modeling code.
prep_data[, cf2_adjust := cf2-1]
prep_data[cf2_adjust == 0, cf2_adjust := 0.00001]
## Location hierarchy (location set 35) with names kept for merging/plots.
locs <- get_location_metadata(35)
locs <- locs[, c('location_id', 'location_name', 'region_name', 'super_region_name', 'level')]
locs_to_merge <- locs[, c('location_id', 'region_name', 'super_region_name', 'location_name')]
##### RUN ###############
print(class(bundle))
print(bundle)
bun_df <- fread('filepath')
bun <- unique(bun_df[bundle_id == bundle]$bundle_name)
print(bun)
## Subset to this bundle and attach location hierarchy names.
lri <- prep_data[bundle_id == bundle]
lri <- merge(lri, locs[, c('location_id', 'region_name', 'super_region_name', 'location_name')], by = 'location_id')
#print(head(lri))
## Will have to add more outliers when they come up
## Make high values outliers
lri[, is_outlier := 0][location_id == 16 & age_start == 70 & sex_id == 1 & bundle_id == 19, is_outlier := 1]
lri[cf2 > 1000000, is_outlier := 1]
## Drop flagged outliers and rows with missing CF2.
lri <- lri[is_outlier == 0][!(is.na(cf2))]
## CF2: Same polynomial model adjusted for sex
## only takes in inpatient envelope, haqi mean with random effects on location
#rm(base)
## If only one source, take out random efects
## base is for vetting the p-values and not having to eye the t-scores for easier looks
lo <- FALSE
if(length(unique(lri$age_start)) <= 4) {
base_lme4_loc <- loess(log(cf2) ~ age_start + sex_id + location_id,
data = lri[cf2 <10000 & is_outlier != 1], parametric = c('sex_id', 'location_id'))
base_lme4 <- loess(log(cf2) ~ age_start + sex_id,
data = lri[cf2 <10000 & is_outlier != 1], parametric = c('sex_id'))
print('lo')
lo <- TRUE
} else if(length(unique(lri$location_id)) == 1){
base_lme4 <- glm(log(cf2) ~ poly(age_start, 3) + sex_id + ip_envelope,
data = lri[cf2 < 10000])
} else{
base_lme4 <- lmer(log(cf2) ~ poly(age_start, 3) + sex_id + ip_envelope + (1|location_id),
data = lri[cf2 < 10000])
}
#summary(base)
### Make predictions ####
## Predictions are all in log space for CF2
preddf <- fread('filepath')
preddf <- preddf[year_id == 2010 & location_id != 533 & age_group_id.x != 164][, pred := NULL]
preddf <- unique(preddf[age_group_id.x != 33][, age_group_id.y := NULL][, V1 := NULL])
preddf[, pred := predict(base_lme4, newdata = preddf, allow.new.levels = T)]
## Predict the location-specific ones for where we have data
## overwrites the prediction
if(lo == TRUE){
preddf[location_id %in% lri$location_id, pred := predict(base_lme4_loc,
newdata = preddf[location_id %in% lri$location_id],
allow.new.levels = TRUE)]
}
setnames(preddf, 'age_group_id.x', 'age_group_id')
## Add one back to the prediction to line up with CF2 for residuals (and not CF2 adjust)
locs[location_id %in% preddf$location_id, keep := 1]
locs[!(location_id %in% preddf$location_id), keep := 0]
## Calculate residuals in prep_data
## want everything in log space
## For input data: predict out with model that's location-specific
if(lo == TRUE){
lri[, pred := predict(base_lme4_loc, newdata = lri, allow.new.levels = T)]
} else{
lri[, pred := predict(base_lme4, newdata = lri, allow.new.levels = T)]
}
## Get residual by scaling the prediction back
## Pred is in log space and has already had 1 added back
lri[, pred_resid_log := log(cf2) - (pred)]
ggplot() +
geom_point(data = lri, aes(x = age_start, y = cf2, color = location_name)) +
#geom_point(data = lri, color = 'red') +
geom_point(data = preddf[!(is.na(pred))][location_id %in% lri$location_id], aes(x = age_start, y = exp(pred)), alpha = 0.2, color = 'blue') +
facet_wrap(~location_id)
## For adding back on:
if(make_draws == TRUE){
print('MAKING DRAWS')
if(lo == TRUE){
print('loess draws')
## Make location and non-locationspecific predictions
preds_locs <- predict(base_lme4_loc, newdata = preddf[location_id %in% lri$location_id], allow.new.levels = TRUE, se = TRUE)
preds <- predict(base_lme4, newdata = preddf[!(location_id%in% lri$location_id) ], allow.new.levels = T, se = TRUE)
pred_dt <- data.table(preds = preds$fit,
se = preds$se.fit)
pred_locs_dt <- data.table(preds = preds_locs$fit,
se = preds_locs$se.fit)
pred_dt <- rbind(pred_dt, pred_locs_dt)
## Need to resort preddf
preddf1 <- preddf[!(location_id %in% lri$location_id)]
preddf2 <- preddf[location_id %in% lri$location_id]
preddf <- rbind(preddf1, preddf2)
preddf <- cbind(preddf, pred_dt)
preddf$ID <- seq.int(nrow(preddf))
print(names(preddf))
na_df <- preddf[is.na(preds)]
na_df[, c('pred', 'preds') := 1][, se := 0]
draws_df <- preddf[!(is.na(preds))] ## Decreases the length
draws_df <- rbind(draws_df, na_df)
## Now need to get 1000 draws of every row
test_draws <- rbindlist(lapply(c(1:nrow(draws_df)), function(i){
single_draw_fit <- draws_df[i]$preds
single_draw_se <- draws_df[i]$se
dt <- data.table(draw_pred = rnorm(1000, single_draw_fit, single_draw_se))
dt[, ID := draws_df[i]$ID]
dt[, draw := seq.int(nrow(dt))][, draw := draw - 1]
dt[, draw := paste0('indv_cf_', draw)]
}))
preddf <- merge(preddf, test_draws, by = 'ID', all.x = TRUE, all.y = TRUE)
preddf <- preddf[!(is.na(draw_pred))]
## Get same columns as other
preddf <- preddf[, .(location_id, sex_id, age_start, age_end, age_group_id, ip_envelope, op_envelope,
haqi_mean, pred, draw, draw_pred)]
} else{
print('mixed effects draws')
test <- predictInterval(base_lme4, newdata = preddf, n.sims = 1000, level = 0.9, stat = 'mean', returnSims = TRUE)
preds <- data.table(attr(test, 'sim.results'))
setnames(preds, grep('[[:digit:]]', names(preds), value = TRUE), paste0('incidence_', 0:999))
preddf <- cbind(preddf[, c('location_id', 'sex_id', 'age_start', 'age_end', 'age_group_id', 'ip_envelope', 'haqi_mean', 'pred')],
preds)
preddf <- melt(preddf, measure = patterns('incidence_'), variable.name = 'draw', value.name = c('draw_pred'))
means <- preddf[, .(mean_draw = mean(draw_pred)),
by = .(location_id, sex_id, age_start, age_end, age_group_id)]
preddf <- merge(preddf, means, by = c('location_id', 'sex_id', 'age_start', 'age_end', 'age_group_id'))
setkey(preddf, 'draw')
## vet plots
ggplot(data = preddf[location_id == 6]) +
geom_point(aes(x = age_start, y = exp(draw_pred), color = 'draw predictions')) +
geom_point(aes(x = age_start, y = exp(pred), color = 'predictions'))
}
old <- Sys.time()
## Get draws, based off of CF2-1 (need to adjust post-hoc)
savedfs <- split(preddf, by = 'draw')
draws_df <- rbindlist(mclapply(c(1:1000), function(draw_num){
print(draw_num)
firststagedf <- savedfs[draw_num][[1]]
stlocsdf <- firststagedf[, 'location_id'] %>%
merge(locs, by = 'location_id') %>% unique
## Calculate space distance for each location
prep_locs <- lri[, c('location_id', 'region_name', 'super_region_name', 'location_name')] %>% unique
## Spits out data frame with distances from datapoints for the predictions
## Give ref to know what location it's referring to
############ Space weighting ##################
spdistdf <- rbindlist(lapply(unique(stlocsdf$location_id), function(x){
loc_ref <- locs_to_merge[location_id == x]
## Use reference super region and region
stlocsdf$ref <- x
## Want just spdist of 0 and 1 for if country/if not country
copy(prep_locs)[location_id == x, spdist := 0][location_id != x, spdist := 1][, ref := x]
}))
zeta <- 0.94
## Assign weights relative to how many input sources there are and whether they are equal to predicted countries
## Now weight adds up to 1
## Calculate residual
for (l in unique(spdistdf$ref)){
spdistdf[spdist == 0 & ref == l, spweight := zeta][spdist == 1 & ref == l, spweight := (1-zeta)/nrow(spdistdf[ref == l & spdist == 1])] ## Divide by number of other sources
}
################## Age weighting #############
## Get out individual ages (similar to stlocsdf)
st_agesdf <- data.table(ages = unique(firststagedf$age_group_id))
st_agesdf <- st_agesdf[ages != 33]
ref_ages <- data.table(ref_age = unique(firststagedf$age_group_id))
ref_ages <- ref_ages[ref_age != 33]
## Calculate distance
st_agesdf[, age_group_position := (factor(ages, levels = c(164, 28, 5:20, 30:32, 235)))]
ref_ages[, ref_age := (factor(ref_age, levels = c(164, 28, 5:20, 30:32, 235)))]
## Map and calculate distances
st_agesdf <- st_agesdf[, .(ref_age = ref_ages$ref_age),
by = .(ages, age_group_position)]
st_agesdf[, age_dist := abs(as.numeric(age_group_position)-as.numeric(ref_age))]
omega <- 0.5
st_agesdf[, age_wt := 1/(exp(omega*abs(age_dist)))]
st_agesdf$age_group_position <- NULL
setnames(st_agesdf, 'ages', 'age_group_id')
st_agesdf_1 <- copy(st_agesdf)
residsdf <- lri[is_outlier == 0, .(location_id, location_name, sex_id, age_start, age_group_id,
cf2, pred, pred_resid_log)]
stpreddf <- rbindlist(lapply(unique(spdistdf$ref), function(x){
weight_df <- data.table()
for (age in unique(residsdf$age_group_id)){
## Apply age map
age_set_1 <- st_agesdf[age_group_id == age]
resid_subset <- residsdf[, .(sex_id, location_id, pred_resid_log, as.factor(age_group_id))] %>%
setnames('V4', 'ref_age')
resid_subset <- merge(resid_subset, age_set_1, by = 'ref_age')
resid_subset[, age_wt := age_wt/sum(age_wt)]
## Merge on for single loc and age, getting weight for that individual age and location
## Have input data going into the Taiwan prediction at a single age
## Merge on space weights
subset_1 <- spdistdf[ref == x]
newdf_1 <- merge(resid_subset, subset_1, by = 'location_id')
#newdf_1 <- merge(residsdf[age_group_id == age], subset_1, by = 'location_id')
## merge on age weights
newdf_1[, age_space_weight := spweight*age_wt] ## calculate net weight
## Collapses to a single value with the location, sex, and age, along with the weighted residual
test <- newdf_1[, .(weighted_resid_0.5 = weighted.mean(pred_resid_log, w = age_space_weight, na.rm = TRUE)),
by = .(ref,sex_id, age_group_id)]
setnames(test, 'ref', 'location_id')
weight_df <- rbind(weight_df, test)
}
return(weight_df)
})) %>% merge(unique(lri[, .(age_group_id, age_start, age_end)]))
print('STPREDDF')
print(nrow(stpreddf))
firststagedf <- savedfs[draw_num][[1]]
print('FIRSTSTAGEDF')
print(nrow(firststagedf))
preddf <- merge(firststagedf[, .(location_id, sex_id, age_start, draw_pred)], stpreddf, by = c('location_id', 'sex_id', 'age_start'))
preddf[, log_stpred := draw_pred + weighted_resid_0.5]
preddf <- merge(preddf, locs_to_merge[, c('location_id', 'location_name')], by = 'location_id')
preddf[location_name == 'United States', location_name := 'Marketscan']
preddf[, mod_incidence := exp(log_stpred)]
preddf[, draw := draw_num]
preddf[, year_id := 2010]
preddf[, bundle_id := bundle]
preddf <- preddf[, .(location_id, sex_id, age_start, age_end, mod_incidence, draw)]
return(preddf)
## Create and write
}, mc.cores = 5))
#draws_df <- copy(preddf)
casted <- dcast(draws_df, location_id + sex_id + age_start + age_end ~ draw, value.var = 'mod_incidence')
setnames(casted, grep('[[:digit:]]', names(casted), value = TRUE), paste0('incidence_', 0:999))
new <- Sys.time()-old
print(new)
casted$bundle_id <- bundle
print('WRITING DRAWS WIDE')
write_csv(casted, paste0('filepath'))
} else{
print('Not making draws')
preddf[, pred := predict(base_lme4, newdata = preddf, allow.new.levels = T)]
ggplot(data = lri[cf2 < 10000], aes(x = age_start, y = cf2)) +
geom_point(aes(y = exp(pred), color = 'prediction')) +
geom_point(aes(y = cf2, color = 'input_data')) +
facet_wrap(location_id ~ sex_id) +
## Do I need to add exp(1) to the residual??? Or something else
geom_segment(aes(xend = age_start, yend = exp(pred)))
firststagedf <- copy(preddf)
stlocsdf <- firststagedf[, 'location_id'] %>%
merge(locs, by = 'location_id') %>% unique
## Calculate space distance for each location
locs_to_merge <- get_location_metadata(35)
locs_to_merge <- locs_to_merge[, c('location_id', 'region_name', 'super_region_name', 'location_name')]
prep_locs <- lri[, c('location_id', 'region_name', 'super_region_name', 'location_name')] %>% unique
## Spits out data frame with distances from datapoints for the predictions
## Give ref to know what location it's referring to
############ Space weighting ##################
spdistdf <- rbindlist(mclapply(unique(stlocsdf$location_id), function(x){
loc_ref <- locs_to_merge[location_id == x]
## Use reference super region and region
stlocsdf$ref <- x
## Want just spdist of 0 and 1 for if country/if not country
copy(prep_locs)[location_id == x, spdist := 0][location_id != x, spdist := 1][, ref := x]
}, mc.cores = 5))
zeta <- 0.96
## Assign weights relative to how many input sources there are and whether they are equal to predicted countries
## Now weight adds up to 1
## Calculate residual
for (l in unique(spdistdf$ref)){
spdistdf[spdist == 0 & ref == l, spweight := zeta][spdist == 1 & ref == l, spweight := (1-zeta)/nrow(spdistdf[ref == l & spdist == 1])] ## Divide by number of other sources
}
################## Age weighting #############
## Get out individual ages (similar to stlocsdf)
st_agesdf <- data.table(ages = unique(firststagedf$age_group_id))
st_agesdf <- st_agesdf[ages != 33]
ref_ages <- data.table(ref_age = unique(firststagedf$age_group_id))
ref_ages <- ref_ages[ref_age != 33]
## Calculate distance
st_agesdf[, age_group_position := (factor(ages, levels = c(164, 28, 5:20, 30:32, 235)))]
ref_ages[, ref_age := (factor(ref_age, levels = c(164, 28, 5:20, 30:32, 235)))]
## Map and calculate distances
st_agesdf <- st_agesdf[, .(ref_age = ref_ages$ref_age),
by = .(ages, age_group_position)]
st_agesdf[, age_dist := abs(as.numeric(age_group_position)-as.numeric(ref_age))]
## Set omega and the age weights based on distance in age group id's
omega <- 0.5
st_agesdf[, age_wt := 1/(exp(omega*abs(age_dist)))]
st_agesdf$age_group_position <- NULL
setnames(st_agesdf, 'ages', 'age_group_id')
#age_map$ref_age <- NULL
## age_group_id.x is the group from the model
## ref_age_group is the group to merge onto
## Calculate predicted residual
## get weighted mean of spatial log
## Do I want weighting by sdi quintile?? I think that'd make sense
## So Taiwan would take in zero data from Phillipines
## Would make sense.... but we'll get to it
## resids: location id is where the actual data comes from
## ref refers to the predicted country that the weighted residual is going to affect
## Returns single weighted residual for each location
## Again, residuals in log space
residsdf <- lri[is_outlier == 0, .(location_id, location_name, sex_id, age_start, age_group_id, cf2, pred, pred_resid_log)]
#residsdf[age_group_id == 235, age_group_id := 33]
stpreddf <- rbindlist(mclapply(unique(spdistdf$ref), function(x){
weight_df <- data.table()
for (age in unique(residsdf$age_group_id)){
## Apply age map
age_set <- st_agesdf[age_group_id == age]
resid_subset <- residsdf[, .(sex_id, location_id, pred_resid_log, as.factor(age_group_id))] %>%
setnames('V4', 'ref_age')
## merge on age weights
age_weight_df <- merge(resid_subset, age_set, by = c('ref_age'))
## Subset by age, blown up with age weights
ages_subset <- age_weight_df[age_group_id == age]
## Scale to 1
ages_subset[, age_wt := age_wt/sum(age_wt)]
## all_age_weighted ia a dt of each input location with each age_group_id with the age_wt relative to the ref_ages ( so 4*20*20)
## Merge on for single loc and age, getting weight for that individual age and location
## Have input data going into the Taiwan prediction at a single age
## Merge on space weights
subset <- spdistdf[ref == x]
newdf <- merge(resid_subset, subset, by = 'location_id')
#newdf <- merge(newdf, age_weight_df, by = 'ref_age')
## merge on age weights
## Both 163 rows. It's literally the space weights + age weights
newdf <- merge(newdf, ages_subset, by = c('ref_age', 'sex_id', 'location_id', 'pred_resid_log'))
newdf[, age_space_weight := spweight*age_wt] ## calculate net weight
## Collapses to a single value with the location, sex, and age, along with the weighted
newdf <- merge(newdf, unique(lri[, c('age_group_id', 'age_start', 'age_end')]), by = 'age_group_id')
test <- newdf[, .(weighted_resid_0.5 = weighted.mean(pred_resid_log, w = age_space_weight, na.rm = TRUE)),
by = .(ref, sex_id, age_start)]
setnames(test, 'ref', 'location_id')
weight_df <- rbind(weight_df, test)
print(head(weight_df))
}
## ages_subset is the age weight for the ref age
return(weight_df)
}, mc.cores = 5))
## Plot weighted residuals
plot_data <- merge(lri[, c('location_id', 'sex_id', 'age_start','cf2', 'pred')], stpreddf, by = c('location_id', 'sex_id', 'age_start'))
ggplot(data = plot_data, aes(x = age_start, y = cf2)) +
geom_point(aes(y = cf2, color = 'input_data'), size = 3) +
facet_wrap(location_id ~ sex_id) +
geom_segment(aes(xend = age_start, yend = exp(pred + weighted_resid_0.5))) +
geom_point(data = plot_data, aes(y = exp(pred + weighted_resid_0.5)))
firststagedf <- firststagedf[year_id == 2010]
firststagedf[, exp_pred := exp(pred)]
preddf <- merge(firststagedf[, .(location_id, sex_id, age_start, pred)], stpreddf, by = c('location_id', 'sex_id', 'age_start'))
preddf[, log_stpred := pred + weighted_resid_0.5]
preddf[, modeled_cf2 := exp(log_stpred)]
if(interactive()) { ## SEE HOW PREDS COMPARE AGAINST THE FIRST STAGE
ggplot() + geom_point(data = lri[location_id == 16], aes(x = age_start, y = cf2), shape = 19, size = 3, alpha = 0.5) +
geom_point(data = preddf[location_id == 16], aes(x = age_start, y =exp(log_stpred), color = 'second_stage'), size = 1.15, color = 'blue') +
geom_point(data = preddf[location_id == 16], aes(x = age_start, y = exp(pred) + 1, color = 'first_stage'), size = 1.15, color = 'red') +
facet_wrap(~ sex_id)
}
|
65c53da615e8f01ce3b81e50dd3e4df153ce9e42 | f0bb7b739b8109def549b8acdbcb15cc79cc8d11 | /rolling-train-test.R | c7af015d3fc8bd30e163774d5735952f52ee3e57 | [] | no_license | vikasgupta1812/rsnippets | 2ba764b47334f33487768ca506eca5ab1835c792 | a4572b1ed5289de06c4cc8f7de5736e9e2b85043 | refs/heads/master | 2021-01-21T00:01:28.739303 | 2016-06-08T21:58:06 | 2016-06-08T21:58:06 | 60,504,464 | 0 | 0 | null | 2016-06-06T06:39:16 | 2016-06-06T06:39:14 | null | UTF-8 | R | false | false | 3,352 | r | rolling-train-test.R | # Description : Building models over rolling time periods
# Website : http://petewerner.blogspot.in/2013/09/building-models-over-rolling-time.html
doInstall <- TRUE # Change to FALSE if you don't want packages installed.
toInstall <- c("quantmod","kernlab")
if(doInstall){install.packages(toInstall, repos = "http://cran.r-project.org")}
lapply(toInstall, library, character.only = TRUE)
# NOTE(review): the two library() calls below are redundant -- the lapply above
# already attached both packages. Harmless, left in place.
library(quantmod)
library(kernlab)
# getSymbols() creates an xts object named GSPC in the global environment
getSymbols("^GSPC")
# ROC() with default type gives (log) returns of the daily closes
cl <- ROC(Cl(GSPC))
cl <- na.omit(cl)
###
#I have daily data, and want to build a model based on n weeks of previous data and see how it performs over m weeks going forward.
#
#First convert our data into what we want, in this case we are looking at log closes.
#Y is close at time t, x1 close at time t-1, x2 t-2 and so on.
###
data_prep <- function(data, lookback=5)
{
	# Build a supervised-learning frame from a univariate series: column Y is
	# the value at time t, and columns X1..X<lookback> hold the values at
	# t-1 .. t-lookback (via quantmod::Lag).
	lagged <- cbind(data, Lag(data, 1:lookback))
	colnames(lagged) <- c("Y", paste0("X", seq_len(ncol(lagged) - 1)))
	lagged
}
#head(cl)
# Build the lagged design matrix from the daily log returns, then drop the
# leading rows whose lag columns are NA.
data <- data_prep(cl)
data <- na.omit(data)
#head(data)
#for each subset of data, we further split it into 2 groups, a training set of "train" periods, and a test set of "test" periods
#will return a list with the train/test set
train_test_split <- function(data, train=4, test=1, period="weeks")
{
	# Split an xts series into a leading training window of `train` periods
	# followed by a test window of `test` periods, using the period boundaries
	# reported by xts::endpoints(). Errors out when too few periods exist.
	ep <- endpoints(data, on = period)
	needed <- train + test
	if (length(ep) < (needed + 1))
		stop(sprintf("wanted %d %s, only got %d", needed, period, length(ep) - 1))
	cut1 <- ep[train + 1]        # last row of the training window
	cut2 <- ep[needed + 1]       # last row of the test window
	list(train = data[1:cut1, ], test = data[(cut1 + 1):cut2, ])
}
#l <- train_test_split(data[1:30])
#once we have our list, we further split the test set x/y
#then we build the model, and see how it goes on our test set
run_model <- function(data, trainsz=4, testsz=1, period='weeks')
{
	# Fit an SVM (kernlab::ksvm, formula Y ~ all lags) on the training window
	# and predict the held-out test window. Returns a two-column matrix of
	# predicted vs actual Y values for the test rows.
	split <- train_test_split(data, trainsz, testsz, period)
	fit <- ksvm(Y~., split[["train"]])
	holdout <- split[["test"]]
	preds <- predict(fit, holdout[, -1])
	out <- cbind(preds, holdout[, 1])
	colnames(out) <- c("pred", "actual")
	out
}
#finally we have the main function, which loops through all the data
#and calls run_model, collecting the results
roll_model <- function(data, trainsz=4, testsz=1, period='weeks', verbose=FALSE, sinkfile=NA)
{
	# Walk a train/test window of (trainsz + testsz) periods forward across the
	# whole series, fitting a model on each window via run_model() and collecting
	# the out-of-sample predictions.
	#
	#   data     - series already prepared by data_prep()
	#   sinkfile - optional file capturing run_model()'s console output
	#              (e.g. '/dev/null' to silence fitting chatter)
	#
	# Returns a matrix with columns "pred" and "actual", rows in time order
	# (NULL when there are not enough periods for a single run).
	#how much data we need for each model run
	totsz <- trainsz + testsz
	#get the end point indexes (note endpoints() puts 0 first)
	ep <- endpoints(data, period)
	#we work "forward" from idx 1, so we need to stop a little early
	endlen <- length(ep) - totsz
	# Preallocate instead of growing with rbind() every iteration (was O(n^2));
	# seq_len() also guards the endlen < 1 case, where 1:endlen would loop.
	runs <- vector("list", max(endlen, 0))
	for (i in seq_len(endlen)) {
		startidx <- ep[i] + 1   #the starting index for this run
		endidx <- ep[i + totsz] #the end index for this run
		if (verbose && i %% 10 == 0)
			cat(sprintf("%.2f %d %d %d\n", i/endlen, i, startidx, endidx))
		datasub <- data[startidx:endidx,] #our data subset
		if (!is.na(sinkfile))
			sink(sinkfile)
		# run the model; the finally clause releases the sink even when
		# run_model() errors, so a failure cannot leave output redirected
		runs[[i]] <- tryCatch(
			run_model(datasub, trainsz, testsz, period),
			finally = if (!is.na(sinkfile)) sink()
		)
	}
	return(do.call(rbind, runs))
}
# Fit over 13-month training windows, testing 1 month ahead; fitting output is
# discarded via sinkfile (NOTE: '/dev/null' is POSIX-only).
res <- roll_model(data, trainsz=13, testsz=1, period="months", sinkfile='/dev/null')
#see how it went at predicting the direction
# direction hit: 1 when predicted and actual returns share the same sign
acc <- ifelse(sign(res[,1]) == sign(res[,2]), 1, 0)
cat(sprintf("accuracy: %.2f\n", sum(acc)/nrow(res))) |
6125584dd2165b2bf8b869c262dc4306f672b5b8 | 8830e3bbea7ce0aea0f709502a5e682a1a6bb041 | /4_hexagons.R | a45bcdeaa234ec2c38a9c66c10f0104e026685e8 | [] | no_license | zaaaana/30daymap | 24aab12f958e8dd2fdfc61b403d4ed81df6c8ee3 | b2bd8b6ab0cf2cb17ee118dcc35f8a04433a145e | refs/heads/master | 2023-01-19T22:28:52.971461 | 2020-12-01T16:38:04 | 2020-12-01T20:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,657 | r | 4_hexagons.R | # #30DayMapChallenge
# Día 4: hexágonos
# Temperatura superficial enero
# Fuente datos: Procesados y descargados de Google Earth Engine
# https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MOD11A1
# Autora: Stephanie Orellana (@sporella)
library(stars)
library(tidyverse)
library(sf)
library(extrafont)
# rcartocolor supplies the "Temps" palette used in the plot below
library(rcartocolor)
# font_import()
loadfonts()
# Cargar datos ------------------------------------------------------------
# Surface-temperature raster (per the file header: MODIS MOD11A1, January mean
# 2010-2020), polygonised and projected to UTM 19S (EPSG:32719) so the hex
# grid below is built in metres.
temp <- read_stars("data/ene_val_san_2010_2020.tif") %>%
  st_as_sf(as_points = FALSE, merge = TRUE) %>%
  st_transform(crs=32719) %>%
  rename(enero = 1)
# Commune boundaries restricted to region codes 4, 5, 6 and 13 (per the map
# subtitle these cover the Valparaiso/Metropolitana area -- confirm mapping).
comunas <- read_sf("data/comunas_chile.geojson") %>%
  st_transform(crs=32719) %>%
  filter(codregion %in% c(4,5,6,13))
# Hacer grilla hexagonal --------------------------------------------------
## Codigo original en: https://rpubs.com/dieghernan/beautifulmaps_I
## Adapted from that post: lay a hexagonal grid over the temperature polygons
## and transfer each polygon's attributes to the hexagons whose centroid falls
## inside it.
initial <- temp
initial$index_target <- 1:nrow(initial)
target <- st_geometry(initial)
# 5000-metre hexagons over the data extent (square = FALSE => hexagons)
grid <- st_make_grid(target,
                     5000,
                     crs = st_crs(initial),
                     what = "polygons",
                     square = FALSE
)
grid <- st_sf(index = 1:length(lengths(grid)), grid)
# Match each hexagon to a source polygon via its centroid; the inner join
# drops hexagons whose centroid lies outside the data.
cent_grid <- st_centroid(grid)
cent_merge <- st_join(cent_grid, initial["index_target"], left = F)
grid_new <- inner_join(grid, st_drop_geometry(cent_merge))
# Collapse hexagons that share a source polygon into one feature
# (FUN = min keeps the minimum of each attribute; geometries are combined).
hex_geom <-
  aggregate(
    grid_new,
    by = list(grid_new$index_target),
    FUN = min,
    do_union = FALSE
  )
# Re-attach the original (non-geometry) attributes and drop the helper index
hex_comb <-
  left_join(hex_geom %>% select(index_target), st_drop_geometry(initial)) %>%
  select(-index_target)
# Visualización -----------------------------------------------------------
# * Cortar área de interés ------------------------------------------------
# Clip the hexes to the selected communes, return to lon/lat (EPSG:4326) for
# plotting, and convert the raster digital numbers to degrees Celsius
# (scale factor 0.02, Kelvin offset 273.15 -- the MOD11A1 LST convention per
# the file header).
hex_comb_cut <- hex_comb %>%
  st_filter(comunas) %>%
  st_transform(crs = 4326) %>%
  mutate(grados = (enero * 0.02) - 273.15)
# * Límites para zoom -----------------------------------------------------
limx <- st_bbox(hex_comb_cut)[c(1, 3)] #+ c(-10000,+10000)
limy <- st_bbox(hex_comb_cut)[c(2, 4)] #+ c(-10000,+10000)
# Dark-background map: hex fill mapped to temperature, commune outlines on
# top, and the panel grid drawn over the data (panel.ontop = TRUE).
p <- ggplot()+
  geom_sf(data = hex_comb_cut, aes(fill = grados), colour = "transparent")+
  geom_sf(data = comunas, fill = "transparent", colour = "grey85", size = 0.3)+
  scale_fill_gradientn(colours = carto_pal(n = 7, "Temps"))+
  labs(title = "Temperatura Superficial Mes de Enero",
       subtitle = "MOD11A1 PROMEDIO 2010-2020\nRegiones Valparaíso y Metropolitana, Chile.",
       fill = "Temperatura [°C]",
       caption = "@sporella")+
  theme(text = element_text(family = "Arial Narrow", colour = "mediumturquoise"),
        plot.caption.position = "plot",
        plot.title.position = "plot",
        plot.title = element_text(size = 20, face = "bold"),
        panel.background = element_rect(fill = NA),
        plot.background = element_rect(fill = "grey33", colour = "grey33"),
        axis.text = element_text(colour = "mediumturquoise"),
        axis.ticks = element_line(colour = "mediumturquoise"),
        panel.grid = element_line(colour = "mediumturquoise", linetype = "dotted"),
        legend.background = element_rect(fill = "grey33"),
        legend.key = element_rect(fill = "grey33"),
        legend.text = element_text(colour = "mediumturquoise"),
        panel.ontop = TRUE)+
  guides(fill = guide_colourbar(
    title.position = "left",
    title.theme = element_text(
      angle = 90,
      family = "Arial Narrow",
      colour = "mediumturquoise",
      hjust = 0.5
    ),
  ))+
  coord_sf(crs = 4326, xlim = limx, ylim = limy)
# Write the final 6x6-inch PNG
ggsave(
  "plots/4_temp_ene.png",
  plot = p,
  device = "png",
  height = 6,
  width = 6,
  bg = "grey33"
)
|
9ba3e86df1dab44b21eef385f64716fa0d5cbaa3 | 4d2cb20823e6bb238be47574bfd4aa6c1973c9de | /HW9/HW9-33.R | de72905186bc5f2088935b06433da8906f9a5f2a | [] | no_license | praal/data_analysis_course | 072d289725430bb8980f556e96100ca15fb09623 | 882407822c638f4197cf179dd440b52fd5348c10 | refs/heads/master | 2020-03-22T23:27:46.839451 | 2018-07-22T21:49:51 | 2018-07-22T21:49:51 | 140,811,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 847 | r | HW9-33.R | library(readr)
library(stringr)
library(dplyr)

files = list.files("~/Downloads/class_data/stock_dfs")
n = length(files)

# Read one stock CSV and compute the daily traded value |Close - Open| * Volume.
read_trades = function(s){
  t = paste("~/Downloads/class_data/stock_dfs/" , s, sep = "")
  x = read_csv(t)
  x %>% select(Date, Close, Open, Volume) -> x
  x$Volume = as.integer(x$Volume)
  x %>% mutate(trade = abs(Close - Open) * Volume) %>% select(Date, trade)
}

# Bug fix: the original seeded `tot` with file 1 and then looped over 1:n,
# which re-read file 1 and counted its trades twice in the daily totals.
# Reading every file exactly once (and binding at the end instead of growing
# `tot` with rbind on each iteration) fixes that and avoids the O(n^2) copy.
tot = bind_rows(lapply(files, read_trades))

# Day with the largest total traded value across all stocks.
r = tot %>% group_by(Date) %>% summarise(total = sum(trade)) %>% arrange(-total)
head(r, 1)$Date
|
37881680028b980ee34a44c6bbaf67ad718b0f8e | fa571db675071e4b322f49d0d70702e148c24543 | /combineanova_tabs.R | 7fe4af385a4505423b29708e6faaf1d07e7f2a93 | [] | no_license | CYGUBICKO/hh | 49fcf28cde43d0908b50db45ebc9fef9acb5b293 | 198dcd66e4d948707c3fa3ebe177e54d647497ed | refs/heads/master | 2021-06-19T13:38:03.177664 | 2021-03-02T02:14:12 | 2021-03-02T02:14:12 | 187,945,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 673 | r | combineanova_tabs.R | #### ---- Project: APHRC Wash Data ----
#### ---- Task: Modeling real data ----
#### ---- Combine all anova tables ----
#### ---- By: Steve and Jonathan ----
#### ---- Date: 2020 Nov 03 (Tue) ----
library(dplyr)
# Each .rda is expected to define, in the current workspace, the object named
# after the file (garbage_anova, garbageP_anova, ...).
load("garbage_anova.rda")
load("garbageP_anova.rda")
load("water_anova.rda")
load("waterP_anova.rda")
load("toilet_anova.rda")
load("toiletP_anova.rda")
anova_tabs <- list(garbage_anova, garbageP_anova
	, water_anova, waterP_anova
	, toilet_anova, toiletP_anova
)
# Stack the six tables and collapse the outcome-specific "previous status"
# variable names into one common label, StatusP.
anova_tabs <- (bind_rows(anova_tabs)
	%>% mutate(vars = gsub("watersourceP|garbagedposalP|toilettypeP", "StatusP", vars))
)
head(anova_tabs)
save(file = "combineanova_tabs.rda"
	, anova_tabs
)
|
a230b464f52c8151993be68015b63fa3342007ed | c7fe6a88582766a710325d3a9fc6b7328c1cfd7a | /R/list_size.R | 5467264c79193cc83104c41d205f1e192e995732 | [] | no_license | fergustaylor/openprescribingR | 023c1b9cb30d269e3da1fc8329ec4f9b0e8d004b | 124f6d0ed4ce41b2fc8f2ccca5b1ac98f19964ac | refs/heads/master | 2021-01-16T00:10:06.271612 | 2018-04-24T18:08:39 | 2018-04-24T18:08:39 | 99,961,777 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,614 | r | list_size.R | #'Search for details about a CCG or practice by code or name. Returns values for all months available.
#'
#' @param list_size_by_code A practice or CCG code found using organisation_codes().
#'   When supplied, total list size is returned for that organisation.
#' @param ASTRO_PU_by_code A practice or CCG code found using organisation_codes().
#'   When supplied, ASTRO-PU items and cost are returned for that organisation.
#'   If both codes are supplied, \code{list_size_by_code} takes precedence.
#' @return A data.frame of values for all months available.
#' @importFrom magrittr "%>%"
#' @export
#' @examples
#' Total list size for all CCGs = list_size()
#' Total list size for all practices by practice code, or CCG code = list_size(list_size_by_code= "...")
#' ASTRO-PU cost and items for practices by practice code, or CCG code = list_size(ASTRO_PU_by_code= "...")
#' Or a variation of the above.
#' Read the [wiki](https://github.com/fergustaylor/openprescribingR/wiki) for more help.
list_size <- function(list_size_by_code = NULL, ASTRO_PU_by_code = NULL){
  # Build the org_type/keys query segment. The original used exists() on
  # intermediate variables, which can be fooled by same-named objects in an
  # enclosing environment, and concatenated two segments (a malformed URL)
  # when both arguments were given; an explicit if/else chain avoids both.
  if (!is.null(list_size_by_code)){
    variablesegment <- stringr::str_c("practice&org=", list_size_by_code, "&keys=total_list_size")
  } else if (!is.null(ASTRO_PU_by_code)){
    variablesegment <- stringr::str_c("practice&org=", ASTRO_PU_by_code, "&keys=astro_pu_items,astro_pu_cost")
  } else {
    variablesegment <- "ccg&keys=total_list_size"
  }
  url <- stringr::str_c("https://openprescribing.net/api/1.0/org_details/?org_type=", variablesegment, "&format=csv")
  # Close the text connection explicitly: read.csv() only closes connections
  # it opened itself, so the original leaked one connection per call.
  con <- textConnection(RCurl::getURL(url))
  on.exit(close(con), add = TRUE)
  read.csv(con)
}
d0fe97107a31b7981bfe9356495c080ea1d5436d | e1cbbf8791b0ac6d40f6d5b397785560105441d9 | /R/z.par2cdf.R | e7057b1910d503e75e826c7218ad4b53a3abd55a | [] | no_license | wasquith/lmomco | 96a783dc88b67017a315e51da3326dfc8af0c831 | 8d7cc8497702536f162d7114a4b0a4ad88f72048 | refs/heads/master | 2023-09-02T07:48:53.169644 | 2023-08-30T02:40:09 | 2023-08-30T02:40:09 | 108,880,810 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 763 | r | z.par2cdf.R | "z.par2cdf" <-
function(x,p,para,z=0,...) {
    # CDF of a zero-inflated fit: f = p + (1-p)*G(x) above the threshold z and
    # 0 at or below it, where G is the CDF of the fitted parent distribution.
    #
    #   x    - quantiles at which to evaluate the CDF
    #   p    - probability of a value at or below the threshold (scalar; only
    #          the first element is used)
    #   para - fitted parent distribution (passed to par2qua()/par2cdf())
    #   z    - threshold (scalar; only the first element is used)
    if(is.null(p)) {
      warning("p is NULL, this function will not assume p=0, returning NULL")
      return(NULL)
    }
    if(length(p) != 1) {
      warning("only the first element of scalar argument p will be used")
      p <- p[1]
    }
    if(length(z) != 1) {
      warning("only the first element of scalar argument z will be used")
      z <- z[1]
    }
    # assume f and para are valid and qlmomco() will check that anyway
    z.of.fit <- par2qua(0, para, ...)
    if(z.of.fit <= z) {
      warning("evidently inconsistent z argument relative to that of the ",
              "fitted distribution, returning NULL")
      # bug fix: the warning promised NULL but execution previously fell through
      return(NULL)
    }
    f <- p + (1-p)*par2cdf(x, para, ...)
    f[x <= z] <- 0
    names(f) <- NULL
    return(f)
}
|
de0d788a390a8290d7093ff636dcec29a4c019d0 | c88eca63f8093b70becb0fe268139262cc49deda | /subsetting_data.R | b5a7bd691439011df4b16fb33176b6858de9f815 | [] | no_license | amcmil/Data_manipulation_and_statistics | 497634d3a23a3cff8fc96af3bae8ec7dc647f96f | c392e2149216e46d082669b0434c604799bf9857 | refs/heads/master | 2021-01-21T18:50:40.498824 | 2017-05-23T18:41:05 | 2017-05-23T18:41:05 | 92,087,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 827 | r | subsetting_data.R | #Amy McMillan 05/22/17
#basic commands for subsetting data in R
#to keep samples in metadata table with variable = x or y. x and y are numbers in your metadata table. "|" denotes "OR"
keep<-mdata[mdata$"variable"==x | mdata$"variable"==y,]
#to keep samples in metadata table with variable1 = x and variable2 = y. "&" denotes "AND"
keep<-mdata[mdata$"variable1"==x & mdata$"variable2"==y,]
#to keep samples with variable1 strictly between y and x (i.e. y < variable1 < x)
keep<-mdata[mdata$"variable1"<x & mdata$"variable1">y,]
#to make new metabolite table with only samples in "keep"
met_new<-met_t[rownames(keep),]
#using not operator (!) to drop samples whose names appear in "ids"
met_rem<-met[!(rownames(met) %in% rownames(ids)),]
#to keep samples in "met" based on parameters in "mdata" use "which" operator
#(fixed typo: "a.data.frame" is not a function; as.data.frame was intended)
met_keep<-as.data.frame(met[which(mdata$"variable1"==x),])
|
16299e06f9695cf55a9d10c5de2edc6b80337d2e | 7a35925457082bbf7eb5d142b1043286863ebe40 | /R/readQN.R | d9bc99ae9dea14dbab62ca6b674c6bc133393065 | [] | no_license | timflutre/hierfstat | 9a64418ea32812aa828213da42790ad55a0dd89d | ae049d7427db93de71cb76fe281644972820a552 | refs/heads/master | 2022-04-11T17:56:26.758313 | 2020-03-26T21:07:52 | 2020-03-26T21:07:52 | 250,365,950 | 0 | 0 | null | 2020-03-26T20:35:34 | 2020-03-26T20:35:33 | null | UTF-8 | R | false | false | 2,079 | r | readQN.R | #################################
#' @title Read QuantiNemo extended format for genotype files
#'
#' @description Read QuantiNemo (\url{http://www2.unil.ch/popgen/softwares/quantinemo/}) genotype files extended format (option 2)
#'
#' @usage qn2.read.fstat(fname, na.s = c("NA","NaN"))
#' @param fname quantinemo file name
#' @param na.s na string used
#' @return dat a data frame with nloc+1 columns, the first being the population
#' to which the individual belongs and the next being the genotypes, one column per locus;
#' and ninds rows
#' @return sex the sex of the individuals
#' @author Jerome Goudet \email{jerome.goudet@@unil.ch}
#' @seealso \code{\link{read.fstat}}
#' @references \href{http://www2.unil.ch/popgen/softwares/quantinemo/2008_Neuenschwander_et_al_BioInf_quantiNEMO.pdf}{Neuenschwander S, Hospital F, Guillaume F, Goudet J (2008)}
#' quantiNEMO: an individual-based program to simulate quantitative traits with explicit
#' genetic architecture in a dynamic metapopulation Bioinformatics 24, 1552-1553.
#' @examples
#' dat<-qn2.read.fstat(system.file("extdata","qn2_sex.dat",package="hierfstat"))
#' sexbias.test(dat[[1]],sex=dat[[2]])
#' @export
########################################################################################
qn2.read.fstat <- function(fname, na.s = c("NA","NaN")) {
  # Parse a quantiNemo extended-format (option 2) genotype file: a header line
  # whose second value is the number of loci, one locus name per line, then one
  # individual per row (pop, genotype columns, age, sex, id). comment.char "_"
  # is kept from the original reader so text after an underscore is skipped.
  header <- scan(fname, n = 4)
  nloc <- header[2]
  locus_names <- scan(fname, what = character(), skip = 1, nlines = nloc)

  raw <- scan(fname, skip = nloc + 1, na.strings = na.s, comment.char = "_")
  geno <- data.frame(matrix(raw, ncol = nloc + 4, byrow = TRUE))

  # Keep adults only (age code 2) and recode sex 0 -> "M", 1 -> "F".
  adult <- geno[, nloc + 2] == 2
  sex_code <- geno[adult, nloc + 3]
  sex_label <- character(length(sex_code))
  sex_label[sex_code == 0] <- "M"
  sex_label[sex_code == 1] <- "F"

  genotypes <- geno[adult, 1:(nloc + 1)]
  names(genotypes) <- c("Pop", locus_names)
  list(dat = genotypes, sex = sex_label)
}
|
09c5eba42b132b93ebedac6ca0523f3bbf6681b9 | cbbf1cdfc053a7fe8983a484cc12b9cef3b03bf8 | /data-raw/china_statistical_yearbook/popn_survey_fraction.R | 9dfd6482c0e796f9e158886f5c3ae03167dcd2c3 | [
"MIT"
] | permissive | bayesiandemography/marital | 1201c070d2472753b0fae99617ae914e68a3fcec | 464bb7596c27b266d274fb313952bc9a377664cd | refs/heads/master | 2021-07-15T20:17:02.569154 | 2020-07-27T08:18:55 | 2020-07-27T08:18:55 | 192,147,452 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 232 | r | popn_survey_fraction.R |
popn_survey_fraction <- array(c(0.01325, 0.0155),
dim = 2,
dimnames = list(time = c(2005, 2015)))
save(popn_survey_fraction,
file = "data/popn_survey_fraction.rda")
|
66e857a31c1d79e2440321b163cfbfb007e5d496 | 083b79cd8efa8168f17dfd50a9ae1d799038f88c | /Scripts/Effects/Rates.R | 716bac0e5b2af2fb1f836e9182359a93666a6281 | [] | no_license | AleMorales/combinedstress | 2f96953fb89dd83901fb21b3a8db0bdabfd57541 | 7a54154df06eada8b8ed0ca743369e876b81de8c | refs/heads/master | 2022-12-19T13:25:51.363643 | 2020-09-18T05:52:17 | 2020-09-18T05:52:17 | 296,331,574 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,902 | r | Rates.R |
# Load packages and data --------------------------------------------------
library(tidyverse)
coefs = readRDS(file = "Intermediate/Rates/LinearFits.rds")
# Combine into single table
effects = do.call("rbind", coefs)
# Compute the different combinations to get the effects on RGR and LeafRate
effects = effects %>% group_by(Trait) %>%
mutate(# An-1
An1_HT = `Time:TreatmentHT`/Time,
An1_S = `Time:TreatmentS`/Time,
An1_PS = `Time:TreatmentPS`/Time,
An1_D = `Time:TreatmentD`/Time,
An1_HTD = `Time:TreatmentHTD`/Time,
An1_PSD = `Time:TreatmentPSD`/Time,
An1_HT_D = (`Time:TreatmentHTD` - `Time:TreatmentHT` - `Time:TreatmentD`)/Time,
An1_PS_D = (`Time:TreatmentPSD` - `Time:TreatmentPS` - `Time:TreatmentD`)/Time,
# Bay-0
Bay0_HT = (`Time:TreatmentHT` + `Time:GenotypeBay-0:TreatmentHT`)/(Time + `Time:GenotypeBay-0`),
Bay0_S = (`Time:TreatmentS` + `Time:GenotypeBay-0:TreatmentS`)/(Time + `Time:GenotypeBay-0`),
Bay0_PS = (`Time:TreatmentPS` + `Time:GenotypeBay-0:TreatmentPS`)/(Time + `Time:GenotypeBay-0`),
Bay0_D = (`Time:TreatmentD` + `Time:GenotypeBay-0:TreatmentD`)/(Time + `Time:GenotypeBay-0`),
Bay0_HTD = (`Time:TreatmentHTD` + `Time:GenotypeBay-0:TreatmentHTD`)/(Time + `Time:GenotypeBay-0`),
Bay0_PSD = (`Time:TreatmentPSD` + `Time:GenotypeBay-0:TreatmentPSD`)/(Time + `Time:GenotypeBay-0`),
Bay0_HT_D = (`Time:TreatmentHTD` + `Time:GenotypeBay-0:TreatmentHTD` -
`Time:TreatmentHT` - `Time:GenotypeBay-0:TreatmentHT` -
`Time:TreatmentD` - `Time:GenotypeBay-0:TreatmentD`)/(Time + `Time:GenotypeBay-0`),
Bay0_PS_D = (`Time:TreatmentPSD` + `Time:GenotypeBay-0:TreatmentPSD` -
`Time:TreatmentPS` - `Time:GenotypeBay-0:TreatmentPS` -
`Time:TreatmentD` - `Time:GenotypeBay-0:TreatmentD`)/(Time + `Time:GenotypeBay-0`),
# Col-0
Col0_HT = (`Time:TreatmentHT` + `Time:GenotypeCol-0:TreatmentHT`)/(Time + `Time:GenotypeCol-0`),
Col0_S = (`Time:TreatmentS` + `Time:GenotypeCol-0:TreatmentS`)/(Time + `Time:GenotypeCol-0`),
Col0_PS = (`Time:TreatmentPS` + `Time:GenotypeCol-0:TreatmentPS`)/(Time + `Time:GenotypeCol-0`),
Col0_D = (`Time:TreatmentD` + `Time:GenotypeCol-0:TreatmentD`)/(Time + `Time:GenotypeCol-0`),
Col0_HTD = (`Time:TreatmentHTD` + `Time:GenotypeCol-0:TreatmentHTD`)/(Time + `Time:GenotypeCol-0`),
Col0_PSD = (`Time:TreatmentPSD` + `Time:GenotypeCol-0:TreatmentPSD`)/(Time + `Time:GenotypeCol-0`),
Col0_HT_D = (`Time:TreatmentHTD` + `Time:GenotypeCol-0:TreatmentHTD` -
`Time:TreatmentHT` - `Time:GenotypeCol-0:TreatmentHT` -
`Time:TreatmentD` - `Time:GenotypeCol-0:TreatmentD`)/(Time + `Time:GenotypeCol-0`),
Col0_PS_D = (`Time:TreatmentPSD` + `Time:GenotypeCol-0:TreatmentPSD` -
`Time:TreatmentPS` - `Time:GenotypeCol-0:TreatmentPS` -
`Time:TreatmentD` - `Time:GenotypeCol-0:TreatmentD`)/(Time + `Time:GenotypeCol-0`),
# Lp2-6
Lp26_HT = (`Time:TreatmentHT` + `Time:GenotypeLp2-6:TreatmentHT`)/(Time + `Time:GenotypeLp2-6`),
Lp26_S = (`Time:TreatmentS` + `Time:GenotypeLp2-6:TreatmentS`)/(Time + `Time:GenotypeLp2-6`),
Lp26_PS = (`Time:TreatmentPS` + `Time:GenotypeLp2-6:TreatmentPS`)/(Time + `Time:GenotypeLp2-6`),
Lp26_D = (`Time:TreatmentD` + `Time:GenotypeLp2-6:TreatmentD`)/(Time + `Time:GenotypeLp2-6`),
Lp26_HTD = (`Time:TreatmentHTD` + `Time:GenotypeLp2-6:TreatmentHTD`)/(Time + `Time:GenotypeLp2-6`),
Lp26_PSD = (`Time:TreatmentPSD` + `Time:GenotypeLp2-6:TreatmentPSD`)/(Time + `Time:GenotypeLp2-6`),
Lp26_HT_D = (`Time:TreatmentHTD` + `Time:GenotypeLp2-6:TreatmentHTD` -
`Time:TreatmentHT` - `Time:GenotypeLp2-6:TreatmentHT` -
`Time:TreatmentD` - `Time:GenotypeLp2-6:TreatmentD`)/(Time + `Time:GenotypeLp2-6`),
Lp26_PS_D = (`Time:TreatmentPSD` + `Time:GenotypeLp2-6:TreatmentPSD` -
`Time:TreatmentPS` - `Time:GenotypeLp2-6:TreatmentPS` -
`Time:TreatmentD` - `Time:GenotypeLp2-6:TreatmentD`)/(Time + `Time:GenotypeLp2-6`)) %>%
dplyr::select(Trait, An1_HT, An1_S, An1_PS, An1_D, An1_HTD, An1_PSD, An1_HT_D, An1_PS_D,
Bay0_HT, Bay0_S, Bay0_PS, Bay0_D, Bay0_HTD, Bay0_PSD, Bay0_HT_D, Bay0_PS_D,
Col0_HT, Col0_S, Col0_PS, Col0_D, Col0_HTD, Col0_PSD, Col0_HT_D, Col0_PS_D,
Lp26_HT, Lp26_S, Lp26_PS, Lp26_D, Lp26_HTD, Lp26_PSD, Lp26_HT_D, Lp26_PS_D)
# Create average of genotypes
effects = mutate(effects,
Average_HT = (An1_HT + Bay0_HT + Col0_HT + Lp26_HT )/4,
Average_S = (An1_S + Bay0_S + Col0_S + Lp26_S )/4,
Average_PS = (An1_PS + Bay0_PS + Col0_PS + Lp26_PS )/4,
Average_D = (An1_D + Bay0_D + Col0_D + Lp26_D )/4,
Average_HTD = (An1_HTD + Bay0_HTD + Col0_HTD + Lp26_HTD)/4,
Average_PSD = (An1_PSD + Bay0_PSD + Col0_PSD + Lp26_PSD)/4,
Average_HT_D = (An1_HT_D + Bay0_HT_D + Col0_HT_D + Lp26_HT_D)/4,
Average_PS_D = (An1_PS_D + Bay0_PS_D + Col0_PS_D + Lp26_PS_D)/4)
# Split dataset across genotypes, add genotype, rename and rbind it
effects = map(c("An1", "Bay0", "Col0", "Lp26", "Average"), function(x) {
out = select(effects, Trait, contains(x))
names(out) = c("Trait", "HT", "S", "PS", "D", "HTD", "PSD", "HT_D", "PS_D")
mutate(out, Genotype = x)}) %>%
do.call("rbind", .)
# Reshape to long format
effects = pivot_longer(effects, c(-Trait, -Genotype),
names_to = "Effect", values_to = "Value")
effects = mutate(effects, mu = NA)
# Save results -------------------------------------------------------------
saveRDS(object = effects, file = "Intermediate/Rates/Effects.rds")
|
f50a5258928bca308154650b55bbb9d42224c56e | 6fb289b31fe41385d18819291ecdebcf840b43f5 | /RWebScrape/Test.R | 647e83a66d635f1e0309efecd2c761c493ab6209 | [] | no_license | CC-94/FinalYearProject | c934af6f67afb4ce27020cc68a52fc9fcbbb7c4d | ee850d52732e5b8bf420aacdc6c19ef3e5ad5e7a | refs/heads/main | 2023-05-01T12:34:39.148077 | 2021-05-16T13:20:10 | 2021-05-16T13:20:10 | 367,616,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 571 | r | Test.R | library(rvest)
library(dplyr)
# Scrape the Wikipedia "Luxury brands" category page once and reuse the parsed
# document for both the link text and the link targets.
link = "https://en.wikipedia.org/wiki/Category:Luxury_brands"
page = read_html(link)
# Brand / subcategory names listed on the page
name = page %>% html_nodes("#mw-pages a , #mw-subcategories a") %>% html_text()
# Matching absolute article URLs built from the relative hrefs
industry = page %>% html_nodes("#mw-pages a , #mw-subcategories a") %>%
  html_attr("href") %>% paste("https://en.wikipedia.org", ., sep="")
# Fetch one article page and return the text of its ".category , .org" nodes.
# Fixes: the parameter was being overwritten with a hard-coded debug URL, the
# parsed page was stored as "indsutry_page" (typo) but read as "industry_page"
# (an undefined-object error), and the function returned nothing useful.
get_industry = function(industry) {
  industry_page = read_html(industry)
  industry_title = industry_page %>% html_nodes(".category , .org") %>% html_text()
  industry_title
}
7971633d8833b5321bb67e9dc2e302c740d8f053 | eeea10b971ed75bf87305d7b4163cf355eac1240 | /RF LOWESS sim/Real Data Scripts/COMP.R | 847acd78a0126967a6198eb72630b7160b8e2d5d | [] | no_license | AndrewjSage/RF-Robustness | 42e0caa6cc5c1f46031f6a3b77e33a56dc4fc83b | bace62de6a191832c1a9d19462c140686a15bf1b | refs/heads/master | 2022-11-21T12:09:04.041716 | 2020-07-24T04:52:51 | 2020-07-24T04:52:51 | 106,871,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 912 | r | COMP.R | setwd("/work/STAT/ajsage")
#setwd("~/Box Sync/Iowa State/Research/Robustness of Random Forest/RFLOWESS sim/Real Data Scripts")
library(RFLOWESS)
dataset <- read.csv("COMP.csv")
# Tuning-parameter grid passed to Assess_Real_Data; the role of the leading
# 1000/100 values vs the fine 3..30 sequence is not evident here -- see the
# RFLOWESS package documentation.
parvec <- c(1000,100,seq(from=3, to=30, by=0.25))
#uncontaminated
#set.seed(02042017) #Important to keep seed same for all files, so we're dealing with same datasets
# NOTE(review): this first (p = 0, uncontaminated) Res is overwritten below and
# its save() is commented out, so the expensive run is currently discarded --
# confirm whether the uncontaminated results are still needed.
Res <- sapply(X=1:30, simplify="array", FUN=function(i){Assess_Real_Data(dataset, nfolds=11, p=0, ntrees=1000, ndsize=5, ntreestune=100, parvec=parvec, cvreps=1, cvfolds=10, tol=10^-6 )})
#save(Res, file="CompRes.Rdata")
#contaminated (p = 0.15 fraction of contaminated responses)
set.seed(02042017) #Important to keep seed same for all files, so we're dealing with same datasets
Res <- sapply(X=1:30, simplify="array", FUN=function(i){Assess_Real_Data(dataset, nfolds=11, p=0.15, ntrees=1000, ndsize=5, ntreestune=100, parvec=parvec, cvreps=1, cvfolds=10, tol=10^-6 )})
save(Res, file="CompRescont.Rdata")
bebb604444f3bfbf2bbbc65e7ba5a004123f0042 | abfa5f844935045a013dc55b04e9e2f2277ba0ed | /R/data.R | fd6fc0ca3de437dc0f0ee379a268e86b46c8a609 | [] | no_license | aleighbrown/dasper | c4994686643fd57ef27f7e08cb3e1f49d7478594 | 336d8584bbc2d5edddc39bb473a56f4cafaf28b1 | refs/heads/master | 2022-12-06T13:34:30.093844 | 2020-08-25T13:53:46 | 2020-08-25T13:53:46 | 276,595,172 | 0 | 0 | null | 2020-07-02T08:45:43 | 2020-07-02T08:45:43 | null | UTF-8 | R | false | false | 867 | r | data.R | #' Set of example junctions
#'
#' A dataset containing the example junction data for 2 case and 3 control
#' samples outputted from \code{\link{junction_load}}. The junctions have been
#' filtered for only those lying on chromosome 21 or 22.
#'
#' @format
#' [RangedSummarizedExperiment-class][SummarizedExperiment::RangedSummarizedExperiment-class]
#' object from \code{\link{SummarizedExperiment}} detailing the counts,
#' co-ordinates of junctions lying on chromosome 21/22 for 2 example samples
#' and 3 controls: \describe{ \item{assays}{matrix with counts for junctions
#' (rows) and 5 samples (cols)} \item{colData}{example sample metadata}
#' \item{rowRanges}{\code{\link[GenomicRanges]{GRanges}} object describing the
#' co-ordinates and strand of each junction} }
#'
#' @source generated using data-raw/junctions_example.R
"junctions_example"
|
c5c65ec548d279ed556320a7a7e3c237cfa89a94 | 37368f2eb54c09283edbcc6a14051bde22b2be6e | /R Programming/pollutantmean.R | 51f10f726fa80afbfe24ebaa31a74f982f26a21f | [] | no_license | EAVWing/Data-Science-Toolbox | ee9c2fb39e1507f24cd93261a3c4a480393d5c88 | 2a0ca33afd7cd2ee1eea21edc556abf4d79837ac | refs/heads/master | 2020-03-21T16:38:33.461772 | 2018-08-16T15:44:41 | 2018-08-16T15:44:41 | 138,783,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 437 | r | pollutantmean.R | pollutantmean <- function(directory, pollutant, id = 1:332){
fileList <- list.files(directory, full.names = TRUE)
dat <- data.frame()
i <- 1
for (i in 1:length(id)){
dat <- rbind(dat, read.csv(fileList[id[i]]))
i <- i + 1
}
p <- if(pollutant == "sulfate"){
2
}else{
3
}
mean(dat[,p], na.rm = TRUE)
}
|
fcc41b9bf1ed579ff73aff927e84c173e7fe4e78 | c255c8e7ed8057413fece1823ffe78ed2b88ba35 | /leek.r | 34adfb70c44f78afa191e11834d0a7d041002a6c | [] | no_license | soroosj/Getting-Data | a93b569c834010d8fc687cff8bfb214598300f47 | debfb4bbddf14fda169bb7d82b7cf3d5075e29f6 | refs/heads/master | 2021-05-09T18:52:03.764841 | 2018-03-04T20:23:28 | 2018-03-04T20:23:28 | 119,175,877 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 286 | r | leek.r | #load XML library
library(XML)
#define path to HTML file
url<-"http://biostat.jhsph.edu/~jleek/contact.html"
#download the HTML file to a character vector
doc <- readLines(url)
#calculate number of characters per code line
nchar(doc[10])
nchar(doc[20])
nchar(doc[30])
nchar(doc[100]) |
d61c715601cf2b9d78d1717d605307ea4abfe44f | 55cc71fbc75726044bc9843364bb9dc0430382f3 | /Assign_R.R | 524b8fdc0ee52685add6db4ed670e19d2fc9dcaa | [] | no_license | aayrm5/temp_add_to_version_control | 6c0eccf0b5319509d8e57db6d91d75a40e504c80 | 1ab7b4da66931801f63f40ce9cb72bc614df8d89 | refs/heads/master | 2020-12-28T00:51:49.427082 | 2020-02-10T00:05:03 | 2020-02-10T00:05:03 | 238,127,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 848 | r | Assign_R.R | rm=(list=ls())
setwd("E:/Riz/Edwisor/Rprog")
getwd()
install.packages(c("dplyr","plyr","reshape","ggplot2","data.table"))
df=read.csv("IMDB_data.csv", header=TRUE)
#Removing the Row2
df1=read.csv("IMDB_data.csv", header=TRUE)[-2,]
#Extracting unique values in Genre
unique(df1$Genre)
#Count of unique values in Genre
length(unique(df1$Genre))
#Storing the length of unique value count in a data frame with index key:
datafile=as.data.frame(length(unique(df1$Genre)))
#Checking the type of variable
typeof(df1$imdbVotes)
typeof(df1$imdbRating)
#Converting required data type
df1$imdbVotes=as.numeric(df1$imdbVotes)
df1$imdbRating=as.numeric(df1$imdbRating)
#Sorting Genre by its name
df1=df1[order(df1$Genre),]
#Creating new variable
new_v=with(df1,(df1$imdbRating-df1$imdbVotes)^2)
write.csv(df1,"IMDB_data_assgnmt.csv",row.names = FALSE)
|
182a807e32863d800aeacddf1013dc0ebdd310be | 4191b75c5ef63767a18e4a80d09294204ab94489 | /R/cost_functions.R | 7a0c1742b00b3afeb7ccae5056987c57bb62778a | [] | no_license | kaerosen/tilemaps | 64e33ab1b5a1d39240477f92404d999daf1bba8f | 153f2499ceddd43ed669aea65619ef9f4630c853 | refs/heads/master | 2022-11-16T23:09:04.829716 | 2020-07-13T19:56:40 | 2020-07-13T19:56:40 | 250,876,182 | 39 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,119 | r | cost_functions.R | # location cost
# Location cost: average distance between each transformed centroid and
# its corresponding tile centroid, rescaled by s.
location_cost <- function(transformed_centroids, tile_centroids, s) {
  # Pairwise (element-wise) distances between corresponding centroids.
  dists <- sf::st_distance(transformed_centroids, tile_centroids, by_element = TRUE)
  # Average, scale by s, and drop the units attribute to return a plain number.
  as.numeric(mean(dists) / s)
}
# adjacency cost
# Adjacency cost: for each region, the fraction of its original
# neighbours that are NOT among its neighbours in the tile map,
# averaged over all regions (0 = every adjacency preserved).
#
# original_neighbors, tile_neighbors: parallel lists with one vector of
# neighbour indices per region.
adjacency_cost <- function(original_neighbors, tile_neighbors) {
  # seq_along() instead of 1:length(): the original `1:length(...)` loop
  # evaluates to c(1, 0) for an empty list and then fails with a
  # subscript error; vapply() also avoids hand-indexing an accumulator
  # and guarantees a numeric result per region.
  missing <- vapply(
    seq_along(original_neighbors),
    function(i) 1 - mean(original_neighbors[[i]] %in% tile_neighbors[[i]]),
    numeric(1)
  )
  mean(missing)
}
# angle (relative orientation) cost
# Angle (relative orientation) cost: for every region, the mean angle
# between (a) the line joining the region's original centroid to each
# neighbour's original centroid and (b) the corresponding line in the
# tile map; the per-region means are then averaged over all regions.
angle_cost <- function(original_centroids, tile_centroids, original_neighbors) {
  original_coords <- data.frame(sf::st_coordinates(original_centroids))
  tile_coords <- data.frame(sf::st_coordinates(tile_centroids))

  # Angle between two lines given by their slopes; either slope may be
  # +/-Inf (vertical line).  The original code only special-cased an
  # infinite tile slope, so a vertical line in the *original* map
  # produced NaN via (Inf - s2) / (1 + Inf * s2); both-infinite pairs
  # (two vertical lines, true angle 0) also returned NaN.
  line_angle <- function(s1, s2) {
    if (is.infinite(s1) && is.infinite(s2)) {
      0
    } else if (is.infinite(s1)) {
      atan(abs(1 / s2))
    } else if (is.infinite(s2)) {
      atan(abs(1 / s1))
    } else {
      atan(abs((s1 - s2) / (1 + s1 * s2)))
    }
  }

  region_means <- rep(0, length(original_centroids))
  for (i in seq_along(original_centroids)) {
    nbrs <- original_neighbors[[i]]
    angle <- rep(0, length(nbrs))
    for (j in seq_along(nbrs)) {
      k <- nbrs[j]
      # slope of the line from centroid i to neighbour k, in the
      # original map and in the tile map respectively
      slope1 <- (original_coords$Y[k] - original_coords$Y[i]) /
        (original_coords$X[k] - original_coords$X[i])
      slope2 <- (tile_coords$Y[k] - tile_coords$Y[i]) /
        (tile_coords$X[k] - tile_coords$X[i])
      angle[j] <- line_angle(slope1, slope2)
    }
    region_means[i] <- mean(angle)
  }
  mean(region_means)
}
# roughness cost
roughness_cost <- function(square, tile_map) {
# find number of edges of each tile
n <- ifelse(square == TRUE, 4, 6)
# find number of tiles
R <- length(tile_map)
# find number of shared edges
m <- 2*sum(sf::st_geometry_type(sf::st_intersection(tile_map)) == "LINESTRING")
# find minimum perimeter
a <- ifelse(square == TRUE, 1, 3*sqrt(3)/2)
P <- 2*sqrt(pi*a*R)
# calculate cost
(n*R - m - P) / P
}
|
797f1f5bb2d473baf5e56634bd3bd19e26d90334 | f3913e6f5d7897f56133e794c6e831e7f3e4f162 | /man/easyanova-package.Rd | 4a440a8c4db7e7c39157c0884dbe22040a86b3a9 | [] | no_license | cran/easyanova | e52165d506c13b073f3779d8fa1bad6d399b9d3c | 4fca62b80f040fb30e90c096b214471027eafca7 | refs/heads/master | 2022-09-06T00:01:59.380507 | 2022-06-25T17:00:02 | 2022-06-25T17:00:02 | 17,695,699 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,473 | rd | easyanova-package.Rd | \name{easyanova-package}
\alias{easyanova-package}
\alias{easyanova}
\docType{package}
\title{
Analysis of Variance and Other Important Complementary Analyzes
}
\description{
Perform analysis of variance and other important complementary analyzes. The functions are easy to use. Performs analysis in various designs, with balanced and unbalanced data.
}
\details{
\tabular{ll}{
Package: \tab easyanova\cr
Type: \tab Package\cr
Version: \tab 8.0\cr
Date: \tab 2022-06-24\cr
License: \tab GPL-2\cr
}
}
\author{
Emmanuel Arnhold <emmanuelarnhold@yahoo.com.br>
}
\references{
CRUZ, C.D. and CARNEIRO, P.C.S. Modelos biometricos aplicados ao melhoramento genetico. 2nd Edition. Vicosa, UFV, v.2, 2006. 585p.
KAPS, M. and LAMBERSON, W. R. Biostatistics for Animal Science: an introductory text. 2nd Edition. CABI Publishing, Wallingford, Oxfordshire, UK, 2009. 504p.
SAMPAIO, I. B. M. Estatistica aplicada a experimentacao animal. 3nd Edition. Belo Horizonte: Editora FEPMVZ, Fundacao de Ensino e Pesquisa em Medicina Veterinaria e Zootecnia, 2010. 264p.
SANDERS W.L. and GAYNOR, P.J. Analysis of switchback data using Statistical Analysis System, Inc. Software. Journal of Dairy Science, 70.2186-2191. 1987.
PIMENTEL-GOMES, F. and GARCIA C.H. Estatistica aplicada a experimentos agronomicos e florestais: exposicao com exemplos e orientacoes para uso de aplicativos. Editora Fealq, v.11, 2002. 309p.
RAMALHO, M. A. P.; FERREIRA, D. F. and OLIVEIRA, A. C. Experimentacao em Genetica e Melhoramento de Plantas. Editora UFLA, 2005, 322p.
}
\seealso{ea1, ea2, ec
}
\examples{
# Kaps and Lamberson(2009)
data(data1)
data(data2)
data(data3)
data(data4)
# analysis in completely randomized design
r1<-ea1(data1, design=1)
names(r1)
r1
# analysis in randomized block design
r2<-ea1(data2, design=2)
# analysis in latin square design
r3<-ea1(data3, design=3)
# analysis in several latin squares design
r4<-ea1(data4, design=4)
r1[1]
r2[1]
r3[1]
r4[1]
# analysis in unbalanced randomized block design
response<-ifelse(data2$Gain>850, NA, data2$Gain)
ndata<-data.frame(data2[-3],response)
ndata
r5<-ea1(ndata, design=2 )
r5
# multivariable response (list argument = TRUE)
t<-c('a','a','a','b','b','b','c','c','c')
r1<-c(10,12,12.8,4,6,8,14,15,16)
r2<-c(102,105,106,125,123,124,99,95,96)
r3<-c(560,589,590,658,678,629,369,389,378)
d<-data.frame(t,r1,r2,r3)
results=ea1(d, design=1, list=TRUE)
names(results)
results
results[1][[1]]
names(results[1][[1]])
}
|
ac25bd26b44e0d7a9d1e637c11a7f75bd9f91229 | 5273a49a586b3f4b67aab56daef7d763e40d13ef | /Descargapp/servidor/app.R | d3e423e267d2bfe69338ed0865e2b8a38892f59d | [] | no_license | Politica-y-redes-sociales/Interfaz-Grafica-Opazo | b467ffd15f95f90bc14f1b026d7a39d69a8d65bb | 097ba5cc5ba8d37f3834d8e392ee603a8c94953f | refs/heads/main | 2023-07-07T18:55:01.153409 | 2021-08-10T16:44:22 | 2021-08-10T16:44:22 | 394,715,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | app.R | library(shiny)
ui<- function(){}
server <- function(input, output)
{
}
# Run the application
shinyApp(ui,server = server)
|
9793380b02342517288f3cee5080f1be7f3776f1 | c717070acb36b6e68d8effbdc27c3f6860c464ba | /RSAT_problem/differentstrands.r | 6030e020f89959288c29a265ae13eb2712148546 | [] | no_license | aidaghayour/FPWManalysis | 0479171fb3f4bb0a07690b5d4f6f738d947e8793 | 75ef6427d406df85fb3a7426269f744ddc26237a | refs/heads/master | 2021-01-05T06:42:50.889220 | 2020-02-25T19:35:16 | 2020-02-25T19:35:16 | 240,918,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,021 | r | differentstrands.r | cebpb <- read.delim("~/Documents/R/fpwm-Thesis-new/RSAT problem/Reverse complemetn/CEBPB JASPAR format (reverse complement).ft", header=FALSE, comment.char="#")
# Keep only columns 4, 5, 6 and 9 of the raw RSAT feature table; after
# this cbind they become columns 1-4 of `cebpb`.  Presumably these are
# an ID plus start, end and p-value -- TODO confirm against the .ft format.
cebpb <- data.frame(cbind(cebpb[,4],cebpb[,5], cebpb[,6],cebpb[,9]))
# Columns 2-4 arrive as factors (pre-R-4.0 stringsAsFactors behaviour --
# verify under the R version in use), so convert via character to numeric.
cebpb[,4]<-as.numeric(as.character(cebpb[,4]))
cebpb[,3]<-as.numeric(as.character(cebpb[,3]))
cebpb[,2]<-as.numeric(as.character(cebpb[,2]))
# Transform column 4 (p-values) to the -log10 scale used for plotting.
cebpb[,4]<-(-log10(cebpb[,4]))
# Midpoint of the interval given by columns 2 and 3 (start/end positions).
center_pos.cebpb <- rowMeans(cebpb[,2:3])
cebpb_box <- data.frame(pos = center_pos.cebpb, pval = cebpb[,4])
# Bin the centre positions into width-5 bins over [-200, 0] for the boxplot.
binMap.cebpb <- cut( cebpb_box$pos, breaks = seq(-200,0, by = 5), labels = seq(-200,-1, by = 5 ))
boxplot(cebpb_box$pval~binMap.cebpb,ylab="-log10 Pval",xaxt="n",main="Comparision between CEBPB matrix from JASPAr and its reverse complement on RSAT",ylim=c(3,8),col = rgb(red = 1, green = 0, blue = 0, alpha = 0.3), lty=3, pch=3)
cebpb <- read.delim("~/Documents/R/fpwm-Thesis-new/RSAT problem/Reverse complemetn/CEBPB JASPAR format (Positive strand).ft", header=FALSE, comment.char="#")
cebpb <- data.frame(cbind(cebpb[,4],cebpb[,5], cebpb[,6],cebpb[,9]))
cebpb[,4]<-as.numeric(as.character(cebpb[,4]))
cebpb[,3]<-as.numeric(as.character(cebpb[,3]))
cebpb[,2]<-as.numeric(as.character(cebpb[,2]))
cebpb[,4]<-(-log10(cebpb[,4]))
center_pos.cebpb <- rowMeans(cebpb[,2:3])
cebpb_box <- data.frame(pos = center_pos.cebpb, pval = cebpb[,4])
binMap.cebpb <- cut( cebpb_box$pos, breaks = seq(-200,0, by = 5), labels = seq(-200,-1, by = 5 ))
boxplot(cebpb_box$pval~binMap.cebpb,ylab="-log10 Pval",xaxt="n",ylim=c(3,8),col = rgb(red = 0, green = 0, blue = 1, alpha = 0.3),add = TRUE, lty=1)
legend(0,7, c("Default","reverse compliment","+ : The compliment", "O : Default", ".... : The compliment", "____ : Defualt"),lty=c(1,1), lwd=c(2.5,2.5),col=c("blue","red","black","yellow","green","orange"),density = 20,cex = 0.75)
############################# Frequency
cebpb <- read.delim("~/Documents/R/fpwm-Thesis-new/RSAT problem/Reverse complemetn/CEBPB JASPAR format (reverse complement).ft", header=FALSE, comment.char="#")
reverse <- data.frame(cbind(cebpb[,4],cebpb[,5], cebpb[,6],cebpb[,9]))
cebpb <- read.delim("~/Documents/R/fpwm-Thesis-new/RSAT problem/Reverse complemetn/CEBPB JASPAR format (Positive strand).ft", header=FALSE, comment.char="#")
positive <- data.frame(cbind(cebpb[,4],cebpb[,5], cebpb[,6],cebpb[,9]))
ggplot() +geom_histogram(data = reverse,aes(X3),binwidth = 5,alpha=.2, fill="grey") +
geom_freqpoly(data = reverse,aes(X3),binwidth = 5,col="red")+ geom_freqpoly(data = positive,aes(X3),binwidth = 5,col="blue") +theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))+geom_density(alpha=.2, fill="#FF6666")+labs(x = "Sequence Start Point")+labs(title = "Comparison between Matrix (blue) and its reverse compliment (red)")+ geom_line(size = 2)
|
6612ca8441750d4f16d9caf3aa2d134a0c8e04e6 | fbe8f1d0a11dceee69dfdcc391c877904289b82b | /R_scripts/data_processing/CZ_additional_processing.R | a6eacbe7d27036339bb5b81b679a3f6f24e4ea2a | [] | no_license | victorabelmurcia/V4Lab_Analyses | 30f036c90e122ef7922c8356d7e9ac9591c29640 | 406a0e727825d08e2600c03d60acef6e03a12cde | refs/heads/master | 2021-01-16T20:46:58.226818 | 2015-05-24T21:30:08 | 2015-05-24T21:30:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,209 | r | CZ_additional_processing.R | #################################################################
### Additional processing of the Czecg dataset; it contains: ###
### - classification of respondents' study programmes ###
### - removal of respondents according to age/edu criteria ###
#################################################################
### Load data
load(normalizePath("./Data/MainData/CZ_main_correct.RData"))
### Save it under a more easy-to-type name
data <- data_cz_correct
### Classification of the respondents' study programms
### They are grouped into four categories:
### - social sciences and humanities (SSH)
### - art programmes (Art)
### - economics, bussiness and management studies, and finance and actuarial science (EBMF)
### - STEM programmes (science, engineering, technology and math)
###
### Classification is conducted as follows:
###
### SSH <---- Administration (2 respondents)
### <---- Demography (11 respondents)
### <---- Diplomacy (3 respondents)
### <---- Internationl Relations (26 respondents)
### <---- Journalism (3 respondent)
### <---- Law (6 respondents)
### <---- Political Science (5 respondnts)
### <---- PR (23 respondents)
### <---- Religion Studies (1 respondents)
### <---- Sociology (34 respondent)
###
### Art <---- Art (13 respondents)
###
### EBMF <---- Bussiness (7 respondents)
### <---- Econometrics (1 respondent)
### <---- Economy (59 respondents)
### <---- Finance/Actuarial Science (27 respondents)
### <---- Management (36 respondents)
###
### STEM <---- Math/CS (62 respondents)
###
### Classification code
eduprog4 <- as.character(data$uni_programme)
eduprog4[grep("Math.*CS", eduprog4, perl = TRUE)] <- "STEM"
eduprog4[grep("Bussi|Econom|Fina*.Actu|Manag", eduprog4, perl = TRUE)] <- "EBMF"
eduprog4[grep("STEM|Art|EBMF", eduprog4, perl = TRUE, invert = TRUE)] <- "SSH"
### Save the result as a new variable in the main dataset
data$eduprog4 <- factor(eduprog4)
### Additionally another classification has been prepared, in which Art and SSH are added together to make one group (SSHA)
eduprog3 <- as.character(data$eduprog4)
eduprog3[grep("^SSH$|^Art$", eduprog3, perl = TRUE)] <- "SSHA"
### Save the result as a new variable in the main dataset
data$eduprog3 <- factor(eduprog3)
################################
### Selection of respondents ###
################################
### Since the research is focused on the typical population of university students two selection criteria has been adopted:
### - respondents have to be 18 to 30 years old
### - respondents have to be enrolled in a university BA or MA programme (or equivalent)
### Check the first criterion
which(data$age > 30)
### three respondents have to be excluded
data <- data[-which(data$age > 30), ]
### Check the second criterion
which(data$year_at_uni == "PHD")
### Nothing to remove
#####################
### Save new data ###
#####################
### Rename the new dataset
data_cz_select <- data
### Save as a .txt file
### field separator is set to "\t"
write.table(data_cz_select, sep = "\t", row.names = TRUE,
file = normalizePath("./Data/MainData/CZ_selected.txt"))
### Save as an R data object
save(data_cz_select, file = normalizePath("./Data/MainData/CZ_selected.RData"))
### Clean the workspace
### (optional: uncomment to remove all objects from RStudio working memory)
# rm(list = ls())
### !!! <--- END OF SCRIPT ---> !!! ###
### Session info
# sessionInfo()
#
# R version 3.2.0 (2015-04-16)
# Platform: x86_64-pc-linux-gnu (64-bit)
# Running under: Ubuntu 14.04.2 LTS
#
# locale:
# [1] LC_CTYPE=pl_PL.UTF-8 LC_NUMERIC=C LC_TIME=pl_PL.UTF-8 LC_COLLATE=pl_PL.UTF-8
# [5] LC_MONETARY=pl_PL.UTF-8 LC_MESSAGES=pl_PL.UTF-8 LC_PAPER=pl_PL.UTF-8 LC_NAME=C
# [9] LC_ADDRESS=C LC_TELEPHONE=C LC_MEASUREMENT=pl_PL.UTF-8 LC_IDENTIFICATION=C
#
# attached base packages:
# [1] stats graphics grDevices utils datasets methods base
#
# loaded via a namespace (and not attached):
# [1] tools_3.2.0 |
78c4004c61e3752403da2162ff8175d12f0fda72 | b2f0d80d872a2a29b48d46beb6a23d4d9518e16a | /plot2.R | 5d50760f9a1b520b1d2d04a958d5baddb3646d80 | [] | no_license | minimenchmuncher/pm25Exploration | c190c68de5f47324350dfcc0748e96d4c4a800ac | 7915ebdd4b447eda3f55d818f918219069801a1f | refs/heads/master | 2021-01-10T06:42:47.465979 | 2016-03-12T20:41:43 | 2016-03-12T20:41:43 | 53,674,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 896 | r | plot2.R | # plot2.R #
library(dplyr)
# Universal Inputs #######
data_dir <- '~/Downloads/exdata_data_NEI_data/'
# Read data #####
if (!(exists('NEI'))) {
NEI <- readRDS(file.path(data_dir, 'summarySCC_PM25.rds'))
}
if (!(exists('SCC'))) {
SCC <- readRDS(file.path(data_dir, 'Source_Classification_Code.rds'))
}
# Question 2 ######
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008? Use the base plotting system to make a plot answering this question.
NEI_summary <- NEI %>%
filter(fips == 24510) %>%
group_by(year) %>%
summarise(Emissions = sum(Emissions))
plot(Emissions ~ year, data = NEI_summary, type = 'l', ylab = 'PM 2.5 Emissions (tons)', xlab = 'Year', main = 'Baltimore Emissions Over Time')
dev.copy(png,'plot2.png')
dev.off()
# it looks like emissions have declined from about 3,300 tons in 1999 to 1,900 tons in 2008. |
65669568e010de8d6bc17d824aebb1e288ed251e | 06b9d2ece554bda6b4402785bc9c7b7a627a6c2f | /R/compareModels.ParamEsts.R | df14657cb51e6db79cf9f54e214809dac53ec4b6 | [
"MIT"
] | permissive | wStockhausen/rTCSAM2015 | 4f2dd392b32d9a3ea9cce4703e25abde6440e349 | 7cfbe7fd5573486c6d5721264c9d4d6696830a31 | refs/heads/master | 2020-12-26T04:56:03.783011 | 2016-09-30T01:59:06 | 2016-09-30T01:59:06 | 26,103,387 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,174 | r | compareModels.ParamEsts.R | #'
#'@title Function to compare parameter values from different TCSAM2015 models.
#'
#'@description This function extracts and plots parameters values, together with their limits
#'(if any) and the posterior distributions implied by their estimated standard
#'errors from several TCSAM2015 models.
#'
#'@param tcsams - list of TCSAM2015 model results objects (each is a list with elements 'prsObj' and 'stdObj')
#'@param dp - percent difference between parameter value and upper/lower limits used to flag outliers
#'@param fac - number of std devs to extend uncertainty plots
#'@param nc - number of columns of plots per page
#'@param nr - number of rows of plots per page
#'@param showPlot - flag to show plots immediately
#'@param pdf - file name for printing plots to a pdf file (or NULL to print to screen)
#'@param verbose - flag (T/F) to print diagnostic info
#'
#'@return - list with dfr, vfr, and plots as elements
#'
#'@export
#'
compareModels.ParamEsts<-function(tcsams,dp=0.01,fac=2,
                            nc=3,nr=4,showPlot=TRUE,
                            pdf="ModelComparisons.Params.pdf",
                            verbose=FALSE){
    #extract dataframe with parameter estimates and info
    if (verbose) cat('Extracting params info\n')
    res<-extractModelResults.Params(tcsams,dp=dp,verbose=verbose);
    # NOTE(review): the separate uncertainty extraction below is disabled;
    # res$stdDFR from extractModelResults.Params() is used instead --
    # `fac` is therefore currently unused by this function.
    # #extract dataframe with parameter uncertainty info
    # if (verbose) cat("Extracting uncertainty info\n")
    # vfr<-extractModelResults.StdDevs(tcsams,fac=fac,verbose=verbose);
    #plot parameters as scalar values
    if (verbose) cat("Plotting parameter results\n")
    plots<-plotModelResults.ScalarParams(dfr=res$prsDFR,
                                         vfr=res$stdDFR,
                                         nc=nc,nr=nr,
                                         showPlot=showPlot,
                                         pdf=pdf,
                                         verbose=verbose);
    # Return invisibly so the (potentially large) list is not printed
    # when the function is called interactively; elements are the
    # parameter estimates (dfr), uncertainty info (vfr) and the plots.
    return(invisible(list(dfr=res$prsDFR,vfr=res$stdDFR,plots=plots)))
}
# resPar<-compareModels.ParamEsts(resLst,dp=0.01,fac=3,
# nc=3,nr=5,showPlot=TRUE)
|
3a5b2318070938678448de3a56bd4348b47e8e6e | b844fc764deff4c305d5a5499f78266f2ec817e9 | /man/confint.mylm.Rd | fe7c44e688b2dd8d519939d4a02cd60dbc0a9413 | [] | no_license | jenper/mylm | 0aa7c1e7498a35dc9225e26dc2f75f35ab5a808a | 785316d117b822c2edf9b63ff5d1c1e7f7902c22 | refs/heads/main | 2023-08-03T10:49:26.281973 | 2021-07-22T01:41:54 | 2021-07-22T01:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 649 | rd | confint.mylm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcs.R
\name{confint.mylm}
\alias{confint.mylm}
\title{Confidence intervals for parameters}
\usage{
\method{confint}{mylm}(object, parm = NULL, level = 0.95, ...)
}
\arguments{
\item{object}{object of class "mylm"}
\item{parm}{A specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered.}
\item{level}{The confidence level required (default = 0.95).}
\item{...}{additional arguments to be passed to methods}
}
\description{
Confidence intervals for parameters
}
|
22cd20e8b6d5f60f750a0d0f28454200928d4fe4 | f064ecae355e2eada3c7438247ba784129f8f028 | /datasciencecoursera/regression/galton.R | 9b466d947bb4256534bb76724df2d3f8a21b3448 | [
"CC0-1.0"
] | permissive | ArnulfoPerez/R | c0a1a43680dd486bd9d0f8379ae05057efcbc235 | 70fb8a57b19d86eceb7a4d5f75aec712bfebfff5 | refs/heads/master | 2022-11-06T16:00:57.404891 | 2020-06-28T07:04:19 | 2020-06-28T07:04:19 | 260,382,530 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 225 | r | galton.R | library(UsingR)
library(ggplot2)
data(galton)
library(reshape)
long <- melt(galton)
g <- ggplot(long,aes(x = value, fill = variable))
g <- g + geom_histogram(colour = "black", binwidth = 1)
g <- g + facet_grid(. ~ variable)
g |
fb73ed6e86e951abedea6269185d67e853ebc154 | 9c816995a8ec8a7596d1c5889bae0d061a797e18 | /man/plotFunctions.Rd | fc1e9a3e7ea105ce19bb899ef0a215251a9d598f | [
"MIT"
] | permissive | wuaipinglab/sitePath | 0ae7da9011bc7f7ad72f7ea35467765aa4b941a0 | e8e9d188413225a29fb6cb08948dacb4e75f6900 | refs/heads/master | 2022-10-03T09:00:43.991187 | 2022-09-26T07:48:32 | 2022-09-26T07:48:32 | 147,183,108 | 16 | 2 | MIT | 2021-07-22T03:06:47 | 2018-09-03T09:35:04 | R | UTF-8 | R | false | true | 3,319 | rd | plotFunctions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFunctions.R
\name{plot.phyMSAmatched}
\alias{plot.phyMSAmatched}
\alias{plot.lineagePath}
\alias{plot.parallelSites}
\alias{plot.fixationSites}
\alias{plot.sitePath}
\alias{plot.fixationIndels}
\alias{plot.fixationPath}
\title{Visualize the results}
\usage{
\method{plot}{phyMSAmatched}(x, y = TRUE, ...)
\method{plot}{lineagePath}(x, y = TRUE, showTips = FALSE, ...)
\method{plot}{parallelSites}(x, y = TRUE, ...)
\method{plot}{fixationSites}(x, y = TRUE, tipsGrouping = NULL, ...)
\method{plot}{sitePath}(x, y = NULL, select = NULL, showTips = FALSE, ...)
\method{plot}{fixationIndels}(x, y = TRUE, ...)
\method{plot}{fixationPath}(x, y = TRUE, ...)
}
\arguments{
\item{x}{The object to plot.}
\item{y}{Whether to show the fixation mutation between clusters. For
\code{lineagePath} object and \code{sitePath} object, it is deprecated and
no longer have effect since 1.5.4.}
\item{...}{Other arguments. Since 1.5.4, the function uses
\code{\link{ggtree}} as the base function to make plots so the arguments in
\code{plot.phylo} will no longer work.}
\item{showTips}{Whether to plot the tip labels. The default is \code{FALSE}.}
\item{tipsGrouping}{A \code{list} to hold the grouping of tips for how the
tree will be colored.}
\item{select}{For a \code{sitePath} object, it can have result on more than
one evolution pathway. This is to select which path to plot. The default is
\code{NULL} which will plot all the paths. It is the same as \code{select}
in \code{\link{plotSingleSite}}.}
}
\value{
A ggplot object to make the plot.
}
\description{
The plot function to visualize the return of functions in the
package. The underlying function applies \code{\link{ggplot2}}. The
function name \code{plot} is used to keep the compatibility with previous
versions, but they do not behave like the generic \code{\link{plot}}
function since 1.5.4.
A \code{\link{phyMSAmatched}} object will be plotted as a tree
diagram.
A \code{\link{lineagePath}} object will be plotted as a tree
diagram and paths are black solid line while the trimmed nodes and tips
will use gray dashed line.
A \code{\link{parallelSites}} object will be plotted as original
phylogenetic tree marked with parallel mutations attached as dot plot.
A \code{\link{fixationSites}} object will be plotted as original
phylogenetic tree marked with fixation substitutions.
A \code{sitePath} object can be extracted by using
\code{\link{extractSite}} on the return of \code{\link{fixationSites}}.
A \code{\link{fixationIndels}} object will be plotted as
original phylogenetic tree marked with indel fixation.
A \code{\link{fixationPath}} object will be plotted as a
\code{phylo} object. The tips are clustered according to the fixation
sites. The transition of fixation sites will be plotted as a phylogenetic
tree. The length of each branch represents the number of fixation mutation
between two clusters.
}
\examples{
data(zikv_tree)
data(zikv_align)
tree <- addMSA(zikv_tree, alignment = zikv_align)
plot(tree)
paths <- lineagePath(tree)
plot(paths)
parallel <- parallelSites(paths)
plot(parallel)
fixations <- fixationSites(paths)
plot(fixations)
sp <- extractSite(fixations, 139)
plot(sp)
x <- fixationPath(fixations)
plot(x)
}
|
813f5d1840335f910aaf099d65f0e1a3861dfae0 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/maelle/cooldissviz/viz.R | 811d3bb254698f9c02a11f83201c48d95fbc7857 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,322 | r | viz.R | library("ggplot2")
library("dplyr")
library("emojifont")
library("gganimate")
list.emojifonts()
library("lubridate")
date1 <- ymd("2012-10-08")
date2 <- ymd("2016-03-07")
difftime(date2, date1, units = "days")
load.emojifont('OpenSansEmoji.ttf')
data <- readr::read_csv2("gestation.csv") %>%
arrange(gestation) %>%
mutate(animal = factor(animal,
levels = animal[order(gestation,
decreasing = TRUE)],
ordered = TRUE))
p <- ggplot(data) +
geom_bar(aes(x = animal,
y = gestation,
frame = gestation,
cumulative = TRUE,
fill = color),
stat = "identity") +
scale_fill_manual(values = c("grey30",
"darkgoldenrod1")) +
geom_text(aes(x = animal,
y = gestation + 45,
frame = gestation,
cumulative = TRUE,
label = emoji(label)),
family="OpenSansEmoji", size=8) +
theme(axis.text.y=element_blank(),
axis.ticks=element_blank(),
text = element_text(size=20),
legend.position="none")+
coord_flip() +
xlab("Animal") +
ylab("Gestation in days")
gg_animate(p, "gestation.gif",
interval = c(rep(1,11), 4)) |
825b16f8bb418d65abb4d2037fe0b9303a706da2 | 4d969bc86afed6ede3596878ad5a7ecbcf5b3b5f | /files/JAM.R | 74503235d4c9b4b1f54286009ae9270de7346add | [] | no_license | GeneticResources/FM-pipeline | 9f3b2006fe1b0b782fb7cde370622029c05af654 | cf876c6f497a2eb1dffbcdb57eb9a4fbaf71509e | refs/heads/master | 2020-04-09T06:40:06.444431 | 2018-10-26T20:24:31 | 2018-10-26T20:24:31 | 160,122,171 | 1 | 0 | null | 2018-12-03T02:39:18 | 2018-12-03T02:39:18 | null | UTF-8 | R | false | false | 3,313 | r | JAM.R | # 9-2-2018 MRC-Epid JHZ
require(plink2R)
# require(snpStats)
require(R2BGLiMS)
require(methods)
require(openxlsx)
options(scipen=20, width=2000)
f <- Sys.getenv("f")
cat(f,"\n")
bed <- paste0(f,".bed")
bim <- paste0(f,".bim")
fam <- paste0(f,".fam")
# summary statistics
sumstats.name <- c("RS_ID","A1","A2","freqA1","b","se","P","N","chr","pos","SNP_ID")
# Fine-mapping of GWAS summary statistics with JAM (R2BGLiMS), writing text
# reports, a Manhattan plot and an Excel workbook per locus.
# NOTE(review): `f` (file stem) and `sumstats.name` (column names) must be
# defined earlier in this file, outside the visible chunk -- confirm upstream.
sumstats <- read.table(paste0(f,".dat"), as.is=TRUE, col.names=sumstats.name)
# marginal effect sizes plus the two SNP identifier columns
beta <- with(sumstats, b)
rsid <- with(sumstats, RS_ID)
snpid <- with(sumstats, SNP_ID)
# reference panel with mean substitution for (small) proportion of missing data
p <- read_plink(f)
# recode allele dosages as 2 - bed (flip to counts of the other allele)
R <- with(p, as.data.frame(2-bed))
# p <- read.plink(bed,bim,fam)
# R <- as(with(p,genotypes),"numeric")
# column-wise mean imputation of missing genotypes
R[] <- lapply(R, function(x) {
     x[is.na(x)] <- mean(x, na.rm = TRUE)
     x
})
X.ref <- R
# JAM modeling
# synthetic SNP labels snp1..snpK shared between betas, reference columns and priors
ssnpid <- paste0("snp", 1:length(beta))
names(beta) <- colnames(X.ref) <- ssnpid
# Beta-Binomial model-space prior over all SNPs
priors <- list("a"=1, "b"=length(beta), "Variables"=ssnpid)
# GWAS sample size (also used as tau); NOTE(review): hard-coded -- verify it matches the study
n <- 15234
j <- JAM(marginal.betas=beta, n=n, X.ref=X.ref, n.mil=5, tau=n, full.mcmc.sampling = FALSE, model.space.priors=priors)
save(j,file=paste0(f,".j"))
# posterior summaries, top models, 75% credible set and model-size Bayes factors
pst <- slot(j, "posterior.summary.table")
tm <- TopModels(j)
ssr <- data.frame(ssnpid=ssnpid, snpid=snpid, rsid=rsid)
cs <- CredibleSet(j, credible.percentile.threshold=0.75)
msbf <- ModelSizeBayesFactors(j)[[1]]
# text report: bare expressions rely on top-level auto-printing into the sink
sink(paste0(f, ".jam"))
pst
ssr
cat("\nCredible set\n")
cs
cat("\nModel size Bayes Factors\n")
msbf
sink()
sink(paste0(f, ".top"))
tm
sink()
# last column of TopModels holds the posterior probability; the rest are 0/1 SNP indicators
n.col <- ncol(tm)
n.snps <- n.col-1
post.prob <- tm[,n.col]
# number of SNPs selected in each top model
n.sel <- apply(tm[,1:n.snps],1,sum)
sink(paste0(f,".sum"))
cbind(n.sel,post.prob)
sink()
sink(paste0(f,".cs"))
cbind(subset(ssr,ssnpid%in%cs),subset(pst,rownames(pst)%in%cs))
sink()
# drop the .cs file again when the credible set is empty
if(identical(cs,character(0))) unlink(paste0(f,".cs"))
# SNPs selected by the single best model
tm1 <- tm[1,-n.col]
selected <- names(tm1[tm1==1])
# write a .sel report only for non-trivial best models (neither empty nor saturated)
if(n.sel[1]>0&n.sel[1]!=n.snps)
{
PostProb_model <- rep(post.prob[1],n.sel[1])
t <- cbind(subset(ssr,ssnpid%in%selected), PostProb_model, subset(pst,rownames(pst)%in%selected))
write.table(t,paste0(f,".sel"),row.names=FALSE,quote=FALSE)
}
png(paste0(f,".png"), units = 'in', width=18, height=12, res=300)
ManhattanPlot(j)
dev.off()
# bundle every artefact into one Excel workbook (openxlsx)
xlsx <- paste0(f,".xlsx")
wb <- createWorkbook(xlsx)
addWorksheet(wb, "ID")
writeDataTable(wb, "ID", ssr)
addWorksheet(wb, "TopModels")
writeDataTable(wb, "TopModels", as.data.frame(tm))
addWorksheet(wb, "Model.1")
PostProb_model <- rep(post.prob[1],n.sel[1])
writeDataTable(wb, "Model.1", cbind(subset(ssr,ssnpid%in%selected),PostProb_model,subset(pst,rownames(pst)%in%selected)))
addWorksheet(wb, "CredibleSet")
writeDataTable(wb, "CredibleSet", cbind(subset(ssr,ssnpid%in%cs),subset(pst,rownames(pst)%in%cs)))
addWorksheet(wb, "ModelSizeBayesFactors")
writeDataTable(wb, "ModelSizeBayesFactors", as.data.frame(msbf))
addWorksheet(wb, "posterior.summary.table")
writeDataTable(wb, "posterior.summary.table", cbind(ID=rownames(pst), as.data.frame(pst)))
addWorksheet(wb, "Manhattan.plot")
insertImage(wb, "Manhattan.plot", paste0(f, ".png"), width=18, height=12)
saveWorkbook(wb, file=xlsx, overwrite=TRUE)
# obsolete as it only deals with complete data
# cc <- complete.cases(t(R))
# beta <- beta[cc]
# X.ref <- R[,cc]
# ssnpid <- paste0("snp", 1:length(beta[cc]))
# ssr <- data.frame(ssnpid=ssnpid, snpid=snpid[cc], rsid=rsid[cc])
|
2535957b8090e49df29172d2736513f571de0832 | 3eece832206dca8130fdd5b1bced87fb752f3aef | /code/feat/5-gen-factor.R | 656dba48bc2a2d093ddc0dab0289b78ef97fa6ef | [] | no_license | salayatana66/Kaggle-BNP-2016 | 2457563bf35e70a4b907255cfa3ff04f93889a4c | 50bf9d37f0b4c275cce1573ddedb3eae3acd94dc | refs/heads/master | 2021-01-10T03:55:42.277959 | 2016-03-20T15:37:19 | 2016-03-20T15:37:19 | 54,325,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,352 | r | 5-gen-factor.R | ##########################################################################
# Transform and generate new factor variables
# It makes sense to impact some factors, but impacting v22 leads
# to overfitting; thus replace other factors by frequencies in the data
# (both relative and a factor version of them)
##########################################################################
library(bit64)
library(data.table) # more efficient
setwd('/Users/schioand/leave_academia/kaggle/bnp-paribas/code/feat')
# project-local helpers: extrapolate_char, letters_across, create_impacted,
# factor_to_freqs are defined in utils.R (not visible here)
source('../param_config.R')
source('../utils.R')
Alldf <- fread(paste(ParamConfig$feat_dir, "all-factor-raw-16-2-21.csv", sep = ''))
# `all_summary` (per-column level counts) comes from this .RData file
load(paste(ParamConfig$output_dir, 'raw-summaries-16-2-19.RData', sep = ''))
# extrapolate letters from columns with strings of length > 1
extrapolate_char(Alldf, col = which(names(Alldf) %in% c('v22', 'v56', 'v113', 'v125')))
# add # of each letter A--Z across factors
letters_across(Alldf, fcol = c(3:ncol(Alldf)))
# For letter factors with more that 10 levels, EXCLUDING v22, create posteriors
# For numerical factors it is not needed
# Informally we call this technique 'impacting'
to.impact <- setdiff(names(which(all_summary$`fac-levels` > 10)), 'v22')
create_impacted(Alldf, tcol = 2, fcol = which(names(Alldf) %in% to.impact)) #!! some levels have not been
# shared between training and testing
# numerical columns to go to factors
num.to.fac <- c('v38', 'v62', 'v72', 'v129')
# keep a numeric copy (prefixed Num_) before the factor conversion below
Alldf[, paste('Num_', num.to.fac, sep = '') := .SD, .SDcols = num.to.fac]
Alldf.coltype <- Alldf[, sapply(.SD, class), .SDcols = c(3:ncol(Alldf))]
# convert to factors: both the designated numeric columns and every character column
tofac <- c(which(colnames(Alldf) %in% num.to.fac),
           which(colnames(Alldf) %in% names(which(Alldf.coltype == 'character'))))
Alldf[, c(tofac) := lapply(.SD, factor), .SDcols = tofac]
# high-cardinality factors (>= 25 levels) are replaced by bucketed frequencies
# and the raw columns dropped afterwards
to.rfreqs <- names(which(all_summary$`fac-levels` >= 25))
factor_to_freqs(Alldf, cols = to.rfreqs, buckets = rep(25, length(to.rfreqs)))
Alldf[, c(to.rfreqs) := NULL]
write.table(Alldf, paste(ParamConfig$feat_dir, "all-factor-genfea-16-2-29.csv",
                          sep = ''),
            sep = ",", row.names=FALSE, quote=FALSE)
cat("File size (MB):", round(file.info(paste(ParamConfig$feat_dir, "all-factor-genfea-16-2-29.csv",
                                             sep = ''))$size/1024^2),"\n")
|
86e2124bc6d83d13ca3fe572747912dcd8ccaf69 | 4769290ffe5f0597ce8ef190bb86494781cc1127 | /409_63583_cf_WNS_Analytics_Hackathon_code.R | 1ba609f550177e7570ce93673f801c7d8c2ce4a4 | [] | no_license | rohanpaul93/backup | 351dc8bdc29059b742fe806d66918b97459aa992 | bb0276b073b561820f5414da3ea21964bd3903aa | refs/heads/master | 2020-04-04T09:27:15.598485 | 2018-11-02T05:42:32 | 2018-11-02T05:42:32 | 155,818,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,593 | r | 409_63583_cf_WNS_Analytics_Hackathon_code.R | # Clear environment
# WNS Analytics hackathon: predict employee promotion (binary target is_promoted).
# Section 1: load data, fix types, handle missing values.
# NOTE(review): rm(list = ls()) wipes the session -- an anti-pattern in shared scripts.
rm(list = ls())
# Load essential libraries
library(dplyr)
library(tidyr)
library(lubridate)
library(ggplot2)
library(ggthemes)
library(caret)
library(gridExtra)
library(corrplot)
library(h2o)
library(caTools)
library(xgboost)
#install.packages('ggthemes')
# Load train and test files
list.files()
train_data <- read.csv("train_LZdllcl.csv", stringsAsFactors = F)
test_data <- read.csv("test_2umaH9m.csv", stringsAsFactors = F)
# List general attributes of the data
dim(train_data) # 54808 rows with 14 cols
dim(test_data) # 23490 rows with 13 cols
str(train_data)
# Convert employee id and is_promoted to factor type in both train and test
train_data$employee_id <- as.factor(train_data$employee_id)
test_data$employee_id <- as.factor(test_data$employee_id)
# recode 0/1 target as No/Yes factor for readable plots and h2o classification
train_data$is_promoted <- as.factor(ifelse(train_data$is_promoted == 0, "No", "Yes"))
# Check missing values column wise in train and test
sapply(train_data, function(x) sum(is.na(x))) # 4124 missing values for previous rating
sapply(test_data, function(x) sum(is.na(x))) # 1812 missing values for previous rating
# Character variables may instead have Blanks. Check if any character variables have blanks
sapply(train_data, function(x) sum(x == "")) # 2409 Blank values in education
sapply(test_data, function(x) sum(x == "")) # 1034 Blank values in education
# Looking at the data it is not possible to make any reasonable assumption for the missing
# education values. Hence we will treat the Blanks as a separate category
# Convert character type vars to factors
character_variables <- as.integer(which(sapply(train_data, function(x) is.character(x))))
train_data_factor <- as.data.frame(sapply(train_data[,character_variables], function(x) as.factor(x)))
train_data[,character_variables] <- train_data_factor
character_variables_test <- as.integer(which(sapply(test_data, function(x) is.character(x))))
# NOTE(review): this line indexes test_data with the TRAIN indices
# (character_variables) rather than character_variables_test -- it only works
# because train/test column layouts happen to match. Verify and fix upstream.
test_data_factor <- as.data.frame(sapply(test_data[,character_variables], function(x) as.factor(x)))
test_data[,character_variables_test] <- test_data_factor
rm(train_data_factor, test_data_factor)
# Check a summary of data
summary(train_data) # At a glance there aren't any unusual values
summary(test_data)
# Check distribution of target
prop.table(table(train_data$is_promoted))
# 91% people were not promoted compared to only 9% promoted. So dataset is skewed.
# Missing values imputation
# 1) Impute missing values in previous_year_rating. Missing values in previous_year_rating may
# occur if the employee hasn't had a rating yet. Let us check if this is true for all cases
unique(train_data$length_of_service[is.na(train_data$previous_year_rating)]) # 1.
# Indeed our suspicion is correct. So in this case we can't just impute a random rating.
# Instead let's convert previous_year_rating to a categorical variable and convert the NA's to a
# new category (NA becomes the literal level "Rating_NA" via paste)
train_data$previous_year_rating <- paste("Rating", train_data$previous_year_rating, sep = "_")
train_data$previous_year_rating <- as.factor(train_data$previous_year_rating)
test_data$previous_year_rating <- paste("Rating", test_data$previous_year_rating, sep = "_")
test_data$previous_year_rating <- as.factor(test_data$previous_year_rating)
# 2) Similarly create new category for Blanks in education
train_data$education <- as.character(train_data$education)
test_data$education <- as.character(test_data$education)
train_data$education[train_data$education == "" | train_data$education == " "] = "Unknown"
test_data$education[test_data$education == "" | test_data$education == " "] = "Unknown"
train_data$education <- as.factor(train_data$education)
test_data$education <- as.factor(test_data$education)
# EDA - Data Visualization #
# 1) For categorical variables
# Create a function which outputs two plots. Count of the target variable categories and
# percentage of the target variable in each category
# Plot a categorical predictor against a categorical target.
# Opens a new graphics window with two stacked panels:
#   1) raw counts per category, filled by the target variable
#   2) within-category proportions of the target variable
# Args:
#   data       data.frame containing both columns
#   source_var name (string) of the categorical predictor column
#   target_var name (string) of the categorical target column
Plotter_Categorical <- function(data, source_var, target_var){
  # .data[[...]] is safer than data[, c(var)]: it works on tibbles too and
  # keeps the mapping tied to the plot's own data argument
  p1 <- ggplot(data, aes(x = .data[[source_var]], fill = .data[[target_var]])) + geom_bar() +
    scale_fill_tableau() + theme_solarized() + theme(axis.text.x = element_text(angle = 90)) +
    geom_text(stat = "count", aes(label = ..count..), vjust = -0.1) +
    # fix: y axis shows counts, not the target variable
    labs(x = source_var, y = "count") + theme(legend.title = element_blank())
  p2 <- ggplot(data, aes(x = .data[[source_var]], fill = .data[[target_var]])) + geom_bar(position = "fill") +
    scale_fill_tableau() + theme_solarized() + theme(axis.text.x = element_text(angle = 90)) +
    # fix: y axis shows the filled proportion, not the target variable
    labs(x = source_var, y = "proportion") + theme(legend.title = element_blank())
  x11()
  grid.arrange(p1, p2)
}
# Section 2: categorical EDA -- one Plotter_Categorical call per feature,
# with conclusions recorded as comments.
# a) For department
Plotter_Categorical(train_data, "department", "is_promoted")
# Sales and Marketing is the most common department. There does not seem to be an appreciable
# difference between the classes as far as the response is concerned however. Unlikely to be an
# important factor.
# b) For region
Plotter_Categorical(train_data, "region", "is_promoted")
# Region has too many unique categories. Let's see the percentage distribution
# of promotions in each
round(prop.table(table(train_data$region, train_data$is_promoted),1),2)
# Variation between 3 to 4%. Not worth keeping this variable for analysis. So we
# will drop region
train_data <- train_data %>% dplyr::select(-region)
test_data <- test_data %>% dplyr::select(-region)
# c) For education
Plotter_Categorical(train_data, "education", "is_promoted")
# The unknown category seems to have lowest percentage of promotions.
# d) For gender
Plotter_Categorical(train_data, "gender", "is_promoted")
# Almost equal percentage of males and females get promoted (indicating no Gender bias)
# e) For recruitment_channel
Plotter_Categorical(train_data, "recruitment_channel", "is_promoted")
# Referred people are fewest. However they seem to get promoted more as compared to others.
# f) For previous_year_rating
Plotter_Categorical(train_data, "previous_year_rating", "is_promoted")
# As expected there is a steady and observable increase in no. of promotions with rise in Ratings
# g) KPIs_met_80 - Let's convert this to a character variable and change labels to "Yes/No"
train_data$KPIs_met..80. <- ifelse(train_data$KPIs_met..80. == 0, "No", "Yes")
test_data$KPIs_met..80. <- ifelse(test_data$KPIs_met..80. == 0, "No", "Yes")
Plotter_Categorical(train_data, "KPIs_met..80.", "is_promoted")
# Very Important variable. The %age of people getting promotions increases by almost 5 times if they meet
# the KPI > 80 criteria
# 2) For numeric variables
# Plot a numeric predictor against a categorical target.
# Opens a new graphics window with two stacked panels:
#   1) dodged histograms + density curves of the predictor, split by target
#   2) boxplots of the predictor per target level
# Args:
#   data       data.frame containing both columns
#   source_var name (string) of the numeric predictor column
#   target_var name (string) of the categorical target column
Plotter_Numeric <- function(data, source_var, target_var){
  p1 <- ggplot(data, aes(x = .data[[source_var]], fill = .data[[target_var]])) +
    geom_histogram(aes(y = ..density..),position = "dodge", col = "black", bins = 30) +
    theme_gdocs() + scale_fill_tableau(name = target_var) + geom_density(alpha = 0.3) +
    labs(x = source_var, y = "density")
  # Bug fix: p2 previously plotted the global `train_data` instead of the
  # `data` argument, so the function only worked by accident when both
  # happened to be the same object.
  p2 <- ggplot(data, aes(x = .data[[target_var]], y = .data[[source_var]], fill = .data[[target_var]])) +
    geom_boxplot() + theme_gdocs() + scale_fill_tableau(name = target_var) +
    labs(x = target_var, y = source_var)
  x11()
  grid.arrange(p1, p2)
}
# Section 3: numeric EDA, correlation check, dummy-variable encoding and the
# train/validation split used for logistic regression.
# a) For no_of_trainings
Plotter_Numeric(train_data, "no_of_trainings", "is_promoted")
# Clearly indicates that an overwhelming majority of employees have
# only undergone 1 or 2 trainings and it doesn't seem to have much bearing on promotions
# b) For age
Plotter_Numeric(train_data, "age", "is_promoted")
# Histogram and density plots indicate Age is normally distributed and does
# not seem to have much influence on being promoted or not
# c) For length_of_service
Plotter_Numeric(train_data, "length_of_service", "is_promoted")
# length_of_service falls off sharply below 10 years but is not an influential variable
# d) For awards_won
Plotter_Numeric(train_data, "awards_won.", "is_promoted")
# There are only 2 values of awards_won, 0 and 1. Clearly people winning awards
# are much more likely to be promoted. So convert awards_won to a categorical var.
train_data$awards_won. = ifelse(train_data$awards_won. == 1, "Awards_won", "No_awards")
test_data$awards_won. = ifelse(test_data$awards_won. == 1, "Awards_won", "No_awards")
Plotter_Categorical(train_data, "awards_won.", "is_promoted")
# e) For avg_training_score
Plotter_Numeric(train_data, "avg_training_score", "is_promoted")
# Two observations can be made-
# 1) 25th percentile score of people getting promoted is at least 60
# 2) Training scores above 80 are much more likely to see people promoted.
# It would be worthwhile to perform a bivariate analysis of avg_training_score
# against other variables
# avg_training_score vs is_promoted vs KPI's met
ggplot(train_data, aes(x = avg_training_score, fill = is_promoted)) +
  geom_histogram(aes(y = ..density..),position = "dodge", col = "black", bins = 30) +
  theme_economist() + scale_fill_tableau() + geom_density(alpha = 0.3) +
  facet_wrap(~KPIs_met..80.)
# avg_training_score vs is_promoted vs awards_won
ggplot(train_data, aes(x = avg_training_score, fill = is_promoted)) +
  geom_histogram(aes(y = ..density..),position = "dodge", col = "black", bins = 30) +
  theme_economist() + scale_fill_tableau() + geom_density(alpha = 0.3) +
  facet_wrap(~awards_won.)
# avg_training_score vs is_promoted vs previous_year_rating
ggplot(train_data, aes(x = avg_training_score, fill = is_promoted)) +
  geom_histogram(aes(y = ..density..),position = "dodge", col = "black", bins = 30) +
  theme_economist() + scale_fill_tableau() + geom_density(alpha = 0.3) +
  facet_wrap(~previous_year_rating)
# Check if any of the numeric variables have very high correlation
# NOTE(review): positional column indices c(6,7,9,12) assume the column layout
# after the drops above -- verify against str(train_data) below
cor_matrix <- cor(train_data[,c(6,7,9,12)])
str(train_data)
corrplot(cor_matrix, method = "number", type = "upper", bg = "lightgreen")
# Age and length_of_service have quite high correlation as expected. Also as
# we had seen earlier both variables have seemingly little influence on promotions
# So we will elect to drop length_of_service
train_data <- train_data %>% select(-length_of_service)
test_data <- test_data %>% select(-length_of_service)
# EDA completed #
# Check structure again
str(train_data)
str(test_data)
# Convert KPI's and awards won to factor vars
train_data$KPIs_met..80. = as.factor(train_data$KPIs_met..80.)
train_data$awards_won. = as.factor(train_data$awards_won.)
test_data$KPIs_met..80. = as.factor(test_data$KPIs_met..80.)
test_data$awards_won. = as.factor(test_data$awards_won.)
# Dummy variable creation- For algorithms needing it such as logistic regression
# stack train (minus target) on top of test so both get identical dummy columns
is_promoted <- train_data$is_promoted
combined_data <- rbind(train_data[,-ncol(train_data)], test_data)
combined_data_with_dummies <- combined_data
# Dummy for department (drop the model.matrix intercept and the original column)
dummy <- as.data.frame(model.matrix(~department, data = combined_data_with_dummies))
combined_data_with_dummies <- cbind(combined_data_with_dummies[,-2], dummy[,-1])
# For education
dummy <- as.data.frame(model.matrix(~education, data = combined_data_with_dummies))
combined_data_with_dummies <- cbind(combined_data_with_dummies[,-2], dummy[,-1])
# For gender (binary: m -> 1, f -> 0)
combined_data_with_dummies$gender <- ifelse(combined_data_with_dummies$gender == "m",
                                            1, 0)
combined_data_with_dummies$gender <- as.factor(combined_data_with_dummies$gender)
# For recruitment_channel
dummy <- as.data.frame(model.matrix(~recruitment_channel, data = combined_data_with_dummies))
combined_data_with_dummies <- cbind(combined_data_with_dummies[,-3], dummy[,-1])
# For previous_year_rating
dummy <- as.data.frame(model.matrix(~previous_year_rating, data = combined_data_with_dummies))
combined_data_with_dummies <- cbind(combined_data_with_dummies[,-5], dummy[,-1])
# Awards won and KPI met (binary 1/0 factors)
combined_data_with_dummies$KPIs_met..80. <- ifelse(combined_data_with_dummies$KPIs_met..80. == "Yes",1,0)
combined_data_with_dummies$awards_won. <- ifelse(combined_data_with_dummies$awards_won. == "Awards_won",1,0)
combined_data_with_dummies$KPIs_met..80. <- as.factor(combined_data_with_dummies$KPIs_met..80.)
combined_data_with_dummies$awards_won. <- as.factor(combined_data_with_dummies$awards_won.)
str(combined_data_with_dummies)
sapply(combined_data_with_dummies, function(x) sum(is.na(x))) # No missing values
# Again separate into train and test
train_data_with_dummies <- cbind(combined_data_with_dummies[1:nrow(train_data),], is_promoted)
test_data_with_dummies <- combined_data_with_dummies[(nrow(train_data) + 1):nrow(combined_data_with_dummies),]
train_data_with_dummies$is_promoted <- ifelse(train_data_with_dummies$is_promoted == "Yes",1,0)
# Separate into train and validation
set.seed(123)
indices = sample.split(train_data_with_dummies$is_promoted, SplitRatio = 0.75)
train_data_with_dummies_2 = train_data_with_dummies[indices,]
validation_data_with_dummies = train_data_with_dummies[!(indices),]
#### Model Building ####
# Section 4: baseline models (GLM, RF, Naive Bayes, GBM) trained in h2o,
# each followed by a Kaggle-style submission file.
# 1) Try Logistic regression
h2o.init(nthreads = -1)
# Transfer data to cluster
train_data_with_dummies.h2o <- as.h2o(train_data_with_dummies_2)
validation_data_with_dummies.h2o <- as.h2o(validation_data_with_dummies)
test_data_with_dummies.h2o <- as.h2o(test_data_with_dummies)
#check column index number
colnames(train_data_with_dummies.h2o)
# Set dependent and independent vars (by position in the dummy frame)
y.dep <- 26
x.indep <- 2:25
#### LR in H2O ####
lr.model <- h2o.glm(y = y.dep, x = x.indep, training_frame = train_data_with_dummies.h2o,
                    validation_frame = validation_data_with_dummies.h2o,
                    nfolds = 3, family = "binomial", seed = 123)
summary(lr.model)
h2o.varimp(lr.model)
# Predict on validation data
validation_predictions <- as.data.frame(h2o.predict(lr.model, validation_data_with_dummies.h2o))
# Find optimal probability cutoff
validation_data_with_dummies$probability <- validation_predictions$p1
summary(validation_data_with_dummies$probability)
# Selecting cutoff values: sweep 200 thresholds over the observed probability
# range and tabulate the confusion matrix at each
cutoff_data <- data.frame(cutoff = 0, TP = 0, TN = 0, FP = 0,FN = 0)
cutoffs <- seq(0.0002834,0.9999993,length=200)
for(cutoff in cutoffs){
  predicted <- as.numeric(validation_data_with_dummies$probability > cutoff)
  TP = sum(predicted==1 & validation_data_with_dummies$is_promoted==1)
  TN = sum(predicted==0 & validation_data_with_dummies$is_promoted==0)
  FP = sum(predicted==1 & validation_data_with_dummies$is_promoted==0)
  FN = sum(predicted==0 & validation_data_with_dummies$is_promoted==1)
  cutoff_data <- rbind(cutoff_data, c(cutoff, TP, TN, FP, FN))
}
# drop the all-zero seed row
cutoff_data <- cutoff_data[-1,]
# calculate metrics and pick the cutoff maximizing F1 (competition metric)
cutoff_data <- cutoff_data %>% mutate(P = TP+FN, N = TN+FP)
cutoff_data <- cutoff_data %>% mutate(Accuracy = (TP+TN)/(P+N),
                                      Precision = TP/(TP+FP),
                                      Recall = TP/(TP+FN))
cutoff_data <- cutoff_data %>% mutate(F1_score = 2*(Precision*Recall)/(Precision+Recall))
cutoff_max_F1 <- cutoff_data$cutoff[which.max(cutoff_data$F1_score)]
# Now predict on test data with entire train set
train_data_with_dummies_full.h2o <- as.h2o(train_data_with_dummies)
lr.model <- h2o.glm(y = y.dep, x = x.indep, training_frame = train_data_with_dummies_full.h2o,
                    nfolds = 3, family = "binomial", seed = 123)
predictions_glm <- as.data.frame(h2o.predict(lr.model, test_data_with_dummies.h2o))
# Submission 1- with the tested cutoff
submission_1 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), predictions_glm$p1))
colnames(submission_1) = c("employee_id","is_promoted")
submission_1$is_promoted <- ifelse(submission_1$is_promoted >= cutoff_max_F1,1,0)
write.csv(submission_1, "submission_1_Logistic_Regression_optimum_cutoff.csv", row.names = F)
# Submission 2- H2O predictions direct
submission_2 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), predictions_glm$predict))
colnames(submission_2) = c("employee_id","is_promoted")
write.csv(submission_2, "submission_2_Logistic_Regression_h2O_direct_predictions.csv", row.names = F)
# 2) Try Random Forest (factor frames, no dummies needed for tree models)
train.h2o <- as.h2o(train_data)
test.h2o <- as.h2o(test_data)
colnames(train.h2o)
y.dep = 12
x.indep = 2:11
rf.model <- h2o.randomForest(y = y.dep, x = x.indep, training_frame = train.h2o,
                             nfolds = 3, ntrees = 500, seed = 123)
summary(rf.model)
h2o.varimp(rf.model)
# Predict on test data
predictions_rf <- as.data.frame(h2o.predict(rf.model, test.h2o))
# Submission 3- RF default
submission_3 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), as.character(predictions_rf$predict)))
colnames(submission_3) = c("employee_id","is_promoted")
submission_3$is_promoted <- ifelse(submission_3$is_promoted == "Yes",1,0)
write.csv(submission_3, "H:/Career Development/Analytics Vidhya/WNS Analytics/Submissions/submission_3_Random_Forest_default.csv", row.names = F)
# F1 score 0.49
# 3) Try Naive Bayes
nb.model <- h2o.naiveBayes(y = y.dep, x = x.indep, training_frame = train.h2o,
                           nfolds = 3, seed = 123)
summary(nb.model)
# Predict on test data
predictions_nb <- as.data.frame(h2o.predict(nb.model, test.h2o))
# Submission 4- NB default
submission_4 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), as.character(predictions_nb$predict)))
colnames(submission_4) = c("employee_id","is_promoted")
submission_4$is_promoted <- ifelse(submission_4$is_promoted == "Yes",1,0)
write.csv(submission_4, "H:/Career Development/Analytics Vidhya/WNS Analytics/Submissions/submission_4_Naive_Bayes.csv", row.names = F)
# 4) Try GBM with learn rate 0.1
gbm.model <- h2o.gbm(y = y.dep, x = x.indep, training_frame = train.h2o,
                     ntrees = 1000, learn_rate = 0.1, seed = 123)
summary(gbm.model)
# Predict on test data
predictions_gbm <- as.data.frame(h2o.predict(gbm.model, test.h2o))
# Submission 5- GBM with learn rate 0.1
submission_5 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), as.character(predictions_gbm$predict)))
colnames(submission_5) = c("employee_id","is_promoted")
submission_5$is_promoted <- ifelse(submission_5$is_promoted == "Yes",1,0)
write.csv(submission_5, "H:/Career Development/Analytics Vidhya/WNS Analytics/Submissions/submission_5_GBM_learn_rate_0.1.csv", row.names = F)
# Leaderboard F1 score and CV score 0.51
#### Final Model Tuning ####
# Section 5: hyper-parameter tuning of the GBM (best baseline) -- first a
# Cartesian sweep over max_depth, then a random-discrete search over the
# remaining parameters. Pattern follows the h2o GBM tuning tutorial.
# Since the GBM model is giving best results we will try to tune it to further
# improve leaderboard rank
# Grid search H2O
# Split the data for tuning
splits <- h2o.splitFrame(
  data = train.h2o,
  ratios = c(0.6,0.2), ## only need to specify 2 fractions, the 3rd is implied
  destination_frames = c("train.hex", "valid.hex", "test.hex"), seed = 1234
)
train <- splits[[1]]
valid <- splits[[2]]
test <- splits[[3]]
## Try different depths
hyper_params = list( max_depth = seq(1,29,2) )
#hyper_params = list( max_depth = c(4,6,8,12,16,20) ) ##faster for larger datasets
grid <- h2o.grid(
  ## hyper parameters
  hyper_params = hyper_params,
  ## full Cartesian hyper-parameter search
  search_criteria = list(strategy = "Cartesian"),
  ## which algorithm to run
  algorithm="gbm",
  ## identifier for the grid, to later retrieve it
  grid_id="depth_grid",
  ## standard model parameters
  x = x.indep,
  y = y.dep,
  training_frame = train,
  validation_frame = valid,
  ## more trees is better if the learning rate is small enough
  ## here, use "more than enough" trees - we have early stopping
  ntrees = 10000,
  ## smaller learning rate is better
  ## since we have learning_rate_annealing, we can afford to start with a bigger learning rate
  learn_rate = 0.05,
  ## learning rate annealing: learning_rate shrinks by 1% after every tree
  ## (use 1.00 to disable, but then lower the learning_rate)
  learn_rate_annealing = 0.99,
  ## sample 80% of rows per tree
  sample_rate = 0.8,
  ## sample 80% of columns per split
  col_sample_rate = 0.8,
  ## fix a random number generator seed for reproducibility
  seed = 1234,
  ## early stopping once the validation AUC doesn't improve by at least 0.01% for 5 consecutive scoring events
  stopping_rounds = 5,
  stopping_tolerance = 1e-4,
  stopping_metric = "AUC",
  ## score every 10 trees to make early stopping reproducible (it depends on the scoring interval)
  score_tree_interval = 10
)
## sort the grid models by decreasing AUC
sortedGrid <- h2o.getGrid("depth_grid", sort_by="auc", decreasing = TRUE)
sortedGrid
# Higher depths lead to less AUC as do lower depths. So for further optimization
# we will use only depths between 2 to 10
minDepth = 2
maxDepth = 10
# Final parameter tuning
hyper_params = list(
  ## restrict the search to the range of max_depth established above
  max_depth = seq(minDepth,maxDepth,1),
  ## search a large space of row sampling rates per tree
  sample_rate = seq(0.2,1,0.01),
  ## search a large space of column sampling rates per split
  col_sample_rate = seq(0.2,1,0.01),
  ## search a large space of column sampling rates per tree
  col_sample_rate_per_tree = seq(0.2,1,0.01),
  ## search a large space of how column sampling per split should change as a function of the depth of the split
  col_sample_rate_change_per_level = seq(0.9,1.1,0.01),
  ## search a large space of the number of min rows in a terminal node
  min_rows = 2^seq(0,log2(nrow(train.h2o))-1,1),
  ## search a large space of the number of bins for split-finding for continuous and integer columns
  nbins = 2^seq(4,10,1),
  ## search a large space of the number of bins for split-finding for categorical columns
  nbins_cats = 2^seq(4,12,1),
  ## search a few minimum required relative error improvement thresholds for a split to happen
  min_split_improvement = c(0,1e-8,1e-6,1e-4),
  ## try all histogram types (QuantilesGlobal and RoundRobin are good for numeric columns with outliers)
  histogram_type = c("UniformAdaptive","QuantilesGlobal","RoundRobin")
)
search_criteria = list(
  ## Random grid search
  strategy = "RandomDiscrete",
  ## limit the runtime to 60 minutes
  max_runtime_secs = 3600,
  ## build no more than 100 models
  max_models = 100,
  ## random number generator seed to make sampling of parameter combinations reproducible
  seed = 1234,
  ## early stopping once the leaderboard of the top 5 models is converged to 0.1% relative difference
  stopping_rounds = 5,
  stopping_metric = "AUC",
  stopping_tolerance = 1e-3
)
grid <- h2o.grid(
  ## hyper parameters
  hyper_params = hyper_params,
  ## hyper-parameter search configuration (see above)
  search_criteria = search_criteria,
  ## which algorithm to run
  algorithm = "gbm",
  ## identifier for the grid, to later retrieve it
  grid_id = "final_grid",
  ## standard model parameters
  x = x.indep,
  y = y.dep,
  training_frame = train,
  validation_frame = valid,
  ## more trees is better if the learning rate is small enough
  ## use "more than enough" trees - we have early stopping
  ntrees = 10000,
  ## smaller learning rate is better
  ## since we have learning_rate_annealing, we can afford to start with a bigger learning rate
  learn_rate = 0.05,
  ## learning rate annealing: learning_rate shrinks by 1% after every tree
  ## (use 1.00 to disable, but then lower the learning_rate)
  learn_rate_annealing = 0.99,
  ## early stopping based on timeout (no model should take more than 1 hour - modify as needed)
  max_runtime_secs = 3600,
  ## early stopping once the validation AUC doesn't improve by at least 0.01% for 5 consecutive scoring events
  stopping_rounds = 5, stopping_tolerance = 1e-4, stopping_metric = "AUC",
  ## score every 10 trees to make early stopping reproducible (it depends on the scoring interval)
  score_tree_interval = 10,
  ## base random number generator seed for each model (automatically gets incremented internally for each model)
  seed = 1234
)
## Sort the grid models by AUC
sortedGrid <- h2o.getGrid("final_grid", sort_by = "auc", decreasing = TRUE)
sortedGrid
# Get the best model by AUC
gbm.model <- h2o.getModel(sortedGrid@model_ids[[1]])
gbm.model@parameters
#### Build Final model on entire train data with these parameters ####
# Section 6: refit the tuned GBM on the full training set with 5-fold CV,
# then refit once more after re-adding length_of_service (dropped during EDA)
# and write the final submission. Hyper-parameters below come from the grid
# winner inspected above (gbm.model@parameters).
gbm_final_model <- h2o.gbm(y = y.dep, x = x.indep, training_frame = train.h2o,
                           ntrees = 10000, learn_rate = 0.05, learn_rate_annealing = 0.99,
                           max_depth = 7, distribution = "bernoulli", sample_rate = 0.57,
                           col_sample_rate = 0.92, col_sample_rate_change_per_level = 1.04,
                           min_split_improvement = 0, histogram_type = "QuantilesGlobal",
                           score_tree_interval = 10, nbins = 256, nbins_cats = 16, stopping_rounds = 5,
                           stopping_metric = "AUC", stopping_tolerance = 0.0001,
                           nfolds = 5, seed = 1234)
# Cross validation parameters
gbm_final_model@model$cross_validation_metrics
# With length_of_service: pull the column back from the raw files
# NOTE(review): assumes row order of train.csv/test.csv matches the in-memory
# frames -- confirm before trusting the cbind
train_old <- read.csv("train.csv")
test_old <- read.csv("test.csv")
train_data <- cbind(train_data, train_old$length_of_service)
test_data <- cbind(test_data, test_old$length_of_service)
data.table::setnames(train_data, "train_old$length_of_service", "length_of_service")
data.table::setnames(test_data, "test_old$length_of_service", "length_of_service")
train.h2o <- as.h2o(train_data)
test.h2o <- as.h2o(test_data)
colnames(train.h2o)
# re-point indices: target still col 12, predictors now include col 13
y.dep <- 12
x.indep <- c(2:11,13)
gbm_final_model <- h2o.gbm(y = y.dep, x = x.indep, training_frame = train.h2o,
                           ntrees = 10000, learn_rate = 0.05, learn_rate_annealing = 0.99,
                           max_depth = 7, distribution = "bernoulli", sample_rate = 0.57,
                           col_sample_rate = 0.92, col_sample_rate_change_per_level = 1.04,
                           min_split_improvement = 0, histogram_type = "QuantilesGlobal",
                           score_tree_interval = 10, nbins = 256, nbins_cats = 16, stopping_rounds = 5,
                           stopping_metric = "AUC", stopping_tolerance = 0.0001,
                           nfolds = 5, seed = 1234)
# Cross validation parameters
gbm_final_model@model$cross_validation_metrics
# Interesting- f1 score actually improved
# Use this as predictions
predictions_gbm <- as.data.frame(h2o.predict(gbm_final_model, test.h2o))
# Submission 7- tuned GBM including length_of_service
submission_7 <- as.data.frame(cbind(as.integer(as.character(test_data$employee_id)), as.character(predictions_gbm$predict)))
colnames(submission_7) = c("employee_id","is_promoted")
submission_7$is_promoted <- ifelse(submission_7$is_promoted == "Yes",1,0)
write.csv(submission_7, "H:/Career Development/Analytics Vidhya/WNS Analytics/Submissions/submission_7_GBM_with_employment_length.csv", row.names = F)
# We will choose this model with length_of_service as final model
#### The End ####
dde1028fbf22c665d3a483b69cd90c486edb9051 | 1d3ed002e7749a5c144481989886626bce627e1a | /R/worker_apply.R | f8ed817986f423d6e28512d6998bbb30bc78c5d4 | [
"Apache-2.0"
] | permissive | francoisjehl/sparkworker | 2f570b5221f3e106a8945772f97b56cfbedc30a9 | d01d3c0514151d3825ba28035dc10ee7c2a6d527 | refs/heads/master | 2020-12-30T16:28:21.033864 | 2017-05-09T14:24:27 | 2017-05-09T14:24:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 269 | r | worker_apply.R | spark_worker_apply <- function(sc) {
  # Fetch the active SparkContext from the sparklyr backend via reflection
  spark_context <- invoke_static(sc, "sparklyr.Backend", "getSparkContext")
  log("sparklyr worker retrieved context")
  # Fetch the RDD split assigned to this worker
  # NOTE(review): both results are currently unused beyond logging -- this
  # looks like a work-in-progress stub; confirm intended behavior
  spark_split <- invoke_static(sc, "sparklyr.WorkerRDD", "getSplit")
  log("sparklyr worker retrieved split")
}
|
182b267f0342e98af31293822f3a9004f7c638ba | 436ace74a695893aad73229b723fac6be6814129 | /man/shapleyLinearGaussian.Rd | 7aa4796f4974fac62af8acc1d4ee02682e352d02 | [] | no_license | cran/sensitivity | 18657169c915857dcde8af872e0048fef77107f4 | 2b2cbcb7f1bebecfd05e589e459fdf4334df3af1 | refs/heads/master | 2023-04-06T05:36:54.290801 | 2023-03-19T18:10:02 | 2023-03-19T18:10:02 | 17,699,584 | 17 | 17 | null | 2021-04-07T00:57:30 | 2014-03-13T06:16:44 | R | UTF-8 | R | false | false | 2,691 | rd | shapleyLinearGaussian.Rd | \name{shapleyLinearGaussian}
\alias{shapleyLinearGaussian}
\title{Computation of the Shapley effects in the linear Gaussian framework}
\description{
\code{shapleyLinearGaussian} implements the computation of
the Shapley effects in the linear Gaussian framework, using the linear model
(without the value at zero) and the covariance matrix of the inputs.
  It uses the block-diagonal covariance trick of Broto et al. (2019), which makes it
  possible to handle high-dimensional cases (number of inputs > 25).
  It gives a warning if any block has dimension larger than 25.
}
\usage{
shapleyLinearGaussian(Beta, Sigma, tol=10^(-6))
}
\arguments{
\item{Beta}{a vector containing the coefficients of the linear model (without the value at zero).}
  \item{Sigma}{covariance matrix of the inputs. Has to be a positive semi-definite matrix of the same size as Beta.}
\item{tol}{a relative tolerance to detect zero singular values of Sigma.}
}
\value{
\code{shapleyLinearGaussian} returns a numeric vector containing all the Shapley effects.
}
\references{
B. Broto, F. Bachoc, M. Depecker, and J-M. Martinez, 2019, \emph{Sensitivity indices
for independent groups of variables}, Mathematics and Computers in Simulation, 163, 19--31.
B. Broto, F. Bachoc, L. Clouvel and J-M Martinez, 2022,\emph{Block-diagonal
covariance estimation and application to the Shapley effects in sensitivity analysis},
SIAM/ASA Journal on Uncertainty Quantification, 10, 379--403.
B. Iooss and C. Prieur, 2019, \emph{Shapley effects for sensitivity analysis with
correlated inputs: comparisons with Sobol' indices, numerical estimation and
applications}, International Journal for Uncertainty Quantification, 9, 493--514.
A.B. Owen and C. Prieur, 2016, \emph{On Shapley value for measuring importance
of dependent inputs}, SIAM/ASA Journal of Uncertainty Quantification, 5, 986--1002.
}
\author{
Baptiste Broto
}
\seealso{
\link{shapleyBlockEstimation}, \link{shapleyPermEx}, \link{shapleyPermRand}, \link{shapleySubsetMc}
}
\examples{
library(MASS)
library(igraph)
# First example:
p=5 #dimension
A=matrix(rnorm(p^2),nrow=p,ncol=p)
Sigma=t(A)\%*\%A
Beta=runif(p)
Shapley=shapleyLinearGaussian(Beta,Sigma)
plot(Shapley)
# Second Example, block-diagonal:
K=5 #number of groups
m=5 # number of variables in each group
p=K*m
Sigma=matrix(0,ncol=p,nrow=p)
for(k in 1:K)
{
A=matrix(rnorm(m^2),nrow=m,ncol=m)
Sigma[(m*(k-1)+1):(m*k),(m*(k-1)+1):(m*k)]=t(A)\%*\%A
}
# we mix the variables:
samp=sample(1:p,p)
Sigma=Sigma[samp,samp]
Beta=runif(p)
Shapley=shapleyLinearGaussian(Beta,Sigma)
plot(Shapley)
} |
1acacf4d0f79eed2dcc124e6fa7a40387268767a | bf7ee5b92c94df1902cd73b4a1c0bc0149371c8e | /scripts/downloads/pub/van_groningen_2017/download_process.R | 423d74f7a60020a3be7240a80a01e62ba754011a | [] | no_license | JimmyVdEynden/DLG2 | d49dd01b619c68efafd1b368433af8775fe1ea1f | c557197257e21aec9c08e7aabfbf9e8669656fe4 | refs/heads/master | 2022-12-01T10:49:05.597263 | 2020-08-13T09:38:56 | 2020-08-13T09:38:56 | 244,052,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | download_process.R | # Download markers from nature genetics paper
# wget https://media.nature.com/original/nature-assets/ng/journal/v49/n8/extref/ng.3899-S3.xlsx
# Process
# library(gdata)
# diff_marker<- read.xls("downloads/diff_markers/ng.3899-S3.xlsx")
# Read the supplementary table from the paper; the code below treats
# column 1 as gene identifiers and column 2 as the signature label
# ("MES"/"ADRN") -- TODO confirm against the actual spreadsheet layout.
diff_marker<- as.data.frame(readxl::read_xlsx("ng.3899-S3.xlsx"))
# Genes whose column-2 label is "MES" (NA labels excluded).
MES<- as.character(diff_marker[!is.na(diff_marker[,2])&diff_marker[,2]=="MES",1])
# Genes whose column-2 label is "ADRN" (NA labels excluded).
ADRN<- as.character(diff_marker[!is.na(diff_marker[,2])&diff_marker[,2]=="ADRN",1])
# Save
save(MES,ADRN,file = "vGron_diff_markers.RData")
|
c507763a33a22f1b0e2a2da6898e885ff08d878a | 9f3b8cd37ec1e63c51bfe1c2a5c1b5a955a10e34 | /Yidi_Wang_Final Project.R | 41e93556554b66687909d7485e85b08f2cd50972 | [] | no_license | IndyNYU/R-in-Finance | 3fd3cbd58bd9d4d80f6472bc6bdd1afeddc874f5 | fa1108909a3e555a2ae756af4fa47b956013ca26 | refs/heads/master | 2020-04-01T18:35:04.714059 | 2018-10-17T18:16:39 | 2018-10-17T18:16:39 | 153,501,789 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,435 | r | Yidi_Wang_Final Project.R | # FRE 6871 - Final Project by Yidi Wang
# 5/10/2018
# My project is divided by two Part.
# Part 1. Cover as much as possible what I have learned from the textbook and class.
# Part 2. Do machine learning prediction about the Kaggle Titanic Prediction.
# Part 1
# Although the data is quite simple, it is worthwhile to learn from it.
# THis part is organized as the following:
# Introduction and Goal Statement.
# 1. Set the enviroment.
# 2. Load the data.
# 3. Do data visualization and statistics analysis.
# 4. Do analysis of variance.
# 5. Time Series Analyis.
# Introduction
# After the whole semester working with R with the help of professor and classmates,
# I think I make a greate progess in this field.
# Honestly speaking, I really enjoy data analysis with the help of R.
# I choose to work with the classic "iris" dataset to do linear regression and anova analysis.
# 1. Set the working enviroment and load the data.
# 1.1 Set the working drectory to the disp D and the final project file.
# The setting of working enviroment is very important.
# 1.2 Set efficient digis equal to 7, which is quite reasonable.
# NOTE(review): rm(list=ls()) and setwd() inside a script are discouraged --
# they wipe/redirect the caller's session; prefer project-relative paths.
rm(list=ls())
setwd('D:/R/Final Project/')
# 7 significant digits for printing; scipen=0 keeps default notation choice.
options(digits=7, scipen=0)
# Snapshot of the graphics parameters so they can be restored with par(opar).
opar <- par(no.readonly=TRUE)
# 2. Load the data.
# 2.1 Have an overview of the data.
data("iris")
View(iris)
str(iris)
summary(iris)
# This is a dataset about three kinds of flowers.
# With the data of the length and width of the sepal, length and width of the petal.
# 2.2 Check if there are any missing values.
# Here I want to state the importance of working with the misssing values.
sum(is.na(iris))
# It shows there aren't any missing values.
# I prefer to work with complete dataset, which is easy to work with.
# Most statistical methods assume that the input data are complete and don't include missing values.
# But in reality, there are so many missing data for different reasons.
# There are two popular methods about dealing with the missing data.
# Either delete the missing data or substitute it.
# 3. Do data visualization and statistics analysis.
# 3.1 Work with graphs.
attach(iris)
plot(Sepal.Width,Sepal.Length)
plot(Petal.Width,Petal.Length)
detach(iris)
# According to this two plots
# I conclude that there may exist a positive relationship between the Petal.Length and Petal Width.
# 3.2 Work with scatter plots and line plots.
attach(iris)
plot(Petal.Width,Petal.Length,pch=21)
abline(lm(Petal.Length~Petal.Width),lty=5,col="red")
title("Regression of Petal Length on Petal Width.")
detach(iris)
# 3.3 Combining graphs.
# In order to have a better overview of the dataset, get a combing graphs analysis.
attach(iris)
par(mfrow = c(2,2))
hist(Petal.Length, main = "Histogram of Petal.Length")
boxplot(Petal.Width, main = "Boxplot of Petal.Width")
plot(Petal.Width, Petal.Length, pch = 21)
hist(Sepal.Length, main = "Histogram of Sepal.Length")
detach(iris)
# 3.4 Data Analysis of each species of the flower.
setosa <- subset(iris, iris$Species == "setosa")
versicolor <- subset(iris, iris$Species == "versicolor")
virginica <- subset(iris, iris$Species == "virginica")
# 3.5 Plot the relationship between Petal.Length and Petal.Width.
par(mfrow = c(1,1))
attach(setosa)
plot(Petal.Width,Petal.Length,pch=21)
abline(lm(Petal.Length~Petal.Width),lty=5,col="red")
title("Regression of Petal Length on Petal Width for setosa.")
detach(setosa)
attach(versicolor)
plot(Petal.Width,Petal.Length,pch=21)
abline(lm(Petal.Length~Petal.Width),lty=5,col="red")
title("Regression of Petal Length on Petal Width for versicolor.")
detach(versicolor)
attach(virginica)
plot(Petal.Width,Petal.Length,pch=21)
abline(lm(Petal.Length~Petal.Width),lty=5,col="red")
title("Regression of Petal Length on Petal Width for versicolor.")
detach(virginica)
# So after subsetting and plot the relationship for each, there doesn't show an obvious relationship between length and width.
# The graphs are really easy for us to analyze the relationship between variables.
# I am fond of working directly with graphs, and it's useful in the communication with cilents and co-workers.
# 4. Do analysis of variance.
# The meaning of ANOVA technology is used to analyze a wide variety of experimental design.
# Try to understand the difference with different groups.
# 4.1 Try to get the distribution between three kinds of flowers.
attach(iris)
table(Species)
aggregate(Sepal.Length, by = list(Species),FUN = mean)
aggregate(Sepal.Width, by = list(Species), FUN = mean)
aggregate(Petal.Length, by = list(Species), FUN = mean)
aggregate(Petal.Width, by = list(Species), FUN = mean)
# 4.2 According to the distribution, it seems that the Petal.Width show obvious difference between different groups.
# Use aovna to analyze the data.
fit <- aov(Petal.Width ~ Species)
summary(fit)
# 4.3 Plot the mean according to the original data.
library(gplots)
plotmeans(Petal.Width ~ Species, xlab = "Kind of Flower", ylab = "Petal.Width",
main = "Mean PlOt")
# According to the plots, it is obviously showed that different kinds of flowers have different Petal Width.
# 4.4 Tukey HSD pairwise group comparisions.
# Analyze the confidence interval for the mean of different groups.
# Plot the outcomes to show directly the outcomes.
TukeyHSD(fit)
par(las = 2)
par(mar = c(5,8,4,2))
plot(TukeyHSD(fit))
# 4.5 Make use of the glht() function to analyze more specifically.
library(multcomp)
par(mar = c(5, 4, 6, 2))
tuk <- glht(fit, linfct= mcp(Species = "Tukey"))
plot(cld(tuk, level = 0.05), col = "lightgrey")
# According to the plots, it verifies the above statement that the Petal.Width shows difference between groups.
# 4.6 Assess test assumptions.
# In the anova analysis, it's very important to test the assumptions.
# After analyze each outcome, try to test the assumption to get the overall understanding.
# Plot the QQ plot to find if the data is normally distributed with different groups.
library(car)
qqPlot(lm(Petal.Width ~ Species), simulate = T, main = "Q-Q Plot")
# Use the Bartlett's test to analyze if the data have the equality variances.
bartlett.test(Petal.Width ~ Species)
# 4.7 Use ANOVA as regression
library(multcomp)
levels(Species)
fit.aov <- aov(Petal.Width ~ Species)
summary(fit.aov)
# ANOVA analysis is different from the linar model.
fit.lm <- lm(Petal.Width ~ Species)
summary(fit.lm)
# 5. Time Series Analysis.
# Time series data is very common and it's of urgent significance to learn how to deal with time series data.
# Esepecially for financial data, it shows a strong relationship.
# For this part I will first construct a time series data, which is the stock price of MS for two years.
# 5.1 Create the time series data.
stock <- c(24.55, 23.57, 23.87, 25.82, 26.26, 24.93,
27.57, 30.98, 30.98, 32.43, 40.20, 41.07,
41.30, 44.59, 41.83, 42.35, 40.94, 43.71,
46.00, 44.86, 47.50, 49.30, 48.16, 51.99)
tstock <- ts(stock, start = c(2016,1), frequency = 12)
# 5.2 Plot it.
plot(tstock)
# 5.3 Smoothing and seasonal decompositions.
library(forecast)
ylim <- c(min(stock), max(stock))
plot(tstock)
plot(ma(tstock, 1))
plot(ma(tstock, 2))
# 5.4 Fit the Time Series Model.
fit <- ets(tstock, model = "AAA")
fit
accuracy(fit)
# 5.5 Predict with the ets model.
pred <- forecast(fit, 5)
plot(pred, main = "Forecast for the stock price of MS.")
# Part 2 Machine Learning prediction about the Kaggle Titanic Prediction.
# Reference: Kaggle ML Competition.
# Detailed Information is from Kaggle.
# 1. Data exploration and visualization
# Step 1. Load data and libraries.
# Step 2. Data cleaning and visualisation.
# Step 3. Data analysis.
# Step 4. Fit the machine learning algorithms.
# 1. Load data and libraries.
# 1.1 Load the lirbraries.
library('ggplot2')
library('ggthemes')
library('dplyr')
library('scales')
library('randomForest')
library('corrplot')
library('plyr')
# 1.2 Load the data.
# The original data is csv type.
Train <- read.csv('D:/R/Final Project/train.csv', stringsAsFactors = F)
Test <- read.csv('D:/R/Final Project/test.csv', stringsAsFactors = F)
# Initial work with the data.
str(Train)
summary(Train)
# 2. Fill the missing data.
# Fill with the mean of each variable.
# Mean imputation of missing Age values, computed separately per data set.
# NOTE(review): imputing Test with its own mean is a mild train/test
# inconsistency (leakage of test statistics) -- consider reusing the Train
# mean for both; confirm intent.
Train$Age[is.na(Train$Age)] = mean(Train$Age, na.rm = TRUE)
Test$Age[is.na(Test$Age)] = mean(Test$Age, na.rm = TRUE)
# 3. Data Analysis and create variables.
nonvars = c("PassengerId","Name","Ticket","Embarked","Cabin")
Train = Train[,!(names(Train) %in% nonvars)]
str(Train)
# Analyze the correlation between variables.
# BUG FIX: Sex was loaded as character ("male"/"female") because read.csv was
# called with stringsAsFactors = FALSE, so as.numeric(Sex) produced only NAs
# (with a coercion warning) and broke cor() and the glm fits below.
# Encode it explicitly instead: male = 1, female = 0.
Train$Sex = ifelse(Train$Sex == "male", 1, 0)
Test$Sex = ifelse(Test$Sex == "male", 1, 0)
cor(Train)
# 4. Fit the LR Machine Learning Algorithm.
TitanicLog1 = glm(Survived~., data = Train, family = binomial)
summary(TitanicLog1)
# Analyze the outcomes.
TitanicLog2 = glm(Survived ~ . - Parch, data = Train, family = binomial)
summary(TitanicLog2)
TitanicLog3 = glm(Survived ~ . - Parch - Fare, data = Train, family = binomial)
summary(TitanicLog3)
# Test the accuracy.
predictTest = predict(TitanicLog3, type = "response", newdata = Test)
# Make prediction.
Test$Survived = as.numeric(predictTest >= 0.5)
table(Test$Survived)
Predictions = data.frame(Test[c("PassengerId","Survived")])
|
98b091aded6a69326eec9942cae61fe44055b510 | c50c50b12c9a41df2184538de073ed51bd29fb16 | /Mythical forest/random forest v2.R | 5bc446286c48554a669181d10809c77822b95562 | [] | no_license | abarciauskas-bgse/kaggle-onlinenewspopularity | 03aaffebceb83c4f93bac0f68b84faabb19b983e | 9ce38948d04d9f6e130664b41d59b30e5f3c3df5 | refs/heads/master | 2020-04-10T16:51:31.374472 | 2016-03-17T20:30:21 | 2016-03-17T20:30:21 | 50,348,423 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,324 | r | random forest v2.R | # RANDOM FOREST
# This will attempt to de-outlier the random forest. Plan:
# The ongoing problem is that 3s and 1s are being under-classified. We have about 77%
# accuracy on 2s, whereas 3s and 1s are correctly classified 8% and 45% of the time respectively.
# What shall we do about this?
# For each sample, the code will run 5 times. Each of those times, the predictions will
# be captured. What I'm hoping is that we'll find something like, 'actual 3s are likely
# to be predicted as 3 2/3 of the time, while 2s are predicted as 3s 1/3 of the time'.
# If this is the case, we an simply run the random forest 9 times and classify as 3s
# anything that is predicted as 3 more than 1/2 the time. It's a kind of ad-hoc boosting,
# if you will.
# Inshallah.
# It's also possible that pattern won't turn up - let's hope it does.
library(randomForest)
filepath <- '/home/beeb/Documents/Data_Science/News Competition/OnlineNewsPopularity'
setwd(filepath)
newspop <- read.csv('news_popularity_training.csv')
#Create binary from y variable
for(t in sort(unique(newspop[,'popularity']))) {
newspop[paste("pop",t,sep="")] <- ifelse( newspop[,'popularity'] == t , 1 , 0 )
}
# Now do the same thing but with the engineered data
source('../../kaggle-onlinenewspopularity/Feature engineering/feature engineering v2-3.R')
newdata$popularity <- as.factor(newdata$popularity)
newspop <- newspop[,4:ncol(newspop)]
reps <- 20
success.rate <- rep(NA, reps)
success.rate2 <- rep(NA, reps)
training.sample <- sample(nrow(newspop), 0.8*nrow(newspop))
newspop.train <- newspop[training.sample,]
newspop.test <- newspop[setdiff(1:nrow(newspop), training.sample),]
prediction.frame <- data.frame(newspop.test$popularity)
for(i in 1:reps) {
newspop.train$popularity <- as.factor(newspop.train$popularity)
newtree <- randomForest(popularity ~ ., data = newspop.train[,1:59])
# it is so random and forestyyyyyyyyyyyyyy
# omg takes nearly as long as the knn algo
# foreeeeeeeeeeeeeest
#now what do I do?!
random.predictions <- predict(newtree, newdata = newspop.test)
success.rate[i] <- length(which(random.predictions==newspop.test$popularity))/nrow(newspop.test)
prediction.frame[i+1] <- random.predictions
}
# let's do some things
# These tables show misclassification stats
for(i in 2:ncol(prediction.frame)) {
t <- as.matrix(table(unlist(prediction.frame[1]), unlist(prediction.frame[i])))
t <- cbind(t, rowSums(t))
keeptrack <- rep(NA, 5)
for(k in 1:5) {
keeptrack[k] <- t[k,k]/t[k,6]
}
t <- cbind(t, keeptrack)
assign(paste0('t', i), t)
}
# This will give us a list of how many times 1s and 3s were listed as probably 1 or 3
prediction.frame$count.3s <- 0
prediction.frame$count.1s <- 0
for(i in 2:ncol(prediction.frame)) {
prediction.frame$count.3s[prediction.frame[i] == 3] <- prediction.frame$count.3s[prediction.frame[i] == 3] + 1
prediction.frame$count.1s[prediction.frame[i] ==1] <- prediction.frame$count.1s[prediction.frame[i] == 1] + 1
}
# Now let's see if we can use this in a way to give us analytical leverage
table(filter(prediction.frame, count.1s>10)$newspop.test.popularity)
table(filter(prediction.frame, count.1s>10 & count.1s < 20)$newspop.test.popularity)
table(filter(prediction.frame, count.1s>10 & count.1s < 18)$newspop.test.popularity)
table(filter(prediction.frame, count.1s>10 & count.1s < 13)$newspop.test.popularity)
# This is very promising - basically, it says that if we run the random forest many
# times, then we can use that to boost the number of 1s we are classifying.
table(filter(prediction.frame, count.1s>5)$newspop.test.popularity)
table(filter(prediction.frame, count.1s>5 & count.1s < 15)$newspop.test.popularity)
# In fact - perhaps astonishingly - this is true for *any number over 0*
table(prediction.frame$newspop.test.popularity)
table(filter(prediction.frame, count.1s>0)$newspop.test.popularity)
# This could be A Thing.
# Now let's do the same with 3s.
table(filter(prediction.frame, count.3s>0)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>3)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>5)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>10)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>15)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>18)$newspop.test.popularity)
# So it looks like around 5 is where the effect starts to kick in, but it's much
# smaller than the effect with 1s.
# The last thing to worry about is what to do with the numbers where count.1s>5 AND
# count.3s > 5
table(filter(prediction.frame, count.3s>3, count.1s > 0)$newspop.test.popularity)
table(filter(prediction.frame, count.3s>5, count.1s > 0)$newspop.test.popularity)
# There's basically none of them. Phew!
# OK, let's now just do the same thing with the proper submission data.
# PUT THE KETTLE ON. THE CODE MUST RUN.
test <- read.csv('news_popularity_test.csv')
sample <- read.csv('news_popularity_sample.csv')
final.predict <- predict(newtree, newdata = test)
sample$popularity <- final.predict
# Re-fit the forest `reps` times on the full training data and collect each
# round's predictions for the submission set (column 1 is a placeholder).
prediction.frame <- data.frame(rep(NA, nrow(test)))
for(i in seq_len(reps)) {
  newspop$popularity <- as.factor(newspop$popularity)
  newtree <- randomForest(popularity ~ ., data = newspop[,1:59])
  random.predictions <- predict(newtree, newdata = test)
  prediction.frame[i + 1] <- random.predictions
}
# BUG FIX: count.1s / count.3s were never recomputed for this new
# prediction.frame, so the vote thresholds below compared against NULL
# (logical(0)) and silently reclassified nothing. Recompute the per-row vote
# counts; iterating 2:(reps + 1) also avoids the earlier pattern's pitfall of
# including the count columns themselves in the scan.
prediction.frame$count.3s <- 0
prediction.frame$count.1s <- 0
for(i in 2:(reps + 1)) {
  prediction.frame$count.3s[prediction.frame[i] == 3] <- prediction.frame$count.3s[prediction.frame[i] == 3] + 1
  prediction.frame$count.1s[prediction.frame[i] == 1] <- prediction.frame$count.1s[prediction.frame[i] == 1] + 1
}
# Default everything to the majority class 2, then boost 1s and 3s that
# received enough votes across the `reps` forests.
final.predict <- rep(2, nrow(test))
final.predict[prediction.frame$count.1s > 1] <- 1
length(which(final.predict == 1))
final.predict[prediction.frame$count.3s > 5] <- 3
length(which(final.predict == 1))
length(which(final.predict == 3))
# Rows with enough votes for both 1 and 3 fall back to class 2.
final.predict[prediction.frame$count.3s > 5 & prediction.frame$count.1s > 1] <- 2
# Now let's see if this matches with the observed frequencies of the 1s, 2s etc in
# the training data
table.final.predict <- as.matrix(table(final.predict))
table.final.predict <- cbind(table.final.predict, table.final.predict/nrow(test))
table.training <- as.matrix(table(newspop$popularity))
table.training <- cbind(table.training, table.training/nrow(newspop))
# Well, we still have too many 2s..... *but* we have increased the numbers of 1s and 3s
# in our predictions. The question is: have we chosen the correct ones to predict?
# Tune in next week....
sample$popularity <- final.predict
write.csv(sample, 'finalrforestextra.csv', row.names = FALSE)
|
0e7393351c5a3817c80944a4022a63c15aa45bd7 | 370b0324d777c264082977849c9232a24593e668 | /regressionPack/R/printRegression.R | 47d074bfcc3eea860d3e1db160b9b04d5c82ccb6 | [] | no_license | eozdemir/RPackageProblemSet | 812fc71e12217200038c8a37c8ff229c1d56775f | cb8f94a03155e475985334b7ae30256dbee11f6b | refs/heads/master | 2020-05-20T19:29:24.078196 | 2014-03-18T02:20:07 | 2014-03-18T02:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 987 | r | printRegression.R | #' Prints summary info for Regression class objects
#'
#' @usage print(x)
#'
#' @param x A Regression class object
#'
#' @author Elif Ozdemir: \email{eozdemir@wustl.edu}
#' @seealso \code{\link{showRegression}}
#' @rdname printRegression
#' @aliases printRegression, Regression-method
#' @export
setGeneric(name="printRegression",
def=function(object)
{standardGeneric("printRegression")}
)
#' @export
setMethod("printRegression", "Regression",
definition=function(object){
cat("Number of observations:", length(object@y), "\n")
cat("Number of regressions:", length(object@coef), "\n")
cat("Maximum R-squared:", max(object@Rsquare), "\n")
cat("Minimum R-squared:", min(object@Rsquare), "\n")
cat("Mean coefficient:\n")
sapply(1:nrow(object@coef), function(x){
cat(mean(object@coef[x,],na.rm=TRUE),"\n")
}) #end of sapply
})#end of setMethod |
170a346037eadf80769c4b5d75ab2a9828365a30 | 44728f5dca2dccca87bc5e5c6ebaa16e5d6518bd | /man/tabcont.qual_.Rd | 02cae5a09774204af3ed54cf4573ebc9860b5183 | [] | no_license | Marie-PerrotDockes/RACCoON | 36ce73f2c75784121b19b5e9f0484d32fd8823bf | 4df1c0be28ee03a8d6674e6a86a48b3df6315340 | refs/heads/master | 2020-05-02T19:56:37.030582 | 2019-01-23T16:44:21 | 2019-01-23T16:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 294 | rd | tabcont.qual_.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tabcont.qual_.R
\name{tabcont.qual_}
\alias{tabcont.qual_}
\title{Title}
\usage{
tabcont.qual_(data, x_all, x2, xall_name, x2_name, nb_dec, pcol, plig,
ptot, ...)
}
\arguments{
\item{...}{}
}
\description{
Title
}
|
c2b4bf6ce5c5921e680dc6750a8b3069d0ce2d33 | e18ace5496bcaaeb111df0e49c152a503488c79e | /man/prepCql.Rd | 87fd1078dc8cb92f21709daa536c68901b21474d | [] | no_license | patzaw/neo2R | ad4ded8adddde2ed928a258fe502c7a8b73a33c0 | 514b11b721a5b1f66db00b713487f285d9d004d4 | refs/heads/master | 2023-02-25T15:52:42.581788 | 2023-02-10T16:09:44 | 2023-02-10T16:09:44 | 119,698,430 | 6 | 2 | null | 2022-11-23T06:17:28 | 2018-01-31T14:31:57 | R | UTF-8 | R | false | true | 490 | rd | prepCql.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepCql.R
\name{prepCql}
\alias{prepCql}
\title{Prepares a CQL query from a character vector}
\usage{
prepCql(...)
}
\arguments{
\item{...}{character vectors with CQL commands}
}
\value{
A well-formatted CQL query
}
\description{
Prepares a CQL query from a character vector
}
\examples{
prepCql(c(
"MATCH (n)",
"RETURN n"
))
}
\seealso{
\code{\link[=cypher]{cypher()}} and \code{\link[=readCql]{readCql()}}
}
|
42b524ac9136eb122930c3331b47decb1f385998 | ec6593750c1ea49cb47c3c93bbd2c87efc5cfc6e | /R/raw_data.R | c4dfda6a4ad19cc8e7e5461ac301197716cc66db | [] | no_license | jamesalsbury/Food-Hygiene-Project | 122ff194d0cb3fbe937bce2c96f5c59fb799f794 | 0c5bb4cae4d3ebee9856dc673d0f4b027617c8e1 | refs/heads/master | 2023-04-14T15:49:11.661968 | 2021-04-21T10:12:08 | 2021-04-21T10:12:08 | 299,603,202 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 290 | r | raw_data.R | library(dplyr)
Eng_Wal_NI_Data <- readRDS("data/Eng_Wal_NI_data.rds")
Eng_Wal_NI_Data <- Eng_Wal_NI_Data %>%
mutate(rawScore = s_hygiene + s_structural + s_management)
saveRDS(Eng_Wal_NI_Data, file="data/Eng_Wal_NI_data.rds")
notNA <- Eng_Wal_NI_Data %>%
filter(!is.na(rawScore))
|
a4cfd905d79a329499a8508e04e6f638544a03b3 | 392bfbe08220f1584ab2a9b694e64df12e4def71 | /p9.r | 116a8732632418014e3d0dc3560a006b08d0cdd4 | [] | no_license | theQuarky/R-programming | 734551313de86ae77cf2bd10feda9dba4170cfff | 6c968e648906d0158ed6aeba6d2d2cf3f246a956 | refs/heads/master | 2023-01-08T05:03:47.081992 | 2017-09-26T00:40:45 | 2017-09-26T00:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 145 | r | p9.r | num=as.integer(readline(prompt="Enter number for finding factorial :"))
# Accumulate num! iteratively. seq_len() yields an empty sequence when
# num == 0, so 0! correctly stays 1 (the original 1:num would iterate over
# c(1, 0) and zero out the result).
fact = 1
for (i in seq_len(num))
{
  fact = fact * i
}
cat(paste(num,"!","=",fact,"\n")) |
bff982326930e2603a969f8aa27a310be0a2c5a0 | 44598c891266cd295188326f2bb8d7755481e66b | /DbtTools/nanHandling/man/allnans.Rd | 34b54aa31f7bad6ed676c34ae2025d16b1764965 | [] | no_license | markus-flicke/KD_Projekt_1 | 09a66f5e2ef06447d4b0408f54487b146d21f1e9 | 1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4 | refs/heads/master | 2020-03-13T23:12:31.501130 | 2018-05-21T22:25:37 | 2018-05-21T22:25:37 | 131,330,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 486 | rd | allnans.Rd | \name{allnans}
\alias{allnans}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Count all NAs/NaNs }
\description{
Count NAs and NaNs in the data
}
\usage{
allnans(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ a mtraix or a vector }
}
\value{
\item{n}{number of NAs and NaNs}
}
\author{ Zornitsa Manolova }
\seealso{ See also \code{\link{allnotnans}}}
\keyword{ NA }
\keyword{ NaN }
\keyword{ count }
\keyword{ find } |
ebe2603acb820d8becdf2224a0799f4ca783d073 | 8b6eca47db727d02fd4f589b633f75a91ad8d523 | /man/sum_data.Rd | a57298d2a9076825254cef229fdb50a93005b35c | [] | no_license | wanjarast/accelerateR | 7c5394958eecd7d0b7263af6bc75889a1b473b83 | fe4813926f59dfe3505c6aa0c45da961443422db | refs/heads/master | 2022-07-03T09:36:54.493148 | 2022-06-16T14:30:17 | 2022-06-16T14:30:17 | 140,834,266 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,242 | rd | sum_data.Rd | \name{sum_data}
\alias{sum_data}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Summary statistics from e-obs raw acceleration data
%% ~~function to do ... ~~
}
\description{Calculate summary statistics from e-obs raw acceleration data. Statistical variables composition can be chosen at will. Read the Details for the necessary preparetions of the raw data.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{sum.data(data , time , stats , windowstart = 1 , burstcount = NULL , x = NULL , y = NULL , z = NULL , IntDur = NA , ID = NA , behaviour = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{data.frame of raw acceleration e-obs data exported without subseconds (for necessary format see Details.)}
\item{time}{column where the timestamps of each burst are stored - column name needs to be put in " "}
  \item{stats}{vector of statistical variables that should be calculated (for possible input see Details)}
\item{windowstart}{in case a sliding window is used within the bursts this parameter set the start of the window,
can be used in a loop to shift the starting value of the window}
  \item{IntDur}{duration of a single burst in seconds (only needed when the weighted mean should be calculated (functional but not advised))}
  \item{burstcount}{expected number of measurements per burst per axis (needed for Fast Fourier Transformation), also necessary for the sliding window approach to define window length}
  \item{x, y, z}{column in which the acceleration measurements for the axis are stored - column name needs to be put in " "}
\item{id}{ID the the focal animal (not needed for calculation)}
\item{behaviour}{if the raw data set has a column containing behaviour labels it can be named here and the labels will be added to the output data frame.}
%% ~~Describe \code{x} here~~
}
\details{This function will prepare the raw data from an e-obs acceleration tag for the use with machine learning. When the data from the tag is extracted from the logger.bin file it has to be in the format without subseconds. Every row of data belonging to the same timestamp need to have the same value in the time column.
To avoid confusion when data for the same time of day is recorded on several days, the date and time columns have to be combined into one column. The name of that column has to be specified as the time argument of the function.
The columns corresponding to the x-, y- and z-axis can be named at will, but the column name of every axis has to be specified. By default calculations for the y and z axes are disabled; in cases where 2 or 3 axes were measured, their names can be included. The predictors q, Pitch, and Roll depend on all three axes, so they will not be calculated for data sets with only 1 or 2 axes. The column names have to be put in quotes to be recognised.
The argument id will create an additional column with supporting information. This information can be left out if not needed or unknown.
The stats argument provides a handle to choose predictors that will be calculated for the model. Possible inputs are:
"all" will calculate the folling summary statistics: "mean","sd","max","min","range","cov","cor",
"meandiff","sddiff","mdocp","sdocp","Var","q","Pitch","Roll","Yaw","ICV","CV","Kurtosis",
"Skewness","ODBA"
"mean" for the mean of each axis
"sd" for the standard deviation of each axis
"max" for the maximum value of each axis
"min" for the minimum value of each axis
"range" for the difference between the maximum and minimum value of each axis
"cov" for the covariance between two axes for each combination of axes
"cor" for the correlation between two axes for each combination of axes
"meandiff" for the mean of the difference between two axes for each combination of axes
"sddiff" for the standard devidation of the difference between two axes for each combination of axes
"mdocp" for the mean difference of continues points for each axis
"sdocp" for the standard devidation of the difference of continues points for each axis
"Var" for the variance (1/N) of x, y and z
"CV" for the coefficient of variation of x, y and z
"ICV" for the inverse coefficient of variation of x, y and z
"q" for the square root of the sum of squares of x, y and z
"Pitch" for rotation of up and down
"Roll" for the rotation from side to side
"Yaw" for the rotation in the horizonal plane
"Kurtosis" for the kurtosis of x, y and z
"Skewness" for the skewness of x, y and z
"ODBA" for the overall dynamic body acceleration for all 3 axes
"FFT" for adding the positive half of the fast fourier spectrum to the predictor set, if this is used a burstcount has to be provided with burstcount = ...
This function can be used for a sliding window approach. For this the function can be put in a loop with the windowstart parameter set to i. The burstcount parameter will set the size of the window. The summary statistics will be calculated seperatly for every window fragment.
%% ~~ If necessary, more details than the description above ~~
}
\value{The output will be a data.frame with the summary statistics for a single time stamp in one row.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{Wanja Rast
%% ~~who you are~~
}
\note{The weighted mean was inspired by Anne Berger
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
acceleration <- data.frame(time = rep(seq(5),each=20) , x = runif(n = 100,min = 1900,max=2100) ,
y = runif(n = 100,min = 2100,max=2300) , z = runif(n = 100,min = 1800,max=2000))
sumstats <- sum.data(data=acceleration , time="time" , x="x" ,
y="y" , z="z" , stats=c("mean" , "sd" , "Var"))
}
|
b05fff0cce72530835d7f1da94ef59b532fae625 | a8ba437a77c7708fcdb04185f5b18890782feb02 | /inst/shinyapps/mdaplot/server.r | e10a66ba91f8f0989c58e61a14e5b78dab130717 | [] | no_license | reyzaguirre/rhep | 79248d3432fddea1a662f990375ed01c747375d7 | eef36c56b8f1132539a41c122aef9c9d09335e6b | refs/heads/master | 2022-10-05T16:30:43.998277 | 2022-08-26T18:38:07 | 2022-08-26T18:38:07 | 28,341,518 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,034 | r | server.r |
shiny::shinyServer(function(input, output) {
  # Shared simulated sample: 1000 skew-normal draws whose location (xi),
  # scale (omega) and slant (alpha) come from the UI inputs. Factoring this
  # into one reactive removes the duplicated generation code that previously
  # lived in both renderPlot blocks. set.seed(1) before each draw keeps the
  # sample reproducible, exactly matching the original behaviour.
  sampled_data <- shiny::reactive({
    set.seed(1)
    sn::rsn(1000, xi = input$media, omega = input$dest, alpha = input$asim)
  })

  # Histogram of the simulated sample.
  output$distPlot1 <- shiny::renderPlot( {
    x <- sampled_data()
    hist(x, breaks = 20, col = "darkgray", border = "white",
         xlim = c(-15, 15), main = "Histograma")
  })

  # Horizontal boxplot of the same sample.
  output$distPlot2 <- shiny::renderPlot( {
    x <- sampled_data()
    boxplot(x, col = "darkgray", ylim = c(-15,15),
            main = "Boxplot", horizontal = TRUE)
  })
})
|
009d2c652a98d8f3ddbc51e9d66b915997752642 | d0899d9f443505a95008c8022687344f50e734ea | /tests/testthat/test-watershed.R | d02320225cc8bf93b820be8f355da8f4ff34dd24 | [
"MIT"
] | permissive | mtalluto/WatershedTools | 8603ef5352db423ad26458d2f04e88d165332a4f | 0350c992856b13beb03bd372858562d5abd84238 | refs/heads/main | 2023-02-08T16:12:45.065078 | 2022-05-19T13:01:56 | 2022-05-19T13:01:56 | 159,832,657 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 283 | r | test-watershed.R | ws <- readRDS(system.file("testdata/testWS.rds", package="WatershedTools"))
test_that("Topology functions", {
  skip_on_cran()
  # Use every confluence pixel of the test watershed as a query site.
  pts <- confluences(ws)[, 'id']
  # Building the site-by-pixel distance matrix must not raise.
  expect_error(dmat <- siteByPixel(ws, pts), regex = NA)
  # Regression check on the matrix total against a known value.
  expect_equal(sum(dmat, na.rm = TRUE), 3754613, tolerance = 0.001)
})
|
755ca841b7df0d62c1427cea74321384c638fa86 | 3d0cc57b1908da75fc1bd5a1ad3b074a32835055 | /man/listMetaGenomes.Rd | a3372fe535fcf0558f0e2897b0e119ed939c9f05 | [] | no_license | flopezo/biomartr | c964109dfa2356559ae7f566664313615080ef86 | cbd022783c7e1e779096ea3e8abbc2a790b5cb8b | refs/heads/master | 2020-12-24T12:40:15.877281 | 2016-10-31T17:00:35 | 2016-10-31T17:00:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,057 | rd | listMetaGenomes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listMetaGenomes.R
\name{listMetaGenomes}
\alias{listMetaGenomes}
\title{List available metagenomes on NCBI Genbank}
\usage{
listMetaGenomes(details = FALSE)
}
\arguments{
\item{details}{a boolean value specifying whether only the scientific names of stored metagenomes shall be returned
(\code{details = FALSE}) or all information such as "organism_name","bioproject", etc (\code{details = TRUE}).}
}
\description{
List available metagenomes on NCBI GenBank. NCBI GenBank allows users
to download entire metagenomes of several metagenome projects. This function lists
all available metagenomes that can then be downloaded via \code{\link{getMetaGenomes}}.
}
\examples{
\dontrun{
# retrieve available metagenome projects at NCBI Genbank
listMetaGenomes()
# retrieve detailed information on available metagenome projects at NCBI Genbank
listMetaGenomes(details = TRUE)
}
}
\author{
Hajk-Georg Drost
}
\seealso{
\code{\link{getMetaGenomes}}, \code{\link{getMetaGenomeSummary}}
}
|
36d19f4bb3931b0245123039fc4315349d470e0a | 3db19b3f22c3709a05afd02a5b5e1152c7dd6ea8 | /man/mapHourlyMSLP.Rd | 1d1997996c8ece2e0563ade62c38e7544d5f3cad | [] | no_license | rijaf-iri/mtoadtNMA | e01cc9dbdd3ddc2c6f6734497b8570d36bf3ae7b | 814740263f72a310ff35854f39d81b6e547227d0 | refs/heads/main | 2023-07-27T07:30:22.981829 | 2021-09-17T02:43:20 | 2021-09-17T02:43:20 | 401,233,451 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 495 | rd | mapHourlyMSLP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_plotmap_mslp.R
\name{mapHourlyMSLP}
\alias{mapHourlyMSLP}
\title{Compute hourly mean sea level pressure.}
\usage{
mapHourlyMSLP(time, aws_dir)
}
\arguments{
\item{time}{the time to display in the format "YYYY-MM-DD-HH"}
\item{aws_dir}{full path to the directory containing ADT.\cr
Example: "D:/NMA_AWS_v2"}
}
\value{
a JSON object
}
\description{
Compute hourly mean sea level pressure data to display on map.
}
|
de59b5277021691debdb0d3a119c6ad09bcfb187 | 5a01fb018387de190e43d512a8085ff31db46a2e | /man/text_year_minmax.Rd | be1bc6454b85d7812abb455f648c1cf6bfc8e40d | [
"MIT"
] | permissive | YuanchenZhu2020/antgreens | cc5abb04ee2773ffecf5791754fadd3fd80bd1f9 | 60057fc84c1d680d6432386d9b212d74d803c5a8 | refs/heads/main | 2023-07-20T07:33:29.647260 | 2021-09-05T15:09:44 | 2021-09-05T15:09:44 | 388,802,783 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 776 | rd | text_year_minmax.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_utils.R
\name{text_year_minmax}
\alias{text_year_minmax}
\title{Generate Text Describing Minimum and Maximum Rate Item By Year}
\usage{
text_year_minmax(data, year_range, rate_text, item_name, rate_name)
}
\arguments{
\item{data}{a data frame}
\item{year_range}{numeric vector represents the year.}
\item{rate_text}{character. The character used in the text to represent the rate name.}
\item{item_name}{character. The name of column in \code{data} storing the name of item.}
\item{rate_name}{character. The name of column in \code{data} storing the value of rate.}
}
\value{
a character of sentence.
}
\description{
Generate text describing minimum and maximum rate item for each year.
}
|
98a8705c8a01c2b3909d73f645da4776098620dc | f3fbe9e4f49764e088531485f14c8f0962569420 | /R-code/ar_coeffs_to_sdf_single_freqs.R | e71bb173ab09f99ce7bf885c1040453f29805e89 | [] | no_license | dmn001/sauts | d3a678091a081679561db2ada077a7dc9630847d | 2797e0ab943fb02ebea82df32bd1cc2748c54dcc | refs/heads/master | 2023-04-30T12:13:14.502267 | 2021-05-22T16:11:15 | 2021-05-22T16:11:15 | 369,851,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 638 | r | ar_coeffs_to_sdf_single_freqs.R | ### compute SDF for AR process over one or more selected frequencies
## Evaluate the spectral density function (SDF) of a stationary AR(p) process
## at one or more frequencies:
##   S(f) = innov_var * delta_t / |1 - sum_{j=1}^p coeffs[j] * exp(-i*2*pi*f*delta_t*j)|^2
##
## Args:
##   f         numeric vector of frequencies at which to evaluate the SDF
##   innov_var innovation (white-noise) variance of the process
##   coeffs    AR coefficients phi_1, ..., phi_p (default: an AR(4) example)
##   delta_t   sampling interval
## Returns a numeric vector the same length as f.
ar_coeffs_to_sdf_single_freqs <- function(f, innov_var = 0.002,
                                          coeffs = c(2.7607, -3.8106, 2.6535, -0.9238),
                                          delta_t = 1)
{
  p <- length(coeffs)
  ## vapply (rather than sapply) guarantees a numeric result even for empty f
  ## (sapply returned list() there); the inner parameter is named `freq` so it
  ## no longer shadows the vector `f`, and `imaginary` is spelled out instead
  ## of relying on partial matching of `imag`.
  vapply(f, function(freq) {
    denom <- 1 - sum(coeffs * exp(complex(imaginary = -2 * pi * freq * delta_t * seq_len(p))))
    innov_var * delta_t / Mod(denom)^2
  }, numeric(1))
}
### deprecated version that allows only one frequency
###
### ar_coeffs_to_sdf_single_freq <- function(f,innov_var=0.002,coeffs=c(2.7607, -3.8106, 2.6535, -0.9238),delta_t=1)
### {
### p <- length(coeffs)
### innov_var*delta_t/abs( 1- sum(coeffs*exp(complex(imag=-2*pi*f*delta_t*(1:p)))))^2
### }
|
b165981764f418599a4fe562f4978ef37ce21627 | 35d0ef7ca1dff2cd659f3e0b8a69a1eff9a2c95b | /plot3.R | 0679843e6f56bf52c4dbe8f2fcea71afc950fa5d | [] | no_license | cschne05/ExData_Plotting1 | 257d5976c1de045f7536b01d7ef6514e9cfa6ad1 | 3edc976db87aecd075409d98f45aea47edea68c0 | refs/heads/master | 2021-01-12T19:52:24.591159 | 2014-11-09T20:52:06 | 2014-11-09T20:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,348 | r | plot3.R | #Course Project 1
#Exploratory data analysis
#Script for plotting individual sub metering over time from household power consumption data
#Load lubridate package
library(lubridate)
#Read in data
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
#Subset data by date
dataFeb <- data[(data$Date == "1/2/2007" | data$Date == "2/2/2007"),]
#Make Date and Time a single date vector
dataDay <- dataFeb$Date
dataDay <- as.character(dataDay)
dataTime <- dataFeb$Time
dataTime <- as.character(dataTime)
daytime <- paste(dataDay, dataTime)
daytime_format <- parse_date_time(daytime, "dmy, HMS")
#Make submetering data numeric
dataSub1 <- as.character(dataFeb$Sub_metering_1)
dataSub1 <- as.numeric(dataSub1)
dataSub2 <- as.character(dataFeb$Sub_metering_2)
dataSub2 <- as.numeric(dataSub2)
dataSub3 <- as.character(dataFeb$Sub_metering_3)
dataSub3 <- as.numeric(dataSub3)
#Plot sub metering data over day
plot(daytime_format, dataSub1,type = "n", xlab = "", ylab = "Energy sub metering")
lines(daytime_format, dataSub1)
lines(daytime_format, dataSub2, col = "red")
lines(daytime_format, dataSub3, col = "blue")
#Add legend
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 1, col = c("black", "red", "blue"))
#Create png file of plot
dev.copy(png, file = "plot3.png")
dev.off() |
3a068df8d857a16bbaf43f6493943a3b3bc40957 | e000337612c06a9b90e8e89e1bdfc1dded429b07 | /R/app_ui.R | 41ae8998fad08ccba0ed5531b8afdc1f048f37cf | [
"MIT"
] | permissive | PascalCrepey/BiostatsAppsMPH | aa8c662a43ce5ad3da46580fae6b8350d8d248a8 | 3085e54d66fff345b10489874d881f443effbc83 | refs/heads/master | 2020-07-24T22:15:54.414219 | 2019-09-24T14:15:44 | 2019-09-24T14:15:44 | 208,065,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,816 | r | app_ui.R | #' @import shiny
#' @import shinydashboard
#' @import shinyWidgets
app_ui <- function() {
sidebar <- dashboardSidebar(
sidebarMenu(id = "sideMenu",
menuItem("Logistic Regression", icon = icon("th"), tabName = "LogisticRegression",
badgeLabel = "new", badgeColor = "green", selected = TRUE),
menuItem("Probit Regression", icon = icon("chart-line"), tabName = "ProbitRegression",
badgeLabel = "new", badgeColor = "green", selected = FALSE),
menuItem("Power", icon = icon("th"), tabName = "Power",
badgeLabel = "new", badgeColor = "green", selected = FALSE)
)
)
body <- dashboardBody(tabItems(
tabItem(tabName = "LogisticRegression",
fluidPage(
mod_logistic_regression_ui("logistic_regression_ui_1")
)
),
tabItem(tabName = "ProbitRegression",
fluidPage(
mod_probit_regression_ui("probit_regression_ui_1")
)
),
tabItem(tabName = "Power",
fluidPage(
mod_power_ui("power_ui_1")
)
)
))
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
dashboardPage(
dashboardHeader(title = "BiostatApps"),
sidebar,
body
)
)
}
#' @import shiny
golem_add_external_resources <- function(){
  # Collects app-wide external resources (golem's JS helpers, the package
  # favicon, and optionally static CSS/JS) into a single head tag that
  # app_ui() prepends to the dashboard page.
  # Uncomment to serve static files from inst/app/www under the 'www' prefix:
  # addResourcePath(
  #   'www', system.file('app/www', package = 'BiostatApps')
  # )

  tags$head(
    golem::activate_js(),
    golem::favicon()
    # Add here all the external resources
    # If you have a custom.css in the inst/app/www
    # Or for example, you can add shinyalert::useShinyalert() here
    #tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
  )
}
823cd3d6aa1d691bf0d467a5ab0122bc3d75ae29 | cef3b5e2588a7377281a8f627a552350059ca68b | /paws/man/greengrassv2_cancel_deployment.Rd | 827a23c8cd2b8621f1fe7916082983c0bdffaa24 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sanchezvivi/paws | b1dc786a9229e0105f0f128d5516c46673cb1cb5 | 2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05 | refs/heads/main | 2023-02-16T11:18:31.772786 | 2021-01-17T23:50:41 | 2021-01-17T23:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 671 | rd | greengrassv2_cancel_deployment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/greengrassv2_operations.R
\name{greengrassv2_cancel_deployment}
\alias{greengrassv2_cancel_deployment}
\title{Cancels a deployment}
\usage{
greengrassv2_cancel_deployment(deploymentId)
}
\arguments{
\item{deploymentId}{[required] The ID of the deployment.}
}
\description{
Cancels a deployment. This operation cancels the deployment for devices
that haven't yet received it. If a device already received the
deployment, this operation doesn't change anything for that device.
}
\section{Request syntax}{
\preformatted{svc$cancel_deployment(
deploymentId = "string"
)
}
}
\keyword{internal}
|
b75aa043167cf2c9de6ae9fd243c115897cae0df | d1b1cead5e9525fbfec5b7df989ebc0a09c8d782 | /man/createExperiment.Rd | 7dad65b2ba765712423ddda57ed7775371659c41 | [] | no_license | AmundsenJunior/pfsrsdk | 0ce8195f9d9a96562d31992f44303ee151bd4111 | d799defb9447a4e70cb2906205f6023020fc621a | refs/heads/master | 2020-06-06T00:24:06.491018 | 2019-07-15T16:59:12 | 2019-07-15T20:37:59 | 192,584,459 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,546 | rd | createExperiment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createExperiment.R
\name{createExperiment}
\alias{createExperiment}
\title{createExperiment - Create a new instance of an experiment.}
\usage{
createExperiment(coreApi, experimentType, assayType, assayBarcode,
protocolType, protocolBarcode, body = NULL, fullMetadata = FALSE,
...)
}
\arguments{
\item{coreApi}{coreApi object with valid jsessionid}
\item{experimentType}{experiment type to get as character string}
\item{assayType}{assay type}
\item{assayBarcode}{assay barcode}
\item{protocolType}{protocol type}
\item{protocolBarcode}{protocol barcode}
\item{body}{values for experiment attributes and associations as a list of key-values pairs}
\item{fullMetadata}{get full metadata, default is FALSE}
\item{...}{additional arguments passed to \code{apiPOST}}
}
\value{
List of length 2, containing \code{entity} and \code{response} objects:
\itemize{
\item{\code{entity}} is the HTTP response content.
\item{\code{response}} is the entire HTTP response.
}
}
\description{
\code{createExperiment} Creates a new experiment.
}
\details{
\code{createExperiment} Creates a new instance of an entity.
}
\examples{
\dontrun{
api <- coreAPI("PATH TO JSON FILE")
login <- authBasic(api)
experiment <- createExperiment(
login$coreApi,
"Experiment_Type",
"Assaybarcode",
"Protocolbarcode"
)
logOut(login$coreApi)
}
}
\author{
Craig Parman info@ngsanalytics.com
Natasha Mora natasha.mora@thermofisher.com
Scott Russell scott.russell@thermofisher.com
}
|
4636b955dacbfdf813fc5b3437776164ce6da36a | 184180d341d2928ab7c5a626d94f2a9863726c65 | /valgrind_test_dir/eweib_trunc-test.R | 990443b65de3cb22ede8747d6d7cd39500ff3be3 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 229 | r | eweib_trunc-test.R | function (a, b, k, lambda)
{
    # Append this call's arguments to the `eweib_trunc` log kept in the
    # global `data.env` environment (environments have reference semantics,
    # so the assignment below mutates data.env in place), then delegate the
    # actual computation to the compiled mixR routine.
    log_env <- get("data.env", .GlobalEnv)
    next_slot <- length(log_env[["eweib_trunc"]]) + 1
    log_env[["eweib_trunc"]][[next_slot]] <- list(a = a, b = b, k = k, lambda = lambda)
    .Call("_mixR_eweib_trunc", a, b, k, lambda)
}
|
e9bca93375858532ecc3d6dd535700ad31f74425 | 5247d313d1637170b6bbc5e367aba46c88725efd | /man/tw_api_get_users_search.Rd | 1cad0d7aea955d91e0da61982f7a294ea0cf385e | [] | no_license | fentonmartin/twitterreport | dac5c512eea0831d1a84bef8d2f849eab2b12373 | 5ddb467b8650289322ae83e0525b4ff01fba0d1d | refs/heads/master | 2021-08-22T04:25:01.834103 | 2017-11-29T07:47:43 | 2017-11-29T07:47:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,566 | rd | tw_api_get_users_search.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/twitter_api.R
\name{tw_api_get_users_search}
\alias{tw_api_get_users_search}
\title{Search users}
\usage{
tw_api_get_users_search(q, twitter_token, page = NULL, count = 20,
quietly = TRUE, ...)
}
\arguments{
\item{q}{Query}
\item{twitter_token}{An object of class \link[httr:oauth1.0_token]{Token1.0} as
generated by \link{tw_gen_token}.}
\item{page}{Page number to retrieve}
\item{count}{Number of accounts per page}
\item{quietly}{Whether or not to show the 'success' message}
\item{...}{Further parameters to be passed to \code{\link[=GET]{GET()}}}
}
\value{
A list of twitter accounts
}
\description{
Search users via approximate string matching
}
\details{
\subsection{From Twitter}{Provides a simple, relevance-based search interface to public user
accounts on Twitter. Try querying by topical interest, full name, company name,
location, or other criteria. Exact match searches are not supported.}
}
\references{
Twitter REST API (GET users/search) https://dev.twitter.com/rest/reference/get/users/search
}
\seealso{
Other API functions: \code{\link{tw_api_get_followers_ids}},
\code{\link{tw_api_get_followers_list}},
\code{\link{tw_api_get_friends_ids}},
\code{\link{tw_api_get_search_tweets}},
\code{\link{tw_api_get_statuses_sample}},
\code{\link{tw_api_get_statuses_user_timeline}},
\code{\link{tw_api_get_trends_place}},
\code{\link{tw_api_get_users_show}},
\code{\link{tw_api_trends_available}},
\code{\link{tw_gen_token}}
}
\concept{API functions}
|
3366bea4d5de1d79c99ad075a6ab5aa44b905b85 | 9e02faab697c634d341c04948b72e424839b0889 | /man/evaluateLogConDens.Rd | 54ea742ff8d11867202f1abc332d7f556cc6f0a9 | [] | no_license | cran/logcondens | ee14d41d1f6d585340ba6f553086fc9fbcda3f93 | fb013534190ee54999c1da3bbaa3d386778502a5 | refs/heads/master | 2023-01-12T04:43:18.493903 | 2023-01-06T17:50:48 | 2023-01-06T17:50:48 | 17,697,149 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,072 | rd | evaluateLogConDens.Rd | \name{evaluateLogConDens}
\alias{evaluateLogConDens}
\title{Evaluates the Log-Density MLE and Smoothed Estimator at Arbitrary Real Numbers xs}
\description{Based on a \code{"dlc"} object generated by \code{\link{logConDens}}, this function computes the values of
\deqn{\widehat \phi_m(t)}{\hat \phi_m(t)}
\deqn{\widehat f_m(t) = \exp(\widehat \phi_m(t))}{\hat f_m(t) = exp(\hat \phi_m(t))}
\deqn{\widehat F_m(t) = \int_{x_1}^t \exp(\widehat \phi_m(x)) dx}{\hat F_m(t) = int_{x_1}^t exp(\hat \phi_m(x)) dx}
\deqn{\widehat f_m^*(t) = \exp(\widehat \phi_m^*(t))}{\hat f_m^*(t) = exp(\hat \phi_m^*(t))}
\deqn{\widehat F_m^*(t) = \int_{x_1}^t \exp(\widehat \phi_m^*(x)) dx}{\hat F_m^*(t) = int_{x_1}^t \exp(\hat \phi_m^*(x)) dx}
at all real numbers \eqn{t} in \code{xs}. The exact formula for \eqn{\widehat F_m}{\hat F_m} and \eqn{t \in [x_j,x_{j+1}]} is
\deqn{\widehat F_m(t) = \widehat F_m(x_j) + (x_{j+1}-x_j) J\Big(\widehat \phi_j, \widehat \phi_{j+1}, \frac{t-x_j}{x_{j+1}-x_j} \Big)}{\hat F_m(t) = \hat F_m(x_j) + (x_{j+1}-x_j) J(\hat \phi_j, \hat \phi_{j+1}, (t-x_j)/(x_{j+1}-x_j))}
for the function \eqn{J} introduced in \code{\link{Jfunctions}}. Closed formulas can also be given for \eqn{\widehat f_m^*(t)}{\hat f_m^*(t)}
and \eqn{\widehat F_m^*(t)}{\hat F_m^*(t)}.
}
\usage{evaluateLogConDens(xs, res, which = 1:5, gam = NULL, print = FALSE)}
\arguments{
\item{xs}{Vector of real numbers where the functions should be evaluated at.}
\item{res}{An object of class \code{"dlc"}, usually a result of a call to \code{logConDens}.}
\item{which}{A (sub-)vector of \code{1:5} specifying which of the above quantities should be computed.}
\item{gam}{Only necessary if \code{smoothed = TRUE}. The standard deviation of the normal kernel. If equal to
\code{NULL}, \code{gam} is chosen such that the variances of the original sample \eqn{x_1, \ldots, x_n}
and \eqn{\widehat f_n^*}{\hat f_n^*} coincide. See \code{\link{logConDens}} for details.}
\item{print}{Progress in computation of smooth estimates is shown.}
}
\value{Matrix with rows \eqn{(x_{0, i}, \widehat \phi_m(x_{0, i}), \widehat f_m(x_{0, i}), \widehat F_m(x_{0, i}), \widehat f_m^*(x_{0, i}), \widehat F_m^*(x_{0, i}))}{(x_{0, i}, \hat \phi_m(x_{0, i}), \hat f_m(x_{0, i}), \hat F_m(x_{0, i}), \hat f_m^*(x_{0, i}), \hat F_m^*(x_{0, i}))}
where \eqn{x_{0,i}} is the \eqn{i}-th entry of \code{xs}.}
\author{
Kaspar Rufibach, \email{kaspar.rufibach@gmail.com}, \cr \url{http://www.kasparrufibach.ch}
Lutz Duembgen, \email{duembgen@stat.unibe.ch}, \cr \url{https://www.imsv.unibe.ch/about_us/staff/prof_dr_duembgen_lutz/index_eng.html}}
\examples{
## estimate gamma density
set.seed(1977)
x <- rgamma(200, 2, 1)
res <- logConDens(x, smoothed = TRUE, print = FALSE)
## compute function values at an arbitrary point
xs <- (res$x[100] + res$x[101]) / 2
evaluateLogConDens(xs, res)
## only compute function values for non-smooth estimates
evaluateLogConDens(xs, res, which = 1:3)
}
\keyword{htest}
\keyword{nonparametric}
|
e650ada8b272dd085cff4ab4b67215c2496f30fd | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /highfrequency/man/sampleTDataRawMicroseconds.Rd | 50637e481d4c81d09d9fef8ca31d7340643064c3 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 451 | rd | sampleTDataRawMicroseconds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sampleTDataRawMicroseconds}
\alias{sampleTDataRawMicroseconds}
\title{Sample of raw trades for stock XXX for 2 days}
\format{
A data.table object.
}
\usage{
sampleTDataRawMicroseconds
}
\description{
An imaginary data.table object containing the raw trades for stock XXX for 2 days, in the typical NYSE TAQ database format.
}
\keyword{datasets}
|
0f5904e35aa248b160b8660613941a1ce7ba5ad5 | f74392892aa553623ff9c73d99834ff0abd48179 | /scripts/01_data_preparation/01-data_cleaning-survey.R | eb94a48d5a3a6ea1e6d16b3f2672916ef401ef77 | [
"MIT"
] | permissive | dojennifer/sta304-ps4 | a5ed1136a45d9340edc5f20e2d3af9980cec1862 | 00148f88cc79585fb022e46ad950900a26c17c49 | refs/heads/main | 2023-07-13T06:05:41.133319 | 2021-08-25T23:21:23 | 2021-08-25T23:21:23 | 329,102,747 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,734 | r | 01-data_cleaning-survey.R | #### Preamble ####
# Purpose: Prepare and clean the survey data (nationscape) downloaded from voterstudygroup.org
# Author: Annie Collins, Jennifer Do, Andrea Javellana, and Wijdan Tariq
# Data: 2 November 2020
# Contact: annie.collins@mail.utoronto.com, jenni.do@mail.utoronto.com,
# andrea.javellana@mail.utoronto.com, wijdan.tariq@mail.utoronto.com
# License: MIT
# Pre-requisites:
# - Need to have downloaded the nationscape data set from voterstudygroup.org
# and save the folder that you're interested in to inputs/data
#### Workspace setup ####
library(haven)
library(tidyverse)
library(labelled)
# Read in the raw Nationscape survey export (Stata .dta, via haven::read_dta).
raw_UCLA <- read_dta("inputs/data/ns20200625.dta")
# Just keep some variables that may be of interest (change
# this depending on your interests)
names(raw_UCLA)
reduced_UCLA <- 
  raw_UCLA %>% 
  select(vote_2020, # intended 2020 presidential vote
         employment, # employment status code
         foreign_born,# born in the USA or abroad
         gender,# respondent gender code
         census_region, # census region of residence
         hispanic,# Hispanic-origin code (folded into race below)
         race_ethnicity, # detailed race/ethnicity code
         household_income,# household income bracket
         education, # highest education level code
         state, # state of residence (two-letter abbreviation)
         age # age in years
         )
# Work on a copy so the reduced selection is kept intact.
UCLA <- reduced_UCLA
# Keep only respondents intending to vote for Trump (1) or Biden (2).
UCLA <- subset(UCLA, vote_2020 < 3)
# Recode the outcome as binary: Trump stays 1, Biden (2) becomes 0.
UCLA$vote_2020[UCLA$vote_2020 == 2] <- 0
UCLA$vote_2020 <- as.numeric(UCLA$vote_2020)
# Delete responses who picked "other" as employment (codes above 8).
# (A stray `state.abb` expression that was evaluated and discarded on this
# line has been removed; it had no effect on the data.)
UCLA <- subset(UCLA, employment <= 8)
# EDUCATION ----
# Collapse the 11 raw education codes into six ordered bins, then store the
# bin index (1-6) as a plain numeric score.
UCLA$education <- cut(UCLA$education, c(0, 2, 3, 4, 6, 9, 11), labels = c(1:6))
# Bin meanings, lowest to highest:
# 'less than high school', 'some high school', 'completed high school',
# 'some post-secondary', 'post-secondary degree', 'post-graduate degree'
UCLA$education <- as.numeric(UCLA$education)

# GENDER ----
UCLA$gender <- cut(UCLA$gender, c(0, 1, 2))
levels(UCLA$gender) <- c('female', 'male')
table(UCLA$gender)

# AGE ----
# Put age into five brackets.
UCLA$age <- cut(UCLA$age, c(17, 29, 44, 59, 74, 93))
levels(UCLA$age) <- c('18 to 29', '30 to 44',
                      '45 to 59', '60 to 74',
                      '74 and above')

# BIRTHPLACE ----
UCLA$foreign_born <- cut(UCLA$foreign_born, c(0, 1, 2))
levels(UCLA$foreign_born) <- c('USA', 'another country')
table(UCLA$foreign_born)

# RACE ----
# Hispanic origin, collapsed to a binary indicator.
UCLA$hispanic <- cut(UCLA$hispanic, c(0, 1, 15))
levels(UCLA$hispanic) <- c('not hispanic', 'hispanic')
table(UCLA$hispanic)

# Simplify/group the detailed race codes. cut() requires unique labels, so a
# temporary '... 1' suffix keeps the two Asian/Pacific-islander bins distinct;
# they are merged into one category by the gsub() immediately after.
UCLA$race_ethnicity <- cut(UCLA$race_ethnicity, c(0, 1, 2, 3, 4, 5, 14, 15))
levels(UCLA$race_ethnicity) <- c('white', 'black',
                                 'native american', 'other asian/pacific islander',
                                 'chinese', 'other asian/pacific islander 1', 'other')
UCLA$race_ethnicity <- gsub('other asian/pacific islander 1', 'other asian/pacific islander', UCLA$race_ethnicity)
table(UCLA$race_ethnicity)

# Treat Hispanic origin as its own race category, overriding the race code.
# (A redundant self-assignment `UCLA$race_ethnicity <- UCLA$race_ethnicity`
# was removed here; it was a no-op.)
UCLA$race_ethnicity[UCLA$hispanic == 'hispanic'] <- "hispanic"
UCLA$race_ethnicity <- as.character(UCLA$race_ethnicity)
# Discard the hispanic helper column now that race_ethnicity carries it.
UCLA <-
  UCLA %>%
  select(vote_2020, # binary outcome: Trump = 1, Biden = 0
         employment, # employment status (recoded below)
         foreign_born, # birthplace
         gender,
         census_region, # UNFINISHED
         race_ethnicity, # race with hispanic folded in
         household_income,
         education, # numeric 1-6 education score
         state,
         age # age bracket
         )
table(UCLA$race_ethnicity)

# EMPLOYMENT ----
# Collapse the raw employment codes into three labels. cut() requires unique
# level labels, so temporary '...1'/'...2' suffixes are used and then merged
# away with the gsub() calls below.
UCLA$employment <- cut(UCLA$employment, c(0, 1, 3, 4, 5, 7, 8))
levels(UCLA$employment) <- c('employed', 'not in labor force',
                             'unemployed', 'employed1',
                             'not in labor force1', 'employed2')
table(UCLA$employment)
UCLA$employment <- gsub('employed1', 'employed', UCLA$employment)
UCLA$employment <- gsub('employed2', 'employed', UCLA$employment)
UCLA$employment <- gsub('not in labor force1', 'not in labor force', UCLA$employment)
table(UCLA$employment)

# STATE ----
# Replace state abbreviations with state names, adding "DC" to the
# state.abb vector and "district of columbia" to the state.name vector.
UCLA$state <- append(state.name, values = c("district of columbia"))[match(
  UCLA$state, append(state.abb, values = c("DC")))]
# Make all state names lowercase.
UCLA$state <- tolower(UCLA$state)
# Assign state names a numeric value between 1 and 51 in alphabetical order.
UCLA$state <- as.factor(UCLA$state)
levels(UCLA$state) <- c(1:51)
UCLA$state <- as.numeric(UCLA$state)
###################################################################################
# Convert remaining labelled (haven) columns into ordinary factors.
UCLA <- labelled::to_factor(UCLA)

# INCOME ----
# Quick sanity checks (visible only when run interactively).
sum(table(UCLA$household_income))
table(UCLA$state)
nrow(table(UCLA$state))
sum(table(UCLA$state))

# Create the clean output file.
write_csv(UCLA, "outputs/data/UCLA.csv")
9a41e7acbe6af798592ac35f0c402940f1fc8515 | 2c9c670c4f8076b70430d406b5c59912dc78695d | /man/fitBmeaBatch.Rd | b39ba5bc473507d43585dd16c80dff903d015d4c | [] | no_license | smped/BMEA | 60e10861df734e2ae869d9cff01fc3655f9a66d1 | 1fe0dfb8fda370e422867ede3153d53812257654 | refs/heads/master | 2023-04-17T19:22:13.253330 | 2018-10-21T17:38:23 | 2018-10-21T17:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,443 | rd | fitBmeaBatch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitBmeaBatch.R
\name{fitBmeaBatch}
\alias{fitBmeaBatch}
\title{Fit the BMEA model for a single batch of units}
\usage{
fitBmeaBatch(celSet, bgCelSet, units, conditions, contMatrix, ...,
paramToSave = c("c", "mu", "phi"), keepSims = FALSE, zGene = 4.265,
zExon = 1.645)
}
\arguments{
\item{celSet}{an \code{AffymetrixCelSet} with the data to be fit}
\item{bgCelSet}{a list with components \code{$lambda} & \code{$delta}.
Each of these must be an \code{AffymetrixCelSet} containing the means & standard deviations
for the background signal priors}
\item{units}{the units (i.e. genes) to be fit}
\item{conditions}{a vector of factors specifying which cell-type/condition each array in the \code{celSet} belongs to}
\item{contMatrix}{a contrast matrix for the summarised output}
\item{...}{used for passing further arguments such as \code{mcmcParam} to \code{runMCMC.BMEA}}
\item{paramToSave}{the model parameters to be saved for downstream processing.
The parameters "c", "mu" & "phi" will always be saved.}
\item{keepSims}{logical variable.
If \code{TRUE} all sims from the process & contrasts will be kept}
\item{zGene}{the zScore below which a gene is classified as not detectable above background}
\item{zExon}{the zScore below which an exon is classified as not detectable above background}
}
\value{
An object of class("BMEA.Batch"), which is a list with the following components:
\itemize{
\item{$celSet}{ the \code{celSet} being analysed, as supplied to the function}
\item{$summaries}{ a \code{list} with a component for each unit.
Each component contains the summary statistics for the unit, including the convergence statistics "rHat" & "nEff."}
\item{$logFC}{ a \code{list} with a component for each contrast supplied in \code{contMatrix}.
Each row contains the summary statistics for a single unit, for that contrast}
\item{$phiLogFC}{ a \code{list} with a component for each contrast supplied in \code{contMatrix}.
Each row represents an exon (group).}
\item{$conditions}{ the cell-types (or conditions) as factors, as supplied to the function}
\item{$units}{ a \code{data.frame} with the units fit & the corresponding unitNames.}
\item{$paramToSave}{ the parameters requested to be saved.}
\item{$sims}{ a \code{list} with a component for each unit.
If \code{keepSims=FALSE}, will return \code{NULL} for each component.}
}
}
\description{
Fits the BMEA model sequentially for more than one unit
}
\details{
This is the function used to fit the BMEA model to a batch of units (or genes).
Each unit is tested to see if it contains multiple exons, and is expressed detectably
above background before analysis.
For single exon genes, all exon-level terms are omitted from the model,
as the PLM model used for conventional 3' Arrays holds for these genes & can be used
with minimal computational effort.
Units that are not fitted are also removed from the output vector of units.
Restricting the parameters to be saved, via the \code{paramToSave} argument can significantly
save the memory requirements for large batches of genes.
This will default to the parameters "c", "mu" & "phi".
The signal parameter "S" is the most demanding on memory resources &
is generally not advised to be saved unless it is of specific interest.
}
\seealso{
\code{\link{fitBmeaSingle}}, \code{\link{writeBmeaBatch}}
}
|
9f2b805b0baf1562d0fae18dbad5c5ed4785c278 | 16f42cb9ce208dbf6d83aa570897b42fbac81cce | /scripts/03_table_2.R | f85bea589c0de4e58b15a6910475710e0747a13f | [] | no_license | soodoku/kirkuk | 7522cacb1a82ab7f50c549da9d7e57b21c80868c | 3c9e29aca10671f665f5b43079b43892945e9015 | refs/heads/master | 2020-03-17T07:20:49.666196 | 2019-05-24T16:21:05 | 2019-05-24T16:21:05 | 133,394,548 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,807 | r | 03_table_2.R | #
# Delib. in Kirkuk
# Table 2
#
# set directory
setwd(githubdir)
setwd("kirkuk/")
# Load libs
library(tidyr)
library(dplyr)
library(reshape2)
library(broom)
# Read in the data
source("scripts/01_recode.R")
# Table 2: Knowledge
# ----------------------
know <- paste0("know", 1:5)
know_all <- all_dat[, c(know, "know", "cond", "wave")] %>%
group_by(cond, wave) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
# Get Condition/Wave concat
know_all$cond <- paste0(know_all$cond, know_all$wave)
# Transpose
know_all_t <- know_all %>%
gather(key = var_name, value = value, 2:8) %>%
spread_(key = names(know_all)[1], value = "value") %>%
filter(var_name != "wave")
know_all_t$diff_delib <- know_all_t$delib2 - know_all_t$delib1
know_all_t$diff_delib_info <- know_all_t$delib_info2 - know_all_t$delib_info1
# Pooled t1
know_t1_pooled <- all_dat[, c(know, "know", "wave")] %>%
group_by(wave) %>%
filter(wave == 1) %>%
summarise_all(funs(mean(., na.rm = TRUE))) %>%
melt(variable.name = "var_name",
value.name = "t1_pooled")
# Merge t1 pooled and other results
know_all <- know_t1_pooled %>%
left_join(know_all_t) %>%
filter(var_name != "wave")
# p-values (no missing issue as missing = 0)
# ---------------------------------------------
tee_1 <- paste0(c(know, "know"), "_t1")
tee_2 <- paste0(c(know, "know"),"_t2")
diff_delib <- wall_dat[wall_dat$cond_t1 == "delib", tee_2] - wall_dat[wall_dat$cond_t1 == "delib", tee_1]
diff_delib <- subset(diff_delib, select = tee_2)
res_delib <- do.call(rbind, lapply(diff_delib, function(x) tidy(t.test(x, mu = 0))))
names(res_delib) <- paste0(names(res_delib), "_d")
res_delib$var_name <- gsub("_t2", "", rownames(res_delib))
res_delib <- subset(res_delib, select = c("var_name", "estimate_d", "p.value_d"))
diff_delib_info <- wall_dat[wall_dat$cond_t1 == "delib_info", tee_2] - wall_dat[wall_dat$cond_t1 == "delib_info", tee_1]
diff_delib_info <- subset(diff_delib_info, select = tee_2)
res_delib_info <- do.call(rbind, lapply(diff_delib_info, function(x) tidy(t.test(x, mu = 0))))
names(res_delib_info) <- paste0(names(res_delib_info), "_di")
res_delib_info$var_name_di <- gsub("_t2", "", rownames(res_delib_info))
res_delib_info <- subset(res_delib_info, select = c("var_name_di", "estimate_di", "p.value_di"))
tab_2 <- know_all %>%
left_join(res_delib) %>%
left_join(res_delib_info, by = c("var_name" = "var_name_di"))
tab_2_col_order <- c("var_name", "t1_pooled", "control1",
"delib1", "delib2", "diff_delib", "p.value_d", "estimate_d",
"delib_info1", "delib_info2", "diff_delib_info", "p.value_di", "estimate_di")
tab_2 <- tab_2[, tab_2_col_order]
write.csv(tab_2, file = "tabs/02_table_2_know.csv", row.names = F)
|
828e524b14e4d03e17e3a7a73f7612da649ffa7c | 63bc7e142f273144641b37a1c180f8678d845499 | /CatMisc/man/relativePath.Rd | 5bfdf859817e7a6cf3df356ac5e0e5dc659c87ae | [] | no_license | maptracker/CatMisc | ddec33c061d5a4079bf3648e50f1fbab48ce3ae4 | df94630e699665305af546c834cbb6c11086ccbe | refs/heads/master | 2021-05-23T05:48:22.025097 | 2019-04-11T18:47:52 | 2019-04-11T18:47:52 | 94,934,181 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,073 | rd | relativePath.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CatMisc.R
\name{relativePath}
\alias{relativePath}
\title{Relative Path}
\usage{
relativePath(parent, child, mustWork = FALSE, normChild = TRUE)
}
\arguments{
\item{parent}{Required, the file path that presumably is an
ancestor of the child in the directory structure. To return a
non-NA value, this object presumably needs to resolve to a
directory.}
\item{child}{Required, the file path of the "deeper" object (can be
any component of the file system - file, directory, link, etc.}
\item{mustWork}{Default \code{FALSE}. Passed to normalizePath, set
to TRUE if you wish to assure that both child and parent exist.}
\item{normChild}{Default \code{TRUE}, which will cause the child
path to be normalized as well. This is not always desirable;
For example, \code{normalizePath} will convert links to their
ultimate target path. If you wish to leave links as-is, set
normChild to FALSE.}
}
\value{
If either child or parent are any of \code{NULL}, \code{NA}
or an empty string, then \code{NA}. If child is the same as
parent (after normalization), an empty string. If child is not
a descendant of the parent, \code{NA}. In all other cases, a
single string representing the relative path.
}
\description{
Reports the relative file path from a parent directory to a child object
}
\details{
Given 'child' and 'parent' file paths, return the relative path
needed to reach the child from the parent, or \code{NA} if the
child is not a descendant of the parent.
By default, neither child nor parent will be checked for existence,
or for whether they are an appropriate object. Both will have their paths
normalized via \code{normalizePath()}. If you wish to require that
both exist, set \code{mustWork=TRUE}.
}
\examples{
relativePath("/tmp/RtmpaacRRB", "/tmp/RtmpaacRRB/output.txt")
relativePath(file.path(Sys.getenv('HOME'), "data"), "~/data/plots/x.png")
relativePath("/bin/bang/boom", "/bin/etc/etc/etc.txt")
relativePath("/usr/bin", "")
}
\seealso{
\code{\link[base]{normalizePath}}
}
|
358a881b8625e9cc8f81bfd4bec3da15e9e499d9 | 475372c181820a5079a67300e6d1d7632786425a | /R/theme_mip.R | ead2edd88cb1b2e14d2fca723968b85af1411351 | [] | no_license | IAMconsortium/mip | ba58053436adf9fbedcd4486ab856ede9744891b | 8557058c235dbc5fd6c7971f358fb6270c1f6335 | refs/heads/master | 2021-07-07T08:08:00.585891 | 2017-10-06T11:59:00 | 2017-10-06T11:59:00 | 114,863,504 | 0 | 2 | null | 2017-12-20T08:39:11 | 2017-12-20T08:39:11 | null | UTF-8 | R | false | false | 914 | r | theme_mip.R | #' MIP theme settings
#'
#' Creates the standard MIP ggplot2 theme (bold axis titles, rotated x-axis
#' labels, legend at the bottom), with all element sizes derived from a single
#' base font size.
#'
#' @param size Base font size in points; derived sizes are offsets of this
#'   value (plot title = size + 4, legend text = size - 3)
#' @return A \code{ggplot2} theme object that can be added to a plot with \code{+}
#' @author Jan Philipp Dietrich
#' @examples
#'
#' \dontrun{
#' p <- mipArea(x) + theme_mip(10)
#' }
#' @importFrom ggplot2 theme element_text unit margin
#' @export
theme_mip <- function(size=12) {
  # Fix: margin() is used below but was missing from the @importFrom list,
  # which breaks the package NAMESPACE ("no visible global function" in check).
  return(theme(plot.title       = element_text(size=size+4, face="bold", vjust=1.5),
               strip.text.x     = element_text(size=size, margin=margin(4,2,4,2,"pt")),
               axis.title.y     = element_text(angle=90, size=size, face="bold", vjust=1.3),
               axis.text.y      = element_text(size=size, colour="black"),
               axis.title.x     = element_text(size=size, face="bold", vjust=-0.3),
               axis.text.x      = element_text(size=size, angle=90, hjust=.5, colour="black"),
               legend.text      = element_text(size=size-3, vjust=1.5),
               legend.position  = "bottom"))
}
dd13b334b16d3e63ca21acccdf68c28e4eddec3b | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.analytics/man/datapipeline_create_pipeline.Rd | 55484db394bec04bdd8029e09b16800db30c1375 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,177 | rd | datapipeline_create_pipeline.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datapipeline_operations.R
\name{datapipeline_create_pipeline}
\alias{datapipeline_create_pipeline}
\title{Creates a new, empty pipeline}
\usage{
datapipeline_create_pipeline(name, uniqueId, description = NULL, tags = NULL)
}
\arguments{
\item{name}{[required] The name for the pipeline. You can use the same name for multiple
pipelines associated with your AWS account, because AWS Data Pipeline
assigns each pipeline a unique pipeline identifier.}
\item{uniqueId}{[required] A unique identifier. This identifier is not the same as the pipeline
identifier assigned by AWS Data Pipeline. You are responsible for
defining the format and ensuring the uniqueness of this identifier. You
use this parameter to ensure idempotency during repeated calls to
\code{\link[=datapipeline_create_pipeline]{create_pipeline}}. For example, if the
first call to \code{\link[=datapipeline_create_pipeline]{create_pipeline}} does not
succeed, you can pass in the same unique identifier and pipeline name
combination on a subsequent call to
\code{\link[=datapipeline_create_pipeline]{create_pipeline}}.
\code{\link[=datapipeline_create_pipeline]{create_pipeline}} ensures that if a
pipeline already exists with the same name and unique identifier, a new
pipeline is not created. Instead, you'll receive the pipeline identifier
from the previous attempt. The uniqueness of the name and unique
identifier combination is scoped to the AWS account or IAM user
credentials.}
\item{description}{The description for the pipeline.}
\item{tags}{A list of tags to associate with the pipeline at creation. Tags let you
control access to pipelines. For more information, see \href{https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-control-access.html}{Controlling User Access to Pipelines}
in the \emph{AWS Data Pipeline Developer Guide}.}
}
\description{
Creates a new, empty pipeline. Use \code{\link[=datapipeline_put_pipeline_definition]{put_pipeline_definition}} to populate the pipeline.
See \url{https://www.paws-r-sdk.com/docs/datapipeline_create_pipeline/} for full documentation.
}
\keyword{internal}
|
4d8f3f8f924340609d6692358321513569968915 | 81de910c2709361dff0f83cdf087a47ef4e31818 | /figures/Figure6/loopingPlotscode/IKZF1.R | 3f3265fefe412a48c461a45b734051f23b494e8e | [] | no_license | fl-yu/singlecell_bloodtraits | 1fdfb4edeac23a3763199f377a7115e9541fa5df | 1da2a246b6f1ad13e8bfcbd424080f8ea5917c86 | refs/heads/master | 2022-02-05T10:02:17.033827 | 2019-07-09T22:38:39 | 2019-07-09T22:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,379 | r | IKZF1.R | library(Gviz)
library(data.table)
library(GenomicRanges)
library(GenomicInteractions)
library(InteractionSet)
library(diffloop)
library(BuenColors)
source("geneinfoLoad.R") # provides `geneinfo` gene-annotation table -- presumably Hi-C interaction metadata; TODO confirm
# Plot looping interactions between three SNPs and a fixed anchor position in
# the IKZF1 locus (chr7, hg19) and save the track figure as a PDF.
genome <- "hg19"
chr <- "chr7"
fromBP <- 50080000
toBP <- 50560000
bp <- 50343720
gene <- "IKZF1"
# The three SNP positions of interest (single-bp ranges on chr7).
snps <- makeGRangesFromDataFrame(data.frame(chr = rep(chr,3),
                                            start = c(50187623, 50427982, 50497912),
                                            end = c(50187623, 50427982, 50497912)))
# Make GRange of region
# NOTE(review): g_region is never used below -- confirm before removing.
g_region <- makeGRangesFromDataFrame(data.frame(chr = chr, start = fromBP, end = toBP))
# Get relevant peaks
snpsInRegion <- snps
# NOTE(review): snpsTrack is never used (snp_track below is what gets plotted).
snpsTrack <- AnnotationTrack(snpsInRegion, fill = c("black"))
# Rows of `geneinfo` for the target gene inside the plotting window.
geneLoci <- geneinfo[geneinfo$chromosome == chr & geneinfo$start > fromBP & geneinfo$end < toBP & geneinfo$symbol == gene,]
# SNP annotation track; 1 kb padding so the single-bp SNPs are visible.
snp_track <- AnnotationTrack(padGRanges(snpsInRegion, pad = 1000), stacking = "dense", fill = c("#0081C9", "#8F1336", "#A65AC2"))
displayPars(snp_track) <- list( max.height = 25, stackHeight = 1, shape = "box")
# Build Interactions set
# Each SNP is linked to the same anchor position `bp` with an arbitrary
# positive count so all three arcs are drawn.
anchor.one <- snps
anchor.two <- makeGRangesFromDataFrame(data.frame(chr = chr, start = bp, end = bp))[rep(1,3)]
interaction_counts<- c(5,5,5)
gi <- GenomicInteractions(anchor.one, anchor.two, counts=interaction_counts)
gi <- gi[mcols(gi)$counts > 0]
interaction_track <- InteractionTrack(gi, chromosome=chr)
displayPars(interaction_track) = list(col.interactions= c("#0081C9", "#8F1336", "#A65AC2"),
                                      col.anchors.fill ="black",
                                      col.anchors.line = "black",
                                      interaction.dimension=100, 
                                      anchor.height = 0,
                                      rotation = 0)
#availableDisplayPars(interaction_track)
# Assemble the Gviz track stack: ideogram, axis, loops, SNPs, gene model.
itrack <- IdeogramTrack(genome = genome, chromosome = chr)
gtrack <- GenomeAxisTrack()
grtrack <- GeneRegionTrack(geneLoci, genome = genome, chromosome = chr, name = " ", transcriptAnnotation = "symbol", fill = "black")
pdf(file = paste0("../plots/",gene, ".loops.pdf"), width = 8, height = 4)
plotTracks(list(itrack, gtrack, interaction_track, snp_track, grtrack), from = fromBP, to = toBP, 
           background.title = "white", sizes = c(0.05, 0.15, 0.2, 0.01, 0.05), innerMargin = 0, margin = 0)
dev.off()
|
f5f6c325d95c80848d455ed7a8930bad5e1e6462 | 864de5871194247f7ec4319afed1f6b413601db1 | /man/input_button.Rd | 3f4ea9dc69bfcbff4845f723d5793ff6045ee7ad | [
"MIT"
] | permissive | han-tun/g2r | d3762b82277cdf5d397aa8016608b892f41914bd | a48baf1fcceacef5c9f960b52d6054f5fa8d5c70 | refs/heads/master | 2023-07-26T07:38:34.951377 | 2021-09-06T19:57:30 | 2021-09-06T19:57:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 578 | rd | input_button.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inputs.R
\name{input_button}
\alias{input_button}
\title{Button Input}
\usage{
input_button(id, label, class = "default")
}
\arguments{
\item{id}{Id of the button.}
\item{label}{Label to display.}
\item{class}{Class of the button.}
}
\description{
Add a button input.
}
\details{
The \code{class} argument defines the style of
the button in Bootstrap 3, and generally accepts:
\itemize{
\item \code{default}
\item \code{info}
\item \code{success}
\item \code{warning}
\item \code{danger}
}
}
|
97dec33512643e544a2a48289f06bca3280dd7e9 | f640a0f5e82204b7ad2364fe9bff4df7de501b0f | /velocity/R/pred_time_on_xygrid.R | b458f5057b740bde4fc18b2c4f71168b89965bf4 | [] | no_license | laispfreitas/Colombia_DZC_satscan_velocity | 3faaf6aab2c534a2c83c55054984ac64b7520080 | 57dbd613cf00936ef76d37a383f6021e93f265bd | refs/heads/main | 2023-04-17T03:39:46.761005 | 2022-08-30T15:11:46 | 2022-08-30T15:11:46 | 498,443,102 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,390 | r | pred_time_on_xygrid.R | #' Fit polynomial models
#'
#' Fits the polynomial surface-trend models and predicts the delay time on a
#' regular x/y grid; the result is the dataset used for the contour map.
#' @param ds_new Data frame providing the date of outbreak and X and Y coordinates
#' @param max.order Integer of highest order polynomial to attempt; defaults to 10
#' @param shpfile A polygon shapefile object \code{"SpatialPolygonsDataFrame"} from maptools
#' @param r The front-wave velocity summary from the \code{\link{outbreak_velocity}} function
#' @param bestorder The order of the best-performing model
#' @export
pred_time_on_xygrid = function(ds_new, r, bestorder,shpfile, max.order=10) {
  # Regular grid spanning the observed X/Y range padded by 20% on each side;
  # grid resolution follows the largest observed delay time.
  new.df <- expand.grid(X = seq((min(r$ds$X)*0.8), (max(r$ds$X)*1.2), length.out = max(r$ds$time, na.rm = TRUE)),
                        Y = seq((min(r$ds$Y)*0.8), (max(r$ds$Y)*1.2), length.out = max(r$ds$time, na.rm = TRUE)))
  new.df$XY <- new.df[, "X"] * new.df[, "Y"]
  # Higher-order terms X^i / Y^i up to the selected model order.
  # seq_len(bestorder)[-1] is empty when bestorder == 1; the previous
  # `2:bestorder` wrongly iterated over c(2, 1) in that case.
  for (i in seq_len(bestorder)[-1]) {
    new.df[, paste0("X", i)] <- new.df$X^i
    new.df[, paste0("Y", i)] <- new.df$Y^i
  }
  # Refit the candidate surface-trend models (helper defined elsewhere in the
  # package) and predict delay time with the selected best-order model.
  trend.fit <- estimate_surfacetrend_models(ds_new, max.order)
  new.df$time <- predict(trend.fit[[bestorder]], new.df)
  new.df <- new.df[, c("X", "Y", "time")]
  # Clip the prediction grid to the study-area polygon.
  new.df <- clip_xygrid(new.df, shpfile)
  return(new.df)
}
|
58e664c64c525e70a37d3f3304bbe4bc3c2ef02e | 02c27fc07ee76bf11d21c4cf59ae5d3b94194a3e | /R/Cauchy_ID.R | 080a494de868dab7106a6196e176f6dad158b9bc | [] | no_license | rnorouzian/BayesianforL2 | cd581e5d5bba2de6d12411aa26214dab85490deb | e97f4d7fccf3c1b9ee1619dec496584b0d22f505 | refs/heads/master | 2021-01-20T16:09:22.503864 | 2017-12-02T06:49:24 | 2017-12-02T06:49:24 | 90,819,012 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,327 | r | Cauchy_ID.R | #' Cauchy Prior Distribution Identifier
#'
#' Uses the subject matter researcher's knowledge to generate
#' a corresponding Cauchy prior distribution.
#'
#' @param Low researcher's LOWEST plausible value for the parameter.
#' @param High researcher's HIGHEST plausible value for the parameter.
#' @param Cover researcher's suggested coverage for the Low and High values provided.
#'
#' @return Provides graphical as well as full textual description of a suitable Cauchy
#' distribution for researchers based on their knowledge about how High or Low
#' the parameter has been found in the literature. Also, helps researchers
#' revise their prior by issuing various messages.
#'
#' @details Uses optimization techniques to provide graphical and textual information about
#' an appropriate Cauchy prior distribution.
#'
#' @author Reza Norouzian <rnorouzian@gmail.com>
#' @export
#'
#' @examples
#' # Suppose a researcher needs a Cauchy prior for a Cohen d effect size that in
#' # his/her view can't be less than -6 and more than +6. The researcher believes
#' # these two limit values cover 90% of all possible values that this parameter
#' # can take:
#'
#'
#' Cauchy_ID (Low = -6, High = 6, Cover = '90%')
#'
#'
#'
#' # User can also use any value that is between 0 and 1 for the argument
#' # Cover without using percentage sign:
#'
#'
#'
#' Cauchy_ID (Low = -6, High = 6, Cover = 90)
#'
Cauchy_ID = function (Low, High, Cover= NULL){
  # Save graphics state and global options; both are restored on exit.
  original_par = par(no.readonly = TRUE)
  on.exit(par(original_par), add = TRUE)
  # Fix: the original called options(warn = -1) without ever restoring it,
  # leaving warnings silenced globally after the function returned.
  old_opts = options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  # `Cover` may be a percentage string ("90%"), a plain number (90), or NULL
  # (defaults to 90% coverage).
  coverage  <- if (is.character(Cover)) { as.numeric(substr(Cover, 1, nchar(Cover)-1)) / 100 
  } else if (is.numeric(Cover)) { Cover / 100 } else { .90 }
  Low.percentile = (1 - coverage) / 2
  p1 = Low.percentile
  p2 = Low.percentile + coverage
  ## Start Optimization:
  if( p1 <= 0 || p2 >= 1 || Low > High || p1 > p2 || coverage >= 1 ) {
    par(family = 'serif')
    plot(1, axes = FALSE, type = 'n', ann = FALSE)
    text(1, 1, "Unable to find such a prior", cex = 3.5, col = 'red4', font = 2)
    return( message("\n\tUnable to find such a prior, make sure you have selected the correct values.") )
  } else {
    # Residuals of the target quantiles: a Cauchy(location, scale) whose
    # p1/p2 quantiles hit Low/High makes these zero.
    f <- function(x) {   
      y <- c(Low, High) - qcauchy(c(p1, p2), location=x[1], scale=x[2])
    }
    ## SOLVE: least-squares fit of (location, scale).
    AA <- optim(c(1, 1), function(x) sum(f(x)^2), control=list(reltol=(.Machine$double.eps)) )
    parms = unname(AA$par)
  }
  ## CHECK: the fitted quantiles must reproduce Low/High to 4 decimals.
  q <- qcauchy( c(p1, p2), parms[1], parms[2] )
  unequal = function(a, b, sig = 4) { return (round(a, sig) != round(b, sig) ) } # TRUE if a and b differ at `sig` decimals
  if( p1 <= 0 || p2 >= 1 || Low >= High || p1 >= p2 || unequal(Low, q[1]) || unequal(High, q[2]) ) {
    par(family = 'serif')
    plot(1, axes = FALSE, type = 'n', ann = FALSE)
    text(1, 1, "Unable to find such a prior", cex = 3.5, col = 'red4', font = 2)
    message("\n\tUnable to find such a prior, make sure you have selected the correct values")
  } else
  {
    equal = function(a, b, sig = 4) { return (round(a, sig) == round(b, sig)) } # TRUE if a and b agree at `sig` decimals
    # Round/format helper used for axis labels and the reported Mode/Scale.
    # Fix: this is called with a length-9 vector at the axis() call below;
    # vector conditions in `if`/`||` are an error on R >= 4.2. R < 4.2 silently
    # used only the first element, so that legacy behaviour is now explicit
    # via x[1].
    decimal <- function(x, k){
      if( equal(x[1], 0) ){ format( round(0, k), nsmall = k ) } else 
      { as.numeric(format(round(x, k), nsmall = k, scientific = 
      ifelse(x[1] >= 1e+05 || x[1] <= -1e+05 || x[1] <= 1e-05 & x[1] >= -1e-05, TRUE, FALSE) )) }
    }
    ## call 'location' mean and 'scale' sd for simplicity:
    mean = parms[1]
    sd = parms[2]
    # Plot window: +/- 12 scale units around the mode.
    x.min = mean - 12*sd
    x.max = mean + 12*sd
    par(mgp = c(3.7, 1, 0), mar = c(5.1, 5.5, 4.1, 1.1) )
    curve ( dcauchy(x, mean, sd), lwd = 4, from = x.min,
            to = x.max, xlab = 'Parameter of Interest', ylab = 'Density',
            n = 1e4, xaxt = 'n', las = 1, font.lab = 2, cex.lab = 1.4,
            frame.plot = FALSE, font.axis = 2, cex.axis = 1.1 )
    axis(1, at = decimal(seq(x.min, x.max, length.out = 9), 1), font = 2, cex.axis = 1.3 )
    low.extreme = par('usr')[3]
    prior.peak = dcauchy(mean, mean, sd)
    # Mark the mode and the Low/High interval with its endpoints labelled.
    segments(mean, low.extreme, mean, prior.peak, lty = 3)
    arrows(q[1], 0, q[2], 0, lwd = 2, col = 'red', angle = 90, code = 3, length = .15)
    text(c(q[1],q[2]), rep(0, 2), round(c(q[1], q[2]), 3), col = 'blue', pos = 3, font = 2, cex = 2, xpd = TRUE)
    mtext(side = 3, "This is the \"Cauchy Prior\" you have in mind", cex = 1.5, bty = 'n', font = 2)
    mtext(side = 3, bquote(bold(Mode == .(decimal (mean, 3)))), line = -4, cex = 1.8, adj = .05, col = 'red4')
    mtext(side = 3, bquote(bold(Scale == .(decimal (sd, 3)))), line = -6, cex = 1.8, adj = .05, col = 'red4')
    cat(message("\nCAUTION: \"ALWAYS\" visually inspect the shape of the prior generated to see \n \t if it accurately represents your belief and revise if necessary.\n"))
    cat(message("\nNOTE: \"Cauchy\" is like a NORMAL distribution but has VERY VERY EXTENDED tails.\n\tThus, using a coverage of \"90%\" for the low and high values is enough .\n"))
    # Fix: all.equal() returns a character vector (not FALSE) when the values
    # differ, so the original `if (all.equal(...))` raised an error for any
    # prior whose mode was not ~0; isTRUE() makes the check safe.
    if (isTRUE(all.equal(mean, 0, tolerance = 1e-4))) { text(mean, prior.peak / 3, "Neutral Position", cex = 1.5, pos = 3, srt = 90, font = 2) }
    structure(list(Mode = decimal(parms[1], 7), Scale = decimal(parms[2], 7)), class = "power.htest")
  }
}
|
27936dad5c60e7ce07d87d91b0986a87655660c6 | 776345c4d2d6ce713646a43f47fea3d7f7c86a7d | /server.R | 1ef347f888389378fbdca8ac1273a487a7a0bdff | [] | no_license | jLBasilio/150_solver | ff7a765330c596e55dbf1cb4cfbec7f0dac5c657 | 3245e76918e8cd64261bd2690b73400af0e3c5f3 | refs/heads/master | 2020-04-10T03:43:46.395773 | 2018-12-10T04:15:15 | 2018-12-10T04:15:15 | 160,778,393 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,420 | r | server.R | library(shinyjs)
library(rhandsontable)
source("./controllers.R")
# Shiny server for a numerical-methods app with three tools: Polynomial
# Regression (PR), Quadratic Spline Interpolation (QSI) and a simplex-based
# shipping-cost minimizer. PolynomialRegression(), PRSolver(), QSI(),
# QSISolver() and SimplexMin() come from controllers.R (sourced above).
server = function(input, output, session) {
  useShinyjs()
  # Pager state for stepping through the recorded simplex iterations.
  pageNo = reactiveVal(1)
  maxPageNo = reactiveVal(NULL)
  # NOTE(review): re-assigned with `=` inside observeEvent(input$showSteps),
  # which creates a local binding instead of updating this one (`<<-` would be
  # needed); the variable is never read, so it looks dead -- confirm.
  stateListGlobal = NULL
  # ---- Polynomial Regression (PR) panel ----
  # Uploaded CSV parsed into an (x, y) matrix; NULL until a file is chosen.
  fileToMatrixPR = reactive({
    if (is.null(input$fileInputPR)) return(NULL)
    # NOTE(review): fileToRead is unused here (datapath is read directly).
    fileToRead = input$fileInputPR
    df = read.csv(input$fileInputPR$datapath, header=input$headerCheckPR, sep=",", quote="")
    return(as.matrix(df, nrow = 1, ncol = 1))
  })
  # Data preview table; side effect: caps the degree slider at n - 1 points.
  output$fileContentsPR = renderTable({
    if (is.null(fileToMatrixPR())) return(NULL)
    vectorNames = c("x", "y")
    matrixOutput = fileToMatrixPR()
    updateSliderInput(session,
                      "degreeNPR",
                      label = "Degree",
                      min = 1,
                      max = length(matrixOutput[, 1]) - 1,
                      step = 1,
                      value=0
    )
    colnames(matrixOutput) = vectorNames
    if(input$sortedXPR) {
      matrixOutput = matrixOutput[order(matrixOutput[,1]), ]
    }
    if(!input$dispAllPR) {
      return(head(matrixOutput))
    } else {
      return(matrixOutput)
    }
  })
  # Fit the regression polynomial on "Solve" and reveal the result widgets.
  getFunction = eventReactive(input$solveButtonPR, {
    if (is.null(fileToMatrixPR())) return(NULL)
    matrixHandler = fileToMatrixPR()
    result = PolynomialRegression(matrixHandler[,1], matrixHandler[,2], input$degreeNPR)
    show("funcLabel")
    show("xInputPR")
    show("solveXPR")
    return(result)
  })
  # Human-readable form of the fitted polynomial.
  getFunctionText = reactive({
    if(is.null(getFunction())) return(NULL)
    functionText = getFunction()$textForm
    return(functionText)
  })
  output$answerFunctionPR = renderText({
    getFunctionText()
  })
  # Evaluate the fitted polynomial at the user-supplied x.
  # NOTE(review): output slots are normally assigned render* functions;
  # assigning an eventReactive directly (here and for answerGivenXQSI below)
  # is unusual -- confirm this renders as intended in the deployed app.
  output$answerGivenX = eventReactive(input$solveXPR, {
    if (is.null(fileToMatrixPR())) return(NULL)
    show("ansLabel")
    round(PRSolver(fileToMatrixPR()[,1], fileToMatrixPR()[,2], input$degreeNPR, input$xInputPR), digits=4)
  })
  # ---- Quadratic Spline Interpolation (QSI) panel ----
  # Uploaded CSV parsed into an (x, y) matrix; NULL until a file is chosen.
  fileToMatrixQSI = reactive({
    if (is.null(input$fileInputQSI)) return(NULL)
    fileToRead = input$fileInputQSI
    df = read.csv(fileToRead$datapath, header=input$headerCheckQSI, sep=",", quote="")
    toReturn = as.matrix(df, nrow = 1, ncol = 1)
    return(toReturn)
  })
  # Data preview table (optionally sorted by x, optionally truncated).
  output$fileContentsQSI = renderTable({
    if (is.null(fileToMatrixQSI())) return(NULL)
    vectorNames = c("x", "y")
    matrixOutput = fileToMatrixQSI()
    colnames(matrixOutput) = vectorNames
    if(input$sortedXQSI) {
      matrixOutput = matrixOutput[order(matrixOutput[,1]), ]
    }
    if(!input$dispAllQSI) {
      return(head(matrixOutput))
    } else {
      return(matrixOutput)
    }
  })
  # Compute the per-interval spline functions on "Solve".
  generateFunctionsQSI = eventReactive(input$solveButtonQSI, {
    if (is.null(fileToMatrixQSI())) return(NULL)
    matrixHandler = fileToMatrixQSI()
    resultQSI = QSI(matrixHandler[,1], matrixHandler[,2])
    show("xInputQSI")
    show("solveXQSI")
    return(resultQSI$functionSet)
  })
  # Table of the generated spline segments.
  output$generatedFunctions = renderTable({
    toReturn = generateFunctionsQSI()
    vectorNames = c("Interval", "Function", "Range")
    colnames(toReturn) = vectorNames
    return(toReturn)
  })
  # Evaluate the spline at the user-supplied x.
  output$answerGivenXQSI = eventReactive(input$solveXQSI, {
    if (is.null(fileToMatrixQSI())) return(NULL)
    show("ansLabelQSI")
    round(QSISolver(fileToMatrixQSI()[,1], fileToMatrixQSI()[,2], input$xInputQSI), digits=4)
  })
  # ---- Simplex shipping minimizer panel ----
  # Default editable tableau: 3 plants (rows, with supply) x 5 warehouses
  # (columns, with per-unit cost); row 4 holds the warehouse demands.
  initialSimplexInput = reactive({
    plantsInput = c("Denver", "Phoenix", "Dallas", "Demands by")
    supplyInput = c(310, 260, 280, NA)
    w1 = c(10, 6, 3, 180)
    w2 = c(8, 5, 4, 80)
    w3 = c(6, 4, 5, 200)
    w4 = c(5, 3, 5, 160)
    w5 = c(4, 6, 9, 220)
    dfInput = data.frame(Plants=plantsInput, Supply=supplyInput, Sacramento=w1, SaltLake=w2, Chicago=w3, Albuquerque=w4, NewYorkCity=w5)
    return(dfInput)
  })
  # Flatten the edited tableau into the RHS / objective-row vectors expected
  # by SimplexMin() and solve the minimization.
  solveSimplex = reactive({
    # Get table from ui
    tableFromUI = hot_to_r(input$inTable)
    # Extract RHS
    # Demands (row 4, columns 3..7) are negated, then the three supplies and
    # a trailing 0 are appended.
    rhs = c()
    for(i in 3:7)
      rhs = c(rhs, -tableFromUI[4,i])
    rhs = c(rhs, tableFromUI[1,2], tableFromUI[2,2], tableFromUI[3,2], 0)
    # Extract the last row
    # Costs are read plant-by-plant (3 plants x 5 warehouses = 15 entries).
    lastRow = c()
    for(i in 1:3) {
      for(j in 3:7) {
        lastRow = c(lastRow, tableFromUI[i,j])
      }
    }
    # Populate slack variables
    for(i in 1:8) {
      lastRow = c(lastRow, 0)
    }
    # Put z at the end
    lastRow = c(lastRow, 1)
    return(SimplexMin(rhs, lastRow))
  })
  # Reshape the solver result back into a display table mirroring the input
  # layout, plus per-plant/per-warehouse totals and the total shipping cost.
  generateSimplexOutput = reactive({
    localInputTable = hot_to_r(input$inTable)
    # Cost sub-matrix only (drop the demands row and the Plants/Supply cols).
    croppedInputTable = localInputTable[-4, -(1:2)]
    # print(croppedInputTable)
    resultSimplex = solveSimplex()
    finalMatrix = resultSimplex$finalMatrix
    solutionVector = resultSimplex$solutionVector
    totalEachPlant = resultSimplex$totalEachPlant
    totalEachState = resultSimplex$totalEachState
    # Objective value lives (negated) in the bottom-right cell of the final
    # simplex matrix.
    shippingTotal = -finalMatrix[length(finalMatrix[,1]), length(finalMatrix[1,])]
    stateList = resultSimplex$stateList
    stateCount = resultSimplex$stateCount
    # Place values in their positions
    plantOutput = c("Denver", "Phoenix", "Dallas", NA, "Totals", "Shipping")
    totalOutput = c(totalEachPlant, NA, NA, shippingTotal)
    # Copy all number of ships
    # solutionVector is laid out plant-major: entries i, i+5, i+10 are the
    # shipments from the three plants to warehouse i.
    w1 = c(solutionVector[1], solutionVector[6], solutionVector[11])
    w1 = c(w1, NA, totalEachState[1], sum(w1 * croppedInputTable[,1]))
    w2 = c(solutionVector[2], solutionVector[7], solutionVector[12])
    w2 = c(w2, NA, totalEachState[2], sum(w2 * croppedInputTable[,2]))
    w3 = c(solutionVector[3], solutionVector[8], solutionVector[13])
    w3 = c(w3, NA, totalEachState[3], sum(w3 * croppedInputTable[,3]))
    w4 = c(solutionVector[4], solutionVector[9], solutionVector[14])
    w4 = c(w4, NA, totalEachState[4], sum(w4 * croppedInputTable[,4]))
    w5 = c(solutionVector[5], solutionVector[10], solutionVector[15])
    w5 = c(w5, NA, totalEachState[5], sum(w5 * croppedInputTable[,5]))
    dfOutput = data.frame(Plants=plantOutput, Total=totalOutput, Sacramento=w1, SaltLake=w2, Chicago=w3, Albuquerque=w4, NewYorkCity=w5)
    return(list(dfOutput = dfOutput, stateList=stateList, stateCount=stateCount))
  })
  observeEvent(input$hideInput, {
    toggle("inTable")
  })
  # Editable input tableau; plant names and the demands-row Supply cell are
  # locked.
  output$inTable = renderRHandsontable({
    rhandsontable(initialSimplexInput()) %>%
      hot_col("Plants", readOnly=TRUE) %>%
      hot_cell(4, "Supply", readOnly=TRUE)
  })
  observeEvent(input$hideOutput, {
    toggle("outTable")
  })
  # Solve on button press and reveal the output widgets.
  showOutputTableau = eventReactive(input$solveButtonSimplex, {
    show("oTableLabel1")
    show("oTableLabel2")
    show("showSteps")
    show("hideOutput")
    return(rhandsontable(generateSimplexOutput()$dfOutput, readOnly=TRUE))
  })
  output$outTable = renderRHandsontable({
    showOutputTableau()
  })
  # Initialize the step-through pager over the recorded simplex iterations.
  observeEvent(input$showSteps, {
    show("prevStep")
    show("nextStep")
    show("hideSteps")
    show("matrixSteps")
    simplexResult = generateSimplexOutput()
    stateListGlobal = simplexResult$stateList
    maxPageNo(simplexResult$stateCount)
    pageNo(1)
  })
  observeEvent(input$hideSteps, {
    toggle("matrixSteps")
  })
  # Matrix for the currently selected iteration step.
  getMatrixSteps = reactive({
    simplexOutput = generateSimplexOutput()
    return(simplexOutput$stateList[[pageNo()]])
  })
  output$matrixSteps = renderPrint({
    getMatrixSteps()
  })
  # Pager navigation, clamped to [1, maxPageNo].
  observeEvent(input$nextStep, {
    if(pageNo() == maxPageNo()) return(NULL)
    pageNo(pageNo()+1)
  })
  observeEvent(input$prevStep, {
    if(pageNo() == 1) return(NULL)
    pageNo(pageNo()-1)
  })
}
|
9b8c45be5f2809e830c13a8986c8f8c9f25f8797 | 751554b83b004bb64d90cdf2786f42ab4794ab95 | /Factor_Hair_Analysis.R | 1bbf89c827c7ebf3a7b76b99600494c14c8a4198 | [] | no_license | Sakshi-jain31/Product-Market-Segmentation-PCA-Regression-Analysis | ac63bab06fd0583733fc78b5f25b30b00f5d7067 | 7ff333ade25a13df3c0566f8cab7e4830a9fe4d6 | refs/heads/master | 2022-12-13T04:08:18.562403 | 2020-09-15T05:43:03 | 2020-09-15T05:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,993 | r | Factor_Hair_Analysis.R | #### Exploratory Data Analysis####
# Exploratory analysis + PCA/factor analysis of the Factor-Hair survey data,
# followed by regression of Satisfaction on the rotated factor scores.
# NOTE(review): setwd() with an absolute user-specific path makes this script
# non-portable; attach() calls below are also generally discouraged.
setwd("C:/Users/Sakshi/Desktop/Great Learning/Sample R Projects/Factor_Hair_Analysis")
Hair=read.csv("Factor-Hair-Revised.csv", header = TRUE)
library(ggplot2)
library(psych)
library(corrgram)
library(car)
library(corrplot)
library(nFactors)
library(dplyr)
library(DataExplorer)
library(kableExtra)
str(Hair) #Structure of the dataset
any(is.na(Hair)) #Missing Values
summary(Hair) #Summary of dataset
dim(Hair) #Rows no of rows & columns
plot_intro(Hair) #Plot of missing values
Hair1=Hair[,2:12] #creating New Data frame, removing 1st Column
cor.h=round(cor(Hair1), 3) #Correlations between Independent variables 
cor.h
corrplot(cor.h, method="shade") #Correlations Plot
attach(Hair1)
attach(Hair)
Hair2=lm(Satisfaction~., data=Hair1) #Combined Linear Regression
summary(Hair2)
vif(Hair2) #Evidence of Multicollearnity
#####Simple Linear Models Summary######
# One simple regression of Satisfaction per predictor, for comparison with
# the combined model above.
summary(Hair2)
Model1=lm(Satisfaction~ProdQual, data=Hair)
summary(Model1)
Model2=lm(Satisfaction~Ecom, data=Hair)
summary(Model2)
Model3=lm(Satisfaction~TechSup, data=Hair)
summary(Model3)
Model4=lm(Satisfaction~CompRes,data=Hair)
summary(Model4)
Model5=lm(Satisfaction~Advertising, data=Hair)
summary(Model5)
Model6=lm(Satisfaction~ProdLine, data=Hair)
summary(Model6)
Model7=lm(Satisfaction~SalesFImage, data=Hair)
summary(Model7)
Model8=lm(Satisfaction~ComPricing, data=Hair)
summary(Model8)
Model9=lm(Satisfaction~WartyClaim, data=Hair)
summary(Model9)
Model10=lm(Satisfaction~OrdBilling, data=Hair)
summary(Model10)
Model11=lm(Satisfaction~DelSpeed, data=Hair)
summary(Model11)
###To run Factor analysis two tests need to be done ######
# Bartlett's test of sphericity on the correlation matrix.
cortest.bartlett(cor.h, nrow(Hair1))
#####PCA/Factor Analysis #######
# Scree plot of eigenvalues, then 4-factor solutions without and with
# Varimax rotation.
library(nFactors)
EV=eigen(cor(Hair1))
Eigenvalue=EV$values
Factor=c(1,2,3,4,5,6,7,8,9,10,11)
scree=data.frame(Factor, Eigenvalue)
plot(scree, main="Scree Values", col="blue", ylim=c(0,4))
lines(scree, col="red")
Unrotate=principal(Hair1, nfactors = 4, rotate = "none")
Unrotate
fa.diagram(Unrotate)
Rotate=principal(Hair1, nfactors = 4, rotate = "Varimax")
Rotate
fa.diagram(Rotate)
##### Multiple Regression Analysis #######
# Regress Satisfaction on the four rotated-factor scores.
Scores=round((Rotate$scores),2)
as.data.frame(Scores)
colnames(Scores)=c("Buyepr", "Brand", "AfSSr", "Prodt")
Hair3=Hair %>% select("Satisfaction")
Hair3
Hair_New=cbind(Hair3, Scores)
Hair_New
attach(Hair_New)
Model_New=lm(Satisfaction~Buyepr+Brand+AfSSr+Prodt, data=Hair_New)
summary(Model_New)
#### Predicting the Satisfaction ######
# Fitted values vs. observed Satisfaction, plotted side by side.
Predict=predict(Model_New)
as.data.frame(Predict)
Predicted=round(Predict,1)
Predicted
Hair_New=cbind(Hair_New, Predicted)
Hair_New
PredictedSatisfaction=Hair_New$Predicted
BackTrack=data.frame(Hair_New$Satisfaction, PredictedSatisfaction)
plot(Hair_New$Satisfaction, col="red")
lines(Hair_New$Satisfaction, col="red")
plot(PredictedSatisfaction, col="blue")
lines(PredictedSatisfaction, col="blue")
|
f7b3d99f15da11df73e33b955634015e5d4dce2e | 9d98b117ce88845da0dc6e50f316628713aaf619 | /R_mean.r | 34ed0f48babb71654873a733e0ce46e7f13ed57c | [] | no_license | marvinh/DialogFlowWithR | 21574d9bdb546dbcdd6012579516cefe43383385 | 8b7a635bc2a9be8095f812aaa0d2489640b3cff2 | refs/heads/master | 2020-03-11T06:46:43.015970 | 2018-04-17T03:59:57 | 2018-04-17T03:59:57 | 129,839,609 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 560 | r | R_mean.r | # mean.R
# Draw random normal deviates and return their mean.
# Assumes `input` (a list of arguments for rnorm, e.g. list(n = ..., mean =
# ..., sd = ...)) is provided by the caller/webhook environment -- TODO
# confirm where `input` is bound.
needs(magrittr)
mean(do.call(rnorm,input))
# Reference (from ?do.call):
#do.call(what, args, quote = FALSE, envir = parent.frame())
#Arguments
#what	
#either a function or a non-empty character string naming the function to be called.
#args	
#a list of arguments to the function call. The names attribute of args gives the argument names.
#quote	
#a logical value indicating whether to quote the arguments.
#envir	
#an environment within which to evaluate the call. This will be most useful if what is a character string and the arguments are symbols or quoted expressions. |
1497fbef6965f1e3b8bad3c90f1c47ecca5cb146 | 1677597e02878af81be32e55c1be874e32a546e3 | /R/sentence_weights.R | 71780737973a66d40888a08686dd2048abe99fc7 | [
"MIT"
] | permissive | LJCovingtonJr/SumBasicR | bc55cd1b10e18ce16544e21456d0f3a0180d39e8 | 48d55c85d07bdafbf96103308d60f6e598a1b446 | refs/heads/master | 2022-09-26T05:28:50.313047 | 2020-06-04T06:45:00 | 2020-06-04T06:45:00 | 262,671,055 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,100 | r | sentence_weights.R | #' Compute sentence weights
#'
#' For a given text and probability distribution, calculates the weights for each sentence
#' @param text A lengthy string of text
#' @param dist A probability distribution, generated by compute_probability_dist()
#' @return A data frame, consisting of the sentences and their weights
#' @import tokenizers
#' @import dplyr
#' @export
sentence_weights <- function(text, dist){
# For a given text and probability distribution — as returned by compute_probability_dist() — returns the weights for each sentence (the average probability of the words in the sentence)
sentences <- unlist(tokenize_sentences(text))
weights <- vector(mode = "numeric")
for(i in sentences){
sentence_words <- unlist(tokenize_words(i))
sentence_probs <- dist %>%
filter(dist$words %in% sentence_words)
sentence_weight <- mean(sentence_probs$probs)
weights <- c(weights, sentence_weight)
}
result <- data.frame(sentences, weights, stringsAsFactors = FALSE)
names(result) <- c("sentences", "weights")
return(result)
}
|
2c810d42557717cc1427830677fc10ee3dcedc62 | d7ad10a6c17e28fd169194ddfd64cded3f0d56af | /man/eval_Dstar_g.Rd | 312de775c2976293ec2ae28a3c39c60cbbf30b8a | [
"MIT"
] | permissive | benkeser/drtmle | ac08013264883b708446c0610e71f22730a7dc93 | 538a3a264c1ca984b6d88978ca7f96165f43152c | refs/heads/main | 2023-01-12T15:08:45.289555 | 2022-12-29T16:56:27 | 2022-12-29T16:56:27 | 75,324,341 | 18 | 10 | NOASSERTION | 2022-12-29T14:39:21 | 2016-12-01T19:16:01 | R | UTF-8 | R | false | true | 943 | rd | eval_Dstar_g.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inf_functions.R
\name{eval_Dstar_g}
\alias{eval_Dstar_g}
\title{Evaluate extra piece of efficient influence function resulting from
misspecification of outcome regression}
\usage{
eval_Dstar_g(A, DeltaY, DeltaA, Qrn, gn, a_0)
}
\arguments{
\item{A}{A vector of binary treatment assignment (assumed to be equal to 0 or
1)}
\item{DeltaY}{Indicator of missing outcome (assumed to be equal to 0 if
missing, 1 if observed)}
\item{DeltaA}{Indicator of missing treatment (assumed to be equal to 0 if
missing, 1 if observed)}
\item{Qrn}{List of estimated reduced-dimension outcome regression evaluated
at observations}
\item{gn}{List of estimated propensity scores evaluated at observations}
\item{a_0}{Vector of values to return marginal mean}
}
\description{
Evaluate extra piece of efficient influence function resulting from
misspecification of outcome regression
}
|
3a2b6d260502fbe0f6ab6af3ee0cec148bfc275c | 9348839bfe4519dbbe861a467ba40ff714a27c85 | /R/interpret_word.R | 91bf7bbe40056c735b900374edf947c683f17929 | [
"MIT"
] | permissive | dtburk/texanaaid | def4412e9d947f203750ba62b9380f962ba6c55c | 4fefcbd1531227880ff59537e03b80e50af08aad | refs/heads/master | 2020-09-06T01:52:43.552113 | 2019-11-12T21:13:55 | 2019-11-12T21:13:55 | 220,277,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,460 | r | interpret_word.R | #' Interpret word
#'
#' Outputs information on the frequency and context of a word in documents analyzed by a topic model
#' @param word The word to be interpreted.
#' @param tkd_texts The tokenized texts used to create the document-term matrix.
#' @param doc_term_mtrx A document-term matrix summarizing the content of \code{tkd_texts}.
#' @param stemmed_texts If the texts were stemmed, this should be the stemmed and tokenized texts, otherwise \code{NULL}.
#' @param summary Logical: Should only a summary be printed, or all information returned?
#' @param custom_stem A character containing the regex pattern to match if \code{word} is a custom stem.
#' @export
interpret_word <- function(word,
tkd_texts,
doc_term_mtrx,
stemmed_texts=NULL,
summary=TRUE,
custom_stem=NULL) {
if(word=="") {
cat("Please enter a word to interpret.")
return(NULL)
}
# Get indices of documents in which word appears
vocab_idx <- which(doc_term_mtrx$dimnames$Terms==word)
# Which documents does it appear in?
which_docs <- with(doc_term_mtrx, i[j==vocab_idx])
if(length(which_docs)==0) {
cat(paste0('"', word, '" not found. Either this word doesn\'t appear in any documents, or you may need to enter it in stemmed form.'))
return(NULL)
}
# In what context?
contexts <- character(0)
doc_of_contexts <- character(0)
for(j in which_docs) {
if(!is.null(stemmed_texts) & is.null(custom_stem)) {
d <- stemmed_texts[[j]]
} else d <- tkd_texts[[j]]
if(is.null(custom_stem)) {
which_words <- which(d==word)
} else which_words <- which(str_detect(d, custom_stem))
doc_of_contexts <- c(doc_of_contexts, rep(names(tkd_texts)[j], length(which_words)))
contexts <- c(contexts, unlist(sapply(which_words, function(x) {
start <- max(0, x-10)
end <- min(length(d), x+10)
str_c(tkd_texts[[j]][start:end], collapse=" ")
}))
)
}
contexts <- cbind(doc_of_contexts, contexts)
colnames(contexts) <- c("Document", "Usage")
if(summary) {
cat(paste0('"', word, '"', " appears in ", length(which_docs), " documents.\n\n"))
cat(paste0("Example of use: ", '"', sample(contexts[ , "Usage"], 1), '"'))
} else return(contexts)
}
|
1d9a6b7465e1ef6d4a4046e47648f3ddad80162c | d02fd5b1482be2daab6f812b514758d8dab1b053 | /R/waldP.R | 3f609dd6e1d893040ddbcca16b82f22f45a25fc4 | [] | no_license | LuXiaoEei/MWPCR-with-R | a7d6d97a01625643ad8ec165a3d11cf02194535c | 837a948f56d8ce157e64c68de7cb4325e9a62313 | refs/heads/master | 2021-07-22T05:22:54.429629 | 2017-10-31T11:43:47 | 2017-10-31T11:43:47 | 107,981,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 159 | r | waldP.R | # wald.test return p value
waldP <- function(X,Y){
df <- lm(Y~X)
P <- wald.test(Sigma = vcov(df),b = coef(df),Terms = 2)$result$chi2['P']
return(c(P))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.