blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9be7433e159c1b961b5b09123ecdd1fec0da0bb2
|
953e84446de8d060683c87669f2c62350939ed5f
|
/code/climate/random_forest_MDS.R
|
3c84cc3cff57708013496cb394509032e82ecf50
|
[] |
no_license
|
tkarasov/pathodopsis
|
29e23821c33ac727421158d9e40b277a09b8b8ca
|
c5d486ac02b1f0b2ff525439d3bf2d3843231e4d
|
refs/heads/master
| 2023-08-25T02:31:53.053463
| 2023-08-15T21:37:10
| 2023-08-15T21:37:10
| 172,746,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,956
|
r
|
random_forest_MDS.R
|
# This function does feature selection using the caret and mbench packages then does random forest modeling
# https://machinelearningmastery.com/feature-selection-with-the-caret-r-package/
library(caret)
library(mlbench)
library(Hmisc)
library(dplyr)
#library(randomForest)
devtools::install_git(url = 'https://github.com/tkarasov/taliaRgeneral.git')
library(taliaRgeneral)
library(phyloseq)
library(gplots)
# Fit a random forest with caret using repeated 10-fold cross-validation
# (3 repeats). `fin_predictors` must contain a `response` column; every
# other column is used as a predictor. `metric` is forwarded to
# caret::train() to pick the summary statistic used for tuning.
run_rf <- function(fin_predictors, metric = "RMSE") {
  set.seed(116)
  cv_control <- trainControl(
    method = "repeatedcv",
    number = 10,
    repeats = 3,
    verboseIter = TRUE,
    savePredictions = TRUE
  )
  caret::train(
    response ~ .,
    data = fin_predictors,
    method = "rf",
    #importance = "permutation",
    metric = metric,
    trControl = cv_control,
    verbose = TRUE
  )
}
# Create a control object to set the details of feature selection that will occur next
# Recursive feature elimination (caret::rfe) with random-forest ranking
# functions, evaluated over the candidate subset sizes in `subsets`.
feature_elim <- function(x, y, subsets = c(1:20, 25, 30, 33)) {
  set.seed(16)
  rfe_ctrl <- rfeControl(
    functions = rfFuncs,
    method = "repeatedcv",
    repeats = 5,
    saveDetails = TRUE,
    verbose = TRUE
  )
  # NOTE: lmFuncs produced a strange error here; possibly a caret bug --
  # same symptom reported at:
  # https://community.rstudio.com/t/rfe-error-logistic-classification-undefined-columns-selected-and-match-requires-vector-arguments/23988
  rfe(
    x = x,
    y = y,
    sizes = subsets,
    rfeControl = rfe_ctrl,
    na.action = na.omit
  )
}
#################################
# Step 1: Read in metadata and OTU data and filter to only plant
#################################
#metadata was edited and such in the prep_metadata.R script in the climate folder
#load("/ebio/abt6_projects9/pathodopsis_microbiomes/pathodopsis_git/data/all_metadata.rds")
# Loads `OTU_clim`: a list with otu_table, clim_data, tax_table, phy_tree, refseq.
load("/ebio/abt6_projects9/pathodopsis_microbiomes/data/OTU_clim.rds")
# Row indices for A. thaliana samples and for all non-soil (plant) samples.
plant_val = which(OTU_clim$clim_data$Host_Species=="Ath")
cap_val = which(OTU_clim$clim_data$Host_Species!="Soil")
# Loads `plant_clim`, the pre-filtered plant-only version of OTU_clim.
load("/ebio/abt6_projects9/pathodopsis_microbiomes/data/plant_clim.rds")
# plant_clim <- list(otu_table = OTU_clim$otu_table[plant_val,],
# clim_data = OTU_clim$clim_data[plant_val,],
# tax_table = OTU_clim$tax_table,
# phy_tree = OTU_clim$phy_tree,
# refseq = OTU_clim$refseq)
# Subset of OTU_clim restricted to non-soil samples (A. thaliana + Capsella).
cap_clim <- list(otu_table = OTU_clim$otu_table[cap_val,],
clim_data = OTU_clim$clim_data[cap_val,],
tax_table = OTU_clim$tax_table,
phy_tree = OTU_clim$phy_tree,
refseq = OTU_clim$refseq)
#################################
# Step 2: Read in response variable
#################################
#load(file = "/ebio/abt6_projects9/pathodopsis_microbiomes/data/figures_misc/OTUtab_GP1000_at15.rds")
otu_name = "seq_1"
#my.response <- otu_table(GP_at15_all)[,otu_name]
#only_ath <- subset_samples(GP_at15_all, Subject %in% all_metadata$Sequence_ID)
#################################
# Match response variable and metadata then filter matrix and convert to factors and numerics
#################################
#MDS <- sqrt(sqrt(plant_clim$otu_table)) %>% dist() %>% cmdscale(eig = TRUE)
#my.responseorig <- MDS$points
# Response: the microbiome cluster assignment per sample, as a factor.
my.var <- as.factor(plant_clim$clim_data$cluster) #my.responseorig[,1]
names(my.var) <- plant_clim$clim_data$Sequence_ID #rownames(my.responseorig)
data_frame_predictors <- plant_clim$clim_data #%>% select (-c(PlantID, Subject))
# NOTE(review): my.var is named by Sequence_ID but matched against Plant_ID
# here -- this only works if the two identifier schemes coincide; confirm.
my.response1 <- my.var[match(data_frame_predictors$Plant_ID, names(my.var))]
# Drop identifier/date columns so only candidate predictors remain.
data_frame_predictors <- data_frame_predictors %>% select(-c(Plant_ID, Sequence_ID, cluster, Date, Tour_ID, Host_Species))
my.total.matrix <- cbind(data_frame_predictors, "otu" = my.response1)
# Keep only samples with a defined response.
my.total.matrix <- filter(my.total.matrix, is.na(otu) == FALSE) #%>% select (-c(Plant_ID, Sequence_ID))
my.total.matrix$Lat = as.numeric(as.character(my.total.matrix$Lat))
my.total.matrix$Long = as.numeric(as.character(my.total.matrix$Long))
# Missing land-cover codes get the sentinel category "5000".
my.total.matrix[which(is.na(my.total.matrix$Land_cov)),]$Land_cov = "5000"
#################################
# Step 3: Preprocess predictors (center and scale)
#################################
# Tutorial on RFE in caret: http://topepo.github.io/caret/recursive-feature-elimination.html
#center and scale the predictors
# knnImpute fills missing values; center/scale standardize each column.
normalization <- preProcess(my.total.matrix %>% select(-otu),
method = c("knnImpute", "center", "scale"),
na.remove = TRUE)
x <- predict(normalization, my.total.matrix %>% select(-otu))
x <- as.data.frame(x)
# Drop any columns that still contain NAs, plus the site identifiers.
x <- x[,colSums(is.na(x))==0,] %>% select(-c(Site_ID)) %>% select(-c(Site_name))
y <- as.factor(my.total.matrix$otu)
#################################
# Step 4: Choose features via recursive feature elimination
#################################
rfProfile <- feature_elim(x, y, subsets = c(1:20,25, 30, 33))
my.predictors = predictors(rfProfile)
#trellis.par.set(caretTheme())
#plot(rfProfile, type = c("g", "o"))
#################################
# Step 5: Estimate importance of features via random forest
#################################
# Subset predictors to those chosen in Step 4.
fin_predictors <- x %>% select(my.predictors)
fin_predictors <- cbind(fin_predictors, response = y)
# NOTE(review): y is a factor (classification) but metric = "RMSE" is a
# regression metric -- caret may complain or fall back to Accuracy; confirm.
rf.output <-run_rf(fin_predictors, metric = "RMSE")
importance <- varImp(rf.output, scale=TRUE)
import_MDS1 <- plot(importance)
#################################
# Step 6: Plotting
#################################
# First the plot for the MDS on the total dataset
# NOTE(review): ggplot2/scales/cowplot functions (ggplot, viridis_pal,
# get_legend, plot_grid) are used below but not library()'d at the top --
# presumably pulled in by an attached package; confirm.
MDS.all <- (sqrt(OTU_clim$otu_table)) %>% dist() %>% cmdscale(eig = TRUE)
# Percent variance explained by the first two MDS axes.
exp1 <- ((MDS.all$eig) / sum(MDS.all$eig))[1]*100
exp2 <- ((MDS.all$eig) / sum(MDS.all$eig))[2]*100
colnames(MDS.all$points) = c("MDS1", "MDS2")
col3 = OTU_clim$clim_data$Host_Species
# MDS of all samples, colored by host species (Ath / Capsella / Soil).
all_data_MDS <- ggplot(data = data.frame(MDS.all$points), aes(x=MDS1, y=MDS2)) +
geom_point(aes(color = col3), cex = 1, alpha = 0.8) +
scale_color_viridis_d(labels = c(expression(italic("A. thaliana")), expression(italic("Capsella bursa-pastoris")), "Soil")) +
xlab(paste(paste("MDS1 (", round(exp1), sep=""),"%)",sep="")) +
ylab(paste(paste("MDS2 (", round(exp2), sep=""),"%)",sep="")) +
theme_bw() +
theme(legend.justification=c(0,0),
legend.position=c(.7,.9),
legend.title = element_blank(),
legend.text.align = 0,
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.box.background = element_rect(colour = "black")
)
# Same ordination restricted to the two plant hosts (no soil).
MDS.cap <- (sqrt(cap_clim$otu_table)) %>% dist() %>% cmdscale(eig = TRUE)
col2 = cap_clim$clim_data$Host_Species
exp3 <- ((MDS.cap$eig) / sum(MDS.cap$eig))[1]*100
exp4 <- ((MDS.cap$eig) / sum(MDS.cap$eig))[2]*100
colnames(MDS.cap$points) = c("MDS1", "MDS2")
thaliana_cap_MDS <- ggplot(data = data.frame(MDS.cap$points), aes(x=MDS1, y=MDS2)) +
geom_point(cex = 1, alpha = 0.8, aes(col = col2)) +
scale_color_manual(values = viridis_pal()(3)[c(1,2)], labels = c(expression(italic("A. thaliana")), expression(italic("Capsella bursa-pastoris")))) +
xlab(paste(paste("MDS1 (", round(exp3), sep=""),"%)",sep="")) +
ylab(paste(paste("MDS2 (", round(exp4), sep=""),"%)",sep="")) +
theme_bw() +
theme(legend.justification=c(0,0),
legend.position=c(.7,.9),
legend.title = element_blank(),
legend.text.align = 0,
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.box.background = element_rect(colour = "black")
)
# NOTE(review): adding theme() to the output of get_legend() (a grob, not a
# ggplot) likely errors -- the theme() probably belongs inside get_legend's
# argument; confirm.
legend <- get_legend(all_data_MDS) +
theme(legend.position = "bottom")
all_capsella <- plot_grid(all_data_MDS + theme(legend.position = "none"), thaliana_cap_MDS + theme(legend.position = "none"))
pdf("/ebio/abt6_projects9/pathodopsis_microbiomes/data/figures_misc/MDS_cap_soil.pdf", useDingbats = FALSE, width = 3.5, height = 2)
plot_grid(all_capsella, legend, ncol = 1, rel_heights = c(1,0.1))
dev.off()
# Now Let's plot MDS with our random forest
# NOTE(review): Tour_ID/Plant_ID were select()ed out of my.total.matrix in
# Step 2, and `my.responseorig` / `only_ath` are only defined in
# commented-out code above -- this section will fail as written; it appears
# to be leftover exploratory code. Confirm before running.
my.plantID <- my.total.matrix %>% select(c(PDSI, Tour_ID, Plant_ID))
rownames(my.plantID) <-my.total.matrix$Plant_ID
col1 = as.factor(sample_data(only_ath)$Clim)
# NOTE(review): single-bracket indexing of a data.frame with a character
# vector selects *columns*, not rows -- probably meant my.plantID[rownames(...), ].
col2 = my.plantID[rownames(my.responseorig)]
p1 <- ggplot(data = my.responseorig, aes(x=MDS1, y=MDS2)) +
geom_point(aes(color = col1), cex = 3) +
scale_color_viridis_d() +
theme_bw()
p2 <- ggplot(data = my.responseorig, aes(x=MDS1, y=MDS2)) +
geom_point(aes(color = col2), cex = 3) +
scale_colour_gradient2() +
theme_bw()
plot_grid(import_MDS1, p2)
####
#Now do the same thing as above but with MDS2
# # calculate correlation matrix. Only possible with numeric predictors
# nums <- unlist(lapply(data_frame_predictors, is.numeric))
#
# correlationMatrix <- cor(data_frame_predictors[,nums], use = "pairwise.complete.obs")
#
# heatmap.2(correlationMatrix, scale = "none", density.info="none", trace="none")
#
# # summarize the correlation matrix
# # find attributes that are highly corrected (ideally >0.75)
# highlyCorrelated <- findCorrelation(correlationMatrix, cutoff = 0.75)
#Notes: rfe is a simple backwards selection (recursive feature elimination algorith)
# problems with feature selection: https://stats.stackexchange.com/questions/27750/feature-selection-and-cross-evalidation
# #need to get the contrast
#factor.vars <- my.total.matrix %>% select(c(TourID))
#contrasts <- lapply(my.total.matrix[, sapply(my.total.matrix, is.factor)], contrasts, contrasts = FALSE)
#dummy.model <- model.matrix(otu ~ ., data = my.total.matrix, contrasts.arg = contrasts)
|
ee0f1533f8cdad644568cb93cbbe90dc57da0b00
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cdom/examples/cdom_spectral_curve.Rd.R
|
536fbdab39d58c409122c305291b86d5ff6a6166
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
cdom_spectral_curve.Rd.R
|
# Extracted example from the cdom package docs (cdom_spectral_curve.Rd).
library(cdom)
### Name: cdom_spectral_curve
### Title: Calculate the spectral curve of CDOM spectra.
### Aliases: cdom_spectral_curve
### ** Examples
# Fit the spectral slope curve on the package's bundled `spectra` dataset,
# then plot slope (s) against wavelength (wl).
data(spectra)
res <- cdom_spectral_curve(spectra$wavelength, spectra$spc2)
plot(res$wl, res$s, type = "l")
|
f22146aa25044d63b5432b0045b70e2a38559604
|
b35d8d930b0fd5255bf6ade8e05070badddaadf0
|
/R/eurusds.R
|
10f5f70c0651fda1d46758001a641471699f6fe2
|
[] |
no_license
|
ilda-kacerja/foRex
|
2e8af71faf33c4708ab86fb527bb04676c3d7b1a
|
1b642a1096e5f051c12c54f1c41070e71aa6e9cb
|
refs/heads/master
| 2020-05-29T16:56:26.164542
| 2019-05-29T16:45:04
| 2019-05-29T16:45:04
| 189,263,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
r
|
eurusds.R
|
#' Exchange rate for a certain day
#' @param date_from first date we are interested on year-month-date format
#' @param date_to last date we are interested on year-month-date format
#' @return data.table object with one row per day: `date` and the USD->EUR
#'   rate in `usdeur`
#' @export
#' @importFrom httr GET content
#' @importFrom logger log_info log_debug log_trace
#' @importFrom data.table data.table
eurusds <- function(date_from, date_to){
  # Query the exchangeratesapi.io history endpoint for USD-based EUR rates
  # over [date_from, date_to].
  response <- GET(
    'https://api.exchangeratesapi.io/history',
    query = list(
      start_at = date_from,
      end_at = date_to,
      base = 'USD',
      symbols = 'EUR'
    ))
  # Parsed body is a nested list; $rates maps "YYYY-MM-DD" -> list(EUR = rate).
  exchange_rates <- content(response)
  exchange_rates <- exchange_rates$rates
  usdeurs <- data.table(
    date = as.Date(names(exchange_rates)),
    usdeur = as.numeric(unlist(exchange_rates)))
  # BUGFIX: the original function ended on the assignment above, so the
  # result was returned invisibly; return the table visibly as documented.
  usdeurs
}
|
50c222f171164081b9a0de5d3f5e6c889d7b6295
|
c128f9c2a463b9ce131afeba75e37ec7a7dfce95
|
/AFRP_TempOxyInterpolate_Func_1.R
|
4a89107781b07ec4fbd939ac13b86672745bbe48
|
[] |
no_license
|
bmq215/AFRP-Data-Functions
|
de672b518b73e657fb88e81db58cf23d344ed182
|
055dc80371d4bbbc8e73c5f1f9e7ef24430c49e4
|
refs/heads/master
| 2022-08-04T16:52:05.905403
| 2020-05-25T12:51:01
| 2020-05-25T12:51:01
| 266,571,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,220
|
r
|
AFRP_TempOxyInterpolate_Func_1.R
|
#Function to interpolate AFRP temperature and oxygen profiles
#Takes a water name and .csv inputs to the two database tables (MEASUREMENT and SAMPLE)
#Returns data.frame with estimates of the temp and oxy within the data's range of dates and depths
#
# Arguments:
#   water      - water-body name; rows are kept where WATER == water
#   samp, meas - paths to .csv exports of the SAMPLE and MEASUREMENT tables
#   depthround - resolution (m) depths are rounded to before averaging
#   dateround  - width (days) of the date bins used for averaging
#   depthres   - step (m) of the interpolated depth grid
#   dateres    - step (days) of the interpolated date grid
#   badsamps   - optional vector of YSAMP_N ids to exclude
#   monthmin/monthmax/yearmin/yearmax - optional limits on the dates used
#   interpdates - if TRUE (default), also interpolate between date bins to
#                 produce a generalized grid; if FALSE, return per-date profiles
#
# Returns: data.frame with Water, Date, Depth, TempEst and (when oxygen data
# are sufficient) OxyEst.
#Ben MQ - 5/12/2020
AFRP.tempoxy = function(water, samp, meas,
                        depthround = 1, dateround = 30, depthres = 0.25, dateres = 1, badsamps = NA,
                        monthmin = NA, monthmax = NA, yearmin = NA, yearmax = NA, interpdates = T)
{
  # Setup ----
  library(dplyr)
  #Read in data
  cat("Reading in data...\n")
  wcs.samp = read.csv(samp, stringsAsFactors = F)
  wcs.meas = read.csv(meas, stringsAsFactors = F)
  #Join by YSAMP
  wcs = wcs.samp %>% right_join(wcs.meas, by = "YSAMP_N")
  #Cut down to just the water of interest
  wcs.sub = wcs %>% filter(WATER == water)
  #If there are any limits to the date range, subset to that
  if(!is.na(monthmin))
  {
    wcs.sub = wcs.sub %>% filter(MONTH >= monthmin)
  }
  if(!is.na(monthmax))
  {
    wcs.sub = wcs.sub %>% filter(MONTH <= monthmax)
  }
  if(!is.na(yearmin))
  {
    wcs.sub = wcs.sub %>% filter(YEAR >= yearmin)
  }
  if(!is.na(yearmax))
  {
    wcs.sub = wcs.sub %>% filter(YEAR <= yearmax)
  }
  #If there are any bad samples listed, filter those out
  #BUGFIX: the original filtered `wcs` (unused from this point on), so bad
  #samples were never removed; filter the working subset instead. The any()
  #guard also keeps the `if` condition scalar when badsamps is a vector.
  if(any(!is.na(badsamps)))
  {
    wcs.sub = wcs.sub[which(!(wcs.sub$YSAMP_N %in% badsamps)),]
  }
  #Also remove any measurements without a value
  wcs.sub = wcs.sub[which(!is.na(wcs.sub$VALUE_1)),]
  #Order measurements by depth and round to desired value
  wcs.sub = wcs.sub[order(wcs.sub$DEPTH_M),]
  wcs.sub$DEPTHROUND = (round(wcs.sub$DEPTH_M/depthround)*depthround)
  #Aggregate by desired date interval
  wcs.sub$DateInt = ((floor(wcs.sub$DAY_N/dateround))*dateround)
  #If you only want depth interpolation, replace that with simple dates
  if(!interpdates)
  {
    wcs.sub$DateInt = wcs.sub$DATE_COL
  }
  #Clear out NAs
  wcs.sub = wcs.sub[which(!is.na(wcs.sub$DateInt)),]
  #Filter out temp and oxy components, averaging within each date/depth bin.
  #BUGFIX: meanTemp/meanOxy were computed with median() (copy-paste from the
  #med* columns); they now hold the mean, matching their names and the
  #"averages" described in the header.
  wcs.sub.T = wcs.sub %>% filter(METRIC == "WATER TEMPERATURE") %>% group_by(WATER, DateInt, DEPTHROUND) %>% summarize(N = n(), meanTemp = mean(VALUE_1),medTemp = median(VALUE_1), sdTemp = sd(VALUE_1))
  wcs.sub.O = wcs.sub %>% filter(METRIC == "DISSOLVED OXYGEN") %>% group_by(WATER, DateInt, DEPTHROUND) %>% summarize(N = n(), meanOxy = mean(VALUE_1),medOxy = median(VALUE_1), sdOxy = sd(VALUE_1))
  wcs.sub.T = wcs.sub.T %>% filter(!is.na(meanTemp))
  wcs.sub.O = wcs.sub.O %>% filter(!is.na(meanOxy))
  #Set up output holding variables
  wcs.temp.T = NULL
  wcs.temp.O = NULL
  cat("Interpolating between depths...\n")
  #Loop through date intervals and interpolate between depths
  for(j in unique(wcs.sub.T$DateInt))
  {
    #Set min and max depths
    mindepth = 0
    maxdepth = min(max(wcs.sub.T$DEPTHROUND, na.rm = T),max(wcs.sub.O$DEPTHROUND, na.rm = T))
    #Subset by date
    wcs.dateint.T = wcs.sub.T %>% filter(DateInt == j)
    wcs.dateint.O = wcs.sub.O %>% filter(DateInt == j)
    #Exit if there isn't enough temp data (fewer than 5 depths, or the
    #profile stops more than 10 m short of the overall max depth)
    if(dim(wcs.dateint.T)[1] < 5 || (maxdepth - max(wcs.dateint.T$DEPTHROUND, na.rm = T)) > 10){ next}
    #Interpolate temperatures between depths (rule = 2 extends the end values)
    interp.T = approx(wcs.dateint.T$DEPTHROUND,wcs.dateint.T$meanTemp, xout = seq(0,maxdepth,by = depthres), rule = 2)
    wcs.temp.T = rbind(wcs.temp.T, data.frame(WATER = water, DateInt = j, DEPTHROUND = interp.T$x, ValEst = interp.T$y))
    #Exit if there isn't enough oxygen data
    if(dim(wcs.dateint.O)[1] < 5 || (maxdepth - max(wcs.dateint.O$DEPTHROUND, na.rm = T)) > 10){ next}
    #Interpolate oxygen concentrations between depths
    interp.O = approx(wcs.dateint.O$DEPTHROUND,wcs.dateint.O$meanOxy, xout = seq(0,maxdepth,by = depthres), rule = 2)
    wcs.temp.O = rbind(wcs.temp.O, data.frame(WATER = water, DateInt = j, DEPTHROUND = interp.O$x, ValEst = interp.O$y))
  }
  #Set up output holding variables
  wcs.grid = NULL
  #If interpolation between dates is desired (e.g. you want a generalized profile throughout the sampling range)
  if(interpdates)
  {
    cat("Interpolating between dates...\n")
    #Loop through depth intervals and interpolate between dates
    for(k in unique(wcs.temp.T$DEPTHROUND))
    {
      wcs.depth.T = wcs.temp.T %>% filter(DEPTHROUND == k)
      wcs.depth.O = wcs.temp.O %>% filter(DEPTHROUND == k)
      #Get min and max
      mindateint = min(wcs.depth.T$DateInt)
      maxdateint = max(wcs.depth.T$DateInt)
      #Interpolate temperatures and oxygen concentrations
      interp.T = approx(wcs.depth.T$DateInt,wcs.depth.T$ValEst, xout = seq(mindateint, maxdateint,by = dateres))
      interp.O = approx(wcs.depth.O$DateInt,wcs.depth.O$ValEst, xout = seq(mindateint, maxdateint,by = dateres))
      #if(dim(wcs.depth.O)[1] < 5){next}
      #interp.O = approx(wcs.depth.O$DateInt,wcs.depth.O$ValEst, xout = seq(mindateint, maxdateint,by = dateres))
      #interp.T = spline(wcs.depth.T$WEEK,wcs.depth.T$ValEst, xout = seq(minmonth, maxmonth,by = monthres))
      #interp.O = spline(wcs.depth.O$WEEK,wcs.depth.O$ValEst, xout = seq(minmonth, maxmonth,by = monthres))
      #Bind together outputs
      wcs.grid = rbind(wcs.grid, data.frame(Water = water, Date = interp.T$x, Depth = k, TempEst = interp.T$y,OxyEst = interp.O$y))
    }
  } else {
    cat("Date interpolation disabled, returning individual profiles...\n")
    #Format and join temp/oxy depth interpolations
    wcs.temp.O$DateInt = as.character(wcs.temp.O$DateInt )
    wcs.temp.T$DateInt = as.character(wcs.temp.T$DateInt )
    wcs.grid = data.frame(Water = water, Date = wcs.temp.T$DateInt, Depth = wcs.temp.T$DEPTHROUND, TempEst = wcs.temp.T$ValEst, stringsAsFactors = F)
    wcs.grid = wcs.grid %>% left_join(select(wcs.temp.O,c("DateInt","DEPTHROUND", "ValEst")) , by = c("Date" = "DateInt", "Depth" = "DEPTHROUND"))
    colnames(wcs.grid)[which(colnames(wcs.grid) == "ValEst")] = "OxyEst"
  }
  cat("Done\n")
  return(wcs.grid)
}
|
d53629d64a5c7758bc2efd8453e622ddda4929d8
|
fdab0c18eab28477d0980723c5ac5b4ba10c506f
|
/hl20181122/SHOCKSPAN1/RUNME.r
|
a8b6aa398ca34a8f84b99d8feff28c0e506f303f
|
[
"MIT"
] |
permissive
|
MIT-Informatics/PreservationSimulation
|
58b53595841c39e1fe00a05241be43ed0bcf6430
|
38c6641a25108022ce8f225a352f566ad007b0f3
|
refs/heads/master
| 2021-08-25T10:35:46.066554
| 2021-08-24T20:17:13
| 2021-08-24T20:17:13
| 17,369,426
| 9
| 0
|
NOASSERTION
| 2021-03-20T02:55:37
| 2014-03-03T15:03:30
|
R
|
UTF-8
|
R
| false
| false
| 321
|
r
|
RUNME.r
|
# where to run?
# setwd("C:/cygwin64/home/landau/working/PreservationSimulation/pictures/SHOCKSPAN1SERVER5")
# Driver script: load the SHOCKSPAN1 datasets for servers 1-4 and the
# combined 1234 set by sourcing the per-server loaders below.
# NOTE(review): debugprint is presumably read as a global flag by the
# sourced scripts -- confirm against GetShockSpan1Server*Data.r.
debugprint<-0
source("./GetShockSpan1Server1Data.r")
source("./GetShockSpan1Server2Data.r")
source("./GetShockSpan1Server3Data.r")
source("./GetShockSpan1Server4Data.r")
source("./GetShockSpan1Server1234Data.r")
|
5e22bdf0a64da39fa6dae66887b82f95879cc402
|
a845065b2b31e2fde530ad5eb192780340c5f481
|
/man/ex01.56.Rd
|
fe9412548805eede794c1a3d0394f9ae4809adf4
|
[] |
no_license
|
cran/Devore7
|
319c8a7bd4daca50ba1b7f7acdd53e40957a55bb
|
9592edf605e0a7927bdce1e0081796b183b1b5ad
|
refs/heads/master
| 2021-01-13T16:10:38.867717
| 2014-03-25T00:00:00
| 2014-03-25T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
rd
|
ex01.56.Rd
|
\name{ex01.56}
\alias{ex01.56}
\docType{data}
\title{R Data set: ex01.56}
\description{
The \code{ex01.56} data frame has 26 rows and 1 column.
}
\usage{data(ex01.56)}
\format{
A data frame with 26 observations on the following variable.
\describe{
\item{\code{C1}}{a numeric vector}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex01.56)
str(ex01.56)
}
\keyword{datasets}
|
0bae421508e34aa91a968e75b2308a3e59870164
|
61e55df4c1c22d42fc863102aff141c21823f2ae
|
/Bivalve analysis.R
|
d7244e5c418bd258e3df57b28aacc4260253d9e3
|
[] |
no_license
|
sbashevkin/SDP
|
da102ad56cee17c2337819cd9bdb16c23211bea6
|
0ddcf80466c7badcf1af1c091d055610e9bc38cb
|
refs/heads/master
| 2023-07-23T23:50:54.900991
| 2023-07-11T20:49:07
| 2023-07-11T20:49:07
| 211,150,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,963
|
r
|
Bivalve analysis.R
|
require(leaflet)
require(leaflet.minicharts)
require(readxl)
require(lubridate)
require(tidyverse)
require(brms)
# Build the monthly water-quality table `WQ` from three sources: field .xlsx
# exports, lab .xlsx exports, and the combined 2000-2018 EMP workbook.
Fieldfiles <- list.files(path = "Data/Water quality", full.names = T, pattern="Field")
Labfiles <- list.files(path = "Data/Water quality", full.names = T, pattern="Lab")
# Field sheets: keep the core physical parameters, averaged per date/station.
WQ<-sapply(Fieldfiles, function(x) read_excel(x, guess_max = 5e4))%>%
bind_rows()%>%
select(Date=SampleDate, Station=StationCode, Parameter=AnalyteName, Value=Result, Notes=TextResult)%>%
filter(Parameter%in%c("Temperature", "Secchi Depth", "Conductance (EC)", "Oxygen", "Depth"))%>%
group_by(Date, Station, Parameter, Notes)%>%
summarise(Value=mean(Value, na.rm=T))%>%
ungroup()%>%
# Lab sheets contribute only chlorophyll a, averaged the same way.
bind_rows(sapply(Labfiles, function(x) read_excel(x, guess_max = 5e4))%>%
bind_rows()%>%
select(Station=StationCode, Date=SampleDate, Parameter=ConstituentName, Value=Result, Notes=LabAnalysisRemarks)%>%
filter(Parameter=="Chlorophyll a")%>%
group_by(Date, Station, Parameter, Notes)%>%
summarise(Value=mean(Value, na.rm=T))%>%
ungroup())%>%
# One column per parameter, then friendlier names.
spread(key=Parameter, value=Value)%>%
rename(Chlorophyll=`Chlorophyll a`, Secchi_depth=`Secchi Depth`, Conductivity=`Conductance (EC)`)%>%
# Append the EMP combined workbook; below-detection values are set to 0.
bind_rows(read_excel("Data/Water quality/EMP WQ Combined_2000-2018.xlsx", na=c("N/A", "<R.L.", "Too dark"), col_types = c(rep("text", 3), "date", rep("text", 37)))%>%
select(Station=`Station Name`, Date, Chlorophyll=starts_with("Chlorophyll"), Latitude=`North Latitude Degrees (d.dd)`, Longitude=`West Longitude Degrees (d.dd)`, Secchi_depth=`Secchi Depth Centimeters`, Temperature=starts_with("Water Temperature"), Conductivity=starts_with("Specific Conductance"),
Oxygen=starts_with("Dissolved Oxygen"), Depth=starts_with("Water Depth"), Oxygen_Bottom=starts_with("(Bottom) Dissolved"), Conductivity_Bottom=starts_with("(Bottom) Specific"), Temperature_Bottom=starts_with("(Bottom) Water Temperature"), Turbidity_Bottom=starts_with("(Bottom) Turbidity"), Fluorescence_Bottom=starts_with("(Bottom) Fluorescence"))%>%
mutate(Chlorophyll=parse_double(ifelse(Chlorophyll%in%c("<0.05", "<0.5"), 0, Chlorophyll)),
Turbidity_Bottom=parse_double(ifelse(Turbidity_Bottom=="<0.10000000000000001", 0, Turbidity_Bottom)))%>%
mutate_at(c("Secchi_depth", "Temperature", "Conductivity", "Oxygen", "Depth", "Temperature_Bottom", "Conductivity_Bottom", "Oxygen_Bottom", "Fluorescence_Bottom"), parse_double))%>%
# Convert conductivity to salinity, then average everything to month/station.
mutate(MonthYear=floor_date(Date, unit = "month"),
Year=year(Date),
Salinity=((0.36966/(((Conductivity*0.001)^(-1.07))-0.00074))*1.28156),
Salinity_Bottom=((0.36966/(((Conductivity_Bottom*0.001)^(-1.07))-0.00074))*1.28156))%>%
select(-Conductivity, -Conductivity_Bottom)%>%
group_by(MonthYear, Year, Station)%>%
summarise_at(vars(c("Chlorophyll", "Secchi_depth", "Temperature", "Salinity", "Oxygen", "Depth", "Temperature_Bottom", "Salinity_Bottom", "Oxygen_Bottom", "Fluorescence_Bottom", "Turbidity_Bottom")), ~mean(., na.rm=T))%>%
ungroup()
#Stations<-read_excel("Bivalves app/1975-18 CPUE bivalves only, 2019Sept9.xlsx",
# sheet = "75-17 station locations", skip=1)%>%
# select(BivStation=Site_Code, Latitude, Longitude)
# Station coordinates, restricted to the EMP survey.
Stations<-read_csv("Data/Water quality/Master station key.csv",
col_types = "ccddc")%>%
select(-StationID)%>%
filter(Source=="EMP")%>%
drop_na()
# Bivalve CPUE per m2, reshaped long (one row per taxon/date), averaged to
# month/station, then joined with water quality and station coordinates.
Biv<-read_excel("Bivalves app/1975-18 CPUE bivalves only, 2019Sept9.xlsx",
sheet = "75-18 CPUE per m2", skip=1)%>%
select(Date, BivStation=StationCode, `Potamocorbula amurensis`, `Corbicula fluminea`)%>%
gather(key="Taxa", value="CPUE", -BivStation, -Date)%>%
mutate(Year=year(Date),
MonthYear=floor_date(Date, unit = "month"))%>%
# Bivalve station codes look like "STATION-POSITION"; split off the station.
separate(BivStation, into=c("Station", "Position"), sep="-", remove=F)%>%
group_by(Year, MonthYear, Taxa, Station)%>%
summarise(CPUE=mean(CPUE, na.rm=T))%>%
ungroup()%>%
left_join(WQ, by=c("Year", "MonthYear", "Station"))%>%
#left_join(Stations, by="BivStation")%>%
left_join(Stations, by="Station")%>%
filter(!is.na(Salinity) & !is.na(Oxygen))%>%
mutate(Date_num=as.numeric(MonthYear))%>%
# Append z-scored (_s suffix) versions of the continuous covariates.
mutate_at(vars(c("Chlorophyll", "Secchi_depth", "Temperature", "Salinity", "Salinity_Bottom", "Oxygen", "Depth", "Temperature_Bottom", "Oxygen_Bottom", "Fluorescence_Bottom", "Turbidity_Bottom", "Date_num")), list(s=~(.-mean(., na.rm=T))/sd(., na.rm=T)))%>%
mutate(Month=month(MonthYear, label=T))
# Model-comparison series: hurdle models of Potamocorbula amurensis CPUE
# (post-1990) as a function of standardized salinity and related covariates,
# fit with brms and compared via WAIC/LOO.
PA<-Biv%>%
filter(Taxa=="Potamocorbula amurensis" & Year>1990)
# model: linear salinity, hurdle-gamma likelihood.
model<-brm(CPUE~Salinity_s, data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model<-add_criterion(model, c("waic", "loo"))
# model2: smooth (spline) salinity effect.
model2<-brm(CPUE~s(Salinity_s), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1, control=list(adapt_delta=0.99),
iter = 1e4, warmup = 2.5e3)
model2<-add_criterion(model2, c("waic", "loo"), reloo=T)
# modelln / modelln2: same structures with a hurdle-lognormal likelihood.
modelln<-brm(CPUE~Salinity_s, data=PA, family=hurdle_lognormal(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
modelln<-add_criterion(modelln, c("waic", "loo"))
modelln2<-brm(CPUE~s(Salinity_s), data=PA, family=hurdle_lognormal(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1, control=list(adapt_delta=0.99),
iter = 1e4, warmup = 2.5e3)
modelln2<-add_criterion(modelln2, c("waic", "loo"), reloo=T)
#hurdle_gamma is better than lognormal by loo
#elpd_diff se_diff
#model2 0.0 0.0
#modelln2 -42.5 24.8
#model -94.2 24.8
#modelln -227.2 28.7
# model3 / model4: cubic and quadratic polynomial salinity effects.
model3<-brm(CPUE~poly(Salinity_s,3), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1, control=list(adapt_delta=0.99),
iter = 1e4, warmup = 2.5e3)
model3<-add_criterion(model3, c("waic", "loo"), reloo=T)
model4<-brm(CPUE~poly(Salinity_s,2), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model4<-add_criterion(model4, c("waic", "loo"), reloo=T)
#Model with smoother is slightly better than poly(2) model, which is slightly better than poly(3) model. Smoother probably not worth the computational time though.
#elpd_diff se_diff
#model2 0.0 0.0
#model4 -5.6 6.2
#model3 -10.9 7.0
#model -94.2 24.8
# model5: add month as a categorical covariate.
model5<-brm(CPUE~poly(Salinity_s,2)+Month, data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model5<-add_criterion(model5, c("waic", "loo"), reloo=T)
#Month has no effect and is a worse model
# model6-model8: add dissolved oxygen (polynomial, linear, and smooth forms).
model6<-brm(CPUE~poly(Salinity_s,2)+poly(Oxygen_s,2), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model6<-add_criterion(model6, c("waic", "loo"), reloo=T)
model7<-brm(CPUE~poly(Salinity_s,2)+Oxygen_s, data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model7<-add_criterion(model7, c("waic", "loo"), reloo=T)
model8<-brm(CPUE~s(Salinity_s)+s(Oxygen_s), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1,
iter = 1e4, warmup = 2.5e3)
model8<-add_criterion(model8, c("waic", "loo"), reloo=T)
# model9: station random intercept; model10: spatiotemporal tensor smooth.
model9<-brm(CPUE~poly(Salinity_s,2) + (1|Station), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1, control=list(adapt_delta=0.99),
iter = 1e4, warmup = 2.5e3)
model9<-add_criterion(model9, c("waic", "loo"), reloo=T)
model10<-brm(CPUE~poly(Salinity_s,2) + t2(Latitude, Longitude, Date_num_s), data=PA, family=hurdle_gamma(),
prior=prior(normal(0,10), class="Intercept")+
prior(normal(0,5), class="b"),
chains=1, cores=1, control=list(adapt_delta=0.99),
iter = 1e4, warmup = 2.5e3)
model10<-add_criterion(model10, c("waic", "loo"), reloo=T)
|
83c48d53114ef6e2397a227e4e9212c9f795cf23
|
514b2e947c8e998e725488633275512b58a70e23
|
/R/s4_object.R
|
e3ae9cb46a8ec32d79e768dcb8646ec6150fffc0
|
[
"MIT"
] |
permissive
|
AlgoSkyNet/acyRsa
|
1f655454f98205f147dd3cb147e6680bf26f3c05
|
d2537bc958c857111c0c4effb327a596c69e8f76
|
refs/heads/master
| 2022-04-25T01:32:35.556741
| 2020-04-21T17:11:51
| 2020-04-21T17:11:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
s4_object.R
|
#' @export token
#' @export base_url
#' @export token<-
#' @export base_url<-
#' @export valid_until
NULL
# Connection Class
# S4 class holding the state of an acyRsa API connection:
#   @token       - authentication token (single character string)
#   @base_url    - API root URL (single character string)
#   @valid_until - token expiry, stored as a character string
# The prototype fills every slot with the NA_character_ placeholder.
setClass("acyRsaConnection",
slots = c(
token = "character",
base_url = "character",
valid_until = "character"
),
prototype = list(
token = NA_character_,
base_url = NA_character_,
valid_until = NA_character_
)
)
# Validity: token and base_url must each be a single, non-empty string.
# The NA_character_ placeholders from the prototype are allowed so that
# new("acyRsaConnection") still works; isTRUE() keeps the empty-string
# comparison from erroring on NA. BUGFIX: the original bare `==` check
# produced NA for the prototype object, making new() fail with
# "missing value where TRUE/FALSE needed"; the message typo ("lenght")
# is also corrected.
setValidity("acyRsaConnection", function(object){
  if (length(object@token) != 1 || length(object@base_url) != 1) {
    "@token and @base_url must have length 1"
  } else if (isTRUE(object@token == "") || isTRUE(object@base_url == "")) {
    "@token and @base_url can not be empty"
  } else {
    TRUE
  }
})
# Accessor generics and methods for acyRsaConnection slots. The replacement
# methods (token<-, base_url<-) call validObject() so an assignment can
# never leave the connection in an invalid state.
setGeneric("token", function(x) standardGeneric("token"))
# token(x): read the authentication token slot.
setMethod("token", "acyRsaConnection", function(x) x@token)
setGeneric("token<-", function(x, value) standardGeneric("token<-"))
# token(x) <- value: replace the token, re-validating the object.
setMethod("token<-", "acyRsaConnection", function(x, value) {
x@token <- value
validObject(x)
x
})
setGeneric("base_url", function(x) standardGeneric("base_url"))
# base_url(x): read the API root URL slot.
setMethod("base_url", "acyRsaConnection", function(x) x@base_url)
setGeneric("base_url<-", function(x, value) standardGeneric("base_url<-"))
# base_url(x) <- value: replace the URL, re-validating the object.
setMethod("base_url<-", "acyRsaConnection", function(x, value) {
x@base_url <- value
validObject(x)
x
})
setGeneric("valid_until", function(x) standardGeneric("valid_until"))
# valid_until(x): read the token-expiry slot (character; read-only here).
setMethod("valid_until", "acyRsaConnection", function(x) x@valid_until)
|
758b937f9d57ff0568ac745170d5699e5ed734ae
|
192df3858b212b89c54a5d16ac555ff08079afd4
|
/plot3.R
|
03af44bbab6221c8a401a3709d392c687d8d72e3
|
[] |
no_license
|
marcoscattolin/ExData_Exploratory2
|
3326d0b25d62df393e7d59645a7e5a60e37ff179
|
a3f5ca7b47af0cf70a72001ffb44862013f7db04
|
refs/heads/master
| 2021-01-23T03:53:03.775435
| 2014-05-12T13:23:32
| 2014-05-12T13:23:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
plot3.R
|
#read data
NEI <- readRDS("../data/summarySCC_PM25.rds")
SCC <- readRDS("../data/Source_Classification_Code.rds")
#extract data for Baltimore (fips == "24510") and reshape by year and type
BaltimoreNEI <- subset(NEI, fips == "24510")
library(reshape2)
melted <- melt(BaltimoreNEI,id = c("year","type"), measure.vars="Emissions")
aggregated <- dcast(melted,year + type ~ variable, sum)
#plot and add regression line
library(ggplot2)
p <- qplot(year, Emissions, data=aggregated, facets=.~type, ylab="PM25 Emissions (tons)", main="PM25 Emissions by type of source, Baltimore") + geom_smooth(method="lm")
#BUGFIX: explicitly print the ggplot object -- top-level autoprinting is
#disabled inside source(), so the device would otherwise be blank when
#dev.copy() captures it.
print(p)
#save plot to png
dev.copy(png,"plot3.png", width=800, height=600)
dev.off()
|
170e19f58767d53a3a1c64ba15e287c91367385c
|
f5865f24bf8bb033ccbf6c0ba4a72c1894be0f72
|
/man/ALKr-class.Rd
|
e2a4f62c8c8a3e5985b1473571b7f3284d38ce7f
|
[] |
no_license
|
cran/ALKr
|
f8dbc6d1c6130114c0d0f54b78bc2058ddd74895
|
a2185b0c6ab2d0e5184494e31a68b9d891fa4bd7
|
refs/heads/master
| 2016-09-06T18:22:47.133968
| 2014-02-20T00:00:00
| 2014-02-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
rd
|
ALKr-class.Rd
|
\docType{class}
\name{ALKr-class}
\alias{ALKr-class}
\title{ALKr}
\description{
Every function used to calculate Age-Length Keys returns an
\code{ALKr} object.
}
\details{
\describe{ \item{alk}{A \eqn{i \times j} matrix with the
probability of an individual of length \eqn{i} having age
\eqn{j}, i.e. \eqn{P(j|i)}} \item{N}{A \eqn{i \times j}
matrix with the estimated number of individuals of length
\eqn{i} and age \eqn{j}} \item{method}{A string with the
name of the algorithm used to calculate the ALK}
\item{params}{A named list with any parameters needed by
the algorithm} \item{name}{A string with a user-defined
name for the ALKr object} \item{description}{A string with
a user-defined description for the ALKr object} }
}
|
a4441263826d17f54bf0371ff3e6cd9e2327bae6
|
e6ee5f739acfe20843e180143818667a2cfe8996
|
/bin/awe_creator.R
|
b732ea71a730656d6dcea9357b9af07a7685f252
|
[] |
no_license
|
pawan1992/AWE
|
375c64ab7ae561c375c32ca039db9f5a0b1eb56d
|
c6639ef44e8448dbd1cdf5beeeee04f60112ee05
|
refs/heads/master
| 2021-01-06T20:46:14.135163
| 2014-09-18T08:24:14
| 2014-09-18T08:24:14
| 24,179,552
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 513
|
r
|
awe_creator.R
|
### Script to get the data for AWE populations for subsequent analysis
### Singh P. | September 2014
### ------------------------------------------------------------------
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# it only works on the original author's checkout.
setwd('~/pawan/climate_git/AWE/bin/')
# y: raw data table; x: its header/metadata file
y<-read.table('../data/adr_inp.txt')
x<-read.csv('../data/adr_inp_hdr (copy)')
# keep header rows 9..68 -- presumably one row per data column 8..67
# of y (60 each); TODO confirm against the header file layout
x<-x[9:68,]
# u: inclusion flag per column ('y' = keep) -- assumption based on the
# which(u=='y') filter below
u <- x$X.2
u<-as.character(u)
# sanity checks (printed interactively, not stored)
length(u)
length(names(y))
# restrict y to the 60 data columns described by the header rows
y<-y[,8:67]
indx <- which(u=='y')
y.indx <- y[,indx]
# write the selected columns, tab-separated, without names
write.table(y.indx,file='../data/adr_inp_awe',sep='\t',eol='\t \n', row.names=FALSE, col.names=FALSE)
|
141ef59211a7a0f72a3bbd3bd19188778c1c0982
|
3ad2cb3cced8dcee4ee638303a97e5d0ced090ee
|
/man/impute.Rd
|
4eb770b0028c7d8bcbb1701de53894334ad98fca
|
[] |
no_license
|
cran/relations
|
a7cad6a1f2f1a435bbbe8ef4b241b6faf4a86871
|
44e22a60b9cefbd72a209ea1794d80a56609d525
|
refs/heads/master
| 2023-03-16T21:26:51.984375
| 2023-03-07T13:19:04
| 2023-03-07T13:19:04
| 17,699,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,935
|
rd
|
impute.Rd
|
\name{impute}
\alias{relation_impute}
\title{Impute relations}
\description{
Impute missing incidences in relations by averaging all possible
completions within a specified family.
}
\usage{
relation_impute(x, method = NULL, control = list(), \dots)
}
\arguments{
\item{x}{an endorelation or an ensemble of endorelations.}
\item{method}{character string specifying the method to be used (see
\bold{Details}). If \code{NULL}, it is guessed from the
relation with missing \emph{objects} removed.}
\item{control}{named list of control arguments. Currently, only
\code{n} is accepted by the \code{any/\var{F}} methods, indicating
the number of solutions to be returned. Default is 1; \code{"all"}
or \code{NA} will generate all possible completions. Note that
\code{n} is currently ignored if \code{x} is a relation ensemble.}
\item{\dots}{named list of control arguments, overriding the ones
in \code{control}.}
}
\value{
If \code{x} is an ensemble or more than one solution is requested
using the \code{n} control argument: an ensemble
of endorelations. An endorelation otherwise.
}
\details{
Endorelations with missing elements (i.e., whose incidence is
\code{NA}) are imputed using one of the methods described as follows.
\describe{
\item{\code{"omit"}}{Missing incidences are replaced by zeros, i.e.,
the corresponding elements are removed from the graph.}
\item{\code{"any/\var{F}"}}{The incidences are replaced by
arbitrary values suitable for family \var{F}, with possible values:
\describe{
\item{\code{G}}{General (unrestricted) relations.}
\item{\code{L}}{Linear orders.}
\item{\code{W}}{Weak orders.}
\item{\code{O}}{Partial orders.}
}
\code{L}, \code{W}, and \code{O} can optionally be complemented by
\code{/first} or \code{/last} to further restrict imputed elements
to be placed on top or bottom of the given ordering.
}
\item{\code{"average/\var{F}"}}{Computes the relation with average
incidences, based on all possible completions as indicated for the
\code{any/\var{F}} methods. Note that these completions are not
explicitly generated to compute the averages, and that the
resulting relation will typically be fuzzy.}
}
}
\examples{
## create a relation with a missing object
R <- ranking(1:2, 1:3)
print(R)
R <- as.relation(R)
## find all suitable completions within L
ens <- relation_impute(R, method = "any/L", n = "all")
lapply(ens, as.ranking)
if(require("Rgraphviz")) plot(ens)
## find 3 suitable partial orders
ens <- relation_impute(R, method = "any/O", n = 3)
lapply(ens, relation_incidence)
if(require("Rgraphviz")) plot(ens)
## compute average completion
R1 <- relation_impute(R, method = "average/O")
relation_incidence(R1)
## check correctness of averaging
R2 <- mean(relation_impute(R, "any/O", n = "all"))
stopifnot(all.equal(R1, R2))
}
\keyword{math}
|
57e980f3a05aacd1d4e7fa497262168fd81d404f
|
b4dcae4cfc38dc0ca7440d8f67d922981b2f4430
|
/RCode/functions/zones_that_burned_all.R
|
0c1464b229b137815fd8508893b7f2ad0191e30c
|
[] |
no_license
|
cont-limno/DoLakesFeelTheBurn
|
daa0db45b72220c0a6982e9b07cbeadebfc2bcc5
|
bab1fbe3fbff1cf9f2f248a1a9c5d0b65133e0cc
|
refs/heads/master
| 2020-03-23T20:10:31.179063
| 2019-04-30T18:08:49
| 2019-04-30T18:08:49
| 142,026,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 759
|
r
|
zones_that_burned_all.R
|
### function that determines which lakes (by lagoslakeid) that have experienced fire
# uses intersection of fire polygons and lake watersheds/buffer areas
library(raster)

#' Return the lagoslakeid of every zone intersecting at least one fire.
#'
#' @param burn_polygons MTBS fire-perimeter polygons.
#' @param zone_shp polygons of zones in which to calculate area burned;
#'   could be watersheds or buffers around lakes.
#'   Function ASSUMES all inputs are in the same coordinate system.
#' @return vector of unique lagoslakei ids whose zone burned
zones_that_burned_all <- function(burn_polygons, zone_shp){
  # Fire_ID column: each fire has state abbrev as prefix; can subset based on that
  burn_area_zone <- suppressWarnings(raster::intersect(zone_shp, burn_polygons))
  # keep one intersection row per lake
  burned_rows <- burn_area_zone@data[!duplicated(burn_area_zone@data$lagoslakei), ]
  # BUG FIX: the original assigned unique(...) to the misspelled name
  # `unique_lakeIDS` (capital S) and then returned the de-duplicated
  # data frame instead of the id vector promised by the function
  # name and header comment.
  unique(burned_rows$lagoslakei)
}
|
211fac6927611ffca98c5a6b19abfb6a3fc8538a
|
d481adea8b5f993c766a7842c2cb9babcef3deef
|
/SERVER/03_compare_charts.R
|
6b76ec8f3bb6e62a6aa908063a112be3006e3d1b
|
[] |
no_license
|
AurelieFrechet/world_happiness
|
991e1be543a2789267fc9a35685a957ab5e960ff
|
7ae1bd22325b2c21815ff30eac08b351b79be1cf
|
refs/heads/master
| 2021-06-24T04:27:50.677840
| 2021-01-19T16:08:01
| 2021-01-19T16:08:01
| 192,379,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
03_compare_charts.R
|
# Shiny server fragment for the "compare countries" tab.
# compare_filter: reactive subset of current$data restricted to the
# selected countries; validate() shows "Choose countries" until the
# select input is non-empty.
# NOTE(review): the validation checks input$compare_select but the
# filter uses current$compare -- presumably kept in sync elsewhere in
# the server; confirm against the rest of the app.
compare_filter <- reactive({
validate(need(input$compare_select != "", "Choose countries"))
dplyr::filter(current$data, country %in% current$compare)
})
# Stacked bar chart of the happiness components for the chosen year.
output$compare_stakedbar <-
renderPlotly({
stackedbar_wh(data = compare_filter(),
year = current$year)
})
# Line chart: evolution of the selected indicator over the years.
output$compare_lines <-
renderPlotly({
lines_wh(data = compare_filter(),
indicator = current$indicator)
})
# Titles ------------------------------------------------------------------
# Dynamic HTML heading naming the indicator currently displayed.
output$compare_lines_title <-
renderUI({HTML(paste(
"<h2>Evolution of", current$indicator, "by year</h2>"
))})
|
81673db96626989238e00caccd0b1f50ef861848
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NetWeaver/examples/rc.plot.heatmap.Rd.R
|
147a4f118a735d73b79e2fa5ec94fe4878459ef4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
r
|
rc.plot.heatmap.Rd.R
|
# Auto-extracted example stub for NetWeaver::rc.plot.heatmap.
# The help page defers its usage example to ?Modules, so nothing
# beyond loading the package is executed here.
library(NetWeaver)
### Name: rc.plot.heatmap
### Title: Plot Heat-map
### Aliases: rc.plot.heatmap
### ** Examples
##see example of ?Modules
|
c6686952bc7e44f9642d59c452463f3902bc7c9a
|
71f61b96c7f7a4d981e0a823ea5fb78e4bd7f2d2
|
/assignment 1/plot1.R
|
961f5274df4c73b4852bb5c23b6287debcb6e4db
|
[] |
no_license
|
bradweiner/exploratorydataanalysis
|
f359ec1f114e8e8e4ff2a6171f2a37aefc9f7c68
|
922b58249549a5b93df1a34dc85e8426af5a2f99
|
refs/heads/master
| 2020-08-07T12:42:05.254770
| 2014-05-08T17:05:25
| 2014-05-08T17:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
plot1.R
|
# plot1.R -- Coursera Exploratory Data Analysis, assignment 1.
# Draws a red histogram of Global Active Power for 2007-02-01/02 and
# writes it to plot1.png (480x480).
library(data.table)
elec<-fread(input="C:/Users/wein0339/Desktop/Dropbox/Coursera/Exploratory Data Analysis/household_power_consumption.txt", sep=";"
, na.strings=c("?"))
# Source file stores dates as dd/mm/yyyy.
elec$Date<-as.Date(elec$Date, "%d/%m/%Y")
#Subset February 1st and 2nd, 2007
elec1<-subset(elec,Date=="2007-02-01" | Date=="2007-02-02")
# convert once (the original repeated this conversion twice)
elec1$GAP<-as.numeric(elec1$Global_active_power)
#set up plot output
png("C:/Users/wein0339/Desktop/Dropbox/Coursera/Exploratory Data Analysis/plot1.png", width = 480, height = 480)
#build histogram
xlab <- "Global Active Power (kilowatts)"
main <- "Global Active Power"
# BUG FIX: the original called hist(..., main=title, ...); `title` is
# the graphics::title() *function*, not the string assigned above, so
# the plot title was broken. Pass the `main` variable instead.
hist(elec1$GAP, xlab=xlab, main=main, col="RED")
#turn plot off
dev.off()
|
7b56ad862d9c13190b13cb9b465a8b13faccd00b
|
3fc4a512a40350e5acfee574886683e04c2edd0a
|
/man/Queue.Rd
|
3ea3fe97432f946b99a8f76eae2c75c461e3572d
|
[] |
no_license
|
cran/Containers
|
d09e0fe0fec9cf7e46cb7717fdf4cc8ac15db7c1
|
aca021b3e46576050973ca896db282881832093d
|
refs/heads/master
| 2020-05-20T07:27:20.993341
| 2008-03-27T00:00:00
| 2008-03-27T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
rd
|
Queue.Rd
|
\name{Queue}
\alias{Queue}
\title{Constructor for Class Queue}
\description{
This method instantiates class \code{Queue}.
}
\usage{
aQueue = Queue()
}
\examples{
aQueue = Queue() # Instantiate a Queue.
aQueue$insertBack(2) # Insert a few elements.
aQueue$insertBack(3)
aQueue$insertBack(-4)
aQueue$front() # Inspect the front element.
}
\author{John Hughes}
\keyword{package}
\keyword{methods}
|
1d2a2bee2f18959df1aad8dbd9f1bd492ea3e7cd
|
6d65a534673543684f0a97740e7e7b831f50ea47
|
/inst/scripts/hh2/2.8-2.R
|
9474dbe6121550fbb77d7d4f28649ebc3a2043fb
|
[] |
no_license
|
cran/HH
|
91151d240d6ecc1334fd79f1b0dfbbc28ca68df6
|
a6ee768cedcebd4477bb9a5b4d0baa3d16e4dca0
|
refs/heads/master
| 2022-09-01T20:50:52.605306
| 2022-08-09T15:10:07
| 2022-08-09T15:10:07
| 17,691,800
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
2.8-2.R
|
# HH (Heiberger & Holland) script 2.8-2: build the indicator design
# matrix for a 2^8 fractional-factorial experiment, fit the model, and
# confirm its aliasing structure.
# Expand each treatment-combination string (e.g. "abd") into a 0/1
# indicator row over the eight factors a..h.
R282 <- t(sapply(strsplit(Design_2.8_2$trt,""),
function(trtcomb)
as.numeric(letters[1:8] %in% trtcomb)))
dimnames(R282) <- list(Design_2.8_2$trt, letters[1:8])
R282 <- data.frame(blocks=Design_2.8_2$blocks, R282)
R282
data(R282.y) ## R282.y was randomly generated
# Fit all main effects and two-way interactions with a block term.
R282.aov <- aov(R282.y ~ blocks + (a+b+c+d+e+f+g+h)^2, data=R282)
anova(R282.aov)
model.matrix(R282.aov)
## confirm aliasing
# Same model with blocks as an Error() stratum instead of a fixed term.
R282E.aov <- aov(R282.y ~ Error(blocks) + (a+b+c+d+e+f+g+h)^2,
data=R282)
summary(R282E.aov)
|
5a42ede78cf98d4241362d58fc8499921fbd81d5
|
cb20aa7ca2d807d038d5e3c536a768f160d517f1
|
/plot_proportions_against_fn.R
|
2c12aee8e56a3588d149cf2e30d99eca8eb0f382
|
[] |
no_license
|
mathii/spectrum
|
dc393e3e1e425183112f7fc2a814fd3cd416936a
|
e9f0f990e8c0e5aac598886109bc5e89b15c3bc0
|
refs/heads/master
| 2021-01-10T07:15:47.871805
| 2020-06-02T15:02:15
| 2020-06-02T15:02:15
| 54,673,436
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,550
|
r
|
plot_proportions_against_fn.R
|
## Plot the proportion of variants in each class, as a function of n, as fn increases
# Depends on spectrumlib.R for the globals `info` (sample metadata) and
# `cols` (region colour map) -- assumption; confirm against spectrumlib.R.
source("~/spectrum/code/spectrumlib.R")
####################################################
# Optional first command-line argument selects a data-file suffix
# (e.g. "poly_Africa", "private").
what <- ""
cA <- commandArgs(TRUE)
if(length(cA)>0){
what <- paste0(".", cA[1])
}
####################################################
# Configuration for mutation signature 1 (TCC->T etc.); the commented
# block below is the alternative configuration for signature 2.
sig.name <- 1
sig <- c("TCC.T", "ACC.T", "TCT.T", "CCC.T")
ylim=c(0.07, 0.11)
spr<-0.5
ns <- 1:30
wts<-c(10,20,10,5,rep(1,length(ns)-4))
## sig.name <- 2
## sig <- c("ACG.T", "CCG.T", "GCG.T", "TCG.T")
## ylim=c(0.12, 0.19)
## in.ind <- c("S_Chane.1", "S_Piapoco.2", "S_Quechua.3", "S_Mayan.1", "S_Mayan.2", "S_Quechua.1", "S_Nahua.1", "S_Quechua.2", "S_Nahua.2", "S_Zapotec.1", "S_Mixtec.1")
## spr <- 0.25
## ns <- 1:15
## wts<-c(100,100,10,10,10,20,rep(1,length(ns)-6))
####################################################
if(what==".poly_Africa" & sig.name==2){ylim <- c(0.13, 0.6)}
if(what==".private" & sig.name==2){ylim <- c(0.13, 0.24)}
####################################################
sig.name.map <- c(4,1)
sig.map=c(2,4,3,1)
####################################################
# For signature 2, the Americas are split into high/low subsets of
# individuals (hi.ind) with distinct line types.
regions <- sort(unique(info$Region))
ltys <- rep(1, length(cols))
names(ltys) <- names(cols)
if(sig.name==2){
regions <- c("Africa", "America_Hi", "America_Lo", "CentralAsiaSiberia", "EastAsia", "Oceania", "SouthAsia", "WestEurasia")
hi.ind <- c("S_Chane.1", "S_Piapoco.2", "S_Quechua.3", "S_Mayan.1", "S_Mayan.2", "S_Quechua.1", "S_Nahua.1", "S_Quechua.2", "S_Nahua.2", "S_Zapotec.1", "S_Mixtec.1")
cols <- c(cols, "America_Hi"="#984EA3", "America_Lo"="#984EA3")
ltys <- rep(1, length(cols))
names(ltys) <- names(cols)
ltys["America_Lo"] <- 2
}
proportions <- counts <- as.data.frame(matrix(0, nrow=length(ns), ncol=length(regions)))
names(proportions) <- names(counts) <- regions
rownames(info) <- gsub("-", ".", info$ID)
if(sig.name==2){
info[hi.ind,"Region"] <- "America_Hi"
info[info[rownames(info),"Region"]=="America","Region"] <- "America_Lo"
}
# For each allele count n: read the per-sample spectrum and count
# matrices, normalise to column frequencies, then average the signature
# proportion per region.
for(i in 1:length(ns)){
n <- ns[i]
data <- read.table(paste0("~/spectrum/data/spectrum_matrix.n", n, what, ".txt"), as.is=TRUE, header=TRUE)
freq <- t(t(data)/colSums(data))
cnts <- read.table(paste0("~/spectrum/data/count_matrix.n", n, what, ".txt"), as.is=TRUE, header=TRUE)
for(reg in regions){
proportions[i,reg] <- mean(colSums(freq[sig,info[colnames(freq),"Region"] %in% reg]))
counts[i,reg] <- sum(colSums(cnts[sig,info[colnames(cnts),"Region"] %in% reg]))
}
}
# Drop regions that produced no data for the first n.
regions <- regions[!is.nan(unlist(proportions[1,]))]
# Plot: points per region plus smoothed (spline) or raw trend lines.
pdf(paste0("~/spectrum/plots/fn_sig", sig.name, what, ".pdf"))
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(ns, proportions[,regions[1]], pch=16, cex=0.75, col=cols[regions[1]], ylim=ylim, xlab="Allele count", ylab=bquote("Proportion of signature"~.(sig.name)~"mutations"), xlim=range(ns), cex.axis=1.4, cex.lab=1.5)
lines(smooth.spline(ns, proportions[,regions[1]], spar=spr, w=wts), col=cols[regions[1]], lty=ltys[regions[1]], lwd=2)
for(i in 2:length(regions)){
points(ns, proportions[,regions[i]], pch=16, cex=0.75, col=cols[regions[i]])
inc<-!is.na(proportions[,regions[i]])
if(sig.name==1){
lines(smooth.spline(ns[inc], proportions[,regions[i]][inc], spar=spr, w=wts[inc]), col=cols[regions[i]], lty=ltys[regions[i]], lwd=2)
}else{
lines(ns[inc], proportions[,regions[i]][inc], col=cols[regions[i]], lty=ltys[regions[i]], lwd=2)
}
}
legend("topright", regions, lwd=2, col=cols[regions], lty=ltys[regions], pch=16, bty="n", ncol=1, cex=1.2)
dev.off()
|
313497b74b1184b0f374d292aaf7d7541e185abd
|
5b9e81414f73103919ff5da0f5424afd11ab3e0c
|
/2_Forecast_ML/cv_dt.R
|
cbbc8d75d28fdcecb64aab7ebcaa19e91001aeb8
|
[] |
no_license
|
kevinyz/Forex_Forecast
|
a1d61e1372decd08bc28bb1bf87c38200fb249a2
|
200bdc1042443e6ca0a5f303958f75167c5c5078
|
refs/heads/master
| 2020-05-22T00:09:35.463480
| 2019-05-11T20:06:59
| 2019-05-11T20:06:59
| 186,166,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,759
|
r
|
cv_dt.R
|
#' Cross-validate a decision-tree (rpart) classifier on a time-ordered split.
#'
#' @param train_test_data list with $train and $test data frames; the
#'   Date (and, if present, Hour) columns are dropped before modelling.
#' @param run_parallel register a doParallel cluster while training?
#' @param no_cores number of worker processes for that cluster.
#' @return list with the fitted caret model, its confusion matrix on the
#'   test partition, and the numeric class predictions.
cv_dt <- function(train_test_data,
                  run_parallel = TRUE,
                  no_cores = 3) {
  # Strip the time-index columns so they are not used as predictors.
  has_hour <- "Hour" %in% names(train_test_data$train)
  if (has_hour) {
    train_partition <- train_test_data$train %>% select(-Date, -Hour)
    test_partition  <- train_test_data$test %>% select(-Date, -Hour)
  } else {
    train_partition <- train_test_data$train %>% select(-Date)
    test_partition  <- train_test_data$test %>% select(-Date)
  }

  #### Training Method: rolling-origin time slices
  # window = 10% of the training rows, horizon = a quarter of the window;
  # skip is chosen so consecutive slices do not overlap.
  tc_window  <- nrow(train_partition) / 10
  tc_horizon <- tc_window / 4
  tr_control <- trainControl(method = "timeslice",
                             initialWindow = tc_window,
                             horizon = tc_horizon,
                             skip = tc_window + tc_horizon - 1)

  #### Cross-Validation
  print("Run DT-Model..")
  if (run_parallel) {
    library(doParallel)
    cl <- makeCluster(no_cores)
    registerDoParallel(cl)
  }
  dt_fit <- train(as.factor(Target) ~ .,
                  data = train_partition,
                  method = "rpart",
                  preProcess = c("scale"),
                  parms = list(split = "information"),
                  trControl = tr_control)
  if (run_parallel) {
    stopCluster(cl)
  }

  #### Validate Model with Test-Partition
  y_pred_dt <- predict(dt_fit, test_partition)
  conf_matrix <- confusionMatrix(as.factor(y_pred_dt),
                                 as.factor(test_partition$Target),
                                 positive = "1",
                                 mode = "everything")
  list(model = dt_fit,
       conf_matrix = conf_matrix,
       predictions = as.numeric(as.character(y_pred_dt)))
}
|
2462eae7d15191e46fc799cd6a358682af5529b1
|
a1f4c164ad71a2350bda7cb8349a1ba1cbe26b0f
|
/InformationAbility.R
|
80a7032956787783a1d5a26a5e36b6d909c7949a
|
[] |
no_license
|
JoshuaZe/RepeatedGameInR
|
2e138c7a48c1a1b5f7e438974a2b67cfe55e0541
|
60d866f022ce5c12891295a5e2200beddb035b61
|
refs/heads/master
| 2021-01-01T18:06:40.826665
| 2015-03-05T06:50:26
| 2015-03-05T06:50:26
| 27,116,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
InformationAbility.R
|
# Extract competitor c.NO's entries from the global game.decisionlog,
# restricted to rounds where BOTH player p.NO and competitor c.NO have
# a recorded (non-NA) value -- i.e. only information the player could
# have observed directly.  The final assignment is the (invisible)
# return value.
i.onlySelfInfoWithCompetitor<-function(p.NO,c.NO){
info=t(data.frame(
c=t(game.decisionlog[c.NO,(!is.na(game.decisionlog[p.NO,1:ncol(game.decisionlog)]))&
(!is.na(game.decisionlog[c.NO,1:ncol(game.decisionlog)]))])))
}
# For every player i (globals: game.decisionlog, players.size), take the
# second entry of the rounds shared between player p.NO and player i --
# presumably the competitor's type -- and collect them in a one-row
# data frame that is returned.
# NOTE(review): e.info starts as data.frame(data=0); its dummy first
# column is overwritten in the loop -- confirm that is intended.
i.onlySelfInfoAboutTypeOfCompetitor<-function(p.NO){
e.info <- data.frame(data=0)
for(i in 1:players.size){
c=t(game.decisionlog[i,(!is.na(game.decisionlog[p.NO,1:ncol(game.decisionlog)]))&
(!is.na(game.decisionlog[i,1:ncol(game.decisionlog)]))])
e.info[i]=c[2]
}
e.info
}
# Return ALL non-NA decision-log entries for competitor c.NO (no
# restriction to rounds the caller observed).  Reads the global
# game.decisionlog; the assignment is the (invisible) return value.
i.allInfoAboutCompetitor<-function(c.NO){
info=t(data.frame(
c=t(game.decisionlog[c.NO,(!is.na(game.decisionlog[c.NO,1:ncol(game.decisionlog)]))])))
}
|
6bbda460367ba719ad584e6c34c8a1627a38620e
|
bf1aae7fed65cbb5eeef065ae137844d12fcf53b
|
/Valentines Day Survey Dashboard/ui_server.R
|
64302ecede45b571b46a2684ea3c0d6952adaae2
|
[] |
no_license
|
elayenikoylu/R-Shiny-Dashboard
|
ef46a96026f52ff4c8701f5fb761433eb60d22d1
|
1e5c73fee79b865ba36c4c4767d3d7d1c5827aaa
|
refs/heads/main
| 2023-06-02T03:25:49.774308
| 2021-06-19T18:32:51
| 2021-06-19T18:32:51
| 378,480,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,621
|
r
|
ui_server.R
|
if (interactive()) {
#- MAIN UI START
# shinydashboardPlus UI for the Valentine's Day survey app: one sidebar
# entry per analysis tab; tabName values must match the tabItem names in
# dashboardBody and the output ids rendered by the server.
ui <- dashboardPagePlus(
title = "Valentine's Day Survey",
skin = "red",
#- Dashboard Title
dashboardHeader(title = span(tagList(icon("heart"), "Valentine's Day Survey")), titleWidth = 300),
#- Left Menu
dashboardSidebar(
sidebarMenu(
menuItem("Sentimental WordCloud", tabName = "cloud_lo", icon = icon("cloud")),
menuItem("Frequency Histogram", tabName = "freq_lo", icon = icon("search")),
menuItem("Sentiment Analysis", tabName = "senti_lo", icon = icon("smile")),
menuItem("TF-IDF Histogram", tabName = "tfidf_lo", icon = icon("home")),
menuItem("N-gram and Neural Network", tabName = "ngram_lo", icon = icon("user-friends")),
menuItem("Naive Bayes Analysis", tabName = "nba_lo", icon = icon("user-friends"))
)
),
#- Dashboard Body
dashboardBody(
tabItems(
# First tab content
# Naive Bayes tab: five free-text answers plus a Go button; the
# prediction text appears in the right-hand column.
tabItem(tabName = "nba_lo",
fluidRow(
column(width = 4,
h2("Naive-Bayes Simulation"),
textInput('llife', "1. Describe your love life?", value = "", width = NULL, placeholder = ''),
textInput('favday', "2. What has been your favorite valentines day yet?", value = "", width = NULL, placeholder = ''),
textInput('perf', "3. Describe your perfect date.", value = "", width = NULL, placeholder = ''),
textInput('gift', "4. What is an ideal gift for you?", value = "", width = NULL, placeholder = ''),
textInput('worst', "5. Describe your worst Valentines Day.", value = "", width = NULL, placeholder = ''),
actionButton("goButton", "Go!")),
column(h3(textOutput("userText")),width = 4, offset= 2
)
)),
# Word-cloud tab: pick a survey question (or all) for the cloud.
tabItem(tabName = "cloud_lo",
fluidRow(
box(
selectInput('ques', 'Select The Question', c("All Questions", "1. Describe your love life?","2. What has been your favorite valentines day yet?","3. Describe your perfect date.","4. What is an ideal gift for you?","5. Describe your worst Valentines Day."))
,width=3),
box(
plotOutput("senticloudPlot"),width=9,offset=9
)
)
),
# Frequency-histogram tab: question selected by number via slider.
tabItem(tabName = "freq_lo",
fluidRow(
box(
sliderInput('ques_no', 'Select Question Number : ', min=1,max=5,value=1,step=1)
,width=3),
box(
plotly::plotlyOutput("freqPlot"),width=8
)
)
),
# Second tab content
tabItem(tabName = "senti_lo",
fluidRow(
box(
selectInput('ques_sa', 'Choose The Question', c("Question 1","Question 2","Question 3","Question 4","Question 5"),selected="Question 3")
,width=3),
box(
plotly::plotlyOutput("sentiPlot"),width=9
)
)
),
# TF-IDF tab: mirrors the frequency tab's slider control.
tabItem(tabName = "tfidf_lo",
fluidRow(
box(
sliderInput('ques_no2', 'Select Question Number : ', min=1,max=5,value=1,step=1)
,width=3),
box(
plotly::plotlyOutput("tfidfPlot"),width=8
)
)
),
# N-gram tab: bigram network plot next to a quadrogram table.
tabItem(tabName = "ngram_lo",
fluidRow(
column(
h2("Bigram Graph"),
plotOutput('bigramPlot'),width=4),
column(
h2("Quadrograms"),
tableOutput("quad_Table"), style = "font-size: 80%; width: 80%",width=4))
)
)
)
)
server <- function(input, output, session){
# Function to create a data frame of top 10 names by sex and year
library(shiny)
library(shinydashboard)
library(shinydashboardPlus)
library(DT)
library(shinythemes)
library(dplyr)
library(textreadr)
library(textdata)
library(tidyr)
library(tidytext)
library(tidyverse)
library(stringr)
library(wordcloud)
library(igraph)
library(ggraph)
library(pdftools)
library(plotly)
library(ggplot2)
library(tm)
library(quanteda)
library(RColorBrewer)
library(quanteda.textmodels)
library(widyr)
library(reshape2)
cap3 <- eventReactive(input$goButton, {
my_naive_fn()
})
output$userText <- renderText({
cap3()
})
my_naive_fn<- function(){
######## Naive Bayes prediction Model ##########
################################################
input_string <- paste(input$llife , input$favday , input$perf , input$gift , input$worst , sep=" ")
print(input_string)
my_df$AllQues <-
paste(my_df$Question1, my_df$Question2, my_df$Question3,
my_df$Question4, my_df$Question5, sep = " ")
de<-data.frame(input$llife , input$favday , input$perf , input$gift , input$worst,"yesorno",input_string)
names(de)<-c("Question1", "Question2", "Question3",
"Question4", "Question5", "Question6","AllQues")
newdf <- rbind(my_df, de)
corp <- corpus(newdf, text_field = c("AllQues"))
msg.dfm <- dfm(corp, tolower = TRUE) #generating document
msg.dfm <- dfm_trim(msg.dfm, min_termfreq = 2, min_docfreq = 1)
msg.dfm <- dfm_weight(msg.dfm)
msg.dfm<-dfm_select(msg.dfm, pattern = c("broke","ready","single","mingle","life","married","boring","coding","engaged","hard","messy","relationship","beach","night","cafe","cancun","enjoying","morning","eating","feel","walk","wine","bad","fight","remember","huge","memory","roses","school","2007","grade","outfit","pay","ago","girlfriend","picnic","lot","wife","cooked","dad","hotel","park","surprise","vacation","visit","weekend","visited","chocalate","experience","jewelry","money","related","sanitizer","personal","buy","chocalates","diamond","diamonds","expensive","monetary","rings","woman"), selection = "keep", valuetype = "fixed")
q6 %>% count( text == 'yes' | text == 'Yes' | text == 'Yes.' | text == 'Yeah.' )
q6 %>% count( text == 'No' | text == 'no' | text == 'No.' )
## 35 Yes ..... 22 No .....
35/57
0.9*57
8 * (35/57)
msg.dfm
#Splitting the docs into training and testing data using 90/10
# Splitting the data with equal no of yes's & no's - STRATIFIED split
msg.dfm.train<-msg.dfm[-c(1,5,15,23,28,33,38,43),]
# msg.dfm.test<-msg.dfm[c(1,5,15,23,28,33,38,43),]
msg.dfm.test<-msg.dfm[c(58),]
#building the Naive Bayes model:
NB_classifier <- textmodel_nb(msg.dfm.train,
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1 )) # We are imputiing this success values manually
NB_classifier
summary(NB_classifier)
odds<- 0
# predicting the testing data
pred <- predict(NB_classifier, msg.dfm.test,type='prob')
outp <- paste("Thank You !! The likelihood of you celebrating Valentines Day with someone this year is",round((pred[1,'1'] * 100),2),'% !!!')
outp
}
output$senticloudPlot <- renderPlot({
user_ques_selection <- input$ques
if (user_ques_selection == 'All Questions'){
tidy_var<- tidy_survey
} else if ( user_ques_selection == "1. Describe your love life?") {
tidy_var<- tidy_q1
} else if ( user_ques_selection == "2. What has been your favorite valentines day yet?") {
tidy_var<- tidy_q2
} else if ( user_ques_selection == "3. Describe your perfect date.") {
tidy_var<- tidy_q3
} else if ( user_ques_selection == "4. What is an ideal gift for you?") {
tidy_var<- tidy_q4
} else if ( user_ques_selection == "5. Describe your worst Valentines Day.") {
tidy_var<- tidy_q5
}
tidy_var %>%
inner_join(get_sentiments("nrc")) %>%
count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%
comparison.cloud(color = c("red", "blue"),
max.words=200,scale=c(1,1))
},height = 530, width = 760)
output$freqPlot <- plotly::renderPlotly({
colorch2 <- "coral2"
user_ques_selection2 <- input$ques_no
if (user_ques_selection2 == 1){
tidy_var2<- tidy_q1
colorch2 <- "coral2"
questio<- "1. Describe your love life?"
} else if ( user_ques_selection2 == 2) {
tidy_var2<- tidy_q2
colorch2 <- "turquoise4"
questio<- "2. What has been your favorite valentines day yet?"
} else if ( user_ques_selection2 == 3) {
tidy_var2<- tidy_q3
colorch2 <- "violetred4"
questio<- "3. Describe your perfect date."
} else if ( user_ques_selection2 == 4) {
tidy_var2<- tidy_q4
colorch2 <- "palegreen3"
questio<- "4. What is an ideal gift for you?"
} else if ( user_ques_selection2 == 5) {
tidy_var2<- tidy_q5
colorch2 <- "burlywood3"
questio<-"5. Describe your worst Valentines Day."
}
freq_hist <- tidy_var2 %>%
count(word, sort=TRUE) %>%
mutate(word=reorder(word, n)) %>%
top_n(10) %>%
ggplot(aes(word, n))+
geom_col(fill=colorch2)+
labs(x = "Words",
y = "Frequency",
title = questio) +
coord_flip()
print(freq_hist)
})
output$sentiPlot <- plotly::renderPlotly({
user_ques_selectionsa <- input$ques_sa
if (user_ques_selectionsa == "Question 1"){
tidy_var_sa<- tidy_q1
questio2<- "1. Describe your love life?"
# colorch2 <- "coral2"
} else if ( user_ques_selectionsa == "Question 2") {
tidy_var_sa<- tidy_q2
# colorch2 <- "turquoise4"
questio2<- "2. What has been your favorite valentines day yet?"
} else if ( user_ques_selectionsa == "Question 3") {
tidy_var_sa<- tidy_q3
questio2<- "3. Describe your perfect date."
# colorch2 <- "violetred4"
} else if ( user_ques_selectionsa == "Question 4") {
tidy_var_sa<- tidy_q4
questio2<- "4. What is an ideal gift for you?"
# colorch2 <- "palegreen3"
} else if ( user_ques_selectionsa == "Question 5") {
tidy_var_sa<- tidy_q5
# colorch2 <- "burlywood3"
questio2<-"5. Describe your worst Valentines Day."
}
bing_survey <- tidy_var_sa %>%
count(word, sort=TRUE)
bing_survey <- bing_survey %>%
inner_join(get_sentiments("bing")) %>%
ungroup()
sentiment_count <- bing_survey %>%
count(sentiment, sort = TRUE)
bing_survey %>%
group_by(sentiment) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
head(20) %>%
ggplot(aes(reorder(word, n), n, fill = sentiment)) +
geom_col(show.legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
labs(x = NULL,
y = "Frequency",
title = questio2) +
coord_flip() + theme_bw()
})
output$tfidfPlot <- plotly::renderPlotly({
colorch3 <- "purple3"
user_ques_selection3 <- input$ques_no2
if (user_ques_selection3 == 1){
q_choice<- "Question1"
colorch3 <- "purple3"
questio3<- "1. Describe your love life?"
} else if ( user_ques_selection3 == 2) {
q_choice<- "Question2"
colorch3 <- "tomato2"
questio3<- "2. What has been your favorite valentines day yet?"
} else if ( user_ques_selection3 == 3) {
q_choice<- "Question3"
colorch3 <- "turquoise4"
questio3<- "3. Describe your perfect date."
} else if ( user_ques_selection3 == 4) {
q_choice<- "Question4"
colorch3 <- "violetred4"
questio3<- "4. What is an ideal gift for you?"
} else if ( user_ques_selection3 == 5) {
q_choice<- "Question5"
colorch3 <- "palegreen3"
questio3<-"5. Describe your worst Valentines Day."
}
####### Applying TF-IDF
survey_words <- survey_words %>%
bind_tf_idf(word, question, n)
survey_words
survey_words %>%
arrange(desc(tf_idf)) # %>%
survey_words %>%
arrange(desc(tf_idf)) %>%
mutate(word=factor(word, levels=rev(unique(word)))) %>%
filter(question==q_choice) %>%
top_n(10) %>%
ggplot(aes(reorder(word, tf_idf),tf_idf))+
geom_col(show.legend=FALSE,fill=colorch3)+
labs(x=NULL, y="TF-IDF",title=questio3)+
facet_wrap(~question, ncol=2, scales="free")+
coord_flip()
})
output$bigramPlot <- renderPlot({
######################### N-grams and Neural Network #########################
my_bigrams <- survey %>%
unnest_tokens(bigram, text, token = "ngrams", n=2) %>%
filter(!is.na(bigram))
my_bigrams %>%
count(bigram, sort = TRUE) #this has many stop words, need to remove them
#to remove stop words from the bigram data, we need to use the separate function:
bigrams_separated <- my_bigrams %>%
separate(bigram, c("word1","word2"), sep = " ")
bigrams_filtered <- bigrams_separated %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_count <- bigrams_filtered %>%
count(word1, word2, sort = TRUE)
bigram_count
bigram_graph <- bigram_count %>%
filter(n>1) %>% #lower n to 1 or 2
graph_from_data_frame()
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(colour= "grey")+
geom_node_point(colour = "black")+
geom_node_text(aes(label=name,color=name), vjust =1, hjust=1,show.legend = FALSE)
},height = 350, width = 600)
output$quad_Table <- renderTable({
my_quadrogram <- survey %>%
unnest_tokens(quadrogram, text, token = "ngrams", n=4) %>%
filter(!is.na(quadrogram)) %>%
separate(quadrogram, c("words1", "words2", "words3", "words4"), sep=" ") %>%
filter(!words1 %in% stop_words$word) %>%
filter(!words2 %in% stop_words$word) %>%
filter(!words3 %in% stop_words$word) %>%
filter(!words4 %in% stop_words$word)
my_quadrogram
})
# Read the raw survey text: one flat vector whose entries are laid out
# respondent-major (all 6 answers of respondent 1, then respondent 2, ...).
my_data <- read_document(file="C:/Users/18579/Documents/MSBA/R/textual/team/flie/all_text.txt") #This comes out as a vector
#Define parameters and create an empty dataframe
rows <- 57 #how many observations do you have - how many people you have
cols <- 6 #how many variables do you have - how many answers per person
my_df <- as.data.frame(matrix(nrow=rows, ncol=cols))
# Reshape the flat vector into a respondents x questions data frame:
# i*cols+z-cols == (i-1)*cols + z, i.e. answer z of respondent i.
for(z in 1:cols){
for(i in 1:rows){
my_df[i,z]<- my_data[i*cols+z-cols]
}#closing i (inner) loop
}#closing z (outer) loop
# One single-column data frame per question.
# NOTE(review): data_frame() is deprecated in favour of tibble() -- confirm
# the installed dplyr version still exports it.
q1 <- data_frame(text=my_df$V1)
q2 <- data_frame(text=my_df$V2)
q3 <- data_frame(text=my_df$V3)
q4 <- data_frame(text=my_df$V4)
q5 <- data_frame(text=my_df$V5)
q6 <- data_frame(text=my_df$V6)
questions <- c("Question1", "Question2", "Question3",
"Question4", "Question5", "Question6")
colnames(my_df) <- questions
# Long format: one row per answer, tagged with its question label.
survey <- bind_rows(mutate(q1, question = questions[1]),
mutate(q2, question = questions[2]),
mutate(q3, question = questions[3]),
mutate(q4, question = questions[4]),
mutate(q5, question = questions[5]),
mutate(q6, question = questions[6]))
######################## Creating custome Stop_words #####################
# Domain-specific stop words (survey is about Valentine's Day, so those
# terms carry no signal) appended to the tidytext stop_words lexicon.
custom_stop_words <- tribble(
~word, ~lexicon,
"valentine's","CUSTOM",
"valentines","CUSTOM",
"day","CUSTOM",
"worst","CUSTOM",
"perfect","CUSTOM",
"favorite","CUSTOM",
"gift","CUSTOM",
"ideal","CUSTOM",
"describe","CUSTOM",
"a lot","CUSTOM",
"date","CUSTOM",
"jeez","CUSTOM",
"gosh","CUSTOM",
"haha","CUSTOM",
"memory","CUSTOM"
)
#"celebrate","CUSTOM",
# NOTE(review): this rebinds `stop_words`, shadowing the tidytext copy for
# the rest of the session; every anti_join below uses the extended table.
stop_words <- stop_words %>%
bind_rows(custom_stop_words)
stop_words
######################## Creating tidy df's##################
# Tokenize each question into one-word-per-row form and strip stop words.
# NOTE(review): q1..q5 are handled with copy-pasted blocks and q6 is never
# tokenized individually -- confirm whether that is intentional.
tidy_q1 <- q1 %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#counting frequencies for tokens (result printed interactively, not stored)
tidy_q1 %>%
count(word, sort=TRUE)
tidy_q2 <- q2 %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#counting frequencies for tokens
tidy_q2 %>%
count(word, sort=TRUE)
tidy_q3 <- q3 %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#counting frequencies for tokens
tidy_q3 %>%
count(word, sort=TRUE)
tidy_q4 <- q4 %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#counting frequencies for tokens
tidy_q4 %>%
count(word, sort=TRUE)
tidy_q5 <- q5 %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)
#counting frequencies for tokens
tidy_q5 %>%
count(word, sort=TRUE)
# Getting the dataset ready for TF-IDF: per-question word counts.
tidy_survey <- survey %>%
group_by(question) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>% #here's where we remove tokens
count(word, sort=TRUE) %>%
ungroup()
# Counting the total words per question
total_words <- tidy_survey %>%
group_by(question) %>%
summarize(total=sum(n))
# Joining it to the original set (implicit join key: "question")
survey_words <- left_join(tidy_survey, total_words)
}
shinyApp(ui = ui, server = server)
}
|
5c600e72e473231aa82af01c89840f66bda98c80
|
14a5eae233df75d47eedb77c91c1bfee5b39f736
|
/azure/hot-cold/results/coldAfterPython/draw.r
|
1afb7b40ed1705f49bd3abef513f9625bbbf6284
|
[] |
no_license
|
slnowak/cloud-functions-research
|
55a38fde29c0f9fce6810559f1a18cbd9758be5d
|
a22b7cb87bddf0c4eb2fe1345e45ceccef4ebc3f
|
refs/heads/master
| 2020-12-03T00:01:32.917331
| 2017-09-09T21:20:24
| 2017-09-09T21:21:56
| 95,975,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
draw.r
|
# Render the warm-start latency chart (response time vs. request time)
# from data.csv into graph.png.
library("ggplot2")
library("scales")

png(filename = "graph.png", width = 1280, height = 1024)

dat <- read.csv("data.csv")
# Timestamps arrive as ISO-8601 strings; parse them into POSIXct.
dat$timestamp <- as.POSIXct(dat$timestamp, format = "%Y-%m-%dT%H:%M:%S")

p <- ggplot(data = dat, aes(timestamp)) +
  geom_line(aes(y = percentile_duration_100)) +
  theme_bw() +
  scale_x_datetime(labels = date_format("%H:%M"), breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  labs(x = "Czas wykonania zapytania HTTP (hh:mm:ss)", y = "Czas odpowiedzi (ms)") +
  ggtitle("Czas wykonania funkcji po sesji warmup w języku Python (10RPS)")

# print() is required so the plot is drawn even when this file is source()d;
# top-level auto-printing only happens at the interactive prompt / Rscript.
print(p)

# BUG FIX: the original never closed the png device, so graph.png could be
# left locked or incomplete when the script ran inside a longer session.
dev.off()
|
c676b881536fe927e5706d63b9c71916d37b2684
|
969d4316ad794a0eef0213b01a7b06ddfdf8d90d
|
/01_data_structures/03_matrices/1.R
|
87dffd782a7cafcca6aedcd933de23f527d62f4a
|
[] |
no_license
|
Bohdan-Khomtchouk/adv-r-book-solutions
|
adaa5b5f178999d130aff1359a23e978e39e86ae
|
e1b3a63c0539de871728b522604110c0aa18c7d1
|
refs/heads/master
| 2021-01-22T00:36:17.450660
| 2015-12-06T02:54:02
| 2015-12-06T02:54:02
| 47,481,353
| 1
| 1
| null | 2015-12-06T02:49:46
| 2015-12-06T02:49:46
| null |
UTF-8
|
R
| false
| false
| 106
|
r
|
1.R
|
### What does dim() return when applied to a vector?
# dim() reads the "dim" attribute, and atomic vectors carry no dim
# attribute, so it returns NULL regardless of length or element type.
# (The lines below are an R console transcript, not runnable source.)
> dim(c(1, 2, 3))
NULL
> dim(c('a', 'b', 'c'))
NULL
|
0581f8449623cfd21561f5013d994e51a0f3af36
|
c425084c5f4a4a8ccbe1a0d1d5370037edf62efb
|
/college.R
|
6db17ab320d903b29c4bc37215ca94e59b06a958
|
[] |
no_license
|
anand1692/data-science
|
6793123b425fbed7239ab3d3844340126d10bb55
|
bd4d0ced3b70395cd393470d2cd5b18668ce207e
|
refs/heads/master
| 2021-01-18T21:44:34.281576
| 2016-03-30T19:59:03
| 2016-03-30T19:59:03
| 28,952,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
college.R
|
# ISLR-style exploration of the College data set: load, set row names,
# summarize, and derive an "Elite" indicator from Top10perc.
college = read.csv("College.csv")
dim(college)
# NOTE(review): fix() opens an interactive editor and blocks when the
# script is run non-interactively.
fix(college)
# Use the first column (college names) as row names, then drop it.
rownames(college) = college[,1]
fix(college)
college = college[,-1]
fix(college)
summary(college)
pairs(college[,5:10])
# NOTE(review): attach() puts the columns on the search path; masking bugs
# are easy to introduce -- prefer college$... or with().
attach(college)
# NOTE(review): with Private (a factor) as the y argument this draws a
# scatter against factor codes; the usual ISLR answer is
# plot(Private, Outstate) to get side-by-side boxplots -- confirm intent.
plot(Outstate, Private, col="red")
# Elite = "Yes" when more than 50% of students came from the top 10% of
# their high-school class.
Elite = rep("No", nrow(college))
Elite[Top10perc > 50] = "Yes"
Elite = as.factor(Elite)
college = data.frame(college, Elite)
summary(college)
# NOTE(review): same argument-order concern as above -- confirm intent.
plot(Outstate, Elite, col="blue")
par(mfrow=c(2,2));
hist(Apps, col=2, breaks=10)
hist(Top10perc, col=5, breaks = 10)
hist(Accept, col=3)
hist(perc.alumni, col=4)
# College with the largest share of top-10% students ...
ind = which(college$Top10perc == max(college$Top10perc, na.rm=TRUE), arr.ind = TRUE)
maxTop10perc = rownames(college)[ind]
# ... and the one with the fewest acceptances.
ind2 = which(college$Accept == min(college$Accept, na.rm = TRUE), arr.ind = TRUE)
minAccept = rownames(college)[ind2]
|
561c3b298c0001acaa25e3954515d6a68b40e3f9
|
002fe4ceedf168f1182989fe8a1130560706b37d
|
/plot3.R
|
54fadfc910808de0b69e60a1a002553a6ed0f619
|
[] |
no_license
|
gscrepis/ExData_Plotting1
|
daa4dacd2d942d602de035111fd8c58779b75fcb
|
e4a265bca179db3424dd9dffbf43e239be952769
|
refs/heads/master
| 2021-01-12T19:43:10.048209
| 2015-12-11T09:24:12
| 2015-12-11T09:24:12
| 47,815,052
| 0
| 0
| null | 2015-12-11T08:42:10
| 2015-12-11T08:42:09
| null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
plot3.R
|
# Download and extract the UCI household power consumption data (run once),
# then plot the three sub-metering series for Feb 1-2, 2007 into plot3.png.
if (!file.exists("./CS_course_project1")) {
  dir.create("./CS_course_project1")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./CS_course_project1/power_consumption.zip")
unzip(zipfile = "./CS_course_project1/power_consumption.zip", exdir = "./CS_course_project1")

# Load the full data set ("?" marks missing values) and keep the two days.
power_file <- "./CS_course_project1/household_power_consumption.txt"
power <- read.table(power_file, header = TRUE, sep = ";", dec = ".", na.strings = "?")
feb_days <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")

# Build POSIX timestamps from the separate Date and Time columns.
date_time <- strptime(paste(feb_days$Date, feb_days$Time, sep = " "), "%d/%m/%Y %H:%M:%S")

# Draw the three series in one panel and close the device.
png("./CS_course_project1/plot3.png", width = 480, height = 480)
plot(date_time, feb_days$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(date_time, feb_days$Sub_metering_2, type = "l", col = "red")
lines(date_time, feb_days$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
ab905f3337b9f24c485dd849f7dc68b21c0ef946
|
d765a553330f46d7a629dcb1d1a942b002fc1ff0
|
/Tools/loadTEDDYtools.R
|
35793e0217dfbfcd6ac890e6cc6beeed66aafe3e
|
[] |
no_license
|
ConesaLab/TEDDY_Multi-Omics_Integration
|
bc70184c5f812518126c16cb45cc1e5cc0f48c3c
|
61d221858191f9446d8cfc4abb529dec7cc00e70
|
refs/heads/master
| 2023-02-16T05:34:30.306481
| 2021-01-14T21:06:04
| 2021-01-14T21:06:04
| 237,418,899
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,235
|
r
|
loadTEDDYtools.R
|
###########################################################
############### loadTEDDYtools.R ##################
###########################################################
# Authors: Ricardo Ramirez Flores
# Genetics Institute, University of Florida (Gainesville)
# TEDDY tools: Requirements
# Loads all the functions, objects and collection of functions of TEDDYtools
###########################################################
# NOTE(review): machine-specific working directory; every load()/source()
# below resolves relative to it, so the script only runs on this host.
setwd("/media/data/leobalzano/ScriptsForTEDDY")
# The location of TEDDYtoolsV2
#setwd("/home/rramirez/Dropbox (UFL)/myTEDDY_V2/TEDDYtoolsV2/")
# Global Variables : RAW DATA
# Gene Expression
load("Data/Piano/GlobalData/GEXtargets.ro") #Target Matrix
load("Data/Piano/GlobalData/rawTEDDY_GEX.ro") #Count Matrix (Original Table)
load("Data/Piano/GlobalData/rawTEDDY_GEX_inversevst.ro") #New Matrix
# Metabolomics
load("Data/Piano/GlobalData/MetabolicTargets.ro") #Target Matrix
load("Data/Piano/GlobalData/Metabolic_Counts_Raw.ro") #Complete Matrix
load("Data/Piano/GlobalData/GCTOFRAW_counts.ro") #GCTOF
load("Data/Piano/GlobalData/negLipRAW_counts.ro") #NegLip
load("Data/Piano/GlobalData/posLipRAW_counts.ro") #PosLip
# GeneSets
load("Data/Piano/GlobalData/TEDDY_geneSets.ro")
# External Libraries
library(dplyr)
library(data.table)
library(illuminaHumanv4.db)
library(GO.db)
library(splines)
library(limma)
library(piano)
library(matrixStats)
library(maSigPro)
library(ggplot2)
library(gplots)
library(RColorBrewer)
library(gridExtra)
library(cluster)
library(gtools)
library(stringdist)
library(fpc)
library(mclust)
library(scales)
# Load TEDDYtools Suite (order defines which names win on collision)
print("Loading Processing and filtering tools")
source("Data/Piano/Suite/Processing_Filtering_TT.R")
print("Loading Count Manipulation tools")
source("Data/Piano/Suite/CountManipulation_TT.R")
print("Loading GSEA tools")
source("Data/Piano/Suite/GSEA_TT.R")
print("Loading Linear Modelling tools")
source("Data/Piano/Suite/LinearModels_DEA_TT.R")
print("Loading Visual Manager tools")
source("Data/Piano/Suite/VisualManager_TT.R")
print("Loading Transcriptional Signature Comparison tools")
source("Data/Piano/Suite/TranscriptionalSignComp_TT.R")
print("Loading Clustering tools")
source("Data/Piano/Suite/Clustering_TT.R")
|
8c447124a80d4993b3faa4743718766286569e5d
|
9594de75a331360baa35eacbfbc22f8a716b602d
|
/R/tdr_income_asisa_get.R
|
facfcf8b8b941c86876e6f2fba5c291239efa54b
|
[] |
no_license
|
mustafaascha/inndxrs
|
922198378741d7981f7b77f61fbfb3ba3b098f6a
|
ce8a8522eaebc5cc42f4ffed978d50a9b64e3577
|
refs/heads/master
| 2020-03-20T11:13:58.665823
| 2018-06-12T22:16:44
| 2018-06-12T22:16:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,775
|
r
|
tdr_income_asisa_get.R
|
#' @return transactions tbl
#'
#' @export
#'
# Build a lazy dbplyr query against arc_asisa_Income_Converted: filter to
# report-currency, non-duplicated rows; drop reporting metadata columns;
# derive an integer date key via a server-side UDF; restrict to the given
# universe; and convert all monetary columns with the row's rate.
# Nothing is executed until the caller collect()s the returned tbl.
tdr_income_asisa_get <- function(con_tdr, universe) {
tbl_income <- dplyr::tbl(con_tdr, "arc_asisa_Income_Converted")
tbl_income <- tbl_income %>%
filter(
CurrencyTo == ReportCurrency,
Duplicated == 0
)
# tbl_income <- tbl_income %>%
#   rename(
#     portfoliocode = PortfolioIDCode,
#     instrumentcode = InstrumentCode
#   )
# Drop identifier/reporting columns that the universe join re-supplies,
# plus the *_MAX/*_MIN aggregates that are not used downstream.
tbl_income <- tbl_income %>%
select(
-PortfolioIDCode,
-InstrumentCode,
-PortfolioName,
-AssetManagerCode,
-AssetManagerName,
-ReportEndDate,
-ReportRunDate,
-ReportRunTime,
-ReportStartDate,
-ClosingBalance_MAX,
-ClosingBalance_MIN,
-DividendIncomeEarned_MAX,
-DividendIncomeEarned_MIN,
-WithholdingTax_MAX,
-WithholdingTax_MIN,
-PrevClosingBalance_MAX,
-PrevClosingBalance_MIN,
-OpeningBalance_MAX,
-OpeningBalance_MIN,
-InterestIncomeEarned_MAX,
-InterestIncomeEarned_MIN,
-InterestIncomePaid_MAX,
-InterestIncomePaid_MIN,
-DividendIncomePaid_MAX,
-DividendIncomePaid_MIN,
-Id
)
# sql() injects the server-side UDF verbatim into the generated query,
# so this pipeline only works against the TDR SQL Server database.
tbl_income <- tbl_income %>%
mutate(
effectivedate_int = sql("dbo.fn_DateTime2Obelix(EffectiveDate)")
)
tbl_income <- tbl_income %>%
mutate(
date_int = effectivedate_int
)
universe <- universe %>%
select(
portfoliocode, instrumentcode, CompanyID, HiportDBID, SecurityType, SecuritySubType, SecurityClassName, ObelixDatabaseName, PortfolioID, SecurityID
)
# Inner join restricts income rows to the requested universe.
tbl_income <- tbl_income %>%
inner_join(
universe, by = c("PortfolioID", "SecurityID", "CompanyID", "HiportDBID")
)
# col_names <- names(tbl_income)
#
# key_names <- c("portfoliocode",
#                "instrumentcode",
#                "SecurityClassName",
#                "SecurityType",
#                "SecuritySubType",
#                "ObelixDatabaseName",
#                "CompanyID",
#                "HiportDBID", "date_int")
#
#
# other_names <- col_names[!col_names %in% key_names]
#
# tbl_income <- tbl_income %>%
#   select(
#     key_names, other_names
#   )
tbl_income <- reorder_cols(tbl_income)
# Short aliases for the balance/income columns used in the conversion.
tbl_income <- tbl_income %>%
rename(
cb = ClosingBalance,
pcb = PrevClosingBalance,
ob = OpeningBalance,
ie = InterestIncomeEarned,
ip = InterestIncomePaid,
r = Rate
)
# Convert every monetary column into report currency with the row's rate.
tbl_income <- tbl_income %>%
mutate(
cb = cb*r
, pcb = pcb*r
, ob = ob*r
, WithholdingTax = WithholdingTax*r
, DividendIncomeEarned = DividendIncomeEarned*r
, DividendIncomePaid = DividendIncomePaid*r
, ie = ie*r
, ip = ip*r
)
return(tbl_income)
}
|
2ec28c2c0a77cef2b4aa9fff6a25b93d70a5058b
|
32fa4e8db7efd2ff4adbb87eaae8141b4b282309
|
/man/GUI.Rd
|
d6392666c2b9d3e45253c05a791eb5664e0b3844
|
[] |
no_license
|
martin-vasilev/EMreading
|
dc13c7940e2c4074aa97650a06bb9f0cd19a0bf8
|
3ad260ad425890bb606ce421a3e18e2ad1f19c6d
|
refs/heads/master
| 2023-01-29T00:41:09.201495
| 2023-01-18T13:40:44
| 2023-01-18T13:40:44
| 112,669,247
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 302
|
rd
|
GUI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GUI.R
\name{GUI}
\alias{GUI}
\title{Graphical user interface for data pre-processing using R shiny}
\usage{
GUI()
}
\description{
Graphical user interface for data pre-processing using R shiny
}
\author{
Martin R. Vasilev
}
|
9bddc578087cc9d93ebc24ea4b5b917cd86f2c43
|
7caf18910d70cc9561605ed2778a3346a46c77d3
|
/R/EDA_script.R
|
9b44dd551460610796adbcf72e2f1b30dab7b0f0
|
[] |
no_license
|
alexvlima/Customer-Revenue-Prediction
|
14eaf95a006e7af537c1943ecbe6f72c823f3d64
|
b0957fd78c4b03a74fbf586503634257f909258f
|
refs/heads/main
| 2023-08-23T09:27:23.659516
| 2021-09-29T12:50:41
| 2021-09-29T12:50:41
| 410,065,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,165
|
r
|
EDA_script.R
|
############
### PATH ###
############
getwd()
# NOTE(review): hard-coded, user-specific setwd() makes the script
# non-portable; prefer project-relative paths.
setwd('~/Documents/Kaggle/Google-Analytics-Customer-Revenue-Prediction')
###############
### LIBRARY ###
###############
library(tidyverse)
library(jsonlite)
library(data.table)
library(lubridate)
library(gridExtra)
library(countrycode)
library(highcharter)
library(ggExtra)
library(glmnet)
library(keras)
library(forecast)
library(knitr)
library(Rmisc)
library(caret)
library(ggalluvial)
library(xgboost)
library(zoo)
###############
### DATASET ###
###############
# Load the raw training data (one row per visit) and inspect its shape.
dtrain <- read_csv('./Input/train.csv')
nrow(dtrain)
glimpse(dtrain)
# Number of distinct visitors vs. number of visits.
ID_unique <- unique(dtrain$fullVisitorId)
length(ID_unique)
#####################
### PREPROCESSING ###
#####################
# View(dtrain$totals)
sample(dtrain$totals,100)
class(dtrain$totals)
# convert date column from character to Date class
dtrain$date <- as.Date(as.character(dtrain$date), format='%Y%m%d')
# convert visitStartTime to POSIXct
dtrain$visitStartTime <- as_datetime(dtrain$visitStartTime)
# Expand the JSON-encoded columns into flat data frames.
# (flatten = TRUE spelled out instead of the reassignable `T`.)
tr_device <- paste("[", paste(dtrain$device, collapse = ","), "]") %>% fromJSON(flatten = TRUE)
tr_geoNetwork <- paste("[", paste(dtrain$geoNetwork, collapse = ","), "]") %>% fromJSON(flatten = TRUE)
tr_totals <- paste("[", paste(dtrain$totals, collapse = ","), "]") %>% fromJSON(flatten = TRUE)
tr_trafficSource <- paste("[", paste(dtrain$trafficSource, collapse = ","), "]") %>% fromJSON(flatten = TRUE)
dtrain <- cbind(dtrain, tr_device, tr_geoNetwork, tr_totals, tr_trafficSource) %>%
  as.data.table()
# drop the old json columns
dtrain[, c('device', 'geoNetwork', 'totals', 'trafficSource') := NULL]
# Placeholder strings that really mean "missing".
na_vals <- c('unknown.unknown', '(not set)', 'not available in demo dataset',
             '(not provided)', '(none)', '<NA>')
for (col in names(dtrain)) {
  set(dtrain, i = which(dtrain[[col]] %in% na_vals), j = col, value = NA)
}
# Number of unique non-NA values per column. vapply() replaces sapply()
# for a guaranteed integer result, and the variable is no longer called
# `unique`, which shadowed base::unique().
n_unique <- vapply(dtrain, function(x) length(unique(x[!is.na(x)])), integer(1))
# Columns with at most one distinct value carry no information ...
one_val <- names(n_unique[n_unique <= 1])
# ... but keep bounces and newVisits (meaningful despite being constant/NA).
one_val <- setdiff(one_val, c('bounces', 'newVisits'))
# drop columns from dtrain
dtrain[, (one_val) := NULL]
glimpse(dtrain)
# character columns to convert to numeric
num_cols <- c('hits', 'pageviews', 'bounces', 'newVisits',
              'transactionRevenue')
dtrain[, (num_cols) := lapply(.SD, as.numeric), .SDcols = num_cols]
# Revenue is stored in micro-dollars; rescale to unit dollars.
dtrain[, transactionRevenue := transactionRevenue / 1e+06]
######################
### MISSING VALUES ###
######################
# Fraction of missing values per column, as a sorted bar chart.
# vapply() replaces sapply() so the result is a guaranteed numeric vector
# regardless of the number of columns.
data.table(
  pmiss = vapply(dtrain, function(x) sum(is.na(x)) / length(x), numeric(1)),
  column = names(dtrain)
) %>%
  ggplot(aes(x = reorder(column, -pmiss), y = pmiss)) +
  geom_bar(stat = 'identity', fill = 'steelblue') +
  scale_y_continuous(labels = scales::percent) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(
    title='Missing data by feature',
    x='Feature',
    y='% missing')
###################
### EXPLORATION ###
###################
# Date span covered by the training data.
time_range <- range(dtrain$date)
print(time_range)
###############################
## target variable (revenue) ##
###############################
rev_range <- round(range(dtrain$transactionRevenue, na.rm=TRUE), 2)
print(rev_range)
# distribution of revenue from individual visits
dtrain %>%
ggplot(aes(x=log(transactionRevenue), y=..density..)) +
geom_histogram(fill='steelblue', na.rm=TRUE, bins=40) +
geom_density(aes(x=log(transactionRevenue)), fill='orange', color='orange', alpha=0.3, na.rm=TRUE) +
labs(
title = 'Distribution of transaction revenue',
x = 'Natural log of transaction revenue'
)
# daily revenue over the time period
# NOTE(review): the y label says "(000s)" but `n` below is a raw visit
# count (not divided by 1000) -- confirm which is intended.
g1 <- dtrain[, .(n = .N), by=date] %>%
ggplot(aes(x=date, y=n)) +
geom_line(color='steelblue') +
geom_smooth(color='orange') +
labs(
x='',
y='Visits (000s)',
title='Daily visits'
)
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by=date] %>%
ggplot(aes(x=date, y=revenue)) +
geom_line(color='steelblue') +
geom_smooth(color='orange') +
labs(
x='',
y='Revenue (unit dollars)',
title='Daily transaction revenue'
)
grid.arrange(g1, g2, nrow=2)
# revenue by hour of day
g1 <-
dtrain[, .(visitHour = hour(visitStartTime))][
, .(visits = .N), by = visitHour] %>%
ggplot(aes(x = visitHour, y = visits / 1000)) +
geom_line(color = 'steelblue', size = 1) +
geom_point(color = 'steelblue', size = 2) +
labs(
x = 'Hour of day',
y = 'Visits (000s)',
title = 'Aggregate visits by hour of day (UTC)',
subtitle = 'August 1, 2016 to August 1, 2017'
)
# NOTE(review): `na.rm = T` uses the reassignable shorthand; TRUE is safer.
g2 <-
dtrain[, .(transactionRevenue, visitHour = hour(visitStartTime))][
, .(revenue = sum(transactionRevenue, na.rm =
T)), by = visitHour] %>%
ggplot(aes(x = visitHour, y = revenue / 1000)) +
geom_line(color = 'steelblue', size = 1) +
geom_point(color = 'steelblue', size = 2) +
labs(
x = 'Hour of day',
y = 'Transaction revenue (000s)',
title = 'Aggregate revenue by hour of day (UTC)',
subtitle = 'August 1, 2016 to August 1, 2017'
)
grid.arrange(g1, g2, nrow = 2)
# transaction revenue grouped by channel
g1 <- dtrain[, .(n = .N), by=channelGrouping] %>%
ggplot(aes(x=reorder(channelGrouping, -n), y=n/1000)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Channel Grouping',
y='Visits (000s)',
title='Visits by channel grouping')
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by=channelGrouping] %>%
ggplot(aes(x=reorder(channelGrouping, revenue), y=revenue/1000)) +
geom_bar(stat='identity', fill='steelblue') +
coord_flip() +
labs(x='Channel Grouping',
y='Revenue (dollars, 000s)',
title='Total revenue by channel grouping')
g3 <- dtrain[, .(meanRevenue = mean(transactionRevenue, na.rm=TRUE)), by=channelGrouping] %>%
ggplot(aes(x=reorder(channelGrouping, meanRevenue), y=meanRevenue)) +
geom_bar(stat='identity', fill='steelblue') +
coord_flip() +
labs(x='',
y='Revenue (dollars)',
title='Mean revenue by channel grouping')
# NOTE(review): a bare `g1` only auto-prints at the interactive prompt;
# wrap in print() if this script is source()d.
g1
grid.arrange(g2, g3, ncol = 2)
#####################
## device features ##
#####################
# visits/revenue by device
g1 <- dtrain[, .(n=.N/1000), by=operatingSystem][
n > 0.001
] %>%
ggplot(aes(x=reorder(operatingSystem, -n), y=n)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Operating System',
y='# of visits in data set (000s)',
title='Distribution of visits by device operating system') +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by=operatingSystem][
revenue > 0,
] %>%
ggplot(aes(x=reorder(operatingSystem, -revenue), y=revenue)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Operating System',
y='Revenue (unit dollars)',
title='Distribution of revenue by device operating system') +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
grid.arrange(g1, g2, nrow=2)
# visits/revenue by browser
# NOTE(review): `[1:10]` takes the first ten rows of the grouped result;
# ordering is only guaranteed for g2-style chains that sort first.
g1 <- dtrain[, .(n=.N/1000), by=browser][
1:10
] %>%
ggplot(aes(x=reorder(browser, -n), y=n)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Browser',
y='# of visits in data set (000s)',
title='Distribution of visits by browser (Top 10 browsers)') +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)/1000), by=browser][
1:10
] %>%
ggplot(aes(x=reorder(browser, -revenue), y=revenue)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Browser',
y='Revenue (dollars, 000s)',
title='Distribution of revenue by browser (top 10 browsers)') +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
grid.arrange(g1, g2, nrow=2)
# visits/revenue by device category
g1 <- dtrain[, .(n=.N/1000), by=deviceCategory]%>%
ggplot(aes(x=reorder(deviceCategory, -n), y=n)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Device Category',
y='# of records in data set (000s)',
title='Distribution of records by device category') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)/1000), by=deviceCategory] %>%
ggplot(aes(x=reorder(deviceCategory, -revenue), y=revenue)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Device category',
y='Revenue (dollars, 000s)',
title='Distribution of revenue by device category') +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
grid.arrange(g1, g2, ncol=2)
# difference in transaction revenue between mobile and non-mobile devices
dtrain %>%
ggplot(aes(x=log(transactionRevenue), y=..density.., fill=isMobile)) +
geom_density(alpha=0.5) +
scale_fill_manual(values = c('steelblue', 'orange')) +
labs(title='Distribution of log revenue by mobile and non-mobile devices')
#########################
## geographic features ##
#########################
# revenue by continent
dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)/1000), by = continent][
!is.na(continent),
] %>%
ggplot(aes(x=reorder(continent, revenue), y=revenue)) +
geom_bar(stat='identity', fill='steelblue') +
coord_flip() +
labs(x='', y='Revenue (dollars, 000s)', title='Total transaction revenue by continent')
# group by country and calculate total transaction revenue (log)
by_country <- dtrain[, .(n = .N, revenue = log(sum(transactionRevenue, na.rm=TRUE))), by = country]
by_country$iso3 <- countrycode(by_country$country, origin='country.name', destination='iso3c')
by_country[, rev_per_visit := revenue / n]
# create the highcharter map of revenue by country
highchart() %>%
hc_add_series_map(worldgeojson, by_country, value = 'revenue', joinBy = 'iso3') %>%
hc_title(text = 'Total transaction revenue by country (natural log)') %>%
hc_subtitle(text = "August 2016 to August 2017") %>%
hc_tooltip(useHTML = TRUE, headerFormat = "",
pointFormat = "{point.country}: ${point.revenue:.0f}")
# Map total (log) transaction revenue by country, restricted to one continent.
#
# Args:
#   continent: value matched against dtrain's `continent` column.
#   map_path:  highcharter map identifier (e.g. 'custom/europe').
#
# BUG FIX: the original filter was `continent == continent`; inside
# `[.data.table` both sides resolve to the *column*, so the comparison was
# always TRUE and every continent's rows were used. Copying the argument
# into a differently-named local restores the intended filter without
# changing the function's signature.
map_by_continent <- function(continent, map_path) {
  wanted <- continent
  mdata <- dtrain[
    continent == wanted, .(n = .N, revenue = log(sum(transactionRevenue, na.rm=TRUE))), by=country]
  mdata$iso3 <- countrycode(mdata$country, origin='country.name', destination='iso3c')
  hcmap(map=map_path, data=mdata, value='revenue', joinBy=c('iso-a3', 'iso3')) %>%
    hc_title(text = 'Total transaction revenue by country (natural log of unit dollars)') %>%
    hc_subtitle(text = "August 2016 to August 2017") %>%
    hc_tooltip(useHTML = TRUE, headerFormat = "",
               pointFormat = "{point.country}: {point.revenue:.0f}")
}
# call function for Europe
map_by_continent(continent='Europe', map_path='custom/europe')
# call function for Africa
map_by_continent('Africa', 'custom/africa')
# call function for Asia
map_by_continent('Asia', 'custom/asia')
# Both American maps pass continent='Americas'; hcmap drops countries that
# are outside the chosen base map.
# call function for South America
map_by_continent('Americas', 'custom/south-america')
# call function for North America
map_by_continent('Americas', 'custom/north-america')
# call function for Antarctica
map_by_continent('Antarctica', 'custom/antarctica')
##########################################
## visits and revenue by network domain ##
##########################################
# split networkDomain column on '.', add to dtrain
# NOTE(review): keep=c(2) takes the *second* dot-separated token of each
# domain, then re-prefixes '.'; the plots label this "top-level domain" --
# confirm that is the intended component.
dtrain[, domain := tstrsplit(dtrain$networkDomain, '\\.', keep=c(2))][
# add the '.' back in
!is.na(domain), domain := paste0('.', domain)
]
g1 <- dtrain[!is.na(networkDomain), .(n = .N), by = domain][order(-n)][!is.na(domain), ][1:20] %>%
ggplot(aes(x=reorder(domain, -n), y=n/1000)) +
geom_bar(stat='identity', fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(title='Number of visits from top-level domains',
y='Visits (000s)',
x='Top-level domain',
subtitle='Unknown domains excluded')
g2 <- dtrain[!is.na(networkDomain), .(revenue = sum(transactionRevenue, na.rm=TRUE)), by = domain][
order(-revenue)][
!is.na(domain), ][1:20] %>%
ggplot(aes(x=reorder(domain, -revenue), y=revenue/1000)) +
geom_bar(stat='identity', fill='steelblue') +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(
title='Revenue from top-level domains',
y='Revenue (000s)',
x='Top-level domain',
subtitle='Unknown domains excluded')
grid.arrange(g1, g2)
#####################
## totals features ##
#####################
# features probably correlated with revenue
g1 <- ggplot(dtrain, aes(x=log(pageviews), y=log(transactionRevenue))) +
geom_point(color='steelblue') +
geom_smooth(method='lm', color='orange') +
labs(
y='Transaction revenue (log)',
title='Pageviews vs transaction revenue',
subtitle='visit-level')
g2 <- ggplot(dtrain, aes(x=log(hits), y=log(transactionRevenue))) +
geom_point(color='steelblue') +
geom_smooth(method='lm', color='orange') +
labs(
y='Transaction revenue (log)',
title='Hits vs transaction revenue',
subtitle='visit-level')
# Marginal histograms on both scatter plots.
m1 <- ggMarginal(g1, type='histogram', fill='steelblue')
m2 <- ggMarginal(g2, type='histogram', fill='steelblue')
grid.arrange(m1, m2, nrow = 1, ncol = 2)
#############################
## Traffic Source Features ##
#############################
g1 <- dtrain[, .(visits = .N), by = medium][
!is.na(medium)] %>%
ggplot(aes(x=reorder(medium, visits), y=visits / 1000)) +
geom_bar(stat='identity', fill='steelblue') +
coord_flip() +
labs(
x='Medium',
y='Visits (000s)',
title='Distribution of visits by medium')
g2 <- dtrain[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by = medium][
!is.na(medium)] %>%
ggplot(aes(x=reorder(medium, revenue), y=revenue / 1000)) +
geom_bar(stat='identity', fill='steelblue') +
coord_flip() +
labs(
x='',
y='Transaction revenue (dollars, 000s)',
title='Distribution of revenue by medium')
grid.arrange(g1, g2, ncol=2)
|
fbdecc866de44f3f1c9963abdbb8db567bd5efa7
|
a9fadb4eddb93c2a9a831af35aaa804d0ebaade0
|
/acc_err_regression.R
|
190460361337b6db8aa01d1e35d5dce9d3aecb22
|
[] |
no_license
|
nabhagat/R_analysis
|
18658647423bcd7c262fa0ee9d6083cea335f25f
|
e68399ac2d3676a865ff6dac83bad2591caecf49
|
refs/heads/master
| 2021-01-15T15:46:42.552576
| 2020-08-27T08:39:04
| 2020-08-27T08:39:04
| 25,889,484
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
acc_err_regression.R
|
# Fit a per-subject linear regression of BMI task accuracy and of minimum
# error against session day, and print the slope significance for each
# subject.
library('R.matlab')

# Read MAT variables exported from MATLAB: one row per subject, one column
# per session.
acc_per_session <- as.data.frame(readMat("C:/NRI_BMI_Mahi_Project_files/All_Subjects//acc_per_session.mat", fixNames = FALSE))
err_per_session <- as.data.frame(readMat("C:/NRI_BMI_Mahi_Project_files/All_Subjects//err_per_session.mat", fixNames = FALSE))
subjects <- c("BNBO","ERWS","JF","LSGR","PLSH")
rownames(acc_per_session) <- subjects
rownames(err_per_session) <- subjects

# Sessions that were not recorded for these subjects are marked missing so
# na.action = na.omit drops them from the fits.
acc_per_session["JF",c(1,2)] <- NA
acc_per_session["ERWS",1] <- NA
err_per_session["JF",c(1,2)] <- NA
err_per_session["ERWS",1] <- NA

days <- c(3,4,5)

# Accuracy (converted to percent) vs. day, one fit per subject.
# seq_len(nrow(...)) replaces the hard-coded 1:5 so added subjects are
# picked up automatically.
for(i in seq_len(nrow(acc_per_session))){
  acc_regression <- lm(100*as.numeric(acc_per_session[i,]) ~ days,na.action = na.omit)
  print(rownames(acc_per_session[i,]))
  print(summary(acc_regression))
}
print("######################################################################")
# Minimum error vs. day, one fit per subject.
for(i in seq_len(nrow(err_per_session))){
  err_regression <- lm(as.numeric(err_per_session[i,]) ~ days,na.action = na.omit)
  print(rownames(err_per_session[i,]))
  print(summary(err_regression))
}
|
96c4d068ace6531d932671f26eb3339eb4c32a01
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/directconnect_allocate_connection_on_interconnect.Rd
|
e9e98ac1eaa554630d1ecf9b0bb709172c66bccd
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,368
|
rd
|
directconnect_allocate_connection_on_interconnect.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/directconnect_operations.R
\name{directconnect_allocate_connection_on_interconnect}
\alias{directconnect_allocate_connection_on_interconnect}
\title{Deprecated}
\usage{
directconnect_allocate_connection_on_interconnect(
bandwidth,
connectionName,
ownerAccount,
interconnectId,
vlan
)
}
\arguments{
\item{bandwidth}{[required] The bandwidth of the connection. The possible values are 50Mbps,
100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and
10Gbps. Note that only those Direct Connect Partners who have met
specific requirements are allowed to create a 1Gbps, 2Gbps, 5Gbps or
10Gbps hosted connection.}
\item{connectionName}{[required] The name of the provisioned connection.}
\item{ownerAccount}{[required] The ID of the Amazon Web Services account of the customer for whom the
connection will be provisioned.}
\item{interconnectId}{[required] The ID of the interconnect on which the connection will be provisioned.}
\item{vlan}{[required] The dedicated VLAN provisioned to the connection.}
}
\description{
Deprecated. Use \code{\link[=directconnect_allocate_hosted_connection]{allocate_hosted_connection}} instead.
See \url{https://www.paws-r-sdk.com/docs/directconnect_allocate_connection_on_interconnect/} for full documentation.
}
\keyword{internal}
|
82d9db16c9a046dd9949a5f257973f2cae64924c
|
2049af77bb51dcb4b078018093db5caa0cd1a27b
|
/unit4_target/smartphoneSensor.R
|
7c3dc0966c904c19d4e67af421bcc558c11f7350
|
[] |
no_license
|
ZhenJie-Zhang/DataMining
|
ad77a6ccf0181836f6b0ee4ef6ceaf79ee402360
|
2ec6e8c1aab57dec5f189ee80f8c3fff93bc2695
|
refs/heads/master
| 2020-08-14T02:39:11.032613
| 2019-10-14T15:42:34
| 2019-10-14T15:42:34
| 215,082,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
smartphoneSensor.R
|
# Plot smartphone accelerometer traces with 20-sample moving averages.
acc <- read.table("acc.txt", sep = "\t")
# NOTE(review): the orientation log is read but never used below -- confirm
# whether it can be dropped or was meant to be plotted as well.
ori <- read.table("orient.txt", sep = "\t")
colnames(acc) <- c("deviceID", "sensorName", "x", "y", "z")

# One panel per axis, stacked vertically.
par(mfrow=c(3,1))

# For each axis: raw trace, trailing 20-sample moving average (blue) and
# centered 20-sample moving average (red). `filter` is stats::filter here.
# The loop replaces three copy-pasted plot blocks; ylab is set explicitly
# to reproduce the labels the original calls produced (e.g. "acc$x").
for (ax in c("x", "y", "z")) {
  signal <- acc[[ax]]
  plot(signal, type = "l", ylab = paste0("acc$", ax))
  lines(filter(signal, rep(1/20, 20), sides = 1), col = "blue")
  lines(filter(signal, rep(1/20, 20), sides = 2), col = "red")
}
|
7f553b1565a6fbbe1286446f0c83309426ad65e9
|
ddd147af3b80855ebea82f993b2aab30c10671fd
|
/cachematrix.R
|
a5e588563ee8356991caa1c1518376d25c916624
|
[] |
no_license
|
dashess/ProgrammingAssignment2
|
3690836a68e3df189ee5e1d0f345d7de1ff31404
|
ec2d3dbc42f1b1fc5ff6df82f0ce29ca8627641a
|
refs/heads/master
| 2021-04-26T22:25:54.921847
| 2018-03-07T11:50:22
| 2018-03-07T11:50:22
| 124,090,811
| 0
| 0
| null | 2018-03-06T14:32:23
| 2018-03-06T14:32:22
| null |
UTF-8
|
R
| false
| false
| 3,474
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix uses a matrix m and returns a list with functions.
## each function can be called using $set(), $get(), $setInverse() and
## $getInverse() on the variable name you stored makeCacheMatrix(m) in.
## comments explain what each variable used in the function does.
## args:
## x: matrix (OPTIONAL)
## returns:
## list of functions to get/set value & get/set inverse of a matrix
## Wrap a matrix in a cache-enabled container.
##
## Returns a list of four closures sharing this call's environment:
##   set(y)              replace the stored matrix and clear the cached inverse
##   get()               return the stored matrix
##   setInverse(inverse) store a computed inverse in the cache
##   getInverse()        return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Write a short comment describing this function
## This function returns the inverse of a specified matrix and stores
## the output into the functions memory. If used again it can quickly
## output it from memory instead of calculating it again.
## args:
## x: matrix (OPTIONAL)
## ...: Extra arguments
## returns:
## 1st use the inverse of the matrix from calculation
## after 1st use the inverse of the matrix from memory + message
## "already did this"
## Return the inverse of the matrix held by a makeCacheMatrix() wrapper.
##
## The first call computes the inverse with solve() and stores it via
## x$setInverse(); subsequent calls announce and return the cached copy.
##
## x   : list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("already did this")
  cached
}
## Example
# m <- matrix(c(2,1,1,2), nrow = 2, ncol = 2, byrow = TRUE)
# [,1] [,2]
# [1,] 2 1
# [2,] 1 2
#
# m2 <- makeCacheMatrix(m)
# cacheSolve(m2)
# [,1] [,2]
# [1,] 0.6666667 -0.3333333
# [2,] -0.3333333 0.6666667
# cacheSolve(m2)
# already did this
# [,1] [,2]
# [1,] 0.6666667 -0.3333333
# [2,] -0.3333333 0.6666667
|
5328a9df7f58693ba18f04388965d95d17cae0bf
|
5707fa70258d02ca5981010f078f635d0c5a09ca
|
/code/07-reports.R
|
38597ce175379f8a83a3c3e84999d874502be80e
|
[
"MIT"
] |
permissive
|
cgpu/2019-feature-selection
|
b60c3a3fe13abcc22836b272dd8bda27d0cb93bf
|
c7b33508cf92359718c47e13e01688a82d30a7bd
|
refs/heads/master
| 2020-08-07T15:49:53.064371
| 2019-12-10T08:31:56
| 2019-12-21T17:25:19
| 213,512,464
| 0
| 0
|
NOASSERTION
| 2019-12-21T17:25:20
| 2019-10-08T00:28:15
| null |
UTF-8
|
R
| false
| false
| 706
|
r
|
07-reports.R
|
# drake plans that render the project's workflowr reports.
# Each target calls wflow_publish() on an Rmd file tracked via knitr_in(),
# so drake rebuilds a report whenever its source document changes.
# NOTE(review): assumes drake and workflowr are attached by the calling
# script — neither is loaded here.
reports_plan_paper <- drake_plan(
  eda_wfr = wflow_publish(knitr_in("analysis/eda.Rmd"), view = FALSE, verbose = TRUE),
  spectral_signatures_wfr = wflow_publish(knitr_in("analysis/spectral-signatures.Rmd"), view = FALSE),
  filter_correlations_wfr = wflow_publish(knitr_in("analysis/filter-correlation.Rmd"), view = FALSE, verbose = TRUE),
  eval_performance = wflow_publish(knitr_in("analysis/eval-performance.Rmd"), view = FALSE, verbose = TRUE),
  response_normality = wflow_publish(knitr_in("analysis/response-normality.Rmd"), view = FALSE, verbose = TRUE)
)
# Project-level (non-paper) report targets.
reports_plan_project <- drake_plan(
  defoliation_maps_wfr = wflow_publish(knitr_in("analysis/report-defoliation.Rmd"), view = FALSE)
)
|
cfffcbd9b7fcb7272f15e6e6f6966ce1775e8990
|
76261a184e7aef8de40a3aa41a469148fe6d02f6
|
/R-demos/packages/ggplot_plot_example.R
|
2c4f66775cdf3ff54aa602a371d9b58b093f043f
|
[] |
no_license
|
AngelinaBao/Rnotes
|
e3b7dbd80df24fd7f0a3c2f10588f07f37e22659
|
cb9864738d776a19c3cf4d95d37cefcac46374c6
|
refs/heads/master
| 2020-03-24T21:16:08.090091
| 2019-02-09T01:41:17
| 2019-02-09T01:41:17
| 143,022,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
ggplot_plot_example.R
|
# Demo: displ vs hwy scatter with a smoother, faceted by vehicle class.
library(ggplot2)
# BUG FIX: the original only loaded ggplot2, which provides neither
# filter() nor the %>% pipe used below; dplyr supplies both.
library(dplyr)

# Keep only the three vehicle classes of interest from ggplot2's mpg data.
mpg <- mpg %>% filter(class %in% c("suv", "compact", "subcompact"))

ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(color = drv)) +                        # colour points by drive train
  geom_smooth(se = FALSE) +                             # trend line, no confidence band
  coord_cartesian(xlim = c(1.5, 7), ylim = c(10, 45)) + # zoom without dropping data
  facet_wrap(~ class, nrow = 3) +
  labs(title = "displ vs hwy in mpg",
       x = "displ(in litres)",
       y = "hwy(highway miles per gallon)",
       caption = "based on data from mpg in ggplot2 package",
       color = "DRV") +
  theme_bw()
|
46c910427fcec15d886ed32e36ad4ebec0900801
|
c3d00c53859d9b9492cc5ac18bd02eb61d724445
|
/man/SSLB-package.Rd
|
ea8d635c18b71ef97f1d7d6a2f8eac984388f18b
|
[] |
no_license
|
gemoran/SSLB
|
8c8e7d5bd70f24b28347cf2dd109826b2736b1a6
|
aba8e406b03be28c8a83ac51ad39bb8ba927740a
|
refs/heads/master
| 2021-11-13T04:28:02.359598
| 2021-10-29T15:21:46
| 2021-10-29T15:21:46
| 157,580,978
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
rd
|
SSLB-package.Rd
|
\name{SSLB-package}
\alias{SSLB-package}
\docType{package}
\title{
\packageTitle{SSLB}
}
\description{
\packageDescription{SSLB}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{SSLB}
\packageIndices{SSLB}
}
\author{
\packageAuthor{SSLB}
Maintainer: \packageMaintainer{SSLB}
}
\references{
Moran, G. E., Rockova, V. and George, E. I. (2021) "Spike-and-Slab Lasso Biclustering" Annals of Applied Statistics
}
|
49365da72f740469a8f97fa461cf48410fe20b91
|
add4690e78eb93ec89c0afdfb9676186232418ff
|
/plot2.R
|
159a621db42381f82c1629e1bf3c8ba595b16321
|
[] |
no_license
|
LovisaReinius/ExData_Plotting1
|
46b0251de19ca06596222d9f3d2948c11e609a94
|
e447ecdfbbb22218556f53f56ec62e1e5da2ea63
|
refs/heads/master
| 2021-01-19T17:28:07.622896
| 2017-08-22T12:37:18
| 2017-08-22T12:37:18
| 101,060,753
| 0
| 0
| null | 2017-08-22T12:32:39
| 2017-08-22T12:32:39
| null |
UTF-8
|
R
| false
| false
| 723
|
r
|
plot2.R
|
### Exploratory Data Analysis - Course project 1
# Download the data file, unzip it into the working directory, then load it.
# "?" and empty strings mark missing values in the raw export.
epc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?",""))
# Keep only the rows for 2007-02-01 and 2007-02-02 (dates stored as d/m/Y).
epcdates <- epc[as.character(epc$Date) %in% c("1/2/2007", "2/2/2007"),]
# Combine date and time into a single POSIXct timestamp for the x-axis.
library(lubridate)
epcdates$datetime<- paste(epcdates$Date,epcdates$Time)
epcdates$datetime<-dmy_hms(epcdates$datetime)
### Plot 2
# Line chart of global active power across the two days, saved as a PNG.
png(file = "plot2.png")
plot(epcdates$datetime, as.numeric(as.character(epcdates$Global_active_power)), type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
6a6851498276572947e4148d04a5bc6de0d68fc2
|
68f63ff26f6ba800eaf445f844cee713a7e3cf7d
|
/R/create-data/get-lockdown-duration.R
|
d36e9a470e1eaddf20874b4ad815721e982e5f98
|
[
"MIT"
] |
permissive
|
zackbh/covid19-vsl
|
343fb8c7cd6a4501cb7743d08d78e99b5847ecaa
|
e6d9feeaad1fff5253a9f45dbe0c101329ddaf04
|
refs/heads/master
| 2021-05-21T18:18:14.448798
| 2020-09-03T13:39:11
| 2020-09-03T13:39:11
| 252,749,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
get-lockdown-duration.R
|
###############################################################################
# Read lockdown data ----
library(dplyr)
library(lubridate)

# How long were lockdowns on average? -----
# Using Wikipedia data by country
## https://en.wikipedia.org/wiki/COVID-19_pandemic_lockdowns#Table_of_pandemic_lockdowns
## Current as of 2020-06-18
df <- readr::read_csv(here::here("data/raw-data/lockdown-duration.csv"),
                      col_names = c("country", "area", "start_date", "end_date", "level"), skip = 0) %>%
  filter(level == "National") %>%   # keep nationwide lockdowns only
  janitor::clean_names() %>%
  # Dates arrive as strings with trailing annotations; the first 10
  # characters are the yyyy-mm-dd portion parsed by lubridate::ymd().
  mutate(start_date = lubridate::ymd(stringr::str_sub(start_date, 1L, 10L)),
         end_date = lubridate::ymd(stringr::str_sub(end_date, 1L, 10L)),
         lockdown_duration = end_date - start_date)

# How long is the average and median lockdown? ----
# FIX: spell out TRUE instead of the reassignable shorthand T.
mean(df$lockdown_duration, na.rm = TRUE)
median(df$lockdown_duration, na.rm = TRUE)
|
018d46f6a3011f584314803ef885e842a6a9fedb
|
852a84357a58db1f3e99d4ff196612515cbd8707
|
/R/nth_highest_val.R
|
4f3561f0458ae13e791f7adf17fe205949acd760
|
[
"MIT"
] |
permissive
|
siggitrausti/siggitRausti
|
408f38c0cde226014d41ed2052761af4a92f6294
|
61241036371d65f8f7bcb23fefabab8ab0a66818
|
refs/heads/master
| 2021-07-08T07:36:07.653794
| 2020-09-03T11:34:34
| 2020-09-03T11:34:34
| 180,577,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
nth_highest_val.R
|
#' Extract the n-th highest value of a vector
#'
#' @param my_vector vector whose values are ranked
#' @param n rank of the value to return (1 = largest)
#' @keywords indexing
#' @export
#' @examples
#' nth_highest_val(c(4, 1, 9), 2)
nth_highest_val <- function(my_vector, n) {
  # Sorting in decreasing order places the n-th highest value at index n.
  # sort() drops NA values by default, matching the original behaviour.
  sort(my_vector, decreasing = TRUE)[n]
}
|
9790e1dc2d3dd574bc1b7d681e23c74eadfff42d
|
d95777e6402e53221ce1835cd1b7e6bc89f0037a
|
/sentimentIt.Rcheck/00_pkg_src/sentimentIt/R/data.R
|
fb9e404d79084fef249e2e3c04be4ce1d2bec22d
|
[] |
no_license
|
carlson9/SentimentIt
|
cb94958ab1a683d083a7a5753ca32fd42116c46b
|
d1855acfb1273fe26fd119b3291de478136b699d
|
refs/heads/master
| 2021-03-27T12:34:06.692224
| 2018-06-22T15:54:15
| 2018-06-22T15:54:15
| 55,007,713
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 923
|
r
|
data.R
|
#' Movie reviews example data set
#'
#' An example dataset of Rotten Tomato movie reviews with the number of stars given by the user. This is the first application in the associated paper introducing SentimentIt.
#'
#' @format A data frame with 500 rows and 2 columns:
#' \describe{
#' \item{Stars}{rating on scale from 1-5 by Mechanical Turk worker}
#' \item{Review}{The movie review evaluated by the worker}
#' }
#' @seealso \code{\link{sentimentIt}} \code{\link{authenticate}} \code{\link{batchStatus}} \code{\link{checkCert}} \code{\link{checkWorkers}} \code{\link{createBatches}} \code{\link{createCert}} \code{\link{createPairwise}} \code{\link{createTasks}} \code{\link{fitStan}} \code{\link{fitStanHier}} \code{\link{makeCompsSep}} \code{\link{readInData}} \code{\link{readText}} \code{\link{repostExpired}} \code{\link{revokeCert}} \code{\link{signout}}
#' @source \url{https://www.rottentomatoes.com}
"reviews"
|
5ab6b70158620040cd20171646c7fac8df009f3d
|
e0733e54c3a9078e046663ad84ca5e7488489efd
|
/R/sstFunctions.R
|
4d47f04f8ad7ed30131482250d43bb42de5121d1
|
[] |
no_license
|
npp97-field/hadsstR
|
7afde0ff9e00bf48020d6b1043e7846c4a088d98
|
f218bfc992b96e413a6186fac00cd4f063564b2b
|
refs/heads/master
| 2020-12-28T23:16:19.584989
| 2015-04-24T14:23:44
| 2015-04-24T14:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,022
|
r
|
sstFunctions.R
|
# Change in annual-mean SST between the first and last requested year.
# sstObj : SST data object accepted by getSSTAnnualArray() (defined
#          elsewhere in the package — not visible here)
# years  : vector of years; only the first and last slices are compared
# Returns the last-year matrix minus the first-year matrix.
getSSTChangeMat<- function(sstObj, years=1969:2009){
sstAnnualArray <- getSSTAnnualArray(sstObj, years)
changeMat <- sstAnnualArray[,,length(years)] - sstAnnualArray[,,1]
changeMat
}
#note - you might think that I've reversed rows and columns in the below method
#but, the matrices are stored on their sides - so this is a little wonky
#e.g. I use rows for WE and columns for NS due to being a transposed matrix
# West-east temperature gradient in degrees C per km.
# Each row is differenced against the next one (the last row wraps back to
# the first), then scaled by the longitudinal cell width in km
# (111.325 km per degree, shrunk by cos(latitude)). The final sign flip
# makes positive values point toward warmer temperatures in the East
# (Burrows convention). Note: the grid is stored transposed, so rows run
# W-E and columns N-S here.
getWEChangeMat <- function(averageMat, latitudes) {
  n_rows <- nrow(averageMat)
  wrapped_next <- averageMat[c(2:n_rows, 1), , drop = FALSE]
  per_cell <- averageMat - wrapped_next
  km_per_degree <- 111.325 * cos(latitudes * pi / 180)
  -t(t(per_cell) / km_per_degree)
}
# North-south temperature gradient in degrees C per km.
# Each column is differenced against the previous one; the final column is
# padded with NA (no neighbour to difference against). The result is
# converted from degrees per grid cell to degrees per km using 111.325 km
# per degree of latitude.
getNSChangeMat <- function(averageMat) {
  n_cols <- ncol(averageMat)
  per_cell <- averageMat[, 2:n_cols, drop = FALSE] -
    averageMat[, 1:(n_cols - 1), drop = FALSE]
  cbind(per_cell, NA) / 111.325
}
# Weighted spatial gradient of SST at grid cell (i, j).
# NSmat : north-south gradient matrix (from getNSChangeMat())
# WEmat : west-east gradient matrix (from getWEChangeMat())
# Returns c(magnitude, bearing): the vector magnitude of the neighbourhood-
# averaged gradient and its compass angle in degrees (0-360 from north),
# or c(NA, NA) when the cell touches the top/bottom edge.
# Column indices wrap around (E-W around the globe); row indices do not.
getSpatialGrad <- function(NSmat, WEmat, i,j){
li <- ncol(NSmat)  # column count; E-W wrap limit
lj <- nrow(WEmat)  # row count; N-S hard edge
# print(c(i,j))
#get bounding box indices of the 3x3 neighbourhood
id <- i+1  # row below
iu <- i-1  # row above
jl <- j-1  # column to the left
jr <- j+1  # column to the right
if(jr>li) jr<-1 #wrap
if(jl==0) jl<-li #wrap
if(id>lj) return(c(NA, NA)) #edge row treated as ice: no gradient
if(iu==0) return(c(NA, NA)) #edge row treated as ice: no gradient
# N-S gradient: centre and vertical neighbour get weight 2, the four
# diagonal cells weight 1; NA neighbours are dropped from the mean.
yGrad <- weighted.mean(c(NSmat[i,j],NSmat[iu,j],
NSmat[iu,jl], NSmat[iu,jr], NSmat[id,jl], NSmat[id,jr]),
c(2,2,rep(1,4)), na.rm=T)
# NOTE(review): the original comment said "multiplying by -1 to correct",
# but no sign flip happens here — the flip lives in getWEChangeMat; confirm.
xGrad <- weighted.mean(c(WEmat[i,j],WEmat[i,jl],
WEmat[iu,jl], WEmat[iu,jr], WEmat[id,jl], WEmat[id,jr]),
c(2,2,rep(1,4)), na.rm=T)
# Convert the (x, y) gradient to polar form: magnitude plus a compass
# bearing measured clockwise from north, normalised into [0, 360).
vecSum <- sqrt(xGrad^2+yGrad^2)
vecAngle <- NA
if(!is.na(vecSum)){
vecAngle <- 90-atan2(yGrad, xGrad)*180/pi
if(vecAngle<0) vecAngle <- 360+vecAngle
}
return(c(vecSum, vecAngle))
}
# Evaluate getSpatialGrad() at every grid cell, collecting gradient
# magnitudes and compass bearings into two matrices shaped like WEmat.
# Returns list(spatialGradMat = magnitudes, angleMat = bearings in degrees).
getSpatialGradMatsFromMats <- function(NSmat, WEmat) {
  n_row <- nrow(WEmat)
  n_col <- ncol(WEmat)
  magnitude <- matrix(NA, nrow = n_row, ncol = n_col)
  bearing <- matrix(NA, nrow = n_row, ncol = n_col)
  for (col in seq_len(n_col)) {
    for (row in seq_len(n_row)) {
      grad <- getSpatialGrad(NSmat, WEmat, row, col)
      magnitude[row, col] <- grad[1]
      bearing[row, col] <- grad[2]
    }
  }
  list(spatialGradMat = magnitude, angleMat = bearing)
}
# End-to-end spatial gradient computation for a span of years:
# annual SST array -> long-term mean matrix -> N-S and W-E gradients ->
# per-cell gradient magnitude/angle matrices.
# Relies on getSSTAnnualArray() and getSSTAvgMatFromArray(), defined
# elsewhere in the package, and on sstObj carrying a $lat component.
getSpatialGradMats <- function(sstObj, years=1969:2009){
#get the array of temps over all years, averaged by year
sstAnnualArray <- getSSTAnnualArray(sstObj, years)
#get the long-term average matrix of temps
averageMat <-getSSTAvgMatFromArray(sstAnnualArray)
#get info on spatial gradients in both directions
NSmat <- getNSChangeMat(averageMat)
WEmat <- getWEChangeMat(averageMat, sstObj$lat)
getSpatialGradMatsFromMats(NSmat, WEmat)
}
|
2706675b312c3b0bd7c1963e79aaa4e40b511444
|
a72c87079aee185de30088503886bce58c6a7776
|
/lib/cross_validation_pca_lda.R
|
7b94e3131e8c301a7737fccc77405c2f0c9ba531
|
[] |
no_license
|
TZstatsADS/Spring2021-Project3-group4
|
092b061e00aecf12a1828cc0ea3f2bf01cb59b50
|
6faadd3c67073153ebf5b51b441393f6880cd6d0
|
refs/heads/master
| 2023-03-14T20:35:39.283553
| 2021-03-17T21:35:07
| 2021-03-17T21:35:07
| 342,067,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
cross_validation_pca_lda.R
|
########################
###   pca for lda    ###
########################
### Author: Yushi Pan
### Project 3

# K-fold cross-validated misclassification rate of an LDA classifier fit
# on the first `num_pc` principal components of the training features.
#
# features : numeric feature matrix/data frame (rows = observations)
# labels   : class labels, one per row of `features`
# K        : number of cross-validation folds
# num_pc   : number of principal components retained as LDA inputs
#
# Requires lda() (MASS) to be on the search path.
# Returns the mean test error across the K folds.
pca_evaluation <- function(features, labels, K, num_pc) {
  set.seed(2021)  # fixed seed so fold assignment is reproducible
  n <- dim(features)[1]
  # Random fold id in 1..K for each observation.
  # (Removed unused `n.fold` variable from the original.)
  s <- sample(n) %% K + 1
  cv.error <- rep(NA, K)
  for (i in 1:K) {
    ## split features and labels into train/test for this fold
    feature_train <- features[s != i, ]
    feature_test <- features[s == i, ]
    label_train <- labels[s != i]
    label_test <- labels[s == i]
    ## fit PCA on the training fold only, then project both folds onto
    ## the first num_pc components (avoids information leakage)
    pca <- prcomp(feature_train)
    train_pca <- data.frame(pca$x[, 1:num_pc])
    pred <- predict(pca, feature_test)
    test_pca <- data.frame(pred[, 1:num_pc])
    dat_train_pca <- cbind(train_pca, label_train)
    dat_test_pca <- cbind(test_pca, label_test)
    ## LDA on the reduced features; error = share of wrong predictions
    lda.model.pca <- lda(label_train ~ ., data = dat_train_pca)
    label_pred <- predict(lda.model.pca, dat_test_pca[-dim(dat_test_pca)[2]])$class
    cv.error[i] <- mean(label_pred != label_test)
  }
  return(mean(cv.error))
}
|
9618ce7b4727ed9ce6294bf146bb13f565a862e7
|
cec479ddf133987a5cd1acfaeb4056064e03a4c2
|
/R/utils.R
|
aa5c99866f91b27af23d53114bea2fb05ee7025f
|
[] |
no_license
|
Tsaoxy/microbiomeViz
|
3f7dfe28dbd825d6b96cd75cc2396efc986ea11f
|
e22d7b19835f9b0b3d0785931bbd63b898f6299c
|
refs/heads/master
| 2022-12-05T18:10:32.206941
| 2020-09-03T11:42:41
| 2020-09-03T11:42:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,857
|
r
|
utils.R
|
##' @title formatPhrase
##'
##' @param sentence a phrase containing taxon name(s); non-character input
##'   (e.g. an already-built expression) is returned unchanged
##' @param taxon a taxon name to be italicized
##' @return the input unchanged when it is not character or does not contain
##'   \code{taxon}; otherwise a plotmath expression with \code{taxon} italicized
##' @export
##' @author Chenhao Li, Guangchuang Yu
##' @description generate an expression for the phrase with the given taxon italicized
formatPhrase <- function(sentence, taxon){
## already an expression
if(!is.character(sentence)) return(sentence)
## no pattern matched: return the plain string as-is
if(length(grep(x=sentence, taxon, fixed = TRUE))==0) return(sentence)
p <- taxon
# Close the surrounding string, splice in an italic plotmath fragment,
# then reopen the string: "...'~italic('taxon')~'..."
s <- paste0("'~italic('", taxon, "')~'")
str <- gsub(x=paste0("'",sentence,"'"), p, s, fixed = TRUE)
return(parse(text=bquote(.(str))))
}
################################################################################
##' @title summarize_taxa
##'
##' @param physeq a phyloseq object
##' @param level the taxonomy rank (column index of the taxonomy table) to
##'   summarize at
##' @param keep_full_tax logical; if \code{TRUE} (default) label taxa with the
##'   full lineage ("r__Root|rank1|...|rankN"), otherwise only with the name
##'   at \code{level}
##' @return a data frame with one row per taxonomy label and one abundance
##'   column per sample
##' @importFrom magrittr "%>%"
##' @importFrom reshape2 melt dcast
##' @import dplyr
##' @export
##' @author Chenghao Zhu, Chenhao Li, Guangchuang Yu
##' @description Summarize a phyloseq object on different taxonomy level
summarize_taxa = function(physeq, level, keep_full_tax = TRUE){
# phyloseq is only suggested, so fail with a clear message if absent
if (!requireNamespace("phyloseq", quietly = TRUE)) {
stop("Package \"phyloseq\" needed for this function to work. Please install it.",
call. = FALSE)
}
otutab = phyloseq::otu_table(physeq)
taxtab = phyloseq::tax_table(physeq)
if(keep_full_tax){
# prepend a synthetic root rank and join ranks 1..level with "|"
taxonomy = apply(taxtab[,1:level], 1, function(x)
paste(c("r__Root", x), collapse="|"))
}else{
taxonomy = taxtab[,level]
}
# melt to long form, sum abundances per (taxonomy, sample), then re-widen
otutab %>%
as.data.frame %>%
mutate(taxonomy = taxonomy) %>%
melt(id.var = "taxonomy",
variable.name = "sample_id") %>%
group_by(taxonomy, sample_id) %>%
summarize(value=sum(value)) %>%
dcast(taxonomy~sample_id)
}
################################################################################
##' @title fix_duplicate_tax
##'
##' @param physeq a phyloseq object
##' @return the phyloseq object with ambiguous duplicate taxon names
##'   disambiguated by prefixing the parent rank's name (joined with "_")
##' @author Chenghao Zhu, Chenhao Li, Guangchuang Yu
##' @export
##' @description fix the duplicatae taxonomy names of a phyloseq object
fix_duplicate_tax = function(physeq){
if (!requireNamespace("phyloseq", quietly = TRUE)) {
stop("Package \"phyloseq\" needed for this function to work. Please install it.",
call. = FALSE)
}
taxtab <- phyloseq::tax_table(physeq)
# Walk ranks from column 3 to the most specific rank.
# NOTE(review): starting at column 3 assumes the first two ranks never need
# disambiguating — confirm for taxonomy tables with few ranks.
for(i in 3:ncol(taxtab)){
uniqs = unique(taxtab[,i])
for(j in 1:length(uniqs)){
if(is.na(uniqs[j])) next
# rows sharing this name at rank i
ind = which(taxtab[,i]== as.character(uniqs[j]))
# if those rows disagree at the parent rank, the name is ambiguous:
# prefix it with each row's parent-rank name
if(length(unique(taxtab[ind,i-1]))>1){
taxtab[ind,i] = paste(taxtab[ind,i-1], taxtab[ind,i], sep="_")
}
}
}
phyloseq::tax_table(physeq) = taxtab
return(physeq)
}
|
83946549429d24878cd9c69b6ecf9cf9658bf0fa
|
f4991c8c1e5b7a3676fe717d3ad8f385734f1197
|
/plot2.r
|
888a7f6ed1abbc980e608a02bf4115ee05837b3d
|
[] |
no_license
|
askorek/ExData_Plotting1
|
d630c9dfa845fba44db005ee32e1b121e9058ab6
|
2a6a5349707be78e2500cc55fad522c4a584d9e5
|
refs/heads/master
| 2021-01-14T13:49:21.209503
| 2015-10-09T13:58:05
| 2015-10-09T13:58:05
| 43,957,850
| 0
| 0
| null | 2015-10-09T13:46:47
| 2015-10-09T13:46:47
| null |
UTF-8
|
R
| false
| false
| 564
|
r
|
plot2.r
|
# Plot 2: global active power over 2007-02-01/02 from the UCI power data.
# "?" and empty strings mark missing values in the raw export.
data = read.table("household_power_consumption.txt", header=TRUE, sep= ";", na.strings = c("?",""))
# Keep only the two target days (dates stored as d/m/Y strings).
data_filtered = subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
data_filtered$Date = as.Date(strptime(data_filtered$Date, '%d/%m/%Y'))
# Combine date and time into a single timestamp for the x-axis.
data_filtered$timetemp = paste(data_filtered$Date, data_filtered$Time)
data_filtered$Time = strptime(data_filtered$timetemp, format = "%Y-%m-%d %H:%M:%S")
# Line chart written to plot2.png in the working directory.
png(file = "plot2.png")
plot(data_filtered$Time, data_filtered$Global_active_power, xlab = "", ylab = "Global Active Power(kilowatts)", type = 'l')
dev.off()
|
db46fdcd19251e3cc60934e2d13aea0931210afb
|
c4522a72b9543374d9f6b74bd387a071490348d8
|
/man/rotdat.Rd
|
a79ad6453c548051f2c510bf7824d03ea292dab6
|
[] |
no_license
|
cran/SCCS
|
8aab25b4cf8b2e547369a71d3b3508e25147667c
|
aa0e7c0a549f67ba7017712f13cdbe5e529c852b
|
refs/heads/master
| 2022-07-07T05:02:30.814331
| 2022-07-05T13:20:09
| 2022-07-05T13:20:09
| 133,012,639
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
rd
|
rotdat.Rd
|
\name{rotdat}
\docType{data}
\alias{rotdat}
\title{Data on Rotavirus vaccine and intussusception}
\description{
The data comprise ages in days at rotavirus vaccine (RV) and first symptoms of intussusception in children between the ages of 42 and 183 days. There are 566 cases. Ages have been jittered.
}
\usage{rotdat}
\format{A data frame containing 566 rows and 6 columns. The column names are 'case' (individual identifier), 'sta' (age on first day of the observation period), 'end' (age on last day of the observation period), 'intus' (age at first symptom of intussusception), 'rv' (age at first dose of RV), 'rvd2' (age at second dose of RV).}
%\source{}
\references{
Stowe J., Andrews, N., Ladhani, S., and Miller E. (2016). The risk of intussusception following monovalent rotavirus vaccination in England: A self-controlled case-series evaluation. Vaccine 34, 3684-3689.
}
\keyword{datasets}
|
39f54350454d4c04d5ee91b79064641756d79335
|
3c0cb98f7e31a4ce850bd715dff583b280e878c4
|
/R/filter.R
|
adaeb32df208e310acf2c8ed186b21394961a213
|
[] |
no_license
|
LeaBreniere/minidplyr
|
22e24f4208de60a33eb0c4f3846f1da0cc1c2424
|
368daf306941d31ccee71643398a78a47a8385c9
|
refs/heads/master
| 2021-04-18T21:49:53.744141
| 2018-03-27T09:09:12
| 2018-03-27T09:09:12
| 126,301,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99
|
r
|
filter.R
|
#' Subset rows of a data frame
#'
#' @param x a data frame (or matrix-like object supporting `[i, ]`)
#' @param ind row indices (integer or logical) selecting the rows to keep
#'
#' @export
#'
filter2 <- function(x, ind) {
  # Plain bracket subsetting; note a one-column data.frame collapses to a
  # vector here, matching base `[` semantics.
  selected_rows <- x[ind, ]
  selected_rows
}
|
09d67002c182ba9d2ded0341e2a0c9a351e7a5d1
|
f652d759db19d036860f01f84df8681eef37050f
|
/R/c_test.R
|
60c25fb14c840c99b3a244fc43d0d87b193ef1fb
|
[] |
no_license
|
wch/rcpptest
|
066bd096cc038e5d84f62d5553066c0d51b76191
|
383e6b08205ea5411433a4835287aad74411f212
|
refs/heads/master
| 2016-09-05T13:58:12.998780
| 2012-07-23T23:16:55
| 2012-07-23T23:16:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 85
|
r
|
c_test.R
|
# Thin R wrapper around the compiled routine "get_folder_path" registered
# by the rcpptest package's shared library.
# NOTE(review): behaviour is defined entirely on the C/C++ side and cannot
# be verified from this file.
get_folder_path <- function(){
	.Call( "get_folder_path", PACKAGE = "rcpptest" )
}
|
276b7ee668845fce966016625d1bfa848321db7c
|
b83c7ce220eef7e0ce1c209b937dd8bc48577db2
|
/man/validate_table.Rd
|
1f2afdd2cb149aff0c615a5de731fe8b2e254758
|
[] |
no_license
|
mahowak/peekds
|
d35a08d983284223f9ea6d3497757a151866f3e4
|
47550093666a108d31d1edfca867cb80bd147278
|
refs/heads/master
| 2020-09-21T10:56:15.359908
| 2020-07-24T00:43:48
| 2020-07-24T00:43:48
| 224,767,618
| 0
| 0
| null | 2019-11-29T03:09:11
| 2019-11-29T03:09:10
| null |
UTF-8
|
R
| false
| true
| 725
|
rd
|
validate_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validators.R
\name{validate_table}
\alias{validate_table}
\title{Check if the table is EtDS compliant before saving as csv or importing into database}
\usage{
validate_table(df_table, table_type)
}
\arguments{
\item{df_table}{the data frame to be saved}
\item{table_type}{the type of table, can be one of this six types:
xy_data, aoi_data, participants, trials, dataset, aoi_regions}
}
\value{
TRUE when the column names of this table are valid
}
\description{
Check if the table is EtDS compliant before saving as csv or importing into database
}
\examples{
\dontrun{
is_valid <- validate_table(df_table = df_table, table_type = "xy_data")
}
}
|
18c24c737ca44ff93bdc8eacc56408543374fd03
|
8b5e9897c10bd990e7aee1201325397dfb7e1c82
|
/Graduação/2021/S1/Checkpoint_Atividade_Complementar/AR.R
|
09226593e9c56887f95282366a03718c826e61ba
|
[] |
no_license
|
Uemura84/FIAP
|
9ea08e79b4b9f1c8de87c317d5bb19f8e66bfd30
|
2c9a62b775096c108c0da42dcb4f66dbd74a0e07
|
refs/heads/master
| 2023-08-02T10:40:17.526656
| 2021-10-08T02:31:11
| 2021-10-08T02:31:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
r
|
AR.R
|
# Build a 12-month autoregressive (lagged GMV) design table for "Loja 1".
fat <- read.csv("https://raw.githubusercontent.com/diogenesjusto/FIAP/master/Gradua%C3%A7%C3%A3o/2021/S1/Checkpoint_Atividade_Complementar/faturamento.csv")

# Keep a single store; rows are assumed to be in chronological order.
fat2 <- fat[fat$loja=="Loja 1",]

# Drop the first 12 rows so every remaining row has 12 past observations.
fat3 <- fat2[13:396,]

# GMVk = GMV observed k periods before the current row (k = 1..12).
# Replaces the original 12 copy-pasted assignments with a loop.
for (k in 1:12) {
  fat3[[paste0("GMV", k)]] <- fat2[(13 - k):(396 - k), ]$GMV
}

View(fat3)

# BUG FIX: the original called write.csv("faturamento3.csv"), which treats
# the file name as the data to write (and writes it to stdout) instead of
# saving the table. Pass the data frame and the destination file.
write.csv(fat3, "faturamento3.csv")
|
621c74e87188c027b4297b9943f39f0d10aedfc9
|
74b7902dcc4015bdc675c0670210be49b03edf2a
|
/cachematrix.R
|
a86528b3900ba4510749cddd0e82ffbfc24b04d1
|
[] |
no_license
|
MasonSwier/ProgrammingAssignment2
|
a6a70f794ce595f7177da6badc1f07605dac3c0e
|
f3d38b81fea50a092d81652ce1fd58cd96882df4
|
refs/heads/master
| 2021-05-17T18:15:25.978512
| 2020-03-29T02:28:11
| 2020-03-29T02:28:11
| 250,913,517
| 0
| 0
| null | 2020-03-28T23:20:04
| 2020-03-28T23:20:03
| null |
UTF-8
|
R
| false
| false
| 1,728
|
r
|
cachematrix.R
|
## Hello classmate! Thanks for reviewing my work.
## My function uses the makeVector and cacheMean functions as a template
## to cache the inverse of a matrix. In testing, I used the following matrices
## matrix(c(1,0,5,2,1,6,3,4,0),3,3)
## matrix(c(1,0,5,2,1,6,3,4,1),3,3)
## matrix(c(1,0,2,2,0,2,1,0,0,1,0,1,1,2,1,4),4,4)
## To use the functions, you'll first need to apply the makeCacheMatrix function
## to your matrix. For example....
## testMatrix <- makeCacheMatrix(matrix(c(1,0,5,2,1,6,3,4,0),3,3))
## Then you will apply the cacheSolve funtion to your matrix. For example...
## cacheSolve(testMatrix)
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix with getters/setters plus a cache slot for its inverse.
  ## The cached inverse lives in this call's environment and is cleared
  ## whenever a new matrix is stored via set().
  inverse_cache <- NULL
  store_matrix <- function(y) {
    x <<- y
    inverse_cache <<- NULL
  }
  fetch_matrix <- function() x
  store_inverse <- function(solve) inverse_cache <<- solve
  fetch_inverse <- function() inverse_cache
  ## Returned invisibly so creating the wrapper doesn't print the list.
  invisible(list(set = store_matrix, get = fetch_matrix,
                 setsolve = store_inverse,
                 getsolve = fetch_inverse))
}
## Return the inverse of the matrix held by a makeCacheMatrix() wrapper,
## computing it at most once. The first call inverts the matrix with
## solve() and stores the result via x$setsolve(); later calls return the
## cached copy after announcing the cache hit.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
bff0df41cb7eda7510e191e767394d9d099bdf5a
|
e05d6087304c9996106942f7ee54af60c4baeb42
|
/scripts/load_data.R
|
880278233b125ed4a8f7ee30bf8e3dc23a2a204f
|
[
"MIT"
] |
permissive
|
avallarino-ar/gene_expression_explorer
|
523a21063a1b3590cbc81485dadbc4238f63c66c
|
1d1cbee4b1348650bafdef7c4d79503e36fc37c1
|
refs/heads/main
| 2023-04-07T13:02:37.376928
| 2021-04-14T06:29:24
| 2021-04-14T06:29:24
| 357,788,946
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,768
|
r
|
load_data.R
|
# Locality-sensitive hashing (LSH) over gene-expression rows using random
# hyperplane projections: each gene's expression vector is hashed by t
# random projections, the hash codes are grouped into b buckets, and
# buckets holding many genes become candidate groups of similar genes.
# df_genes : data frame whose first column holds gene names and the
#            remaining columns numeric expression values
# Returns a tibble with one row per bucket (>= 8 members): the bucket key,
# its size, and the list of candidate gene names.
# NOTE(review): relies on tidyr (drop_na), purrr (flatten_chr), dplyr and
# gather() being attached by the caller — nothing is loaded here.
lsh_gene <- function(df_genes){
# Drop rows containing NA. They are few, but we should still decide
# whether to drop them or replace the NAs with 0.
df_genes <- drop_na(df_genes)
m <- as.matrix(df_genes[,-1])
rownames(m) <- df_genes[,1]
##########################################
## Helper functions
# Create a random unit vector of length n
crear_vector <- function(n) {
x <- runif(n, 0, 1)
x / sqrt(sum(x^2))
}
# Create a hash function from a random vector: project onto it and
# determine which segment (of width d) the projection falls into.
crear_hash <- function(n, d) {
v <- crear_vector(n)
f <- function(x) {
as.integer(round((v %*% x)/d, 0))
}
f
}
# Given a long vector of hash codes, split it into n_cubetas buckets.
# Each bucket is prefixed with a letter to keep keys distinct, which means
# this must change if more than 26 buckets are ever wanted.
# NOTE(review): ifelse() is used here for control flow (assignment inside
# one branch, stop() in the other) — works, but fragile; confirm intent.
crear_cubetas <- function(vector, n_cubetas) {
ifelse(length(vector)%%n_cubetas == 0,
t <- length(vector)/n_cubetas,
stop())
cubetas <- split(vector, ceiling(seq_along(vector)/t)) %>%
lapply(paste0, collapse = '-') %>%
flatten_chr()
paste0(letters[1:n_cubetas], '||', cubetas)
}
# Given two gene names, look them up in the candidate table and count in
# how many buckets they appear together.
# NOTE(review): defined but never called inside this function.
buscar_cubetas <- function(v1,v2, dd) {
conteo = 0
for (i in 1:nrow(dd)) {
exito <-
v1 %in% dd$candidatos[[i]] &
v2 %in% dd$candidatos[[i]]
conteo = conteo + as.integer(exito)
}
conteo
}
#######################################
### Application
# for a small trial run, a sample can be used instead:
#mm <- head(m, 2000)
mm <- m
t <- 200 # number of hash functions
# segment width on the hyperplane; controls the distance resolution of
# the hash family
delta <- 10
b <- 25 # number of buckets
v <- ncol(mm) # length of each expression vector
r <- t/b # hashes per bucket (NOTE(review): computed but unused)
# build the list with all the hash functions
lista <- replicate(t, crear_hash(v, delta))
# apply every hash function to every gene row
c <- sapply(lista, function(x) apply(mm, MARGIN=1, x))
# form the buckets (note: base::t() is still found here even though a
# numeric local named `t` exists, since R skips non-functions in calls)
cc <- t(apply(c, MARGIN=1, crear_cubetas, n_cubetas=b))
# group genes by bucket and keep only well-populated buckets
df_cubetas <- as_tibble(cc, rownames='gen') %>%
gather('v', 'cubeta', -gen) %>%
select(-v) %>%
group_by(cubeta) %>%
summarise(n_elementos = n(),
candidatos = list(gen)) %>%
filter(n_elementos >= 8) %>%
arrange(desc(n_elementos))
# drop single-element buckets (redundant after the >= 8 filter above)
df_cubetas <- filter(df_cubetas, n_elementos > 1)
return(df_cubetas)
}
|
8a6a351f487b69097f87c15e1e45493520732567
|
f5f02ca0fea2d41b306e00bcb4c06b489b879862
|
/DM/遺漏值.R
|
6738626580765ccbb6c90320371b37993fb28a5a
|
[] |
no_license
|
hanksky12/Institute-for-Information-Industry
|
2e2d2b4a77f0048bb99b4ad8fd02b068f3161fda
|
d0599f383bfcda83ed18bc812b2d60b624c9174f
|
refs/heads/master
| 2020-08-06T12:18:47.307680
| 2020-06-06T10:47:24
| 2020-06-06T10:47:24
| 212,971,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
遺漏值.R
|
# Missing-value basics.  NA must be written in upper case.
tmp <- c(2, 3, 8, NA, 4, NA, 9, 12, NA)
is.na(tmp)              # element-wise missing-value test
any(is.na(tmp))         # TRUE/FALSE: does the vector contain any missing data?
sum(is.na(tmp))         # total count of missing values
is.nan(0/0)             # 0/0 is NaN (not a number)
is.infinite(1/0)        # 1/0 is Inf
summary(tmp)            # summary() also reports the number of NAs
|
91874f48ba69ee26c83d5ed7b5962bb4c297fef5
|
f6f827705ff14dd30922d4c88a22b45c30562d95
|
/venn/venn.R
|
c128a88fb4fb65006363fc67300d6a6a728a6658
|
[] |
no_license
|
neocruiser/thesis2014
|
4a34cec26dda28088ef96eb3ec64d140faa18bfb
|
e3411074d66afc982f7f62ce89c6bc4827509274
|
refs/heads/master
| 2021-01-10T19:48:25.522504
| 2018-03-22T19:13:18
| 2018-03-22T19:13:18
| 18,043,071
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,692
|
r
|
venn.R
|
## Launch 01funcs.R
#########################################################################
# SECTION -1- #
#########################################################################
# BUG FIX: import overLapper()/vennPlot() BEFORE first use.  In the
# original script this source() call appeared only after overLapper()
# had already been invoked, which fails in a fresh R session.
source("http://faculty.ucr.edu/~tgirke/Documents/R_BioCond/My_R_Scripts/overLapper.R") # Imports required functions.
ratio <- matrix(sample(seq(-5, 5, by=0.1), 100, replace=TRUE), 100, 4, dimnames=list(paste("g", 1:100, sep=""), paste("DEG", 1:4, sep="")), byrow=TRUE)
# Creates a sample matrix of gene expression log2 ratios. This could be any data type!
setlistup <- sapply(colnames(ratio), function(x) rownames(ratio[ratio[,x]>=1,]))
setlistdown <- sapply(colnames(ratio), function(x) rownames(ratio[ratio[,x]<=-1,]))
# Identifies all genes with at least a two fold up or down regulation and stores the corresponding gene identifiers
# in setlistup and setlistdown, respectively.
OLlistup <- overLapper(setlist=setlistup, sep="_", type="vennsets")
OLlistdown <- overLapper(setlist=setlistdown, sep="_", type="vennsets")
counts <- list(sapply(OLlistup$Venn_List, length), sapply(OLlistdown$Venn_List, length))
vennPlot(counts=counts, ccol=c("red", "blue"), colmode=2, mysub="Top: DEG UP; Bottom: DEG Down", yoffset=c(0.3, -0.2))
# A second, independent example: a non-proportional 5-way Venn diagram
# built from random letter sets.
setlist <- list(A=sample(letters, 18), B=sample(letters, 16), C=sample(letters, 20), D=sample(letters, 22), E=sample(letters, 18), F=sample(letters, 22, replace=TRUE))
setlist5 <- setlist[1:5]
OLlist5 <- overLapper(setlist=setlist5, sep="_", type="vennsets")
counts <- sapply(OLlist5$Venn_List, length)
vennPlot(counts=counts, ccol=c(rep(1,30),2), lcex=1.5, ccex=c(rep(1.5,5), rep(0.6,25),1.5)) # Plots a non-proportional 5-way Venn diagram.
|
d5fc5e79065ec7a82ffafd4328b20df6ec52508f
|
ac8f6c9cef4f67fc84ad86c30b822e9d68779852
|
/run_analysis.R
|
66f906955121a3f5c851b1aff6b912a10b29a671
|
[] |
no_license
|
JakeSaunders/GettingAndCleanData
|
dcecc1ad5e0fc7a305e60d04a86ff76f372b3791
|
7bb946b25f29884ded64fb71416505cf73b7404d
|
refs/heads/master
| 2021-01-09T09:19:51.753915
| 2016-06-29T18:52:31
| 2016-06-29T18:52:31
| 62,195,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,687
|
r
|
run_analysis.R
|
# Getting and Cleaning Data Course Project ----
#
# The purpose of this project is to demonstrate your ability to collect,
# work with, and clean a data set. The goal is to prepare tidy data that can be
# used for later analysis. You will be graded by your peers on a series of
# yes/no questions related to the project. You will be required to submit: 1) a
# tidy data set as described below, 2) a link to a Github repository with your
# script for performing the analysis, and 3) a code book that describes the
# variables, the data, and any transformations or work that you performed to
# clean up the data called CodeBook.md. You should also include a README.md in
# the repo with your scripts. This repo explains how all of the scripts work and
# how they are connected.
#
# One of the most exciting areas in all of data science right now is wearable
# computing - see for example this article . Companies like Fitbit, Nike, and
# Jawbone Up are racing to develop the most advanced algorithms to attract new
# users. The data linked to from the course website represent data collected from
# the accelerometers from the Samsung Galaxy S smartphone. A full description is
# available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# You should create one R script called run_analysis.R that does the following.
# #
# # 1 Merges the training and the test sets to create one data set.
# #
# # 2 Extracts only the measurements on the mean and standard deviation for each measurement.
#
# # 3 Uses descriptive activity names to name the activities in the data set
#
# # 4 Appropriately labels the data set with descriptive variable names.
#
# # 5 From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
## Start My code
### install packages, download zip file and uzip
# NOTE(review): install.packages() in a script re-installs on every run;
# fine for a one-off course submission, not for reusable code.
install.packages("dplyr")
install.packages("reshape2")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",destfile="data.zip")
unzip("data.zip")
library("dplyr")
library("reshape2")
### read required data into data frames
# NOTE(review): the UCI archive ships the files as "X_test.txt" /
# "X_train.txt" (capital X); the lower-case names below presumably only
# work on case-insensitive filesystems — TODO confirm.
features <- read.table("UCI HAR Dataset/features.txt", header = FALSE)
test.subject <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
test.y <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
test.x <- read.table("UCI HAR Dataset/test/x_test.txt", header = FALSE)
train.subject <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
train.y <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
train.x <- read.table("UCI HAR Dataset/train/x_train.txt", header = FALSE)
### combine test and train data into columns
# Train rows come first, then test rows — the same order in all three
# tables, so they can later be column-bound.
subject <- bind_rows(train.subject, test.subject)
y <- bind_rows(train.y, test.y)
x <- bind_rows(train.x, test.x)
### build column names list, make boolean list to determine which columns are kept
# Prepend "Subject" and "Activity" to the 561 feature names, then keep
# only those plus any column mentioning mean/std.
# NOTE(review): `names` here shadows base::names at the top level.
sub <- data.frame("V1" = -1, "V2" = "Subject")
act <- data.frame("V1" = 0, "V2" = "Activity")
name <- bind_rows(sub, act)
name <- bind_rows(name, features)
names <- as.list(name$V2)
keep.these <- grepl("Subject|Activity|.*mean.*|.*std.*", names)
### convert number code to activity names
y$V1[y$V1 == 1] <- "Walking"
y$V1[y$V1 == 2] <- "WalkingUpstairs"
y$V1[y$V1 == 3] <- "WalkingDownstairs"
y$V1[y$V1 == 4] <- "Sitting"
y$V1[y$V1 == 5] <- "Standing"
y$V1[y$V1 == 6] <- "Laying"
### build combined data frame and name columns
raw.data <- bind_cols(subject, y, x)
colnames(raw.data) <- names
### select columns for subject, activity, any for mean or std
data <- raw.data[ , keep.these]
### produce averages for each Subject - Activity pair
# Melt to long form, then cast back taking the mean of every variable
# for each (Subject, Activity) combination.
data$Activity <- as.factor(data$Activity)
data$Subject <- as.factor(data$Subject)
data.all <- data
data <- melt(data, id = c("Subject", "Activity"))
data <- dcast(data, Subject + Activity ~ variable, mean)
### clean up symbols in column names
# Expand the terse feature abbreviations into readable CamelCase names.
names <- names(data)
names <- gsub('[-()]', '', names)
names <- gsub("mean", "Mean", names)
names <- gsub('std', 'Std', names)
names <- gsub('Std', 'StandardDeviation', names)
names <- gsub("^t", 'Time', names)
names <- gsub("^f", "Frequency", names)
names <- gsub("Acc", "Accelerometer", names)
names <- gsub("Gyro", "Gyroscope", names)
names <- gsub("Mag", "Magnitude", names)
names <- gsub("BodyBody", "Body", names)
names(data) <- names
names(data.all) <- names
### Save file as comma seperated value files, remove unneeded objects
write.table(data, file = "datameans.txt")
write.table(data.all, file = "dataall.txt")
rm(features,subject,x,y,keep.these,train.subject, test.subject, train.y, test.y, train.x, test.x,sub, act, name, raw.data)
# must submit: ----
#
# 1) a tidy data set as described below,
#
# 2) a link to a Github repository with your script for performing the analysis
#
# 3) a code book that describes the variables, the data, and any transformations
# or work that you performed to clean up the data called CodeBook.md.
#
# 4) You should also include a README.md in the repo with your scripts. This repo
# explains how all of the scripts work and how they are connected.
|
2ecdfa9d365fa1cda9e1ad01253bcf71e188bc42
|
f7f5f38a8b8e74b5011eb0ae0e875bbaddaa814c
|
/dashBoard&Insights(R,R Shiny)/mapAndR/RouteMobileDashboard/ui.R
|
22906e05ff6a193a418396708849bca4da0ed80b
|
[] |
no_license
|
sarikayamehmet/Rshiny-Python-hadoop-Others
|
dce863158786cc48e74c6d1041927744320d0d99
|
07649cc62b01e825ee9e8c6f6cefad3a57899df9
|
refs/heads/master
| 2020-03-20T04:26:06.404516
| 2018-04-03T10:25:43
| 2018-04-03T10:25:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,427
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(leaflet)
shinyUI(fluidPage(
# Application title
titlePanel("Dashboard"),
# Sidebar with dropdown filters (user, VMN, terminating operator/circle)
# and a date-range picker; the selections feed the server-side plots.
sidebarLayout(
sidebarPanel(
# User to analyse
selectInput("userId",label = "select the userID", choices = list(950,737),
selected = 737) ,
# Virtual mobile number
selectInput("vmn",label = "select the VMN", choices = list(9222281818,9029906308),
selected = 9222281818) ,
selectInput(inputId = "terminatingOperator", label = "Select the Terminating Operator",
choices = c("BSNL","IDEA")
),
selectInput(inputId = "terminatingCircle", label = "Select the Terminating Circle",
choices = c("Delhi","Mumbai")),
dateRangeInput(inputId ="dateRangeID",label = "Date",format = "yyyy-mm-dd")
),
# Main area: one tab per visualisation, rendered by the server as
# "mymap", "plot1", "plot2" and "table".
mainPanel(
tabsetPanel(
tabPanel("MAP(Message delivered state wise)",leafletOutput("mymap")),
tabPanel("Plot-Missed Call per Hour(filter=VMN and Date)",plotOutput("plot1")),
tabPanel("Terminating circle Analysis (Filter = Date,Terminating Circle)",plotOutput("plot2")),
tabPanel("summary", dataTableOutput("table"))
)
)
)
)
)
|
004b0531e72525f29159ecdafdd094c62b9bbd7e
|
ca18e485b00a7395090ab7ea634163b7d62ab9bb
|
/Project Code.R
|
a22394a65444ef723b05865d099a3d22ed1dcf57
|
[
"MIT"
] |
permissive
|
JiyadRehman/Logistic-Regression-and-Decision-Trees
|
75e668c1259f9e7034709d43bfaa78fa7755e1bc
|
826a0596e5c03de5242a3375a97249b46cadb628
|
refs/heads/master
| 2021-03-10T06:47:45.204203
| 2020-03-10T23:44:57
| 2020-03-10T23:44:57
| 246,431,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,208
|
r
|
Project Code.R
|
# ---------- ASSIGNMENT 3 -----------------
# --------- Libraries ---------------------
# NOTE(review): bare install.packages() calls re-install on every run;
# consider guarding with requireNamespace() before installing.
install.packages("questionr")
install.packages("ROCR") # for LIFT, GAIN, AUC chart
install.packages("pROC") # for AUC value
library(caTools) # for sample.split
library(ggplot2)
library(caret) # for confusion matrix
library(ROCR) # for LIFT, GAIN, AUC chart
library(pROC) # for AUC
library(questionr)
# --------- Libraries ---------------------
# ---- DATA LOADING -----------------------
library(readxl)
Churn <- read_excel("Churn(1).xlsx")
View(Churn)
# ---- DATA LOADING -----------------------
# ----- WORKING ---------------------------
# ----------- PART 1 ------------------------------
#----------- PART (a) --------------------
# Churn rate = share of customers with Churn == 1, as a percentage.
Totlen <- length(Churn$Churn)
chulen <- length(which(Churn$Churn == 1))
ChurnRate <- (chulen/Totlen)*100
rm(Totlen, chulen)
# Churn Rate = 14.49145%
#----------- PART (a) --------------------
#----------- PART (b) --------------------
# 60/40 stratified train/validation split; seed fixed for reproducibility.
set.seed(12345)
split = sample.split(Churn$Churn, SplitRatio = 0.6)
training <- subset(Churn, split == TRUE)
validation <- subset(Churn, split == FALSE)
ggplot(training, aes(x=RoamMins, y=Churn)) +
geom_point(aes(col=as.factor(ContractRenewal)), size = 4) + labs(col = 'ContrantRenewal') +
ggtitle("Churn vs. RoamMins colored by ContractRenewal")
#----------- PART (b) --------------------
#----------- PART (c) --------------------
# Model C: logistic regression on a hand-picked subset of predictors;
# classify with a 0.5 probability cutoff.
model <- glm(Churn ~ AccountWeeks + DataUsage + CustServCalls + DayMins + DayCalls +
MonthlyCharge + OverageFee + RoamMins ,
data=training, family=binomial)
mylogit.probs<-predict(model,validation,type="response")
lv <- length(validation$Churn)
mylogit.pred = rep(0, lv)
mylogit.pred[mylogit.probs > 0.5] = 1
confusionMatrix(as.factor(mylogit.pred), as.factor(validation$Churn), positive = NULL, dnn = c("Predicted", "Actual"))
length(which(validation$Churn == 0))
rm(lv)
#----------- PART (c) --------------------
#----------- PART (d) --------------------
# Model D: logistic regression on ALL predictors, same 0.5 cutoff.
model2 <- glm(Churn ~. ,
data=training, family=binomial)
mylogit.probs2<-predict(model2,validation,type="response")
lv <- length(validation$Churn)
mylogit.pred2 = rep(0, lv)
mylogit.pred2[mylogit.probs2 > 0.5] = 1
confusionMatrix(as.factor(mylogit.pred2), as.factor(validation$Churn), positive = NULL, dnn = c("Predicted", "Actual"))
rm(lv)
#----------- PART (d) --------------------
#----------- PART (e) --------------------
# Calculation based on part(c) and part(d)
#----------- PART (e) --------------------
#----------- PART (f) --------------------
# *********** ROC curves and AUC values ******************************
# for MODEL C ----------------------------------------
rocdf <- cbind(validation,mylogit.probs)
rocdf$res <- as.factor(ifelse(rocdf$mylogit.probs>0.5, 1, 0))
logit_scores <- prediction(predictions=rocdf$mylogit.probs, labels=rocdf$Churn)
#PLOT ROC CURVE
logit_perf <- performance(logit_scores, "tpr", "fpr")
plot(logit_perf,
main="ROC Curve for Model C",
xlab="1 - Specificity: False Positive Rate",
ylab="Sensitivity: True Positive Rate",
col="darkblue", lwd = 3)
abline(0,1, lty = 300, col = "green", lwd = 3)
grid(col="aquamarine")
# AREA UNDER THE CURVE
logit_auc <- performance(logit_scores, "auc")
as.numeric(logit_auc@y.values) ##AUC Value
auc(roc(rocdf$Churn,rocdf$mylogit.probs))
# for MODEL D ------------------------------------
rocdf2 <- cbind(validation,mylogit.probs2)
rocdf2$res <- as.factor(ifelse(rocdf2$mylogit.probs2>0.5, 1, 0))
# BUG FIX: the original passed rocdf2$mylogit.probs here.  That column
# does not exist in rocdf2 (it is named "mylogit.probs2") and only
# resolved through data.frame $ partial matching; reference the intended
# column explicitly (the auc() call below already does).
logit_scores2 <- prediction(predictions=rocdf2$mylogit.probs2, labels=rocdf2$Churn)
#PLOT ROC CURVE
logit_perf2 <- performance(logit_scores2, "tpr", "fpr")
plot(logit_perf2,
main="ROC Curve for Model D",
xlab="1 - Specificity: False Positive Rate",
ylab="Sensitivity: True Positive Rate",
col="darkblue", lwd = 3)
abline(0,1, lty = 300, col = "green", lwd = 3)
grid(col="aquamarine")
# AREA UNDER THE CURVE
logit_auc2 <- performance(logit_scores2, "auc")
as.numeric(logit_auc2@y.values) ##AUC Value
auc(roc(rocdf2$Churn,rocdf2$mylogit.probs2))
# *********** ROC curves and AUC values ******************************
# ********** LIFT chart ********************************************
# for MODEL C ------------------------------------
logit_lift <- performance(logit_scores, measure="lift", x.measure="rpp")
plot(logit_lift,
main="Lift Chart for Model c",
xlab="% Populations (Percentile)",
ylab="Lift",
col="darkblue", lwd = 3)
abline(1,0,col="red", lwd = 3)
grid(col="aquamarine")
# for MODEL D ------------------------------------
logit_lift2 <- performance(logit_scores2, measure="lift", x.measure="rpp")
plot(logit_lift2,
main="Lift Chart for Model d",
xlab="% Populations (Percentile)",
ylab="Lift",
col="darkblue", lwd = 3)
abline(1,0,col="red", lwd = 3)
grid(col="aquamarine")
#----------- PART (f) --------------------
# ----------- PART 1 ------------------------------
###############################################################################
# ----------- PART 2 ------------------------------
Web.Robot <- read.csv("Web Robot.csv")
#----------- PART (a) --------------------
# Count and proportion of web-robot sessions (Robot == 1).
WebRobots <- length(which(Web.Robot$Robot == 1))
WebRobots/length(Web.Robot$Robot)
# WebRobots = 449
#----------- PART (a) --------------------
#----------- PART (b) --------------------
# Recode the target to "Yes"/"No" and make a 60/40 stratified split.
Web.Robot$Robot = ifelse(Web.Robot$Robot==1,"Yes","No")
set.seed(12345)
split = sample.split(Web.Robot$Robot, SplitRatio = 0.6)
training2 <- subset(Web.Robot, split == TRUE)
validation2 <- subset(Web.Robot, split == FALSE)
#---------------- Libraries ---------------------------------------
install.packages("tree")
install.packages("rpart")
install.packages("rattle")
install.packages("rpart.plot")
install.packages("RColorBrewer")
install.packages("party")
install.packages("partykit")
install.packages("caret")
library(rpart) # Popular decision tree algorithm
library(rattle) # Fancy tree plot
library(rpart.plot) # Enhanced tree plots
library(RColorBrewer) # Color selection for fancy tree plot
library(party) # Alternative decision tree algorithm
library(partykit) # Convert rpart object to BinaryTree
library(caret) # Just a data source for this script
library(tree)
#---------------- Libraries ---------------------------------------
# Web.Robot$Robot = ifelse(Web.Robot$Robot==1,"Yes","No")
# Fit a classification tree and plot it in three styles.
training2$Robot <- as.factor(training2$Robot)
tree1 <- rpart(Robot~., training2, method = "class")
#tree1 <- rpart(Robot~., training2)
prp(tree1,varlen=2) # Shorten variable names if too many variables
rpart.plot(tree1)
# Each node shows:
# - the predicted class (Yes or No)
# - the predicted probability of Yes
# - the percentage of observations in the node
fancyRpartPlot(tree1) # A fancy plot from rattle
# Confusion table of tree predictions on the validation set.
Act1 <- validation2$Robot
tree.pred = predict(tree1 , validation2, type="class")
table(tree.pred,Act1)
#----------- PART (b) --------------------
#----------- PART (c) --------------------
# Calculation done on word file.
#----------- PART (c) --------------------
#----------- PART (d) --------------------
# Random forest
library(randomForest)
# NOTE(review): bare expression below only prints column subset when run
# interactively; column 13 is presumably the Robot target — TODO confirm.
training2[-13]
# trainData = data.frame( x=training2[-13], y=training2[13] )
Model3 <- randomForest(training2[,-13], as.factor(training2[,13]), ntree=500 )# ntree=500
# Predict on the validation predictors (target column dropped).
Mod3Pre = predict(Model3,validation2[-13])
Act <- validation2$Robot
table(Mod3Pre,Act)
length(which(validation2$Robot == "No"))
#test <- importance(Model3)
#test <- as.data.frame(test)
# Variable importance: matrix plus a plot of the second measure.
Model3$importance
varImpPlot(Model3,type=2)
#----------- PART (d) --------------------
#----------- PART (e) --------------------
# Compare error rate
#----------- PART (e) --------------------
|
60b7adb1bd064af3d81384c629dcd5dbe4c5116c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rLakeAnalyzer/examples/lake.number.Rd.R
|
dd59f0216060793bba69249b9e66c287bbe9c19e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 461
|
r
|
lake.number.Rd.R
|
library(rLakeAnalyzer)
### Name: lake.number
### Title: Calculate Lake Number
### Aliases: lake.number
### Keywords: manip
### ** Examples
# Bathymetry: cross-sectional areas (bthA) at the matching depths (bthD).
bthA <- c(1000,900,864,820,200,10)
bthD <- c(0,2.3,2.5,4.2,5.8,7)
# Two observations each of water-friction velocity (uStar) and Schmidt
# stability (St), plus metalimnion top/bottom depths and average
# hypolimnion density — argument meanings per ?lake.number.
uStar <- c(0.0032,0.0024)
St <- c(140,153)
metaT <- c(1.34,1.54)
metaB <- c(4.32,4.33)
averageHypoDense <- c(999.3,999.32)
cat('Lake Number for input vector is: ')
cat(lake.number( bthA, bthD, uStar, St, metaT, metaB, averageHypoDense) )
|
99528553fb99d2e525c744f5462376e51176971d
|
b528d94619a6b701bb2ea80c4d428802a045bac1
|
/man/CommandArgs.Rd
|
050ec9ce44a7d7e4fa062707ce5b42cd2943c93b
|
[] |
no_license
|
BigelowLab/rscripting
|
f4efb0cdbd0de9522631fdcbdca9b128502a5a8d
|
e32fea9bae939872e91ca63b6c424e40f8fd6f65
|
refs/heads/master
| 2022-11-08T13:56:55.714506
| 2022-10-17T20:13:36
| 2022-10-17T20:13:36
| 36,942,162
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 891
|
rd
|
CommandArgs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CommandArgs.R
\name{CommandArgs}
\alias{CommandArgs}
\title{Generate a CommandArgs reference}
\usage{
CommandArgs(args = commandArgs(trailingOnly = FALSE), name = "program_name",
help = NULL)
}
\arguments{
\item{args}{a character vector as returned by \code{\link{commandArgs}}
or NULL}
\item{name}{character name of the object}
\item{help}{a character vector of helpful information}
}
\value{
a CommandArgsRefClass instance
}
\description{
Generate a CommandArgs reference
}
\seealso{
Other CommandArgs: \code{\link{CommandArgs_add_argument}},
\code{\link{CommandArgs_add}},
\code{\link{CommandArgs_get_all}},
\code{\link{CommandArgs_get}},
\code{\link{CommandArgs_help_called}},
\code{\link{CommandArgs_parse_arguments}},
\code{\link{CommandArgs_print_help}},
\code{\link{CommandArgs_show}}
}
|
ce11f63d2a2d66368d02f07d15f3dd01563f0ac0
|
890e646bc0a742fbea5d3d8929d5ed1f6516dab9
|
/fishmodel.r
|
6aedf8e24f191cedcd0217a408d072dcc360c51c
|
[] |
no_license
|
mengeln/Bray-Curtis-Reef-Model
|
14df5701f8393e90dfdc52ea0ca0e55a19f1457c
|
b304427c64c0090c12aed5e9e2ebec4ccdddc372
|
refs/heads/master
| 2021-01-16T20:34:20.241699
| 2014-04-09T18:38:09
| 2014-04-09T18:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,161
|
r
|
fishmodel.r
|
library(randomForest)
library(reshape2)
library(plyr)
## Make a model to determine if Reference status can be classified directly by fish abundance
#Read in data
species <- read.csv("Bio_all_species_level.csv",
                    stringsAsFactors=FALSE)
habitat <- read.csv("habitat_fish_trim.csv",
                    stringsAsFactors=FALSE)
#Format
# Wide table: one row per SampleID, one column per fish species, cell =
# mean abundance (0 where the species was absent).
fishdat <- dcast(species[species$Assemblage =="Fish", ],
                 SampleID ~ Species,
                 fill=0,
                 fun.aggregate=mean, na.rm=TRUE,
                 value.var="Abundance")
# Attach the 1980 reference status from the habitat table as the target.
fishdat$Status <- as.factor(
  habitat$ref_1980[match(fishdat$SampleID, habitat$SampleID)])
#Bootstrap data to make Ref/Non-Ref balanced
# Resample (with replacement) 250 rows per Status level.
fishdat <- ddply(fishdat, .(Status), function(x){
  x[sample(1:nrow(x), 250, replace=TRUE), ]
})
#Make a classification model
# Column 1 (SampleID) is excluded from the predictors.
fishmod <- randomForest(data=na.omit(fishdat[, -1]),
                        Status ~ .,
                        ntree=5000,
                        importance=TRUE)
#Extract the most important species from the model
# Column 3 of the importance matrix is used with a threshold of 60;
# presumably MeanDecreaseAccuracy — TODO confirm against the fitted object.
impfish <- importance(fishmod)
goodfish <- names(which(impfish[order(impfish[, 3], decreasing=TRUE), 3] > 60))
|
e0d9a22e771338888f761147e1bd484e1242d082
|
daa82962840d74f9519bca7557fefc33c9ab03ce
|
/Salary_SVM.R
|
ff335a68051ddd8bfbba75f7273993b858e5f6f9
|
[] |
no_license
|
AshwinKoushik/R-Codes
|
4e7cb9f00828cf449f6c1e72622369d94e18407b
|
81e99dee6beae286388f1d2941d2bfafed75e4c3
|
refs/heads/master
| 2022-12-04T18:35:53.891118
| 2020-08-15T16:08:49
| 2020-08-15T16:08:49
| 276,684,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,976
|
r
|
Salary_SVM.R
|
# Compare SVM kernels (kernlab::ksvm) for predicting Salary class,
# reporting validation accuracy for each kernel.
install.packages("readr")
install.packages("kernlab")
install.packages("caret")
library(readr)
library(kernlab)
library(caret)
# Importing Train data
data_train <- read.csv("E:/Data Science/SVM/Assignment/SalaryData_Train.csv")
View(data_train)
table(data_train$Salary)
# Structure of the data
str(data_train)
# Finding NA values
sum(is.na(data_train))
summary(data_train)
# Importing test data
data_test <- read.csv("E:/Data Science/SVM/Assignment/SalaryData_Test.csv")
View(data_test)
# Model Building
# Accuracy = mean of (predicted == actual) on the test set.
# Kernel=vanilladot
model <-ksvm(Salary ~.,data=data_train,kernel = "vanilladot")
pred_model <- predict(model,newdata = data_test)
mean(pred_model==data_test$Salary) # 84.62%
#kernel=rbfdot
model_rbfdot <- ksvm(Salary ~.,data=data_train,kernel = "rbfdot")
pred_model <- predict(model_rbfdot,newdata = data_test)
mean(pred_model==data_test$Salary) # 85.43%
#kernel = polydot
model_polydot <- ksvm(Salary ~.,data=data_train,kernel = "polydot")
pred_model <- predict(model_polydot,newdata = data_test)
mean(pred_model==data_test$Salary) # 84.62%
#kernel = tanhdot
model_tanhdot <- ksvm(Salary ~.,data=data_train,kernel = "tanhdot")
pred_model <- predict(model_tanhdot,newdata = data_test)
mean(pred_model==data_test$Salary) # 66.38%
#kernel = laplacedot
model_laplacedot <- ksvm(Salary ~.,data=data_train,kernel = "laplacedot")
pred_model <- predict(model_laplacedot,newdata = data_test)
mean(pred_model==data_test$Salary) # 85.25%
#kernel=besseldot
model_besseldot <- ksvm(Salary ~.,data=data_train,kernel = "besseldot")
pred_model <- predict(model_besseldot,newdata = data_test)
mean(pred_model==data_test$Salary) # 77.03%
#kernel=anovadot
model_anovadot <- ksvm(Salary ~.,data=data_train,kernel = "anovadot")
pred_model <- predict(model_anovadot,newdata = data_test)
mean(pred_model==data_test$Salary) # 78.26%
# The best model is rbfdot model, since it has 85.43% accuracy, Highest among all.
|
ef44882c8ab1c352311d544ef95ad1ce10ab1128
|
7a2939a3f1de35e316f8712ec0d33d8241486bb7
|
/code/mapping_vi_estates.R
|
8b8ce3da950ab5fe0a896a6337b48b7b432a2daa
|
[] |
no_license
|
JahNorr/mapping
|
8c3bef9c37edbd20f7be82254efa91a5a34a0d71
|
3cfec32f988caee18e37d5da1d546420586538ea
|
refs/heads/master
| 2023-08-17T20:47:48.074185
| 2023-08-16T05:03:49
| 2023-08-16T05:03:49
| 44,315,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,319
|
r
|
mapping_vi_estates.R
|
# Load prebuilt objects used by the functions below; presumably defines
# `estates` (polygon data) and `limits` (per-island lat/lon bounds) — TODO confirm.
load("./data/vi_estates.RData")
vi_map_limits <- function(x) {
  # Map an island name or abbreviation to its column of the global
  # `limits` table (loaded from vi_estates.RData) and return the
  # lat/lon bounds for that island.
  #
  # x: one of "St. Croix"/"STX"/"Croix", "St. Thomas"/"STT"/"Thomas",
  #    "St. John"/"STJ"/"John".
  if (x %in% c("St. Croix","STX","Croix")) {
    island <- "St. Croix"
  } else if (x %in% c("St. Thomas","STT","Thomas")) {
    island <- "St. Thomas"
  } else if (x %in% c("St. John","STJ","John")) {
    island <- "St. John"
  } else {
    # BUG FIX: the original fell through with an opaque
    # "object 'island' not found" error; fail informatively instead.
    stop("Unknown island: ", x, call. = FALSE)
  }
  return (limits[,c(island)])
}
vi_county_to_island <- function(x) {
  # Translate a US Virgin Islands county FIPS code ("010"/"020"/"030")
  # to its island abbreviation. Unknown codes fall through and return
  # NULL invisibly, matching the original behaviour.
  lookup <- list("010" = "STX", "020" = "STJ", "030" = "STT")
  for (code in names(lookup)) {
    if (x == code) return(lookup[[code]])
  }
}
vi_estate_data <- function() {
  # Assemble a data frame of estate metadata from the global `estates`
  # object (loaded from vi_estates.RData):
  #   Estate.Code  - estate FIPS code coerced to integer
  #   Estate.Name  - estate name
  #   Island       - island abbreviation derived from the county code
  df <- data.frame(estates$ESTATEFP, stringsAsFactors = FALSE)
  names(df) <- "Estate.Code"
  # Go through character first so factor codes convert by label, not level.
  df[["Estate.Code"]] <- as.integer(as.character(df[["Estate.Code"]]))
  df[["Estate.Name"]] <- estates$NAME
  df[["Island"]] <- mapply(vi_county_to_island, estates$COUNTYFP)
  df
}
vi_estate_codes <- function() {
  # All estate FIPS codes from the global `estates` object, as a
  # one-column data frame.
  data.frame(estates$ESTATEFP)
}
# Plot the global `estates` polygons cropped to one island's lat/lon
# window (looked up via vi_map_limits), colouring polygons with `colors`.
# island_name: island name/abbreviation accepted by vi_map_limits.
# colors:      fill colour(s) passed straight to plot().
# show.axes:   draw axes?  main: optional plot title.
show_island_plot<-function(island_name,colors,show.axes=FALSE, main=NULL) {
maplims<-vi_map_limits(island_name)
# NOTE(review): par() is modified without saving/restoring the previous
# settings, so this leaks graphics state to the caller.
par(mar=c(3.0, 1.5, 3, 1.5))
#==============================================
# try this later
# density=6,angle=45,
#
plot(estates,axes = show.axes,main = main ,xlim=c(maplims["minlon"],maplims["maxlon"]),ylim=c(maplims["minlat"],maplims["maxlat"]), col=colors, border=TRUE) #plot the species range
}
#points(samps$lon, samps$lat, pch=19, col="red", cex=0.5) #plot my sample sites
|
419749c7765a9c74b62af450b5debc84eb2291bf
|
192b7af458b5837bc258d30d640e18b67ae14c13
|
/old/boots2.R
|
bde594db78477aabee123882a0180586571a4bad
|
[] |
no_license
|
OldMortality/overdispersion
|
2d2b8190c04db098e122d763394487426af542a8
|
ae8dbb7018bfe8af5798145260df12341188e027
|
refs/heads/master
| 2021-06-30T17:13:52.766856
| 2020-09-27T00:47:05
| 2020-09-27T00:47:05
| 170,043,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,320
|
r
|
boots2.R
|
library(MASS) # for "rnegbin"
library(rmutil) # for "rinvgauss"
# calculate CI's for phi, based on chisquare distribution
# with phihat1 and phihat2, and on Gamma distribution
# (phihat2 only), and fitting only Negative binomial
# for now.
# Plots first 100 of the simulations, with error rates.
#
par(mfrow=c(1,1))
phi <- 2
# Simulation setup: Poisson GLM fitted to negative-binomial data with
# true dispersion phi; n observations, linear predictor beta1 + beta2*x.
{
n<-30
beta<-c(1,1)
x<-seq(0,1,length.out=n)
p<-length(beta)
eta<-beta[1]+beta[2]*x
mu<-exp(eta)
nu<-phi-1
mn<-mu/nu
w<-(mn+1)/mn
N <- 2000
# estimates of phi
phihats2 <- vector()
ks <- vector()
thetas <- vector()
}
alpha3.hats <- vector()
bias.true <- vector()
bias.hat <- vector()
thetas <- vector()
ks <- vector()
alpha3.true <- phi*(2*phi-1)
# Main simulation loop: for each replicate, estimate the dispersion
# (phihat2 = bias-adjusted Pearson estimator) and its bias terms.
for (sim in 1:N) {
print(sim)
y<-rnegbin(n,mu,mn) # negative binomial
muhat<-fitted(glm(y~x,family="poisson"))
P<-sum((y-muhat)^2/muhat)
sbar<-mean((y-muhat)/muhat)
phihat1<-P/(n-p)
phihat2<-phihat1/(1+sbar)
phihats2[sim] <- phihat2
# chisquare CI
df <- n-p
#
e <- y-muhat
# Third-moment estimate used in the bias correction.
alpha3.hat <- (1/df) * sum( (e^3/muhat))
alpha3.hats[sim] <- alpha3.hat
# work out S
W <- diag(muhat)
W.true <- diag(mu)
X1 <- rep(1,n)
X2 <- x
X <- cbind(X1,X2)
# Hat-type matrices under estimated and true weights.
Q <- X %*% solve(t(X) %*% W %*% X) %*% t(X)
Q.true <- X %*% solve(t(X) %*% W.true %*% X) %*% t(X)
S <- sum(1/muhat) + n * sum(diag(Q))-sum(Q)
S.true <- sum(1/mu) + n * sum(diag(Q.true))-sum(Q.true)
bias.true[sim] <- (alpha3.true/phi-phi)*S.true/n
bias.hat[sim] <- (alpha3.hat/phihat2-phihat2)*S/n
ktheta <- df - bias.hat[sim]
ktheta2 <- 2 * ktheta
# NOTE(review): theta = ktheta2/ktheta is always exactly 2 by
# construction here, so k is always ktheta/2 — possibly a placeholder
# for a moment-matched Gamma(k, theta); confirm the intended formulas.
theta <- ktheta2 / ktheta
k <- ktheta / theta
thetas[sim] <- theta
ks[sim] <- k
}
mean(ktheta)
mean(bias.hat)
hist(bias.hat,60,
main=paste('phi=',phi,'n=',n,sep=' '))
abline(v=bias.true[1],col='red')
par(mfrow=c(1,1))
# Overlay chi-square (red) and Gamma (blue) reference densities on the
# histogram of scaled dispersion estimates.
for (i in 1:9) {
hist((n-p)*phihats2/phi,probability = T,
ylim=c(0,0.4),
xlim=c(0,2 * n),100)
c<- seq(0,qchisq(0.9999,df),0.1)
y1<-dchisq(c,df=n-p)
lines(c,y1,col='red')
# NOTE(review): magic index 112 picks one replicate's (k, theta);
# a few lines below the mean over replicates is used instead.
y2 <- dgamma(c,shape=ks[112],scale=thetas[112])
lines(c,y2,col='blue')
}
par(mfrow=c(1,1))
hist(alpha3.hats,60)
abline(v=alpha3.true,col='red')
mean(alpha3.hats)
alpha3.true
hist(bias.true,60)
# plot estimates of phihat's
#par(mfrow=c(2,2))
# NOTE(review): `phihats1` below is never assigned in this version of the
# script (only phihats2 is filled in the loop), so this block will error.
{
print(paste(phi,
round(mean((n-p)*phihats1/phi),2),
round(var((n-p)*phihats1/phi),2),
round(mean((n-p)*phihats2/phi),2),
round(var((n-p)*phihats2/phi),2),
sep=' '))
par(mfrow=c(1,1))
main=paste('phi=',phi)
hist((n-p)*phihats2/phi,probability = T,
main=main,ylim=c(0,0.4),
xlim=c(0,2 * n),100)
c<- seq(0,qchisq(0.9999,df),0.1)
y1<-dchisq(c,df=n-p)
lines(c,y1,col='red')
y2<- dgamma(c,shape=mean(ks),scale=mean(thetas))
lines(c,y2,col='blue')
}
#mean(phihats1)
df * mean(phihats2)/ phi
2 * mean(ks)
df
hist(phihats2)
abline(v=phi,col='red')
abline(v=mean(phihats2,col='blue'))
#par(mfrow=c(2,2))
##(n / df) * phi
#var(phihats1)
# these should be roughly equal:
#(1/(df)) * (sum(y^2/muhat)-sum(muhat))
#phihat2
#
|
e415772cfe8f9918fadf2e8736a9b9cf1ac0f753
|
860a1a3c427142e766d7b1f4bd08fd73ed9121ac
|
/rbloomberg/man/convert.data.to.type.Rd
|
b891f0281071cc2b56e98a9c1a88a6e827f49705
|
[] |
no_license
|
acesari/RBloomberg
|
6a5746e10ad7f44d65612a98c0ea75f2466cb513
|
f01f40f0850badd36cef3887c13bb4108c38793a
|
refs/heads/master
| 2021-01-16T12:44:32.276054
| 2011-07-28T13:55:54
| 2011-07-28T13:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
rd
|
convert.data.to.type.Rd
|
\name{convert.data.to.type}
\alias{convert.data.to.type}
\title{Convert a matrix to a data frame.}
\usage{convert.data.to.type(matrix.data, data_types)}
\description{
Convert a matrix to a data frame.
}
\details{
Take results as matrix and a vector of data types and
coerce to a data frame with columns of the specified
types.
}
\keyword{internal}
\arguments{
\item{matrix.data}{the matrix from the bloomberg result}
\item{data_types}{the data types as returned by \code{result$getDataTypes()}}
}
|
0b356e61e244176cedff9b287e593bd13a4ee029
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test_dominator.tree.R
|
a83db4b4d236285da8504f2297955ba3bb02a5ff
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 871
|
r
|
test_dominator.tree.R
|
context("dominator_tree")
test_that("dominator_tree works", {
library(igraph)
g <- graph_from_literal(R-+A:B:C, A-+D, B-+A:D:E, C-+F:G, D-+L,
E-+H, F-+I, G-+I:J, H-+E:K, I-+K, J-+I,
K-+I:R, L-+H)
dtree <- dominator_tree(g, root="R")
dtree$dom <- V(g)$name[ as.vector(dtree$dom) ]
dtree$leftout <- V(g)$name[ dtree$leftout ]
expect_that(dtree$dom, equals(c("R", "R", "R", "R", "R", "C", "C",
"D", "R", "R", "G", "R")))
expect_that(dtree$leftout, equals(character()))
expect_that(as_edgelist(dtree$domtree),
equals(structure(c("R", "R", "R", "R", "R", "C", "C",
"D", "R", "R", "G", "R", "A", "B",
"C", "D", "E", "F", "G", "L", "H",
"I", "J", "K"), .Dim = c(12L, 2L))))
})
|
7db39d7122100d43cd30780b7dd2c4a4a18daaf3
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/multivariance/inst/testfiles/fastdist/libFuzzer_fastdist/fastdist_valgrind_files/1612987467-test.R
|
e90bb62231d6c23db019594c9328fe16054aa920
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
1612987467-test.R
|
# Fuzzer-generated regression input: an 8x2 numeric matrix whose first entry
# is a subnormal double and whose remaining entries are zero.
testlist <- list(x = structure(c(8.34402696940226e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 2L)))
# Invoke the function under test with the fuzzed argument list and print the
# structure of the result for the valgrind harness to inspect.
result <- do.call(multivariance::fastdist,testlist)
str(result)
|
c32c9c22a33efecdef50c8d6c79f0c619377cd82
|
dae6c1f7e8e051cf0a3534e92cc722e9a02174af
|
/tests/testthat/test-taxadb.R
|
cfaf36043d73d2ae314363f9e840ccef635b12bd
|
[
"MIT"
] |
permissive
|
cnyuanh/taxadb
|
f68ad2c6c381d9be421a40821a912414abdb95ae
|
5b95e3810078e90b592f09a107589898cf5eebbe
|
refs/heads/master
| 2020-12-08T16:46:17.986425
| 2019-12-19T02:20:34
| 2019-12-19T02:20:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
test-taxadb.R
|
context("taxadb")
library(testthat)
library(taxadb)
library(dplyr)
test_that("we can set up a db and call basic functions", {
td_create(dbdir = test_db)
db <- td_connect(test_db)
df <- taxa_tbl(provider = "itis",
db = db)
chameleons <- df %>%
filter(family == "Chamaeleonidae") %>%
collect()
df <- filter_rank(name = "Aves",
rank = "class",
db = db) %>%
filter(taxonomicStatus == "accepted")
species <- filter_name(df$scientificName,
db = db) %>%
filter(taxonomicStatus == "accepted")
## confirm order did not change
expect_identical(df$scientificName, species$scientificName)
expect_is(df, "data.frame")
expect_is(species, "data.frame")
expect_is(chameleons, "data.frame")
expect_gt(dim(df)[1], 1)
expect_gt(dim(chameleons)[1], 1)
## we can opt out of ignore_case on ids():
species <- filter_name(df$scientificName,
db = db,
ignore_case = FALSE) %>%
filter(taxonomicStatus == "accepted")
expect_is(species, "data.frame")
expect_gt(dim(species)[1], 1)
## filter_id() takes IDs instead of names:
names <- filter_id(id = df$taxonID,
db = db)
expect_is(names, "data.frame")
expect_gt(dim(names)[1], 1)
## Test synonyms: We can
## get synonyms for the accepted names:
syns <- synonyms(df$scientificName,
db = db)
expect_is(syns, "data.frame")
expect_gt(dim(syns)[1], 1)
})
|
0fdfbdd919669c74d36d0bca8853608b6c97b979
|
5432e2fc86a88fd721b16e09e9c1ca96e1b13806
|
/markov/markov.R
|
5a6173b9041079d3757980d4f25d1cbd12fc0d6e
|
[] |
no_license
|
emrahkirdok/codes-for-teaching
|
9556c9e75fd56de9642386e1eef111e69253c3ee
|
e8669ecdf7a0e5e0cf9ad94d1e1559876a22c9ba
|
refs/heads/master
| 2020-12-28T18:44:51.081802
| 2020-02-05T12:38:10
| 2020-02-05T12:38:10
| 238,446,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,851
|
r
|
markov.R
|
# a quick implementation of markov chains using an english dictionary
library(readr)
# first read the dataset
words <- read_csv("https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt", col_names = FALSE)
# Create the word matrix: one row per word, one column per character position
# (up to 32), with "_" written after the last character as an end-of-word
# marker; unused trailing cells stay NA.
# FIXES vs. original:
#  * matrix() requires a scalar nrow; dim(words) is a length-2 vector.
#  * words[i, 1] on a readr tibble is a 1x1 tibble, not a character scalar,
#    so nchar()/substr() on it fail; extract the column as a plain vector.
word_vec <- words[[1]]
wordmatrix <- matrix(data = NA_character_, ncol = 32, nrow = length(word_vec))
for (i in seq_along(word_vec)) {
  chars <- strsplit(word_vec[i], "")[[1]]
  wordmatrix[i, seq_along(chars)] <- chars
  wordmatrix[i, length(chars) + 1] <- "_"  # terminator column
}
# create first order markov chain
# Generate a word of `size` characters by sampling each character
# independently from a named probability table.
#
# size: desired word length (coerced to integer).
# probab_tbl: named numeric vector -- names are the candidate characters,
#   values their probabilities (e.g. a normalized table()).
# Returns a single string of `size` characters ("" when size is 0).
gen_word_1st <- function(size, probab_tbl){
  n <- as.integer(size)
  # One vectorized draw with replacement is distributionally identical to
  # n independent single draws. This also fixes the original's empty-size
  # bug: seq(1, n) is c(1, 0) when n == 0, which produced a stray character.
  chars <- sample(x = names(probab_tbl), size = n, replace = TRUE,
                  prob = probab_tbl)
  paste0(chars, collapse = "")
}
# get the probabilities for the first character
freq1 <- table(wordmatrix[,1])/sum(table(wordmatrix[,1]))
freq2 <- c()
for (i in seq(1,length(freq1))){
chr1 <- names(freq1[i])
chr2_db <- data.frame(table(wordmatrix[wordmatrix[,1]==chr1,2])/sum(table(wordmatrix[wordmatrix[,1]==chr1,2])), stringsAsFactors = F)
layer <- cbind(rep(chr1, nrow(chr2_db)), rep(freq1[i], nrow(chr2_db)))
freq2<-rbind(freq2,cbind(layer,chr2_db))
}
colnames(freq2) <- c("first", "first_probab", "second", "second_probab")
freq2$first<-as.character(freq2$first)
freq2$first_probab<-as.numeric(as.character(freq2$first_probab))
freq2$second<-as.character(freq2$second)
freq2$second_probab<-as.numeric(as.character(freq2$second_probab))
firstprobab<-unique(freq2[,1:2])
i=0
while (i<100){
word1<-sample(x = firstprobab$first, prob = firstprobab$first_probab, size = 1)
word2<-sample(x = freq2[freq2$first==word1,][,3], size = 1, prob = freq2[freq2$first==word1,][,4])
word3<-sample(x = freq2[freq2$first==word2,][,3], size = 1, prob = freq2[freq2$first==word2,][,4])
word4<-sample(x = freq2[freq2$first==word3,][,3], size = 1, prob = freq2[freq2$first==word3,][,4])
word<-paste0(word1,word2,word3,word4, " ", collapse = "")
i=i+1
cat(word)
}
# we could add another markov chain
# now we will calculate the conditional probability of the third character given the previous character
freq3 <- c()
for (i in seq(1,length(freq1))){
second_layer <- c()
chr1 <- names(freq1[i])
chr2_df <- data.frame(table(wordmatrix[wordmatrix[,1]==chr1,2])/sum(table(wordmatrix[wordmatrix[,1]==chr1,2])), stringsAsFactors = F)
chr2_df[,1]<-as.character(chr2_df[,1])
for (j in seq(1,nrow(chr2_df))){
cat(i,j,"\n")
chr2<-chr2_df[j,1]
chr2_probab<-chr2_df[j,2]
chr3_df<-data.frame(table(wordmatrix[wordmatrix[,2]==chr2,3])/sum(table(wordmatrix[wordmatrix[,2]==chr2,3])), stringsAsFactors = F)
layer <- cbind(rep(chr2,nrow(chr3_df)), rep(chr2_probab,nrow(chr3_df)))
layer <- cbind(layer,chr3_df)
second_layer <- rbind(second_layer,layer)
}
layer <- cbind(rep(chr1, nrow(second_layer)), rep(freq1[i], nrow(second_layer)))
freq3<-rbind(freq3,cbind(layer,second_layer))
}
colnames(freq3) <- c("first","firstP","second", "secondP", "third", "thirdP")
freq3$first<-as.character(freq3$first)
freq3$firstP<-as.numeric(as.character(freq3$firstP))
freq3$second<-as.character(freq3$second)
freq3$secondP<-as.numeric(as.character(freq3$secondP))
freq3$third<-as.character(freq3$third)
freq3$thirdP<-as.numeric(as.character(freq3$thirdP))
# generate words
# it seems it is working
i=0
while (i<50){
word1<-sample(x = firstprobab$first, prob = firstprobab$first_probab, size = 1)
probabs<-unique(data.frame(freq3[freq3$first==word1,][,3], freq3[freq3$first==word1,][,4],stringsAsFactors = F))
word2<-sample(x = probabs[,1], size = 1,prob = probabs[,2])
probabs<-data.frame(freq3[freq3[freq3$second==word2,][,3]==word2,][,5], freq3[freq3[freq3$second==word2,][,3]==word2,][,6], stringsAsFactors = F)
word3<-sample(x = probabs[,1], size = 1,prob = probabs[,2])
word4<-sample(x = firstprobab$first, prob = firstprobab$first_probab, size = 1)
cat(paste0(word1,word2, word3, word4, collapse = ""), "\n")
Sys.sleep(0.5)
i=i+1
}
# I tried to increase to forth chain, but I think it is not working correctly because of the NA
# however it is too much work to generate the whole probability tree
freq4 <- c()
for (i in seq(1,length(freq1))){
second_layer <- c()
chr1 <- names(freq1[i])
chr2_df <- data.frame(table(wordmatrix[wordmatrix[,1]==chr1,2])/sum(table(wordmatrix[wordmatrix[,1]==chr1,2])), stringsAsFactors = F)
chr2_df[,1]<-as.character(chr2_df[,1])
for (j in seq(1,nrow(chr2_df))){
third_layer <- c()
#cat(i,j,"\n")
chr2<-chr2_df[j,1]
chr2_probab<-chr2_df[j,2]
if (chr2!=""){
chr3_df<-data.frame(table(wordmatrix[wordmatrix[,2]==chr2,3])/sum(table(wordmatrix[wordmatrix[,2]==chr2,3])), stringsAsFactors = F)
chr3_df[,1]<-as.character(chr3_df[,1])
chr3<-chr3_df[j,1]
chr3_probab<-chr3_df[j,2]
for (z in seq(1,nrow(chr3_df))){
cat(i,j,z,"\n")
chr4_df<-data.frame(table(wordmatrix[wordmatrix[,3]==chr2,4])/sum(table(wordmatrix[wordmatrix[,3]==chr2,4])), stringsAsFactors = F)
layer <- cbind(rep(chr3,nrow(chr4_df)), rep(chr3_probab,nrow(chr4_df)))
layer <- cbind(layer,chr4_df)
third_layer <- rbind(third_layer,layer)
}
}
layer <- cbind(rep(chr3,nrow(chr4_df)), rep(chr3_probab,nrow(chr4_df)))
layer <- cbind(layer,third_layer)
second_layer <- rbind(second_layer,layer)
}
layer <- cbind(rep(chr1, nrow(second_layer)), rep(freq1[i], nrow(second_layer)))
freq4<-rbind(freq4,cbind(layer,second_layer))
}
colnames(freq4) <- c("first","firstP","second", "secondP", "third", "thirdP", "fourth", "fourthP")
freq4$first<-as.character(freq4$first)
freq4$firstP<-as.numeric(as.character(freq4$firstP))
freq4$second<-as.character(freq4$second)
freq4$secondP<-as.numeric(as.character(freq4$secondP))
freq4$third<-as.character(freq4$third)
freq4$thirdP<-as.numeric(as.character(freq4$thirdP))
freq4$fourth<-as.character(freq4$fourth)
freq4$fourth<-as.numeric(as.character(freq4$fourth))
i=0
while (i<50){
word1<-sample(x = firstprobab$first, prob = firstprobab$first_probab, size = 1)
probabs<-unique(data.frame(freq4[freq4$first==word1,][,3], freq4[freq4$first==word1,][,4],stringsAsFactors = F))
word2<-sample(x = probabs[,1], size = 1,prob = probabs[,2])
probabs<-data.frame(freq4[freq4[freq4$second==word2,][,3]==word2,][,5], freq4[freq4[freq4$second==word2,][,3]==word2,][,6], stringsAsFactors = F)
word3<-sample(x = probabs[,1], size = 1,prob = probabs[,2])
probabs<-data.frame(freq4[freq4[freq4$third==word3,][,3]==word3,][,5], freq4[freq4[freq4$third==word3,][,3]==word3,][,6], stringsAsFactors = F)
word4<-sample(x = firstprobab$first, prob = firstprobab$first_probab, size = 1)
cat(paste0(word1,word2, word3, word4, collapse = ""), "\n")
Sys.sleep(0.5)
i=i+1
}
#-----------------------------------
# I tried to write a much simpler code for the first order chain
# but really, not working enough
#first order
i=1
chances<-c()
while (i+1<=ncol(wordmatrix)){
chances<-rbind(chances,cbind(wordmatrix[,i], wordmatrix[,i+1]))
i=i+1
}
# Draw the next character of a first-order chain.
#
# word_in: the current character.
# chances: 2-column character matrix of observed (current, next) pairs;
#   repeated pairs occur once per observation, so uniform sampling over the
#   matching rows reproduces the empirical transition frequencies.
# Returns one character sampled from the transitions out of `word_in`.
#
# FIX: the original default `chances = chances` was self-referential and
# could never evaluate ("promise already under evaluation"), so the argument
# was effectively required anyway; the unusable default is removed. All
# callers in this script pass `chances` explicitly.
getchr <- function(word_in, chances){
  candidates <- chances[chances[, 1] == word_in, 2]
  candidates <- candidates[!is.na(candidates)]
  sample(x = candidates, size = 1)
}
# Print one randomly generated word to the console, character by character.
# Draws the first character from `init` (named probability vector, defaults
# to the global freq1), then follows first-order transitions from `probMAT`
# (2-column matrix, defaults to the global chances) via getchr() until the
# "_" end-of-word marker is drawn, and finishes with a space.
# Side-effect only: all output goes through cat(); nothing is returned.
# NOTE(review): if "_" can be drawn as the *first* character from `init`,
# it is printed and the loop never runs -- confirm freq1 excludes "_".
makeword <- function(init=freq1, probMAT=chances){
word<-sample(x = names(init), size = 1, prob = init)
cat(word)
while(word != "_"){
word<-getchr(word_in=word, chances=probMAT)
cat(word)
}
cat(" ")
}
init<-freq1
i=1
while(i<10000){
word<-sample(x = names(init), size = 1, prob = init)
cat(word)
word<-getchr(word_in=word, chances=chances)
if (word=="_"){cat(" ");word<-sample(x = names(init), size = 1, prob = init)}else{cat(word)}
i=i+1
}
#then I tried to model second order markov chain as like this:
#the conditional probability of the third character, given the first two characters
i=1
chances_2nd<-c()
while (i+2<=ncol(wordmatrix)){
first_syl<-apply(X = wordmatrix[,i:(i+1)],MARGIN = 1, function(x){paste0(x,collapse = "")})
chances_2nd<-rbind(chances_2nd, cbind(first_syl , wordmatrix[,i+2]))
i=i+1
}
# Draw the next character for the chain: sample uniformly among all rows of
# `chances` (2-column character matrix of (current, next) pairs) whose first
# column matches `word_in`; row multiplicity encodes observed frequency.
#
# FIX: the original default `chances = chances` was self-referential and
# errored if relied upon ("promise already under evaluation"); it is removed.
# Every call site in this script passes `chances` explicitly.
# NOTE(review): this duplicates the earlier getchr() definition in the same
# script -- the second definition silently shadows the first when sourced.
getchr <- function(word_in, chances){
  candidates <- chances[chances[, 1] == word_in, 2]
  candidates <- candidates[!is.na(candidates)]
  sample(x = candidates, size = 1)
}
# Print one randomly generated word: first character from the `init`
# distribution (defaults to global freq1), subsequent characters from the
# transition matrix `probMAT` (defaults to global chances) via getchr(),
# stopping when the "_" end-of-word marker is drawn; ends with a space.
# Side-effect only (cat()); returns nothing useful.
# NOTE(review): byte-identical duplicate of the makeword() defined earlier
# in this script; sourcing the file keeps only this later definition.
makeword <- function(init=freq1, probMAT=chances){
word<-sample(x = names(init), size = 1, prob = init)
cat(word)
while(word != "_"){
word<-getchr(word_in=word, chances=probMAT)
cat(word)
}
cat(" ")
}
start_mat <- chances_2nd[!is.na(chances_2nd[,1]),1]
start_mat<-start_mat[start_mat!="NANA"]
start_mat<-start_mat[start_mat!="_NA"]
start_mat_prob <- table(start_mat)/sum(table(start_mat))
i=1
while(i<10000){
length_word<-nchar(sample(x = names(start_mat_prob),1, prob = start_mat_prob))
j=1
while (j<=length_word){
SY<-sample(x = start_mat, size = 1)
if (substr(SY,start = 2,stop = 2)=="_"){
SY<-gsub(pattern = "_",replacement = " ", x = SY)
cat(SY)
SY<-sample(x = start_mat, size = 1)
}
if (substr(SY,start = 2,stop = 2)!="_"){
cat(SY)
SY_N<-getchr(word_in=SY, chances=chances_2nd)
if (SY_N=="_"){
cat(" ")
}else{
cat(SY_N)
SY<-paste0(substr(SY,start = 2,stop = 2),SY_N,collapse = "")
}
}
if (substr(SY,start = 1,stop = 1)=="_"){
#SY<-gsub(pattern = "_",replacement = " ", x = SY)
SY<-sample(x = start_mat, size = 1)
cat(SY)
}
#SY_N<-getchr(word_in=SY, chances=chances)
#if (word=="_"){cat(" ");word<-sample(x = names(init), size = 1, prob = init)}else{cat(word)}
i=i+1
}
cat(" ")
}
|
0d17edc58d71f262459b481c61ac8bcb33098806
|
e63b80d27581a2f60463239b9b575097d3eeb765
|
/counting.R
|
d587b68ed84f9068cc86ec0ff4eb2481ace6a402
|
[] |
no_license
|
AnkitaNaik/News-Effect-on-Stocks
|
5ca56a4342ef56f79c8ac4e7f966f5dba9a4a661
|
37ea6193d976224ee54189b1e7ef3321b4304040
|
refs/heads/master
| 2021-06-17T12:08:32.531574
| 2021-01-17T10:09:19
| 2021-01-17T10:09:19
| 137,595,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,130
|
r
|
counting.R
|
## Function for frequency calculation
# Count mentions of company `name` in the text `sdk`.
#
# name: company identifier -- must be one of the switch() cases below,
#   otherwise `keywords` is NULL and termco() will fail.
# sdk: character text (e.g. a news article) to search.
# Returns the total number of keyword matches reported by qdap::termco().
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
frequency<- function(name,sdk){
## Packages used are mentioned below
require(qdap)
require(tm)
#print(name)
#print(sdk)
# normalize the text before matching: lower-case and strip punctuation
sdk = tolower(sdk)
sdk <- removePunctuation(sdk)
# map each company to its search keywords (names, tickers, abbreviations);
# NOTE(review): 'verison' looks like a typo -- 'verizon' itself is missing.
# Also, sdk was lower-cased above while many keywords keep capitals
# (e.g. 'Pfizer') -- confirm termco() matches case-insensitively.
keywords= switch (as.character(name),'Wal-Mart' = c('walmex','walmart')
,'johnson' = c('johnson')
,'3M' = c('3M','MMM')
,'United Technologies' = c('United Technologies')
,'Procter & Gamble'= c('Procter & Gamble','P&G','procter')
,'Pfizer' = c('Pfizer')
,'Verizon Communications'=c('Verizon Communications','verison')
,'Microsoft' = c('microsoft','msft')
, 'Coca-Cola'=c('Coca-Cola')
, 'Merck'=c('Merck')
, 'Intel'=c('Intel')
, 'Travelers Companies'=c('Travelers Companies','travelers')
, 'Home Depot'=c('Home Depot')
, 'General Electric'=c('General Electric','GE')
, 'Boeing'=c('Boeing')
, 'American Express' = c('American Express','Amex')
, 'Goldman Sachs'=c('Goldman Sachs','gs')
, 'Nike'=c('Nike')
, 'Walt Disney' = c('Walt Disney','disney')
, 'Apple'=c('Apple','aapl')
, 'UnitedHealth' = c('UnitedHealth','UNH')
, 'Visa'=c('Visa')
, 'Cisco' = c('Cisco')
, 'International Business Machines' = c('International Business Machines','IBM')
, 'du Pont de Nemours' = c('du Pont de Nemours','DD','DuPont')
, 'Exxon Mobil'=c('Exxon Mobil','Exxon','Mobil')
, 'JP Morgan Chase'=c('JP Morgan Chase','JP','JPM')
, 'Chevron' = c('Chevron')
, 'Caterpillar' = c('Caterpillar',"CAT")
, 'McDonald'= c('McDonald','MCD'))
# columns 3..(2+k) of termco()$raw are assumed to hold the per-keyword
# match counts (the first two are skipped) -- TODO confirm against the
# qdap::termco() documentation for the installed version.
p = sum(termco(text.var = sdk,match.list =keywords)$raw[3:(2+length(keywords))])
return(p)
}
|
e4c29755d9c9cf31a28a51adf67922cc5ec13917
|
a60427d81114009b6004bc9f4aec36e9b0000d28
|
/R/nnet1.r
|
dc0640f0d744b18045ab4123e54044922ab3332f
|
[
"Apache-2.0"
] |
permissive
|
cwkim0314/DataScience-Practice
|
6ad016627a6541e3ac45cf54d84491fbc08770b2
|
7b3aec2089b6508efa2d84c7937e6781bd3fc371
|
refs/heads/main
| 2023-08-19T20:40:15.407225
| 2021-10-01T14:45:22
| 2021-10-01T14:45:22
| 407,879,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 107
|
r
|
nnet1.r
|
library(xlsx)
library(nnet)
# Interactively pick an Excel workbook and read its first sheet.
data<-read.xlsx2(file.choose(),1)
# echo the imported table to the console
data
# Plot index value over date (Korean column names: 일자 = date,
# 현재지수 = current index).
# FIX: the original plotted df$..., but no `df` object exists -- the data
# read above is bound to `data`.
plot(data$일자,data$현재지수)
grid()
|
ed799163ac652e0b14a9d6a52cd5526e7bc0dc9a
|
057427005e93aa00f2f8aaaeb78b20567c52e301
|
/man/read.Structure.Rd
|
ae528ca567668adc3bb82fb2cced5078e51e5fd9
|
[] |
no_license
|
cran/assignPOP
|
ee9f1bd1620c45ff98bf10e7447a6d3597d9f3b8
|
bcf7850e2bdfc3725f8ef6797bf561ac8aab6c3c
|
refs/heads/master
| 2021-11-11T20:13:11.220794
| 2021-10-27T18:30:02
| 2021-10-27T18:30:02
| 77,605,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,328
|
rd
|
read.Structure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.Structure.R
\name{read.Structure}
\alias{read.Structure}
\title{Read Structure format file}
\usage{
read.Structure(x, ploidy = 2)
}
\arguments{
\item{x}{STRUCTURE file or path to the file. The filename extension (e.g., .txt) should be included.}
\item{ploidy}{An integer of 1, 2, 3, or 4, to indicate haploid, diploid, triploid, or tetraploid data. Default is 2 (diploid).}
}
\value{
This function returns a list comprising three elements. 1. YOU_NAME_IT$DataMatrix: A matrix of genetic data with a population name label ($popNameVector) in the last column. 2. YOU_NAME_IT$SampleID: A vector of sample ID. 3. YOU_NAME_IT$LocusName: A vector of locus name.
}
\description{
This function allows you to import a STRUCTURE format file into R. The first row should be locus names (either with or without column names for sample ID and population label); the first column should be sample ID; the second column should be population label; the rest are genotypes. Use "-9" for missing alleles.
}
\examples{
# infile <- read.Structure("Your_Structure_File.txt")
}
\references{
Pritchard, J.K., Stephens, M. and Donnelly, P., 2000. Inference of population structure using multilocus genotype data. Genetics, 155(2), pp.945-959.
}
|
b0a0404507136487e4692c40ee8a109de4834ca6
|
91a90a5728a83acb0a80ba02eef12f111f1fd7b6
|
/model_old/6_compile_samples.R
|
e7cb828de79e7eb58b1b714fc60623210929c072
|
[] |
no_license
|
ballardtj/gen_arch
|
b5d9556c5ba9e18d3a515608bf5a8fea1dc9080b
|
a0ca3fedd5231e2afb577da8f6ae26377621a9a2
|
refs/heads/master
| 2020-03-23T09:57:22.681605
| 2019-10-15T02:06:53
| 2019-10-15T02:06:53
| 141,416,873
| 0
| 0
| null | 2019-07-08T23:18:13
| 2018-07-18T10:03:06
|
R
|
UTF-8
|
R
| false
| false
| 640
|
r
|
6_compile_samples.R
|
# Compile CmdStan sampler output (per-chain CSV files) into a single rstan
# fit object and save it as .RData. Expects the last four command-line
# arguments in the form "<flag>--<frame>", "<flag>--<source>",
# "<flag>--<structure>", "<flag>--<model>".
library(rstan)
args <- commandArgs(trailingOnly = T)
print(args)
# keep only the final four arguments
i <- args[(length(args)-3):length(args)]
# each argument is "<flag>--<value>"; keep the value after the "--"
frame <- strsplit(i[1],"--")[[1]][2]
source <- strsplit(i[2],"--")[[1]][2]
structure <- strsplit(i[3],"--")[[1]][2]
model <- strsplit(i[4],"--")[[1]][2]
# NOTE(review): the "." before "csv" is an unescaped regex metacharacter
# (matches any character), so this also matches e.g. "Xcsv" -- harmless
# here but not a literal ".csv" match.
regexstr = paste0(frame,"_",source,".*",structure,"_",model,".*.csv")
#get relevant csv files
csvfiles=dir(path='~/cmdstan/model/',pattern=regexstr)
print(csvfiles)
#create fit object from those csv files
fit=read_stan_csv(paste0('~/cmdstan/model/',csvfiles))
#save fit object
save(fit,file=paste0("~/cmdstan/model/",frame,"_",source,"_",structure,"_",model,"_fit.RData"))
|
d5e185e42a013ec359e6da707ed1b2a56e4891ca
|
4e38170d7b3f871ef2b65419102f9cee757e456d
|
/SuperFarmerMAPA-master/R/badaj_gre.R
|
7273d6b2037584d97ac9b8f42702daf91c75b405
|
[] |
no_license
|
pgorniak/SuperFarmerMAPA
|
a4d96b6d0b48a26851b53212098d7166a04b3d20
|
620bf7a8d58216e22115bc71f5077afb2272f590
|
refs/heads/master
| 2020-06-10T21:15:36.893818
| 2016-12-31T15:12:48
| 2016-12-31T15:12:48
| 75,871,314
| 0
| 0
| null | 2016-12-07T20:05:01
| 2016-12-07T20:05:01
| null |
UTF-8
|
R
| false
| false
| 682
|
r
|
badaj_gre.R
|
#' @title Badanie gry przy zadanej strategii
#'
#' @description Funkcja wywolujaca funkcje \code{gra()} wielokrotnie przy zadanej strategii i zwracajaca wektor wynikow
#'
#' @param strategia Strategia, przy ktorej ma byc badana gra
#' @param ile Ile razy fukcja ma zagrac w SuperFarmera przy zadanej strategii, domyslnie 10000
#'
#' @return Funkcja zwraca wektor liczby ruchów w kolejnych grach.
#'
#' @examples
#' wyniki <- badaj_gre(strategia_rf, 10)
#'
#' @rdname badaj_gre
#'
#' @author Agnieszka Ciepielewska, Marek Wawreniuk, Pawel Gorniak
#'
#' @export
badaj_gre <- function(strategia, ile = 10000){
  # Play `ile` games of SuperFarmer with the given strategy and collect the
  # number of moves from each game (see the roxygen block above).
  # seq_len() fixes the ile = 0 edge case: 1:0 yields c(1, 0) and would run
  # two spurious games. vapply() guarantees a numeric vector result, unlike
  # sapply(), whose return type depends on its input.
  wyniki <- vapply(seq_len(ile), function(x) gra(strategia), numeric(1))
  return(wyniki)
}
|
926a9941b0ab9a36f0317acc6292df5f0e59412a
|
6fbfe5725afa1574e8267679bf5b41739c55943a
|
/Week 3 Kejia Shi.r
|
3755dba74c269cc32e2a6c6d9907ef8e2f5408be
|
[] |
no_license
|
bugakm/628--p1
|
a169dae2933165173b7bf43079d3d71c55310863
|
e35774f312ef1f1d00ec81cc7e4688d578a027b3
|
refs/heads/master
| 2016-08-11T19:23:07.779355
| 2016-02-25T19:01:46
| 2016-02-25T19:01:46
| 50,133,722
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,867
|
r
|
Week 3 Kejia Shi.r
|
##Seasonal Analysis
library(dplyr)
library(timsac)
library(dynlm)
library(stats)
library(forecase)
library(bfast)
library(ggplot2)
#0. Cleaning
dataclean_pre<-function(chr){
data <-read.csv(chr,skip=3,colClasses = c("character",rep("NULL",2),rep("integer",5),rep("NULL",3)))
data<-data %>%
filter(Value != -999) %>%
group_by(Month) %>%
mutate(mean_Value_Mon = mean(Value)) %>%
group_by(Month,Day) %>%
mutate(mean_Value_Day = mean(Value)) %>%
ungroup()
}
dailyclean<-function(chr){
data <-read.csv(chr,skip=3,colClasses = c("character",rep("NULL",2),rep("integer",5),rep("NULL",3)))
data<-data %>%
filter(Value != -999) %>%
group_by(Month) %>%
mutate(mean_Value_Mon = mean(Value)) %>%
group_by(Month,Day) %>%
summarize(mean_Value_Day = mean(Value)) %>%
ungroup()
}
###perform first cleaning
bj2015<-dataclean_pre("bj2015.csv")
bj2014<-dataclean_pre("bj2014.csv")
bj2013<-dataclean_pre("bj2013.csv")
bj2012<-dataclean_pre("bj2012.csv")
bj2011<-dataclean_pre("bj2011.csv")
bj<-rbind(bj2011,bj2012,bj2013,bj2014,bj2015)
save(bj,file="bj.rdata")
dbj2015<-dailyclean("bj2015.csv")
dbj2014<-dailyclean("bj2014.csv")
dbj2013<-dailyclean("bj2013.csv")
dbj2012<-dailyclean("bj2012.csv")
dbj2011<-dailyclean("bj2011.csv")
dbj<-rbind(dbj2011,dbj2012,dbj2013,dbj2014,dbj2015)
save(dbj,file="dbj.rdata")
#1. Calculate Daily Index and Polluted Days
##Daily Index: Mean Value
load("dbj.rdata")
pollute<-function(data){
data<-data %>%
##Polluted Days: Daily Index >= 150
mutate(Polluted = as.numeric(mean_Value_Day >= 150)) %>%
##Heavily Polluted Days: Daily Index >= 300
mutate(Hea_Polluted = as.numeric(mean_Value_Day >= 300))
}
dbj2011<-pollute(dbj2011)
dbj2011$year<-as.numeric(2011)
dbj2012<-pollute(dbj2012)
dbj2012$year<-as.numeric(2012)
dbj2013<-pollute(dbj2013)
dbj2013$year<-as.numeric(2013)
dbj2014<-pollute(dbj2014)
dbj2014$year<-as.numeric(2014)
dbj2015<-pollute(dbj2015)
dbj2015$year<-as.numeric(2015)
dbj<-rbind(dbj2012,dbj2013,dbj2014,dbj2015)
#2. Calculate Monthly Polluted Days and Monthly Average Index
monthpol<-function(data){
data<-data %>%
group_by(Year, Month) %>%
summarize(aggPollute = sum(Polluted), aggHeavy = sum(Hea_Polluted))
}
mbj<-monthpol(dbj)
mbj<-data.frame(year=mbj$Year, month=mbj$Month, aggPollute=mbj$aggPollute, aggHeavy=mbj$aggHeavy)
mmean = monthmean(data=mbj, resp='aggPollute', adjmonth=FALSE)
mmean
plot(mmean)
##Plotting monthly data in circle diagrams
plotCircular(area1 = mmean$mean,
dp = 1, lines = TRUE,
labels = month.abb,
scale = 0.7)
#3. Time Series Plot / monthly data
mbj$yrmon<-round(mbj$year+(1/12)*(mbj$month-1),3)
plot(mbj$yrmon, mbj$aggPollute, type = 'o',
pch = 19,
ylab = 'Number of polluted days (PM2.5>=150) per month', xlab = 'Time')
#4. STL Decomposition / weekly data
|
2e4c470744e27a6e350e16dae277e74be67afbf4
|
7fedcc709364af02dc16baefabf0950e54bebb6c
|
/man/plot_Kroll_differential.Rd
|
4238dafa025994cee4e24dadb3d7eabf64b6e9fa
|
[] |
no_license
|
robertyoung3/MSanalyzeNOM
|
34c406c453a3ac8eda7a1a8456161dd9159062f1
|
604299a54f55948d41ceb5f2769f2242935d2976
|
refs/heads/master
| 2023-05-07T06:42:44.693193
| 2021-06-01T17:55:37
| 2021-06-01T17:55:37
| 268,903,669
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 708
|
rd
|
plot_Kroll_differential.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_Kroll_differential.R
\name{plot_Kroll_differential}
\alias{plot_Kroll_differential}
\title{plot_Kroll_differential}
\usage{
plot_Kroll_differential(data, plot_title = "")
}
\arguments{
\item{data}{a tibble containing the following column names: "HtoC"
and "OtoC"}
\item{plot_title}{a character string containing the sample name (default =
none)}
}
\description{
This function makes a Kroll diagram that plots average carbon oxidation
states against carbon number (Kroll et al. 2011), but shows the differences
in peak abundance intensities between two samples on a gradient color scale
(as a function of relative abundance).
}
|
468345d78ce31197287a63162fc91d43c8345dbc
|
051880099402393c9249d41526a5ac162f822f8d
|
/man/pt.interpolate.Rd
|
a4423cbe7ebbf28d2384dbe7356703445db16d00
|
[
"MIT"
] |
permissive
|
bbTomas/rPraat
|
cd2b309e39e0ee784be4d83a980da60946f4c822
|
4c516e1309377e370c7d05245f6a396b6d4d4b03
|
refs/heads/master
| 2021-12-13T19:32:38.439214
| 2021-12-09T18:42:48
| 2021-12-09T18:42:48
| 54,803,225
| 21
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,125
|
rd
|
pt.interpolate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pt.R
\name{pt.interpolate}
\alias{pt.interpolate}
\title{pt.interpolate}
\usage{
pt.interpolate(pt, t)
}
\arguments{
\item{pt}{PitchTier object}
\item{t}{vector of time instances of interest}
}
\value{
PitchTier object
}
\description{
Interpolates PitchTier contour in given time instances.
}
\details{
a) If \code{t < min(pt$t)} (or \code{t > max(pt$t)}), returns the first (or the last) value of \code{pt$f}.
b) If \code{t} is existing point in \code{pt$t}, returns the respective \code{pt$f}.
c) If \code{t} is between two existing points, returns linear interpolation of these two points.
}
\examples{
pt <- pt.sample()
pt <- pt.Hz2ST(pt, ref = 100) # conversion of Hz to Semitones, reference 0 ST = 100 Hz.
pt2 <- pt.interpolate(pt, seq(pt$t[1], pt$t[length(pt$t)], by = 0.001))
\dontrun{
pt.plot(pt)
pt.plot(pt2)
}
}
\seealso{
\code{\link{pt.getPointIndexNearestTime}}, \code{\link{pt.read}}, \code{\link{pt.write}}, \code{\link{pt.plot}}, \code{\link{pt.Hz2ST}}, \code{\link{pt.cut}}, \code{\link{pt.cut0}}, \code{\link{pt.legendre}}
}
|
750e54aae32b984d336c97fcd7e060255a2a2703
|
18ea191874f20d6ce683f54d4a0cb0f28e165e5a
|
/Chapter 6.R
|
5b257a709d435872b36753abb71bffea65b072c4
|
[] |
no_license
|
jjwoolley/Intro-to-Statistical-Learning
|
524f10e39d85e9829ac91b717daf90a2eaf5cb91
|
8d17f74a30be5c138b87d97e50b421d1d34aa483
|
refs/heads/master
| 2022-12-08T11:12:41.293876
| 2020-09-07T21:14:20
| 2020-09-07T21:14:20
| 287,126,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,896
|
r
|
Chapter 6.R
|
# Follows An Introduction to Statistical Learning
# with Applications in R
# By Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani
# http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf
# good notes
# https://tdg5.github.io/stats-learning-notes/
library("MASS")
library("ISLR")
library("tidyverse")
library("tidymodels")
library("GGally")
library("boot")
library("leaps")
library("glmnet")
library("pls")
##### CONCEPTUAL----------------------------------------------------------------
#1a)
# They could all be tied, but "best subset" will for sure be either tied for the
# best or on its own as the best since it includes every combination of variables
#1b)
# It could be any of the models as best subset could overfit
#1c)
# (i) True
# (ii) True
# (iii) False
# (iv) False
# (v) False
#2a)
# (iii) True, it is less flexible and decreases variance at the price of an
# increase in bias
#2b)
# (iii) same as in 2a
#2c)
# (ii) True, this one is more flexible but has less bias as a tradeoff for an
# increase in variance
#3a)
# (iv) increasing S givs us a more flexible model which will allow the model to
# fit the data closer
#3b)
# (ii) When s = 0, all betas are 0 so when betas are allowed to increase then the model
# overall will start to fit better until it leads to overfitting
#3c)
# (iii) Variance will always increase as the model becomes more flexible
#3d)
# (iv) Bias will always decrease as the model becomes more flexible
#3e)
# (v) This is unrelated to S (it is constant)
#4a)
# (iii) As lambda increases, the model becomes more restrictive which reduces training RSS
#4b)
# (ii) As lambda increases, test RSS will decrease and then start to increase when it overfits
#4c)
# (iv) As lambda increases and goes to infinity, all betas will reduce to 0
#4d)
# (iii) Opposite of 4c
#4e)
# (v) This is unrelated to S (it is constant)
#5)
# Done on paper
#6a)
# p=1, x axis b1, choice of y1 and lambda >0
#plot prove 6.12 is solved by 6.14
lambda <- 2
B1 <- -10:10
y.point <- 3
y <- (y.point - B1)^2 + lambda * B1^2
beta.est <- y.point / (1 + lambda)
sixA <- tibble(cbind(y, B1, beta.est))
ggplot(data = sixA, aes(x = B1, y = y)) +
geom_line() +
geom_point(data = sixA,
aes(x = beta.est, y = (y.point - beta.est)^2 + lambda * beta.est^2),
color = "red")
#6b)
B2 <- -10:10
y <- (y.point - B2)^2 + lambda * abs(B1)
beta.est <- ifelse(y.point > lambda / 2, y.point - lambda / 2,
ifelse(y.point < - lambda / 2, y.point + lambda / 2,
ifelse(abs(y.point) <= lambda / 2), 0, "error"))
sixB <- tibble(cbind(y, B2, beta.est))
ggplot(data = sixB, aes(x = B2, y = y)) +
geom_line() +
geom_point(data = sixB,
aes(x = beta.est, y = (y.point - beta.est)^2 + lambda * abs(beta.est)),
color = "red")
#7)
# Done on paper
#8a)
X <- rnorm(100)
e <- rnorm(100)
#8b)
Y <- 10 + 4 * X + 3 * X^2 + 7 * X^3 + e
#8c)
data1 <- data.frame(y = Y, x = X)
select1.model <- regsubsets(y ~ poly(x, 10, raw = T),
nvmax = 10,
data = data1)
select1.summ <- summary(select1.model)
select1.summ$bic
# this shows the three var model as the best (although it is close)
select1.summ$adjr2
# this shows four var as the best
select1.summ$cp
data1.subs <- tibble(bic = select1.summ$bic,
adjr2 = select1.summ$adjr2,
cp = select1.summ$cp)
ggplot(data = data1.subs) +
geom_line(data = data1.subs, aes(x = 1:10, y = bic), color = "red") +
geom_line(data = data1.subs, aes(x = 1:10, y = cp), color = "blue") +
geom_line(data = data1.subs, aes(x = 1:10, y = adjr2 * 1000), color = "yellow")
# this shows the three var model as the best
coef(select1.model, 3)
# gave coefficients of 10.12, 3.89, 2.99, and 7.02 which is very close
#8d)
# Repeat the selection with stepwise searches and compare against the
# exhaustive best-subset result from 8c.
# BACKWARD
select2.model <- regsubsets(y ~ poly(x, 10, raw = T),
                            nvmax = 10,
                            data = data1,
                            method = "backward")
select2.summ <- summary(select2.model)
select2.summ$bic
# this shows the three var model as the best (although it is close)
select2.summ$adjr2
# this shows the three var as the best
select2.summ$cp
# shows three var the best
data2.subs <- tibble(bic = select2.summ$bic,
                     adjr2 = select2.summ$adjr2,
                     cp = select2.summ$cp)
ggplot(data = data2.subs) +
  geom_line(data = data2.subs, aes(x = 1:10, y = bic), color = "red") +
  geom_line(data = data2.subs, aes(x = 1:10, y = cp), color = "blue") +
  geom_line(data = data2.subs, aes(x = 1:10, y = adjr2 * 1000), color = "yellow")
coef(select2.model, 3)
# FORWARD
select3.model <- regsubsets(y ~ poly(x, 10, raw = T),
                            nvmax = 10,
                            data = data1,
                            method = "forward")
select3.summ <- summary(select3.model)
select3.summ$bic
# this shows the three var model as the best (although it is close)
select3.summ$adjr2
# this shows the four var as the best
select3.summ$cp
# shows three var the best
data3.subs <- tibble(bic = select3.summ$bic,
                     adjr2 = select3.summ$adjr2,
                     cp = select3.summ$cp)
ggplot(data = data3.subs) +
  geom_line(data = data3.subs, aes(x = 1:10, y = bic), color = "red") +
  geom_line(data = data3.subs, aes(x = 1:10, y = cp), color = "blue") +
  geom_line(data = data3.subs, aes(x = 1:10, y = adjr2 * 1000), color = "yellow")
coef(select3.model, 3)
# The forward and backward selection methods were very close to the best selection
#8e)
# Y <- 10 + 4 * X + 3 * X^2 + 7 * X^3 + e
# Lasso on the same 10-degree polynomial design; drop the intercept column
# from the model matrix because glmnet adds its own.
xmat4 <- model.matrix(y ~ poly(x, 10, raw = T), data = data1)[, -1]
#run cv.glmnet with an alpha =1 to do lasso (alpha = 0 for ridge)
set.seed(1234)
mod.lasso = cv.glmnet(x = xmat4, y = Y, alpha = 1)
lambda.best = mod.lasso$lambda.min
lambda.best
plot(mod.lasso)
predict(mod.lasso, s = lambda.best, type = "coefficients")
# this lasso regression is close, but should not be selecting any x^4
#8f)
# New response generated from a pure degree-7 term: Y = B0 + B7 * X^7 + eps.
# best subset selection
X <- rnorm(100)
B0 <- 12
B7 <- 5
eps <- rnorm(100)
Y <- B0 + B7*X^7 + eps
data5 <- data.frame(y = Y, x = X)
select5.model <- regsubsets(y ~ poly(x, 10, raw = T),
                            nvmax = 10,
                            data = data5)
select5.summ <- summary(select5.model)
which.min(select5.summ$bic)
# this shows the one var model as the best with the X^7 var
which.max(select5.summ$adjr2)
# this shows two var as the best
which.min(select5.summ$cp)
# this shows the one var model as the best
data5.subs <- tibble(bic = select5.summ$bic,
                     adjr2 = select5.summ$adjr2,
                     cp = select5.summ$cp)
# BUG FIX: the geom_line layers referenced data1.subs (exercise 8c's
# criteria) instead of data5.subs, so this plot silently showed the wrong
# model's BIC/Cp/adjR2 curves.
ggplot(data = data5.subs) +
  geom_line(data = data5.subs, aes(x = 1:10, y = bic), color = "red") +
  geom_line(data = data5.subs, aes(x = 1:10, y = cp), color = "blue") +
  geom_line(data = data5.subs, aes(x = 1:10, y = adjr2 * 1000), color = "yellow")
# this shows the one var model as the best
coef(select5.model, 1)
# very close to our chosen values
# Lasso
# Lasso on the degree-7 data for comparison with best subset.
xmat6 <- model.matrix(y ~ poly(x, 10, raw = T), data = data5)[, -1]
#run cv.glmnet with an alpha =1 to do lasso (alpha = 0 for ridge)
set.seed(1234)
mod6.lasso = cv.glmnet(x = xmat6, y = Y, alpha = 1)
lambda6.best = mod6.lasso$lambda.min
lambda6.best
plot(mod6.lasso)
predict(mod6.lasso, s = lambda6.best, type = "coefficients")
# this lasso regression is close, but not quite as close as best subset selection
#9a)
# Predict Apps (number of applications) in the ISLR College data; compare
# OLS, ridge, lasso, PCR and PLS on a common train/test split.
data("College")
College_data <- initial_split(College)
train.college <- training(College_data)
test.college <- testing(College_data)
#9b) LINEAR MODEL
# tidymodels interface around lm().
fit7.lm <- linear_reg() %>%
  set_engine("lm") %>%
  fit(Apps ~. , data = train.college)
tidy(fit7.lm)
lm7.pred <- predict(fit7.lm, test.college)
# get mean squared error
mse7 <- map((test.college[, "Apps"] - lm7.pred)^2, mean)
mse7
# mse = 1,844,524
#9c) RIDGE
# glmnet wants a numeric design matrix; drop the intercept column.
train.ridge.college <- model.matrix(Apps ~., data = train.college)[,-1]
test.ridge.college <- model.matrix(Apps ~., data = test.college)[,-1]
fit8.ridge <- cv.glmnet(x = train.ridge.college,
                        y = train.college$Apps,
                        alpha = 0)
lambda8.best <- fit8.ridge$lambda.min
pred8.ridge <- tibble(predict(fit8.ridge, s = lambda8.best, newx = test.ridge.college))
mse8 <- map((test.college$Apps - pred8.ridge)^2, mean)
mse8
# mse = 3,324,966
#9d) LASSO
fit9.lasso <- cv.glmnet(x = train.ridge.college,
                        y = train.college$Apps,
                        alpha = 1)
lambda9.best <- fit9.lasso$lambda.min
pred9.lasso <- tibble(predict(fit9.lasso, s = lambda9.best, newx = test.ridge.college))
mse9 <- map((test.college$Apps - pred9.lasso)^2, mean)
mse9
# mse = 1,967,434
coef9.lasso <- predict(fit9.lasso, type = "coefficients", s = lambda9.best)
coef9.lasso
length(coef9.lasso[coef9.lasso != 0])
# 15 non-zero coefficients including the intercept
#9e) PCR
fit10.pcr <- pcr(Apps ~., data = train.college, scale = T, validation = "CV")
summary(fit10.pcr)
validationplot(fit10.pcr, val.type = "MSEP")
pred10.pcr <- tibble(predict(fit10.pcr, test.college, ncomp = 17))
mse10 <- map((test.college$Apps - pred10.pcr)^2, mean)
#9f) PLS
fit11.pls <- plsr(Apps ~., data = train.college, scale = T, validation = "CV")
summary(fit11.pls)
validationplot(fit11.pls, val.type = "MSEP")
pred11.pls <- tibble(predict(fit11.pls, test.college, ncomp = 9))
mse11 <- map((test.college$Apps - pred11.pls)^2, mean)
#9g)
# MSE's from each method
mse7 #lm
mse8 # ridge:
mse9 # lasso:
mse10 # PCR: (same as lm since we use all M's/ncomp)
mse11 # PLS:
#10a)
# Simulate n=1000, p=20 with four true-zero coefficients (b[2:5]).
X <- matrix(rnorm(1000*20), 1000, 20)
eps <- rnorm(1000)
b <- rnorm(20)
b[2:5] <- 0
y <- X %*% b + eps
#10b)
data10 <- data.frame(y, X)
data10.split <- initial_split(data10, prop = 0.9)
data10.train <- training(data10.split)
data10.test <- testing(data10.split)
#10c) best subset selection on the training set
fit12.best <- regsubsets(y ~., data10.train,nvmax = 20)
fit12.summ <- summary(fit12.best)
which.max(fit12.summ$adjr2)
which.min(fit12.summ$cp)
which.min(fit12.summ$bic)
# Training MSE of each submodel size, computed by hand from the model matrix.
train12.mat <- model.matrix(y ~., data10.train, nvmax = 20)
ts.results <- rep(NA, 20)
for(i in 1:20) {
  cf <- coef(fit12.best, id = i)
  pred <- train12.mat[, names(cf)] %*% cf
  ts.results[i] <- mean((pred - data10.train$y)^2)
}
ggplot(data = NULL, aes(x = 1:20, y = ts.results)) +
  geom_point() +
  geom_line() +
  xlab("Number of predictors") +
  ylab("Mean Squared Error (MSE)") +
  theme_bw()
#10d)
# Same computation on the held-out test set.
test12.mat <- model.matrix(y ~., data10.test, nvmax = 20)
test.mse <- rep(NA, 20)
for(i in 1:20) {
  cf <- coef(fit12.best, id = i)
  pred <- test12.mat[, names(cf)] %*% cf
  test.mse[i] <- mean((pred - data10.test$y)^2)
}
ggplot(data = NULL, aes(x = 1:20, y = test.mse)) +
  geom_point() +
  geom_line() +
  xlab("Number of predictors") +
  ylab("Mean Squared Error (MSE)") +
  theme_bw()
#10e)
# the test mse is smallest in the 12 predictor model
# this is about where the training mse is close to its lowest and levels off
#10f)
coef(fit12.best, id = 12)
b
# they are very close, the model did a great job in estimating the coefficients
#10g)
# Distance between estimated and true coefficients at each model size.
# NOTE(review): the second term squares the SUM of the omitted true betas
# (sum(...)^2) rather than summing their squares, and the %in% subsetting
# does not guarantee b and cf are aligned in the same order -- confirm this
# matches the intended formula in ISLR exercise 10g.
err <- rep(NA, 20)
x_cols <- colnames(X, do.NULL = F, prefix = "X")
for (i in 1:20) {
  cf <- coef(fit12.best, id = i)
  err[i] <- sqrt(sum((b[x_cols %in% names(cf)]
                      - cf[names(cf) %in% x_cols])^2)
                 + sum(b[!(x_cols %in% names(cf))])^2)
}
err
ggplot(data = NULL, aes(x = 1:20, y = err)) +
  geom_point() +
  geom_line() +
  theme_bw() +
  xlab("Nu. of Coefficients") +
  ylab("Error b/w estimated coefficients and true")
#11a)
# predict per capita crime rate (crim)
# Boston data (MASS); treat the Charles-river dummy as a factor.
data(Boston)
Boston <- Boston %>%
  mutate(chas = factor(chas))
boston.split <- initial_split(Boston, prop = 0.5, strata = crim)
boston.train <- training(boston.split)
boston.test <- testing(boston.split)
# best subset selection
fit13.bss <- regsubsets(crim ~., data = boston.train, nvmax = 13)
fit13.summ <- summary(fit13.bss)
# code from textbook
# S3 predict() method for leaps::regsubsets fits (adapted from ISLR).
# Rebuilds the design matrix for `newdata` from the formula stored in the
# fit's call, then multiplies by the coefficients of the size-`id` submodel.
predict.regsubsets <- function(object, newdata, id, ...) {
  fit_formula <- as.formula(object$call[[2]])
  design <- model.matrix(fit_formula, newdata)
  betas <- coef(object, id = id)
  design[, names(betas)] %*% betas
}
# Test MSE of every best-subset model size.
# NOTE(review): the vector is sized from ncol(boston.train) but the loop
# bound uses ncol(boston.test); they are equal here, but the mix is fragile.
err.bss <- rep(NA, ncol(boston.train) - 1)
for(i in 1:(ncol(boston.test) - 1)) {
  pred.bss <- predict(fit13.bss, boston.test, id = i)
  err.bss[i] <- mean((boston.test$crim - pred.bss)^2)
}
err.bss
# ridge
x.train.ridge <- model.matrix(crim ~., data = boston.train)[,-1]
x.test.ridge <- model.matrix(crim ~., data = boston.test)[,-1]
fit14.ridge <- cv.glmnet(x = x.train.ridge, y = boston.train$crim, alpha = 0)
fit14.ridge$lambda.min
pred14.ridge <- predict(fit14.ridge,
                        s = fit14.ridge$lambda.min,
                        newx = x.test.ridge)
err.ridge <- mean((boston.test$crim - pred14.ridge)^2)
err.ridge
# lasso
fit15.lasso <- cv.glmnet(x = x.train.ridge, y = boston.train$crim, alpha = 1)
fit15.lasso$lambda.min
pred15.lasso <- predict(fit15.lasso,
                        s = fit15.lasso$lambda.min,
                        newx = x.test.ridge)
err.lasso <- mean((boston.test$crim - pred15.lasso)^2)
err.lasso
# pcr
# NOTE(review): [1:252] hard-codes the test-set size; a different split
# (or seed) silently truncates or recycles predictions -- confirm.
fit16.pcr <- pcr(crim ~., data = boston.train, scale = T, validation = "CV")
summary(fit16.pcr)
validationplot(fit16.pcr, val.type = "MSEP")
pred16.pcr <- predict(fit16.pcr, boston.test, ncomp = 13)[1:252]
err.pcr <- mean((pred16.pcr - boston.test$crim)^2)
err.pcr
# Side-by-side comparison of all methods.
err.bss
min(err.bss)
err.ridge
err.lasso
err.pcr
#11b)
# I would use the lasso model because it consistently has the lowest mse
predict(fit15.lasso, s = fit15.lasso$lambda.min, type = "coefficients")
#11c)
# no, it does not include the variables nox, age, or tax
|
f933fe2e96c01b3a34d73b2a56810ce2277e9eb1
|
27293443697add73d8a01b40b54c265b956bf687
|
/man/blblm-package.Rd
|
7edcb1532ffaf78393b780af64b13f5c6ecf0e02
|
[
"MIT"
] |
permissive
|
guangze-yu/blblm
|
01fea7249bc4fdb2a517ce9ef82f4fae0d43b4ac
|
08f18507f69c98ae3f47b8dbc8d232230742c6a9
|
refs/heads/master
| 2022-10-15T23:24:09.988351
| 2020-06-11T12:32:50
| 2020-06-11T12:32:50
| 271,468,366
| 0
| 0
| null | 2020-06-11T06:23:28
| 2020-06-11T06:23:27
| null |
UTF-8
|
R
| false
| true
| 350
|
rd
|
blblm-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\docType{package}
\name{blblm-package}
\title{blblm: Linear Regression with Little Bag of Bootstraps}
\description{
This package helps to train the linear regression model for dataset with the method of bootstraps.
}
\details{
Linear Regression with Little Bag of Bootstraps
}
|
e78acf5837849f1713a53919b23ea2eaaf50f519
|
9d82f6cf8d99913a3d889c0e1033c26dfca06749
|
/R/ptest-internal.R
|
94116e6daac51ccc16cd21e1e079350509a4dcc2
|
[] |
no_license
|
cran/ptest
|
9897e6234a036fee94747ac13fe312b9d476836d
|
7e9599c577f9cd8486c1a8d5369f28d6d1905c96
|
refs/heads/master
| 2021-01-21T22:10:48.716762
| 2016-11-12T21:41:37
| 2016-11-12T21:41:37
| 73,577,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117
|
r
|
ptest-internal.R
|
# Package load hook, called by R when the package is attached via library().
# Uses packageStartupMessage() (not message()) so the banner can be silenced
# with suppressPackageStartupMessages(); libname/pkgname are supplied by R
# and are intentionally unused here.
.onAttach <- function(libname, pkgname) {
  packageStartupMessage("Periodicity tests in short time series")
}
|
cd05c65600ecfa4f84b9b69f09aac2e4fad0d8b8
|
a3c18cf1bf5e9a6ff679ccbd9feae5f4888e32c7
|
/R/modifyPlot.R
|
9e5fbadade3fd3f635d594e8ee6185e12d26cbd8
|
[] |
no_license
|
nthomasCUBE/KEGGexpressionmapper
|
bc1cbb40e80d8ef2dc33cc18450e2a70e335291d
|
4a25d8a11ab78f54c7f1f19233f6e701d3eaf835
|
refs/heads/master
| 2020-07-16T20:15:27.558403
| 2018-05-07T20:34:10
| 2018-05-07T20:34:10
| 94,316,941
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,481
|
r
|
modifyPlot.R
|
# Download a KEGG pathway map image and overlay per-sample expression
# information for genes of interest, writing the result to "<id>_modified.png".
#
# Args:
#   id:   KEGG pathway map identifier; used for the download URL and output name.
#   df:   per-element box table; columns 1-4 look like corner coordinates and
#         column 5 a gene label -- inferred from usage, confirm with callers.
#   gois: genes of interest; column 1 = id used in `expr`, column 2 = label
#         matched against df's column 5.
#   expr: expression table; column 1 = gene id, remaining columns = samples.
#   cand: labels of candidate genes that additionally get a black box.
#
# Returns: character vector c(id, "genes in total:", nmb_genes, ovl).
#
# NOTE(review): requires the external `wget` and `rm` binaries (not portable)
# and the png package. readPNG() yields intensities in [0, 1], while the
# colour constants below (255/155/100) look like a 0-255 scale -- writePNG
# may clip or reject values > 1; confirm against real output.
modifyPlot=function(id, df, gois, expr, cand){
  options(stringsAsFactors=FALSE)  # global side effect; not restored on exit
  # Fetch the pathway image into the working directory.
  system(paste("wget http://www.kegg.jp/kegg/pathway/map/",id,".png",sep=""),ignore.stdout = TRUE, ignore.stderr = TRUE)
  img=readPNG(paste(id,".png",sep=""))
  nmb_genes=0; ovl=0
  for(x in 1:dim(df)[1]){
    # Box corners (A,B)-(C,D) and the element's gene label E; +3 pads the box.
    A=df[x,1];B=df[x,2];C=df[x,3];D=df[x,4];E=df[x,5]
    A=as.integer(A);B=as.integer(B);C=as.integer(C);D=as.integer(D)
    A=A;B=B+3;C=C+3;D=D+3
    if(dim(df)[1]>0 && E %in% gois[,2]){
      # Candidate genes get an opaque black rectangle over the padded box.
      if(E %in% cand){
        for(p1 in (A-3):(C+3)){ for(p2 in (B-3):(D+3)){ img[p2,p1,1]=0; img[p2,p1,2]=0; img[p2,p1,3]=0; img[p2,p1,4]=1 }}
        ovl=ovl+1
      }
      c_g=subset(gois,gois[,2]==E); c_g=c_g[1,1]
      # One vertical stripe per sample column, coloured by the gene's
      # expression quartile among this sample's positive values.
      for(p in 2:dim(expr)[2]){
        my_expr=subset(expr,expr[,1]==c_g); my_expr=my_expr[1,p]
        all_expr=expr[,p]; all_expr=all_expr[all_expr>0]
        nmb_genes=nmb_genes+1
        # Horizontal extent of this sample's stripe inside the box.
        A_=A+(p-2)*(C-A)/(dim(expr)[2]-1); C_=A+(p-1)*(C-A)/(dim(expr)[2]-1)
        if(length(my_expr) > 0){
          if(!is.na(my_expr)){
            # NOTE(review): when sum(my_expr)==0 this branch assigns no
            # colour, yet the fill loop below still runs -- on the first
            # occurrence col1/col2/col3 are undefined (error), and later the
            # previous iteration's stale colours are painted. Confirm intent.
            if(sum(my_expr)==0){
            }else if(my_expr>0 && my_expr<=quantile(all_expr,0.25)){ col1=255;col2=255;col3=1
            }else if(my_expr>=quantile(all_expr,0.25) && my_expr<quantile(all_expr,0.50)){ col1=155;col2=1;col3=155
            }else if(my_expr>=quantile(all_expr,0.50) && my_expr<quantile(all_expr,0.75)){ col1=1;col2=100;col3=200
            }else if(my_expr>=quantile(all_expr,0.75) && my_expr<=quantile(all_expr,1)){ col1=1;col2=255;col3=255 }
            for(p1 in A_:C_){ for(p2 in B:D){ img[p2,p1,1]=col1;img[p2,p1,2]=col2;img[p2,p1,3]=col3;img[p2,p1,4]=1 }}
          }
        }
      }
    }
  }
  cur_entry=(c(id,"genes in total:",nmb_genes,ovl))
  # Remove the downloaded original and write the annotated copy.
  system(paste("rm",paste(id,".png",sep="")))
  writePNG(img,paste(id,"_modified",".png",sep=""))
  return(cur_entry)
}
|
84cf7e3a50ce67055c516ebbfe70bdf56bcc0b18
|
37794cfdab196879e67c3826bae27d44dc86d7f7
|
/Math/Matrix.Special.Vandermonde.R
|
d7f5dc9fb20143795f05167ec3f3c85cf4b5d271
|
[] |
no_license
|
discoleo/R
|
0bbd53a54af392ef53a6e24af85cec4f21133d17
|
e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff
|
refs/heads/master
| 2023-09-05T00:43:32.381031
| 2023-08-31T23:03:27
| 2023-08-31T23:03:27
| 213,750,865
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,762
|
r
|
Matrix.Special.Vandermonde.R
|
########################
###
### Leonard Mada
### [the one and only]
###
### Special Matrices
### Quasi-Vandermonde
###
### draft v.0.1b-sign
### Quasi-Vandermonde Matrix
# - same determinant as a Vandermonde matrix;
# - useful for Fraction Decomposition of polynomial fractions;
# TODO: proper sign;
# Build the "quasi-Vandermonde" matrix for the nodes x: column `col` holds
# the elementary symmetric polynomial sums of x[-col], lowest order first
# (1, e1, e2, ..., e_{n-1}). Its determinant matches a Vandermonde
# determinant up to sign (sign handling is still a TODO upstream).
matrix.quasiVand = function(x) {
  # c(1, e1(v), e2(v), ..., e_len(v)): all elementary symmetric sums of v.
  allCoef = function(v) {
    n_nodes = length(v)
    higher = vapply(seq(2, n_nodes),
                    function(k) sum(combn(v, k, FUN = prod)),
                    numeric(1))
    c(1, sum(v), higher)
  }
  size = length(x)
  out = array(0, c(size, size))
  for (col in seq_len(size)) {
    out[, col] = allCoef(x[-col])
  }
  return(out)
}
# Vandermonde determinant of the nodes x, computed directly as the product
# of all pairwise differences x[j] - x[i] over i < j.
det.Vand = function(x) {
  prod(combn(x, 2, FUN = diff))
}
# Partial-fraction decomposition coefficients a for
#   1 / prod(t - x) == sum(a / (t - x))
# computed from cofactor Vandermonde determinants of the node set x.
# Behavior preserved exactly, including the upstream "TODO: proper sign"
# for general n.
coef.fr = function(x) {
  n = length(x)
  numerators = vapply(seq_len(n), function(id) {
    cofactor = det.Vand(x[-id])
    # alternate the sign along the cofactor expansion
    if (id %% 2 == 0) -cofactor else cofactor
  }, numeric(1))
  # TODO: proper sign;
  denom = det.Vand(x)
  if (n %% 2 == 1) denom = -denom
  numerators / denom
}
### Examples:
###
# Sanity checks: det() of the quasi-Vandermonde matrix should match the
# direct Vandermonde product (up to sign for some sizes, see TODOs), and the
# partial-fraction identity 1/prod(xx - x) == sum(a / (xx - x)) is evaluated
# at the test point xx = 3^(1/3).
x = sqrt(c(2,3,5,7))
#
m = matrix.quasiVand(x)
det(m)
det.Vand(x)
# Fraction Decomposition
xx = 3^(1/3)
a = coef.fr(x)
1/prod(xx - x) # ==
sum(a / (xx - x))
###
x = sqrt(c(2,3,5,7,11))
#
m = matrix.quasiVand(x)
det(m)
det.Vand(x)
# Fraction Decomposition
xx = 3^(1/3)
a = coef.fr(x)
1/prod(xx - x) # ==
sum(a / (xx - x))
###
x = sqrt(c(2,3,5,7,11,13))
# TODO: sign
m = matrix.quasiVand(x)
det(m)
det.Vand(x)
# Fraction Decomposition
xx = 3^(1/3)
a = coef.fr(x)
1/prod(xx - x) # ==
sum(a / (xx - x))
###
x = sqrt(c(2,3,5,7,11,13, 17))
# TODO: sign
m = matrix.quasiVand(x)
det(m)
det.Vand(x)
# Fraction Decomposition
xx = 3^(1/3)
a = coef.fr(x)
1/prod(xx - x) # ==
sum(a / (xx - x))
|
d372e962a7031e0989e647e6a678bb4bc8a0217e
|
a873ce543ae4a83e2fdd5ab6670b4957d2203694
|
/USP delete/Mello codes/hierarchical/hierarchical.r
|
859d26bb4da3c8545c326ee918d5307e0ed98657
|
[] |
no_license
|
antonioMoreira/Machine-Learning
|
e576a5ee15ca3b68dafbe94ae2b838fe2de69d46
|
2f8a3befb93491377673d353105e3f711a4bb7ec
|
refs/heads/master
| 2020-04-20T10:51:18.939090
| 2019-04-11T09:49:58
| 2019-04-11T09:49:58
| 168,800,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,223
|
r
|
hierarchical.r
|
# Agglomerative hierarchical clustering over a dissimilarity matrix.
#
# Args:
#   similarities: square numeric matrix of pairwise dissimilarities
#     (smaller = more similar; each step merges the pair of clusters with
#     the minimum linkage value).
#   criterion: linkage rule -- "single" (minimum), "complete" (maximum) or
#     "average" (mean).
#
# Returns:
#   The dendrogram as a named list of partitions, keyed by the linkage
#   value at which each merge happened ("0" is the initial singleton
#   partition). Each partition is a list of integer vectors of row indices.
#   Note: two merges at the same linkage value share a key, so the later
#   one overwrites the earlier (preserved from the original implementation).
hierarchical <- function(similarities, criterion) {
  if (nrow(similarities) != ncol(similarities))
    return ("Matrix must be squared.")  # kept for backward compatibility
  # BUG FIX: the original silently used a linkage value of 0 for any
  # unrecognized criterion, producing arbitrary merge orders; fail fast.
  if (!criterion %in% c("single", "complete", "average"))
    stop("criterion must be one of 'single', 'complete', 'average'",
         call. = FALSE)
  dendrogram <- list()
  partition <- lapply(seq_len(nrow(similarities)), function(i) i)
  level <- 0
  dendrogram[[paste(level)]] <- partition
  while (length(partition) > 1) {
    # Find the pair of clusters with the smallest linkage value.
    best <- Inf
    A.id <- -1
    B.id <- -1
    n <- length(partition)
    for (i in seq_len(n - 1)) {
      for (j in (i + 1):n) {
        elements <- similarities[partition[[i]], partition[[j]]]
        value <- switch(criterion,
                        single   = min(elements),
                        complete = max(elements),
                        average  = mean(elements))
        if (value < best) {
          best <- value
          A.id <- i
          B.id <- j
        }
      }
    }
    # Rebuild the partition: the merged pair takes cluster A.id's position
    # (always the smaller index) and B.id is dropped.
    merged <- c(partition[[A.id]], partition[[B.id]])
    new.partition <- list()
    k <- 1
    for (i in seq_len(n)) {
      if (i == A.id) {
        new.partition[[k]] <- merged
        k <- k + 1
      } else if (i != B.id) {
        new.partition[[k]] <- partition[[i]]
        k <- k + 1
      }
    }
    level <- best
    dendrogram[[paste(level)]] <- new.partition
    partition <- new.partition
  }
  return (dendrogram)
}
|
27aa331f57ddd26136dec4f2f3c8b355b7979970
|
5095bbe94f3af8dc3b14a331519cfee887f4c07e
|
/ClimateChange/mkProjection.R
|
0a1f0faa3d4a0fc9d64fd0c226dbc1a57e34f598
|
[] |
no_license
|
sativa/apsim_development
|
efc2b584459b43c89e841abf93830db8d523b07a
|
a90ffef3b4ed8a7d0cce1c169c65364be6e93797
|
refs/heads/master
| 2020-12-24T06:53:59.364336
| 2008-09-17T05:31:07
| 2008-09-17T05:31:07
| 64,154,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,625
|
r
|
mkProjection.R
|
# Build a climate-change scenario .met file from a historical SILO series.
# Expects quantileChanges, meanChanges, inMetFile, outBaselineFile,
# outScenarioFile, co2 and nYears to be defined before sourcing (example
# values below); read.met()/write.apsim() come from read.SILO.R.
#quantileChanges<-"Birchip.quantileTrendChange.csv"
#co2<-350
#nYears<-40
#meanChanges<-"Woomelang.change.csv"
#inMetFile<-"Woomelang.ppd.met"
#outBaselineFile<-"Woomelang.baseline.met"
#outScenarioFile<-"Woomelang.scenario.met"
library(seas)
source ("read.SILO.R")
# Monthly quantile-trend and mean changes, joined on month.
c1 <- read.csv(quantileChanges)
c2 <- read.csv(meanChanges)
change <- merge(c1, c2, by="month")
# Develop the change scenario from the historical series.
historicalData<-read.met(file(inMetFile))
hdate <- as.Date(paste(historicalData$year,historicalData$day),"%Y %j")
historicalData$month <- as.integer(format.Date(hdate, "%m"))
scenario <- list(year=historicalData$year,
                 month=historicalData$month,
                 day=historicalData$day,
                 rain=historicalData$rain,
                 maxt=historicalData$maxt,
                 mint=historicalData$mint)
# Ancillary variables via linear model
# radn/pan/vp are regenerated from maxt so they stay consistent after the
# temperature series is perturbed below.
radnLm <- lm(radn ~ maxt, historicalData)
panLm <- lm(pan ~ maxt, historicalData)
vpLm <- lm(vp ~ maxt, historicalData)
scenario$radn<-predict(radnLm,newdata=data.frame(maxt=scenario$maxt))
scenario$pan<-predict(panLm,newdata=data.frame(maxt=scenario$maxt))
scenario$vp<-predict(vpLm,newdata=data.frame(maxt=scenario$maxt))
for (what in c("latitude", "tav", "amp")) {
  scenario[[what]] <- historicalData[[what]]
}
# Write the unperturbed baseline before applying any change.
write.apsim(file=outBaselineFile, scenario)
scenario<-as.data.frame(scenario)
for (month in 1:12) {
  df <- scenario[scenario$month == month,]
  # Temperature scenario
  for (v in c("maxt", "mint")) {
    # Build interpolation function for each trend.
    ## x is cdf position, y is oC/year trend
    fun<-approxfun(x=c(4,16,28),
                   y=c(change[[paste(v,".m90",sep="")]][month],
                       change[[paste(v,".m50",sep="")]][month],
                       change[[paste(v,".m10",sep="")]][month]), rule=2)
    for (year in unique(scenario$year)) {
      # 1. Change dispersion by bending cdf
      old <- df[[v]][df$year == year]
      iold <- order(old, decreasing=T)
      cdf <- old[iold]
      newcdf <- nYears * fun(1:length(cdf)) + cdf
      new <- vector()
      new[iold]<-newcdf
      # 2. Shift mean, while accounting for change introduced in step 1.
      if (v == "maxt") {
        shift2 <- change$maxtProp[month] * change$tmean[month]
      } else {
        shift2 <- (1.0 - change$maxtProp[month]) * change$tmean[month]
      }
      shift1 <- mean(new) - mean(old)
      new <- new + (shift2 - shift1)
      if (length(new) > 0) {
        scenario[[v]][scenario$month == month & scenario$year == year] <- new
      }
    }
  }
  # Rainfall scenario
  # Scale every wet day in the month so the monthly total moves by
  # change$rain (floored at zero).
  # NOTE(review): `scale <- new / old` divides by zero for a completely dry
  # month; guarded only indirectly by any(ix) -- confirm old > 0 whenever
  # ix has wet days.
  for (year in unique(scenario$year)) {
    old <- sum(scenario$rain[scenario$year==year & scenario$month == month])
    new <- max(old + change$rain[month], 0.0)
    scale <- new / old
    ix <- scenario$month == month & scenario$year == year & scenario$rain > 0
    if (any(ix)) {
      scenario$rain[ix] <- scenario$rain[ix] * scale
    }
  }
}
# Re-derive the ancillary variables from the perturbed maxt and attach CO2.
scenario<-as.list(scenario)
scenario$co2 <- co2
for (what in c("latitude", "tav", "amp")) {
  scenario[[what]] <- historicalData[[what]]
}
scenario$radn<-predict(radnLm,newdata=data.frame(maxt=scenario$maxt))
scenario$pan<-predict(panLm,newdata=data.frame(maxt=scenario$maxt))
scenario$vp<-predict(vpLm,newdata=data.frame(maxt=scenario$maxt))
write.apsim(file=outScenarioFile, scenario)
|
f18858bcf72eaf4e73b409fc93702278fccb1d33
|
6a1d8d8db74082f5166ac697e2f04eadc0cdf6df
|
/gdistance/doc/gdistance.R
|
4558ddcab5caca384bb9effac91b9773eb78336a
|
[] |
no_license
|
bwtian/rLibLin
|
b4ba89314d4dbf1a6bfee964bd77ee1ea4c212c2
|
b1e9f15d6b5618472aecb652eb47e10db48ecc3b
|
refs/heads/master
| 2021-01-22T13:38:10.439482
| 2015-05-04T01:40:36
| 2015-05-04T01:40:36
| 25,282,037
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,177
|
r
|
gdistance.R
|
### R code from vignette source 'gdistance.Rnw'
### Encoding: ISO8859-1
### NOTE(review): this file is machine-extracted (Stangle) from the package
### vignette gdistance.Rnw; prefer editing the .Rnw source over this file.
###################################################
### code chunk number 1: figure0
###################################################
library("gdistance")
# 4x4 raster of ones; draw its transition graph for 4-, 8- and 16-neighbour
# connectivity, overlaid on the light-gray cell grid built from x/y below.
rex <- raster(matrix(1,4,4))
a <- rep(c(1.3333),times=5)
b <- c(-1.3333, -0.6666, 0, 0.6666, 1.3333)
x1 <- c(-a, b)
x2 <- c(a, b)
y1 <- c(b, -a)
y2 <- c(b, a)
x <- cbind(x1,x2)
y <- cbind(y1,y2)
par(mfrow=c(1,3), mar = c(5,5,4,2) + 0.1,
    oma = c(0,0,0,0) + 0.1)
x4 <- transition(rex, mean, 4)
g4 <- graph.adjacency(transitionMatrix(x4), mode="undirected")
gridLayout <- xyFromCell(x4, 1:ncell(x4))
plot(g4,layout=gridLayout, edge.color="black", vertex.color="black", vertex.label=NA, main="4 cells")
for(i in 1:dim(x)[1]){lines(x[i,],y[i,], col="lightgray")}
plot(g4, layout=gridLayout, add=TRUE, edge.color="black", vertex.color="black", vertex.label=NA)
x8 <- transition(rex, mean, 8)
g8 <- graph.adjacency(transitionMatrix(x8), mode="undirected")
plot(g8,layout=gridLayout, edge.color="black", vertex.color="black", vertex.label=NA, main="8 cells")
for(i in 1:dim(x)[1]){lines(x[i,],y[i,], col="lightgray")}
plot(g8, layout=gridLayout, add=TRUE, edge.color="black", vertex.color="black", vertex.label=NA)
x16 <- transition(rex, mean, 16)
g16 <- graph.adjacency(transitionMatrix(x16), mode="undirected")
# BUG FIX: removed a stray empty argument (", ,") before main=; an empty
# argument in the dots raises "argument is empty" when the plot method
# evaluates them, and it matches nothing either way.
plot(g16, layout=gridLayout, edge.color="black", vertex.color="black", vertex.label=NA, main="16 cells")
for(i in 1:dim(x)[1]){lines(x[i,],y[i,], col="lightgray")}
plot(g16,layout=gridLayout, add=TRUE, edge.color="black", vertex.color="black", vertex.label=NA)
###################################################
### code chunk number 2: gdistanc00
###################################################
options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE)
###################################################
### code chunk number 3: gdistance-1
###################################################
# Small 3x3 raster used for the transition-object examples below.
r <- raster(ncol=3,nrow=3)
r[] <- 1:ncell(r)
r
###################################################
### code chunk number 4: figure1
###################################################
plot(r, main="r")
text(r)
###################################################
### code chunk number 5: gdistance-2
###################################################
plot(r, main="r")
text(r)
###################################################
### code chunk number 6: gdistance-3
###################################################
library("gdistance")
r[] <- 1
tr1 <- transition(r, transitionFunction=mean, directions=8)
###################################################
### code chunk number 7: gdistance-4
###################################################
tr1
###################################################
### code chunk number 8: gdistance-5
###################################################
# Asymmetric transition function -> directed transition matrix.
r[] <- runif(9)
ncf <- function(x) max(x) - x[1] + x[2]
tr2 <- transition(r, ncf, 4, symm=FALSE)
tr2
###################################################
### code chunk number 9: gdistance-6
###################################################
# Arithmetic on Transition objects works elementwise.
tr3 <- tr1*tr2
tr3 <- tr1+tr2
tr3 <- tr1*3
tr3 <- sqrt(tr1)
###################################################
### code chunk number 10: gdistance-7
###################################################
tr3[cbind(1:9,1:9)] <- tr2[cbind(1:9,1:9)]
tr3[1:9,1:9] <- tr2[1:9,1:9]
tr3[1:5,1:5]
###################################################
### code chunk number 11: gdistance-8 (eval = FALSE)
###################################################
## image(transitionMatrix(tr1))
###################################################
### code chunk number 12: figure2
###################################################
print(image(transitionMatrix(tr1)))
###################################################
### code chunk number 13: figure3
###################################################
plot(raster(tr3), main="raster(tr3)")
###################################################
### code chunk number 14: gdistance-9
###################################################
# Geographic correction: type "c" for least-cost, "r" for random-walk use.
tr1C <- geoCorrection(tr1, type="c", multpl=FALSE)
tr2C <- geoCorrection(tr2, type="c", multpl=FALSE)
###################################################
### code chunk number 15: gdistance-10
###################################################
r3 <- raster(ncol=18, nrow=9)
r3 <- setValues(r3, runif(18*9)+5)
tr3 <- transition(r3, mean, 4)
tr3C <- geoCorrection(tr3, type="c", multpl=FALSE, scl=TRUE)
tr3R <- geoCorrection(tr3, type="r", multpl=FALSE, scl=TRUE)
###################################################
### code chunk number 16: gdistance-11
###################################################
CorrMatrix <- geoCorrection(tr3, type="r", multpl=TRUE, scl=TRUE)
tr3R <- tr3 * CorrMatrix
###################################################
### code chunk number 17: gdistance-12
###################################################
# Three sample points (lon/lat) reused by the distance functions below.
sP <- cbind(c(-100, 100, -100), c(-50, 50, 50))
###################################################
### code chunk number 18: gdistance-13
###################################################
costDistance(tr3C, sP)
commuteDistance(tr3R, sP)
rSPDistance(tr3R, sP, sP, theta=1e-12, totalNet="total")
###################################################
### code chunk number 19: gdistance-14
###################################################
origin <- SpatialPoints(cbind(0, 0))
rSPraster <- passage(tr3C, origin, sP[3,], theta=3)
###################################################
### code chunk number 20: figure4
###################################################
plot(rSPraster, main="rSPraster")
###################################################
### code chunk number 21: gdistance-15
###################################################
# Joint and divergent passage probabilities between two corridors.
r1 <- passage(tr3C, origin, sP[1,], theta=1)
r2 <- passage(tr3C, origin, sP[3,], theta=1)
rJoint <- min(r1, r2) #Figure 6
rDiv <- max(max(r1, r2) * (1 - min(r1, r2)) - min(r1, r2), 0) #Figure 7
###################################################
### code chunk number 22: figure5
###################################################
plot(rJoint, main="rJoint")
###################################################
### code chunk number 23: figure6
###################################################
plot(rDiv, main="rDiv")
###################################################
### code chunk number 24: gdistance-17
###################################################
pathInc(tr3C, origin, sP)
###################################################
### code chunk number 25: figure7
###################################################
# Hiking-speed function of slope (Tobler-style), used in the Maungawhau example.
plot(function(x)exp(-3.5 * abs(x + 0.05)), -1, 1, xlab="slope", ylab="speed (m/s)")
lines(cbind(c(0,0),c(0,3.5)), lty="longdash")
###################################################
### code chunk number 26: gdistance-18
###################################################
r <- raster(system.file("external/maungawhau.grd",
                        package="gdistance"))
###################################################
### code chunk number 27: gdistance-19
###################################################
heightDiff <- function(x){x[2] - x[1]}
hd <- transition(r,heightDiff,8,symm=FALSE)
slope <- geoCorrection(hd, scl=FALSE)
###################################################
### code chunk number 28: gdistance-20
###################################################
# Convert slope to walking speed on every adjacent cell pair.
adj <- adjacent(r, cells=1:ncell(r), pairs=TRUE, directions=8)
speed <- slope
speed[adj] <- exp(-3.5 * abs(slope[adj] + 0.05))
###################################################
### code chunk number 29: gdistance-21
###################################################
x <- geoCorrection(speed, scl=FALSE)
###################################################
### code chunk number 30: gdistance-22
###################################################
# Shortest paths are direction-dependent because speed is asymmetric.
A <- c(2667670,6479000)
B <- c(2667800,6479400)
AtoB <- shortestPath(x, A, B, output="SpatialLines")
BtoA <- shortestPath(x, B, A, output="SpatialLines")
###################################################
### code chunk number 31: fig8plot
###################################################
plot(r, main="")
lines(AtoB, col="red", lwd=2)
lines(BtoA, col="blue")
text(A[1]-10,A[2]-10,"A")
text(B[1]+10,B[2]+10,"B")
###################################################
### code chunk number 32: fig8
###################################################
plot(r, main="")
lines(AtoB, col="red", lwd=2)
lines(BtoA, col="blue")
text(A[1]-10,A[2]-10,"A")
text(B[1]+10,B[2]+10,"B")
###################################################
### code chunk number 33: gdistance-24
###################################################
# Genetic vs geographic/cost/resistance distance example for Europe.
Europe <- raster(system.file("external/Europe.grd",
                             package="gdistance"))
Europe[is.na(Europe)] <- 0
data(genDist)
data(popCoord)
pC <- as.matrix(popCoord[c("x","y")])
###################################################
### code chunk number 34: gdistance.Rnw:724-726
###################################################
plot(Europe, main="")
text(pC[,1],pC[,2],unlist(popCoord["Population"]),cex=.7)
###################################################
### code chunk number 35: gdistance-25
###################################################
geoDist <- pointDistance(pC, longlat=TRUE)
geoDist <- as.dist(geoDist)
Europe <- aggregate(Europe,3)
tr <- transition(Europe, mean, directions=8)
trC <- geoCorrection(tr, "c", scl=TRUE)
trR <- geoCorrection(tr, "r", scl=TRUE)
cosDist <- costDistance(trC,pC)
resDist <- commuteDistance(trR, pC)
cor(genDist,geoDist)
cor(genDist,cosDist)
cor(genDist,resDist)
###################################################
### code chunk number 36: gdistance-26
###################################################
origin <- unlist(popCoord[22,c("x","y")])
pI <- pathInc(trC, origin=origin, from=pC,
              functions=list(overlap))
cor(genDist,pI[[1]])
|
a525dade290dad1fd13652eba6cfd5ed4a0cbb38
|
9d524b354021472c35c60689ba83eb1e97544253
|
/Copia de Script_02JUL.R
|
aa6abdd28e6aa624d9561a5710674934a83fae21
|
[] |
no_license
|
fdezfari/R-programming
|
0ca29767269e2f70ff1f01b1e841d40412d4756a
|
5e2a96a1cad4857bba9979caa0b9faa5ad084cee
|
refs/heads/master
| 2021-05-19T17:22:21.339682
| 2020-09-04T15:12:17
| 2020-09-04T15:12:17
| 252,043,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,072
|
r
|
Copia de Script_02JUL.R
|
##Library import
library(readr)
library(superheat)
library(ggplot2)
library(cluster)
library(tidyselect)
library(dplyr)
library(tibble)
##File import and preparation #if imported manually
# BUG FIX: the original called readr::read_csv() with base read.csv()
# arguments (stringsAsFactors, header, row.names), which fails with an
# "unused argument" error. The base reader was clearly intended (see the
# commented-out data.frame/row.names handling), so use read.csv().
DSS <- read.csv("MatchDS_21JUN.csv", stringsAsFactors=FALSE)
#DSS <- as.data.frame(DSS)
#row.names(DSS) <- DSS$Name
#DSS <- DSS[,-1] #remove first column
DSSm <- as.matrix(DSS)
PDS <- read.csv("PerturbationDS_30JUN.csv", header=FALSE, row.names=NULL)
PDSm <- as.matrix(PDS)
##Heatmap with missing values
# NA cells are drawn in black; heat.pal sets the low-to-high gradient.
superheat(DSSm, scale = FALSE, heat.na.col = "black",
          heat.pal= c("black", "deeppink4", "lightgreen"))
##Dendograms
superheat(DSSm, scale = FALSE, heat.na.col = "black",
          col.dendrogram = TRUE)
#Generating clusters {not added}
# Seed fixed so the k-means row clustering is reproducible.
set.seed(2)
superheat(DSSm, scale = FALSE, heat.na.col = "black",
          pretty.order.rows = TRUE,
          n.clusters.rows = 3,
          left.label = 'variable')
##Titles
# BUG FIX: the argument was misspelled `title.aligment`; the later calls in
# this script use the correct `title.alignment`, so the title alignment
# never took effect here.
superheat(DSSm, scale = FALSE, heat.na.col = "black",
          title = "Expression of macrophage markers in different conditions",
          title.size = 5, title.alignment = "center")
#Row and column titles
superheat(DSSm, scale = FALSE, heat.na.col = "black",
          title = "Expression of macrophage markers in different conditions",
          title.size = 5, title.alignment = "center",
          row.title = "Condition", row.title.size = 4,
          column.title = "Marker", column.title.size = 4)
##Adding text
superheat(X = DSSm, scale = FALSE, heat.na.col = "black",
title = "Expression of macrophage markers in different conditions",
title.size = 5,
row.title = "Condition", row.title.size = 4,
column.title = "Marker", column.title.size = 4,
X.text = round(PDSm,1), X.text.size = 2) #here I get the error
##Labels
superheat(DSSm, scale = FALSE, heat.na.col = "black",
title = "Expression of macrophage markers in different conditions",
title.size = 5, title.alignment = "center",
row.title = "Condition", row.title.size = 4,
column.title = "Marker", column.title.size = 4,
force.bottom.label = TRUE, force.left.label = TRUE,
bottom.label.size = 0.2, left.label.size = 0.2,
bottom.label.text.size = 3, left.label.text.size = 3,
bottom.label.text.alignment = "center",
bottom.label.text.angle = 90, bottom.label.col = "white",
left.label.text.alignment = "left", left.label.col = "white")
##Grids
superheat(DSSm, scale = FALSE, heat.na.col = "black",
title = "Expression of macrophage markers in different conditions",
title.size = 5, title.alignment = "center",
row.title = "Condition", row.title.size = 4,
column.title = "Marker", column.title.size = 4,
force.bottom.label = TRUE, force.left.label = TRUE,
bottom.label.size = 0.2, left.label.size = 0.2,
bottom.label.text.size = 3, left.label.text.size = 3,
bottom.label.text.alignment = "center",
bottom.label.text.angle = 90, bottom.label.col = "white",
left.label.text.alignment = "left", left.label.col = "white",
grid.hline.col = "black", grid.vline.col = "black")
##Legend
superheat(DSSm, scale = FALSE, X.text = round(PDSm,1), X.text.size = 2,
heat.pal= c("coral1", "lightgreen"),
title = "Expression of macrophage markers in different conditions",
title.size = 5,
heat.na.col = "black",
heat.lim = c(0,1),
row.title = "Condition", row.title.size = 4,
column.title = "Marker", column.title.size = 4,
force.bottom.label = TRUE, force.left.label = TRUE,
bottom.label.size = 0.2, left.label.size = 0.2,
bottom.label.text.size = 3, left.label.text.size = 3,
bottom.label.text.alignment = "center",
bottom.label.text.angle = 90, bottom.label.col = "white",
left.label.text.alignment = "left", left.label.col = "white",
grid.hline.col = "black", grid.vline.col = "black",
legend = FALSE)
# Create png file in your directory
png("superheat_21JUN_perturbations.png", height = 500, width = 900)
# Save the plot in the image created
superheat(DSSm, scale = FALSE, heat.na.col = "black",
title = "Expression of macrophage markers in different conditions",
title.size = 5, title.alignment = "center",
row.title = "Condition", row.title.size = 4,
column.title = "Marker", column.title.size = 4,
force.bottom.label = TRUE, force.left.label = TRUE,
bottom.label.size = 0.2, left.label.size = 0.2,
bottom.label.text.size = 3, left.label.text.size = 3,
bottom.label.text.alignment = "center",
bottom.label.text.angle = 90, bottom.label.col = "white",
left.label.text.alignment = "left", left.label.col = "white",
grid.hline.col = "black", grid.vline.col = "black",
legend.breaks = c(0,1), legend.text.size = 8)
# Close the image
dev.off()
|
5f5408fa37045fa4392405e3cd0dcfba497eac9e
|
b8841bf56594f65fe13115b7f8318b97c9dddb1f
|
/ch_intro_to_data_oi_biostat/figures/incomeLifeExpectancyLog/incomeLifeExpectancyLog.R
|
230677efca0d00c29b881ed781a5434a551b7f9f
|
[] |
no_license
|
OI-Biostat/oi_biostat_text
|
5cdab8191714beef6a67d61d1f69982e4d8c13aa
|
a333893cffe830b8ce1e31d9226a9a50906605e4
|
refs/heads/master
| 2023-05-28T12:33:51.243348
| 2022-11-30T20:59:44
| 2022-11-30T20:59:44
| 281,164,408
| 7
| 7
| null | 2022-11-30T21:02:43
| 2020-07-20T16:06:49
|
TeX
|
UTF-8
|
R
| false
| false
| 502
|
r
|
incomeLifeExpectancyLog.R
|
# Scatterplot of per-capita income vs life expectancy (both log-scaled),
# using the 2011 World Development Indicators data, exported as a PDF.
library(openintro)
library(oibiostat)
data("wdi.2011")
data(COL)
# open a 6 x 3.3 inch PDF device with tight margins (myPDF is from openintro)
myPDF("incomeLifeExpectancyLog.pdf",
6,
3.3,
mar = c(3, 4.5, 0.5, 1.2),
mgp = c(2.8, 0.5, 0))
# filled points in an openintro palette colour
plot(log(wdi.2011$gdp.per.capita), log(wdi.2011$life.expect),
pch = 19,
cex = 1.3,
col = COL[1, 3],
ylab = "Life Expectancy (log years)")
# overlay open circles so each point gets a solid outline
points(log(wdi.2011$gdp.per.capita), log(wdi.2011$life.expect),
cex = 1.3,
col = COL[1])
# x-axis label placed manually to use the reduced bottom margin
mtext("Per Capita Income (log USD)", 1, 1.9)
dev.off()
|
b9197e584ce6008da1e5ac4bda2b842d51b96cf6
|
62222419e16bc93a6db5eec771f527e67ff32e12
|
/man/stamppNeisD.Rd
|
5e2a0b84469ed738cf0860be9ffd366d3ce8c5ef
|
[] |
no_license
|
UoA-eResearch/StAMPP
|
a02a0a750617bc1cc3a584c5eaa614dd0a3383ac
|
4b991686e8ce3dcc7d417a543ea5307db456efc9
|
refs/heads/master
| 2020-04-22T18:42:52.128266
| 2019-02-13T21:55:33
| 2019-02-13T21:55:33
| 170,585,604
| 0
| 0
| null | 2019-02-13T21:55:45
| 2019-02-13T21:55:44
| null |
UTF-8
|
R
| false
| false
| 1,203
|
rd
|
stamppNeisD.Rd
|
\name{stamppNeisD}
\alias{stamppNeisD}
\title{
Genetic Distance Calculation
}
\description{
This function calculates Nei's genetic distance (Nei 1972) between populations or individuals.
}
\usage{
stamppNeisD(geno, pop = TRUE)
}
\arguments{
\item{geno}{
a data frame containing allele frequency data generated from stamppConvert, or a genlight object containing genotype data, individual IDs, population IDs and ploidy levels.
}
\item{pop}{
logical. TRUE if genetic distance should be calculated between populations, FALSE if it should be calculated between individuals.
}
}
\value{
A object of class matrix which contains the genetic distance between each population or individual.
}
\references{
Nei M (1972) Genetic Distance between Populations. The American Naturalist 106, 283-292.
}
\author{
LW Pembleton
}
\examples{
# import genotype data and convert to allele frequecies
data(potato.mini, package="StAMPP")
potato.freq <- stamppConvert(potato.mini, "r")
# Calculate genetic distance between individuals
potato.D.ind <- stamppNeisD(potato.freq, FALSE)
# Calculate genetic distance between populations
potato.D.pop <- stamppNeisD(potato.freq, TRUE)
}
|
c4d7965925cff4e974534383405b72fd30c465d2
|
a88ff00d49b14c84059a6fa74af25ee4c531d9fe
|
/man/generateTKLY1.Rd
|
7dab920a4b4ccc034cad9e747b814639128ceccf
|
[] |
no_license
|
danielhorn/moobench
|
b4bcd7f7959f583fb6a9a895ce4c9e0af1a84d34
|
dacf01a327c9d8cd2ffc5f6be261a2af9b95e4fe
|
refs/heads/master
| 2016-09-06T11:18:22.257466
| 2015-11-19T11:46:40
| 2015-11-19T11:46:40
| 35,089,700
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 527
|
rd
|
generateTKLY1.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tkly1.R
\name{generateTKLY1}
\alias{TKLY1}
\alias{generateTKLY1}
\alias{tkly1}
\title{TKLY1 test function generator.}
\usage{
generateTKLY1(in.dim = 4L, out.dim = 2L)
}
\arguments{
\item{in.dim}{[\code{integer(1)}] \cr
Size of parameter space. Must be four.}
\item{out.dim}{[\code{integer(1)}] \cr
Size of target space. Must be two.}
}
\value{
A \code{mooFunction}.
}
\description{
TKLY1 test function generator.
}
\references{
wfg [62]
}
|
d5c66c1a4d9f6eba46240d3dc5f00dbf98086cd8
|
6029dcf3ddcd0a7dbfa0538daabb0585f15631e6
|
/R/MPGE.R
|
4ede1cea4656791f592e1f2a14e46789235fd75e
|
[] |
no_license
|
cran/MPGE
|
786ce4818133d9e46c9ec68bcc36d05af6ca084e
|
d4c2aa730662ff7e911cea7656973928472eea8e
|
refs/heads/master
| 2023-01-04T02:44:25.646524
| 2020-10-23T14:40:07
| 2020-10-23T14:40:07
| 307,955,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,782
|
r
|
MPGE.R
|
#' MPGE: an R package to implement a two-step approach to testing overall effect of
#' gene-environment interaction for multiple phenotypes.
#'
#'Interaction between a genetic variant (e.g., a SNP) and an environmental variable
#' (e.g., physical activity) can have a shared effect on multiple phenotypes
#' (e.g., LDL and HDL). MPGE is a two-step method to test for an overall interaction
#' effect on multiple phenotypes. In first step, the method tests for an overall
#' marginal genetic association between the genetic variant and the multivariate
#' phenotype. In the second step, SNPs which show an evidence of marginal
#' overall genetic effect in the first step are prioritized while testing
#' for an overall GxE effect. That is, a more liberal threshold of
#' significance level is considered in the second step while testing for
#' an overall GxE effect for these promising SNPs compared to the other
#' SNPs.
#'
#' The package consists of the following functions:
#'\code{\link{mv_G_GE}}, \code{\link{WHT}}; \code{\link{SST}}.
#'
#' @section Functions:
#' \describe{
#' \item{\code{\link{mv_G_GE}}}{for a batch of genetic variants, this function provides
#' two different p-values for each genetic variant, one from the test
#' of marginal overall genetic association with multiple phenotypes
#' , and the other from the test of overall GxE effect on multivariate
#' phenotype allowing for a possible marginal effect due to the genetic
#' variant and a marginal effect due to the environmental variable.}
#' \item{\code{\link{WHT}}}{this function implements the weighted multiple hypothesis testing
#' procedure to adjust for multiple testing while combining the two steps of
#' testing gene-environment interaction in the two-step GxE testing procedure,
#' given two sets of p-values obtained using the previous function mv_G_GE for
#' genome-wide genetic variants.}
#' \item{\code{\link{SST}}}{this function implements
#' the subset multiple hypothesis testing procedure to adjust for multiple
#' testing while combining the two steps of testing gene-environment
#' interaction based on the same two sets of p-values described above.}
#' }
#' @references A Majumdar, KS Burch, S Sankararaman, B Pasaniuc, WJ Gauderman, JS Witte (2020)
#' A two-step approach to testing overall effect of gene-environment interaction for multiple phenotypes.
#' bioRxiv, doi: https://doi.org/10.1101/2020.07.06.190256
#'
#'
#' @docType package
#' @importFrom stats lm
#' @importFrom utils capture.output
#' @importFrom utils read.table
#'
#' @name MPGE
#'
NULL
|
682b1ebbab46d380acd5c74288c5e97ebffec037
|
9eb5e211b5d15faf3df138093b2c51398863ae8e
|
/man/LMSquareLossL2.Rd
|
6a81463cb16a512f347575869ad7ea48905d715c
|
[] |
no_license
|
SixianZhang/CS499-Coding-Project-2
|
6a0c6e88ff7ae3f4b6f7852aecf4c9cc68b41c43
|
01df22916f8f5b6a0846706ac515c4d4c8586745
|
refs/heads/master
| 2020-04-23T23:18:01.529419
| 2019-04-08T05:08:32
| 2019-04-08T05:08:32
| 171,531,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,373
|
rd
|
LMSquareLossL2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LML2.R
\name{LMSquareLossL2}
\alias{LMSquareLossL2}
\title{Linear model L2 regularization with square loss}
\usage{
LMSquareLossL2(X.scaled.mat, y.vec, penalty, opt.thresh = 0.5,
initial.weight.vec, step.size = 0.01)
}
\arguments{
\item{X.scaled.mat}{a numeric matrix of size [n x p]}
\item{y.vec}{a numeric vector of length nrow(X.scaled.mat)}
\item{penalty}{a non-negative numeric scalar}
\item{opt.thresh}{a positive numeric scalar}
\item{initial.weight.vec}{a numeric vector of size ncol(X.scaled.mat)}
\item{step.size}{a numeric scalar, which is also greater than 0}
}
\value{
opt.weight the optimal weight vector of length ncol(X.scaled)
}
\description{
Training by using L2 regularization on a linear model with square loss .
Return the optimal weight vector for the given threshold and penalty.
}
\examples{
data(ozone, package = "ElemStatLearn")
y.vec <- ozone[, 1]
X.mat <- as.matrix(ozone[,-1])
num.train <- dim(X.mat)[1]
num.feature <- dim(X.mat)[2]
X.mean.vec <- colMeans(X.mat)
X.std.vec <- sqrt(rowSums((t(X.mat) - X.mean.vec) ^ 2) / num.train)
X.std.mat <- diag(num.feature) * (1 / X.std.vec)
X.scaled.mat <- t((t(X.mat) - X.mean.vec) / X.std.vec)
optimal.weight.vec <- LMSquareLossL2(X.scaled.mat, y.vec, penalty = 0.5, initial.weight.vec = c(rep(0, ncol(X.mat) + 1)))
}
|
53454f0b33666e8f35248d13a090a2e5c656ea12
|
eda1bda3269f28391674cadc55d47db0f323e8e4
|
/plot4.R
|
1c71ac1f5da88bcccbc005d1ab06a3576e9c0a74
|
[] |
no_license
|
iel19/ExData_Plotting1
|
c2237f62dd5e77015caa73a79751a1183bf854fb
|
54e6bc5a64ffeee6d0d79edda961a9646ec3f246
|
refs/heads/master
| 2021-01-18T06:59:39.323807
| 2014-11-05T16:58:55
| 2014-11-05T16:58:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,330
|
r
|
plot4.R
|
# Reads two days of household power consumption data and draws the 2x2 panel
# figure, saved as plot4.png.
data <- read.csv("household_power_consumption.txt", sep=";", na.strings = "?")
partial <- subset(data, Date %in% c( "1/2/2007", "2/2/2007"))
# build a proper datetime from the separate Date and Time columns
partial $ datetime <- as.POSIXct(paste(partial $ Date, partial $ Time),
                                 format = "%d/%m/%Y %H:%M:%S")
#
# We combine all previous graphs, using par function, and export
#
png(file = "plot4.png")
par(mfrow = c(2,2))
#upper left is plot2.png, with modified axes
plot(x = partial $ datetime, y = partial $ Global_active_power,
     type = "l", xlab = "", ylab = "Global Active Power")
#upper right is a graph of datetime versus voltage
plot(x = partial $ datetime, y = partial $ Voltage,
     type = "l", xlab = "datetime", ylab = "Voltage")
#bottom left is plot3.png
plot(partial $ datetime, partial $ Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(partial $ datetime, partial $ Sub_metering_2, col = "red")
lines(partial $ datetime, partial $ Sub_metering_3, col = "blue")
# FIX: the legend colours must follow the drawing order above
# (Sub_metering_1 = black, Sub_metering_2 = red, Sub_metering_3 = blue);
# the original listed blue/red swapped, mislabelling series 2 and 3.
# Also provide one lty entry per series instead of relying on recycling.
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1,1,1), col = c("black", "red", "blue"))
#bottom right is a graph of datetime versus Global_reactive_power
plot(x = partial $ datetime, y = partial $ Global_reactive_power,
     type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
282ffc1ffe40ea21399c716b3f59b365c3571c14
|
3f17ed44ae94cc7570aecd38fe075626e5df84ff
|
/app2020/LakeAssessmentApp_v2/buildAppModules/buildStationTable_Chl_a_ResultsNew.R
|
144a534da2427f89ca59cb974ecedfd5718ed2e5
|
[] |
no_license
|
EmmaVJones/LakesAssessment2020
|
24c57a343ec7df5b18eada630cc2e11e01c8c83c
|
72f4b6e9721c947e8b7348c9d06e3baf8f2da715
|
refs/heads/master
| 2020-05-18T02:41:57.259296
| 2019-06-20T13:38:55
| 2019-06-20T13:38:55
| 184,124,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,112
|
r
|
buildStationTable_Chl_a_ResultsNew.R
|
# Pick one significant lake, pull the conventional-monitoring rows for its
# stations, and attach station metadata (assessment units, WQS class,
# nutrient limits). The commented alternatives are other lakes used during
# interactive testing.
lake_filter <- filter(lakeStations, SIGLAKENAME == 'Leesville Reservoir')#'Lake Moomaw')# 'Claytor Lake')
conventionals_Lake <- filter(conventionals, FDT_STA_ID %in% unique(lake_filter$FDT_STA_ID)) %>%
left_join(dplyr::select(lakeStations, FDT_STA_ID, SEC, CLASS, SPSTDS,PWS, ID305B_1, ID305B_2, ID305B_3,
STATION_TYPE_1, STATION_TYPE_2, STATION_TYPE_3, ID305B, SEC187, SIG_LAKE, USE,
SIGLAKENAME, Chlorophyll_A_limit, TPhosphorus_limit, Assess_TYPE), by='FDT_STA_ID')
unique(conventionals_Lake$ID305B)
# NOTE(review): the four AUData assignments below overwrite one another, so
# only the last ('VAW-L10L_BSA01A10') takes effect; they look like interactive
# test cases left in the script. Also, each filter tests ID305B_2 twice and
# never ID305B_3 -- presumably the second clause was meant to be ID305B_3;
# confirm before relying on the AU selection.
AUData <- filter(conventionals_Lake, ID305B_1 %in% "VAW-I03L_JKS01A02" |#"VAW-I03L_JKS02A02" "VAW-I03L_JKS03A02"
ID305B_2 %in% "VAW-I03L_JKS01A02" |
ID305B_2 %in% "VAW-I03L_JKS01A02" ) %>%
left_join(WQSvalues, by = 'CLASS')
AUData <- filter(conventionals_Lake, ID305B_1 %in% "VAW-N16L_NEW01A02" | #"VAW-N16L_NEW01A02" "VAW-N16L_NEW01B14" "VAW-N17L_PKC01A10" "VAW-N17L_PKC02A10"
ID305B_2 %in% "VAW-N16L_NEW01A02" |
ID305B_2 %in% "VAW-N16L_NEW01A02") %>%
left_join(WQSvalues, by = 'CLASS')
AUData <- filter(conventionals_Lake, ID305B_1 %in% "VAW-L05L_CRV01A02" |
ID305B_2 %in% "VAW-L05L_CRV01A02" |
ID305B_2 %in% "VAW-L05L_CRV01A02") %>%
left_join(WQSvalues, by = 'CLASS')
AUData <- filter(conventionals_Lake, ID305B_1 %in% 'VAW-L10L_BSA01A10') %>%#'VAW-L13L_PGG01A02') %>%
left_join(WQSvalues, by = 'CLASS')
# Create Data frame with all data within ID305B and stratification information
# Pool thermocline data to get 1 sample per day, not 2 with top/bottom time difference
#stationDataDailySample <- reactive({
# req(AUData())
dat <- AUData
dat$FDT_DATE_TIME <- as.POSIXct(dat$FDT_DATE_TIME, format="%m/%d/%Y %H:%M")
dat <- mutate(dat, SampleDate=format(FDT_DATE_TIME,"%m/%d/%y"))#%>% # Separate sampling events by day
#filter(!is.na(FDT_TEMP_CELCIUS))# remove any NA values to keep thermocline function happy
#thermo <- stratifiedLake(dat)
# thermocline depths per station/day; rows above the thermocline are labelled
# Epilimnion, at or below it Hypolimnion
thermo <- stratifiedLake_citNonA(citmonOutOfParameterDataset(filter(dat, !is.na(FDT_TEMP_CELCIUS)), FDT_TEMP_CELCIUS, FDT_TEMP_CELCIUS_RMK) )
thermo$ThermoclineDepth <- as.numeric(thermo$ThermoclineDepth)
stationDataDailySample <- plyr::join(dat,thermo,by=c('FDT_STA_ID','SampleDate'))%>%
mutate(LakeStratification= ifelse(FDT_DEPTH < ThermoclineDepth,"Epilimnion","Hypolimnion"))
# NOTE(review): the next three stationData assignments are also overwritten by
# the final one, which keeps ALL stations for the assessment unit.
stationData <- filter(stationDataDailySample, FDT_STA_ID %in% "9-NEW087.14") #"9-NEW087.14" "9-NEW089.34"
stationData <- filter(stationDataDailySample, FDT_STA_ID %in% "2-JKS044.60")# "2-JKS046.40"
stationData <- filter(stationDataDailySample, FDT_STA_ID %in% 'LVLAPGG000.47')
stationData <- stationDataDailySample
# Rebuild the same daily-sample/stratification table for EVERY station on the
# whole lake (lacustrine-zone context), then...
sigLake <- as.character(AUData$SIGLAKENAME)
allLakeLZstations <- as.character(filter(lakeStations,SIGLAKENAME %in% sigLake)$STATION_ID)
allLakeLZData <- filter(conventionals,FDT_STA_ID %in% allLakeLZstations)
allLakeLZData$FDT_DATE_TIME <- as.POSIXct(allLakeLZData$FDT_DATE_TIME, format="%m/%d/%Y %H:%M")
allLakeLZData<- mutate(allLakeLZData, SampleDate=format(FDT_DATE_TIME,"%m/%d/%y"))%>% # Separate sampling events by day
filter(!is.na(FDT_TEMP_CELCIUS))# remove any NA values to keep thermocline function happy
thermo <- stratifiedLake_citNonA(citmonOutOfParameterDataset(allLakeLZData, FDT_TEMP_CELCIUS, FDT_TEMP_CELCIUS_RMK) )#stratifiedLake(allLakeLZData)
thermo$ThermoclineDepth <- as.numeric(thermo$ThermoclineDepth)
allLakeLZData2 <- plyr::join(allLakeLZData,thermo,by=c('FDT_STA_ID','SampleDate'))%>%
mutate(LakeStratification= ifelse(FDT_DEPTH < ThermoclineDepth,"Epilimnion","Hypolimnion"))%>%
plyr::join(dplyr::select(lakeStations, Chlorophyll_A_limit, TPhosphorus_limit, Assess_TYPE, FDT_STA_ID), by='FDT_STA_ID')
# NOTE(review): ...lakeStations is OVERWRITTEN with this monitoring-data table;
# downstream code (exceedance_chlA) filters it on STATION_ID -- confirm
# allLakeLZData2 actually carries a STATION_ID column.
lakeStations <- allLakeLZData2
chlA_Assessment <- function(x){
  # Run the single-station chlorophyll-a assessment for every station present
  # in x and stack the per-station results into one data frame.
  # Returns NULL (invisibly) when x has no rows, exactly as before.
  if (nrow(x) > 0) {
    ids <- unique(x$FDT_STA_ID)
    perStation <- lapply(ids, function(id) {
      as.data.frame(chlA_Assessment_OneStation(filter(x, FDT_STA_ID %in% id)))
    })
    return(do.call(rbind, perStation))
  }
}
# Screen chlorophyll-a assessments per station: keep only years with >= 6
# samples, take the two most recent such years, and pull a third year when
# those two disagree on exceedance. Returns a message string when no
# chlorophyll-a data are available for the station(s).
exceedance_chlA <- function(x, lakeStations){
  chlA_Assess <- chlA_Assessment(x)
  if(is.null(chlA_Assess)){
    return('No Chlorophyll a data for station ')
  }
  if(nrow(chlA_Assess) < 1){
    return('No Chlorophyll a data for station ')
  }else{
    if(class(chlA_Assess$FDT_STA_ID)=="factor"){ # have to split this step up bc n stationID's affect how split performs
      chlA_Assess$FDT_STA_ID <- droplevels(chlA_Assess$FDT_STA_ID) # have to drop unused levels from factor or it messes with split function and mixes up data in each list item
    }
    dat <- split(chlA_Assess,f=chlA_Assess$FDT_STA_ID)
    holder <- list()
    for(i in 1:length(dat)){
      # Find two most recent years with >= 6 data points
      step1 <- filter(dat[[i]],samplePerYear>=6) # verify enough samples
      step2 <- filter(step1,Year %in% tail(sort(unique(step1$Year)),2)) %>% # get two most recent years from valid sample years
        mutate(ID305B_1 = as.character(filter(lakeStations, STATION_ID %in% unique(step1$FDT_STA_ID))$ID305B_1))
      if(nrow(step2)>1){ # only do this if more than 1 year of data
        if(step2$chlA_Exceedance[1]!=step2$chlA_Exceedance[2]){ # if the exceedances contradict one another in two years grab third year
          # NOTE(review): step1alt is assigned but never used below; step2 is
          # rebuilt from step1 (an identical filter), so step1alt is dead code.
          step1alt <- filter(dat[[i]],samplePerYear>=6) # verify enough samples
          step2 <- filter(step1,Year %in% tail(sort(unique(step1$Year)),3)) %>% # get three most recent years from valid sample years
            mutate(ID305B_1 = as.character(filter(lakeStations, STATION_ID %in% unique(step1$FDT_STA_ID))$ID305B_1))
        }
      }
      holder[[i]] <- step2
    }
    do.call(rbind,holder) # output table for user to review
  }
}
# Run the chlorophyll-a exceedance screen on the station data after
# citmonOutOfParameterDataset() filtering (presumably dropping out-of-spec
# citizen/non-agency chlorophyll records -- confirm against its definition).
chlA_Exceedances <- exceedance_chlA(citmonOutOfParameterDataset(stationData, CHLOROPHYLL, RMK_32211), lakeStations)
TP_Assessment <- function(x){
  # Run the single-station total-phosphorus assessment for every station in x
  # and stack the per-station results into one data frame.
  # Returns NULL (invisibly) when x has no rows, exactly as before.
  if (nrow(x) > 0) {
    ids <- unique(x$FDT_STA_ID)
    perStation <- lapply(ids, function(id) {
      as.data.frame(TP_Assessment_OneStation(filter(x, FDT_STA_ID %in% id)))
    })
    return(do.call(rbind, perStation))
  }
}
# Screen total-phosphorus assessments per station: keep only years with >= 6
# samples, take the two most recent such years, and pull a third year when
# those two disagree on exceedance. Always returns a data frame (possibly
# zero-row), unlike exceedance_chlA which can return a message string.
exceedance_TP <- function(x){
  TP_Assess <- TP_Assessment(x)
  if(is.null(TP_Assess)){
    # no TP data at all: empty frame with the expected columns
    z <- data.frame(FDT_STA_ID = NA, Year = NA, samplePerYear = NA, medianTP = NA,
                    TPhosphorus_limit_ug_L = NA, TP_Exceedance = NA, LacustrineZone = NA)[0,]
  } else {
    if(nrow(TP_Assess)==0){
      z <- data.frame(FDT_STA_ID = NA, Year = NA, samplePerYear = NA, medianTP = NA,
                      TPhosphorus_limit_ug_L = NA, TP_Exceedance = NA, LacustrineZone = NA)[0,]
    } else {
      if(class(TP_Assess$FDT_STA_ID)=="factor"){ # have to split this step up bc n stationID's affect how split performs
        TP_Assess$FDT_STA_ID <- droplevels(TP_Assess$FDT_STA_ID) # have to drop unused levels from factor or it messes with split function and mixes up data in each list item
      }
      dat <- split(TP_Assess,f=TP_Assess$FDT_STA_ID)
      holder <- list()
      for(i in 1:length(dat)){
        # Find two most recent years with >= 6 data points
        step1 <- filter(dat[[i]],samplePerYear>=6) # verify enough samples
        step2 <- filter(step1,Year %in% tail(sort(unique(step1$Year)),2)) # get two most recent years from valid sample years
        if(nrow(step2)>1){ # only do this if more than 1 year of data
          if(step2$TP_Exceedance[1]!=step2$TP_Exceedance[2]){ # if the exceedances contradict one another in two years grab third year
            # NOTE(review): step1alt is assigned but never used; step2 is
            # rebuilt from step1 (an identical filter), so step1alt is dead code.
            step1alt <- filter(dat[[i]],samplePerYear>=6) # verify enough samples
            step2 <- filter(step1,Year %in% tail(sort(unique(step1$Year)),3)) # get three most recent years from valid sample years
          }
        }
        holder[[i]] <- step2
      }
      z <- do.call(rbind,holder) # output table for user to review
    }
  }
  return(z)
}
TP_Exceedances <- function(x){
  # Summarise the TP exceedance screen as VIO / SAMP / STAT columns;
  # all-NA columns when the screen produced no usable rows.
  screened <- exceedance_TP(x)
  if (nrow(screened) == 0) {
    return(data.frame(NUT_TP_VIO = NA, NUT_TP_SAMP = NA, NUT_TP_STAT = NA))
  }
  nViolations <- nrow(filter(screened, TP_Exceedance == TRUE))
  data.frame(NUT_TP_VIO = nViolations,
             NUT_TP_SAMP = nrow(screened),
             NUT_TP_STAT = ifelse(any(screened[['TP_Exceedance']]) == TRUE, 'Review', 'S'))
}
# Total-phosphorus summary for the station data (result printed, not stored)
TP_Exceedances(citmonOutOfParameterDataset(stationData, PHOSPHORUS, RMK_PHOSPHORUS))
# Recast the conventional E. coli records and run the bacteria assessment
# decision (10 / 410 / 126 presumably are the sample-count and criteria
# thresholds from the assessment methodology -- confirm against the function)
newSTDbacteriaData <- conventionalsToBacteria(stationData, 'E.COLI')
z <- bacteriaAssessmentDecision(newSTDbacteriaData, 10, 410, 126) %>%
distinct(`Assessment Decision`) # only grab 1 record
chlA_Exceedances_NEW <- function(stationData){
  # Summarise the chlorophyll-a exceedance screen as VIO / SAMP / STAT
  # columns (mirrors TP_Exceedances()).
  # BUG FIX: the original body referenced an undefined `x` and dropped the
  # required lakeStations argument of exceedance_chlA(); `lakeStations` is
  # resolved from the enclosing script scope, matching the call above.
  z <- exceedance_chlA(stationData, lakeStations)
  if (!is.data.frame(z) || nrow(z) == 0) {
    # exceedance_chlA() returns a message string when no chl-a data exist
    return(data.frame(NUT_CHLA_VIO = NA, NUT_CHLA_SAMP = NA, NUT_CHLA_STAT = NA))
  }
  data.frame(NUT_CHLA_VIO = nrow(filter(z, chlA_Exceedance == TRUE)),
             NUT_CHLA_SAMP = nrow(z),
             NUT_CHLA_STAT = ifelse(any(z$chlA_Exceedance) == TRUE, 'Review', 'S'))
}
|
1775f6f839a4b5a094865d5e9ecfac787f1e140b
|
cf92b98143f6b7ce18abf0789e706cb56167f41b
|
/man/bcdc_check_geom_size.Rd
|
405d5532d8b1da502f341a9cf20fd59302675a14
|
[
"Apache-2.0"
] |
permissive
|
bcgov/bcdata
|
508aa845f14ebd6cb55cb57869b0f0ff9f84a87c
|
8a9aeda79dc8f3c608b77296b12cf24c098f9673
|
refs/heads/main
| 2023-07-06T02:55:38.654594
| 2023-07-01T04:54:05
| 2023-07-01T04:54:05
| 162,173,874
| 74
| 15
|
Apache-2.0
| 2023-07-01T04:54:07
| 2018-12-17T18:33:41
|
R
|
UTF-8
|
R
| false
| true
| 1,456
|
rd
|
bcdc_check_geom_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cql-geom-predicates.R
\name{bcdc_check_geom_size}
\alias{bcdc_check_geom_size}
\title{Check spatial objects for WFS spatial operations}
\usage{
bcdc_check_geom_size(x)
}
\arguments{
\item{x}{object of class sf, sfc or sfg}
}
\value{
Invisibly returns a logical indicating whether the check passes. If the return
value is TRUE, the object will not need a bounding box drawn. If the return value is
FALSE, the check fails and a bounding box will be drawn.
}
\description{
Check a spatial object to see if it exceeds the current set value of
'bcdata.max_geom_pred_size' option, which controls how the object is treated when used inside a spatial predicate function in \code{\link[=filter.bcdc_promise]{filter.bcdc_promise()}}. If the object does exceed the size
threshold a bounding box is drawn around it and all features
within the box will be returned. Further options include:
\itemize{
\item Try adjusting the value of the 'bcdata.max_geom_pred_size' option
\item Simplify the spatial object to reduce its size
\item Further processing on the returned object
}
}
\details{
See the \href{https://bcgov.github.io/bcdata/articles/efficiently-query-spatial-data-in-the-bc-data-catalogue.html}{Querying Spatial Data with bcdata}
for more details.
}
\examples{
\donttest{
try({
airports <- bcdc_query_geodata("bc-airports") \%>\% collect()
bcdc_check_geom_size(airports)
})
}
}
|
f303d78aaf47902ee811ac24e98e1f170a4cada4
|
6e1d9a0e305dee59d9eed10df14ee1e417d2ed85
|
/R/toolbox.R
|
4e8664fca0a712fdd9f3696f881523a111347096
|
[] |
no_license
|
pvermees/ArArRedux
|
395a575188e078bb8ac3d506bff380be3d9d542b
|
9d8c84244c769a40642b43ef777789dd333c7f12
|
refs/heads/master
| 2023-08-20T18:02:32.793378
| 2018-10-03T17:00:00
| 2018-10-03T17:00:00
| 61,240,004
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,163
|
r
|
toolbox.R
|
theday <- function(thedate){
  # Truncate a date-time (seconds since the Unix epoch) to the start of its
  # calendar day, expressed in seconds.
  # NOTE: the day count uses 365-day years (leap days ignored), exactly as in
  # the original implementation; only day-level grouping should rely on it.
  lt <- as.POSIXlt(thedate, origin = "1970-01-01 00:00:00")
  wholedays <- (1900 + lt$year - 1970) * 365 + lt$yday
  wholedays * 24 * 3600
}
mergematrices <- function(xcovmat, ycovmat){
  # Combine two covariance matrices into one block-diagonal matrix,
  # with zeros in the off-diagonal blocks. A NULL input is treated as
  # absent and the other matrix is returned unchanged.
  if (is.null(xcovmat)) return(ycovmat)
  if (is.null(ycovmat)) return(xcovmat)
  # nrow() is NULL for a bare scalar, so fall back to size 1 in that case
  xn <- max(1, nrow(xcovmat))
  yn <- max(1, nrow(ycovmat))
  merged <- matrix(0, nrow = xn + yn, ncol = xn + yn)
  merged[seq_len(xn), seq_len(xn)] <- xcovmat
  merged[xn + seq_len(yn), xn + seq_len(yn)] <- ycovmat
  merged
}
# returns the indices of timevec2 which are closest to timevec1
nearest <- function(timevec1, timevec2){
  # For every element of timevec1, find the index of the closest element of
  # timevec2 (ties resolved to the first match, as which.min does).
  # Rewritten with vapply: the original grew the result with c() inside a
  # loop (quadratic) and misbehaved on empty input via 1:length(timevec1);
  # an empty timevec1 now cleanly yields integer(0).
  vapply(timevec1,
         function(t) which.min(abs(timevec2 - t)),
         integer(1))
}
nmasses <- function(x){
  # Number of isotope masses tracked in x (length of its $masses field).
  length(x$masses)
}
nruns <- function(x, ...){ UseMethod("nruns", x) }

# Default: the number of runs equals the number of sample labels.
nruns.default <- function(x, ...){
  length(x$labels)
}

# Peak-hopping data: count runs on the first signal's dataset.
nruns.PHdata <- function(x, ...){
  nruns(x$signals[[1]])
}
ncycles <- function(x, ...){ UseMethod("ncycles", x) }

# No sensible default: ncycles is only defined for time-resolved data.
ncycles.default <- function(x, ...){ stop() }

# Number of measurement cycles = number of rows in the time matrix.
ncycles.timeresolved <- function(x, ...){
  nrow(x$thetime)
}
getsignal <- function(X, prefix, num = NULL){
  # Return a two-column object pairing the selected intercepts with the
  # square root of the corresponding covariance entries.
  # NOTE(review): when more than one index is selected, X$covmat[idx, idx]
  # is the full submatrix rather than its diagonal -- confirm this is the
  # intended behaviour before changing it.
  idx <- getindices(X, prefix, num)
  cbind(X$intercepts[idx], sqrt(X$covmat[idx, idx]))
}
getindices <- function(...){ UseMethod("getindices") }

# Default method: convert per-run mass indices into flat indices for data
# stored run-by-run (run r, mass m -> (r-1)*nmasses + m).
# Rewritten to build the result with outer() instead of growing a vector
# inside a loop; the ordering (all imasses for the first run, then the
# second, ...) is unchanged.
getindices.default <- function(nmasses, nruns=NULL,
                               imasses=NULL, iruns=NULL, ...){
  if (is.null(nruns)) nruns <- max(iruns)
  if (is.null(imasses)) imasses <- 1:nmasses
  if (is.null(iruns)) iruns <- 1:nruns
  as.vector(outer(imasses, (iruns - 1) * nmasses, "+"))
}
getindices.logratios <- function(x, iruns, ...){
  # Flat indices of all logratios belonging to the requested runs.
  # x$nlr holds the number of logratios per run, so run r occupies
  # positions (cs[r]+1):cs[r+1] in the flattened vectors.
  cs <- c(0, cumsum(x$nlr))
  unlist(lapply(iruns, function(r) (cs[r] + 1):cs[r + 1]))
}
# Select flat intercept indices from a redux object by any combination of
# run-label prefix, numerator isotope, denominator isotope and position.
# Each non-NULL criterion produces a candidate index set (a NULL criterion
# selects everything); the result is the intersection of all four sets.
getindices.redux <- function(X,prefix=NULL,num=NULL,den=NULL,
                             pos=NULL,invert=FALSE,include.J=FALSE,...){
  i <- 1:length(X$intercepts)
  # set 1: runs whose label matches prefix (optionally adding irradiation
  # monitors when include.J=TRUE), expanded to per-logratio indices
  if (is.null(prefix)) {
    i1 <- i
  } else {
    j <- findrunindices(X,prefixes=prefix,invert=invert,include.J=include.J)
    i1 <- getindices.logratios(X,j)
  }
  # sets 2 and 3: numerator / denominator isotope matches
  if (is.null(num)) {
    i2 <- i
  } else {
    i2 <- findmatches(X$num,prefixes=num,invert=invert)
  }
  if (is.null(den)) {
    i3 <- i
  } else {
    i3 <- findmatches(X$den,prefixes=den,invert=invert)
  }
  # set 4: position match (invert flips the selection)
  if (is.null(pos)) {
    i4 <- i
  } else {
    matches <- X$pos %in% pos
    if (invert){
      i4 <- which(!matches)
    } else {
      i4 <- which(matches)
    }
  }
  # intersection of all four candidate sets
  return(which((i %in% i1) & (i %in% i2) &
               (i %in% i3) & (i %in% i4)))
}
findrunindices <- function(X, prefixes, invert=FALSE, include.J=FALSE){
  # Indices of the runs whose labels match any of the prefixes. When
  # include.J is TRUE, the runs at the irradiation-monitor positions
  # (X$Jpos) and the calibration runs ("DCAL", "K:", "Ca:") are added
  # to the selection as well.
  runs <- 1:nruns(X)
  selected <- runs %in% findmatches(X$labels, prefixes, invert)
  if (include.J) {
    monitors <- runs %in% findmatches(X$pos, prefixes = X$Jpos, invert)
    calib <- runs %in% findmatches(X$labels, prefixes = c("DCAL", "K:", "Ca:"), invert)
    selected <- selected | monitors | calib
  }
  which(selected)
}
findmatches <- function(labels, prefixes, invert=FALSE){
  # Return the sorted indices of labels matched by any of the prefixes.
  # Each prefix is first tried as an exact match (%in%); if nothing matches
  # exactly it is used as a regular-expression pattern via grep().
  # Duplicate indices (a label matched by several prefixes) are retained,
  # as in the original implementation.
  ns <- length(labels)
  hits <- unlist(lapply(prefixes, function(p) {
    exact <- labels %in% p
    if (any(exact)) which(exact) else grep(p, labels)
  }))
  j <- sort(hits)
  if (invert) {
    # BUG FIX: the original returned 1:ns here, which yields c(1, 0) when
    # ns == 0; seq_len(ns) is empty-safe.
    if (length(j) == 0) seq_len(ns) else (1:ns)[-j]
  } else {
    j
  }
}
getruns <- function(x, ...){ UseMethod("getruns", x) }

# No default implementation: getruns only makes sense for time-resolved data.
getruns.default <- function(x, ...){ stop() }

# Extract the data columns belonging to the runs selected by i.
getruns.timeresolved <- function(x, i, ...){
  cols <- getindices(nmasses = nmasses(x), iruns = i)
  x$d[, cols]
}
#' Select a subset of some data
#'
#' Extracts those intercepts, covariances etc. that match a given list
#' of indices or labels.
#'
#' @param x an object of class \code{\link{timeresolved}},
#' \code{\link{logratios}}, \code{\link{redux}} or
#' \code{\link{results}}
#' @param i a vector with indices of the selected runs
#' @param labels a string or a vector of strings with sample names
#' @param invert boolean flag indicating whether the selection should
#' be inverted, i.e. whether the selected indices or labels should
#' be removed rather than retained
#' @param include.J if \code{TRUE}, automatically adds the irradiation
#' monitors to the selection
#' @param ... other arguments
#' @return an object of the same class as \code{x}
#' @examples
#' data(Melbourne)
#' ages <- process(Melbourne$X,Melbourne$irr,Melbourne$fract)
#' MD <- subset(ages,labels=c("MD2-1","MD2-2","MD2-3","MD2-4","MD2-5"))
#' plotcorr(MD)
#' @rdname subset
#' @export
subset.timeresolved <- function(x,i=NULL,labels=NULL,invert=FALSE,include.J=FALSE,...){
  # resolve the label-based selection to run indices when i is not given
  if (is.null(i)) i <- findrunindices(x,prefixes=labels,invert=invert,include.J=include.J)
  out <- x
  # subset every per-run field in parallel
  out$d <- getruns(x,i)
  out$thetime <- x$thetime[,i]
  out$thedate <- x$thedate[i]
  out$irr <- x$irr[i]
  out$pos <- x$pos[i]
  out$labels <- x$labels[i]
  # blank-corrected data carry per-run blank indices that must follow along
  if (methods::is(x,"blankcorrected"))
    out$blankindices <- x$blankindices[i]
  return(out)
}
#' @rdname subset
#' @export
subset.logratios <- function(x,i=NULL,labels=NULL,invert=FALSE,include.J=FALSE,...){
  # resolve the label-based selection to run indices when i is not given
  if (is.null(i)) i <- findrunindices(x,prefixes=labels,invert=invert,include.J=include.J)
  out <- x
  # per-run fields are indexed by run...
  out$irr <- x$irr[i]
  out$pos <- x$pos[i]
  out$labels <- x$labels[i]
  out$thedate <- x$thedate[i]
  out$nlr <- x$nlr[i]
  # ...while num/den/intercepts/covmat are indexed per logratio, so the run
  # selection must first be expanded to flat logratio indices
  j <- getindices.logratios(x,i)
  out$num <- x$num[j]
  out$den <- x$den[j]
  out$intercepts <- x$intercepts[j]
  out$covmat <- x$covmat[j,j]
  return(out)
}
#' @rdname subset
#' @export
subset.redux <- function(x,i=NULL,labels=NULL,invert=FALSE,include.J=FALSE,...){
  # redux objects share the logratios field layout, so delegate directly
  subset.logratios(x, i = i, labels = labels, invert = invert,
                   include.J = include.J, ...)
}
#' @rdname subset
#' @export
subset.results <- function(x,i=NULL,labels=NULL,invert=FALSE,...){
  # resolve the label-based selection to run indices when i is not given
  if (is.null(i)) {
    i <- findrunindices(x, prefixes = labels, invert = invert)
  }
  out <- x
  out$labels <- x$labels[i]
  out$thedate <- x$thedate[i]
  out$ages <- x$ages[i]
  out$covmat <- x$covmat[i, i]
  out
}
#' Select a subset of isotopes from a dataset
#'
#' Extracts the intercepts, covariance matrix, etc. of a selection of
#' isotopes from a larger dataset
#'
#' @param x an object of class \code{\link{logratios}},
#' \code{\link{timeresolved}}, \code{\link{PHdata}} or
#' \code{\link{redux}}.
#' @param ... other arguments
#' @return an object of the same class as x
#' @examples
#' kfile <- system.file("K-glass.csv",package="ArArRedux")
#' masses <- c("Ar37","Ar38","Ar39","Ar40","Ar36")
#' mk <- loaddata(kfile,masses)
#' lk <- fitlogratios(blankcorr(mk,"EXB#","K:"),"Ar40")
#' k <- getmasses(lk,"Ar39","Ar40") # subset of the relevant isotopes
#' plotcorr(k)
#' @export
getmasses <- function(x,...){ UseMethod("getmasses",x) }
#' @rdname getmasses
#' @export
getmasses.default <- function(x,...){ stop() } # no default method: only the classes above are supported
#' @param mass a vector of strings denoting the masses of interest
#' @param invert boolean parameter indicating whether the selection
#' should be inverted (default = FALSE)
#' @rdname getmasses
#' @export
getmasses.timeresolved <- function(x,mass,invert=FALSE,...){
  # keep (or, when invert=TRUE, drop) the columns of the selected masses
  keep <- if (invert) which(x$masses != mass) else which(x$masses == mass)
  out <- x
  out$masses <- x$masses[keep]
  out$d <- x$d[, getindices(nmasses(x), nruns(x), keep)]
  out
}
#' @param num vector of strings indicating the numerator isotopes
#' @param den vector of string indicating the denominator isotopes
#' @rdname getmasses
#' @export
getmasses.logratios <- function(x,num,den,invert=FALSE,...){
  # select the logratios whose numerator AND denominator are both requested
  wanted <- (x$num %in% num) & (x$den %in% den)
  if (invert) wanted <- !wanted
  sel <- which(wanted)
  out <- x
  out$num <- x$num[sel]
  out$den <- x$den[sel]
  out$intercepts <- x$intercepts[sel]
  out$covmat <- x$covmat[sel, sel]
  # recount logratios per run by binning the selected flat indices at the
  # cumulative per-run boundaries
  out$nlr <- graphics::hist(sel, breaks = c(0, cumsum(x$nlr)),
                            plot = FALSE)$counts
  out
}
#' @rdname getmasses
#' @export
getmasses.redux <- function(x,num,den,invert=FALSE,...){
out <- x
if (invert){ hasmass <- !((x$num %in% num) & (x$den %in% den)) }
else { hasmass <- (x$num %in% num) & (x$den %in% den) }
i <- which(hasmass)
bi <- cumsum(c(1,x$nlr))
ei <- cumsum(x$nlr)
nn <- length(x$labels)
retain <- rep(FALSE,nn)
for (j in 1:nn) {
retain[j] <- any(hasmass[bi[j]:ei[j]])
}
out$irr <- x$irr[retain]
out$pos <- x$pos[retain]
out$labels <- x$labels[retain]
out$thedate <- x$thedate[i]
out$intercepts <- x$intercepts[i]
out$covmat <- x$covmat[i,i]
out$num <- x$num[i]
out$den <- x$den[i]
out$nlr <- graphics::hist(i,breaks=c(0,cumsum(x$nlr)),plot=FALSE)$counts[retain]
return(out)
}
setmasses <- function(x,mass,value){ UseMethod("setmasses",x) }
setmasses.default <- function(x,mass,value){stop()}
setmasses.timeresolved <- function(x,mass,value){
imasses <- which(x$masses == mass)
ii <- getindices(nmasses(x),nruns(x),imasses)
x$d[,ii] <- value
return(x)
}
setmasses.fit <- function(x,mass,value){
imasses <- which(x$masses == mass)
ii <- getindices(nmasses(x),nruns(x),imasses)
x$intercepts[ii] <- value$intercepts
for (i in 1:length(ii)){
x$covmat[ii[i],ii] <- value$covmat[i,]
}
return(x)
}
replacenegatives <- function(x){
out <- x
nmasses <- nmasses(x)
nruns <- nruns(x)
isnegative <- apply(x$d<0,2,"sum")>0
ntoreplace <- sum(isnegative)
out$d[,isnegative] <- # effectively set to zero
seq(from=1e-18,to=1e-20,length.out=ncycles(x)*ntoreplace)
return(out)
}
#' Merge a list of logratio data
#'
#' Recursively concatenates a list of logratio data into one big dataset
#'
#' @param lrlist a list containing items of class
#' \code{\link{logratios}} or \code{\link{redux}}
#' @return an object of the same class as \code{x} containing the
#' merged dataset
#' @examples
#' samplefile <- system.file("Samples.csv",package="ArArRedux")
#' kfile <- system.file("K-glass.csv",package="ArArRedux")
#' cafile <- system.file("Ca-salt.csv",package="ArArRedux")
#' dfile <- system.file("Calibration.csv",package="ArArRedux")
#' masses <- c("Ar37","Ar38","Ar39","Ar40","Ar36")
#' blanklabel <- "EXB#"
#' Jpos <- c(3,15)
#' dlabels <- c("H1","AX","L1","L2")
#'
#' m <- loaddata(samplefile,masses) # samples and J-standards
#' mk <- loaddata(kfile,masses) # K-interference data
#' mca <- loaddata(cafile,masses) # Ca interference data
#' md <- loaddata(dfile,dlabels,PH=TRUE) # detector intercalibrations
#'
#' # form and fit logratios
#' l <- fitlogratios(blankcorr(m,blanklabel),"Ar40")
#' lk <- fitlogratios(blankcorr(mk,blanklabel,"K:"),"Ar40")
#' k <- getmasses(lk,"Ar39","Ar40") # subset on the relevant isotopes
#' lca <- fitlogratios(blankcorr(mca,blanklabel,"Ca:"),"Ar37")
#' ca <- getmasses(lca,c("Ar36","Ar39"),c("Ar37","Ar37")) # subset
#' ld <- fitlogratios(blankcorr(md))
#' d <- averagebyday(ld,"DCAL")
#'
#' # merge all data (except air shots) into one big logratio structure
#' X <- newredux(concat(list(l,k,ca,d)),Jpos)
#' data(Melbourne)
#' if (isTRUE(all.equal(Melbourne$X,X))) {
#' print("We just reconstructed the built-in dataset Melbourne$X")}
#' @export
concat <- function(lrlist){
if (length(lrlist)==2) {
x <- lrlist[[1]]
y <- lrlist[[2]]
out <- x
out$irr <- c(x$irr,y$irr)
out$pos <- c(x$pos,y$pos)
out$labels <- c(x$labels,y$labels)
out$thedate <- c(x$thedate,y$thedate)
out$num <- c(x$num,y$num)
out$den <- c(x$den,y$den)
out$nlr <- c(x$nlr,y$nlr)
out$intercepts <- c(x$intercepts,y$intercepts)
out$covmat <- mergematrices(x$covmat,y$covmat)
} else {
x <- lrlist[[1]]
rest <- lrlist[2:length(lrlist)]
y <- concat(rest)
out <- concat(list(x,y))
}
return(out)
}
|
d9f83bd2ab80ba08b15b363417ba518f8a6c93ef
|
cf8e1e41c8e96b3abf9f303ec370a80086458c5a
|
/man/make_figure_3a.Rd
|
1ffb8dd2f92df7925995acb54ac84f2c4c909efa
|
[
"MIT"
] |
permissive
|
timcdlucas/ringbp
|
57cb2905a9cc17e6077fed590f899fa94b738864
|
1097485287b4eee06794d01331b4786eac246d98
|
refs/heads/master
| 2023-03-25T20:13:05.029785
| 2020-11-09T09:30:03
| 2020-11-09T09:30:03
| 254,608,812
| 2
| 1
|
NOASSERTION
| 2020-05-18T16:50:19
| 2020-04-10T10:36:38
|
R
|
UTF-8
|
R
| false
| true
| 676
|
rd
|
make_figure_3a.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/figures2to4.R
\name{make_figure_3a}
\alias{make_figure_3a}
\title{Construct Figure 3a from manuscript}
\usage{
make_figure_3a(
df = NULL,
num.initial.cases_val = 5,
delay_val = "SARS",
prop.asym_val = 0.4
)
}
\arguments{
\item{df}{A dataframe of results as produced by \code{parameter_sweep}}
\item{num.initial.cases_val}{Value for num.initial.cases to subset the data to.}
\item{delay_val}{Value for delay type to subset the data to.}
\item{prop.asym_val}{Value for prop.asym to subset the data to.}
}
\value{
A ggplot2 plot object
}
\description{
Construct Figure 3a from manuscript
}
|
ec9b2987391b0f508094109c1377cb1c0d1ec42e
|
dd9ec6b951f8db9e163f7183c247c5b9ac4f5569
|
/LOGISTIC_ORDINAL.R
|
0d46e56f9724b106aaef7ec6f1b68db58cba6d08
|
[] |
no_license
|
sunnysingh30/R-Learning-Projects
|
b7bc0cdf94af8e02c47cf0a5676470132b3cbead
|
e6004b04b209cdc3c1523008be92d35e8ec2883f
|
refs/heads/master
| 2021-01-10T03:10:43.028198
| 2016-02-14T09:10:26
| 2016-02-14T09:10:26
| 51,687,864
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,740
|
r
|
LOGISTIC_ORDINAL.R
|
# Ordinary Logistic Regression
Case 2 (Ordinal Regression)
A study looks at factors which influence the decision of whether to apply to graduate
school. College juniors are asked if they are unlikely, somewhat likely, or very likely
to apply to graduate school. Hence, our outcome variable has three categories i.e.
unlikely, somewhat likely and very likely.
Data on parental educational status, class of institution (private or state run),
current GPA are also collected. The researchers have reason to believe that the
???distances??? between these three points are not equal. For example, the ???distance???
between ???unlikely??? and ???somewhat likely??? may be shorter than the distance between
???somewhat likely??? and ???very likely???. In such case, we???ll use Ordinal Regression.
#######################
require(foreign)
require(ggplot2)
require(MASS)
require(Hmisc)
require(reshape2)
rm(list=ls())
# Load data
data <- read.dta('http://www.ats.ucla.edu/stat/data/ologit.dta')
head(data)
levels(data$apply)
# Plottign distribution
ggplot(data, aes(x=apply, y=gpa)) +
geom_boxplot(size= .75) +
geom_jitter(alpha= .5) +
facet_grid(pared~public, margins=T) +
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1))
# Build model
m <- polr(apply~ pared + public + gpa, data=data, Hess=T)
coef(summary(m))
# NOw, lets' calculte essential metrics such as p-value, CI, Odds ratio.
ctable <- coef(summary(m))
ctable
p <- pnorm(abs(ctable[,'t value']), lower.tail = F)*2
ctable <- cbind(ctable, 'p value' = p)
# Confidence Interval
ci <- confint(m)
exp(coef(summary(m)))
# OR or CI
exp(cbind(OR=coef(m), ci))
##################################
Interpretation
1. One unit increase in parental education, from 0 (Low) to 1 (High), the
odds of ???very likely??? applying versus ???somewhat likely??? or ???unlikely???
applying combined are 2.85 greater .
2. The odds ???very likely??? or ???somewhat likely??? applying versus ???unlikely???
applying is 2.85 times greater .
3. For gpa, when a student???s gpa moves 1 unit, the odds of moving from
???unlikely??? applying to ???somewhat likely??? or ???very likley??? applying
(or from the lower and middle categories to the high category) are
multiplied by 1.85.
##################################
# -- Let???s now try to enhance this model to obtain better prediction estimates.
summary(m)
summary(update(m, method='probit', Hess=T), digits=3)
summary(update(m, method='cloglog', Hess=T), digits=3)
# Lets' add interaction terms here
head(predict(m, data,type='p'))
addterm(m, ~.^2, test='Chisq')
m2 <- stepAIC(m, ~.^2)
m2
summary(m2)
anova(m, m2)
# Time plot this model.
m3 <- update(m, Hess=T)
pr <- profile(m3)
confint(pr)
plot(pr)
pairs(pr)
|
6da0af771c25d07c5d6cf7fea9b5143db7363982
|
2bd6ae8c95456af6ee45a0f1dac2ce9dfdf37b83
|
/R/POT.R
|
a7f158c39def41806acc8f78929be0ecfdc785f2
|
[] |
no_license
|
paulnorthrop/lax
|
036bd5327d08a7ba0589dd119615256cf256414f
|
09a41455481c44b96c33c961249dbcf700a317d7
|
refs/heads/master
| 2022-11-16T21:18:24.890008
| 2022-11-08T11:19:20
| 2022-11-08T11:19:20
| 200,470,886
| 3
| 1
| null | 2019-10-07T16:32:28
| 2019-08-04T08:56:22
|
R
|
UTF-8
|
R
| false
| false
| 2,013
|
r
|
POT.R
|
# =================================== POT =================================== #
#' Loglikelihood adjustment for POT fits
#'
#' S3 \code{alogLik} method to perform loglikelihood adjustment for fitted
#' extreme value model objects returned from
#' \code{\link[POT]{fitGPD}} function in the POT package.
#' The model must have been fitted using maximum likelihood estimation.
#'
#' @inherit alogLik params references
#' @details See \code{\link{alogLik}} for details.
#' @return An object inheriting from class \code{"chandwich"}. See
#' \code{\link[chandwich]{adjust_loglik}}.
#'
#' \code{class(x)} is \code{c("lax", "chandwich", "POT", "pot", "gpd")}.
#' @seealso \code{\link{alogLik}}: loglikelihood adjustment for model fits.
#' @examples
#' # We need the POT package
#' got_POT <- requireNamespace("POT", quietly = TRUE)
#'
#' if (got_POT) {
#' library(POT)
#' # An example from the POT::fitgpd documentation.
#' set.seed(4082019)
#' x <- POT::rgpd(200, 1, 2, 0.25)
#' fit <- fitgpd(x, 1, "mle")
#' adj_fit <- alogLik(fit)
#' }
#' @name POT
NULL
## NULL
#' @rdname POT
#' @export
alogLik.uvpot <- function(x, cluster = NULL, use_vcov = TRUE, ...) {
if (x$est != "MLE") {
stop("Loglikelihood adjustment is only relevant when est = ''mle''")
}
# List of evd objects supported
supported_by_lax <- list(POT_fitgpd = c("uvpot", "pot"))
# Does x have a supported class?
is_supported <- NULL
for (i in 1:length(supported_by_lax)) {
is_supported[i] <- identical(class(x), unlist(supported_by_lax[i],
use.names = FALSE))
}
if (!any(is_supported)) {
stop(paste("x's class", deparse(class(x)), "is not supported"))
}
# Set the class
name_of_class <- names(supported_by_lax)[which(is_supported)]
class(x) <- name_of_class
# Call adj_object() to adjust the loglikelihood
res <- adj_object(x, cluster = cluster, use_vcov = use_vcov, ...)
class(res) <- c("lax", "chandwich", "POT", "pot", "gpd")
return(res)
}
|
39b34eb0292e4b6ad0530304b2a73f593dd8b196
|
61896543387fbfd3b3ebbda5ea0c9e04e61c875a
|
/scripts/tut10.R
|
6f1a41f7ed5d4d005abf98a50b682a2923198cf3
|
[] |
no_license
|
michellehoog/Numbers
|
82ff1f95e9f0c030d859059d974737fc5e3ec354
|
80d7a3c8670b3f1dece731e9219067b09bb399be
|
refs/heads/master
| 2021-09-15T10:35:50.298433
| 2018-05-30T19:49:21
| 2018-05-30T19:49:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,879
|
r
|
tut10.R
|
# Numbers, Hypotheses & Conclusions
# Tut 10: Regression
#Author: Lance Lachenicht
#R: Michelle Hoogenhout
#install pacman package if not yet installed
if(!require(pacman)){install.packages("pacman")}
#load packages
library(pacman)
p_load(dplyr, tidyr, ggplot2, cowplot, psych, magrittr)
#create random data
set.seed(21)
grp1 <- round(rnorm(n = 10, 20, 3))
grp2 <- round(jitter((grp1 + c(6:10))))
reg <- data.frame(grp1, grp2)
reg$grp1[1] <- 18
reg$grp1[9] <- 19
#example linear regression
regexample <- lm(grp2~grp1, data = reg)
fitval <- fitted(regexample)
#plot1: Health & QoL
ggplot(reg, aes(grp1, grp2)) +
geom_point(size = 2) +
labs(x = "Health", y = "Quality of life") +
theme(axis.title=element_text(size=24, face = "bold"),
axis.text.x = element_text(size = 20, hjust = 1),
axis.text.y = element_text(size = 20))+
scale_x_continuous(breaks = c(15, 21, 27)) +
geom_abline(intercept = regexample$coefficients[[1]],
slope = regexample$coefficients[[2]], colour = "cadetblue", size = 1.2) +
geom_segment(x = 15, y = 22, xend = 15, yend = fitval[[7]], linetype = 3) +
geom_segment(x = 16, y = fitval[[4]], xend = 16, yend = 25, linetype = 3) +
geom_segment(x = 17, y = 25, xend = 17, yend = fitval[[8]], linetype = 3) +
geom_segment(x = 18, y = fitval[[1]], xend = 18, yend = 28, linetype = 3) +
geom_segment(x = 19, y = fitval[[9]], xend = 19, yend = 29, linetype = 3) +
geom_segment(x = 20, y = fitval[[10]], xend = 20, yend = 30, linetype = 3) +
geom_segment(x = 21, y = 27, xend = 21, yend = fitval[[6]], linetype = 3) +
geom_segment(x = 22, y = 29, xend = 22, yend = fitval[[2]], linetype = 3) +
geom_segment(x = 25, y = 33, xend = 25, yend = fitval[[3]], linetype = 3) +
geom_segment(x = 27, y = fitval[[5]], xend = 27, yend = 37, linetype = 3)
##income examples
age <- c(20, 25, 30, 35, 40, 45, 50, 55, 60, 65)
income <- c(7, 8, 29, 25, 40, 50, 33, 50, 65, 27)
income2 <- c(6,8,25,29,35,42,48,50,55,60)
incomedata <- data.frame(age, income, income2)
#sort by age
incomedata <- incomedata %>% dplyr::arrange(age)
#regression1
regexample2 <- lm(income~age, data = incomedata)
fitval2 <- fitted(regexample2)
summary(regexample2)
#regression2
regexample3 <- lm(income2~age, data = incomedata)
fitval3 <- fitted(regexample3)
summary(regexample3)
#age plot1
ggplot(incomedata, aes(age, income)) +
geom_point() +
geom_smooth(se = F, method = "lm", colour = "cadetblue") +
labs(x = "Age", y = "Income") +
theme(axis.title=element_text(size=24, face = "bold"),
axis.text.x = element_text(size = 20, hjust = 1),
axis.text.y = element_text(size = 20)) +
geom_segment(x = age[1], y = income[1], xend = age[1], yend = fitval2[[1]], linetype = 3) +
geom_segment(x = age[2], y = income[2], xend = age[2], yend = fitval2[[2]], linetype = 3) +
geom_segment(x = age[3], y = fitval2[[3]], xend = age[3], yend = income[3], linetype = 3) +
geom_segment(x = age[4], y = income[4], xend = age[4], yend = fitval2[[4]], linetype = 3) +
geom_segment(x = age[5], y = income[5], xend = age[5], yend = fitval2[[5]], linetype = 3) +
geom_segment(x = age[6], y = income[6], xend = age[6], yend = fitval2[[6]], linetype = 3) +
geom_segment(x = age[7], y = fitval2[[7]], xend = age[7], yend = income[7], linetype = 3) +
geom_segment(x = age[8], y = income[8], xend = age[8], yend = fitval2[[8]], linetype = 3) +
geom_segment(x = age[9], y = fitval2[[9]], xend = age[9], yend = income[9], linetype = 3) +
geom_segment(x = age[10], y = income[10], xend = age[10], yend = fitval2[[10]], linetype = 3)
#age plot2
ggplot(incomedata, aes(age, income2)) +
geom_point() +
geom_smooth(se = F, method = "lm", colour = "cadetblue") +
labs(x = "Age", y = "Income") +
theme(axis.title=element_text(size=24, face = "bold"),
axis.text.x = element_text(size = 20, hjust = 1),
axis.text.y = element_text(size = 20)) +
geom_segment(x = age[1], y = income2[1], xend = age[1], yend = fitval3[[1]], linetype = 3) +
geom_segment(x = age[2], y = income2[2], xend = age[2], yend = fitval3[[2]], linetype = 3) +
geom_segment(x = age[3], y = fitval3[[3]], xend = age[3], yend = income2[3], linetype = 3) +
geom_segment(x = age[4], y = income2[4], xend = age[4], yend = fitval3[[4]], linetype = 3) +
geom_segment(x = age[5], y = income2[5], xend = age[5], yend = fitval3[[5]], linetype = 3) +
geom_segment(x = age[6], y = income2[6], xend = age[6], yend = fitval3[[6]], linetype = 3) +
geom_segment(x = age[7], y = fitval3[[7]], xend = age[7], yend = income2[7], linetype = 3) +
geom_segment(x = age[8], y = income2[8], xend = age[8], yend = fitval3[[8]], linetype = 3) +
geom_segment(x = age[9], y = fitval3[[9]], xend = age[9], yend = income2[9], linetype = 3) +
geom_segment(x = age[10], y = income2[10], xend = age[10], yend = fitval3[[10]], linetype = 3)
|
fbc50a4c8e22aaac1f39016531178186660e7347
|
8e96ff3f9d4f3d274fa4908471456e277ee80618
|
/analysis/ramldb/code/StepX_hindcast_rmax.R
|
6cc5f9f2b901fc0647ef8bd2a110471e0d965c24
|
[] |
no_license
|
cfree14/sst_recruitment
|
1e96d7e9718a93949f17ed0773d0e2702f4dcc4d
|
134104d76152f1869209e0eb883036274da156a1
|
refs/heads/master
| 2020-04-14T08:45:49.817785
| 2019-01-12T00:15:25
| 2019-01-12T00:15:25
| 163,742,595
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
StepX_hindcast_rmax.R
|
# Setup
################################################################################
# Clear workspace
rm(list = ls())
# Packages
library(plyr)
library(dplyr)
library(reshape2)
# Directories
codedir <- "code"
ramdir <- "data/ramldb/data"
outputdir <- "analysis/ramldb/output"
tabledir <- "analysis/ramldb/tables"
sstdir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/data/sst/data/averages"
bndrydir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/data/stock_boundaries/data"
# Load final model results
load(file.path(outputdir, "RAMLDB_ricker_sst.Rdata"))
# Read SST data
sst <- read.csv(paste(sstdir, "ramldb_sst_yearly_cobe.csv", sep="/"), as.is=T)
# Read stock boundary key
# THIS WILL BE UNNECESSARY WHEN YOU FIX STOCKID PROBLEM
key <- read.csv(file.path(bndrydir, "ramldb_v3.8_stock_boundary_table_v2.csv"), as.is=T)
# Read final results
results <- read.csv(file.path(outputdir, "final_model_results.csv"), as.is=T)
# Build SST data set
################################################################################
# Hindcast years
yr1 <- 1930
yr2 <- 2010
hind_years <- yr1:yr2
# Build SST data set
hind <- sst %>%
# Add stockid
left_join(select(key, assessid, stockid), by="assessid") %>%
select(stockid, year, sst_c) %>%
# Filter to stocks in data
filter(stockid %in% stocks$stockid & year %in% hind_years) %>%
# Add alpha, beta, theta, and average temperature
left_join(select(results, stockid, alpha, beta, theta, sst_c_avg), by="stockid") %>%
# Compute average temperature
mutate(sst_c_sd=sst_c-sst_c_avg,
rmax=alpha/beta*exp(theta*sst_c_sd)*exp(-1))
# Stock level RMAXs
results <- results %>%
mutate(rmax=alpha/beta*exp(-1))
hist(results$rmax[results$rmax<100], seq(0,100,0.5))
# Problem RMAX stocks
prob_rmax <- results %>%
filter(rmax>10)
# Calculate global RMAX trend
global <- hind %>%
filter(!stockid %in% prob_rmax$stockid) %>%
group_by(year) %>%
summarize(rmax_avg=mean(rmax))
# Plot RMAX over time
plot(rmax_avg ~ year, global, type="l", bty="n", las=1,
xlab="", ylab=expression("RMAX (billions)"), xlim=c(yr1, yr2), xaxt="n")
axis(1, at=seq(1930, 2010, 10), las=2)
|
bff655f1dc19a52360750bec5d44b2acb8d54c0c
|
e038c00d4966d4b2598d78c286bff1fba62f7a54
|
/plot2.R
|
57d31a129ebea816e337ec8229eef1e62cb035f3
|
[] |
no_license
|
ferneau/ExData_Plotting1
|
ecb0385c631769e69ce05b464cc2f0aa21018b8a
|
fc59f6d3ff8ceeacd872f85d6fec58e928c1f9ed
|
refs/heads/master
| 2021-01-17T09:03:54.028634
| 2017-03-06T02:57:15
| 2017-03-06T02:57:15
| 83,976,170
| 0
| 0
| null | 2017-03-05T14:27:51
| 2017-03-05T14:27:51
| null |
UTF-8
|
R
| false
| false
| 469
|
r
|
plot2.R
|
source("load_and_prepare_data.R")
mydata <- load_powerdata()
#
# Create the plot without X tic marks
plot( mydata$Global_active_power, type = "l",
ylab = "Global Active Power (kilowatts)",
xlab = "",
xaxt = "n")
#
# Create the range for X tic marks based on 1440 min/day
at <- seq(from=0,to=2880,by=1440 )
#
# Put up the axis labels
axis(labels=c("Thurs","Fri","Sat"),side=1, at=at)
print("Saving image")
dev.copy(png, file="plot2.png")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.