blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
624fbd66fcdb86938e697ac58ef841e1db5433d0
|
30c71b4abd1160411b3407898da82ed0ada91e2e
|
/merge_c_m3.R
|
054d3c8e0e9b10d59b1a2331b20d6ac17a5e6816
|
[] |
no_license
|
rtmill/CMS-to-analytic-csv
|
c2548edc1ab0d9ff8b39abe35f58cef1753d586b
|
7841b4bcdbf5bb1ee31b8b4130027358b3348fcc
|
refs/heads/master
| 2021-01-12T14:07:01.979018
| 2017-05-18T16:47:52
| 2017-05-18T16:47:52
| 70,164,838
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,851
|
r
|
merge_c_m3.R
|
# Script to merge C-1 w/ m3
# -- assumes you have tables c and m3 loaded
# Builds `ret`: a single table stacking rows from `c` (independent providers)
# and `m3` (providers), after projecting both onto a common column layout.
# NOTE(review): the input table is literally named `c`, shadowing base::c.
# Subsetting still works, but renaming the input would be clearer.
# Add source
# c
# Start from the record-number column and tag each row with its source type.
df <- data.frame(c[,1])
names(df)[1] <- "RPT_REC_NUM"
df$TYPE <- "INDEPENDENT"
# `c` rows carry no subprovider/title information, so zero-fill these columns.
df$SUBPROVIDER <- 0
df$TITLE <- 0
df <- cbind(df, c[,2:8])
#m3
df2 <- data.frame(m3[,1])
names(df2)[1] <- "RPT_REC_NUM"
df2$TYPE <- "PROVIDER"
# NOTE(review): presumably m3[,3:11] supplies the SUBPROVIDER/TITLE columns
# plus the counterpart of c[,2:8] -- confirm column alignment on real data.
df2 <- cbind(df2, m3[,3:11])
# Parse and merge mutual columns
# c
# Add the columns that dont have sums ( max rate and medicare rate)
df <- cbind(df, c[,c(9:10,12:13)])
# Add totals columns (distinct to c)
# Every 4th column from 18 to 74: the pre-computed period totals in `c`.
df <- cbind(df, c[,c(18,22,26,30,34,38,42,46,50,54,58,62,66,70,74)])
# Add remaining columns
df <- cbind(df, c[,c(75:83,86)])
# m3
# Add the columns that dont have sums ( max rate and medicare rate)
df2 <- cbind(df2, m3[,12:15])
# Sum periods
# m3 stores each measure per reporting period (PER1/PER2); compute *_TOTAL
# columns so df2 matches the totals already present in `c`.
# na.rm=TRUE treats a missing period as zero.
df2$MEDICARE_VISITS_NONMENTAL_TOTAL <- rowSums(cbind(m3$MEDICARE_VISITS_NONMENTAL_PER1, m3$MEDICARE_VISITS_NONMENTAL_PER2), na.rm=TRUE)
df2$MEDICARE_COSTS_NONMENTAL_TOTAL <- rowSums(cbind(m3$MEDICARE_COSTS_NONMENTAL_PER1, m3$MEDICARE_COSTS_NONMENTAL_PER2), na.rm=TRUE)
df2$MEDICARE_VISITS_MENTAL_TOTAL <- rowSums(cbind(m3$MEDICARE_VISITS_MENTAL_PER1, m3$MEDICARE_VISITS_MENTAL_PER2), na.rm=TRUE)
df2$MEDICARE_COSTS_MENTAL_TOTAL <- rowSums(cbind(m3$MEDICARE_COSTS_MENTAL_PER1, m3$MEDICARE_COSTS_MENTAL_PER2), na.rm=TRUE)
df2$LIMIT_ADJUSTMENT_TOTAL <- rowSums(cbind(m3$LIMIT_ADJUSTMENT_PER1, m3$LIMIT_ADJUSTMENT_PER2), na.rm=TRUE)
df2$GRAD_MEDEDU_TOTAL <- rowSums(cbind(m3$GRAD_MEDEDU_PER1, m3$GRAD_MEDEDU_PER2), na.rm=TRUE)
df2$TOTAL_MEDICARE_COSTS_TOTAL <- rowSums(cbind(m3$TOTAL_MEDICARE_COSTS_PER1, m3$TOTAL_MEDICARE_COSTS_PER2), na.rm=TRUE)
df2$LESS_BENEFDEDUCT_TOTAL <- rowSums(cbind(m3$LESS_BENEFDEDUCT_PER1, m3$LESS_BENEFDEDUCT_PER2), na.rm=TRUE)
df2$NET_MEDICARE_COSTS_NONPIAV_TOTAL <- rowSums(cbind(m3$NET_MEDICARE_COSTS_NONPIAV_PER1, m3$NET_MEDICARE_COSTS_NONPIAV_PER2), na.rm=TRUE)
df2$TOTAL_MEDICARE_CHARGES_TOTAL <- rowSums(cbind(m3$TOTAL_MEDICARE_CHARGES_PER1, m3$TOTAL_MEDICARE_CHARGES_PER2), na.rm=TRUE)
df2$TOTAL_MEDICARE_PREVENT_CHARGES_TOTAL <- rowSums(cbind(m3$TOTAL_MEDICARE_PREVENT_CHARGES_PER1, m3$TOTAL_MEDICARE_PREVENT_CHARGES_PER2), na.rm=TRUE)
df2$TOTAL_MEDICARE_PREVENT_COSTS_TOTAL <- rowSums(cbind(m3$TOTAL_MEDICARE_PREVENT_COSTS_PER1, m3$TOTAL_MEDICARE_PREVENT_COSTS_PER2), na.rm=TRUE)
df2$TOTAL_MEDICARE_NONPREVENT_COSTS_TOTAL <- rowSums(cbind(m3$TOTAL_MEDICARE_NONPREVENT_COSTS_PER1, m3$TOTAL_MEDICARE_NONPREVENT_COSTS_PER2), na.rm=TRUE)
df2$NET_MEDICARE_COST_TOTAL <- rowSums(cbind(m3$NET_MEDICARE_COST_PER1, m3$NET_MEDICARE_COST_PER2), na.rm=TRUE)
df2$BENEF_COINSUR_TOTAL <- rowSums(cbind(m3$BENEF_COINSUR_PER1, m3$BENEF_COINSUR_PER2), na.rm=TRUE)
df2 <- cbind(df2, m3[,c(43:52)])
# Stack the two aligned tables. NOTE(review): rbind() requires df and df2 to
# end up with identical column names in the same order -- verify on real data.
ret <- rbind(df, df2)
|
0aafc49eca352a11cbdebc9e3c392cf69f88b14c
|
5ba3cc6e6db58697fbbdde318175183c737bc309
|
/step3_xIBD/XIBD/R/merge_lists.R
|
4448abba1636f3302f95ac89d22bdeac0f61d084
|
[] |
no_license
|
RJHFMSTR/PofO_inference
|
a355711295f737678dd504f58c84c7f7b596535e
|
f139ece43b19f6e650017774ab4d202d52fd5350
|
refs/heads/master
| 2023-04-17T18:25:59.448153
| 2022-11-30T15:23:51
| 2022-11-30T15:23:51
| 422,484,656
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
r
|
merge_lists.R
|
# Internal Function
#
# Merge Returned Lists from Parallel Runs
#
# \code{merge_lists()} is a function used to merge summary IBD results for multiple pairs when running the IBD analysis on
# multiple cores
#
# @param A List with n objects for one pair.
# @param B List with n objects for another pair. The dimension of each object in A and B should equal.
# @return A list with the same number of objects, each the row-wise (rbind) merge
#   of the corresponding objects in \code{A} and \code{B}.
mergeLists1 <- function(A, B){
  x <- list()
  # seq_len() returns integer(0) when both inputs are empty lists, so the loop
  # is skipped cleanly; the original 1:max(...) iterated over c(1, 0) in that
  # case and raised a subscript error.
  for(i in seq_len(max(length(A), length(B)))) {
    # Single-bracket A[i]/B[i] yields list(NULL) past the end of the shorter
    # list, and rbind(NULL, m) == m, so unequal-length inputs are tolerated.
    x[[i]] <- mapply(rbind, A[i], B[i], SIMPLIFY=FALSE)[[1]]
  }
  return(x)
}
# Merge a pair of two-slot result lists: slot 1 is combined row-wise (rbind),
# slot 2 column-wise (cbind). Returns a new two-element list.
mergeLists2 <- function(A, B){
  rows_merged <- Map(rbind, A[1], B[1])[[1]]
  cols_merged <- Map(cbind, A[2], B[2])[[1]]
  list(rows_merged, cols_merged)
}
#' @useDynLib XIBD
#' @importFrom Rcpp sourceCpp
NULL
|
0cb337aa27e38f5a9ef73ffdcb73a5a43a9a228c
|
3f476a051eb22af77130ee485d0cbac40ccde03c
|
/sensitivity.boxplots.r
|
7512762e958f79dce55b67b7895553ec427856aa
|
[] |
no_license
|
ranalut/Scripts
|
a62d44f809c460319cdd1e2ad249f32aec9ce4b3
|
d863122f53e3a22d23c87a1105c6de46280e2ad0
|
refs/heads/master
| 2020-12-29T02:38:27.728016
| 2017-01-23T07:00:16
| 2017-01-23T07:00:16
| 8,935,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,097
|
r
|
sensitivity.boxplots.r
|
# NOTE(review): hard-coded Windows working directory and sourced helper
# scripts -- this file only runs on the author's machine layout.
setwd('c:/users/cbwilsey/documents/github/scripts/')
library(maptools)
library(sp)
library(RColorBrewer)
library(fields)
source('data.prep.r')
source('extract.number.r')
# extract.number <- function(x,var.name) { temp <- as.numeric(strsplit(x,split=var.name)[[1]][2]); return(temp) }
# Build grouped boxplots of per-GCM changes in abundance relative to a
# baseline scenario, and save them as a PNG under <workspace>/<species.folder>/analysis/.
# Boxes are laid out 5 GCMs + 1 NA spacer per suffix group (6 slots per group).
# Relies on data.prep() from data.prep.r (sourced above) to read the CSVs;
# file paths are assembled from workspace/species.folder/scenario/suffix parts.
# NOTE(review): the legend lists MIROC3.2 4th and UKMO-HadCM3 5th, but the
# gcms vector below plots 'hadcm3' 4th and 'miroc' 5th with colors recycled
# by position -- the last two legend entries appear to be swapped; confirm.
sensitive.boxplots <- function(workspace, species.folder, baseline.scenario, baseline.time, future.scenario, spatial.variable, time.window, data.type, legend.label, suffix, the.labels, leg.x, leg.y, ylab)
{
# Plotting data
# Baseline (historical) values, one per spatial unit.
if (data.type=='abundance') { historical.data <- as.numeric(data.prep(data.file=paste(workspace,species.folder,'/Results/',baseline.scenario,'/mean.',baseline.scenario,'.',spatial.variable,'.',baseline.time,'.csv',sep=''),type='abundance',var.name=spatial.variable)[,'variable']) }
# if (data.type=='productivity') { historical.data <- data.prep(data.file=paste(workspace,species.folder,'/Results/',baseline.scenario,'/',baseline.scenario,'.report.productivity.',spatial.variable,'.',baseline.time,'.csv',sep=''),type='productivity',var.name=spatial.variable) }
# print(head(historical.data))
fut.data <- list()
gcms <- c('ccsm3','cgcm3','giss-er','hadcm3','miroc')
for (j in 1:length(suffix))
{
# Slots (j*6-5)..(j*6-1) hold the 5 GCMs of group j; slot j*6 is the spacer.
place.holders <- seq(-5,-1,1) + j*6
# print(place.holders)
for (i in 1:5)
{
# cat('gcm',gcms[i],'\n')
if (data.type=='abundance') { fut.data[[place.holders[i]]] <- as.vector(data.prep(data.file=paste(workspace,species.folder,'/Results/',future.scenario,gcms[i],suffix[j],'/abs.change.',future.scenario,gcms[i],suffix[j],'.',spatial.variable,'.',time.window[1],'.csv',sep=''),type='abundance',var.name=spatial.variable)[,'variable']) }
# if (data.type=='productivity') { fut.data[,i] <- data.prep(data.file=paste(workspace,species.folder,'/Results/',future.scenario[1],gcms[i],future.scenario[2],'/',future.scenario[1],gcms[i],future.scenario[2],'.report.productivity.',spatial.variable,'.',time.window[1],'.csv',sep=''),type='productivity',var.name=spatial.variable) }
# print(head(fut.data[[i]]))
# print(fut.data[[i]][historical.data==0 & fut.data[[i]]>0])
# Keep a spatial unit only if it was occupied at baseline, or was newly
# colonized (baseline 0 but future change > 0).
fut.data[[place.holders[i]]] <- fut.data[[place.holders[i]]][historical.data != 0 | (historical.data==0 & fut.data[[place.holders[i]]]>0)]
# print(length(fut.data[[i]]))
# print(fut.data[[place.holders[i]]]); stop('cbw')
# fut.data[fut.data==0] <- NA
}
# NA spacer box between suffix groups (skipped after the last group).
if (j<length(suffix)) { fut.data[[(j*6)]] <- NA }
}
# =====================================================================================================
# Plotting, making a figure
# if (is.null(dev.list())) { dev.new() } # This may start giving an error for an invalid screen number. Quit R and start over if this happens.
png(paste(workspace,species.folder,'/analysis/sensitive.',future.scenario[1],data.type,'.',time.window[1],'.png',sep=''), width=400, height=400)
par(mar=c(3,6,1,1))
# Colors recycle every 6 boxes, matching the 5-GCM + spacer layout per group.
boxplot(fut.data, border=brewer.pal(6,name='Set1'), col=brewer.pal(6,name='Set1'), range=0, xaxt='n', bty='n', ylab=ylab) # boxwex=0.5,
# One x label centered under each 6-slot group.
at.spots <- (seq(1,length(suffix),1) * 6) - 3
axis(side=1, at=at.spots, labels=the.labels, tick=FALSE)
abline(h=0)
legend(leg.x,leg.y, c('CCSM3','CGCM3.1','GISS-ER','MIROC3.2','UKMO-HadCM3'), fill=brewer.pal(6,name='Set1'), border=brewer.pal(6,name='Set1'), bty='n')
dev.off()
}
# Script entry points: each block below is one hard-coded invocation of
# sensitive.boxplots() for a particular species workspace. Inactive runs are
# kept as comments for reference.
# =======================================================================================
# Function Call Townsend Squirrel
# sensitive.boxplots(
# workspace='F:/pnwccva_data2/HexSim/Workspaces/',
# species.folder='town_squirrel_v1',
# baseline.scenario='squirrel.016.110.baseline',
# future.scenario=c('squirrel.016.110.'),
# data.type='abundance',
# spatial.variable='huc',
# baseline.time='31.40',
# time.window=c('99.109','LATE-2000s'), # c('51.60','MID-2100s'), c('99.109','LATE-2100s'),
# legend.label=' populations', # ' females'
# suffix=c('','.swe','.def'),
# the.labels=c('FULL MODEL','SWE ONLY','DEFICIT ONLY'),
# leg.x=5.5, leg.y=3000, ylab='CHANGE IN # OF POPULATIONS\nRELATIVE TO BASELINE'
# )
# =======================================================================================
# Function Call Spotted Frog
sensitive.boxplots(
workspace='H:/HexSim/Workspaces/',
species.folder='spotted_frog_v2',
baseline.scenario='rana.lut.105.125.baseline',
future.scenario=c('rana.lut.105.125.'),
data.type='abundance',
spatial.variable='huc',
baseline.time='31.40',
time.window=c('99.109','LATE-2000s'), # c('51.60','MID-2100s'), c('99.109','LATE-2100s'),
legend.label=' populations', # ' females'
suffix=c('','.swe','.aet'),
the.labels=c('FULL MODEL','SWE ONLY','AET ONLY'),
leg.x=6, leg.y=2500, ylab='CHANGE IN # OF POPULATIONS\nRELATIVE TO BASELINE'
)
# # =======================================================================================
# Function Call Lynx
# NOTE(review): the commented-out Lynx run calls create.figure(), which is not
# defined in this file, and passes cutoff arguments sensitive.boxplots() does
# not accept -- it appears copied from a different script.
# create.figure(
# workspace='I:/HexSim/Workspaces/',
# species.folder='lynx_v1',
# baseline.scenario='lynx.050.baseline', # run .35 separately
# future.scenario=c('lynx.050.',''), # run .35 separately
# data.type='abundance', # 'productivity', # 'abundance'
# spatial.variable='huc',
# baseline.time='34.42',
# time.window=c('97.105','LATE-2000s'), # c('97.105','LATE-2100s') # c('52.60','MID-2100s')
# historical.cutoffs=c(0,5,25,200,1000),
# future.cutoffs=c(-350,-75,-20,-5,-2,0,2,5,20,75,350)
# )
# =======================================================================================
# Function Call Wolverine
sensitive.boxplots(
workspace='H:/HexSim/Workspaces/',
species.folder='wolverine_v1',
baseline.scenario='gulo.023.baseline',
future.scenario=c('gulo.023.a2.'), # '' '.swe' '.biomes'
data.type='abundance', # 'productivity', # 'abundance'
spatial.variable='huc',
baseline.time='41.50', # '41.50', # '21.50',
time.window=c('100.109','LATE-2000s'), # c('31.60','MID-2100s'), # c('81.109','LATE-2100s'), '100.109', '51.60'
legend.label=' females', # ' females'
suffix=c('','.swe','.biomes'),
the.labels=c('FULL MODEL','SWE ONLY','BIOMES ONLY'),
leg.x=12, leg.y=-60, ylab='CHANGE IN # OF POPULATIONS\nRELATIVE TO BASELINE'
)
|
54a1e79595cf30aa2008c05b60bda61323b62c3b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/modelfree/examples/comploglog_link_private.Rd.R
|
54e41b5693c535c86ce8ed2e51e05ee4ae9f8e16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
comploglog_link_private.Rd.R
|
# Example for comploglog_link_private(): fit a psychometric GLM using a
# complementary log-log link that accounts for guessing and lapsing rates.
library(modelfree)
### Name: comploglog_link_private
### Title: Complementary log-log link function with guessing and lapsing
###   rates
### Aliases: comploglog_link_private
### Keywords: nonparametric models regression nonlinear

### ** Examples

# Load the bundled example data set; this creates the `example01` object.
data( "01_Miranda" )
stimulus <- example01$x
successes <- example01$r
trials <- example01$m

# Response is the observed proportion of successes; keep the trial counts as
# GLM weights.
model_frame <- data.frame( cbind( successes/trials, trials, stimulus ) )
names( model_frame ) <- c( "resp", "m", "x" )
model_formula <- c( "resp ~ x" )

# Link with a guessing rate of 0.1 and a lapsing rate of 0.1.
guess_lapse_link <- comploglog_link_private( 0.1, 0.1 )
fit <- glm( model_formula, data = model_frame, weights = m, family = binomial( guess_lapse_link ) )
|
a5609ee0e7f54feb9d6b8f3b81db428a2ed0588c
|
7eb63399fa00e3c547e5933ffa4f47de515fe2c6
|
/man/is.retain.Rd
|
5c099afa6e45826843282ecce21a89c0733d49c4
|
[] |
no_license
|
bentaylor1/lgcp
|
a5cda731f413fb30e1c40de1b3360be3a6a53f19
|
2343d88e5d25ecacd6dbe5d6fcc8ace9cae7b136
|
refs/heads/master
| 2021-01-10T14:11:38.067639
| 2015-11-19T13:22:19
| 2015-11-19T13:22:19
| 45,768,716
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
rd
|
is.retain.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mcmcIterator.R
\name{is.retain}
\alias{is.retain}
\title{do we retain this iteration?}
\usage{
is.retain(obj)
}
\arguments{
\item{obj}{an mcmc iterator}
}
\value{
TRUE or FALSE
}
\description{
TRUE if this MCMC iteration is not thinned out (i.e. it should be retained), FALSE otherwise
}
|
9c5e6a758f2fc2217102db0a9f0aa5f071df6b68
|
5528c02b97dd00525bd91c884a7a30885e9551db
|
/plot2.R
|
eaed3813e814670d0795bbd463e757799defec0a
|
[] |
no_license
|
Shekeen/ExData_Plotting2
|
17daaad6345b240adfb8c1842e0e71f5b7fdffe0
|
4a77790e583166e2d23a6327a390451c39473658
|
refs/heads/master
| 2016-09-05T21:55:21.105271
| 2014-07-23T14:31:28
| 2014-07-23T14:31:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
plot2.R
|
# plot2.R: total PM2.5 emissions in Baltimore City (fips == 24510) for the
# years 1999, 2002, 2005 and 2008, plotted with base graphics and saved to
# plot2.png.
emissions_file <- 'summarySCC_PM25.rds'
scc_table_file <- 'Source_Classification_Code.rds'

# Download and unpack the data archive only when either RDS file is missing.
# `||` (scalar, short-circuiting) is the correct operator inside if(); the
# original used the elementwise `|`.
if (!file.exists(emissions_file) || !file.exists(scc_table_file)) {
  tmpfile <- tempfile()
  download.file(url='https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip',
                destfile=tmpfile)
  unzip(tmpfile)
  unlink(tmpfile)
}

NEI <- readRDS(emissions_file)
SCC <- readRDS(scc_table_file)

# One emissions total per year for Baltimore City; replaces the four
# copy-pasted subset()/sum() pairs. which() drops non-matching/NA rows,
# mirroring subset()'s row selection.
years <- c(1999, 2002, 2005, 2008)
total_emissions <- vapply(
  years,
  function(y) sum(NEI$Emissions[which(NEI$year == y & NEI$fips == 24510)]),
  numeric(1)
)
total_emissions_per_year <- data.frame(Emissions=total_emissions,
                                       year=years)

options(scipen=7)            # avoid scientific notation on the y axis
par(mar=c(5,7.5,4,6) + 0.1)  # widen the left margin for the axis title
plot(total_emissions_per_year$year, total_emissions_per_year$Emissions,
     type="b",
     lwd=3,
     main="PM2.5 total emissions in Baltimore City",
     xaxt="n",
     yaxt="n",
     xlab="year",
     ylab="")
axis(1, at=total_emissions_per_year$year, las=0)
axis(2, las=2)
mtext("PM2.5 total emissions, tons", side=2, line=6)
dev.copy(png, 'plot2.png')
dev.off()
|
d0e60bcd1697990408b43c98e189b2f1e0a362df
|
8d3a51881b0f757a4bc8eb2f7f87685c77acfab6
|
/tests/testthat/test-string.R
|
8eafcbcf4d4c2fc046ad41941e6dce4c27576861
|
[] |
no_license
|
cran/wakefield
|
679968f83c3c889616941569afa303156585d67b
|
734a76be93f1df21651312ff2bbf7ba7288bc4f4
|
refs/heads/master
| 2021-01-15T15:25:42.747684
| 2020-09-13T16:30:02
| 2020-09-13T16:30:02
| 48,091,046
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
test-string.R
|
# testthat suite for string(); currently a placeholder with no expectations.
context("Checking string")
test_that("string ...",{
    # TODO(review): no expectations yet -- add assertions for string() behavior.
})
|
437a3802a55433fccdb6325faf40a22c70a84493
|
439ea351c775a7b192bedc1d3f34d618f5c4a186
|
/Exe8_Q1_Code.R
|
a8d4652b0f7fcfc9b1c5363a726472a591df6f4d
|
[] |
no_license
|
davissage/Intro_Biocomp_ND_317_Tutorial8
|
7901dd9b4c4b8c602f6c9b2fa9de8621a67e98ce
|
7f74b152bbbd08af4526402383e37c04eeb01da7
|
refs/heads/master
| 2021-07-12T06:41:13.330343
| 2017-10-14T02:31:51
| 2017-10-14T02:31:51
| 106,833,497
| 0
| 0
| null | 2017-10-13T14:18:20
| 2017-10-13T14:18:20
| null |
UTF-8
|
R
| false
| false
| 2,175
|
r
|
Exe8_Q1_Code.R
|
# Exercise 8, Q1: read a VCF-like table (Cflorida.vcf), normalize sample
# column names to Cf.Sfa.* / Cf.Gai.*, and pull the comma-separated depth
# field out of genotype strings of the form "0/0:d,d:d:dd:d,dd,ddd",
# writing the result to CfloridaCounts.txt.
# NOTE(review): hard-coded working directory; only runs on the author's machine.
setwd("c:/Users/DAVIS/Desktop/shell-novice-data/exe8/Intro_Biocomp_ND_317_Tutorial8/")
library(stringr)
# First read grabs the header tokens; second read grabs the data rows.
Cflorida <- scan(file="Cflorida.vcf",what = character(), skip = 1)
Cflorida2 <- read.table(file="Cflorida.vcf", skip = 1)
colnames(Cflorida2) <- Cflorida[1:90]
# Output frame; dimensions 999 x 90 are hard-coded to match this input file.
# NOTE(review): presumably 999 == nrow(Cflorida2) and 90 == ncol -- confirm.
Cflorida3 <- as.data.frame(matrix(nrow=999, ncol=90))
# Scratch experiments kept by the author while developing the regexes below.
#i=5
#j=1
#h=1
#str_detect(colnames(Cflorida2[5]),"CF\\d{0,2}.A[.]+")
#str_detect(colnames(Cflorida2),"[Cc][Ff](07)?\\.[Aa](2)?\\..+")
#dim(Cflorida2[,str_detect(colnames(Cflorida2),"[Cc][Ff](07)?\\.[Aa](2)?\\..+")])
#dim(Cflorida2[,str_detect(colnames(Cflorida2),"[Cc][Ff]\\.[Gg][Aa]?[2Ii]?\\..+")])
#str_replace(colnames(Cflorida2[5]),"[Cc][Ff](07)?\\.[Aa](2)?\\.","Cf.Sfa.")
#str_detect(Cflorida2[1,5],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?")
#str_replace(Cflorida2[1,5],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?","\\1")
# For each column: if the header matches a "Cf.A*" (Sfa) or "Cf.G*" (Gai)
# sample pattern, rename it and extract the "\\d,\\d" depth field from every
# 0/0 genotype cell; non-matching cells become NA. Other columns are skipped
# (their names and cells in Cflorida3 stay NA).
for(i in 1:length(colnames(Cflorida2))){
if(str_detect(colnames(Cflorida2[i]),"[Cc][Ff](07)?\\.[Aa](2)?\\..+")==TRUE){
colnames(Cflorida3)[i] <- str_replace(colnames(Cflorida2[i]),"[Cc][Ff](07)?\\.[Aa](2)?\\.","Cf.Sfa.")
for(j in 1:length(Cflorida2$POS)){
if(str_detect(Cflorida2[j,i],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?")==TRUE){
Cflorida3[j,i] <- str_replace(Cflorida2[j,i],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?","\\1")
}
else{
Cflorida3[j,i] <- NA
}
}
}
# Same extraction for the Gainesville-style (Cf.G*) sample columns.
else if(str_detect(colnames(Cflorida2[i]),"[Cc][Ff]\\.[Gg][Aa]?[2Ii]?\\..+")==TRUE){
colnames(Cflorida3)[i] <- str_replace(colnames(Cflorida2[i]),"[Cc][Ff]\\.[Gg][Aa]?[2Ii]?\\.","Cf.Gai.")
for(h in 1:length(Cflorida2$POS)){
if(str_detect(Cflorida2[h,i],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?")==TRUE){
Cflorida3[h,i] <- str_replace(Cflorida2[h,i],"0\\/0\\:(\\d\\,\\d)\\:\\d\\:\\d\\d?\\:\\d\\,\\d\\d?\\,\\d\\d?\\d?","\\1")
}
else{
Cflorida3[h,i] <- NA
}
}
}
else{}
}
# Drop the first four (non-sample) columns and write one value per line.
Cflorida4<-Cflorida3[,5:length(colnames(Cflorida3))]
write.table(Cflorida4, "CfloridaCounts.txt", sep="\n")
|
8c9f7e32bd159b52ccaab6155bb3ae472de7d664
|
74d3ccdbeeee691888e89073039b47a9b737d78f
|
/man/modelCandleFeatures.Rd
|
36b21520388609d326b6d44660ce0bf8c7df092c
|
[
"MIT"
] |
permissive
|
elephann/RQuantTrader
|
e5f8813eb880ce05cf997f01b9732cfaa57b995f
|
067c715c036a5d86596b8589d617ec795a8dc3c1
|
refs/heads/master
| 2022-03-21T17:35:51.078741
| 2019-09-07T13:18:40
| 2019-09-07T13:18:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 316
|
rd
|
modelCandleFeatures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FeatureEngineering.R
\name{modelCandleFeatures}
\alias{modelCandleFeatures}
\title{Consecutive Candle Analysis}
\usage{
modelCandleFeatures(data)
}
\arguments{
\item{data}{raw data of prices}
}
\description{
Consecutive Candle Analysis
}
|
9cfea061ed9edaba852454ae24a8e651314af248
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gstat/examples/vv.Rd.R
|
0629a4e68f58d7a04d3e91167a1b231ae55f8208
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
vv.Rd.R
|
library(gstat)
### Name: vv
### Title: Precomputed variogram for PM10 in data set air
### Aliases: vv
### ** Examples
## Not run:
##D # obtained by:
##D library(spacetime)
##D library(gstat)
##D data(air)
##D
##D if (!exists("rural"))
##D rural = STFDF(stations, dates, data.frame(PM10 = as.vector(air)))
##D rr = rural[,"2005::2010"]
##D unsel = which(apply(as(rr, "xts"), 2, function(x) all(is.na(x))))
##D r5to10 = rr[-unsel,]
##D vv = variogram(PM10~1, r5to10, width=20, cutoff = 200, tlags=0:5)
## End(Not run)
|
1c7a92cd5387b715934df439f4cdf249a9610493
|
cd901f78760d0856a58e2791d94751b3e3e5c3e8
|
/man/batchExonBed.Rd
|
133c461ef101547be55cca93239b671b9f28a8ae
|
[] |
no_license
|
sanadamakomi/exonCNV
|
4d6056596d2a17df5e56075400441207bf6eb77f
|
92aaeb8ea242aa6965e3910ae5825c68ec30c65b
|
refs/heads/master
| 2022-08-10T09:24:41.165518
| 2022-08-04T07:59:10
| 2022-08-04T07:59:10
| 175,590,331
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
batchExonBed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/batchFunc.R
\name{batchExonBed}
\alias{batchExonBed}
\title{Export exon bed file.}
\usage{
batchExonBed(annoFile, outPath, gene, expand)
}
\arguments{
\item{annoFile}{Path of annotation file.}
\item{outPath}{Path to write to.}
\item{gene}{A character string of gene symbols separated by comma(,) to
export.}
\item{expand}{An integer. If an exon's size is less than this value, its
region will be expanded centrally to a width of this value.}
}
\description{
Export exon bed file.
}
|
34b45e1a6599f66bc2c1ba15d584dae3b73df0db
|
7218d2e425fbf03fad09d3d652ead458a3cc6c67
|
/inst/doc/v03-outputs.R
|
d3e62ac9771acf79b8ceb65c2a8d61ce22f4b508
|
[] |
no_license
|
cran/basictabler
|
5fbe8577fc7c23e313ca6a3cca55015d145ff087
|
0a3b06d307fdd9422a68afc10f86f313534e0b97
|
refs/heads/master
| 2023-06-04T01:02:39.324085
| 2021-06-26T14:10:02
| 2021-06-26T14:10:02
| 105,392,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,125
|
r
|
v03-outputs.R
|
## v03-outputs.R -- R code extracted (purled) from the basictabler package
## vignette on output formats. Each "## ----" line is a knitr chunk header;
## every chunk rebuilds the same small sales table and then demonstrates a
## different output route. Chunk structure is preserved deliberately.
## ---- message=FALSE, warning=FALSE--------------------------------------------
## Chunk 1: render the table as an HTML widget.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
tbl$renderTable()
## ---- message=FALSE, warning=FALSE, comment=""--------------------------------
## Chunk 2: print the table to the console via its default print method.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
# output table
tbl
## ---- message=FALSE, warning=FALSE, comment=""--------------------------------
## Chunk 3: emit the raw HTML markup and the accompanying CSS.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
#out the HTML and CSS
cat(paste(tbl$getHtml(), sep="", collapse="\n"))
cat(tbl$getCss())
## ---- message=FALSE, warning=FALSE, eval=TRUE, comment=""---------------------
## Chunk 4: convert to a flextable object.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
# convert to flextable
library(flextable)
ft <- tbl$asFlexTable()
ft
## ---- eval=FALSE--------------------------------------------------------------
## Chunk 5 (not evaluated): export via flextable into a Word document.
# # data for the table
# saleIds <- c(5334, 5336, 5338)
# items <- c("Apple", "Orange", "Banana")
# quantities <- c(5, 8, 6)
# prices <- c(0.34452354, 0.4732543, 1.3443243)
#
# # construct the table
# library(basictabler)
# tbl <- BasicTable$new()
# tbl$addData(data.frame(saleIds, items, quantities, prices),
# firstColumnAsRowHeaders=TRUE,
# explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
# columnFormats=list(NULL, NULL, NULL, "%.2f"))
#
# # convert to flextable
# library(flextable)
# ft <- tbl$asFlexTable()
#
# # save word document
# library(officer)
# docx <- read_docx()
# docx <- body_add_par(docx, "Example Table")
# docx <- body_add_flextable(docx, value = ft)
# print(docx, target = "example_table_word.docx")
## ---- eval=FALSE--------------------------------------------------------------
## Chunk 6 (not evaluated): export via flextable into a PowerPoint slide.
# # data for the table
# saleIds <- c(5334, 5336, 5338)
# items <- c("Apple", "Orange", "Banana")
# quantities <- c(5, 8, 6)
# prices <- c(0.34452354, 0.4732543, 1.3443243)
#
# # construct the table
# library(basictabler)
# tbl <- BasicTable$new()
# tbl$addData(data.frame(saleIds, items, quantities, prices),
# firstColumnAsRowHeaders=TRUE,
# explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
# columnFormats=list(NULL, NULL, NULL, "%.2f"))
#
# # convert to flextable
# library(flextable)
# ft <- tbl$asFlexTable()
#
# # save PowerPoint document
# library(officer)
# ppt <- read_pptx()
# ppt <- add_slide(ppt, layout = "Title and Content", master = "Office Theme")
# ppt <- ph_with(ppt, value = ft, location = ph_location_left())
# print(ppt, target = "example_table_powerpoint.pptx")
## ---- message=FALSE, warning=FALSE, eval=TRUE, comment=""---------------------
## Chunk 7: convert the table to a character matrix (formatted values).
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
# output as matrix
tbl$asMatrix()
## ---- message=FALSE, warning=FALSE, eval=TRUE, comment=""---------------------
## Chunk 8: matrix output with headers promoted to dimnames and raw
## (unformatted) cell values.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
# output as matrix
tbl$asMatrix(firstRowAsColumnNames=TRUE, firstColumnAsRowNames=TRUE, rawValue=TRUE)
## ---- message=FALSE, warning=FALSE, eval=TRUE, comment=""---------------------
## Chunk 9: convert back to a data frame with raw values.
# data for the table
saleIds <- c(5334, 5336, 5338)
items <- c("Apple", "Orange", "Banana")
quantities <- c(5, 8, 6)
prices <- c(0.34452354, 0.4732543, 1.3443243)
# construct the table
library(basictabler)
tbl <- BasicTable$new()
tbl$addData(data.frame(saleIds, items, quantities, prices),
firstColumnAsRowHeaders=TRUE,
explicitColumnHeaders=c("Sale ID", "Item", "Quantity", "Price"),
columnFormats=list(NULL, NULL, NULL, "%.2f"))
# output as data frame
df <- tbl$asDataFrame(firstRowAsColumnNames=TRUE, rawValue=TRUE)
df
str(df)
|
195d561e3eebe82fe4addf3ec7fcad694dd0fe43
|
4d153213c3621e9f313e7a6356328a2f494a7f99
|
/otimizacao.R
|
9348b29e91d2067c69f4be3cea8276330962e0a1
|
[] |
no_license
|
castroantonio/otimizacao-r
|
4e09d4a666efb084e7748e69cd91dd13cc1ff78b
|
3f9d10cd576273c9b10f31ad3ddf6a62e762c45a
|
refs/heads/master
| 2022-06-25T07:56:37.270101
| 2020-05-06T23:56:01
| 2020-05-06T23:56:01
| 261,903,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,414
|
r
|
otimizacao.R
|
####****----.... Example of optimization with R data structures. ...----****####
#### Environment setup ####
# Load a package by name, installing it from CRAN first if it is missing.
# Stops with an error if the package still cannot be loaded after install.
loadlibrary <- function(x) {
  already_available <- require(x, character.only = TRUE)
  if (!already_available) {
    install.packages(x, repos='http://cran.us.r-project.org', dep=TRUE)
    if (!require(x, character.only = TRUE)) {
      stop("Package not found")
    }
  }
}
loadlibrary("profvis") ## R profiler - highlights slow spots in the source code
#### **** Using data frames **** ####
# Benchmark version 1: nested data frames grown row by row (deliberately slow).
profvis({ ## profile this code block
  ## helper variables
  vetor_logico <- c(rep(FALSE, 10)) ## vector with 10 FALSE entries
  minusculas_maiusculas <- c(letters, LETTERS) ## 52 letters (lower + upper case)
  nr_elementos <- length(minusculas_maiusculas)
  ## data structures: one-row template data frames truncated to zero rows
  aluno <- data.frame("nome", "idade", "respostas", "disciplinas", "historico", stringsAsFactors=FALSE)
  colnames(aluno) <- c("nome", "idade", "respostas", "disciplinas", "historico")
  aluno <- aluno[0, ]
  boletim <- data.frame("disciplina", "nota1"=0, "nota2"=0, "nota3"=0, stringsAsFactors=FALSE)
  colnames(boletim) <- c("disciplina", "nota1", "nota2", "nota3")
  boletim <- boletim[0, ]
  ## populate the data structures
  novo_elemento <- data.frame("disciplina", "nota1"=0, "nota2"=0, "nota3"=0, stringsAsFactors=FALSE)
  colnames(novo_elemento) <- c("disciplina", "nota1", "nota2", "nota3")
  ## build a report card (boletim) with 52 subjects and their grades
  for(i in 1:nr_elementos) {
    novo_elemento$disciplina <- minusculas_maiusculas[i]
    novo_elemento$nota1 <- i %% 2
    novo_elemento$nota2 <- i %% 8
    novo_elemento$nota3 <- i %% 10
    boletim <- rbind(boletim, novo_elemento)
  }
  novo_elemento <- data.frame("nome", "idade", "respostas", "disciplinas", "historico", stringsAsFactors=FALSE)
  colnames(novo_elemento) <- c("nome", "idade", "respostas", "disciplinas", "historico")
  ## create 52 students
  for (i in 1:nr_elementos) {
    novo_elemento$nome <- minusculas_maiusculas[i]
    novo_elemento$idade <- i
    respostas <- vetor_logico
    respostas[i %% 10 + 1] <- TRUE
    novo_elemento$respostas[1] <- list(respostas)
    ## novo_elemento$disciplinas <- boletim **** does not work
    ## novo_elemento$disciplinas[1] <- boletim **** does not work
    ## novo_elemento$disciplinas[[1]] <- boletim **** does not work
    novo_elemento$disciplinas[1] <- list(boletim)
    novo_elemento$historico[1] <- list(boletim)
    ## BUG FIX: the original called rbing(), a typo for rbind(), which aborted
    ## the script at runtime with "could not find function".
    aluno <- rbind(aluno, novo_elemento)
  }
  ## for each student: take nota1 of subject 10, add nota3 of entry 36 of the
  ## student's historico, and print the result
  for (i in 1:nr_elementos) {
    resultado <- aluno[i,]$disciplinas[[1]][10, ]$nota1
    resultado <- resultado + aluno[i,]$historico[[1]][36, ]$nota3
    print(resultado)
  }
  ## for each student: average the three grades of every subject, store the
  ## mean back into nota1, and print it
  for (i in 1:nr_elementos) { ## loop over students
    for (j in 1:nr_elementos) { ## loop over subjects
      media <- (aluno[i,]$disciplinas[[1]][j, ]$nota1 + aluno[i,]$disciplinas[[1]][j, ]$nota2 + aluno[i,]$disciplinas[[1]][j, ]$nota3) / 3
      aluno[i,]$disciplinas[[1]][j, ]$nota1 <- media
      print(aluno[i,]$disciplinas[[1]][j, ]$nota1) ## could print media directly; kept slow on purpose
    }
  }
}) ## profvis
#### **** Using lists and matrices **** ####
# Benchmark version 2: same workload as above but with plain nested lists.
profvis({ ## profile this code block
  ## helper variables
  vetor_logico <- c(rep(FALSE, 10)) ## vector with 10 FALSE entries
  minusculas_maiusculas <- c(letters, LETTERS) ## 52 letters (lower + upper case)
  nr_elementos <- length(minusculas_maiusculas)
  ## data structures
  aluno <- list() ## "nome", "idade", "respostas", "disciplinas", "historico"
  boletim <- list() ## "disciplina", "nota1", "nota2", "nota3"
  ## populate the data structures
  ## build a report card (boletim) with 52 subjects and their grades
  for(i in 1:nr_elementos) {
    disciplina <- minusculas_maiusculas[i]
    nota1 <- i %% 2
    nota2 <- i %% 8
    nota3 <- i %% 10
    novo_elemento <- list(disciplina=disciplina, nota1=nota1, nota2=nota2, nota3=nota3)
    boletim[[i]] <- novo_elemento
  }
  ## create 52 students
  for (i in 1:nr_elementos) {
    respostas <- vetor_logico
    respostas[i %% 10 + 1] <- TRUE
    novo_elemento <- list(nome=minusculas_maiusculas[i], idade=i, respostas=respostas, disciplinas=boletim, historico=boletim)
    aluno[[i]] <- novo_elemento
  }
  ## for each student: take nota1 of subject 10, add nota3 of entry 36 of the
  ## student's historico, and print the result
  ## BUG FIX: the original read aluno[[5]]$disciplinas[[36]] -- a hard-coded
  ## student index and the wrong sub-list -- which disagreed with both the
  ## comment above and the data.frame version of this benchmark
  ## (aluno[i,]$historico[[1]][36, ]$nota3).
  for (i in 1:nr_elementos) {
    resultado <- aluno[[i]]$disciplinas[[10]]$nota1
    resultado <- resultado + aluno[[i]]$historico[[36]]$nota3
    print(resultado)
  }
  ## for each student: average the three grades of every subject, store the
  ## mean back into nota1, and print it
  for (i in 1:nr_elementos) { ## loop over students
    for (j in 1:nr_elementos) { ## loop over subjects
      media <- (aluno[[i]]$disciplinas[[j]]$nota1 + aluno[[i]]$disciplinas[[j]]$nota2 + aluno[[i]]$disciplinas[[j]]$nota3) / 3
      aluno[[i]]$disciplinas[[j]]$nota1 <- media
      print(aluno[[i]]$disciplinas[[j]]$nota1) ## could print media directly; kept slow on purpose
    }
  }
}) ## profvis
|
2c6d316103f2bf9bfdf1fda165416ee6eacd6127
|
fe75287628325776ecbf50e2f5cd656710c02378
|
/man/get_parameters.Rd
|
164be67aa629b3dda5dca0b0cf39ae7a6d911a83
|
[
"MIT"
] |
permissive
|
afcarl/FastRText
|
e2b9f7bcb6b4a45b4f22ff51b541324dfa417467
|
d1da2d42f798f78abd8f5537bc4a265708f69e11
|
refs/heads/master
| 2020-03-25T19:22:43.949888
| 2017-08-31T18:14:06
| 2017-08-31T18:14:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
get_parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/API.R
\name{get_parameters}
\alias{get_parameters}
\title{Export hyper parameters}
\usage{
get_parameters(model)
}
\arguments{
\item{model}{trained fasttext model}
}
\description{
Get the hyper parameters used to train the model
}
\examples{
library(FastRText)
model_test_path <- system.file("extdata", "model_classification_test.bin", package = "FastRText")
model <- load_model(model_test_path)
print(head(get_parameters(model), 5))
}
|
eecb6a626aac78da3a72054d66d892f64386809f
|
cdb360025e038c2d045a397858c707722ba56e88
|
/R/readEverything.R
|
7ec6bb5e1971e7445b11b2ffefc71e9c3f1ff54c
|
[] |
no_license
|
ShashankKumbhare/paramonteRold
|
aa92962e7a8186c5d42d9e793129b2a553f4bc44
|
1bcb5a9e1fcb941134406550585d5b5bfb2acb62
|
refs/heads/master
| 2023-04-13T11:41:36.960435
| 2021-04-20T09:28:58
| 2021-04-20T09:28:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,174
|
r
|
readEverything.R
|
####################################################################################################################################
################################## readEverything ##################################################################################
# >>
## Generic reader behind the read* wrappers (readChain, readSample,
## readProgress, readMarkovChain, readReport, readRestart). The file type is
## derived from the *caller's* function name, so this must only be invoked
## through one of those wrappers. arg2..arg4 are positional aliases whose
## meaning depends on the wrapper (see the switch below). When `renabled` is
## TRUE the parsed list is assigned into the global environment; otherwise it
## is attached to this ParaDRAM object as a new public field.
readEverything <- function( file, arg2, arg3 = TRUE, arg4 = FALSE, delimiter, parseContents, renabled ) {
  # Get objectName & routineName: callerName[2] is the user's object name,
  # callerName[3] is the wrapper routine that dispatched here. >>>>>>>>>>>>>>>>>
  callerName = getFuncName()
  private$objectName = callerName[2] # >> objectName
  routineName = callerName[3] # >> routineName
  # Get & Verify fileType: strip the leading "read" (chars 1-4) and lowercase
  # the remainder, e.g. "readChain" -> "chain". >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  fileType = tolower( substr( routineName, 5, 1000 ) ) # >> Get fileType
  private$verifyFileType( fileType ) # >> Verify fileType
  # Set Input Argument. NOTE(review): scalar conditions use `&`; `&&` would be
  # the idiomatic scalar operator, though behavior is the same here. >>>>>>>>>>>
  if ( missing(file) ) { file = private$setFileToRead(fileType) } # >> Set fileName (if missing)
  switch( routineName,
    readChain = , readSample = , readProgress = , readMarkovChain = { # >> Set delimiter, parseContents, renabled (if missing)
      if ( missing(arg2) & missing(delimiter) ) { delimiter = private$setDelimiterToRead(fileType) }# else { delimiter = arg2 }
      if ( missing(arg3) & missing(parseContents) ) { parseContents = TRUE } # else { parseContents = arg3 }
      if ( missing(arg4) & missing(renabled) ) { renabled = FALSE } # else { renabled = arg4 }
    }, readReport = , readRestart = { # >> Set renabled only (if missing)
      if ( missing(arg2) & missing(renabled) ) { renabled = FALSE } # else { renabled = arg2 }
    }, { private$Err$abort = paste0( private$methodName, " routine '", routineName, "' is incorrect. Please Verify.") }
  )
  # Check Input Argument >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  private$checkInputArg( routineName, file, renabled, delimiter, parseContents )
  # Get filePathList: all output files matching the requested pattern; abort
  # when none are found. >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  filePathList = getFilePathList( file, fileType, self$reportEnabled ) # >> filePathList
  lenFilePathList = length(filePathList$files)
  msg = paste( lenFilePathList, " ", fileType, " files detected ", "matching the pattern: '", filePathList$pattern, "*'", sep = "" )
  if ( lenFilePathList == 0 ) { private$Err$abort = msg } # >> Abort if lenFilePathList = 0
  # Assigning 0 to Err$note apparently acts as a separator/reset; the exact
  # semantics live in the Err class (not visible here).
  private$Err$note = 0;
  private$Err$note = msg # >> Print no. of files detected
  private$Err$note = 0; private$Err$note = 0
  # Set outputListName, e.g. "chainList" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  outputListName = paste0( fileType, "List" )
  # Parse Files >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  outputList = private$parseEverything( fileType, filePathList$files, renabled, parseContents, delimiter )
  # Return outputList or Make a New Component >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
  if ( renabled ) { assign( outputListName, outputList, envir=globalenv() ) } # >> Return as a variable
  else { # >> Make a New Component: rebuild the (locked) R6 object with the
         #    new public field attached, then swap it in the global env.
         #    NOTE(review): uses `<<-` plus eval(parse()) on generated code —
         #    fragile; flagged as a bug workaround by the original author.
    newParaDRAMObj <<- private$ParaDRAMUnlocked$new()
    outputList <<- outputList
    # bug: ..........
    text = paste0( "if ( !('", outputListName, "' %in% names(", private$objectName, ")) ) {",
      # "b = setdiff( names(", private$objectName, "), c('clone', 'print') );",
      private$objectName, "$.__enclos_env__$private$ParaDRAMUnlocked$set('public', '", outputListName, "', outputList);",
      "newParaDRAMObj[['", outputListName, "']] = outputList; ",
      "newParaDRAMObj$.__enclos_env__$private$ParaDRAMUnlocked = ", private$objectName, "$.__enclos_env__$private$ParaDRAMUnlocked;",
      "assign( '", private$objectName, "', newParaDRAMObj, envir = globalenv() );",
      "lockEnvironment(", private$objectName, ");",
      "} else {",
      private$objectName, "[['", outputListName, "']] = outputList; ",
      "};",
      "rm(newParaDRAMObj);",
      "rm(outputList);" )
    eval( parse(text = text), envir = globalenv() )
  }
  # Update User about Success. NOTE(review): "Sucess" is the actual method
  # name as spelled elsewhere in the class — do not "fix" it here alone.
  private$updateUserSucess( renabled, fileType, outputListName )
  return( invisible(outputList) )
}
# <<
################################## readEverything ##################################################################################
####################################################################################################################################
####################################################################################################################################
################################## Help Code #######################################################################################
# >>
#
# pmpm$readChain ( file,
# delimiter,
# parseContents = TRUE,
# renabled = FALSE )
#
# pmpm$readSample ( file,
# delimiter,
# parseContents = TRUE,
# renabled = FALSE )
#
# pmpd$readReport ( file,
# renabled = FALSE )
#
# pmpd$readRestart ( file,
# renabled = FALSE )
#
# pmpd$readProgress( file,
# delimiter,
# parseContents = TRUE,
# renabled = FALSE )
#
# <<
################################## Help Code #######################################################################################
####################################################################################################################################
|
294273812c41cf93cf66617ed1ca9be60e4719a3
|
6b7c306d52fc6344f6463dcb8847c75f6a99a405
|
/Relatorios/RelatorioGerencial/inner_join-hiperlink.R
|
00b758e3c76a7a32ceadc4f07cb39ca20c33caf3
|
[] |
no_license
|
gbrlla/IpeadataRio_Revisao
|
694e13cf998dcc21460f4b6df5b1869aa909e4e5
|
27605c3ef52ffcd5cbe9ec3ef93f1272c3d8a328
|
refs/heads/master
| 2020-04-29T04:44:06.766273
| 2019-06-07T17:29:04
| 2019-06-07T17:29:04
| 175,856,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
inner_join-hiperlink.R
|
# Join the comments table (coment) and the hyperlink table (link) on their
# shared series code; both objects must already exist in the session.
planilha<-dplyr::inner_join(coment,link,by="codigo")
# NOTE(review): hard-coded per-user desktop path — breaks on any other machine.
setwd("C:\\Users\\b248968182\\Desktop\\SCN10")
# Write the joined table to CSV (row names included by write.csv's default).
write.csv(planilha, file = "scn10.csv")
|
e152b75ea15d1f4dc0706835232e123686ed9fef
|
93051b30dbf41dcda0f678a8d811c16ac4c9f65d
|
/man/nomogram.Rd
|
f904ec839a839935a938b12868faff62b1129966
|
[] |
no_license
|
cran/UncertainInterval
|
483d0b3db282e31e7413eb10d35cecdb7d031435
|
f2465d3b1e06f4ed2b3aa01ebad5329757fc3555
|
refs/heads/master
| 2021-07-18T06:29:45.578973
| 2021-03-02T15:00:02
| 2021-03-02T15:00:02
| 79,441,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,174
|
rd
|
nomogram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nomogram.R
\name{nomogram}
\alias{nomogram}
\title{Fagan's nomogram to show the relationships between the prior probability, the
likelihood ratios, sensitivity and specificity, and the posterior
probability.}
\usage{
nomogram(
prob.pre.test = 0.5,
probs.post.test = c(pos = NULL, neg = NULL),
SeSp = c(Se = NULL, Sp = NULL),
LR = c(PLR = NULL, NLR = NULL),
plot = T
)
}
\arguments{
\item{prob.pre.test}{The prior test probability, with a default value of .5.
Often, (local) prevalence is used.}
\item{probs.post.test}{A vector of two values that give the desired posttest
probabilities of observing the event in the case of a positive test result
(positive posttest probability: pos), and the posttest probability of
observing the event in the case of a negative test result (negative
posttest probability: neg). When not given, these probabilities are
calculated using the likelihood ratios (LR).}
\item{SeSp}{A vector of two values that give the desired sensitivity and
specificity. When not given, the Se and Sp values are calculated from the
desired posttest probabilities.}
\item{LR}{A vector of two values that give the positive likelihood ratio
(sensitivity / (1- specificity)): PLR of observing the event, and the
negative likelihood ratio ((1 - sensitivity) / specificity): NLR of not
observing the event. PLR is a value > 1, NLR is a value between 0 and 1.
When not given, the LR values are calculated from the desired posttest
probabilities.}
\item{plot}{A Boolean that indicates whether a plot is desired.}
}
\value{
Vector of values: \describe{ \item{$pre: }{The given pre-test
probability.} \item{$min.LRpos: }{The given or calculated minimally
required positive likelihood ratio. If no value is provided, it is
calculated.} \item{$max.LRneg: }{The given or calculated maximally required
negative likelihood ratio. If no value is provided, it is calculated.}
\item{$post.pos: }{The given or calculated positive posttest probability.}
\item{$minSp: }{The minimum value for the specificity, needed to reach the
desired posttest probabilities.} \item{$minSe: }{The minimum value for the
sensitivity, needed to reach the desired posttest probabilities.} }
}
\description{
Next to plotting Fagan's nomogram, this function also calculates the
minimally needed values for specificity and sensitivity to reach desired
posttest probabilities (or likelihood ratios) for a grey zone (Coste et al.,
2003, 2006).
}
\details{
Parameter probs.post.test or SeSp or LR must be supplied, the other
two values are calculated. When more than one parameter is given the other
two are ignored. The basis of this function is adapted from package
TeachingDemos.
}
\examples{
# Show calculated results (first 3 times about the same)
(nomogram(prob.pre.test = .10, probs.post.test=c(pos=.70, neg=.001), plot=FALSE))
(nomogram(prob.pre.test = .10, SeSp=c(Se=0.991416309, Sp=0.952789700), plot=FALSE))
(nomogram(prob.pre.test = .10, LR=c(pos=21, neg=0.0090090091), plot=FALSE))
(nomogram(prob.pre.test = .10, SeSp=c(Se=0.99, Sp=0.95), plot=FALSE))
# plot only
nomogram(prob.pre.test = .10, LR=c(pos=21, neg=0.0090090091))
# plot and display precise results
(nomogram(prob.pre.test = .10, probs.post.test=c(pos=.70, neg=.001)))
# check the influence of different values of prevalence
i=1
out=matrix(0,nrow = 9, ncol= 7)
for (prev in (seq(.1, .9, by=.1))) {
out[i,]=nomogram(prob.pre.test=prev, probs.post.test=c(.95, .05), plot=FALSE)
i=i+1
}
colnames(out) = names(nomogram(prob.pre.test=prev, probs.post.test=c(.95, .05), plot=FALSE))
out
}
\references{
{ Fagan, T. J. (1975). Nomogram for Bayes theorem. The New
England Journal of Medicine, 293(5), 257-257.
Coste, J., Jourdain, P., & Pouchot, J. (2006). A gray zone assigned to
inconclusive results of quantitative diagnostic tests: application to the
use of brain natriuretic peptide for diagnosis of heart failure in acute
dyspneic patients. Clinical Chemistry, 52(12), 2229-2235.
Coste, J., & Pouchot, J. (2003). A grey zone for quantitative diagnostic
and screening tests. International Journal of Epidemiology, 32(2),
304-313. }
}
|
35d6f05bff0b40fff7df57f25bf85b89c5342658
|
4447836ed0fe92e85edc912d86f8d62860ceac99
|
/ui.R
|
c4403fd00d824a9fbcf3024c95162e68caad9424
|
[
"Apache-2.0"
] |
permissive
|
jbpost2/BasicBayes
|
90c022f5598a24441ced6f151a887a52705c7f21
|
193c554df50716a99c43c2522c6e38ad581c3532
|
refs/heads/master
| 2021-07-15T12:46:54.932390
| 2021-03-05T16:15:10
| 2021-03-05T16:15:10
| 84,206,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,662
|
r
|
ui.R
|
###########################################################################
##R Shiny App to plot different possible posterior distributions from coin example
##Added Gamma/pois
##Justin Post
###########################################################################
#Load package
library(shiny)
library(shinydashboard)
# Define UI for application that displays an about page and the app itself
# UI definition: a shinydashboard page with an About tab plus two app tabs
# (Beta prior / Binomial likelihood, and Gamma prior / Poisson likelihood).
# Input IDs (yvalue, alpha, beta, sampleMean, sampleSize, alpha2, beta2) and
# output IDs (priorPlot, distPlot, priorPlot2, distPlot2) must be matched by
# the server function. Math is rendered via MathJax (see withMathJax()).
dashboardPage(skin="red",
              #add title
              dashboardHeader(title="Posterior Distribution Visuals",titleWidth=1000),
              #define sidebar items
              dashboardSidebar(sidebarMenu(
                menuItem("About", tabName = "about", icon = icon("archive")),
                menuItem("Binomial & Beta", tabName = "app", icon = icon("laptop")),
                menuItem("Gamma & Poisson", tabName = "app2", icon = icon("laptop"))
              )),
              #define the body of the app
              dashboardBody(
                tabItems(
                  # First tab content: static About page, two text columns
                  tabItem(tabName = "about",
                          fluidRow(
                            #add in latex functionality if needed
                            withMathJax(),
                            #two columns for each of the two items
                            column(6,
                                   #Description of App
                                   h1("What does this app do?"),
                                   #box to contain description
                                   box(background="red",width=12,
                                       h4("This application shows the relationship between the prior distribution and the posterior distribution for two simple Bayesian models."),
                                       h4("For the 'Binomial & Beta' app:"),
                                       h5("The prior distribution is assumed to be a Beta distribution and the likelihood is a Binomial distribution with 30 trials (of which you can change the number of successes). This yields a Beta distribution as the posterior. Note: As the prior distribution is in the same family as the posterior, we say the prior is conjugate for the likelihood."),
                                       h5("This application corresponds to an example in ",span("Mathematical Statistics and Data Analysis",style = "font-style:italic"), "section 3.5, example E, by John Rice."),
                                       h5("The goal of the example is to update our belief about the parameter \\(\\Theta\\) = the probability of obtaining a head when a particular coin is flipped. The experiment is to flip the coin 30 times and observe the number of heads. The likelihood is then a binomial distribution. The prior is assumed to be a Beta distribution."),
                                       h4("For the 'Gamma & Poisson' app:"),
                                       h5("The prior distribution is a Gamma and the likelihood is a Poisson."),
                                       h5("This is a conjugate relationship so the posterior is also a Gamma.")
                                   )
                          ),
                          column(6,
                                 #How to use the app
                                 h1("How to use the app?"),
                                 #box to contain description
                                 box(background="red",width=12,
                                     h4("The controls for the apps are located to the left and the visualizations are available on the right."),
                                     h4("To change the prior distribution, the hyperparameters can be set using the input boxes on the left. The changes in this distribution can be seen on the first graph."),
                                     h4("The resulting changes to the posterior distribution can be seen on the second graph.")
                                 )
                          )
                          )
                  ),
                  #actual app layout: Beta-Binomial tab (controls left, plots right)
                  tabItem(tabName = "app",
                          fluidRow(
                            column(width=3,
                                   box(width=12,background="red",sliderInput("yvalue","Y=Number of Successes",min = 0,max = 30,value = 15)
                                   ),
                                   box(width=12,
                                       title="Hyperparameters of the prior distribution for \\(\\Theta\\)",
                                       background="red",
                                       solidHeader=TRUE,
                                       # Beta(alpha, beta) density shown as LaTeX
                                       p("\\(\\frac{\\Gamma(\\alpha+\\beta)}{\\Gamma(\\alpha)\\Gamma(\\beta)}\\theta^{\\alpha-1}(1-\\theta)^{\\beta-1}\\)"),
                                       h5("(Set to 1 if blank.)"),
                                       numericInput("alpha",label=h5("\\(\\alpha\\) Value (> 0)"),value=1,min=0,step=0.1),
                                       numericInput("beta",label=h5("\\(\\beta\\) Value (> 0)"),value=1,min=0,step=0.1)
                                   )
                            ),
                            column(width=9,
                                   fluidRow(
                                     box(width=6,
                                         plotOutput("priorPlot"),
                                         br(),
                                         h4("Prior distribution for the probability of success parameter \\(\\Theta\\).")
                                     ),
                                     box(width=6,
                                         plotOutput("distPlot"),
                                         br(),
                                         h4("Posterior distribution for the probability of success \\(\\Theta|y\\).")
                                     )
                                   )
                            )
                          )
                  ),
                  # Gamma-Poisson tab (same layout as the Beta-Binomial tab)
                  tabItem(tabName = "app2",
                          fluidRow(
                            column(width=3,
                                   box(width=12,background="red",numericInput("sampleMean","Sample mean from data",min = 0,max = 1000, value = 1)
                                   ),
                                   box(width=12,background="red",numericInput("sampleSize","Sample size",min = 1,max = 1000, value = 10)
                                   ),
                                   box(width=12,
                                       title="Hyperparameters of the prior distribution for \\(\\Lambda\\)",
                                       background="red",
                                       solidHeader=TRUE,
                                       # Gamma(alpha, beta) density shown as LaTeX
                                       p("\\(\\frac{\\beta^\\alpha}{\\Gamma(\\beta)}\\lambda^{\\alpha-1}e^{-\\beta\\lambda}\\)"),
                                       h5("(Set to 1 if blank.)"),
                                       numericInput("alpha2",label=h5("\\(\\alpha\\) Value (> 0)"),value=1,min=0.0001,step=0.1),
                                       numericInput("beta2",label=h5("\\(\\beta\\) Value (> 0)"),value=1,min=0.0001,step=0.1)
                                   )
                            ),
                            column(width=9,
                                   fluidRow(
                                     box(width=6,
                                         plotOutput("priorPlot2"),
                                         br(),
                                         h4("Prior distribution for the rate parameter \\(\\Lambda\\).")
                                     ),
                                     box(width=6,
                                         plotOutput("distPlot2"),
                                         br(),
                                         h4("Posterior distribution for rate parameter \\(\\Lambda|y_1,...,y_n\\)."),
                                         h4("Theoretically, this posterior is given by"),
                                         h4("\\(\\frac{(\\beta+n)^{\\alpha+\\sum_{i=1}^{n}y_i}}{\\Gamma(\\beta+n)}\\lambda^{\\alpha+\\sum_{i=1}^{n}y_i-1}e^{-(\\beta+n)\\lambda}\\)")
                                     )
                                   )
                            )
                          )
                  )
                )
              )
)
|
de3a778a657c7c9400f17cf6eefa79d838cd982e
|
ad23a4cd57db88a5d6008379abb8d3139f838e24
|
/R/print.dyadicc.R
|
f0f0409ebaf6f74ec01cb0bc89d0752dfd35cc94
|
[] |
no_license
|
DLEIVA/nonindependence
|
7bb397a35dd300421fffac3f9567009724c4591c
|
f143aceb6e53aa56f9a772547d63e9e65f5329b1
|
refs/heads/master
| 2020-03-23T22:09:48.974114
| 2018-07-24T12:47:24
| 2018-07-24T12:47:24
| 142,157,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,069
|
r
|
print.dyadicc.R
|
#' Print method for "dyadicc" objects (intraclass correlation for dyads with
#' indistinguishable members).
#'
#' Shows the call, a preview of the data (at most the first 5 rows),
#' descriptive statistics, the between/within-dyads F test, and the ICC with
#' its confidence interval.
#'
#' @param x      A "dyadicc" object: a list with components call, data, stats,
#'               MSb, MSw, df1, df2, Fstat, pval, alpha, intracor, iccCI.
#' @param digits Significant digits for the printed tables.
#' @param ...    Unused; kept for compatibility with the print generic.
#' @return Invisibly returns \code{x}.
print.dyadicc <- function(x, digits = max(4, getOption("digits") - 4), ...)
{
  cat("\n")
  cat("Indistinguishable Members: Intraclass Correlation")
  cat("\n\n Call: \n")
  cat("", deparse(x$call), "\n\n")
  # Data preview: all rows when <= 5, otherwise the first 5 rows.
  n_rows <- nrow(x$data)
  cat(" Data", if (n_rows <= 5) ": " else " (5 first rows shown): ", "\n")
  # FIX: the original else-branch indexed rows as 1:length(x$data), but
  # length() on a data frame counts COLUMNS, so it printed NA rows (or dropped
  # rows) whenever nrow != ncol. Printing the data frame itself is always
  # correct for the small case.
  print(if (n_rows >= 5) x$data[1:5, ] else x$data)
  cat("\n\n")
  cat("Descriptive Statistics", "\n")
  print.table(x$stats, digits = digits)
  cat("\n\n")
  cat("F Test: ", "\n")
  # Two-row ANOVA-style table; F and p only apply to the between-dyads row.
  results <- cbind(rbind(x$MSb, x$MSw), rbind(x$df1, x$df2),
                   rbind(x$Fstat, NA), rbind(x$pval, NA))
  colnames(results) <- c("Mean Sq", "Df", "F value", "Pr(>|F|)")
  rownames(results) <- c("Between-Dyads", "Within-Dyads")
  print.table(results, digits = digits)
  cat("\n\n")
  cat(paste("Intraclass Correlation and ", (1 - x$alpha) * 100,
            "% Confidence Interval: ", sep = ''), "\n")
  results <- cbind(x$intracor, t(x$iccCI))
  colnames(results) <- c("ICC", "Lower", "Upper")
  rownames(results) <- ''
  print.table(results, digits = digits)
  cat("\n\n")
  invisible(x)
}
|
62de8907b7f040afe35d2f12e834aa9808cb0f72
|
8bc0b8ed84e03ed0254448aadafce40b3ad27ca7
|
/man/tfr.Rd
|
4cf54f46b07e404a961228772192ed3cfb342aaa
|
[] |
no_license
|
grasshoppermouse/globalsmoking
|
b9402f922f7f88266833c2d7faa55c4c2c1d5a51
|
9f5dc48fe5744a186314dca8ac15128c0054c572
|
refs/heads/main
| 2023-07-21T14:15:31.431203
| 2020-10-20T15:33:57
| 2020-10-20T15:33:57
| 305,552,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,302
|
rd
|
tfr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{tfr}
\alias{tfr}
\title{Total Fertility Rates 1950-2015}
\format{
A data frame with 200 rows and 16 variables:
\describe{
\item{\code{country}}{character. Country name.}
\item{\code{iso3}}{character. ISO Alpha 3.}
\item{\code{tfr1950}}{double. TFR 1950-1955}
\item{\code{tfr1955}}{double. TFR 1955-1960}
\item{\code{tfr1960}}{double. TFR 1960-1965}
\item{\code{tfr1965}}{double. TFR 1965-1970}
\item{\code{tfr1970}}{double. TFR 1970-1975}
\item{\code{tfr1975}}{double. TFR 1975-1980}
\item{\code{tfr1980}}{double. TFR 1980-1985}
\item{\code{tfr1985}}{double. TFR 1985-1990}
\item{\code{tfr1990}}{double. TFR 1990-1995}
\item{\code{tfr1995}}{double. TFR 1995-2000}
\item{\code{tfr2000}}{double. TFR 2000-2005}
\item{\code{tfr2005}}{double. TFR 2005-2010}
\item{\code{tfr2010}}{double. TFR 2010-2015}
}
}
\source{
\url{http://esa.un.org/unpd/wpp/Download/Standard/Fertility/}
}
\usage{
tfr
}
\description{
Total fertility rates computed by the UN Department of Economic and Social
Affairs, Population Division. World Population Prospects. The 2015 Revision.
\url{http://esa.un.org/unpd/wpp/publications/files/key_findings_wpp_2015.pdf}.
Years are
}
\keyword{datasets}
|
d45a7ba21257beccde64a327d55f0c2f4ac097c6
|
eefcd8a80f3cebe5fc36fff145bea7c985d01d52
|
/paper_expansion_custom.R
|
e807a285e64f1afa721ffe390ae65e3e658b1309
|
[] |
no_license
|
bbbales2/ising
|
0202f3ecf9cdfe0152c42ecf45f8814afd92e4b7
|
be7b24cf13b84db369f82676402ef96460bc276a
|
refs/heads/master
| 2021-01-01T17:08:08.930806
| 2018-05-16T23:32:37
| 2018-05-16T23:32:37
| 98,006,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
paper_expansion_custom.R
|
library(reshape2)
library(tidyverse)
library(ggplot2)
library(rstan)
library(parallel)
require(Rcpp)
library(GGally)
require(gtools)
sourceCpp("covariance2.cpp")
# Render a matrix of spin states as a tile plot: Var1 on x, Var2 on a
# reversed y axis, one fill colour per distinct cell value.
p <- function(x) {
  long_form <- melt(x)
  plot_df <- as.tibble(long_form)
  base_plot <- ggplot(plot_df, aes(x = Var1, y = Var2, fill = factor(value)))
  base_plot +
    geom_tile() +
    coord_equal() +
    scale_y_reverse() +
    xlab("x") +
    ylab("y")
}
# Simulation parameters: N x N lattice, noise level, number of sweeps, and
# the grid of chemical potentials scanned by ising_sweep below.
N = 2 * 3 * 5
sigma = 0.01
S = 200
mus = seq(-5.0, 5.0, length = 21)
gamma = c(0.1)
# ising_gibbs / triplets / dtriplets come from these sourced files (Rcpp).
source("ising_helpers3.R")
sourceCpp("ising3.cpp", showOutput = TRUE)
# Random +/-1 spin configuration.
x = matrix(sample(c(-1, 1), size = N * N, replace = TRUE), nrow = N)
# NOTE(review): `beta` is not assigned until later in this script (it is
# defined below, before the second sourceCpp run); executed top-to-bottom
# these two calls would pick up base::beta — this file appears to be run
# interactively, chunk by chunk.
system.time(ising_gibbs(x, 0.0, beta, gamma, S, 0))
out = ising_gibbs(x, 0.0, beta, gamma, S, 0)
w = 1
# Finite-difference style check: flipping spin (i, j) must change the triplet
# count by exactly -2 * dtriplets(i, j); repeated on 100 random configurations.
for(s in 1:100) {
x = matrix(sample(c(-1, 1), size = N * N, replace = TRUE), nrow = N)
i = sample(1:N, 1)
j = sample(1:N, 1)
n1 = triplets(x, w)
dn = dtriplets(x, i - 1, j - 1, w)
x[i, j] = -x[i, j]
n2 = triplets(x, w)
stopifnot(abs((n2 - n1) - (-2 * dn)) < 1e-10)
#print(c(n2 - n1, -2 * dn))
}
# Recompile the sampler (presumably after editing ising3.cpp), then run one
# Gibbs chain and inspect the trace of the Q6h statistic.
sourceCpp("ising3.cpp", showOutput = TRUE)
kT = 1.0
beta = c(0.3, 0.0, 0.0, 0.0, 0.0)
# NOTE(review): this call passes 7 arguments while the earlier calls pass 6 —
# the recompiled ising_gibbs apparently gained a temperature argument (kT).
out = ising_gibbs(x, 0.0, beta, c(0.0, 0.2), 10 * S, 1, kT)
# Build df1 (per-sweep Q6h = X0 * Q1 / N^2) as a side effect of the pipe
# (note the embedded `-> df1`), then plot every retained column vs sweep.
(out$states %>%
as.tibble %>%
mutate(rn = row_number()) %>%
mutate(Q6h = X0 * Q1 / (N * N)) %>%#Q6s = X0 * X0 * X0 / (N * N),
select(-X0, -Q1, -Q2, -Q3, -Q4, -Q5) -> df1) %>%
gather(which, value, -rn) %>%
ggplot(aes(rn, value)) +
geom_point(aes(shape = which, colour = which))
# Mean and variance of each trace column.
df1 %>%
select(-rn) %>%
summarize_all(funs(mean, var))
# Single derivative evaluation (sanity check of the Rcpp entry point).
ising_gibbs_derivs(x, 0.0, beta, gamma, S, 0)
# Run ising_gibbs_derivs over the cross product of chemical potentials and
# seeds, in parallel. Returns a list with one result per (mu, seed) pair;
# each element is tagged with the mu and seed that produced it.
# NOTE(review): reads the global `mus` grid rather than taking it as an
# argument — callers must define `mus` first.
ising_sweep = function(x, beta, gamma, S, seeds) {
list(mu = mus,
seed = seeds) %>%
expand.grid %>%
as.tibble %>%
(function(df) split(df, 1:nrow(df))) %>% # one single-row tibble per job
mclapply(function(row) {
res = ising_gibbs_derivs(x, row$mu, beta, gamma, S, row$seed)
res$mu = row$mu
res$seed = row$seed
res
}, mc.cores = 24) # NOTE(review): hard-coded core count
}
# Draw random couplings, sweep over the mu grid with a fresh random seed, and
# plot the first component of each result (f) against mu. The surrounding
# braces make this a single re-runnable chunk in an interactive session.
{
beta = rnorm(5, 0.1, 0.25)
gamma = rnorm(1, 0.0, 0.25)
data = map(ising_sweep(x, beta, gamma, S * 10, sample(1000000, 1)), ~ .$f)
list(mus = mus,
y = map(data, ~ .[[1]]) %>% unlist()) %>%
as.tibble %>%
ggplot(aes(mus, y)) +
geom_line()
}
|
d28d03707260e3814e5ebc661c821d34e89a94c2
|
2bca1f1b682da91997d0547fcf719253bc9ccde0
|
/mymain.R
|
ffb1c8f3dfea0dfbd3b7ea7a7696dff21d9af305
|
[] |
no_license
|
shuke1995/Movie-Sentiment-Analysis
|
61b02e2400b050c0e531be335d29e3a9194a04df
|
2c080b3616968e0b624b6900613122f1c4f1abc6
|
refs/heads/master
| 2020-09-03T03:21:08.731033
| 2019-11-03T23:27:28
| 2019-11-03T23:27:28
| 219,373,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,525
|
r
|
mymain.R
|
# Movie-review sentiment pipeline: read reviews + a train/test split, build a
# document-term matrix restricted to a precomputed vocabulary, fit a ridge
# logistic regression (glmnet, alpha = 0) with CV-selected lambda, and write
# predicted probabilities to mysubmission.txt.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (wipes the
# caller's workspace) — kept here because graders may rely on a clean session.
rm(list = ls())
library(readr)
library(text2vec)
library(data.table)
library(magrittr)
library(RCurl)
library(XML)
library(pROC)
library(glmnet)
library(AUC)
#start.time = proc.time()
#1. read data
all = read.table("data.tsv", stringsAsFactors = F, header = T)
splits = read.table("splits.csv", header = T)
s = 3
# Precomputed vocabulary used to subset the DTM columns below.
Myvocab = read.table("myVocab.txt")
# 1. Remove HTML tags from the raw review text.
all$review = gsub('<.*?>', ' ', all$review)
# Split Train and Test: rows whose new_id appears in split column s are test.
train = all[-which(all$new_id%in%splits[,s]),]
test = all[which(all$new_id%in%splits[,s]),]
stop_words = c("i", "me", "my", "myself",
"we", "our", "ours", "ourselves",
"you", "your", "yours",
"their", "they", "his", "her",
"she", "he", "a", "an", "and",
"is", "was", "are", "were",
"him", "himself", "has", "have",
"it", "its", "of", "one", "for",
"the", "us", "this")
# Create a vocabulary-based DTM with unigrams and bigrams.
prep_fun = tolower
tok_fun = word_tokenizer
it_train = itoken(train$review,
preprocessor = prep_fun,
tokenizer = tok_fun,
ids = train$new_id,
progressbar = FALSE)
vocab = create_vocabulary(it_train,ngram=c(1L, 2L), stopwords = stop_words)
vectorizer = vocab_vectorizer(vocab)
# create dtm_train with new pruned vocabulary vectorizer
dtm_train = create_dtm(it_train, vectorizer)
# NOTE(review): uses test$id here but test$new_id elsewhere — `id` is likely
# not a column of `test` (so ids silently become NULL); confirm intended.
it_test = test$review %>% prep_fun %>% tok_fun %>%
# turn off progressbar because it won't look nice in rmd
itoken(ids = test$id, progressbar = FALSE)
dtm_test = create_dtm(it_test, vectorizer)
set.seed(500)
NFOLDS = 10
# Keep only DTM columns present in the curated vocabulary.
train_X = dtm_train[,which(colnames(dtm_train)%in%Myvocab[,1])]
test_X = dtm_test[,which(colnames(dtm_test)%in%Myvocab[,1])]
# Ridge (alpha = 0) logistic regression; lambda chosen by 10-fold CV on AUC.
mycv = cv.glmnet(x=train_X, y=train$sentiment,
family='binomial',type.measure = "auc",
nfolds = NFOLDS, alpha=0)
myfit = glmnet(x=train_X, y=train$sentiment,
lambda = mycv$lambda.min, family='binomial', alpha=0)
logit_pred = predict(myfit,test_X, type = "response")
#glmnet:::auc(test$sentiment, logit_pred)
# Results: two columns, the test id and the predicted probability.
results = data.frame(new_id = test$new_id, prob = logit_pred)
colnames(results) = c("new_id", "prob")
write.table(results,"mysubmission.txt", sep = ",", col.names= T, row.names = F, quote=F)
#running_time = (proc.time() - start.time)[1]
#write.table(results,file = paste("Result_3",".txt", sep = ""), sep = ",", col.names= T, row.names = F, quote=F)
|
6fb4da312509b6fae4cc2fa24699ad6fce0d2257
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mpt/examples/mptspec.Rd.R
|
c252dbf6a4dcc309940ac3da426b942ce7b874c1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,008
|
r
|
mptspec.Rd.R
|
# Example script for mpt::mptspec(): specifying multinomial processing
# tree (MPT) models (extracted from the mptspec help page).
library(mpt)
### Name: mptspec
### Title: Specify a Multinomial Processing Tree (MPT) Model
### Aliases: mptspec print.mptspec update.mptspec
### Keywords: models

### ** Examples

## Specify storage-retrieval model for pairs
## (one expression per response-category probability)
spec1 <- mptspec(
  c*r,
  (1 - c)*u^2,
  2*(1 - c)*u*(1 - u),
  c*(1 - r) + (1 - c)*(1 - u)^2
)

## Specify storage-retrieval model with parameter restrictions
## (.restr equates c with r/2 and fixes u at 0.3)
spec2 <- mptspec(
  c*r,
  (1 - c)*u^2,
  2*(1 - c)*u*(1 - u),
  c*(1 - r) + (1 - c)*(1 - u)^2,
  .restr = list(c = r/2, u = 0.3)
)

## Optional names identifying trees in joint MPT model
spec3 <- mptspec(
  "1.1" = r + (1 - r)*b,
  "1.2" = (1 - r)*(1 - b),
  "2.1" = b,
  "2.2" = 1 - b
)

## Fit one-high-threshold model to data in Broeder & Schuetz (2009)
## (the spec is assigned inside the call, so `spec` stays available below)
mpt1 <- mpt(spec <- mptspec("1HT"), c(55, 35, 45, 765))

## Working with the mptspec object
spec$par2prob(c(0.5, 0.1))        # response probabilities
spec$par2deriv(coef(mpt1))$deriv  # Jacobian matrix at ML estimate

## See ?recogROC for further examples.
|
6a0925731c855809bceecd8afe5be17d5777bca2
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.end.user.computing/man/workmail_test_availability_configuration.Rd
|
5e10aa7d29b13c1ea2e8910a0269deede6885256
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,314
|
rd
|
workmail_test_availability_configuration.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workmail_operations.R
\name{workmail_test_availability_configuration}
\alias{workmail_test_availability_configuration}
\title{Performs a test on an availability provider to ensure that access is
allowed}
\usage{
workmail_test_availability_configuration(
OrganizationId,
DomainName = NULL,
EwsProvider = NULL,
LambdaProvider = NULL
)
}
\arguments{
\item{OrganizationId}{[required] The WorkMail organization where the availability provider will be
tested.}
\item{DomainName}{The domain to which the provider applies. If this field is provided, a
stored availability provider associated to this domain name will be
tested.}
\item{EwsProvider}{}
\item{LambdaProvider}{}
}
\description{
Performs a test on an availability provider to ensure that access is allowed. For EWS, it verifies the provided credentials can be used to successfully log in. For Lambda, it verifies that the Lambda function can be invoked and that the resource access policy was configured to deny anonymous access. An anonymous invocation is one done without providing either a \code{SourceArn} or \code{SourceAccount} header.
See \url{https://www.paws-r-sdk.com/docs/workmail_test_availability_configuration/} for full documentation.
}
\keyword{internal}
|
03594164eb97b04c99b0fa60f86104c951cdd413
|
0e155767c1733da56a8e034f254af4aa84da2b67
|
/man/fixed_point_computation_function.Rd
|
18461b831a0d9054bd5be6e2edd98c6a1318cbde
|
[
"MIT"
] |
permissive
|
Martin-Summer-1090/syslosseval
|
450776ddbfdd7523b734893cf544da3dd3d9d18a
|
5c74f0cf7856ef4f8d39890ee349a17d19818d6f
|
refs/heads/master
| 2023-06-12T13:27:21.846827
| 2021-08-10T16:02:37
| 2021-08-10T16:02:37
| 305,985,830
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,936
|
rd
|
fixed_point_computation_function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixed_point_computation_function.R
\name{fixed_point_computation_function}
\alias{fixed_point_computation_function}
\title{fixed_point_computation_function}
\usage{
fixed_point_computation_function(
mat,
lb,
data_idx,
data_adv,
base_year,
constant,
accuracy = 10^(-9)
)
}
\arguments{
\item{mat}{A list of the initial state variable (output of
\code{make_state_variables()})}
\item{lb}{The critical leverage threshold called lambda_bar in the
paper.}
\item{data_idx}{The data-frame with the sovereign bond indices.}
\item{data_adv}{The data-frame with the average daily volume figures.}
\item{base_year}{The base year for the simulation}
\item{constant}{The value of the constant kappa in the impact function
(equation (9)).}
\item{accuracy}{The accuracy of the fixed point approximation. Set by
default to 10^(-9)}
}
\value{
A tibble with variables delta_lower (lower fixed point), iterations_lower (iterations to
converge to lower fixed point), delta_upper (upper fixed point), iterations_upper (iterations to
converge to the upper fixed point), delta_max (maximum impact), unique (logical variable which is
true if fixed point is unique and false if it is not unique)
}
\description{
This function computes the least and the greatest fire sale equilibrium
according to theorem 2 in the paper to a given level of accuracy, which is
set by default to 10^-9. In many cases the least and the greatest fire-sale
equilibrium will coincide for a given set of data, but this need not
generally be the case.
}
\examples{
stress_data <- make_stress_data(eba_exposures_2016, eba_impairments_2016, 1, 2015)
state_variables <- make_state_variables(stress_data)
fixed_point_computation_function(
mat = state_variables, lb = 33,
data_idx = sovereign_bond_indices,
data_adv = average_daily_volume_sovereign,
base_year = 2015,
constant = 1.5
)
}
|
ddc5d5905b356a65ba3aa7c67904428da8ed1f12
|
8c9598a06fb0b1b7a00eb74e63a1ed2cd8329eb5
|
/man/MultAdjStrategy.Rd
|
e7b7ff3e0c9cb85b37ea6c7ebb12ea8714a88757
|
[] |
no_license
|
gpaux/Mediana
|
1653df59542b80cb3951ce453f8450261b48a752
|
e3a7d7f49292f1f3e4b91e831d957353b798df36
|
refs/heads/master
| 2021-06-08T22:45:10.395261
| 2021-05-29T12:54:24
| 2021-05-29T12:54:24
| 39,732,450
| 22
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,647
|
rd
|
MultAdjStrategy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultAdjStrategy.R
\name{MultAdjStrategy}
\alias{MultAdjStrategy}
\title{MultAdjStrategy object}
\usage{
MultAdjStrategy(...)
}
\arguments{
\item{\dots}{defines an object of class \code{MultAdjProc}.}
}
\description{
This function creates an object of class \code{MultAdjStrategy} which can be
added to objects of class \code{AnalysisModel}, \code{MultAdj} or
\code{MultAdjStrategy}.
}
\details{
This function can be used when several multiplicity adjustment procedures
are used within a single Clinical Scenario Evaluation, for example when
several case studies are simulated into the same Clinical Scenario
Evaluation.
Objects of class \code{MultAdjStrategy} are used in objects of class
\code{AnalysisModel} to define a Multiplicity Adjustment Procedure Strategy
that will be applied to the statistical tests to protect the overall Type I
error rate. Several objects of class \code{MultAdjStrategy} can be added to
an object of class \code{AnalysisModel}, using the '+' operator or by
grouping them into a \code{MultAdj} object.
}
\examples{
# Parallel gatekeeping procedure parameters
family = families(family1 = c(1), family2 = c(2, 3))
component.procedure = families(family1 ="HolmAdj", family2 = "HolmAdj")
gamma = families(family1 = 1, family2 = 1)
# Multiple sequence gatekeeping procedure parameters for Trial A
mult.adj.trialA = MultAdjProc(proc = "ParallelGatekeepingAdj",
par = parameters(family = family,
proc = component.procedure,
gamma = gamma),
tests = tests("Trial A Pla vs Trt End1",
"Trial A Pla vs Trt End2",
"Trial A Pla vs Trt End3")
)
mult.adj.trialB = MultAdjProc(proc = "ParallelGatekeepingAdj",
par = parameters(family = family,
proc = component.procedure,
gamma = gamma),
tests = tests("Trial B Pla vs Trt End1",
"Trial B Pla vs Trt End2",
"Trial B Pla vs Trt End3")
)
mult.adj.pooled = MultAdjProc(proc = "ParallelGatekeepingAdj",
par = parameters(family = family,
proc = component.procedure,
gamma = gamma),
tests = tests("Pooled Pla vs Trt End1",
"Pooled Pla vs Trt End2",
"Pooled Pla vs Trt End3")
)
# Analysis model
analysis.model = AnalysisModel() +
MultAdjStrategy(mult.adj.trialA, mult.adj.trialB, mult.adj.pooled) +
# Tests for study A
Test(id = "Trial A Pla vs Trt End1",
method = "PropTest",
samples = samples("Trial A Plac End1", "Trial A Trt End1")) +
Test(id = "Trial A Pla vs Trt End2",
method = "TTest",
samples = samples("Trial A Plac End2", "Trial A Trt End2")) +
Test(id = "Trial A Pla vs Trt End3",
method = "TTest",
samples = samples("Trial A Plac End3", "Trial A Trt End3")) +
# Tests for study B
Test(id = "Trial B Pla vs Trt End1",
method = "PropTest",
samples = samples("Trial B Plac End1", "Trial B Trt End1")) +
Test(id = "Trial B Pla vs Trt End2",
method = "TTest",
samples = samples("Trial B Plac End2", "Trial B Trt End2")) +
Test(id = "Trial B Pla vs Trt End3",
method = "TTest",
samples = samples("Trial B Plac End3", "Trial B Trt End3")) +
# Tests for pooled studies
Test(id = "Pooled Pla vs Trt End1",
method = "PropTest",
samples = samples(samples("Trial A Plac End1","Trial B Plac End1"),
samples("Trial A Trt End1","Trial B Trt End1"))) +
Test(id = "Pooled Pla vs Trt End2",
method = "TTest",
samples = samples(samples("Trial A Plac End2","Trial B Plac End2"),
samples("Trial A Trt End2","Trial B Trt End2"))) +
Test(id = "Pooled Pla vs Trt End3",
method = "TTest",
samples = samples(samples("Trial A Plac End3","Trial B Plac End3"),
samples("Trial A Trt End3","Trial B Trt End3")))
}
\references{
\url{http://gpaux.github.io/Mediana/}
}
\seealso{
See Also \code{\link{MultAdj}}, \code{\link{MultAdjProc}} and
\code{\link{AnalysisModel}}.
}
|
9b3210d47c9db64b1b5d1bea3da63f9612677a51
|
97f8757356dfbcfc5283644a445df3b67034ad0c
|
/ubuntu/jupyter-kernels/irkernel.R
|
6c7e8e9cd72ad07ede64a0499bc47a04a7e2bc34
|
[] |
no_license
|
wilsonify/FreshInstall
|
3d37e522cb281b2cb4c5200dea4fd4530d7b0abc
|
72f0169341bde54d57759d5d3baa093997be60e9
|
refs/heads/master
| 2023-08-17T05:39:14.358609
| 2023-08-06T13:28:03
| 2023-08-06T13:28:03
| 147,225,996
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,100
|
r
|
irkernel.R
|
# Set up the R kernel (IRkernel) for Jupyter.

# 1/3) Installing via CRAN
# If you are on linux, go to the Source instructions.
# You can install all packages using the following line in an R console:
install.packages('IRkernel')
# To update the IRkernel package later, use update.packages()

# 2/3) Making the kernel available to Jupyter
# warning Important! warning On OS X, be sure to execute this in R started
# from the Terminal, not the R App!
# (This is because the R app doesn't honor $PATH changes in ~/.bash_profile)
# If you haven't done this already, you will have to make Jupyter see the
# newly installed R kernel by installing a kernel spec.
# The call below installs the kernel spec system-wide (user = FALSE);
# run IRkernel::installspec() without arguments for a per-user install.
IRkernel::installspec(user = FALSE)

# 3/3) Make useful shortcuts available
# If you use Jupyter lab (and you should!), install Ryan Homer's
# text-shortcuts extension:
#```
#jupyter labextension install @techrah/text-shortcuts
#```
# It will make available the familiar shortcuts Ctrl/Cmd-Shift-M to insert
# %>% and Alt-minus to insert <-.
|
b1d42cb15246ed5b1d43853b8b6d0726270449fb
|
b4deb4525f89b29cdf2f5b2f79274cc9531f39b2
|
/R/table2.R
|
0196758b34a170a343df3cc4f16a89d68e5628c5
|
[] |
no_license
|
cstubben/genomes2
|
eda426038a2a7778f81db43f6be0610bfb6b339b
|
059661f426231682d3aed4ac1f201a3c049d4470
|
refs/heads/master
| 2021-01-16T18:42:12.477267
| 2015-12-22T22:31:47
| 2015-12-22T22:31:47
| 9,374,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
table2.R
|
# Sorted frequency table.
#
# Like table(), but returns the result as a matrix sorted by decreasing
# frequency: all-zero rows/columns are dropped, a "Total" margin is added
# for 2-d tables with more than one column, and the output is truncated to
# the `n` most frequent rows.
#
# Args:
#   ...: arguments passed straight to table().
#   n:   maximum number of rows to return; NA or a non-positive value is
#        treated as "no limit" (internally capped at 1000 rows).
# Returns: a matrix of counts ordered by decreasing frequency. Tables with
#   more than 2 dimensions are returned unchanged apart from truncation
#   (as in the original implementation).
table2 <- function(..., n = 10)
{
  x1 <- table(...)
  top <- n
  if (is.na(top) || top <= 0) {  # fixed: scalar condition uses ||, not |
    top <- 1000                  # effectively "no limit"
  }
  ndim <- length(dim(x1))        # renamed: no longer shadows parameter n
  ## if table is a vector
  if (ndim == 1) {
    # with 1 element or more
    if (dim(x1) > 0) {
      # single-column matrix labelled "Total"
      x1 <- matrix(x1, dimnames = list(names(x1), "Total"))
      ## remove zero rows
      x1 <- x1[rowSums(x1) != 0, , drop = FALSE]
      x1 <- x1[order(x1[, 1], decreasing = TRUE), , drop = FALSE]
    }
  }
  # if table is a 2-d array
  if (ndim == 2) {
    class(x1) <- "matrix"
    ## remove zero rows and columns
    x1 <- x1[, colSums(x1) != 0, drop = FALSE]
    x1 <- x1[rowSums(x1) != 0, , drop = FALSE]
    # with 1 column: just sort by it
    if (dim(x1)[2] == 1) {
      x1 <- x1[order(x1[, 1], decreasing = TRUE), , drop = FALSE]
    }
    if (dim(x1)[2] > 1) {
      ## add a row-wise "Total" column and sort by it
      x1 <- addmargins(x1, 2, FUN = list(Total = sum))
      x1 <- x1[order(x1[, "Total"], decreasing = TRUE), ]
    }
  }
  # if more than 2 dimensions, don't do anything
  # return at most `top` rows
  if (nrow(x1) > top) {
    x1[1:top, , drop = FALSE]
  } else {
    x1
  }
}
|
7b27a157b0e88d90b8e805a6e989423b9e72bd66
|
9cd5fc06ff366ab54395ed4d4b907cfd956444f7
|
/Week5/Assignment3.R
|
18a7a2fabaec1f8ca5e7bd66729d678c5c1997da
|
[] |
no_license
|
Abhigpt92/MITx-The-Analytics-Edge
|
056fe080b04128415f9747bc004e5447465f1be7
|
7f012fa3d44d7a0f9a3fe0b0117284f270258ee8
|
refs/heads/master
| 2021-01-19T19:29:42.008986
| 2017-08-23T15:51:15
| 2017-08-23T15:51:15
| 101,196,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,595
|
r
|
Assignment3.R
|
# MITx "The Analytics Edge", Week 5, Assignment 3:
# spam classification with logistic regression, CART and random forests.

# PROBLEM 1 - LOADING THE DATASET
emails <- read.csv("emails.csv", stringsAsFactors = FALSE)
# How many emails are in the dataset?
nrow(emails)
# How many of the emails are spam?
sum(emails$spam)
# Which word appears at the beginning of every email in the dataset?
head(emails$text)
# Could a spam classifier potentially benefit from including the frequency of the word
# that appears in every email?
# The frequency with which it appears might help us differentiate spam from ham
# How many characters are in the longest email in the dataset
max(nchar(emails$text))
# Which row contains the shortest email in the dataset?
which.min(nchar(emails$text))

#######################################
# Problem 2 - Preparing the Corpus
library(tm)
# Build a new corpus variable called corpus.
corpus <- Corpus(VectorSource(emails$text))
# Using tm_map, convert the text to lowercase.
corpus <- tm_map(corpus, tolower)
corpus <- tm_map(corpus, PlainTextDocument)
# Using tm_map, remove all punctuation from the corpus.
corpus <- tm_map(corpus, removePunctuation)
# Using tm_map, remove all English stopwords from the corpus.
corpus <- tm_map(corpus, removeWords, stopwords("english"))
# Using tm_map, stem the words in the corpus.
corpus <- tm_map(corpus, stemDocument)
# Build a document term matrix from the corpus, called dtm.
dtm <- DocumentTermMatrix(corpus)
# How many terms are in dtm?
# Run dtm in the console to see the result
# To obtain a more reasonable number of terms, limit dtm to contain terms appearing in
# at least 5% of documents
spdtm <- removeSparseTerms(dtm, 0.95)
emailsSparse <- as.data.frame(as.matrix(spdtm))
# Use the make.names function to make the variable names of emailsSparse valid
colnames(emailsSparse) <- make.names(colnames(emailsSparse))
# What is the word stem that shows up most frequently across all the emails in the dataset?
which.max(colSums(emailsSparse))
# Add a variable called "spam" to emailsSparse containing the email spam labels.
emailsSparse$spam <- emails$spam
# How many word stems appear at least 5000 times in the ham emails in the dataset?
ham <- subset(emailsSparse, spam == 0)
sum(colSums(ham) >= 5000)
# How many word stems appear at least 1000 times in the spam emails in the dataset?
spam <- subset(emailsSparse, spam == 1)
sum(colSums(spam) >= 1000)
# The lists of most common words are significantly different between the spam and ham emails.
# What does this likely imply?
# They're helpful in differentiating ham and spam emails

#######################################
# PROBLEM 3 - BUILDING MACHINE LEARNING MODELS
emailsSparse$spam <- as.factor(emailsSparse$spam)
# set the random seed to 123 and use the sample.split function to split emailsSparse 70/30
set.seed(123)
library(caTools)
split <- sample.split(emailsSparse$spam, SplitRatio = .7)
train <- subset(emailsSparse, split == TRUE)
test <- subset(emailsSparse, split == FALSE)
# Using the training set, train the following three machine learning models
# A logistic regression model called spamLog
spamLog <- glm(spam ~ ., data = train, family = "binomial")
# A CART model called spamCART
library(rpart)
library(rpart.plot)
spamCART <- rpart(spam ~ ., data = train, method = "class")
# A random forest model called spamRF
library(randomForest)
spamRF <- randomForest(spam ~ ., data = train)
# ROCR provides prediction()/performance() used for the AUC computations
# below (fixed: the original script never loaded it)
library(ROCR)
# For each model, obtain the predicted spam probabilities for the training set
predTrainLog <- predict(spamLog, type = "response")
predTrainCART <- predict(spamCART)[, 2]
predTrainRF <- predict(spamRF, type = "prob")[, 2]
# How many of the training set predicted probabilities from spamLog are less than 0.00001?
table(predict(spamLog) < 0.00001)
# How many of the training set predicted probabilities from spamLog are more than 0.99999?
table(predict(spamLog) > 0.99999)
# How many of the training set predicted probabilities from spamLog are between 0.00001 and 0.99999?
# (fixed: use the vectorized & -- the scalar && errors on vectors in modern R)
table(predict(spamLog) >= 0.00001 & predict(spamLog) <= 0.99999)
# How many variables are labeled as significant (at the p=0.05 level) in the logistic
# regression summary output?
summary(spamLog)
# How many of the word stems "enron", "hou", "vinc", and "kaminski" appear in the CART tree?
# What is the training set accuracy of spamLog, using a threshold of 0.5 for predictions?
table(train$spam, predict(spamLog) >= 0.5)
# What is the training set AUC of spamLog?
predictionTrainLog <- prediction(predTrainLog, train$spam)
as.numeric(performance(predictionTrainLog, "auc")@y.values)
# Answer based on the above function
# What is the training set accuracy of spamCART, using a threshold of 0.5 for predictions?
table(train$spam, predTrainCART > 0.5)
# What is the training set AUC of spamCART?
predictionTrainCART <- prediction(predTrainCART, train$spam)
as.numeric(performance(predictionTrainCART, "auc")@y.values)
# What is the training set accuracy of spamRF, using a threshold of 0.5 for predictions?
table(train$spam, predTrainRF > 0.5)
# What is the training set AUC of spamRF?
predictionTrainRF <- prediction(predTrainRF, train$spam)
as.numeric(performance(predictionTrainRF, "auc")@y.values)
# Which model had the best training set performance, in terms of accuracy and AUC?
# Look at the AUC value of the prediction of each model to figure out the answer

########################################
# Problem 4 - Evaluating on the Test Set
predTestLog <- predict(spamLog, newdata = test, type = "response")
predTestCART <- predict(spamCART, newdata = test)[, 2]
predTestRF <- predict(spamRF, newdata = test, type = "prob")[, 2]
# What is the testing set accuracy of spamLog, using a threshold of 0.5 for predictions?
table(test$spam, predTestLog >= .5)
# What is the testing set AUC of spamLog?
predictionTestLog <- prediction(predTestLog, test$spam)
as.numeric(performance(predictionTestLog, "auc")@y.values)
# What is the testing set accuracy of spamCART, using a threshold of 0.5 for predictions?
table(test$spam, predTestCART >= .5)
# What is the testing set AUC of spamCART?
predictionTestCART <- prediction(predTestCART, test$spam)
as.numeric(performance(predictionTestCART, "auc")@y.values)
# What is the testing set accuracy of spamRF, using a threshold of 0.5 for predictions?
table(test$spam, predTestRF >= 0.5)
# What is the testing set AUC of spamRF?
predictionTestRF <- prediction(predTestRF, test$spam)
as.numeric(performance(predictionTestRF, "auc")@y.values)
# Which model had the best testing set performance, in terms of accuracy and AUC?
# The one that has the least fluctuation in AUC and accuracy values
# Which model demonstrated the greatest degree of overfitting?
# The one that has the most fluctuation in AUC and accuracy values
|
b45b8190b82df86ecdc88016050d4a236de99594
|
c99d3397fd7e679cf52ac8837049baa75e55e274
|
/man/frame_density.Rd
|
6fae60f12b0d27bc3561966195f35eea7d562ffb
|
[] |
no_license
|
rooperc4/TrigCamDensityEstimation
|
f650328bfc824fa89903c6ca41e938b0fca64011
|
0a107c1e8358eda84164d1568fb223e4654825e2
|
refs/heads/master
| 2020-04-08T20:53:38.112010
| 2018-11-30T17:46:33
| 2018-11-30T17:46:33
| 159,719,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,353
|
rd
|
frame_density.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DensityFunctions.R, R/DensityFunctions2.R
\name{frame_density}
\alias{frame_density}
\title{Absolute density for a frame}
\usage{
frame_density(start_L, start_K, x0, intercept, range_bin, density,
near_limit = 1)
}
\arguments{
\item{start_L}{output parameter from the logistic model function}
\item{start_K}{output parameter from the logistic model function}
\item{x0}{output parameter from the logistic model function}
\item{range_bin}{series of ranges of densities from a frame}
\item{density}{observed densities by range in a frame}
}
\description{
This function calculates the absolute density from a frame by correcting for
the sighting function for the species.
}
\examples{
frame_density()
}
\keyword{logistic}
\keyword{model}
|
2846c516dfd8d3a16b6a67cd085fa0f08817c47e
|
5b5cf88112f5239e19302d8cfebff60ca3e2199a
|
/r/utils-config.R
|
ca3271588921778961542883314ee47df55942c9
|
[] |
no_license
|
eth-mds/bmi
|
e32ffc57717151c6b79954fb9c64b5c78dc5473e
|
c45a80e6e4295636364b2d8a4c7860b5cf531465
|
refs/heads/main
| 2023-08-12T06:33:26.860001
| 2021-10-01T14:47:42
| 2021-10-01T14:47:42
| 371,434,398
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 834
|
r
|
utils-config.R
|
# Locate the project root: the nearest ancestor of the working directory
# that contains a .gitignore file.
proj_root <- function() {
  criterion <- rprojroot::has_file(".gitignore")
  rprojroot::find_root(criterion, path = ".")
}
# Read or write the JSON file `<dir>/<name>.json`.
#
# When `value` (a list) is supplied it is serialized to the file; otherwise
# the file is read and its parsed content returned.
#
# Args:
#   name: file name without the ".json" extension.
#   dir:  existing directory containing the file.
#   value: optional list to write; NULL (default) means "read".
#   simplifyVector, simplifyDataFrame, simplifyMatrix: forwarded to
#     jsonlite::read_json().
#   null: accepted for interface compatibility.
#     NOTE(review): it is not forwarded to jsonlite in this body -- confirm
#     whether it should be passed to write_json()/read_json().
#   ...: further arguments for jsonlite::write_json()/read_json().
json_file <- function(name, dir, value = NULL, simplifyVector = TRUE,
    simplifyDataFrame = FALSE, simplifyMatrix = FALSE, null = "null", ...) {
  assert_that(dir.exists(dir))
  file <- paste0(file.path(dir, name), ".json")
  if (!is.null(value)) {
    # Write mode: only lists are serialized
    assert_that(is.list(value))
    jsonlite::write_json(value, file, ...)
  } else {
    if (!file.exists(file)) {
      # fixed grammar: "does not exists" -> "does not exist"
      stop("config file ", basename(file), " does not exist.")
    }
    jsonlite::read_json(file, simplifyVector = simplifyVector,
                        simplifyDataFrame = simplifyDataFrame,
                        simplifyMatrix = simplifyMatrix, ...)
  }
}
# Read or write the JSON config file `<project root>/config/<name>.json`;
# supplying `value` writes it, otherwise the parsed content is returned.
config <- function(name, value = NULL, ...) {
  json_file(name, file.path(proj_root(), "config"), value, ...)
}
|
08df7bcb3e654dfc417cd43c7a6b6235813b9643
|
72d03ec10b4955bcc7daac5f820f63f3e5ed7e75
|
/input/gcam-data-system/energy-data/assumptions/A_elec_data.R
|
b76957918b1338c96a63330bfbf7656b000cfc9d
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bgmishra/gcam-core
|
54daddc3d037571bf745c4cf0d54c0d7a77f493f
|
bbfb78aeb0cde4d75f307fc3967526d70157c2f8
|
refs/heads/master
| 2022-04-17T11:18:25.911460
| 2020-03-17T18:03:21
| 2020-03-17T18:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 905
|
r
|
A_elec_data.R
|
# Assumption constants for GCAM electricity-sector data processing.

#Electricity generation fuels whose calibrated quantities in the IEA energy balances are used
electricity_input_fuels <- c( "biomass", "coal", "gas", "refined liquids" )
#Assumed base year heat price, used for calculating adjustment to non-energy costs of electricity technologies with secondary output of heat
heat_price <- 3.2
gas_price <- 2
# Rounding precision (number of digits) used when reporting costs and capacity factors
digits_capital <- 0
digits_OM <- 2
digits_capacity_factor <- 2
#Set a default electric efficiency, (a) for regions where the IEA has input but no output, and
# (b) for setting a floor below which heat plant costs are modified
default_electric_efficiency <- 0.33
#Level2 data names specific to the electric sector
#Note - level2 data names at the technology level are held in the generic level2_data_names folder
names_ElecReserve <- c( "region", "supplysector", "electricity.reserve.margin", "average.grid.capacity.factor" )
# Base year for wind technology cost assumptions
wind_base_cost_year <- 2005
|
ed320947e7c9428e51eef505ba504211f1e8156a
|
f2a982ef2ad5d0a1086830a59f2700bc7e0c668a
|
/R/mod_formatUI.R
|
3f546086e50d4d587614a1458874786184e2cb1f
|
[] |
no_license
|
jimsforks/cleanser
|
6f87363fefd5c0223c17d349ffa19f8d5ff1956c
|
1597f2bfcf58a0084c2810fea236e38a51385e43
|
refs/heads/master
| 2022-03-16T23:49:13.342589
| 2019-09-27T07:43:25
| 2019-09-27T07:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,256
|
r
|
mod_formatUI.R
|
#' mod_format and mod_formatUI function
#' @param id The id of the current element you are entering
#' @description Shiny Module that allows to change the variable types: qualitative, quantitative or date.
#' @export
#' @importFrom DT renderDT DTOutput replaceData
#' @importFrom shinyjs runjs
#' @examples
#' if (interactive()){
#' library(shiny)
#' library(cleanser)
#' library(readr)
#' ui <- fluidPage(
#' titlePanel("Example: mod_FormatUI"),
#' selectInput("go","change data",c("boys",
#' "vins",
#' "vins_missing",
#' "one_factor",
#' "right_csv",
#' "right_xls",
#' "demo2_csv",
#' "demo3_xlsx",
#' "demo4_rds"
#' )),
#' mod_formatUI("truc2")
#' )
#'
#' server <- function(input, output,session) {
#'
#' donnee <- reactive({
#'
#' md <- function(x){system.file("dataset",x,package = "cleanser")}
#' switch (input$go,
#' "boys" = readr::read_csv(md("boys.csv")),
#' "vins" = readr::read_csv(md("vins.csv")),
#' "vins_missing" = readr::read_csv(md("vins_missing.csv"),
#' locale = locale(encoding = 'ISO-8859-1')),
#' "one_factor" = readr::read_csv(md("one_factor.csv")),
#' "right_csv" = readr::read_csv(md("right.csv")),
#' "right_xls" = readxl::read_excel(md("right.xls"),1),
#' "demo2_csv" = readr::read_delim(md("demo2.csv"),
#' locale = locale(encoding = 'ASCII'), delim=";"),
#' "demo4_rds" = readr::read_rds(md("demo4.rds")),
#' "demo3_xlsx" = readxl::read_excel(md("demo3.xlsx"),1)
#' )
#' })
#'
#' callModule(mod_format, "truc2", reactive({donnee()}))
#' }
#' # Run the application
#' shinyApp(ui = ui, server = server)
#' }
mod_formatUI <- function(id){
  ns <- NS(id)
  # UI: a DT table (type selectors are embedded in the headers by the
  # server), a hidden "unselect" button, three debug outputs, and the
  # "Valider" button.
  tagList(useShinyjs(),
          # CSS: let the table scroll horizontally inside its wrapper
          tags$head(
            tags$style(HTML("
.table-wrapper {
overflow-x:auto;
overflow-y:hidden;
width: 100%;
}
"))
          ),
          tags$div(class="table-wrapper",
                   DTOutput(ns("tableau")),
                   # hidden until data is available (shown by the server)
                   shinyjs::hidden(actionButton(inputId = ns("reset_var"),
                                                icon = icon(name = "ban"),
                                                label = "Unselect all variables"))),
          # debug outputs (kept only outside production, see subsetting below)
          verbatimTextOutput(ns("log2")),
          verbatimTextOutput(ns("log3")),
          verbatimTextOutput(ns("log")),
          br(),
          div(actionButton(ns("valider"),"Valider les changements",class="btn-primary",style="display:block;margin:auto;")),
          br()
  )[
    # In production only the first four tagList children are kept.
    # NOTE(review): both index sets drop the later children, including the
    # "valider" button (child 8) -- confirm these indices are intended.
    if(app_prod()){ c(1, 2, 3, 4) }else{ c(1, 2, 3, 4, 5, 6,7)}
  ]
}
#' @param input internal
#' @param output internal
#' @param session internal
#' @param r dataset as reactive
#'
#' @import shiny ggplot2 dplyr vcd corrplot
#' @importFrom stats na.omit cor
#' @importFrom DT datatable dataTableProxy JS replaceData
#' @importFrom purrr map map_df
#' @export
#' @rdname mod_formatUI
mod_format <- function(input, output, session, r){
  # Build one type-selector <select> per column; the preselected option
  # (opt1) reflects the column's current class.
  gen_selectinput <- function(df, class, indic, session){
    map(1:ncol(df),
        function(i) {
          if ( class[i] == "numeric" | class[i]=="integer") {
            opt1 <- "Numeric variable"
            opt2 <- "Categorical variable"
            opt3 <- "Date variable"
          } else if ( class[i] == "Date variable" | class[i] == "POSIXct"| class[i] == "POSIXlt"| class[i] == "POSIX" ) {
            # NOTE(review): "Date variable" is compared against class(.)
            # values; R's class for dates is "Date", so this branch may never
            # match plain Date columns -- confirm.
            opt1 <- "Date variable"
            opt2 <- "Numeric variable"
            opt3 <- "Categorical variable"
          }else{
            opt1 <- "Categorical variable"
            opt2 <- "Numeric variable"
            opt3 <- "Date variable"
          }
          selectInput(
            inputId = session$ns(paste0(paste0("col",indic), i)),
            label = NULL, selected = opt1,
            choices = c(opt1, opt2, opt3))
        }
    )
  }
  # indic: random token regenerated on each table render so the embedded
  # selectInput ids stay unique; typage_in: classes of the incoming data.
  flag <- reactiveValues(indic = "",
                         typage_out = NULL,
                         typage_in = NULL
  )
  # current$init keeps the untouched input data; current$data the retyped one.
  current <- reactiveValues(data=NULL, init=NULL)
  mydata <- reactive({
    df <- r$sortie
    current$data <- r$sortie
    current$init <- r$sortie
    df %>% mutate_if(is.character,as.factor)
  })
  # Re-coerce each column according to the type chosen in its selector and
  # recolor the selector (orange = numeric, green = categorical, pink = date).
  updateData <- reactive({
    validate(
      need(!is.null(mydata()) & !is.null(unlist(mydata())), " ")
    )
    df <- mydata()
    class <- isolate( flag$typage_in )
    if ( !is.null(current$data)) {
      df <- current$data
      if(!app_prod()){
        message( "flag: ", input[[paste0(paste0("col",flag$indic), 3)]])
      }
      # Side effect on df via <<-: each iteration may overwrite column i.
      map(1:ncol(df), function(i){
        id <- paste0(session$ns(''),'col',flag$indic,i)
        if (length(input[[paste0(paste0("col",flag$indic), i)]]) > 0) {
          if (input[[paste0(paste0("col",flag$indic), i)]] == "Numeric variable") {
            shinyjs::runjs(
              # recolor this selector to the "numeric" orange
              glue::glue("document.getElementById('{id}').style.color ='{color}'",
                         color="#fc6d26"
              )
            )
            df[,i] <<- unlist(current$init[,i]) %>% to_numeric()
          } else if (input[[paste0(paste0("col",flag$indic), i)]] == "Categorical variable") {
            shinyjs::runjs(
              # recolor this selector to the "categorical" green
              glue::glue("document.getElementById('{id}').style.color ='{color}'",
                         color="#2aa198"
              )
            )
            df[,i] <<- unlist(current$init[,i]) %>% as.factor
          }else if (input[[paste0(paste0("col",flag$indic), i)]] == "Date variable") {
            shinyjs::runjs(
              # recolor this selector to the "date" pink
              glue::glue("document.getElementById('{id}').style.color ='{color}'",
                         color="#d33682"
              )
            )
            # POSIX* columns kept as-is; Date columns rebuilt from their
            # numeric representation; anything else goes through to_date().
            if(map_df(current$init, ~{class(.)[1]})[i] %in% c("POSIX", "POSIXct", "POSIXlt") ){
              df[,i] <<- current$init[,i] #%>% to_date()
              # df[,i] <<- unlist(current$init[,i]) %>% as.factor
            } else if(map_df(current$init, ~{class(.)[1]})[i] =="Date" ){
              df[,i] <<- unlist(current$init[,i]) %>% as.Date(origin="1970-01-01")
            } else{ df[,i] <<- unlist(current$init[,i]) %>% to_date() }
          }
        }
      })}
    current$data <- df
    df
  })
  # Debug output: dump the whole input reactiveValues list.
  output$log3 <- renderPrint({
    reactiveValuesToList(input)
  })
  # Render the data table; column headers embed the per-column type selectors.
  output$tableau <- renderDT({
    validate(
      need(!is.null(mydata()) & !is.null(unlist(mydata())), "Check that all input are selected on your left")
    )
    if(!app_prod()){
      message("mise a jour flag")
    }
    # Fresh token so new selectInput ids don't collide with a previous render.
    flag$indic <- as.character(rnorm(1))
    df <- mydata()
    class <- map_df(df, ~{class(.)[1]})
    flag$typage_in <- class
    if(!app_prod()){
      message(flag$indic)
    }
    tableSelectInput <- gen_selectinput(df = df,
                                        class = class,
                                        indic = flag$indic,
                                        session = session)
    l <- length(tableSelectInput)
    type_cat <- selectin <- seq_len(l)
    for (i in 1:l) {
      # Render each selector to HTML and extract the selected option text.
      selectin[i] <- as.character(tableSelectInput[[i]])
      pos <- gregexpr("selected>",selectin[i])[[1]][1]
      type_cat[i] <- substr(selectin[i], (pos + 9), (pos + 11))
    }
    # Color each column name by its most likely type (see likely_types()).
    colors_types <- likely_types(df)
    col_names <- paste0( '<font color = "', colors_types, '">',colnames(df), "</font><br/>", selectin)
    datatable(isolate(
      updateData()), selection = list(target="column"),
      options = list(ordering = FALSE, lengthMenu = c(3, 10,1, 125, 1000),
                     # scrollX = TRUE, # breaks the code!
                     initComplete = JS("function() {
$('.colors').remove();
$(document).ready(function() {if ( $('.table-wrapper').height()>0 && $('body > div > div > ul > li:nth-child(1)').attr('class')==='active'){
$( '.well' ).after('<div class=\\042 colors \\042 ><p> The colors help you decide which type to attribute to a variable.</p><p style=\\042 color: #2aa198 ;\\042 ><b>Green</b> indicates that a variable is likely a Categorical variable. </p><p style=\\042 color: #d33682 ;\\042> <b>Pink</b> indicates that a variable is likely a Date variable.</p><p style=\\042 color: #fc6d26 ;\\042><b>Orange</b> indicates that a variable is likely a Numeric variable.</p></div>' )}
else { $('.colors').hide();
}})}"),
                     preDrawCallback = JS("function() {
Shiny.unbindAll(this.api().table().node()); }"),
                     drawCallback = JS("function() { Shiny.bindAll(this.api().table().node());
}")
      ),
      colnames = col_names,
      escape = FALSE
    )
  }
  )
  proxy <- dataTableProxy('tableau')
  # Keep the rendered table in sync with the retyped data.
  observe({
    replaceData(proxy, updateData(), resetPaging = TRUE )
  })
  output$log <- renderPrint({
    updateData()
  })
  output$log2 <- renderPrint({
    class <- map_df(updateData(), ~{class(.)[1]})
  })
  # Clear the column selection when the "unselect" button is pressed.
  observeEvent(input$reset_var,{
    if (!app_prod()){
      message("reset selection")
    }
    DT::selectColumns(proxy = proxy,selected = NULL)
  })
  # Reveal the "unselect" button once data is available.
  observeEvent(updateData(),{
    shinyjs::show("reset_var")
  })
  sortie <- reactive({
    list(df = updateData(),
         selected = as.numeric(input$tableau_columns_selected))
  })
  # Persist the retyped data and column selection back to the caller.
  observeEvent(input$valider,{
    r$sortie <- sortie()
  })
}
|
3a306b7ac71f1a96320fcff59056c43329c0092a
|
fe17217bf85ed660a1fa3173f6078133c5bc49e0
|
/man/L.2x.poisson.strat.Rd
|
dd27e0fe83d667c6c115ae40ccc7b28dcc44e617
|
[] |
no_license
|
rgcstats/ODS
|
5a4ba2107328175174b4874e10d8e47733c33397
|
0290071546fdd8dff1c8e9e6d8bc5920d1c04491
|
refs/heads/master
| 2020-12-10T10:30:36.232624
| 2020-01-13T10:20:29
| 2020-01-13T10:20:29
| 77,803,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,053
|
rd
|
L.2x.poisson.strat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/L_2x_poisson_strat.R
\name{L.2x.poisson.strat}
\alias{L.2x.poisson.strat}
\title{Calculates the estimated likelihood for a regression model under a general sample design
assuming one continuous and one binary covariate}
\usage{
L.2x.poisson.strat(beta, sd, prob.x2, mu0.x1, mu1.x1, sd.x1, ys, x1s, x2s, pi.h,
cutoffs, log = FALSE, verbose = FALSE)
}
\arguments{
\item{beta}{vector of regression coefficients}
\item{sd}{error standard deviation}
\item{prob.x2}{the parameter of x2's assumed bernoulli distribution}
\item{mu0.x1}{the expected value of x1 conditional on x2=0}
\item{mu1.x1}{the expected value of x1 conditional on x2=1}
\item{sd.x1}{the SD of x1 conditional on x2}
\item{ys}{vector of sample values of the dependent variable}
\item{x1s}{sample values for covariate 1 (continuous)}
\item{x2s}{sample values for covariate 2 (binary)}
\item{pi.h}{selection probability in each stratum (H-vector)}
\item{cutoffs}{vector of H-1 cutoffs on Y defining the strata}
\item{log}{If FALSE, the function returns the likelihood, otherwise the log-likelihood}
\item{verbose}{If TRUE, the function outputs information to the console.}
}
\value{
The likelihood or log-likelihood.
}
\description{
This function calculates the likelihood.
}
\details{
The likelihood is evaluated under stratified sampling: strata are defined by
applying the \code{cutoffs} to the dependent variable Y, and units in stratum
h are selected with probability \code{pi.h[h]}.
}
\examples{
data(population_example)
eg.ybar.pi <- sum(sample.example$y/sample.example$pi) / sum(1/sample.example$pi)
eg.s2.pi <- sum((sample.example$y-eg.ybar.pi)^2/sample.example$pi)/sum(1/sample.example$pi)
eg.pi.fn <- function(y){ out <- sqrt( 0.1 + 0.9*(y-eg.ybar.pi)^2/eg.s2.pi )
out <- out * mean(sample.example$pi) /
mean(sqrt( 0.1 + 0.9*(sample.example$y-eg.ybar.pi)^2/eg.s2.pi ))
out }
L.2x.poisson(beta=c(1,0.5,-0.5),sd=1,prob.x2=0.45,mu0.x1=0,mu1.x1=0,sd.x1=1,
ys=sample.example$y,x1s=sample.example$x1,x2s=sample.example$x2,
pi.fn=eg.pi.fn,pi.s=sample.example$pi,R=10,log=TRUE)
}
|
68601dfc6d3b48bd8a7ea77e24eab09231d340ca
|
bea3b6bb4ce9cc6859db748401ecea7b9e5e8595
|
/dc-campaign-finance/contrib-analysis.r
|
5d701a621e09cdaeb8801ec7d68316af0638476c
|
[] |
no_license
|
occupydata/Occupy-data-processing
|
db45773c3a76d7ed3390a9b496fc140f8013142f
|
1960c65ad31799be76a31270ee345ad215ee9afa
|
refs/heads/master
| 2016-09-09T21:07:16.787575
| 2012-06-11T16:12:41
| 2012-06-11T16:12:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,784
|
r
|
contrib-analysis.r
|
# Copyright (c) 2012 Data Committee of Occupy DC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Contact: data at occupydc dot org
# --- Corporate contributions by office / committee / year -------------------
# `contribs.df` and `work.dir` are assumed to be created by an upstream
# load/clean script -- TODO confirm which script defines them.
by.comm.year.agg<-aggregate(contribs.df$Amount[contribs.df$Contribution.Type=="Corporation"],
  by=list(electoral.office=contribs.df$electoral.office[contribs.df$Contribution.Type=="Corporation"],
  Committee.Name=contribs.df$Committee.Name[contribs.df$Contribution.Type=="Corporation"],
  year=substr(contribs.df$Date.of.Receipt[contribs.df$Contribution.Type=="Corporation"], 1, 4)),
  FUN=sum, na.rm=TRUE)
write.csv(by.comm.year.agg, file=paste(work.dir, "Aggregation by committee and year - corps.csv", sep=""), row.names=FALSE)
# Cross-tab: do corporate contributors share geography with the recipient?
table(contribs.df$contributor.recipient.same.geo[contribs.df$Contribution.Type=="Corporation"])
# Total corporate/business amounts by same-geography flag and recipient ward.
aggregate(contribs.df$Amount[contribs.df$Contribution.Type %in% c("Corporation", "Business")],
  by=list(contribs.df$contributor.recipient.same.geo[contribs.df$Contribution.Type %in% c("Corporation", "Business")],
  contribs.df$recipient.ward[contribs.df$Contribution.Type %in% c("Corporation", "Business")]),
  FUN=sum)
# Same breakdown for individual contributions.
aggregate(contribs.df$Amount[contribs.df$Contribution.Type=="Individual"],
  by=list(contribs.df$contributor.recipient.same.geo[contribs.df$Contribution.Type=="Individual"],
  contribs.df$recipient.ward[contribs.df$Contribution.Type=="Individual"]),
  FUN=sum)
# --- Ad-hoc inspection of the geocoding output ------------------------------
# Columns ending in ".<digit>" indicate duplicated names from a join/merge.
names(DC.geocoded.df)[grepl("[.][0-9]$", names(DC.geocoded.df))]
table(!is.na(DC.geocoded.df$returnDataset.diffgram.NewDataSet.Table1.FULLADDRESS.1))
# NOTE(review): the next two lines mix a row-subset inside is.na() with
# whole-frame row indexing (lengths differ); the third line is the corrected
# form. Kept for comparison during debugging.
contribs.df[!is.na(contribs.df[contribs.df$state.clean=="DC", "MPreDirectional"]), "address.clean"]
contribs.df[!is.na(contribs.df[contribs.df$state.clean=="DC", "MPreDirectional"]), "Address"]
contribs.df[!is.na(contribs.df[, "MPreDirectional"]) & contribs.df$state.clean=="DC", "MPreDirectional"]
head(
contribs.df[!is.na(contribs.df[, "MPreDirectional"]) & contribs.df$state.clean=="DC", "Address"]
)
str(contribs.df[contribs.df$Address=="9102 E. Pershing Ave",] )
# --- Calendar heat map of daily contribution totals -------------------------
# NOTE(review): installing a package and sourcing remote code inside an
# analysis script is fragile (and the sourced URL may disappear); consider
# vendoring calendarHeat.R into the repo.
install.packages("chron")
source("http://blog.revolution-computing.com/downloads/calendarHeat.R")
contribs.date.agg<-aggregate(contribs.df$Amount, by=list(contribs.df$Date.of.Receipt),
  FUN=sum, na.rm=TRUE)
# Restrict to receipts after 2007-01-01; log scale tames the heavy right tail.
contribs.date.agg<-contribs.date.agg[as.Date(contribs.date.agg$Group.1) >
  as.Date("2007-01-01"), ]
calendarHeat(contribs.date.agg$Group.1, log(contribs.date.agg$x), color="r2b")
#See:
#https://code.google.com/p/hackystat-ui-trajectory/source/browse/trunk/Rcode/?r=612#Rcode%2Fe4.heatmaps
# http://greatergreaterwashington.org/post/13968/most-sitting-councilmembers-absent-on-campaign-finance/
# Liscenced home improvement contractors: http://government.dc.gov/DC/Government/Publication%20Files/Consumer/home_improvement_contractors_list_1_2010.pdf
# DC liccensed general contractors: http://government.dc.gov/DC/Government/Publication%20Files/Consumer/general_contractors_list_12_2010.pdf
# http://lsdbe.dslbd.dc.gov/public/certification/search.aspx
# http://pivs.dcra.dc.gov/property/search
# http://cpms.dcra.dc.gov/BBLV/default.aspx
# http://dcra.dc.gov/DC/DCRA/About+DCRA/News+Room/Press+Releases/DCRA+launches+Corp+Online,+a+new+online+corporate+registration+and+filing+system
# https://corp.dcra.dc.gov
# http://dcatlas.dcgis.dc.gov/catalog/results.asp
# http://dcatlas.dcgis.dc.gov/metadata/AddressPt.html
# Biking: http://dcatlas.dcgis.dc.gov/metadata/TopoLn.html
# http://dcatlas.dcgis.dc.gov/metadata/Topo_20ft.html
# http://dcatlas.dcgis.dc.gov/catalog/
# http://dcatlas.dcgis.dc.gov/metadata/NbhClusPly.html
# http://dcatlas.dcgis.dc.gov/metadata/DCPropertyPt.html DC govt property
# http://dcatlas.dcgis.dc.gov/metadata/CamaCommPt.html
# Basic Business licenses: http://data.dc.gov/Metadata.aspx?id=1520
# http://octo.dc.gov/DC/OCTO/Maps+and+Apps/Online+Mapping/All+Online+Maps
# http://geospatial.dcgis.dc.gov/ocf/
# http://www.city-data.com
# http://otr.cfo.dc.gov/otr/cwp/view,a,1330,q,594345.asp
# http://otr.cfo.dc.gov/otr/lib/otr/tax/property/pdf/usecodes.pdf
# https://www.taxpayerservicecenter.com/PropertyDetailTips.pdf
# --- Assign a shared geocode id to duplicated addresses ---------------------
# NOTE(review): this only groups duplicates correctly if duplicate addresses
# are adjacent (duplicated() marks later occurrences, and k only advances on
# non-duplicates) -- sort by Address/city/state/Zip first, or use
# match()/interaction() instead of a row loop.
contribs.df$DUPS<-duplicated(contribs.df[, c("Address", "city", "state", "Zip")])
k<-1
for ( i in 1:nrow(contribs.df)) {
  if (contribs.df$DUPS[i]) {
    contribs.df$geocode.id[i]<-k
  } else {
    k<-k+1
    contribs.df$geocode.id[i]<-k
  }
}
#### THIS IS NECESSARY SINCE SOMEHOW THE GEOCODES WERE SCRAMBLED. MUST FIX IN FINAL
#### VERSION OF CODE ABOVE
# Interactive spot checks of the geocoded frame (View() requires RStudio/GUI).
View(contribs.geocoded.df[90000:91000, c("Address", "address.clean")])
View(contribs.geocoded.df[90020, ])
contribs.geocoded.df[contribs.geocoded.df$Matching.Geography.Type=="USPSZipPlus4", c("FArea", "FAreaType")]
head(
contribs.geocoded.df[contribs.geocoded.df$Matching.Geography.Type=="CountySubRegion", c("FArea", "FAreaType")]
)
|
1d0053404bb73f63419fb9d9f68267c1036c6f38
|
b8dda22d68ef15ca86120fe0c670bb4b3165f9dc
|
/man/getCRUCLdata-package.Rd
|
8dbba6b6c4db83b8e101d2e6d1230a74bbd6ff8c
|
[
"MIT"
] |
permissive
|
hydroinfo-gis/getCRUCLdata
|
7ec956049f2bfe26d4e862f0151b7b11d85002e1
|
ab866ade0f7621be66a675be24779053590a9b79
|
refs/heads/master
| 2023-02-07T22:12:57.880407
| 2020-12-16T04:24:19
| 2020-12-16T04:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,320
|
rd
|
getCRUCLdata-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCRUCLdata-package.R
\docType{package}
\name{getCRUCLdata-package}
\alias{getCRUCLdata}
\alias{getCRUCLdata-package}
\title{getCRUCLdata: 'CRU' 'CL' v. 2.0 Climatology Client}
\description{
Provides functions that automate downloading and importing
University of East Anglia Climate Research Unit ('CRU') 'CL' v. 2.0
climatology data, facilitates the calculation of minimum temperature and
maximum temperature and formats the data into a tidy data frame as a
'tibble' or a list of 'raster' 'stack' objects for use. 'CRU' 'CL' v. 2.0
data are a gridded climatology of 1961-1990 monthly means released in 2002
and cover all land areas (excluding Antarctica) at 10 arcminutes
(0.1666667 degree) resolution. For more information see the description of
the data provided by the University of East Anglia Climate Research Unit,
<https://crudata.uea.ac.uk/cru/data/hrg/tmc/readme.txt>.
}
\seealso{
Useful links:
\itemize{
\item \url{https://docs.ropensci.org/getCRUCLdata/}
\item Report bugs at \url{https://github.com/ropensci/getCRUCLdata/issues}
}
}
\author{
\strong{Maintainer}: Adam H. Sparks \email{adamhsparks@gmail.com} (\href{https://orcid.org/0000-0002-0061-8359}{ORCID})
}
\keyword{internal}
|
b3e09912d48811b4b5341738b45c5aace9ed9a58
|
866899ce28eb6270fde88421319045a17c35c8b9
|
/crypto_class.R
|
b7b3e4342e7c9e11373c4b1058a286262501b3f9
|
[] |
no_license
|
martinkabe/crypto-analytics
|
11098002f511e09d2f81d63f3c655e7e9266cfde
|
a0aba28019459b88fdbef7fa6f36ebabcb71fe50
|
refs/heads/main
| 2023-03-10T23:50:38.484083
| 2021-02-24T22:09:14
| 2021-02-24T22:09:14
| 340,859,788
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,739
|
r
|
crypto_class.R
|
# Attach required packages. library() (unlike require()) stops immediately
# with an informative error when a package is missing, instead of returning
# FALSE and letting later code fail with "could not find function".
# invisible() suppresses the auto-printed list of attached-package vectors.
invisible(lapply(
  c("tidyverse", "rvest", "R6", "dplyr", "prophet", "RSQLS", "bit64"),
  library,
  character.only = TRUE
))
# CleanHtmlData: R6 class wrapping a scraped cryptocurrency price table.
# It parses a saved HTML <table> dump into a clean data.frame, reads from /
# writes to a SQL table via RSQLS::pull_data()/push_data(), and draws
# prophet-based forecasts. When constructed with path_to_file = NULL the
# object is a "general" one that operates on all cryptos in the DB table.
# NOTE(review): relies on rvest/dplyr/stringr/prophet/RSQLS being attached
# by the caller (see the package-loading line above this class).
CleanHtmlData <- R6::R6Class("CleanHtmlData",
   private = list(
     ..path_to_file = as.character(),        # directory of the raw HTML file; NULL => "general" object
     ..file_name = as.character(),           # file name; basename (sans extension) doubles as the crypto name
     ..data = data.frame(),                  # cleaned table: Date column + numeric price columns
     ..table_name = as.character(),          # target SQL table name
     ..prophet_crypto_name = as.character()  # crypto name cached when plotting from a file-backed object
   ),
   public = list(
     # Constructor. If path_to_file is given, reads <path_to_file><file_name>,
     # extracts the first HTML <table>, parses Date ("Month DD, YYYY") and
     # strips "$", "," from the remaining columns so they become numeric.
     # Column names are cleaned of "*" and spaces.
     initialize = function(path_to_file = NULL,
                           file_name = NULL,
                           tableName) {
       private$..path_to_file = path_to_file
       private$..file_name = file_name
       private$..table_name = tableName
       if (!is.null(path_to_file)) {
         # NOTE(review): plain concatenation -- path_to_file must end in a
         # separator; file.path() would be safer.
         raw_table = readLines(paste0(path_to_file, file_name))
         private$..data = rvest::minimal_html(raw_table) %>%
           html_node("table") %>%
           html_table() %>% mutate(Date = as.Date(Date, format = "%B %d, %Y"))
         # Columns 2..n are prices: drop "$" and thousands separators, coerce
         # to numeric (apply() returns a matrix, hence the as.data.frame()).
         private$..data[2:ncol(private$..data)] = apply(private$..data[2:ncol(private$..data)], 2, function(x) {
           as.numeric(gsub("[\\$,]", "", x))
         }) %>% as.data.frame()
         colnames(private$..data) = str_remove_all(colnames(private$..data), "[\\* ]")
       }
     },
     # Crypto name = file name without extension; a "general" object (no file)
     # only prints a notice and returns NULL (invisibly, via cat()).
     getCryptoName = function() {
       if (is.null(private$..file_name)) {
         cat("\n... all crypto names considered ...\n")
       } else {
         return (tools::file_path_sans_ext(private$..file_name))
       }
     },
     # Returns the cleaned in-memory table; a "general" object has none.
     getCryptoData = function() {
       if (is.null(private$..path_to_file)) {
         cat("\n... general object ...\n")
       } else {
         return (private$..data)
       }
     },
     # Builds and runs a SELECT against the SQL table, optionally restricted
     # by date range and crypto name(s). A file-backed object always filters
     # to its own crypto; a "general" object filters to `cryptos` if given.
     # Returns the result ordered by Date descending.
     # NOTE(review): dateFrom/dateTo/cryptos are pasted directly into SQL --
     # only safe for trusted inputs.
     getCryptoDataFromDB = function(connString,
                                    dateFrom = NULL,
                                    dateTo = NULL,
                                    cryptos = NULL) {
       sql_task = paste0("SELECT * FROM ", private$..table_name)
       if (!is.null(private$..path_to_file)) {
         crypto_name = paste0("'", self$getCryptoName(), "'")
         crypto_name_task = paste0(" CryptoName = ", crypto_name)
       } else {
         if (!is.null(cryptos)) {
           crypto_name = paste0("'", cryptos, "'", collapse = ", ")
           crypto_name_task = paste0(" CryptoName in (", crypto_name, ")")
         } else {
           crypto_name = "ALL"
           crypto_name_task = NULL
         }
       }
       # Four cases: no bounds / lower bound only / upper bound only / both.
       if (is.null(dateFrom) & is.null(dateTo)) {
         # return all data
         cat("\n---------- Returning all the data for ", crypto_name, " ----------\n")
         sql_task = ifelse(is.null(crypto_name_task),
                           sql_task,
                           paste0(sql_task, " WHERE", crypto_name_task))
       } else if (!is.null(dateFrom) & is.null(dateTo)) {
         # return data from until the end
         cat("\n---------- Returning data from ", dateFrom, " till the end for ", crypto_name, " ----------\n")
         sql_task = ifelse(is.null(crypto_name_task),
                           paste0(sql_task, " WHERE Date >= '", dateFrom, "'"),
                           paste0(sql_task, " WHERE Date >= '", dateFrom, "' AND", crypto_name_task))
       } else if (is.null(dateFrom) & !is.null(dateTo)) {
         # return all data until some date
         cat("\n---------- Returning all data until ", dateTo, " for ", crypto_name, " ----------\n")
         sql_task = ifelse(is.null(crypto_name_task),
                           paste0(sql_task, " WHERE Date <= '", dateTo, "'"),
                           paste0(sql_task, " WHERE Date <= '", dateTo, "' AND", crypto_name_task))
       } else {
         # return date range
         cat("\n---------- Returning all data from ", dateFrom, " to ", dateTo, " for ", crypto_name, " ----------\n")
         sql_task = ifelse(is.null(crypto_name_task),
                           paste0(sql_task, " WHERE Date BETWEEN '", dateFrom, "' AND '", dateTo,"'"),
                           paste0(sql_task, " WHERE Date BETWEEN '", dateFrom, "' AND '", dateTo,"' AND", crypto_name_task))
       }
       cat("\n", sql_task, "\n")
       sql_task = paste0(sql_task, " ORDER BY Date DESC")
       return (pull_data(connectionString = connString,
                         sqltask = sql_task,
                         showprogress = FALSE))
     },
     # Console summary of a data.frame: dimensions, first 3 and last 3 rows.
     getDataInfo = function(data) {
       cat("\n----------------------------------------------------\n")
       cat("\trows: ", nrow(data), "\tcols: ", ncol(data), "\n")
       cat("\n")
       print(head(data, 3))
       cat("\n\t...\t...\t...\n\n")
       print(tail(data, 3))
       cat("\n")
     },
     # Fits a prophet model on the Close series and renders an interactive
     # dyplot with a `periods`-day forecast horizon. A "general" object
     # filters `data` to `cryptoname`; a file-backed object uses its own data
     # (and ignores `data`).
     drawDyplotProphet = function(data, cryptoname = NULL, periods = 365) {
       cat("Creating prophet data.frame ...\n")
       if (is.null(private$..path_to_file)) {
         data_prophet <- data %>% filter(CryptoName == cryptoname) %>% select(Date, Close) %>% rename(ds=Date, y=Close)
       } else {
         private$..prophet_crypto_name = self$getCryptoName()
         data_prophet <- private$..data %>% select(Date, Close) %>% rename(ds=Date, y=Close)
       }
       cat("Creating prophet model ...\n")
       model <- prophet(data_prophet)
       # NOTE(review): "prohpet" typo in the status message below is
       # intentional-preserved runtime output.
       cat("Creating future data.frame based on prohpet model ...\n")
       future <- make_future_dataframe(model, periods = periods)
       cat("Creating prophet future prediction with prediction interval ...\n")
       forecast <- predict(model, future)
       cat("Plotting prophet chart ...\n")
       dyplot.prophet(model, forecast,
                      main=paste0("Time Series for ",
                                  cryptoname, " [",
                                  min(data_prophet$ds), "::",
                                  max(data_prophet$ds), "]"))
     },
     # Appends to the DB any dates present in the parsed file but missing
     # from the SQL table (for this object's crypto). No-op with a notice
     # for "general" objects, and prints "All up to date!" when nothing is
     # missing.
     updateData = function(connString) {
       if (is.null(private$..path_to_file)) {
         cat("\n... general object, this method cannot be used ...\n")
       } else {
         db_dates <- pull_data(connectionString = connString,
                               sqltask = paste0("SELECT Date FROM ", private$..table_name, " WHERE CryptoName = '", self$getCryptoName(), "'"),
                               showprogress = FALSE) %>% .$Date
         missing_dates <- self$getCryptoData()[!self$getCryptoData()$Date %in% db_dates,]
         if (nrow(missing_dates) > 0) {
           dates_to_update <- paste0(missing_dates$Date, collapse = ", ")
           cat("\nDates to be updated: ", dates_to_update, "\n")
           push_data(connString, missing_dates %>% mutate(CryptoName=self$getCryptoName()),
                     private$..table_name,
                     showprogress = TRUE,
                     append = TRUE)
         } else {
           cat("\nAll up to date!\n")
         }
       }
     }
   )
)
|
399fcfe4dd63f6b0b9eb217ca1eccb9485ca3591
|
12ae74bd0ba9d5494d7301b521b45d1bfa5ff84a
|
/R/grapes_similar_equals_grapes.R
|
21005aee8dbc26b0efe56ca6cdd9a1066f2dc7d1
|
[] |
no_license
|
cran/do
|
62b609a0f0cc0f0c0cc879adb821b1d9d95b6632
|
fa0d7c8f9799326ffa6f0763f490c2873597131b
|
refs/heads/master
| 2021-08-15T11:59:00.793187
| 2021-08-03T10:40:02
| 2021-08-03T10:40:02
| 206,034,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
grapes_similar_equals_grapes.R
|
#' Locate Similar Elements via grep()
#'
#' For each element of `a`, find the positions in `b` whose character
#' representation matches that element when used as a pattern (see
#' [grep()]).
#'
#' @param a vector of patterns for matching
#' @param b vector for searching
#'
#' @return A named list with one element per entry of `a` (names are
#'   `as.character(a)`), each containing the integer positions in `b`
#'   matched by that pattern. A zero-length `a` yields an empty list
#'   (the previous `1:length(a)` loop errored in that case).
#' @export
#'
#' @examples
#' 1 %s=% c(1,12,3)
#' c(1,2) %s=% c(1,12,3)
"%s=%" <- function(a, b){
  # lapply() + setNames() replaces the original grow-a-list-in-a-loop
  # pattern: no repeated copying via c(), and seq_along semantics make a
  # zero-length `a` safe. grep(pattern = a[[i]], x = b) as before.
  setNames(lapply(a, grep, x = b), as.character(a))
}
|
ce639b6ee847af467626d25bf131ecccc7118da6
|
d96211b26845f18d8b744c3466111856f1abce0d
|
/Marriah_Lewis_HW4.R
|
2a18cfd460b8fabbecc9859a5471219fc9e1eff3
|
[] |
no_license
|
marriah0024/IST707-MachineLearning-R-
|
91ce7e5b013d86cd85c5fb9cdd13a313c72d98d5
|
5803e3a6a31793047a6f0574274d344779eb00c5
|
refs/heads/main
| 2023-04-22T05:59:43.218626
| 2021-05-21T14:54:37
| 2021-05-21T14:54:37
| 368,610,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,911
|
r
|
Marriah_Lewis_HW4.R
|
# Federalist Papers authorship analysis (HW4): cluster the word-frequency
# features with k-means and HAC, then train an rpart decision tree on the
# papers of known authorship and predict the disputed papers.
# NOTE(review): the CSV path below is machine-specific; parameterize it
# before sharing/re-running.
# loading packages
library(ggplot2)
library(factoextra)
library(stringr)
library(tidyr)
library(gridExtra)
library(FunCluster)
library(rpart)
library(caret)
library(rattle)
#loading the data
papers<-read.csv("C:/Users/lewis/Downloads/fedPapers85.csv")
str(papers)
#summary of the authors
summary(papers$author)
#creating a new column with an abbreviation of the author name
papers$owner<- ifelse(papers$author== 'HM', 'HM', ifelse(papers$author== 'Jay', 'J', ifelse(papers$author== 'Madison', 'M', ifelse(papers$author== 'dispt', 'D', ifelse(papers$author=='Hamilton', 'H',NA)))))
#splitting the file name and file number (regex captures the leading name and
#the two-digit paper number from the filename column)
papers<- extract(papers, filename, into= c("Name", "Num"), "([^(]+)\\s*[^0-9]+([0-9].).")
#Combine the author name with the file number; used as unique row names
papers$file<-paste(papers$owner, "-", papers$Num)
rownames(papers)<- papers$file
#Drop unnecessary columns (the two helper columns just created, then the
#Name/Num columns), leaving author + numeric word-frequency features
papers<- papers[c(-(ncol(papers)-1))]
papers<- papers[c(-(ncol(papers)))]
papers<- papers[c(-2,-3)]
#Authored by Jay and Hamilton/Madison jointly are excluded from the analysis
n<- papers[papers$author!='Jay',]
papers<- n[n$author!='HM',]
#View the dataset
View(papers)
#dropping unused levels
papers<- droplevels(papers)
#Review the data top 5
head(papers,5)
#Euclidean distance calculation and visualization of paper-to-paper distances
distance<-get_dist(papers)
fviz_dist(distance, gradient = list(low='#BB0099', mid= 'white', high= '#FC6a2e'))
#K-means on the numeric features only (column 1 is the author factor)
set.seed(42)
clus<-kmeans(papers[c(-1)], centers = 5)
#created a cluster table: author vs. assigned cluster
t(table(papers[,1], clus$cluster))
#Rename author column and plot the cluster; more cleaning needed
fviz_cluster(clus, data = papers[c(-1)])
# How many clusters are needed? Elbow plot of total within-cluster SS.
# NOTE(review): `$tot.withins` relies on R's partial matching of the
# `tot.withinss` list element -- spell it out to be safe.
set.seed(123)
q<-function(k){
  return(kmeans(papers[c(-1)],k, nstart = 30)$tot.withins)
}
k_values<-1:10
q_values<-purrr::map_dbl(k_values, q)
plot(x = k_values, y= q_values,
     type = 'b', frame= F,
     xlab = 'Number of clusters (k)',
     ylab = 'Total within clusters')
#Try 4 clusters
set.seed(48)
four<-kmeans(papers[c(-1)], centers = 4, nstart = 30, iter.max = 100)
tab<-t(table(papers[,1], four$cluster))
#Plotting the four clusters
fviz_cluster(four, data = papers[c(-1)])
#Cluster Growth: fit k = 2..7 for side-by-side comparison
k2<-kmeans(papers[c(-1)], centers = 2, nstart = 30)
k3<-kmeans(papers[c(-1)], centers = 3, nstart = 30)
k4<- kmeans(papers[c(-1)], centers = 4, nstart = 30)
k5<- kmeans(papers[c(-1)], centers = 5, nstart = 30)
k6<- kmeans(papers[c(-1)], centers=6, nstart= 30)
k7<- kmeans(papers[c(-1)], centers = 7, nstart = 30)
#plotting the clusters
plot2<- fviz_cluster(k2, geom = "point", data = papers[c(-1)])+ ggtitle('k=2')
plot3<- fviz_cluster(k3, geom = "point", data = papers[c(-1)])+ ggtitle('k=3')
plot4<- fviz_cluster(k4, geom = "point", data = papers[c(-1)])+ ggtitle('k=4')
plot5<- fviz_cluster(k5, geom = "point", data = papers[c(-1)])+ ggtitle('k=5')
plot6<- fviz_cluster(k6, geom = "point", data = papers[c(-1)])+ ggtitle('k=6')
plot7<- fviz_cluster(k7, geom = "point", data = papers[c(-1)])+ ggtitle('k=7')
grid.arrange(plot2, plot3, plot4, plot5, plot6, plot7, nrow=3)
#Hierarchical clustering (Ward's method on Euclidean distances)
hac<-hclust(dist(papers[c(-1)],method = 'euclidean'), method = 'ward.D2')
#plot the hierarchical clustering dendrogram with 4-cluster rectangles
plot.new()
plot(hac, main = 'Dendogram using HAC', xlab = rect.hclust(hac, k=4))
#Decision Tree m=test and l= train: disputed papers are held out as the
#prediction set; papers of known authorship train the model
m<- papers[papers$author=='dispt',]
l<-papers[papers$author!= 'dispt',]
#Dropping the unused levels
m<-droplevels(m)
l<- droplevels(l)
#Training the model with l dataset (caret wraps rpart; default CV tuning)
dt_model<-train(author~., data= l, metric= 'Accuracy', method= 'rpart')
#Per-author class probabilities for each disputed paper
dt_predict<-predict(dt_model, newdata= m, na.action=na.omit, type= 'prob')
head(dt_predict, 11)
#printing the final model
print(dt_model)
#Plotting the final model
fancyRpartPlot(dt_model$finalModel)
#Model Prediction: hard class labels for the disputed papers
raw_dt_predict<- predict(dt_model, newdata = m, type = 'raw')
print(raw_dt_predict)
|
16972a9b752b6d4b79b33401f40be3ee0fdc4f26
|
61426d4fd3b4edf3e74d229adcebbe9427a11729
|
/analysis/model_plots.R
|
ef0fe83bb96b921e878b546ba568bfa2694d4d03
|
[] |
no_license
|
wangdafacai/MXU5MR
|
e28f71c78c47c8dbe75bd9c87060332d3e8f504a
|
6ab9d412245fc925bfd9026f7d05ad2e93eb03ec
|
refs/heads/master
| 2023-03-15T17:55:40.431026
| 2017-05-17T17:32:46
| 2017-05-17T17:32:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
model_plots.R
|
################################################################################
# Outline for plots to eventually put into a shiny app, right now im thinking
# of putting three options, model, age, and year and plotting a muni map
# that is responsive to all three a state by state age time series,
# and a histogram
################################################################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- fine for an
# interactive scratch script, but remove before this becomes app code.
rm(list=ls())
pacman::p_load(leaflet, INSP, ggplot2, dplyr, data.table)
# Model output: one row per municipality (GEOID) x age (EDAD) x YEAR with a
# relative-risk column RR.
DT <- fread("~/Documents/MXU5MR/analysis/outputs/model_phi.csv")
# Zero-pad GEOID to 5 digits: first 2 = state (ENT), last 3 = municipality (MUN).
DT[,GEOID:=sprintf("%05d", GEOID)]
# NOTE(review): strsplit-per-row is a slow way to take substrings;
# substr(GEOID, 1, 2) / substr(GEOID, 3, 5) would be equivalent.
DT$ENT_RESID <- sapply(DT$GEOID, function(x)
    paste0(strsplit(x, "")[[1]][1:2], collapse=""))
DT$MUN_RESID <- sapply(DT$GEOID, function(x)
    paste0(strsplit(x, "")[[1]][3:5], collapse=""))
# State-level mean RR by age and year, then log scale for plotting.
statedf <- DT[,mean(RR), by=list(ENT_RESID, EDAD, YEAR)]
setnames(statedf, names(statedf), c("ENT_RESID", "EDAD", "YEAR", "RR"))
statedf[,logRR:=log(RR)]
# State-by-state time series of log relative risk, one line per age.
ggplot(data=statedf, aes(x=YEAR, y=logRR, group=EDAD, color=EDAD)) +
    geom_line() + facet_wrap(~ENT_RESID)
# Join the under-1 (EDAD==0), 2014 estimates onto the municipal spatial frame.
# `mx.sp.df` and spdf2leaf() presumably come from the INSP package loaded
# above -- TODO confirm.
# NOTE(review): the legend titles below say "2011 U1MR" but the subset is
# YEAR==2014 -- one of the two looks wrong; verify before publishing.
mx.sp.df@data <- left_join(mx.sp.df@data, subset(DT, EDAD==0 & YEAR==2014))
mx.sp.df@data$logRR <- log(mx.sp.df@data$RR)
spdf2leaf(mx.sp.df, "RR", "2011 U1MR<br>Relative<br>Risk")
spdf2leaf(mx.sp.df, "logRR", "2011 U1MR<br>Relative<br>Risk (log)")
|
38a4b85be98c6c4aa3bc835a4f3f9f8fde4bec0f
|
69f3702c822466f929e77b746c911cb4d580c3fc
|
/ui/enrich_ui.R
|
d58f92504b7098febfdb1b8a7bea7d892c100147
|
[] |
no_license
|
soulong/shinyHeatmap2Pathway
|
73050fd7406a41d49d2f85807d456ee498b381b8
|
79b71a42fafbee44aa6ba58b6748e8b21de58442
|
refs/heads/master
| 2023-08-19T09:00:23.168083
| 2023-08-03T09:17:50
| 2023-08-03T09:17:50
| 251,056,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
enrich_ui.R
|
# Enrichment-tab layout: a narrow scrollable control column (left) and the
# enrichment plot area (right). All input/output IDs here are wired to the
# matching server code ("enrich_apply", "enrichment_plot_ui", ...).
fluidRow(
  column(width=3,
         wellPanel(
           style = "overflow-y:scroll; max-height: 1000px",
           # Cluster to enrich; choices are presumably refreshed server-side
           # via updateSelectInput once clustering has run -- TODO confirm.
           selectInput("choose_cluster", "select cluster to enrich", 1, 1),
           # Annotation databases to query (default: GO only).
           # NOTE(review): inline=T uses the reassignable T alias -- prefer TRUE.
           checkboxGroupInput("enrich_db", "choose db", c("go", "kegg", "msigdb", "enrichr"), "go", inline=T),
           # Significance thresholds passed to the enrichment call.
           fluidRow(column(6, numericInput("enrich_pval_cutoff", "pval cutoff", 0.2, 0, 1)),
                    column(6, numericInput("enrich_qval_cutoff", "qval cutoff", 0.5, 0, 1))),
           # Trigger button, centered.
           div(style="display:inline-block; width:90%; text-align:center;",
               actionButton("enrich_apply", "Enriching", icon("far fa-edit"))),
           hr(),
           br(),
           # Display controls: number of categories shown and plot size (px).
           sliderInput("enrich_show_category", "show category number", 5, 50, 10, 5),
           sliderInput("enrich_plot_width", "plot width", 200, 1600, 800, 100),
           sliderInput("enrich_plot_height", "plot height", 200, 1600, 800, 100),
           hr(),
           br(),
           # Download handlers for the rendered plot (PDF) and result table.
           div(style="display:inline-block; width:48%; text-align:center;", downloadButton("enrich_plot", "PDF")),
           div(style="display:inline-block; width:48%; text-align:center;", downloadButton("enrich_data", "Data"))
         )
  ), # end od left side bar column
  # main plot area
  column(width=9,
         uiOutput("enrichment_plot_ui")
         # plotOutput("enrichment_plot")
  ) # end of column
)
|
934a1abb0a7259cc9d213945e4e2c5a071dfb481
|
8699df251aacbb56633c382f0f179136c0a9802c
|
/ui.R
|
54c04640991313edb93e4f1d23b28d44b7dd5230
|
[] |
no_license
|
michaelchang64/college_stats
|
d55470b6ed62bee0d74cc70172276a8ebbbfb9a9
|
f59f6803a43e9478f12d86a19634bd1aeb31372b
|
refs/heads/master
| 2021-01-21T19:36:23.019549
| 2017-08-07T17:14:32
| 2017-08-07T17:14:32
| 92,143,304
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,615
|
r
|
ui.R
|
# ui.R -- page layout for the College Admissions Data Visualizer.
# The axis-variable choices (`vars`) are defined in global.R.
library(shiny)
library(ggplot2)

# Left-hand control column: university picker plus the two dropdowns that
# choose the scatterplot axes from the columns of the college dataset.
controls <- sidebarPanel(
  helpText("Select a university to view admissions data."),
  selectInput(
    "uni",
    label = "Select a university",
    choices = list("Brown", "Berkeley", "Yale"),
    selected = "Brown"
  ),
  h4("Variables"),
  helpText("Compare two variables in a scatterplot"),
  # Axis selectors grouped in their own wellPanel, defaulting to the first
  # two entries of `vars`.
  wellPanel(
    selectInput("xvar", "X-axis variable", vars, selected = vars[1]),
    selectInput("yvar", "Y-axis variable", vars, selected = vars[2])
  )
)

# Central area: reactive heading, the scatterplot, and the raw data table.
results <- mainPanel(
  h2(textOutput("uni")),
  plotOutput("plot1"),
  dataTableOutput("dt")
)

# Assemble the page; the fluidPage value is what Shiny serves.
shinyUI(fluidPage(
  titlePanel(h1("College Admissions Data Visualizer")),
  sidebarLayout(controls, results)
))
|
a167752029ae36f22ae606173d56821a9b55abe2
|
c06ab34b8c9290bb5d42ed3a989cc4aeaa794356
|
/man/MT_F1.Rd
|
6404e2ea1d692f8a30d3bb6c13ce6865dfc55424
|
[
"MIT"
] |
permissive
|
Nowaysis/F1Stats
|
86fc1718a7bf33a1322cfa3c0235ce6a6a282fa0
|
db24584e41affcf28ad7614e678c23e47c46106c
|
refs/heads/main
| 2023-02-03T03:49:11.213775
| 2020-12-27T19:15:56
| 2020-12-27T19:15:56
| 323,680,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 294
|
rd
|
MT_F1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{MT_F1}
\alias{MT_F1}
\title{Dataset about football}
\format{
An object of class \code{list} of length 71.
}
\usage{
data(MT_F1)
}
\description{
Dataset about football
}
\keyword{datasets}
|
2c218029de1a64e4e8029fe5aa4469b723ccadf4
|
3fdfbd6728f19b4221129cc0d9637e2f1a4e0f3b
|
/tests/testthat/test-impute_sd.R
|
b559a79810f175ae96498fa8e80427aa672df7ef
|
[] |
no_license
|
billdenney/bsd.report
|
09e2b5642046ee51956d33199d6e1ae670b424ff
|
778f0298da10f6369bd7ffaa1a5c20d3f8f691fa
|
refs/heads/main
| 2023-02-19T22:49:25.273961
| 2023-02-15T21:54:41
| 2023-02-15T21:54:41
| 153,116,819
| 3
| 0
| null | 2023-02-15T22:57:48
| 2018-10-15T13:21:55
|
R
|
UTF-8
|
R
| false
| false
| 6,912
|
r
|
test-impute_sd.R
|
# Tests for the impute_sd() family. All imputers share the signature
# (point, var1, var2, n, vartype); argument vectors must have matching
# lengths, and length mismatches must raise an error both in the
# impute_sd() dispatcher and in every type-specific imputer.
context("impute_sd")

test_that("input checks work", {
  # impute_sd does not give an error for NA, so it is pulled out.
  expect_error(impute_sd(point=1:2, var1=1, var2=1, n=1, vartype="SD"))
  expect_error(impute_sd(point=1, var1=1:2, var2=1, n=1, vartype="SD"))
  expect_error(impute_sd(point=1, var1=1, var2=1:2, n=1, vartype="SD"))
  expect_error(impute_sd(point=1, var1=1, var2=1, n=1:2, vartype="SD"))
  expect_error(impute_sd(point=1, var1=1, var2=1, n=1, vartype=rep("SD", 2)))
  expect_error(impute_sd(point=1:3, var1=1:3, var2=1:3, n=1:3, vartype=rep("SD", 2)))
  # The type-specific imputers enforce the same length checks; unlike the
  # dispatcher, they also reject an NA vartype outright.
  for (current_fun in c(impute_sd_ci, impute_sd_cv, impute_sd_iqr, impute_sd_range, impute_sd_sd, impute_sd_se)) {
    # Mismatch in argument lengths
    expect_error(current_fun(point=1:2, var1=1, var2=1, n=1, vartype="SD"))
    expect_error(current_fun(point=1, var1=1:2, var2=1, n=1, vartype="SD"))
    expect_error(current_fun(point=1, var1=1, var2=1:2, n=1, vartype="SD"))
    expect_error(current_fun(point=1, var1=1, var2=1, n=1:2, vartype="SD"))
    expect_error(current_fun(point=1, var1=1, var2=1, n=1, vartype=rep("SD", 2)))
    expect_error(current_fun(point=1:3, var1=1:3, var2=1:3, n=1:3, vartype=rep("SD", 2)))
    expect_error(current_fun(point=1, var1=1, var2=1, n=1, vartype=NA_character_))
  }
})
# The dispatcher must route on the vartype string: each recognized value
# delegates to the matching impute_sd_*() helper ("CV", "%CV", "CV%" are
# aliases), an NA vartype imputes NA with a message, and vectors of mixed
# vartypes are handled element-wise.
test_that("imputation selects the correct method", {
  expect_equal(
    impute_sd(point=1, var1=1, var2=NA_real_, n=5, vartype="SD"),
    impute_sd_sd(point=1, var1=1, var2=NA_real_, n=5, vartype="SD")
  )
  expect_equal(
    impute_sd(point=1, var1=0.5, var2=NA_real_, n=5, vartype="90% CI"),
    impute_sd_ci(point=1, var1=0.5, var2=NA_real_, n=5, vartype="90% CI")
  )
  # All three CV spellings route to impute_sd_cv().
  expect_equal(
    impute_sd(point=1, var1=5, var2=NA_real_, n=5, vartype="CV"),
    impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="CV")
  )
  expect_equal(
    impute_sd(point=1, var1=5, var2=NA_real_, n=5, vartype="%CV"),
    impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="%CV")
  )
  expect_equal(
    impute_sd(point=1, var1=5, var2=NA_real_, n=5, vartype="CV%"),
    impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="CV%")
  )
  expect_equal(
    impute_sd(point=1, var1=0.5, var2=1.5, n=5, vartype="IQR"),
    impute_sd_iqr(point=1, var1=0.5, var2=1.5, n=5, vartype="IQR")
  )
  # RANGE warns about its distributional assumption in both paths.
  expect_equal(
    expect_warning(impute_sd(point=1, var1=0, var2=2, n=5, vartype="RANGE")),
    expect_warning(impute_sd_range(point=1, var1=0, var2=2, n=5, vartype="RANGE"))
  )
  # Missing vartype: imputed as NA, accompanied by a message.
  expect_equal(
    expect_message(
      impute_sd(point=1, var1=0.5, var2=1.5, n=5, vartype=NA),
      "is imputed as NA"
    ),
    NA_real_
  )
  # Vectorized call: each element uses its own vartype; the NA element
  # still triggers the message and yields NA.
  expect_equal(
    expect_message(
      impute_sd(point=c(1, 1, NA), var1=c(5, 0.5, NA), var2=c(NA_real_, 1.5, NA), n=c(5, 5, NA), vartype=c("CV", "IQR", NA)),
      "is imputed as NA"
    ),
    c(
      impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="%CV"),
      impute_sd_iqr(point=1, var1=0.5, var2=1.5, n=5, vartype="IQR"),
      NA_real_
    )
  )
})
# SD input passes through unchanged; var2 must be unused (NA).
test_that("sd imputation works", {
  expect_error(
    impute_sd_sd(point=1, var1=1, var2=NA_real_, n=5, vartype="foo"),
    info="correct vartype"
  )
  expect_error(
    impute_sd_sd(point=1, var1=1, var2=1, n=5, vartype="SD"),
    info="no var2"
  )
  expect_equal(
    impute_sd_sd(point=1, var1=1, var2=NA_real_, n=5, vartype="SD"),
    1
  )
})

# CV is interpreted as a percentage: SD = point * CV/100. A CV that already
# looks fractional (< 1) is still divided by 100, but with a warning.
test_that("cv imputation works", {
  expect_error(
    impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="foo"),
    info="correct vartype"
  )
  expect_error(
    impute_sd_cv(point=1, var1=5, var2=1, n=5, vartype="CV"),
    info="no var2"
  )
  expect_equal(
    impute_sd_cv(point=1, var1=5, var2=NA_real_, n=5, vartype="CV"),
    0.05
  )
  expect_equal(
    expect_warning(
      impute_sd_cv(point=1, var1=0.05, var2=NA_real_, n=5, vartype="CV"),
      info="fractional input"
    ),
    0.0005
  )
})

# SE scales back to SD as SE * sqrt(n - 1): here 1 * sqrt(4) for n = 5.
test_that("se imputation works", {
  expect_error(
    impute_sd_se(point=1, var1=1, var2=NA_real_, n=5, vartype="foo"),
    info="correct vartype"
  )
  expect_error(
    impute_sd_se(point=1, var1=1, var2=1, n=5, vartype="SE"),
    info="no var2"
  )
  expect_equal(
    impute_sd_se(point=1, var1=1, var2=NA_real_, n=5, vartype="SE"),
    1*sqrt(4)
  )
})
# CI input: vartype must match "<level>% CI"; var1/var2 are the lower/upper
# bounds (lower <= point <= upper, strictly ordered where non-missing).
# SD = mean half-width / qt((1 + level)/2, df) -- note these expectations
# use qt(0.95, 5) with n = 5, i.e. df = n rather than n - 1.
test_that("ci imputation works", {
  expect_error(
    impute_sd_ci(point=1, var1=1, var2=1, n=5, vartype="foo"),
    regexp="vartype must match the regular expression",
    info="correct vartype"
  )
  expect_error(
    impute_sd_ci(point=1, var1=1, var2=2, n=5, vartype="90% CI"),
    regexp="`var1` must be <= `point`", fixed=TRUE,
    info="var1 == point"
  )
  expect_error(
    impute_sd_ci(point=1, var1=2, var2=2, n=5, vartype="90% CI"),
    regexp="`var1` must be <= `point`", fixed=TRUE,
    info="var1 > point"
  )
  expect_error(
    impute_sd_ci(point=1, var1=0, var2=1, n=5, vartype="90% CI"),
    regexp="`var2` must be >= `point`", fixed=TRUE,
    info="var2 == point"
  )
  expect_error(
    impute_sd_ci(point=1, var1=0, var2=0, n=5, vartype="90% CI"),
    regexp="`var2` must be >= `point`", fixed=TRUE,
    info="var2 < point"
  )
  expect_equal(
    impute_sd_ci(point=1, var1=0, var2=2, n=5, vartype="90% CI"),
    1/qt(0.95, 5)
  )
  # A one-sided bound (the other NA) still yields an estimate.
  expect_equal(
    impute_sd_ci(point=1, var1=NA_real_, var2=2, n=5, vartype="90% CI"),
    1/qt(0.95, 5),
    info="NA works in var1"
  )
  expect_equal(
    impute_sd_ci(point=1, var1=0, var2=NA_real_, n=5, vartype="90% CI"),
    1/qt(0.95, 5),
    info="NA works in var2"
  )
  # Asymmetric interval: the lower and upper half-widths are averaged.
  expect_equal(
    impute_sd_ci(point=1, var1=0, var2=3, n=5, vartype="90% CI"),
    1.5/qt(0.95, 5),
    info="average of lower and upper is returned"
  )
})

# IQR input: var1/var2 are the quartiles. A degenerate or one-sided spread
# warns about the distributional assumption and imputes NA; otherwise
# SD = IQR / (2 * qt(0.75, df)) -- again with df = n here.
test_that("iqr imputation works", {
  expect_error(
    impute_sd_iqr(point=1, var1=1, var2=NA_real_, n=5, vartype="foo"),
    info="correct vartype"
  )
  expect_equal(
    expect_warning(
      impute_sd_iqr(point=1, var1=1, var2=1, n=5, vartype="IQR"),
      regexp="distributional assumption",
      fixed=TRUE
    ),
    NA_real_
  )
  expect_equal(
    expect_warning(
      impute_sd_iqr(point=1, var1=0, var2=1, n=5, vartype="IQR"),
      regexp="distributional assumption",
      fixed=TRUE
    ),
    NA_real_
  )
  expect_equal(
    impute_sd_iqr(point=1, var1=0, var2=2, n=5, vartype="IQR"),
    2/(2*qt(p=0.75, df=5))
  )
})
test_that("range imputation works", {
expect_error(
expect_warning(impute_sd_range(point=1, var1=1, var2=NA_real_, n=5, vartype="foo")),
info="correct vartype"
)
expect_equal(
expect_warning(
impute_sd_range(point=1, var1=1, var2=1, n=5, vartype="RANGE"),
regexp="distributional assumption",
fixed=TRUE
),
NA_real_
)
expect_equal(
expect_warning(
impute_sd_range(point=1, var1=0, var2=1, n=5, vartype="RANGE"),
regexp="distributional assumption",
fixed=TRUE
),
NA_real_
)
expect_equal(
expect_warning(impute_sd_range(point=1, var1=0, var2=2, n=5, vartype="RANGE")),
2/4
)
})
|
29a993556cb9d10547d50d3d3fa694c8d059b38f
|
4592e2fef5a229a035ea6d1d5158c0f6efe58f9e
|
/inst/scripts/dashboard-run-locally.R
|
a1fed0ef205a20f22db739e597c58a611b86ef0b
|
[
"MIT"
] |
permissive
|
harell/caret.explainer
|
b199ec3da0fe52bc0bb3c5add6f7e0c7e6d2086b
|
39c69d928b6672c7814ab27531abec7ef372808c
|
refs/heads/master
| 2021-02-06T03:09:28.444649
| 2020-06-26T01:37:55
| 2020-06-26T01:37:55
| 243,869,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
dashboard-run-locally.R
|
# Setup -------------------------------------------------------------------
# Load the package under development (skipping helper files) and stage a
# copy of the dashboard sources into a temporary directory for a local run.
pkgload::load_all(path = ".", helpers = FALSE, quiet = TRUE)
# Source directory is expected to be configured via options(path_dashboard = ...)
dashboard_source <- getOption("path_dashboard")
dashboard_target <- normalizePath(file.path(tempdir(), "dashboard"))
Dashboard$utils$prepare_app_files(dashboard_source, dashboard_target)
# App Information ---------------------------------------------------------
# shell.exec(dashboard_target)
# sort(rsconnect::appDependencies()$packages)
# Run Shiny ---------------------------------------------------------------
# Run the staged app with autoloading of R/ files and the reactlog enabled;
# withr::local_options reverts both options when runApp() returns.
withr::local_options(
  list(shiny.autoload.r = TRUE, shiny.reactlog = TRUE),
  shiny::runApp(appDir = dashboard_target)
)
|
219361ef32a5208d9511d1c11be4e8203a0da74d
|
1a5e14f3049b4a16a2b3ba69aa750d5abb80c317
|
/R/ipwlm.R
|
716326764bc312e13d7a0d2903e91c0c97adfc9c
|
[] |
no_license
|
drmiller1220/attritR
|
757d3c5eb14ca422b3f98d4941ae3665873aeeae
|
6e32eba77b398e0ac2932a2d0a648b24fecf3b30
|
refs/heads/master
| 2022-07-21T20:43:42.299490
| 2020-05-17T22:13:18
| 2020-05-17T22:13:18
| 264,313,939
| 0
| 0
| null | 2020-05-19T00:33:56
| 2020-05-15T22:43:14
|
R
|
UTF-8
|
R
| false
| false
| 13,881
|
r
|
ipwlm.R
|
#' Estimating treatment effects given non-random attrition
#'
#' \code{ipwlm} estimates the average treatment effect --- either among
#' respondents (ATE|R = 1) or among the population (ATE) --- under various
#' conditions of non-random attrition (attrition due to treatment alone,
#' attrition due to treatment conditioned on observable confouding, or attrition
#' due to treatment conditioned on observable and unobservable confounding).
#'
#' @param regression_formula An object of class \code{formula} (or one that can
#' be coerced to that class); a symbolic description of the model to be fitted.
#' \code{regression_formula} is the model which will be used to estimate the
#' average treatment effect accounting for non-random attrition. Formula must be
#' of the form Y ~ D + ..., where Y is the outcome, D is the treatment, and ...
#' represents any additional covariates, including an instrumental variable when
#' weighting for attrition on unobservables.
#' @param treatment A string specifying the treatment variable in the regression.
#' The string must match the name of the treatment variable exactly as it is
#' passed into \code{regression_formula}.
#' @param instrument A string specifying the instrumental variable in the
#' regression, if one is included. The string must match the name of the
#' treatment variable exactly as it is passed into \code{regression_formula}. If
#' no instrumental variable is used, the argument input should remain \code{NULL}.
#' @param data A data.frame which contains all variables specified in
#' \code{regression_formula}.
#' @param effect_type A string specifying the type of treatment effect to be
#' estimated. Input must be one of \code{"respondent"} or \code{"population"},
#' which refer to the average treatment effect among respondents (ATE|R = 1) and
#' the average treatment effect among the population (ATE), respectively.
#' @param attrition_type A string specifying the assumed effect of attrition.
#' Input must be one of \code{"treatment"}, \code{"observable"}, or
#' \code{"unobservable"}. These inputs refer, in order, to attrition due to
#' treatment alone, attrition due to treatment and observable confounders, and
#' attrition due to treatment, observable confounders, and unobservable
#' confounders. If \code{"unobservable"} is specified, an instrumental variable
#' must be included in \code{regression_formula} and its name must be noted in
#' \code{instrument}.
#' @param response_weight_formula An object of class \code{formula} (or one that
#' can be coerced to that class); a symbolic description of the model to be
#' fitted. \code{response_weight_formula} specifies the model for estimating
#' respondents' response propensity, which is used in weighting treatment effect
#' estimates that account for non-random attrition. By default, the formula
#' includes all covariates on the right-hand side of \code{regression_formula}.
#' @param response_weight_method A character string specifying the error
#' distribution and link function to be used in the generalized additive model
#' which estimates subjects' response propensity. By default,
#' \code{response_weight_method} is set to \code{binomial(link = logit)}, which
#' fits a logistic regression model. Any family function that \code{gam} accepts
#' can be specified here.
#' @param treatment_weight_formula An object of class \code{formula} (or one that
#' can be coerced to that class); a symbolic description of the model to be
#' fitted. \code{treatment_weight_formula} specifies the model for estimating
#' respondents' treatment propensity, which is used in weighting treatment effect
#' estimates that account for non-random attrition. By default, the formula
#' includes only the confounder covariates included on the right-hand side of
#' \code{regression_formula}. When accounting for attrition on unobservables,
#' response propensity weights are also included on the right-hand side of the
#' formula.
#' @param treatment_weight_method A character string specifying the error
#' distribution and link function to be used in the generalized additive model
#' which estimates subjects' response propensity. By default,
#' \code{response_weight_method} is set to \code{binomial(link = logit)}, which
#' fits a logistic regression model. Any family function that \code{gam} accepts
#' can be specified here.
#' @param n_bootstraps Numeric value defining the number of non-parametric
#' bootstrap samples to be computed. The default number of bootstrap
#' replications is 1000.
#' @param quantiles Vector of two numeric values between 0 and 1 specifying the
#' quantiles at which non-parametric bootstrapped confidence intervals are to be
#' returned. By default, \code{quantiles} are set to 0.05 and 0.95, which
#' corresponds with a 90\% confidence interval.
#' @param n_cores Numeric value indicating the number of cores to allocate to
#' running the function. See \code{parallel} for details.
#'
#' @details
#' The function estimates the treatment effect given non-random attrition, and
#' uses non-parametric bootstrapping to estimate uncertainty.
#'
#' @references Huber, Martin (2012): "Identification of Average Treatment Effects
#' in Social Experiments Under Alternative Forms of Attrition.", Journal of
#' Educational and Behavioral Statistics, vol. 37, no. 3, 443-474.
#'
#' @return A list of five elements containing the following:
#' \item{coefficients}{A vector containing the estimated treatment effect,
#' bootstrapped standard error, and a bootstrapped confidence interval.}
#' \item{weights}{A data.frame containing the response propensity weights, the
#' treatment propensity weights, and the inverse probability weights used to
#' estimate the treatment effect.}
#' \item{effect}{A string noting the \code{effect_type} specified for the given
#' estimate.}
#' \item{attrition}{A string noting the \code{attrition_type} specified for the
#' given estimate.}
#' \item{formulae}{A list containing the precise formulae that specify the
#' treatment effect function, the response propensity function, and the treatment
#' propensity function.}
#' @author Ryden Butler and David Miller. Special thanks to Jonas Markgraf and Hyunjoo Oh.
#'
#' @rdname ipwlm
#' @import 'parallel'
#' @import 'gam'
#' @export
#'
#'
ipwlm <- function(regression_formula,
                  treatment,
                  instrument = NULL,
                  data,
                  effect_type,
                  attrition_type, # "treatment", "observable", "unobservable"
                  response_weight_formula = response ~ .,
                  response_weight_method = binomial(link = logit),
                  treatment_weight_formula = data[treatment] ~ .,
                  treatment_weight_method = binomial(link = logit),
                  n_bootstraps = 1000,
                  quantiles = c(0.05, 0.95),
                  n_cores = 1) {
  # Estimator taxonomy (Huber 2012):
  # prop 0: attrition caused by treatment (ATE|R = 1): effect_type = "respondent", attrition_type = "treatment"
  # prop 1: attrition caused by treatment (ATE): effect_type = "population", attrition_type = "treatment"
  # prop 2: attrition caused by treatment and observables (ATE|R = 1): effect_type = "respondent", attrition_type = "observable"
  # prop 3: attrition caused by treatment and observables (ATE): effect_type = "population", attrition_type = "observable"
  # prop 4: attrition caused by treatment, observables, and unobservables (ATE|R = 1): effect_type = "respondent", attrition_type = "unobservable"
  # prop 5: attrition caused by treatment, observables, and unobservables (ATE): effect_type = "population", attrition_type = "unobservable"
  # store data from model for manipulation (keep NA outcomes: they mark attrition)
  internal_data <- model.frame(regression_formula, data, na.action = NULL)
  # make attrition indicator (response = 1; non-response = 0)
  # NOTE: the original comment had this inverted -- the code sets 1 when the
  # outcome is observed.
  internal_data$response <- as.numeric(!is.na(internal_data[ , 1]))
  # rename relevant variables so downstream formulas can refer to them
  names(internal_data)[1] <- 'outcome'
  names(internal_data)[which(names(internal_data) == treatment)] <- 'treatment'
  names(internal_data)[which(names(internal_data) == instrument)] <- 'instrument'
  ### Calculate Weights for Relevant Estimator
  # Input validation (fail fast; scalar conditions use %in% / && rather than
  # the element-wise & used originally)
  if (!effect_type %in% c("population", "respondent")) {
    stop('effect_type must be either "population" or "respondent"')
  }
  if (!attrition_type %in% c("treatment", "observable", "unobservable")) {
    stop('attrition_type must be one of "treatment" or "observable" or "unobservable"')
  }
  # weighting for attrition on unobservables requires an instrument
  if (attrition_type == "unobservable" && is.null(instrument)) {
    stop('weighting for attrition on unobservables requires specification on instrument')
  }
  # if estimating ATE or conditioning on unobservables, compute meaningful response weight
  if (effect_type == "population" || attrition_type == "unobservable") {
    response_propensity <- gam(formula = response_weight_formula,
                               family = response_weight_method,
                               data = internal_data[ , !(names(internal_data) == 'outcome')],
                               maxit = 1000)
    response_weights <- predict(object = response_propensity, type = "response")
    response_weights <- response_weights/sum(response_weights)
    # if conditioning treatment propensity on response weights
    if (attrition_type == "unobservable") {
      internal_data$response_conditioning <- response_weights
    } else {
      internal_data$response_conditioning <- NULL
    }
  }
  # treatment weights for props 2-5; formula determined by attrition_type
  treatment_propensity <- gam(formula = treatment_weight_formula,
                              family = treatment_weight_method,
                              data = internal_data[ , !(names(internal_data) %in% c('outcome', 'instrument', 'response'))],
                              maxit = 1000)
  treatment_weights <- predict(object = treatment_propensity, type = "response")
  # reorient the treatment_weight for units in control (propensity of the
  # condition actually received)
  treatment_weights[internal_data$treatment != 1] <- (1 - treatment_weights[internal_data$treatment != 1])
  treatment_weights <- treatment_weights/sum(treatment_weights)
  # if estimating ATE|R, multiplying by response weights is trivial
  if (effect_type == 'respondent') {
    response_weights <- 1
  }
  # if disregarding treatment propensity
  if (attrition_type == 'treatment') {
    treatment_weights <- 1
  }
  # add inverse probability weights to model data
  internal_data$final_weights <- 1/(treatment_weights * response_weights)
  ### Estimate Treatment Effect for Relevant Estimator
  treatment_effect_model <- lm(formula = outcome ~ treatment,
                               weights = final_weights,
                               data = internal_data)
  ### Estimate n Bootstraps of Relevant Estimator
  bootstrap_cluster <- makeCluster(n_cores)
  clusterExport(bootstrap_cluster, c('internal_data',
                                     'n_bootstraps'),
                envir = environment())
  # Each replicate resamples the FULL sample (respondents and attriters) with
  # replacement and refits the weighted regression; the treatment coefficient
  # is collected.
  np_bootstrap <- parSapply(bootstrap_cluster, seq_len(n_bootstraps),
                            FUN = function(n) {
                              bootstrap <- lm(formula = outcome ~ treatment,
                                              weights = final_weights,
                                              data = internal_data[sample(nrow(internal_data), replace = TRUE), ])
                              return(bootstrap$coefficients[[2]])
                            })
  stopCluster(cl = bootstrap_cluster)
  ### Print Model Summary & Return Results
  confidence_interval <- quantile(np_bootstrap, probs = quantiles, na.rm = TRUE)
  # Summary table printed for interactive use
  ResultsPrint <- data.frame('Estimate' = treatment_effect_model$coefficients[[2]],
                             'SE' = sd(np_bootstrap),
                             'CI Lower' = confidence_interval[1],
                             'CI Upper' = confidence_interval[2],
                             row.names = NULL)
  cat('--- Treatment Effect with Uncertainty Estimates from', n_bootstraps, 'Non-Parametric Bootstraps ---\n',
      'Summary:\n')
  print(ResultsPrint)
  # inherits = FALSE guards against accidentally picking up a same-named
  # object from an enclosing environment when the respondent/treatment or
  # respondent/observable paths skipped the response-propensity fit.
  return(list(coefficients = c('treatment' = treatment_effect_model$coefficients[[2]],
                               'se' = sd(np_bootstrap),
                               'lower_ci' = confidence_interval[1],
                               'upper_ci' = confidence_interval[2]),
              weights = data.frame(Response = response_weights,
                                   Treatment = treatment_weights,
                                   IPW = internal_data$final_weights),
              effect = effect_type,
              attrition = attrition_type,
              formulae = list(TreatmentEffect = formula(treatment_effect_model),
                              ResponsePropensity = if (exists('response_propensity', inherits = FALSE)) formula(response_propensity),
                              TreatmentPropensity = formula(treatment_propensity))
  ))
}
|
c2e8a78beb69a581646b2df4f9ed90e989618a7a
|
b35a0c412ea0fb6e214029fd0db3bc41a5a93ed3
|
/PCA/get2D3DmatForPCA_script.R
|
c9f9e46da610f7c5edb8676410d32cb8b8d137aa
|
[] |
no_license
|
skiamu/StatApp_test
|
83bd65300cd0133defb03a1d455698b7aee1b703
|
2ee23ccc8773352709e58421d6fc3b40943b20c5
|
refs/heads/master
| 2021-01-18T16:13:12.603196
| 2017-06-24T16:13:03
| 2017-06-24T16:13:03
| 86,727,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,473
|
r
|
get2D3DmatForPCA_script.R
|
# extract 2D and 3D matrix for PCA
# IMPORTANT: changed read_data.R, rerun it
# [added a column in Country to match CountryName (TableName was not working),
# added two dataframes to pass from Name to Code and vice versa for ind and cnt]
# 00 preliminaries ----
# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; consider an RStudio project or the here package.
setwd("C:/Users/Leonardo/Desktop/POLIMI/ATTUALI/Stat App/Progetto/StatApp_test_loc")
#source("ReadData/read_data.R") #It could give some problems with dplyr-plyr
#read_data("C:/Users/Leonardo/Desktop/POLIMI/ATTUALI/Stat App/Progetto/Playground")
load("ReadData/data.RData")
#library(reshape2) # dcast
library(dplyr) # %>%
source('Filters/functionsFullMatrix.R') # extract2DmatrixWithFullestIndicators, fullness
source('function_extract.R') # getIndicators, getCntInd, getIndYear, uniCnt, get3D
#source("outlier.R") # find_outlier
#source("PCA/PCA_function.R") # PC
source("function_name.R") # name2codeCnt, ...
# 01 from a previous analysis ----
# fixing the year (fixYear) select the best M indicators after filtering out the countries
# with a number of indicators below Tind
df <- extract2DmatrixWithFullestIndicators(Indicators,
                                           M=200,
                                           fixYear=2010,
                                           viewFlag=F,
                                           Tind=400)
# manual selection of indicators (among the fullest)
myInd <- c('GDP (current LCU)',
           'Health expenditure per capita (current US$)',
           'Population, total',
           'Birth rate, crude (per 1,000 people)',
           'CO2 emissions (kt)',
           'Number of infant deaths')
# keep only the countries with at least Tind indicators in 2010
myCnt <- df$CountryName %>% unique()
# years
myYears <- c(2005:2010)
# 02 extract from indicators ----
# filter Indicators down to the selected countries/indicators/years
indF <- getIndicators(myYear = myYears,
                      myCnt = myCnt,
                      myInd = myInd)
# get some 2D matrices
# Cnt vs Ind for 2010
dcCntInd <- getCntInd(indF, 2010, dropNA = T, showCnt = T)
# Ind vs Year for Italy
dcIndYear <- getIndYear(indF, 'Italy', dropNA = T, showInd = T)
# Cnt vs Year for 'GDP (current LCU)'
dcCntYear <- getCntYear(indF, 'GDP (current LCU)', dropNA = T, showCnt = T)
# View(dcCntInd); View(dcIndYear); View(dcCntYear)
# REM: you may create similar functions with different combinations of rows and columns
# drop the countries which have a missing value for at least one year and one indicator
indFull <- unifCnt(indF) # it's Indicators-like and the corresponding 3D matrix is full
# is the matrix full? (sanity check printed to the console)
fullness(indFull) == 1
fullness(indF) == 1
# get 3D matrices
indFull3D <- get3D(indFull, myYears) # full
indF3D <- get3D(indF, myYears) # some NA, still working, only a warning -- be aware
# to extract from the list: listName[[pos]]
dc1 <- indFull3D[[1]]
# 03 try the change name-code ----
# round-trip Name -> Code -> Name should recover the original sets
myCnt_code <- name2codeCnt(myCnt, cntNameCode)
myCnt_name <- code2nameCnt(myCnt_code, cntNameCode)
setequal(myCnt_name,myCnt) # it works
myInd_code <- name2codeInd(myInd, indNameCode)
myInd_name <- code2nameInd(myInd_code, indNameCode)
setequal(myInd_name,myInd) # it works
# 04 perform PCA and further analysis [incomplete] ----
# remove outlier
# w <- find_outlier(w, remove = T)
# PCA
# pc_data1<-PC(w,method="eigen",scaled=T,graph=T,rm.na=T,print.results=T)
|
a46ab9152642b677665443c59ed7bb4176524d53
|
8ef127711f234f6becd9beb6d64fbdaa93879500
|
/evaluation/time-synchronization.R
|
fd36f92803b42950bedd4f6f19f6817b5b216246
|
[] |
no_license
|
DSoko2/i3ql-measurements
|
dca70577aca506741c7b14785a609bf10a5112d1
|
0658d8aeb039e0026520f1f244b038714b7e862c
|
refs/heads/master
| 2022-01-27T16:28:14.640760
| 2019-07-31T15:44:56
| 2019-07-31T15:44:56
| 156,348,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,697
|
r
|
time-synchronization.R
|
# Computes one time offset per (node, execution) pair: the mean timestamp of
# all "section.<name>.enter" events, later used as the reference point when
# aligning clocks across nodes.
evalTimeOffsets <- function(eventData) {
  enterEvents <- filter(eventData, grepl("^section\\..*\\.enter$", event))
  perNodeRun <- group_by(enterEvents, node, execution)
  offsets <- summarize(perNodeRun, timeOffset = mean(time))
  return(ungroup(offsets))
}
# Provides evaluation data about the error of each section entry over all nodes:
# per (section, execution), summary statistics of the section-enter timestamps
# across nodes, indicating how precisely the clocks are aligned.
timeOffsetSectionPrecision <- function() {
  sectionPrecision <- prepareSectionDuration() %>%
    group_by(section, execution) %>%
    summarise(
      min = min(enterTime),
      QT1 = quantile(enterTime, probs = 0.25),
      median = median(enterTime),
      mean = mean(enterTime),
      QT3 = quantile(enterTime, probs = 0.75),
      max = max(enterTime),
      sd = sd(enterTime)
    ) %>%
    # BUG FIX: the original had a bare `ungroup` on its own line with no pipe,
    # so it was dead code and the result was returned still grouped by `section`.
    ungroup()
  return(sectionPrecision)
}
# Provides evaluation data about the error of all section enters over all nodes.
# First pass: within each (execution, section), the per-node error is the
# deviation of that node's enter time from the cross-node mean. Second pass:
# summary statistics of those errors, aggregated per execution.
timeOffsetPrecision <- function() {
  precision <- prepareSectionDuration() %>%
    group_by(execution, section) %>%
    # mean(enterTime) here is the per-group mean thanks to the grouping above
    mutate(error = mean(enterTime) - enterTime) %>%
    ungroup %>%
    group_by(execution) %>%
    summarise(
      min = min(error),
      QT1 = quantile(error, probs = 0.25),
      median = median(error),
      mean = mean(error),
      QT3 = quantile(error, probs = 0.75),
      max = max(error),
      sd = sd(error)
    ) %>%
    ungroup
  return(precision)
}
# Applies the per-(node, execution) offsets to the `time` column of `data`
# and returns the rows sorted by the adjusted time. The joined `timeOffset`
# column remains in the result, matching the original behavior.
applyTimeOffsets <- function(data, timeOffsets) {
  joined <- left_join(data, timeOffsets, by = c("node", "execution"))
  shifted <- mutate(joined, time = time - timeOffset)
  return(arrange(shifted, time))
}
# Evaluates the start and the end of a measurement per execution, by taking
# the timestamps of the first and last latency (or measurement-exit) event.
evalTimeStartEnd <- function(alignedEventData) {
  startEnd <-
    # keep only latency events plus the explicit measurement-exit marker
    filter(alignedEventData, grepl("^latency\\..*$|^section\\.measurement\\.exit$", event)) %>%
    group_by(execution) %>%
    summarize(startTime = min(time), endTime = max(time)) %>%
    ungroup
  return(startEnd)
}
# Adjusts all times so that each execution's measurement starts at 0, then
# drops the helper columns brought in by the join.
applyStartOffsets <- function(data, timeStartEnd) {
  adjustedData <-
    data %>%
    left_join(timeStartEnd, by = "execution") %>%
    mutate(time = time - startTime) %>%
    # Single select with all_of() replaces the two chained, superseded
    # one_of() calls; the columns are guaranteed present by the join above.
    select(-all_of(c("startTime", "endTime")))
  return(adjustedData)
}
# Drops all rows from data which have a timestamp outside the start and end
# of the measurement for their execution.
# NOTE(review): unlike applyStartOffsets, the joined startTime/endTime
# columns are NOT removed here and remain in the returned data -- confirm
# whether callers rely on that.
applyTimeStartEnd <- function(data, timeStartEnd) {
  adjustedData <-
    data %>%
    left_join(timeStartEnd, by = "execution") %>%
    filter(time >= startTime & time <= endTime)
  return(adjustedData)
}
|
e3d815537f0bb0ff35e544d82f405898d66d40ab
|
703d6573ce0d626cf46f1ba9be67517ae52d6665
|
/Stat251Project/STAT251_Final_Project.R
|
6259ee09801b102453ade360aa511eeb64d4a5cb
|
[] |
no_license
|
cam4socal/Stat251Project
|
5e67a984db3b4cbf41ae21ce8c06517db603056b
|
428c6b44cf94eec6717617da630ef49d2f51673e
|
refs/heads/master
| 2020-04-11T05:09:37.272240
| 2018-12-12T20:17:55
| 2018-12-12T20:17:55
| 161,539,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,818
|
r
|
STAT251_Final_Project.R
|
##############################
### STAT 251 Final Project ###
####### Dr. Garitt Page ######
#### Fall 2018, Section 1 ####
## A. Hamilton & C. Timpson ##
##############################
library(dplyr)
bakery <- read.csv("C:/Users/cam4s/Documents/STAT251/STAT 251/bakery.csv", header = TRUE)
head(bakery)
tail(bakery)
# Dataset contains transactions from 10/30/2016 - 4/9/2017
# Adjusting the date formatting
class(bakery$Date)
# NOTE(review): this converts Date to a *character* column in "%m/%d/%Y"
# form; later subsetting compares these strings lexicographically, which only
# works for dates within the same month -- consider keeping Date as a Date.
bakery$Date <- format(as.Date(bakery$Date, format = "%Y-%m-%d"), "%m/%d/%Y")
class(bakery$Date)
head(bakery)
# Removing unnecessary items (placeholders, adjustments, non-food rows)
dontNeed <- c("NONE", "Adjustment", "The BART", "Fairy Doors", "Bowl Nic Pitt", "Siblings",
              "Christmas common", "Mortimer")
bakery <- subset(bakery, !Item %in% dontNeed)
# List of all unique item types
list_items <- unique(bakery$Item)
list_items
# This vector will not be utilized; it's merely a reference for all items not removed from the dataframe nor classified as desserts
notDesserts <- c("Bread", "Coffee", "Tea", "Mineral water", "Juice", "Frittata", "Hearty & Seasonal", "Soup", "Mighty protein",
                 "Chicken sand", "My-5 Fruit Shoot", "Focaccia", "Sandwich", "Eggs", "Granola", "Chimichurri Oil", "Bacon",
                 "Kids biscuit", "Olum & polenta", "Polenta", "The Nomad", "Hack the stack", "Toast", "Bare Popcorn", "Muelsi",
                 "Crisps", "Pintxos", "Brioche and salami", "Afternoon with the baker", "Salad", "Chicken Stew", "Spanish Brunch",
                 "Extra Salami or Feta", "Duck egg", "Baguette", "Tshirt", "Vegan Feast", "Nomad bag", "Coffee granules",
                 "Drinking chocolate spoons", "Argentina Night", "Half slice Monster", "Gift voucher", "Raw Bars", "Tacos/Fajita",
                 "Farm House", "Scandinavian", "Tartine", "Ella's kitchen pouches", "Hot chocolate", "Basket", "Smoothies", "Coke",
                 "Empanadas", "Art Tray", "Valentine's card", "Postcard")
# Vector of items classified as desserts or holiday treats
desserts <- c("Cake", "Cookies", "Fudge", "Lemon and coconut", "Muffin", "Pastry", "Victorian Sponge", "Alfajores",
              "Brownies", "Dulce de Leche", "Keeping It Local", "Bread Pudding", "Truffles", "Caramel bites",
              "Jammie Dodgers", "Tiffin", "Bakewell", "Scone", "Vegan mincepie", "Raspberry shortbread sandwich",
              "Panatone", "Chocolates", "Medialuna", "Jam", "Pick and Mix Bowls", "Honey", "Spread", "Crepes",
              "Gingerbread syrup", "Cherry me Dried fruit")
# --- Dessert counts for the two holiday weeks ---------------------------
# The original code repeated the same subset()/sum() pair 14 times (one per
# day); this helper computes the daily counts in one vectorized pass.
# Count, for each date in `dates`, how many rows of `df` fall on that date
# AND have an Item in `items`. Returns an unnamed integer vector, one count
# per date (in the order of `dates`).
count_items_by_date <- function(df, dates, items) {
  vapply(
    dates,
    function(d) sum(df$Date == d & df$Item %in% items),
    integer(1),
    USE.NAMES = FALSE
  )
}
# Week before Thanksgiving (11/18 - 11/24, matching the script's "%m/%d/%Y"
# character dates). Explicit date lists avoid the fragile lexicographic
# string-range comparison of the original.
thx_dates <- format(seq(as.Date("2016-11-18"), as.Date("2016-11-24"), by = "day"), "%m/%d/%Y")
bakery_thx_week <- subset(bakery, Date %in% thx_dates)
thx_week_dessert_count <- count_items_by_date(bakery, thx_dates, desserts)
# Week before Christmas (12/18 - 12/24)
xmas_dates <- format(seq(as.Date("2016-12-18"), as.Date("2016-12-24"), by = "day"), "%m/%d/%Y")
bakery_xmas_week <- subset(bakery, Date %in% xmas_dates)
xmas_week_dessert_count <- count_items_by_date(bakery, xmas_dates, desserts)
# Function created for checking item counts.
# Compares how often `item_search` appears in the whole dataset vs. in the
# single-day subset `day`. `store_data` defaults to the global `bakery`
# data.frame for backward compatibility, but can (and should) be passed
# explicitly to avoid relying on global state.
count_check <- function(item_search, day, store_data = bakery) {
  all_days <- sum(store_data$Item == item_search)  # whole observation period
  day_count <- sum(day$Item == item_search)        # selected day only
  return(paste("Store Total: ", all_days, " On selected day: ", day_count))
}
# Starting the poisson-gamma distributions
# Conjugate prior: theta ~ Gamma(a_prior, b_prior), so E[theta] = a/b.
a_prior <- 5
b_prior <- 10
prior_mean <- a_prior/b_prior
prior_mean
# Prior distribution information
# NEED TO COME UP WITH
plot(seq(0,100,length.out = 5000),dgamma(seq(0,100,length.out = 5000),5.1,1.25),type = "l", xlim = c(0,15), ylim = c(0,.6), xlab = "theta", ylab = "Probability Density", main = "Posterior Distribution", col = "gray")
lines(seq(0,100,length.out = 5000),dgamma(seq(0,100,length.out = 5000),5.1 + 5,1.25 + 1), col = "black")
legend("topright",legend = c("Prior", "Posterior"), col = c("gray", "black"), lty = c(1,1), cex = .8)
# Looking at the dessert counts during holiday weeks
thx_week_dessert_count
xmas_week_dessert_count
# Lengths of holiday dessert count arrays
# BUG FIX: removed the line `accident_sum <- sum(accident)` that was here --
# `accident` is never defined anywhere in this script (leftover from a class
# example), so the line aborted execution with "object 'accident' not found".
nThx <- length(thx_week_dessert_count)
nXmas <- length(xmas_week_dessert_count)
# Gamma(a, b) prior for the Poisson rate in the before/after comparison below
a <- 5
b <- .2
nreps <- 10000
# Observed daily counts before/after the event of interest
before_pot <- c(27,30,25,29,27,23,29,24,27,36,33,34,30,25,29,27,33,33,27,34,28,43,31,24,36,28,26,29,30,20,33)
after_pot <- c(26,32,21,32,21,24,36,26,24,25,30,25,20,14,26,28,21,21)
sum_before <- sum(before_pot)
len_before <- length(before_pot)
sum_after <- sum(after_pot)
len_after <- length(after_pot)
# Posterior draws: theta | data ~ Gamma(a + sum(counts), b + n_days)
before_theta <- rgamma(nreps,a + sum_before, b + len_before)
after_theta <- rgamma(nreps, a + sum_after, b + len_after)
diffs <- before_theta - after_theta
mean(diffs > 0)               # posterior P(rate before > rate after)
mean(diffs)
median(diffs)
quantile(diffs,c(.025,.975))  # 95% credible interval for the difference
|
754ec7b530b14a3aab625a7043ec38fa6c639fa5
|
19fde05976bb68cb1dd89153956153b546ab7826
|
/man/BAMBA.Rd
|
6627b1feaca553166aaeca4f35c142ac4d98c2fe
|
[] |
no_license
|
RGLab/BAMBA
|
30f842beb56d93ab304fd7ee6cb759a015d204ab
|
f26b06f2a44fc987abaaf7e9ce07011b3fa880bb
|
refs/heads/master
| 2021-06-23T04:36:52.121625
| 2019-01-03T20:05:34
| 2019-01-03T20:05:34
| 135,614,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,307
|
rd
|
BAMBA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BAMBA.r
\name{BAMBA}
\alias{BAMBA}
\title{Fit the BAMBA model on BAMA or Fc array data}
\usage{
BAMBA(data, dataType = "fc", nChains = 1, nIter = 2000,
outFolder = NULL, outFile = date_filename("BAMBA_stanfit.rds"), ...)
}
\arguments{
\item{dataType}{A string denoting the type of data
to be modeled ('bama' or 'fc'). Defaults to 'fc'.}
\item{nChains}{The number of chains to run for the stan sampling.
Defaults to 1. If more than one chain is used, it is recommended
to call \code{options(mc.cores = parallel::detectCores())}
to allow for parallel processing of multiple chains.}
\item{nIter}{The number of iterations per chain for the stan sampling.
Defaults to 2000.}
\item{outFolder}{The folder to save the stan results to.
If disk space is an issue, you may set this to \code{NULL} to
not save the stanfit object. Defaults to \code{NULL}}
\item{outFile}{The filename to save the stan results to.
Defaults to '<yyyy-mm-dd>_BAMBA_stanfit.rds'.}
\item{...}{Additional parameters to pass to the stan sampling function.
See the rstan documentation for more information.}
\item{data}{The BAMA or Fc array data to be modeled. (Shown as \code{data} in the usage above.)}
}
\value{
A \code{BAMBAResult} is a list with the following components:
\item{data}{The validated and prepared data used to fit
the \code{BAMBA} model.}
\item{dataType}{The type of data modeled. Either 'bama' or 'fc'.}
\item{chains}{The number of chains run for the stan sampling.}
\item{iter}{The number of iterations per chain for the stan sampling.}
\item{parameters}{A \code{data.frame} summarizing all parameters sampled
for the model.}
\item{mu0}{A \code{data.frame} summarizing the samples of the
baseline mean parameter, mu0.}
\item{mu_ag}{A \code{data.frame} summarizing the samples of the
antigen offsets, mu_ag.}
\item{mu_re}{A \code{data.frame} summarizing the samples of the
Fc variable offsets, mu_re. Only included if \code{dataType == 'fc'}.}
\item{mu_ar}{A \code{data.frame} summarizing the samples of the
antigen/Fc variable offsets, mu_ar = mu_ag + mu_re.
Only included if \code{dataType == 'fc'}. This is included because mu_ar
has more sampling stability than mu_ag and mu_re.}
\item{omega_t}{A \code{data.frame} summarizing the samples of the
prior response probabilities per timepoint, omega_t.}
\item{omega_ag}{A \code{data.frame} summarizing the samples of the
prior response probabilities per antigen, omega_ag.}
\item{omega_re}{A \code{data.frame} summarizing the samples of the
prior response probabilities per Fc variable, omega_re.
Only included if \code{dataType == 'fc'}.}
\item{omega_grp}{A \code{data.frame} summarizing the samples of the
prior response probabilities per group, omega_grp.
Only included if \code{dataType == 'fc'}.}
\item{hyperparameters}{A \code{data.frame} summarizing
the samples of the model hyperparameters.}
}
\description{
This function fits the \code{BAMBA} model.
TODO: change params out to just list all the options?
saving the stanfit object or returning it
keeping original data
}
\examples{
bamaData <- prepare_bama_data()
fit <- BAMBA(bamaData,
dataType = "bama",
nChains = 1,
nIter = 200, ## set higher for a real analysis
outFolder = NULL)
}
|
88353ca9fbb70e29203103dc7faa5cc3759e9f0f
|
45967efbed95edfac0fc82e70fb45f2830e1f400
|
/plot4.R
|
5202f3e4a1b7d41a1a94290cb4db41eb5a8b8979
|
[] |
no_license
|
jonpresley/ExData_Plotting1
|
25f33706d20bb0cfcc1379f66095a5d3e451a25b
|
f67c587fde904f32c872212cb6df26b1800634de
|
refs/heads/master
| 2021-07-06T11:04:40.419312
| 2017-10-04T06:27:17
| 2017-10-04T06:27:17
| 105,479,185
| 0
| 0
| null | 2017-10-01T22:29:31
| 2017-10-01T22:29:31
| null |
UTF-8
|
R
| false
| false
| 1,146
|
r
|
plot4.R
|
setwd("/Users/jonpresley/Desktop/R_Working_Directory")
data_all <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", dec = ".", stringsAsFactors = FALSE)
data <- data_all[data_all$Date %in% c("1/2/2007","2/2/2007") , ]
GAP <- as.numeric(data$Global_active_power)
datetime <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
sm1 <- as.numeric(data$Sub_metering_1)
sm2 <- as.numeric(data$Sub_metering_2)
sm3 <- as.numeric(data$Sub_metering_3)
Voltage <- as.numeric(data$Voltage)
GRP <- as.numeric(data$Global_reactive_power)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, GAP, type="l", xlab="", ylab="Global Active Power")
plot(datetime, Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, sm1, type="l", ylab="Energy sub metering", xlab="")
lines(datetime, sm2, type="l", col="red")
lines(datetime, sm3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"), bty="n")
plot(datetime, GRP, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
046603a12233771eec7874add2fb39ff5eaf8216
|
9c20dfe75c41283db15d80bef6ff60fbf4cb5255
|
/Forestplots_APR22.R
|
039d4b54ab848281ce7b41f2296d2ec29998b93b
|
[] |
no_license
|
kristyrobledo/CodingConundrums
|
c9eadaa313afa5e04e47ecf5e016936b9bee328b
|
7713b19a28774c75b9ae0e48b8ff7d648a40d278
|
refs/heads/master
| 2023-08-04T11:30:56.604221
| 2023-07-27T03:02:00
| 2023-07-27T03:02:00
| 237,126,082
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,749
|
r
|
Forestplots_APR22.R
|
library(tidyverse)
library(metafor)
library(forestplot)
library(glue)
library(gtsummary)
# meta analysis
glimpse(dat.bcg)
dat <-escalc(measure="RR", ai=tpos, bi=tneg, ci=cpos,di=cneg, data=dat.bcg)
dat$sei <-sqrt(dat$vi)
ma<-rma(yi, sei=sei, data=dat, method="FE")
metafor::forest(ma,
xlab = "Relative risk (95% CI)")
# plot from gtsummary
library(palmerpenguins)
glimpse(penguins)
m1<-glm(bill_length_mm~ year+body_mass_g+island+species, data=penguins) %>%
tbl_regression()
plot(m1)
# specific estimates
## example 1 - ggplot
penguins %>%
pivot_longer(cols = c(bill_length_mm, bill_depth_mm, flipper_length_mm)) %>%
filter(!is.na(value)) %>%
group_by(species, year, name) %>%
summarise(n=n(),
mn=mean(value),
sd=sd(value),
ci = qt(0.975, df=n-1)*sd/sqrt(n)) ->overtime.summ
labs<-c(bill_length_mm="Bill length",
flipper_length_mm = "Flipper length",
bill_depth_mm = "Bill depth")
overtime.summ %>%
ggplot(aes(y= mn, x=as.factor(year), colour=species)) +
geom_point(position = position_dodge(0.3)) +
geom_errorbar(aes(ymin=mn-ci, ymax=mn+ci), position = position_dodge(0.3)) +
coord_flip()+
facet_wrap(~name, scales = "free",
labeller =labeller(name=labs)) +
labs(x="", y="Mean and 95% CI") +
theme_minimal()
## example two - forestplot
overtime.summ %>%
ungroup() %>%
filter(name == "bill_depth_mm") %>%
mutate(lci = mn - ci,
uci = mn+ci,
sum = glue("N = {n}, {round(mn, 1)} (95% CI: {round(lci, 1)} - {round(uci, 1)})")) %>%
forestplot(mean = mn,
lower = lci,
upper = uci,
labeltext = c(year, species, sum),
boxsize=0.25,
graph.pos=3)
|
d39b6952a2386d71ef843a72a3e03bf1528478b4
|
c315e8d1fdcf23086841a7d9cfb48ceba0b0b357
|
/man/MeanListDim.Rd
|
43d0e9efe047490e6e453084833ca057b930238c
|
[] |
no_license
|
cran/s2dverification
|
c4d5caa8b518356b095c8768a7aadfe6c8da5a64
|
a772070454789d66328916463f91d306f1df0a3b
|
refs/heads/master
| 2022-05-22T11:13:13.975865
| 2022-04-20T07:10:06
| 2022-04-20T07:10:06
| 19,931,684
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 869
|
rd
|
MeanListDim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MeanListDim.R
\name{MeanListDim}
\alias{MeanListDim}
\title{Averages An Array Along Multiple Dimensions}
\usage{
MeanListDim(var, dims, narm = TRUE)
}
\arguments{
\item{var}{Input array.}
\item{dims}{List of dimensions to average along.}
\item{narm}{Ignore NA (TRUE) values or not (FALSE).}
}
\value{
The averaged array, with the dimensions specified in \code{dims}
removed.
}
\description{
Averages an array along a set of dimensions given by the argument dims.
}
\examples{
a <- array(rnorm(24), dim = c(2, 3, 4))
print(a)
print(Mean1Dim(a, 2))
print(MeanListDim(a, c(2, 3)))
}
\author{
History:\cr
0.1 - 2011-04 (V. Guemas) - Original code\cr
1.0 - 2013-09 (N. Manubens) - Formatting to R CRAN\cr
1.1 - 2015-03 (N. Manubens) - Improved memory usage
}
\keyword{datagen}
|
5f4a4be6bd1cf776d2a2b1b70e495dfe918b2287
|
30d5c568cdd8a4a055387f79c2d76bb2e7b46b35
|
/02.Practice_Script/ch5_script.R
|
aad6e43eed78da1e934502bfdd7d010918400008
|
[] |
no_license
|
YBCHEON95/R
|
d45a57c3a703981ed76259528abc529734eba885
|
60db3aaf5490a95fb1b21abd7344e688b2287859
|
refs/heads/master
| 2023-05-13T22:33:20.080392
| 2021-06-03T14:34:46
| 2021-06-03T14:34:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,542
|
r
|
ch5_script.R
|
### Chapter 5. 상관분석과 회귀분석
## Chapter5-1. 병아리의 성장(체중)에 영향을 미치는 인자는 무엇일까? (상관분석)
# 데이터 불러오기
w <- read.csv("ch5-1.csv", header = TRUE)
head(w)
str(w)
# w 데이터 셋에서 2~5열 데이터만 가져오기(첫열은 factor이므로)
w_n <- w[,2:5]
head(w_n)
# 위와 동일
w_n <- subset(w, select = -c(chick_nm))
head(w_n)
w_cor <- cor(w_n) # w_n 데이터 셋으로 상관분석한 결과를 w_cor변수에 넣음
w_cor # w_cor 상관분석 결과 확인
plot(w_n) # w_n 데이터 셋 산점도로 표현
# 상관분석을 보다 잘 표현할 수 있는 패키지 설치
install.packages("corrplot") # 패키지 설치
library(corrplot) # corrplot 패키지 불러오기
# 그냥 한번 실행해보기(주의할 점은 데이터셋이 아닌 상관분석결과를 넣어야함)
corrplot(w_cor) # 상관분석 결과인 w_cor을 corrplot 패키지로 실행해보기
# 원을 타원으로 표시하고, 하단에만 표시하고, 상관계수 표시
corrplot(w_cor, method = "ellipse",
type = "lower", addCoef.col = "white")
## Chapter5-2. 병아리의 무게를 예측할 수 있을까? (회귀분석)
# 단순선형 회귀분석 실시
w_lm <- lm(weight ~ egg_weight, data = w_n)
# 회귀모델 결과 확인
summary(w_lm)
# 산점도에 회귀직선을 표시해 모델이 데이터를 잘 대표하는지 확인
plot(w_n$egg_weight, w_n$weight) # 산점도 그리기
lines(w_n$egg_weight, w_lm$fitted.values, col = "blue") # 회귀직선 추가
text(x = 66, y = 132, label = 'Y = 2.3371X - 14.5475') # 회귀직선 라벨로 표시
names(w_lm) # w_lm 변수에 어떤 항목들이 있는지 확인
w_lm
w_lm$coefficients
w_lm$model
hist(w_lm$residuals, col = "skyblue", xlab = "residuals",
main = "병아리 무게 잔차 히스토그램")
# 다중회귀분석 실시
w_mlm <- lm(weight ~ egg_weight + movement + food, data = w_n)
summary(w_mlm)
# p값이 높은 movement 변수를 제외한 열만 다시 회귀분석 실시
w_mlm2 <- lm(weight ~ egg_weight + food, data = w_n)
summary(w_mlm2)
# 다중공선성(Multicollinearity) 확인을 위한 패키지 설치
install.packages("car")
library(car)
# 분산팽창요인(Variation Inflation Factor, VIF)
# 10이상이면 문제있다고 보고, 30보다 크면 심각
vif(w_mlm2)
# 잔차 히스토그램
hist(w_mlm2$residuals, col = "skyblue", xlab = "residuals",
main = "병아리 무게 잔차 히스토그램(다중 회귀)")
# (참고)후진소거법을 적용해 자동으로 실행
step_mlm <- step(w_mlm, direction = "backward")
# (참고)회귀분석 결과 그래프로 확인
plot(w_mlm2)
# 비선형 회귀분석용 두번째 데이터셋 불러오기
w2 <- read.csv("ch5-2.csv", header = TRUE)
head(w2)
str(w2)
plot(w2) # 데이터 형태 산점도로 확인
# 성장기간에 따른 병아리 무게 변화 선형 회귀분석 실시
w2_lm <- lm(weight ~ day, data = w2)
summary(w2_lm)
# 산점도 위에 회귀직선 표시
lines(w2$day, w2_lm$fitted.values, col = "blue")
# 성장기간에 따른 병아리 무게 변화 비선형 회귀분석 실시
w2_lm2 <- lm(weight ~ I(day^3) + I(day^2) + day, data = w2)
summary(w2_lm2)
plot(w2)
# 산점도 위에 회귀곡선 표시
lines(w2$day, w2_lm2$fitted.values, col = "blue")
# w2_lm2 회귀분석 결과에서 계수 확인
w2_lm2$coefficients
# 산점도 위에 수식 표시
text(25, 3000, "weight = -0.025*day^3 + 2.624*day^2 - 15.298*day + 117.014")
|
6fef3cadb164002248f2a2adc0a726ebeb566f2c
|
a1657febb6a1dd1810d104f82c3181717e37a96f
|
/R/screen.R
|
c666d53fbb8b62e0feec3991bf668dd5e45c1ecd
|
[] |
no_license
|
oslerinhealth/slamR
|
32fe855ffb9d895b48d6c2ba4476263c43194b09
|
50a2461f411b04b924c7cc60399433e89dccbe6d
|
refs/heads/master
| 2021-07-10T17:18:38.449181
| 2021-05-19T21:26:01
| 2021-05-19T21:26:03
| 245,497,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,927
|
r
|
screen.R
|
# screening
## screen Q using Gibbs (Alternating Gibbs)
##
## @param X N by J binary data matrix
## @param Z_ini N by K initial latent attributes
## @param Q_ini J by K initial Q matrix
## @param max_iter maximum iterations (e.g., 50)
## @param err_prob noise level
##
## @return
## \itemize{
## \item Z_est Estimated latent attributes for all people
## \item Z_candi candidate latent attribute patterns (unique)
##\item Q_arr a list of Q matrices obtained from the algorithm
##\item c J dimensional 1-slipping parameter
## \item g J dimensional guessing parameter
## }
##
## @export
#screen_Q_gibbs_large <- function(X,Z_ini,Q_ini,max_iter,err_prob){
# J <- nrow(Q_ini)
# K <- ncol(Q_ini)
#
# Q <- Q_ini
# range_gen <- (1-2*err_prob)/4 #?
# g_new <- err_prob + (-range_gen/2+range_gen*stats::runif(J))
# c_new <- 1-err_prob + (-range_gen/2+range_gen*stats::runif(J))
#
# iter <- 0
# err <- 1
# G <- 6 # 2*m_burnin, for example.
# m_burnin <- 3
#
# Z <- Z_ini
# Q_arr <- vector("list",max_iter)
#
# # the following two arrays store probabilities used to sample Q:
# while (err > 1e-3 & iter < max_iter){
# g <- g_new
# c <- c_new
# Q_old <- Q
#
# # E-step:
# Rcg <- sweep(X,2,(log_prac(c)-log_prac(g)),"*")+
# sweep(1-X,2,log_prac(1-c)-log_prac(1-g),"*")
# iter <- iter +1
#
# ZR <- 0 # ideal response matrix.
# ZZ <- 0
# num <- 0
#
# # start Gibbs sampling of attribute profiles for G times:
# for (mm in 1:G){
# for (k in 1:K){ #iterate over attributes:
# ZR_k <- get_ideal_resp(Q[,-k,drop=FALSE],
# Z[,-k,drop=FALSE])
# P <- (ZR_k*Rcg)%*%Q[,k,drop=FALSE]#?
# #p_nume <- exp(c(P)) # potential numerical overflow!
# #Z[,k] <- rvbern(p_nume/(p_nume+1))
# Z[,k] <- rvbern(exp(c(P)-
# c(apply(cbind(c(P),0),1,matrixStats::logSumExp))))
# }
# if (mm > m_burnin | iter >200){
# ZZ <- ZZ+Z
# ZR <- ZR+get_ideal_resp(Q,Z)
# num <- num+1
# }
# }
# # end of Gibbs "sampling" of latent attributes.
# ave_ZR_new <- ZR/num
# ave_Z_new <- ZZ/num
#
#
# # new step for updating Q:
# QQ <- 0
# num <- 0
# # start Gibbs sampling of Q-matrix for G time:
# for (mm in 1:G){
# for (k in 1:K){
# ZR_kq <- get_ideal_resp(Q[,-k,drop=FALSE],ave_Z_new[,-k,drop=FALSE])
# inside_kq <- matrix((1-ave_Z_new[,k]),nrow=1)%*%(ZR_kq*Rcg)
# Q[,k] <- rvbern(1/(1+exp(c(inside_kq))))
# }
#
# if (mm>m_burnin | iter >200){
# QQ <- QQ+Q
# num <- num+1
# }
# }
# # end of Gibb sampling for Q.
#
# ave_Q_new <- QQ/num
# Q <- 0+(ave_Q_new > 1/2)
# Q_arr[[iter]] <- Q
# # end of updating Q.
#
# ## new M step:
# if (iter ==1){
# ave_ZR <- ave_ZR_new
# ave_Z <- ave_Z_new
# ave_Q <- ave_Q_new
# } else{
# step <- 1/iter
# ave_ZR <- step * ave_ZR_new + (1-step) * ave_ZR
# ave_Z <- step * ave_Z_new + (1-step) * ave_Z
# ave_Q <- step * ave_Q_new + (1-step) * ave_Q
# }
#
# c_new <- colSums(X * ave_ZR) / colSums(ave_ZR)
# g_new <- colSums(X * (1-ave_ZR)) / colSums(1-ave_ZR)
# c_new <- c_new
# g_new <- g_new
#
# c_new[is.nan(c_new)] <- 1
#
# err <- (sum(abs(g-g_new)) + sum(abs(c-c_new)))/(2*J) # parameter differences
# cat("==[slamR] Iteration: ",iter,", Err: ",round(err,6),", number of Q-entries changed: ",sum(sum(abs(Q-Q_old))),". ==\n")
# # after the algorithm has converged, examine the equivalent class of Q.
# }
#
# Z_est <- 0+(ave_Z >1/2)
# Z_candi <- unique_sort_binmat(Z_est)
#
# Q_arr <- Q_arr[1:iter]
#
# make_list(Z_est,Z_candi,Q_arr,c,g)
#}
#' Estimate Q, screening latent attributes (Alternating Gibbs)
#'
#' This function implements the Alternating Direction Gibbs EM (ADG-EM)
#' algorithm in the scenario of responses observed over many taxonomies
#' (trees)
#'
#' @param X N by J2 binary data matrix - level 2
#' @param Z_ini N by K initial latent attributes
#' @param Q_ini J by K inital Q matrix
#' @param max_iter maximum iterations (e.g., 50)
#' @param err_prob noise level
#' @param must_maxiter 1 to force maxiter; default is \code{0}
#' @param D_mat J1 by J2 binary matrix to indicate children in two-level trees.
#' \code{D_mat} is the \code{J1 * J2} binary adjacency matrix specifying how the
#' trees are grown in the second layer, i.e., which second-level
#' responses are "children" of each first-level response. Default is \code{NULL}
#' @param X1 N by J1 binary data matrix - level 1; default is \code{NULL}
#' @param model "DINA" (default) or "DINO"
#'
#' @return
#' \itemize{
#' \item Z_est Estimated latent attributes for all people
#' \item Z_candi candidate latent attribute patterns (unique)
#' \item Q_arr a list of Q matrices obtained from the algorithm
#' \item c J dimensional 1-slipping parameter
#' \item g J dimensional guessing parameter
#' }
#' @importFrom matrixStats logSumExp
#' @export
adg_em <- function(X,Z_ini,Q_ini,max_iter,err_prob,must_maxiter=0,
D_mat=NULL,X1=NULL,model="DINA"){
# obtain a binary matrix specifying which subjets are linked to
# which second-level responses
# for DINO, do the following transformation:
if(model=="DINO"){
X <- 1-X
Z_ini <- 1-Z_ini
}
indmat_im <- NULL
if (!is.null(D_mat) & !is.null(X1)){indmat_im <- X1%*%D_mat}
J <- nrow(Q_ini)
K <- ncol(Q_ini)
Q <- Q_ini
range_gen <- (1-2*err_prob)/4 #?
g_new <- err_prob + (-range_gen/2+range_gen*stats::runif(J))
c_new <- 1-err_prob + (-range_gen/2+range_gen*stats::runif(J))
iter <- 0
err <- 1
G <- 6 # 2*m_burnin, for example.
m_burnin <- 3
Z <- Z_ini
Q_arr <- vector("list",max_iter)
if (must_maxiter==1){
iter_proceed <- (iter < max_iter)
} else{
iter_proceed <- (err > 1e-3 && iter < max_iter)
}
# the following two arrays store probabilities used to sample Q:
while (iter_proceed){
g <- g_new
c <- c_new
Q_old <- Q
# E-step: N by J
Rcg <- sweep(X,2,(log_prac(c)-log_prac(g)),"*")+
sweep(1-X,2,log_prac(1-c)-log_prac(1-g),"*")
iter <- iter +1
ZR <- 0 # ideal response matrix.
ZZ <- 0
num <- 0
# start Gibbs sampling of attribute profiles for G times:
for (mm in 1:G){
for (k in 1:K){ #iterate over attributes:
ZR_k <- get_ideal_resp(Q[,-k,drop=FALSE],
Z[,-k,drop=FALSE])
if(!is.null(indmat_im)) {
P <- (ZR_k*Rcg*indmat_im)%*%Q[,k,drop=FALSE]
} else{
P <- (ZR_k*Rcg)%*%Q[,k,drop=FALSE]#?
}
#p_nume <- exp(c(P)) # potential numerical overflow!
#Z[,k] <- rvbern(p_nume/(p_nume+1))
Z[,k] <- rvbern(exp(c(P)-
c(apply(cbind(c(P),0),1,matrixStats::logSumExp))))
}
if (mm > m_burnin | iter >200){
ZZ <- ZZ+Z
ZR <- ZR+get_ideal_resp(Q,Z)
num <- num+1
}
}
# end of Gibbs "sampling" of latent attributes.
ave_ZR_new <- ZR/num
ave_Z_new <- ZZ/num
# new step for updating Q:
QQ <- 0
num <- 0
# start Gibbs sampling of Q-matrix for G time:
for (mm in 1:G){
for (k in 1:K){
ZR_kq <- get_ideal_resp(Q[,-k,drop=FALSE],ave_Z_new[,-k,drop=FALSE])
if(!is.null(indmat_im)) {
inside_kq <- matrix((1-ave_Z_new[,k]),nrow=1)%*%(ZR_kq*Rcg*indmat_im)
} else{
inside_kq <- matrix((1-ave_Z_new[,k]),nrow=1)%*%(ZR_kq*Rcg)
}
Q[,k] <- rvbern(exp(0 -
c(apply(cbind(c(inside_kq),0),1,matrixStats::logSumExp))))
}
if (mm>m_burnin | iter >200){
QQ <- QQ+Q
num <- num+1
}
}
# end of Gibb sampling for Q.
ave_Q_new <- QQ/num
Q <- 0+(ave_Q_new > 1/2)
Q_arr[[iter]] <- Q
# end of updating Q.
## new M step:
if (iter ==1){
ave_ZR <- ave_ZR_new
ave_Z <- ave_Z_new
ave_Q <- ave_Q_new
} else{
step <- 1/iter
ave_ZR <- step * ave_ZR_new + (1-step) * ave_ZR
ave_Z <- step * ave_Z_new + (1-step) * ave_Z
ave_Q <- step * ave_Q_new + (1-step) * ave_Q
}
c_new <- colSums(X * ave_ZR) / colSums(ave_ZR)
g_new <- colSums(X * (1-ave_ZR)) / colSums(1-ave_ZR)
c_new <- c_new
g_new <- g_new
c_new[is.nan(c_new)] <- 1
err <- (sum(abs(g-g_new)) + sum(abs(c-c_new)))/(2*J) # parameter differences
cat("==[slamR] Iteration: ",iter,", Err: ",round(err,6),", number of Q-entries changed: ",sum(sum(abs(Q-Q_old))),". ==\n")
if (must_maxiter==1){
iter_proceed <- (iter < max_iter)
} else{
iter_proceed <- (err > 1e-3 && iter < max_iter)
}
# after the algorithm has converged, examine the equivalent class of Q.
}
Z_est <- 0+(ave_Z >1/2)
Z_candi <- unique_sort_binmat(Z_est)
Q_arr <- Q_arr[1:iter]
make_list(Z_est,Z_candi,Q_arr,c,g)
}
|
aaeed83f6c69bf6a86fe065bd41e0d4861e6672f
|
3922723f5436957ff43d3952aeafe1b055b8b508
|
/r/examples/3_histogram.R
|
ee85e7dfc1a873cf6224da08c7453de8a2d42102
|
[] |
no_license
|
sumkincpp/CodeTest
|
945d865dafc7f39655bd2d496f2241cf5467d408
|
d4e9b47aa8440a36d4fa4251dca23df5f94eb698
|
refs/heads/master
| 2023-07-16T21:06:05.347753
| 2023-07-07T11:44:12
| 2023-07-07T11:44:12
| 2,765,685
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
3_histogram.R
|
data <- c(5.28, 14.64, 37.25, 78.9, 44.92, 8.96, 19.22, 34.81, 33.89, 24.28, 6.5, 4.32, 2.77, 17.6, 33.26, 52.78, 5.98, 22.48, 20.11, 65.74, 35.73, 56.95, 30.61, 29.82);
hist(data, breaks=seq(0,80,l=6),
freq=FALSE,col="orange",main="Histogram",
xlab="x",ylab="f(x)",yaxs="i",xaxs="i")
|
5679791e6ef18b50a071abeae5e7e65c23bc1392
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EdSurvey/examples/showCutPoints.Rd.R
|
6c843fe8cba1bef02d98090f6cacad26ca0539eb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
showCutPoints.Rd.R
|
library(EdSurvey)
### Name: showCutPoints
### Title: Retrieve Achievement Level Cutpoints
### Aliases: showCutPoints
### ** Examples
# read in the example data (generated, not real student data)
sdf <- readNAEP(system.file("extdata/data", "M36NT2PM.dat", package="NAEPprimer"))
# show the cut points
showCutPoints(data=sdf)
|
d46c10b545890f4258abda828d49c0e5f8ac58f4
|
6ef61c4939d90300554cdd4e6b3336146bc08fe1
|
/man/congress116.Rd
|
da042eb1356331920522288f3d30d36e584fcffd
|
[
"MIT"
] |
permissive
|
r-congress/congress116
|
56b5603e965afcd384a0269e879532b7d5c7b793
|
ed02f5b7f0cb9a877f18878b48dd201fea6d5e80
|
refs/heads/master
| 2020-09-03T22:04:56.019869
| 2019-12-11T21:22:08
| 2019-12-11T21:22:08
| 219,583,888
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 753
|
rd
|
congress116.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{congress116}
\alias{congress116}
\title{IDs for members of the 116th U.S. Congress}
\format{A data frame with 544 rows and 3 variables:
\describe{
\item{bioguide}{official congressional ID}
\item{screen_name_official}{twitter handle associated with (via declaration or .gov URL in account profile) the office held by a member of congress}
\item{screen_name_personal}{twitter handle associated with the person–independent of the office}
}}
\source{
\url{https://twitter.com/}
}
\usage{
congress116
}
\description{
A dataset containing official and twitter identifiers associated with members
of the 116th U.S. Congress
}
\keyword{datasets}
|
1d82f8a5d2dce98d660e36b6d63b8a3805e8c634
|
d86268c2fdd4195208c3fd5aecab31c324af7bca
|
/omd/man/colfun.Rd
|
802f771d9bea33e698ebefbd87b3e7097dbb58b2
|
[] |
no_license
|
bio-datascience/omd
|
0e2edc61e86c135383b5d4bf29c14c95af026f5f
|
5f2f532dfe077388f7911cc7999622c4b6a3f8b8
|
refs/heads/master
| 2023-08-28T21:44:27.488641
| 2021-11-02T15:25:02
| 2021-11-02T15:25:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 299
|
rd
|
colfun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{colfun}
\alias{colfun}
\title{From a numeric vector between 0 and 1, make Red-Yellow-Blue colors.}
\usage{
colfun(vec)
}
\description{
From a numeric vector between 0 and 1, make Red-Yellow-Blue colors.
}
|
1e970ae88c30f87a69f1bbb499d5c08d19336e2a
|
72fc8ccca937ca048ae38ba4d1b9c724577864b9
|
/man/buildCondition.Rd
|
424df504657639812eda5bd19fb6339623482e98
|
[
"MIT"
] |
permissive
|
metadevpro/traitbaser
|
42fdd9c8aa7c590cf7e9eab81618e1d59ac5555f
|
c480bdd7ae088b3e090cd2db8340dd6abe5a64bc
|
refs/heads/master
| 2021-01-12T05:31:20.796821
| 2020-04-20T15:02:56
| 2020-04-20T15:02:56
| 77,943,876
| 3
| 2
|
MIT
| 2020-04-16T14:33:56
| 2017-01-03T18:50:51
|
R
|
UTF-8
|
R
| false
| true
| 1,063
|
rd
|
buildCondition.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildCondition.R
\name{buildCondition}
\alias{buildCondition}
\title{Builds a condition}
\usage{
buildCondition(variable, operator, value)
}
\arguments{
\item{variable}{The variable or column name to filter.}
\item{operator}{An operator for the filter. Currenty (== equals) an (!= not equals) are supported. (More operators to be implemented in the future).}
\item{value}{Provides a value to compare with.}
}
\value{
Returns the filtering clause. A list of clauses can be composed
and passed to \code{query()} or \code{count()} functions via the \code{conditions}
parameter to build complex queries.
}
\description{
Returns a condition clause to be used in queries.
}
\examples{
\donttest{
cnx <- connect('http://www.traitbase.info')
off <- resource(cnx, 'species')
count(off)
count(off, conditions=buildCondition('species', '!=', 'Bombus') )
count(off, conditions=buildCondition('species', '==', 'Bombus') )
query(off, conditions=buildCondition('species', '==', 'Bombus') )
}
}
|
943f56b4cf230e082b9513a79a666ef7a2de3222
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/biogeo/examples/geo2envpca.Rd.R
|
7620ee7937e7976ff1304759d0526b28ce02bdfc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
geo2envpca.Rd.R
|
library(biogeo)
### Name: geo2envpca
### Title: Interactive plot to explore points in geographical and
### environmental space
### Aliases: geo2envpca
### ** Examples
## Not run:
##D plotsetup(6,6)
##D ed<-geo2envpca(edat,"Species U",group1="Species",group2="",
##D world,scaling=1,vars=c("bio1","bio12","bio5","bio6","bio14"),
##D showrecord="1981",ext="p")
## End(Not run)
|
55e62d2a470425bd8bebd31fb767623a15689a61
|
52f0109765f39fb570a5e944fde6822db845769c
|
/Session10.R
|
4c019b39ff837dab7e1e8700a3cb50fa6b1c96d8
|
[] |
no_license
|
cart3ch/bsf_iimtrichy_18-20
|
c873a4aa1f746c9ade1518370f8ed250940a3111
|
97c7041357aa647180ae64b9d35af4fe77f353c7
|
refs/heads/master
| 2020-06-02T20:48:55.653171
| 2019-08-15T17:14:24
| 2019-08-15T17:14:24
| 191,305,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
Session10.R
|
## In-class session
#Time-series analysis
library(readxl)
tsdata<- read_excel("rdata/Tea Production_Classical_Decomposition.xlsx")
#create a time series object
|
e1d09c9f228429907c676e6cee1c74859f59d9fd
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5616_0/rinput.R
|
e63c4189665cdbe133c193de50928300e1172c56
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("5616_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5616_0_unrooted.txt")
|
d6bbe7737493a28893eb9b36da07605af49ef8a5
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ionr/R/ind_excl_step.R
|
c4a0ef1c0c51f0ec58cba32d7722cb5f9b82b0c8
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,006
|
r
|
ind_excl_step.R
|
#' One step in indicator exclusion procedure
#'
#' @description See \code{\link{ind_excl}} for details.
#' @inheritParams ind_excl
#' @param exclude Exclude an item excluded at previous step, e.g., as decided by \code{\link{ind_excl_inc}}
#' @param round Allows rounding of values in returned matrix.
#' @return Provides the results of a single step in indicator exclusion procedure. See example for
#' details
#' @encoding utf-8
#'
#' @examples
#' ## Create a scale-outcome set that violates ION. Only 2 indicators out of 8 relate to
#' ## the outcome, the others just relate to the 2 indicators
#' set.seed(466)
#' a<-scale_sim(n=2500, to_n=2, tn_n=6)
#' res=ind_excl_step(a[[1]],a[[2]])
#' print(res)
#'
#' # note that the p-values for upper items (7 & 8 ) are much smaller than for the rest
#'
#' #row number indicator number
#' #r.test.t t value of the r.test.
#' #t.test.p p value of the r.test.
#' #cor_excl correlation between outcome and sum-score when an item is excluded.
#' #cor_all correlation between outcome and sum-score when all items are included
#' # (i.e., full scale).
#' #cor.excl_all correlation between two sum-scores.
#' @export
ind_excl_step <- function(indicators, outcome, indicatornames = 1:ncol(indicators), exclude = vector(), coruse = "everything", round = F) {
# drop indicators from scale, test them with outcome. Commented version supports multiple drop.
# However, current algorithm makes no use of multiple drop.
# foo.outcome <- function(i, o, drop = 1, coruse =
# 'everything') { combs <- combn(ncol(i), drop) res <- numeric(length = ncol(combs)) #better than res = numeric(0)
# for (j in 1:ncol(combs)) { res[j] <- cor(o, rowMeans(i[, -c(combs[, j,drop = FALSE])]), use = coruse) }
# return(res) }
# new version, drops just one
foo_outcome_one <- function(i, o, coruse = "everything") {
# assign temp variables and preallocate for speed
nindic = ncol(i)
res <- numeric(length = nindic)
for (j in 1:nindic) {
res[j] <- cor(o, rowMeans(i[, -j, drop = FALSE], na.rm = T), use = coruse)
}
return(res)
}
# test if some indicators are excluded from the test. if yes, then remove these indicators
if (length(exclude) != 0) {
# get indicators that are excluded
excl_indicators <- grep(paste0(exclude, collapse = "|"), indicatornames) #http://stackoverflow.com/questions/7597559/grep-in-r-with-a-list-of-patterns
# exclude them from current setup
indicators <- indicators[, -excl_indicators] # before 160108: indicators=indicators[-excl_indicators]
indicatornames <- indicatornames[-excl_indicators]
}
# calculate correlations with outcome, when each indicator of the scale is excluded
dat <- foo_outcome_one(i = indicators, o = outcome, coruse = coruse)
# correlation between all indicators and outcome
cor_all <- cor(rowMeans(indicators,na.rm=T), outcome, use = coruse)
# preallocate
stats <- matrix(nrow = ncol(indicators), ncol = 5)
# test the correlation difference between outcome and sumscore that has all indicators, vs. 1 indicator excluded
for (j in 1:length(dat)) {
cor_excl <- dat[j]
cor_excl_all <- cor(rowMeans(indicators,na.rm=F), rowMeans(indicators[, -j,drop=FALSE],na.rm=T), use = coruse)
res <- psych::r.test(n = nrow(indicators), r12 = cor_excl, r13 = cor_all, r23 = cor_excl_all)
numbers <- (c(res$t, res$p, cor_excl, cor_all, cor_excl_all))
stats[j, ] <- numbers
}
rownames(stats) <- indicatornames
colnames(stats) <- c("r.test.t", "t.test.p", "cor_excl", "cor_all", "cor_excl_all")
if (round == T) {
stats[, 2] <- round(stats[, 2], 3)
stats[, c(1, 3:5)] <- round(stats[, c(1, 3:5)], 2)
}
stats <- stats[order(abs(stats[, 1]), decreasing = T), ]
return(stats)
}
|
e5e84eff12d3aaac2dabe71f142f1489cc9c27d2
|
d75a1e1e95ae70ce048a0c26fb0f9c283fd5dd70
|
/man/EAGLE_1.Rd
|
50f8071566467b59fa60101610abc80c4d11ef1d
|
[] |
no_license
|
Owain-S/kmdata
|
49d65b279e7e84e170550f7d1fbdc8573f28784c
|
22569373a88f64ef480ea895c8ef7b7b5ced260e
|
refs/heads/master
| 2023-05-25T22:58:06.758825
| 2021-06-01T19:36:49
| 2021-06-01T19:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 947
|
rd
|
EAGLE_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{EAGLE_1}
\alias{EAGLE_1}
\title{EAGLE, figure 1}
\format{
A data frame of 368 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (bev10, bev5) \cr
}
}
\source{
Iwamoto S, Takahashi T, Tamagawa H, et al. FOLFIRI plus bevacizumab
as second-line therapy in patients with metastatic colorectal cancer
after first-line bevacizumab plus oxaliplatin-based therapy: the
randomized phase III EAGLE study. Ann Oncol 2015; 26: 1427–33.
}
\usage{
EAGLE_1
}
\description{
Kaplan-Meier digitized data from EAGLE, figure 1 (PMID 25908603). A reported sample size of 369 for a primary endpoint of PFS in colorectal cancer.
}
\examples{
summary(EAGLE_1)
kmplot(EAGLE_1)
}
\keyword{datasets}
|
de6c61334bcc9659ed23eecb6dfb3db64220ff0e
|
dcf352b37c3b115ec5fab27f1f8b26eb74a06da5
|
/R/gps_utilities.R
|
690ab85dbd16f741455f80ea9da0e2a2e9072590
|
[] |
no_license
|
CraigMohn/bikeCadHr
|
6a2242f9f83d9dea7df8819bbb597158e2b0b467
|
699b73db74563d32a3809cf67cbbef7bc409e682
|
refs/heads/master
| 2021-06-23T20:23:17.317160
| 2020-11-29T22:56:17
| 2020-11-29T22:56:17
| 93,075,658
| 0
| 0
| null | 2017-09-08T16:19:09
| 2017-06-01T16:07:01
|
R
|
UTF-8
|
R
| false
| false
| 1,157
|
r
|
gps_utilities.R
|
## note that lag_one/lead_one pad the new entry with the first/last value,
## which is different than lag_n/lead_n(,1)
## this gives flexibility with differences, but be careful!
lag_one <- function(vec) {
return(c(vec[1],vec[-length(vec)]))
}
lead_one <- function(vec) {
return(c(vec[-1],vec[length(vec)]))
}
lag_n <- function(vec,n) {
if (n < length(vec)) {
return(c(rep(NA,n),vec[1:(length(vec)-n)]))
}
else {
return(vec<-NA)
}
}
lead_n <- function(vec,n) {
if (n < length(vec)) {
return(c(vec[-n:-1],rep(NA,n)))
}
else {
return(vec<-NA)
}
}
dateTimeStr <- function(intDate,intTime) {
return(paste0(stringr::str_pad(intDate,8,pad="0"),
stringr::str_pad(intTime,6,pad="0")))
}
# this was lifted from stack overflow - credit author
find_peaks <- function (x, m = 3){
shape <- diff(sign(diff(x, na.pad = FALSE)))
pks <- sapply(which(shape < 0), FUN = function(i){
z <- i - m + 1
z <- ifelse(z > 0, z, 1)
w <- i + m + 1
w <- ifelse(w < length(x), w, length(x))
if(all(x[c(z:i,(i+2):w)] <= x[i+1])) return(i+1) else return(numeric(0))
})
pks <- unlist(pks)
pks
}
|
7cedca92d1733933093d4c1fdcdad0c0c9207575
|
ad46eeffcbee1c270665dacc44f2e057bcd83fa9
|
/man/BER.Rd
|
91203e8d23091d4db834690a429f72c18cf350da
|
[] |
no_license
|
gbonte/D2C
|
ba61ea515785cf881e74cb39b7c905d778edcde3
|
ae51f3f24672fd21525d0da69547d46c010941a6
|
refs/heads/master
| 2022-09-26T05:15:43.442397
| 2022-09-20T08:12:37
| 2022-09-20T08:12:37
| 66,285,031
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 507
|
rd
|
BER.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{BER}
\alias{BER}
\title{Balanced Error Rate}
\usage{
BER(Ytrue, Yhat)
}
\arguments{
\item{Ytrue}{: binary numeric vector (made of 0 or 1) of real classes}
\item{Yhat}{: binary numeric vector (made of 0 or 1) of predicted classes}
}
\value{
Balanced Error Rate \eqn{0 \le } BER \eqn{ \le 1}
}
\description{
The balanced error rate is the average of the errors on each class: BER = 0.5*(FP/(TN+FP) + FN/(FN+TP)).
}
|
9013d399475772098d99fa4e116c55cefd8e2bc4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/neat/vignettes/neat.R
|
5a815ad417a6def7ad42e724e223fed16ec78668
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,517
|
r
|
neat.R
|
# R code extracted from the 'neat' package vignette (knitr chunk headers are
# kept as "## ----" comment lines).  Demonstrates Network Enrichment
# Analysis Tests (NEAT) on the bundled yeast data and on a small toy
# directed network.
## ---- eval=FALSE, echo=TRUE, results='asis'------------------------------
##  install.packages('neat')
## ---- eval=TRUE, echo=TRUE, results='asis'-------------------------------
library('neat')
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
data(yeast) # load the data
ls(yeast) # display the content of the list
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
induced_genes = list('ESR 2' = yeast$esr2) # set of differentially expressed genes
#(ESR 2 is the set of induced ESR genes)
functional_sets = yeast$goslimproc[c(72,75)] # two functional gene sets of interest:
#response to heat and to starvation
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
# Test enrichment of the induced genes against the two functional sets.
test = neat(alist = induced_genes, blist = functional_sets, network = yeast$yeastnet,
            nettype = 'undirected', nodes = yeast$ynetgenes, alpha = 0.01)
## ---- eval=TRUE, echo=FALSE, results='markup'----------------------------
test$expected_nab = round(test$expected_nab,1)
test$pvalue = round(test$pvalue,4)
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
print(test)
## ---- eval=FALSE, echo=TRUE, results='markup'----------------------------
## neat(alist, blist, network, nettype, nodes, alpha = NULL,
##     anames = NULL, bnames = NULL)
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
# Toy example: a 7-node directed network given as an adjacency matrix.
A = matrix(0, nrow=7, ncol=7)
labels = letters[1:7]
rownames(A) = labels; colnames(A) = labels
A[1,c(2,3)]=1; A[2,c(5,7)]=1;A[3,c(1,4)]=1;A[4,c(2,5,7)]=1;A[6,c(2,5)]=1;A[7,4]=1
print(A)
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
set1 = c('a','e')
set2 = c('c','g')
set3 = c('d','f')
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
alist = list('set 1' = set1, 'set 2' = set2)
blist = list('set 3' = set3)
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
library(Matrix)
as(A, 'sparseMatrix')
## ---- eval=TRUE, echo=FALSE, results='markup'----------------------------
networkmatrix(A, labels, 'directed')
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
test1 = neat(alist = alist, blist = blist, network = A,
             nettype = 'directed', nodes = labels)
print(test1)
## ---- eval=TRUE, echo=TRUE, results='markup'-----------------------------
test2 = neat(alist = alist, blist = blist, network = A,
             nettype = 'directed', nodes = labels, alpha = 0.05)
print(test2)
|
35a0bd1c022202c7aed2db94b02312701bda83cc
|
39f88826e318b0b351667806602c6957d5ae01d0
|
/R/trading_view.R
|
dfb1f338c894f56b3494cb6e28efb88b06becf1b
|
[
"MIT"
] |
permissive
|
jngod2011/fundManageR
|
9b79169ba3b8157d8ae9ade3bd066c2b5f82c255
|
cea3e117a217bb7c770670ddd440822094a728cc
|
refs/heads/master
| 2020-03-28T07:22:15.138769
| 2018-07-29T16:26:53
| 2018-07-29T16:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,812
|
r
|
trading_view.R
|
# Lookup table mapping Tradingview security-type labels to their URL slugs.
#
# Returns a data frame with one row per supported type:
#   - type:     the label used by the Tradingview UI/API
#   - slugType: the corresponding URL path fragment ("" for "All")
get_dictionary_tradeview_types <- function() {
  type_labels <- c(
    "All", "stock", "futures", "forex", "cfd",
    "cryptocurrency", "Index", "Economy", "quandl"
  )
  type_slugs <- c(
    "", "stocks", "futures", "forex", "cfd",
    "bitcoin", "index", "economic", "quandl"
  )
  data_frame(type = type_labels, slugType = type_slugs)
}
# dictionaries ------------------------------------------------------------
# Fetch the list of standard Pine-script chart items from the Tradingview
# pine-facade endpoint and return the first six JSON fields as a tibble.
# (Note: "tradeingview" typo is kept — renaming would break callers.)
get_tradeingview_chart_items <-
  function() {
    endpoint <-
      "https://pine-facade.tradingview.com/pine-facade/list?filter=standard"
    parsed <- jsonlite::fromJSON(endpoint, simplifyDataFrame = TRUE)
    tibble::as_data_frame(parsed[1:6])
  }
# events ------------------------------------------------------------------
# Transliterate accented characters to plain ASCII, then extract the leading
# numeric value from the string (e.g. "1,234.5%" -> 1234.5).
parse_result_number <-
  function(x) {
    ascii_text <- stringi::stri_trans_general(x, "Latin-ASCII")
    readr::parse_number(ascii_text)
  }
# Parse a human-formatted result string (e.g. "5.2%", "1.3b", "450k") into a
# numeric value.
#
# Suffix handling mirrors the original logic exactly, including that the
# scale factors are applied cumulatively and in this order: "%" divides by
# 100, then "b", "m" and "k" multiply by 1e9, 1e6 and 1e3 respectively.
# NA input returns NA.  The result is wrapped as formattable::comma with
# 3 digits.
parse_result <-
  function(x) {
    if (is.na(x)) {
      return(NA)
    }
    value <- parse_result_number(x)
    # Percentage sign scales down.
    if (str_detect(x, '%')) {
      value <- value / 100
    }
    # Magnitude suffixes scale up (checked independently, like the original).
    if (str_detect(x, "b")) {
      value <- value * 1000000000
    }
    if (str_detect(x, "m")) {
      value <- value * 1000000
    }
    if (str_detect(x, "k")) {
      value <- value * 1000
    }
    formattable::comma(value, digits = 3)
  }
#' Get Market Events
#'
#' Returns a data frame of global financial
#' and economic calendar events from the Tradingview chart-events feed,
#' ordered by date and descending importance rank.
#'
#' @param return_message if \code{TRUE}, prints a summary with the number of
#'   events returned and their date range
#'
#' @return a tibble of events with date/time, currency, name, importance
#'   rank, description, and forecast/actual/previous results (numeric)
#' @export
#' @import dplyr jsonlite purrr anytime glue stringr
#' @examples
#' get_data_market_events(return_message = TRUE)
get_data_market_events <-
  function(return_message = TRUE) {
    # Pull the raw feed, drop raw columns 2 and 11, and rename the rest.
    data <-
      "https://chartevents.tradingview.com/chartevents/" %>%
      jsonlite::fromJSON(simplifyDataFrame = TRUE) %>%
      as_data_frame() %>%
      dplyr::select(-c(11, 2)) %>%
      purrr::set_names(
        c(
          'idEvent',
          'resultActual',
          'resultPrevious',
          'resultForecast',
          'descriptionEvent',
          'nameEvent',
          'rankImportance',
          'currencyEvent',
          'datetimeEvent'
        )
      ) %>%
      mutate(
        datetimeEvent = anytime::anytime(datetimeEvent),
        dateEvent = datetimeEvent %>% anytime::anydate()
      ) %>%
      dplyr::select(
        dateEvent,
        datetimeEvent,
        currencyEvent,
        nameEvent,
        rankImportance,
        descriptionEvent,
        resultForecast,
        resultActual,
        resultPrevious,
        everything()
      ) %>%
      arrange(dateEvent, desc(rankImportance)) %>%
      dplyr::select(-idEvent)
    # Normalise empty strings to NA across all character columns.
    data <-
      data %>%
      mutate_if(is.character,
                funs(ifelse(. == '', NA, .)))
    # NOTE(review): the next pipeline's result is never assigned and the
    # mutate is a no-op (resultActual = resultActual); this looks like dead
    # code left over from a refactor.
    data %>%
      mutate(resultActual = resultActual)
    # Convert human-formatted result strings (e.g. "1.2b", "5%") to numbers.
    data <-
      data %>%
      mutate_at(c('resultActual', 'resultPrevious', 'resultForecast'),
                funs(. %>% map_dbl(function(x) {
                  parse_result(x = x)
                }))) %>%
      suppressWarnings()
    if (return_message) {
      glue::glue(
        "Returned {nrow(data)} events from {min(data$dateEvent)} to {max(data$dateEvent)}"
      ) %>%
        message()
    }
    gc()
    data
  }
# search ------------------------------------------------------------------
"curl 'https://data.tradingview.com/search/?=FOREST&exchange=&type=&hl=true&lang=en&domain=production' -H 'Origin: https://www.tradingview.com' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.24 Safari/537.36' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Referer: https://www.tradingview.com/chart/pKbLgoMZ/' -H 'Connection: keep-alive' -H 'DNT: 1' --compressed"
# popular -----------------------------------------------------------------
# Exchange codes accepted by the Tradingview hotlist endpoint
# (https://hotlist.tradingview.com/hotlist/<exchange>/<slug>/).
id_exchanges <- c(
  'US',
  'AMEX',
  'ASX',
  'BCBA',
  'BIST',
  'BME',
  'BMFBOVESPA',
  'BMV',
  'BSE',
  'EURONEXT',
  'FWB',
  'HKEX',
  'LSE',
  'LSIN',
  'MOEX',
  'NAG',
  'NASDAQ',
  'NSE',
  'NYSE',
  'NZX',
  'OTC',
  'SGX',
  'SIX',
  'TSE',
  'TSX',
  'TSXV',
  'XETR'
)
# Hotlist ranking slugs.  NOTE: "loosers" is Tradingview's own (misspelled)
# URL segment — do not "correct" it or the requests will 404.
hot_list_slugs <- c(
  'volume_gainers',
  'percent_change_loosers',
  'percent_change_gainers',
  'percent_range_gainers',
  'percent_range_loosers',
  'gap_gainers',
  'gap_loosers',
  'percent_gap_gainers',
  'percent_gap_loosers'
)
# https://hotlist.tradingview.com/hotlist/US/volume_gainers/
# https://hotlist.tradingview.com/hotlist/US/percent_change_loosers/
# https://hotlist.tradingview.com/hotlist/US/percent_change_gainers/
# https://hotlist.tradingview.com/hotlist/US/percent_range_gainers/
# https://hotlist.tradingview.com/hotlist/US/gap_gainers/
# company -----------------------------------------------------------------
# https://esd-feed.tradingview.com/estimates?symbol=xbit&exchange=nasdaq
# https://news-headlines.tradingview.com/headlines/yahoo/symbol/FB/?locale=en -- news
# Build a URL query-string fragment such as "&exchange=nyse".
# Returns "" when `value` is NULL so callers can append unconditionally.
generate_slug <-
  function(value = "nyse", sep_pre = "&", parameter = "exchange", symbol = "=", sep_post = "") {
    if (is.null(value)) {
      return("")
    }
    fragment <- glue::glue("{sep_pre}{parameter}{symbol}{value}{sep_post}")
    as.character(fragment)
  }
# Construct the Tradingview earnings-estimates feed URL for a ticker,
# optionally scoped to an exchange (omitted when `exchange` is NULL).
generate_ticker_estimates_url <-
  function(ticker = "cim", exchange = NULL) {
    exchange_fragment <- generate_slug(value = exchange)
    estimates_url <- glue::glue(
      "https://esd-feed.tradingview.com/estimates?symbol={ticker}{exchange_fragment}"
    )
    as.character(estimates_url)
  }
# search ------------------------------------------------------------------
# Build a randomised fake HTTP referer + user-agent pair, returned as a
# one-row data frame (urlReferer, userAgent).  Used to vary the request
# fingerprint when hitting Tradingview endpoints.
generate_url_reference <-
  function() {
    user_agents <-
      c(
        "Mozilla/5.0 (Linux; U; en-US) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) Version/4.0 Kindle/3.0 (screen 600x800; rotate)",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246",
        "Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"
      )
    # Filter out bot-like agents (none of the hard-coded strings match
    # "bot|slurp", so this filter is a defensive no-op), then pick one.
    user_agent <-
      user_agents[!user_agents %>% str_detect("bot|slurp")] %>%
      sample(1)
    # Random top-level domain for the fabricated referer.
    tl_domain <-
      c('.com', '.gov', '.org', '.mil', '.co') %>%
      sample(1)
    # Random domain name of 8-15 lowercase letters.
    word_length <-
      8:15
    words <-
      word_length %>% sample(1)
    domain_slug <-
      1:words %>%
      map_chr(function(x) {
        sample(letters, 1)
      }) %>%
      paste0(collapse = '')
    url <-
      list('http://', domain_slug, tl_domain) %>%
      purrr::reduce(paste0)
    df <-
      data_frame(urlReferer = url,
                 userAgent = user_agent)
    return(df)
  }
# Search Tradingview for a term (ticker or company name) via the data search
# endpoint, using Python's `requests` through reticulate (dict/tuple/import
# are reticulate helpers).  Returns a tibble of matches with a termSearch
# column prepended and <em> highlight tags stripped.
#
# NOTE(review): the `exchange` and `type` arguments are accepted but never
# forwarded — the query always sends empty exchange/type; confirm intent.
# NOTE(review): the endpoint's own example uses lang=en; 'eng' is sent here —
# verify the API accepts it.
get_tradeview_term <-
  function(term = "FB",
           exchange = NULL,
           type = NULL) {
    url <- 'https://data.tradingview.com/search/'
    # Randomised referer/user-agent to vary the request fingerprint.
    df_ref <- generate_url_reference()
    headers <-
      list(
        'Origin' = 'https://www.tradingview.com',
        'Accept-Encoding' = 'gzip, deflate, br',
        'Accept-Language' = 'en-US,en;q=0.9',
        'User-Agent' = df_ref$userAgent,
        'Accept' = 'application/json, text/javascript, */*; q=0.01',
        'Referer' = df_ref$urlReferer,
        'Connection' = 'close',
        'DNT' = '1'
      ) %>%
      dict()
    # Query string as an ordered tuple of (key, value) pairs.
    params <-
      tuple(
        tuple('text', term),
        tuple('exchange', ''),
        tuple('type', ''),
        tuple('hl', 'true'),
        tuple('lang', 'eng'),
        tuple('domain', 'production')
      )
    r <- import("requests")
    resp <- r$get(url = url,
                  headers = headers,
                  params = params)
    data <-
      resp$text %>%
      jsonlite::fromJSON(simplifyDataFrame = TRUE)
    # Strip the <em> highlighting the API wraps around matched substrings.
    data <-
      data %>%
      mutate_at(c("symbol", "description"),
                funs(. %>% str_replace_all("<em>|</em>", ""))) %>%
      tibble::as_data_frame() %>%
      mutate(termSearch = term) %>%
      dplyr::select(termSearch, everything()) %>%
      mutate_if(is.character,
                str_trim)
    data
  }
# scan --------------------------------------------------------------------
# {https://scanner.tradingview.com/uk/scan}
# Parse one regional Tradingview scanner URL
# (https://scanner.tradingview.com/<region>/scan) into a tidy data frame of
# listed securities: regionSecurities, idExchange, idTicker, classSecurity,
# urlJSON.  Tickers without an explicit class are labelled 'COMMON'.
parse_region_security_url <-
  function(url = "https://scanner.tradingview.com/america/scan",
           return_message = TRUE) {
    # The region name is whatever sits between the host and /scan.
    idRegion <-
      url %>% str_replace_all("https://scanner.tradingview.com/|/scan", '')
    data <-
      url %>%
      jsonlite::fromJSON(flatten = TRUE)
    # Column `s` holds "EXCHANGE:TICKER[.CLASS]" strings; split it apart.
    data <-
      data$data %>%
      select(1) %>%
      as_data_frame() %>%
      dplyr::rename(idExchangeTicker = s) %>%
      tidyr::separate(
        idExchangeTicker,
        into = c('idExchange',
                 'idTickerClass'),
        sep = '\\:'
      ) %>%
      tidyr::separate(idTickerClass,
                      into = c('idTicker', 'classSecurity')) %>%
      mutate_all(str_trim) %>%
      mutate(
        regionSecurities = idRegion,
        urlJSON = url,
        classSecurity = if_else(classSecurity %>% is.na(), 'COMMON', classSecurity)
      ) %>%
      suppressWarnings() %>%
      dplyr::select(regionSecurities, everything()) %>%
      arrange(idTicker)
    if (return_message) {
      glue::glue("Acquired {nrow(data)} listed securities in {idRegion %>% str_to_title()}") %>% message()
    }
    data
  }
# Fetch and parse many regional scanner URLs concurrently with curl's multi
# interface.  Each completed response is parsed by parse_region_security_url
# (wrapped in purrr::possibly so one bad region cannot abort the batch) and
# appended to the shared accumulator `df` via `<<-`.  Failed transfers are
# silently dropped.  Returns the row-bound results of all successful fetches.
parse_regions_security_urls <-
  function(urls,
           return_message = TRUE) {
    df <-
      data_frame()
    # Success callback: parse the finished response and grow the accumulator.
    success <- function(res) {
      parse_region_security_url_safe <-
        purrr::possibly(parse_region_security_url, data_frame())
      page_url <- res$url
      data <-
        page_url %>%
        parse_region_security_url_safe(return_message = return_message)
      df <<-
        df %>%
        bind_rows(data)
    }
    # Failure callback: swallow the error (best-effort scrape).
    failure <- function(msg) {
      data_frame()
    }
    # Queue every URL, then run all transfers to completion.
    urls %>%
      walk(function(x) {
        curl_fetch_multi(url = x, success, failure)
      })
    multi_run()
    df
  }
#' Tradingview regions traded securities
#'
#' Acquires ticker symbols for specified regions
#'
#' @param regions vector of regions \itemize{
#' \item america
#' \item uk
#' \item australia
#' \item brazil
#' \item canada
#' \item euronext
#' \item germany
#' \item hongkong
#' \item india
#' \item japan
#' \item mexico
#' \item newzealand
#' \item russia
#' \item singapore
#' \item spain
#' \item switzerland
#' \item taiwan
#' \item turkey
#' }
#' @param return_message if \code{TRUE} return message
#' @param nest_data
#' @import jsonlite glue dplyr purrr tidyr stringr
#' @return
#' @export
#'
#' @examples
get_data_tradingview_regions_tickers <-
  function(regions = c(
    'america',
    'uk',
    'australia',
    'brazil',
    'canada',
    'euronext',
    'germany',
    'hongkong',
    'india',
    'japan',
    'mexico',
    'newzealand',
    'russia',
    'singapore',
    'spain',
    'switzerland',
    'taiwan',
    'turkey'
  ),
  return_message = TRUE,
  nest_data = FALSE) {
    # Normalise region names: lower-case, no spaces.
    normalized_regions <- str_replace_all(str_to_lower(regions), '\\ ', '')
    # One scanner endpoint per region.
    scan_urls <-
      glue::glue("https://scanner.tradingview.com/{normalized_regions}/scan")
    # Fetch and parse all regions concurrently.
    all_data <-
      parse_regions_security_urls(scan_urls, return_message = return_message)
    # Optionally nest the ticker rows under each region/url.
    if (nest_data) {
      all_data <-
        nest(all_data, -c(urlJSON, regionSecurities), .key = 'dataTickers')
    }
    all_data
  }
# Parse one regional metainfo URL
# (https://scanner.tradingview.com/<region>/metainfo) into a data frame of
# searchable metric definitions: nameTW, baseTimeframe, typeField, plus the
# region and source URL.  Enumerated field members are preserved either as a
# nested data frame (dataField) or a comma-joined string (itemField).
parse_metric_dictionary_url <-
  function(url = "https://scanner.tradingview.com/america/metainfo",
           return_message = TRUE) {
    # Region name is whatever sits between the host and /metainfo.
    idRegion <-
      url %>% str_replace_all("https://scanner.tradingview.com/|/metainfo", '')
    data <-
      url %>%
      jsonlite::fromJSON(flatten = TRUE)
    # Field names may embed a timeframe after "|", e.g. "close|60".
    data <-
      data$fields %>%
      as_data_frame() %>%
      purrr::set_names(c('nameTW',
                         'typeField',
                         'fieldMembers')) %>%
      mutate(regionSecurities = idRegion,
             urlJSON = url) %>%
      dplyr::select(regionSecurities, everything()) %>%
      separate(nameTW,
               into = c('nameTW', 'baseTimeframe'),
               sep = '\\|') %>%
      suppressWarnings()
    # fieldMembers is a ragged list: NULL, a data.frame, or a char vector.
    df_fields <-
      1:length(data$fieldMembers) %>%
      map_df(function(x) {
        field <-
          data$fieldMembers[[x]]
        if (field %>% is_null()) {
          return(data_frame(idRow = x, dataField = NA))
        }
        if (field %>% class() == 'data.frame') {
          return(data_frame(idRow = x, dataField = list(field)))
        }
        data_frame(
          idRow = x,
          itemField = field %>% str_c(collapse = ', '),
          dataField = NA
        )
      })
    if (return_message) {
      glue::glue("Acquired {nrow(data)} searchable metrics for {idRegion} securities") %>% message()
    }
    # Join the parsed members back on by row position, then drop helpers.
    data %>%
      mutate(idRow = 1:n()) %>%
      left_join(df_fields %>%
                  mutate(countFieldRows = dataField %>% map_dbl(length))) %>%
      dplyr::select(-fieldMembers) %>%
      dplyr::select(-idRow) %>%
      suppressWarnings() %>%
      suppressMessages()
  }
# Fetch and parse many metainfo URLs concurrently with curl's multi
# interface; same accumulator pattern as parse_regions_security_urls.
# Each finished response goes through parse_metric_dictionary_url (wrapped
# in purrr::possibly), appended to `df` via `<<-`; failures are dropped.
parse_metric_dictionaries_url <-
  function(urls,
           return_message = TRUE) {
    df <-
      data_frame()
    # Success callback: parse and accumulate.
    success <- function(res) {
      parse_metric_dictionary_url_safe <-
        purrr::possibly(parse_metric_dictionary_url, data_frame())
      page_url <- res$url
      data <-
        page_url %>%
        parse_metric_dictionary_url_safe(return_message = return_message)
      df <<-
        df %>%
        bind_rows(data)
    }
    # Failure callback: swallow the error (best-effort scrape).
    failure <- function(msg) {
      data_frame()
    }
    urls %>%
      walk(function(x) {
        curl_fetch_multi(url = x, success, failure)
      })
    multi_run()
    df
  }
#' Tradingview searchable metrics by region
#'
#' Get searchable metrics by region
#' @param regions vector of regions \itemize{
#' \item america
#' \item uk
#' \item australia
#' \item brazil
#' \item canada
#' \item euronext
#' \item germany
#' \item hongkong
#' \item india
#' \item japan
#' \item mexico
#' \item newzealand
#' \item russia
#' \item singapore
#' \item spain
#' \item switzerland
#' \item taiwan
#' \item turkey
#' }
#' @param return_message if \code{TRUE} return message
#' @param nest_data
#' @import jsonlite glue dplyr purrr tidyr stringr
#' @return
#' @export
#'
#' @examples
get_data_regions_tradingview_metrics <-
  function(regions = c(
    'america',
    'uk',
    'australia',
    'brazil',
    'canada',
    'euronext',
    'germany',
    'hongkong',
    'india',
    'japan',
    'mexico',
    'newzealand',
    'russia',
    'singapore',
    'spain',
    'switzerland',
    'taiwan',
    'turkey'
  ),
  return_message = TRUE,
  nest_data = FALSE) {
    # Normalise region names: lower-case, no spaces.
    normalized_regions <- str_replace_all(str_to_lower(regions), '\\ ', '')
    # One metainfo endpoint per region.
    metainfo_urls <-
      glue::glue("https://scanner.tradingview.com/{normalized_regions}/metainfo")
    # Fetch and parse all regions concurrently.
    all_data <-
      parse_metric_dictionaries_url(metainfo_urls,
                                    return_message = return_message)
    # Optionally nest the metric rows under each region/url.
    if (nest_data) {
      all_data <-
        nest(all_data, -c(urlJSON, regionSecurities), .key = 'dataMetrics')
    }
    all_data
  }
# metric_query ------------------------------------------------------------
#' Generate tradeview metric query
#'
#' @param filter
#' @param symbols
#' @param metrics
#' @param sort
#' @param options
#' @param range
#'
#' @return
#' @export
#' @import reticulate magrittr glue dplyr
#' @examples
#' Generate a Tradingview scanner metric-query JSON body
#'
#' Builds the JSON payload POSTed to the scanner endpoints.  'name' is always
#' forced to the front of the requested columns so downstream code can
#' re-attach tickers to their metric rows.
#'
#' @param filter data frame of filter clauses (left/operation)
#' @param symbols list describing which security types to scan
#' @param metrics character vector of scanner column names to request
#' @param sort list with sortBy / sortOrder
#' @param options list of query options (e.g. language)
#' @param range two-element numeric vector: result row window
#'
#' @return a JSON string (jsonlite character) for the scanner POST body
#' @export
#' @import reticulate magrittr glue dplyr
#' @examples
generate_trade_view_metric_query <-
  function(filter = data_frame(left = 'market_cap_basic',
                               operation = 'nempty'),
           symbols = list(query = list(types = c('stock', 'fund', 'dr'))),
           metrics = c("change", "change_abs", "close", "description", "earnings_per_share_basic_ttm",
                       "market_cap_basic", "name", "number_of_employees", "price_earnings_ttm",
                       "sector", "volume"),
           sort = list(sortBy = 'market_cap_basic',
                       sortOrder = 'desc'),
           options = list(lang = 'eng'),
           range = c(0, 1500000)) {
    # 'name' must be the first column; drop pseudo-metrics the API rejects.
    metrics <-
      c('name', metrics) %>%
      unique()
    metrics <-
      metrics[!metrics %in% c('component', 'index')]
    data_query <-
      list(
        filter = filter,
        symbols = symbols,
        columns = metrics,
        # BUG FIX: this key was misspelled 'sory', so the scanner API
        # silently ignored the requested sort order.
        sort = sort,
        options = options,
        range = range
      ) %>%
      toJSON(auto_unbox = TRUE)
    data_query
  }
# POST a metric query to one regional scanner endpoint (via Python requests
# through reticulate) and tidy the response into long format: one row per
# (ticker, metric) with columns idRegion, idExchange, idTicker,
# classSecurity, value, countItem.  countItem is the metric's position in
# the requested column list, used downstream to re-attach metric names.
parse_tradeview_metric_url <-
  function(url = 'https://scanner.tradingview.com/america/scan',
           data_query = '{"filter":[{"left":"market_cap_basic","operation":"nempty"}],"symbols":{"query":{"types":["stock","fund","dr"]}},"columns":["name","Recommend.All","sector","close","change_abs","change","volume","market_cap_basic","price_earnings_ttm","earnings_per_share_basic_ttm","number_of_employees","description","name","Recommend.All"],"sort":{"sortBy":"market_cap_basic","sortOrder":"desc"},"options":{"lang":"en"},"range":[0,1500000000]}',
           return_message = TRUE) {
    # Region name is whatever sits between the host and /scan.
    idRegion <-
      url %>% str_replace_all("https://scanner.tradingview.com/|/scan", "")
    requests <-
      reticulate::import("requests")
    json <-
      requests$post(url = url, data = data_query)
    json_data <-
      json$content %>%
      fromJSON(simplifyDataFrame = TRUE, flatten = TRUE)
    json$close()
    # Unnest the per-ticker value lists, number each metric within a ticker,
    # then split "EXCHANGE:TICKER[.CLASS]" identifiers apart.
    data <-
      json_data$data %>% as_data_frame() %>% unnest() %>%
      purrr::set_names(c('idExchangeTicker', 'value')) %>%
      group_by(idExchangeTicker) %>%
      mutate(countItem = 1:n()) %>%
      ungroup() %>%
      tidyr::separate(
        idExchangeTicker,
        into = c('idExchange',
                 'idTickerClass'),
        sep = '\\:'
      ) %>%
      tidyr::separate(idTickerClass,
                      into = c('idTicker', 'classSecurity')) %>%
      mutate_all(str_trim) %>%
      mutate(idRegion,
             classSecurity = if_else(classSecurity %>% is.na(), 'COMMON', classSecurity)) %>%
      suppressWarnings() %>%
      dplyr::select(idRegion, everything())
    if (return_message) {
      tickers <-
        data$idTicker %>% unique() %>% length() %>% formattable::comma(digits = 0)
      glue::glue(
        "Acquired {nrow(data) %>% formattable::comma(digits = 0)} listed metrics for {tickers} securities in {idRegion %>% str_to_title()}"
      ) %>% message()
    }
    gc()
    data
  }
#' Tradeview region metrics
#'
#' Get data for trade view query
#'
#' @param regions vector of regions \itemize{
#' \item america
#' \item uk
#' \item australia
#' \item brazil
#' \item canada
#' \item euronext
#' \item germany
#' \item hongkong
#' \item india
#' \item japan
#' \item mexico
#' \item newzealand
#' \item russia
#' \item singapore
#' \item spain
#' \item switzerland
#' \item taiwan
#' \item turkey
#' }
#' @param query list of query parameters \itemize{
#' \item filter - list of query parameters
#' \item symbols - list of types
#' \item metrics - vector of parameters see \code{get_data_tradingview_regions_tickers} for options
#' \item sort - sort paramters
#' \item options- sort options
#' }
#' @param return_message
#'
#' @return
#' @export
#' @import reticulate dplyr purrr stringr glue
#'
#' @examples
# Run a scanner metric query across regions and reshape the results wide.
# Pipeline: build the JSON body -> POST to each region -> long (ticker,
# metric, value) rows -> attach metric names by position -> split into text
# vs numeric fields (using the region's metainfo dictionary) -> spread both
# wide and join.  Columns that are entirely NA are dropped at the end.
# Requires Python + the `requests` module (via reticulate).
get_tradeview_regions_metrics <-
  function(regions = c('canada', 'america'),
           query = list(
             filter = data_frame(left = 'market_cap_basic',
                                 operation = 'nempty'),
             symbols = list(query = list(types = c(
               'stock', 'fund', 'dr'
             ))),
             metrics = c("description","subtype", "beta_5_year", "earnings_release_date", "earnings_per_share_forecast_next_fq",
                         "operating_margin", "return_on_equity", "current_ratio", "debt_to_assets",
                         "price_revenue_ttm", "amount_recent", "market_cap_basic", "ebitda",
                         "fundamental_currency_code", "total_assets", "current_session",
                         "earnings_per_share_fq", "earnings_per_share_forecast_fq", "earnings_release_next_time",
                         "cash_ratio", "yield_upcoming", "sector", "basic_eps_net_income",
                         "price_book_ratio", "quick_ratio", "net_debt", "total_shares_outstanding_fundamental",
                         "enterprise_value_fq", "beta_3_year", "total_capital", "earnings_per_share_diluted_ttm",
                         "last_annual_eps", "revenue_fq", "ex_dividend_date_recent", "price_earnings_ttm",
                         "debt_to_equity", "pre_tax_margin", "debt_to_equity_fq", "number_of_employees",
                         "total_current_assets", "last_annual_revenue", "revenue_forecast_fq",
                         "industry", "return_on_assets", "return_of_invested_capital_percent_ttm",
                         "return_on_invested_capital", "gross_profit", "dividends_paid",
                         "preferred_dividends", "earnings_release_next_date", "dividends_yield",
                         "price_sales_ratio", "yield_recent", "ex_dividend_date_upcoming",
                         "total_shares_outstanding", "price_earnings_to_growth_ttm", "price_book_fq",
                         "enterprise_value_ebitda_ttm", "rtc", "amount_upcoming", "average_volume",
                         "revenue_per_employee", "after_tax_margin", "net_income", "earnings_release_time",
                         "type_recent", "dividends_per_share_fq", "payment_date_upcoming",
                         "gross_margin_percent_ttm", "earnings_per_share_basic_ttm", "price_free_cash_flow_ttm",
                         "long_term_capital", "total_debt", "country", "total_revenue",
                         "gross_margin", "number_of_shareholders", "type_upcoming", "beta_1_year",
                         "goodwill", "expected_annual_dividends", "revenue_forecast_next_fq",
                         "payment_date_recent", "low", "volume", "pre_change_abs", "gap",
                         "open", "volume", "pre_change_abs", "time", "change_from_open",
                         "low", "high", "close", "volume", "change_abs", "open", "change_from_open",
                         "change_abs", "time", "change", "pre_change_abs", "time", "gap",
                         "high", "open", "change_from_open", "change_abs", "low", "close",
                         "change", "change_abs", "close", "time", "change", "pre_change",
                         "close", "high", "gap", "change", "open", "high", "pre_change",
                         "pre_change", "pre_change_abs", "gap", "pre_change", "change_from_open",
                         "low", "volume", "relative_volume", "type", "subtype", "eps_surprise_fq",
                         "market_cap_calc", "exchange", "price_sales", "eps_surprise_percent_fq"
             )
             ,
             sort = list(sortBy = 'market_cap_basic',
                         sortOrder = 'desc'),
             options = list(lang = 'eng'),
             range = c(0, 15000000000000)
           ),
           return_message = TRUE) {
    options(scipen = 99999)
    glue::glue("\n\nWARNING -- this function requires Python and the requests module!!!!\n\n") %>%
      message()
    urls <-
      glue::glue("https://scanner.tradingview.com/{regions}/scan")
    # Build the JSON POST body once; magrittr %$% exposes query's elements.
    data_query <-
      query %$%
      generate_trade_view_metric_query(
        filter = filter,
        symbols = symbols,
        metrics = metrics,
        sort = sort,
        options = options,
        range = range
      )
    # Long-format results for every region, one row per (ticker, metric).
    data <-
      1:length(urls) %>%
      map_df(function(x) {
        parse_tradeview_metric_url(url = urls[x], data_query = data_query)
      }) %>%
      mutate(countItem = countItem %>% as.integer())
    # Metric names are re-attached by their position in the column list
    # ('name' is always first, mirroring generate_trade_view_metric_query).
    metrics <-
      data_frame(nameTW = c('name', query$metrics) %>% unique()) %>%
      mutate(countItem = 1:n())
    data <-
      data %>%
      left_join(metrics) %>%
      suppressMessages()
    # The 'name' rows carry the ticker; fill it down, then drop them.
    data <-
      data %>%
      mutate(idTicker = ifelse(nameTW == "name", value, NA)) %>%
      fill(idTicker) %>%
      filter(!nameTW == 'name')
    # Use the first region's metainfo dictionary to learn each field's type.
    # NOTE(review): assumes all requested regions share the same field types.
    df_metrics <-
      get_data_regions_tradingview_metrics(regions = regions[1])
    data <-
      data %>%
      left_join(df_metrics %>%
                  select(nameTW, typeField)) %>%
      distinct() %>%
      suppressMessages() %>%
      filter(!nameTW %in% c('component', 'index'))
    # Text-valued fields (and unknown types) spread wide as-is.
    df_companies <-
      data %>%
      filter(typeField %in% c(NA, 'text')) %>%
      dplyr::select(idRegion:value, nameTW) %>%
      spread(nameTW, value)
    # Everything else is numeric; parse before spreading.
    df_values <-
      data %>%
      filter(!typeField %in% c(NA, 'text')) %>%
      dplyr::select(idExchange:value, nameTW) %>%
      mutate(value = value %>% readr::parse_number()) %>%
      spread(nameTW, value)
    # Recombine and drop columns that are entirely NA.
    data <-
      df_companies %>%
      left_join(df_values) %>%
      suppressMessages() %>%
      dplyr::select(which(colMeans(is.na(.)) < 1))
    data
  }
# news --------------------------------------------------------------------
# https://news-headlines.tradingview.com/headlines/yahoo/symbol/FB/?locale=en
# Fetch Yahoo-sourced news headlines for one ticker from Tradingview.
# Returns a tibble with idTicker, datetimePublished, titleArticle,
# descriptionArticle, urlArticle and the source urlJSON.
get_ticker_tradingview_news <-
  function(ticker = "FB") {
    url <-
      glue::glue("https://news-headlines.tradingview.com/headlines/yahoo/symbol/{ticker}")
    raw_articles <- jsonlite::fromJSON(url, simplifyDataFrame = TRUE)
    # Drop the feed's first (id) column and apply tidy names.
    articles <- dplyr::as_data_frame(raw_articles)
    articles <- dplyr::select(articles, -1)
    articles <- purrr::set_names(
      articles,
      c('urlArticle',
        'titleArticle',
        'descriptionArticle',
        'datetimePublished')
    )
    # Attach the ticker and source URL; parse the publication timestamp.
    articles <- mutate(
      articles,
      idTicker = ticker,
      datetimePublished = anytime::anytime(datetimePublished),
      urlJSON = url
    )
    dplyr::select(
      articles,
      idTicker,
      datetimePublished,
      titleArticle,
      descriptionArticle,
      everything()
    )
  }
# Parse one Tradingview/Yahoo news-headline URL into a tidy tibble.
# The ticker is recovered from the URL itself; output columns are idTicker,
# datetimePublished, titleArticle, descriptionArticle, urlArticle, urlJSON.
# (Same output shape as get_ticker_tradingview_news; this variant is the
# worker used by the concurrent batch fetcher.)
parse_trading_view_news_url <-
  function(url = "https://news-headlines.tradingview.com/headlines/yahoo/symbol/FB",
           return_message = TRUE) {
    # The ticker is the trailing path segment of the URL.
    ticker <-
      url %>% str_replace_all("https://news-headlines.tradingview.com/headlines/yahoo/symbol/",
                              '')
    if (return_message) {
      glue::glue("Acquiring Tradingview news for {ticker}") %>%
        message()
    }
    data <-
      url %>%
      jsonlite::fromJSON(simplifyDataFrame = TRUE) %>%
      dplyr::as_data_frame() %>%
      dplyr::select(-1) %>%
      purrr::set_names(c(
        'urlArticle',
        'titleArticle',
        'descriptionArticle',
        'datetimePublished'
      )) %>%
      mutate(
        idTicker = ticker,
        datetimePublished = anytime::anytime(datetimePublished),
        urlJSON = url
      ) %>%
      dplyr::select(idTicker,
                    datetimePublished,
                    titleArticle,
                    descriptionArticle,
                    everything())
    gc()
    data
  }
# Fetch and parse many news-headline URLs concurrently with curl's multi
# interface; same accumulator pattern as parse_regions_security_urls.
# Each finished response goes through parse_trading_view_news_url (wrapped
# in purrr::possibly), appended to `df` via `<<-`; failures are dropped.
parse_tradingview_news_urls <-
  function(urls,
           return_message = TRUE) {
    df <-
      data_frame()
    # Success callback: parse and accumulate.
    success <- function(res) {
      parse_trading_view_news_url_safe <-
        purrr::possibly(parse_trading_view_news_url, data_frame())
      page_url <- res$url
      data <-
        page_url %>%
        parse_trading_view_news_url_safe(return_message = return_message)
      df <<-
        df %>%
        bind_rows(data)
    }
    # Failure callback: swallow the error (best-effort scrape).
    failure <- function(msg) {
      data_frame()
    }
    urls %>%
      walk(function(x) {
        curl_fetch_multi(url = x, success, failure)
      })
    multi_run()
    df
  }
#' Tradingview tickers news
#'
#' Returns news data for specified tickers
#'
#' @param tickers
#' @param return_message
#' @param nest_data
#' @import dplyr tibble glue anytime tidyr curl jsonlite
#' @return
#' @export
#'
#' @examples
#' get_data_tickers_tradingview_news(tickers = c("VNO", "AVB", "PEI"), return_message = TRUE, nest_data = FALSE)
get_data_tickers_tradingview_news <-
  function(tickers = c("FB", "AAPL", "NFLX", "GOOG", "VNO", "EQR", "BXP"),
           return_message = TRUE,
           nest_data = FALSE) {
    # One news-headlines endpoint per ticker.
    news_urls <-
      glue::glue("https://news-headlines.tradingview.com/headlines/yahoo/symbol/{tickers}")
    # Fetch and parse all tickers concurrently.
    all_data <-
      parse_tradingview_news_urls(news_urls, return_message = return_message)
    # Optionally nest the article rows under each ticker/url.
    if (nest_data) {
      all_data <-
        tidyr::nest(all_data, -c(idTicker, urlJSON), .key = 'tickerNews')
    }
    all_data
  }
|
82e2abc4cd81a941df678ba9701faa1a86a026f8
|
1f3ca2b315f9281595a5680531bef01381454f16
|
/R/PATHWAY.r
|
a3746658d8e1d12c7610a32adac5b19ad98f2374
|
[] |
no_license
|
LST512/NGS-script
|
3e91c8ca7a9e3c9ec553acbc79ad33a95dac4b6c
|
2491e9461879164ef6b918e987d5afbdc38c5cf7
|
refs/heads/master
| 2020-04-18T03:20:57.374498
| 2019-07-14T09:28:05
| 2019-07-14T09:28:05
| 167,195,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,904
|
r
|
PATHWAY.r
|
# PATHWAY enrichment bar chart (DAVID GO output), drawn with ggplot2.
# Walks step by step from a basic bar chart to a flipped, colour-filled
# chart with significance-threshold lines.
# NOTE(review): the script intentionally clears the workspace and changes
# the working directory — kept for backward compatibility, but both are
# discouraged in shared scripts.
rm(list=ls())
setwd("~/data/lst_data/AN3661/raw_data/DEPAC/GO/DAVIDGO")
library(ggplot2)
#------------------------------------
# Bar chart
#------------------------------------
# Read the DAVID pathway-enrichment table.
pathway=read.table("pathway_depac_up.tsv",header=TRUE,sep="\t")
# Initialise the plot: pathways on x, -log10(P) on y.
pathbar = ggplot(pathway,aes(x=Pathway,y=-1*log10(PValue)))
# Basic bar chart.
pathbar + geom_bar(stat="identity")
# Flip the bars horizontally.
pathbar + geom_bar(stat="identity") + coord_flip()
# Change fill and border colours.
pathbar+geom_bar(stat="identity",color="red",fill="blue")+coord_flip()
# Use -log10(PValue) as the fill colour.
pathbar+geom_bar(stat="identity",aes(fill=-1*log10(PValue)))+coord_flip()
## Draw the bars in the order they appear in the input data.
# Build a factor recording the original pathway order.
porder=factor(as.integer(rownames(pathway)),labels=pathway$Pathway)
# Draw the bars using that factor order.
pathbar+geom_bar(stat="identity",aes(x=porder,fill=-1*log10(PValue)))+coord_flip()
# Reverse the pathway order with rev().
# BUG FIX: this line was missing a closing parenthesis for geom_bar(),
# which made the whole file fail to parse.
pathbar+geom_bar(stat="identity",aes(x=rev(porder),fill=-1*log10(rev(PValue))))+coord_flip()
# The correct approach: reverse the labels when building the factor.
porder=factor(as.integer(rownames(pathway)),labels=rev(pathway$Pathway))
pathbar+geom_bar(stat="identity",aes(x=rev(porder),fill=-1*log10(PValue)))+coord_flip()
# Remove the legend.
pathbar+geom_bar(stat="identity",aes(x=rev(porder),fill=-1*log10(rev(PValue))))+coord_flip()+theme(legend.position="none")
# Set the title and axis labels.
pqbar=pathbar+geom_bar(stat="identity",aes(x=rev(porder),fill=-1*log10(PValue)))+coord_flip()+theme(legend.position="none")+ labs(title="",y=expression(-log[10](PValue)))
# Add significance-threshold lines.
# Because of coord_flip(), use geom_hline() rather than geom_vline().
pqbar+geom_hline(yintercept=2,color=c("red"),linetype=4)
pqbar+geom_hline(yintercept=c(1.3,2),color=c("darkred","red"),linetype=4)
|
ad6e0e8f68b52528e8afb3f79e36c5c9eef4f068
|
fa8aed0f03a136ea6977b6c2daaa97ee3a9162c2
|
/man/expectation.Rd
|
8ec13e7164e6879309cfdd6b6d157bea47f57afa
|
[] |
no_license
|
cran/lestat
|
d73827e8f7e9b45b85c3cdbf56b711c7c2bf9742
|
421a9c585b26d41c1db4c51bd2f9a73588b15ecf
|
refs/heads/master
| 2021-01-15T19:40:10.190898
| 2018-06-12T17:29:29
| 2018-06-12T17:29:29
| 17,697,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,165
|
rd
|
expectation.Rd
|
\name{expectation}
\alias{expectation}
\alias{expectation.betabinomial}
\alias{expectation.betadistribution}
\alias{expectation.binomialdistribution}
\alias{expectation.default}
\alias{expectation.discretedistribution}
\alias{expectation.expgamma}
\alias{expectation.fdistribution}
\alias{expectation.gammadistribution}
\alias{expectation.mdiscretedistribution}
\alias{expectation.mnormalexpgamma}
\alias{expectation.mnormalgamma}
\alias{expectation.mnormal}
\alias{expectation.mtdistribution}
\alias{expectation.muniformdistribution}
\alias{expectation.normalexpgamma}
\alias{expectation.normalgamma}
\alias{expectation.normal}
\alias{expectation.poissondistribution}
\alias{expectation.tdistribution}
\alias{expectation.uniformdistribution}
\title{
Compute Expectation
}
\description{
Compute the expectation of a probability distribution.
}
\usage{
expectation(object)
}
\arguments{
\item{object}{
A probability distribution.
}
}
\value{
The expectation of the probability distribution.
}
\author{
Petter Mostad <mostad@chalmers.se>
}
\seealso{
\code{\link{variance}}
}
\examples{
expectation(normal(3, log(2)))
expectation(binomialdistribution(7, 0.3))
}
|
1a86034e1330a01d826a2da9e87924f02604da09
|
3d353d93d89cd338e60271a1eeb9b22b26cfa17a
|
/wd-nicks/code/bias lab.r
|
34db890aa39ace44b298a65f2c1d2a0d78de9907
|
[] |
no_license
|
ceekr/_DataAnalysisRepo
|
619735a0cea335b1be470b2dfd8228129cacc145
|
3b7f925e490a781d579c2f2f86e913e078d70bb2
|
refs/heads/master
| 2020-06-01T13:22:28.711591
| 2013-05-02T05:15:42
| 2013-05-02T05:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,464
|
r
|
bias lab.r
|
# Lab: bias of three estimators of an exponential rate parameter.
# 1000 samples of 1000 exponential draws (true rate = 2) are generated, and
# lambda is estimated per sample by nonlinear least squares, a log-linear
# transformation, and maximum likelihood.  Bootstrap 95% CIs are then built
# for the first 100 samples and checked for coverage of the true rate.
# Generate exponential data:
# create 1000 samples of 1000 data points (say, survival times) each.
expdata <- array(NA, c(1000,1000)) #Preallocate array
for (i in 1:1000) { #Fill each row (sorted decreasing to mimic a survival curve)
  expdata[i,] <- sort(rexp(1000, rate=2), decreasing=TRUE)
}
hist(expdata[1,])
# Rank variable paired with the sorted survival times.
y <- 1:1000
plot(expdata[1,], y)
# For each sample, estimate lambda by nonlinear least squares,
# log transformation, and maximum likelihood.
# Nonlinear least squares (nls sometimes fails to converge).
nlsEst <- rep(NA, 1000)
for (i in 1:1000) {
  try(nlsEst[i] <- coef(nls(y ~ a*exp(-lambda*expdata[i,])+b, start=list(a=1000, lambda=1, b=1)))[2], silent=TRUE) #Ignore errors; failed fits stay NA
}
# Log transformation: slope of log(y) on x gives -lambda.
logEst <- rep(NA, 1000)
for (i in 1:1000) {
  logEst[i] = -coef(lm(log(y) ~ expdata[i,]))[2]
}
# Maximum likelihood via MASS::fitdistr.
library(MASS)
mleEst <- rep(NA, 1000)
for (i in 1:1000) {
  mleEst[i] <- fitdistr(expdata[i,], "Exponential")[1]$estimate
}
# Histogram each estimator across samples; means show bias vs the true 2.
x11(); hist(nlsEst)
mean(nlsEst, na.rm=TRUE)
x11(); hist(mleEst)
mean(mleEst)
x11(); hist(logEst)
mean(logEst)
# Bootstrap CIs for the first 100 data sets (1000 resamples each).
indvec = 1:1000 #Set up index vector
# Preallocate space (one row per data set, one column per bootstrap rep).
nlsboot <- array(NA, c(100,1000))
logboot <- array(NA, c(100,1000))
mleboot <- array(NA, c(100,1000))
for (i in 1:100) {
  expvec <- expdata[i,] #Pick data set to resample
  #Main bootstrap loop: resample (x, y) pairs with replacement
  for (j in 1:1000) {
    indvecboot <- sample(indvec, length(indvec), replace=TRUE)
    psexpvec <- expvec[indvecboot]
    psy <- y[indvecboot]
    #Estimate lambda with each method (failed nls fits become NA via
    #as.numeric() on the try-error object, with a warning)
    nlsboot[i,j] <- as.numeric(try(coef(nls(psy ~ a*exp(-lambda*psexpvec)+b, start=list(a=1000, lambda=1, b=1)))[2], silent=TRUE))
    logboot[i,j] <- -coef(lm(log(psy) ~ psexpvec))[2]
    mleboot[i,j] <- fitdistr(psexpvec, "Exponential")[1]$estimate
  }
}
# Find the 2.5% and 97.5% percentiles; how often do they bracket the
# true rate (2)?
nlsright <- rep(NA, 100)
logright <- rep(NA, 100)
mleright <- rep(NA, 100)
for (i in 1:100) {
  #NLS
  if (quantile(nlsboot[i,], 0.025, na.rm=TRUE) <= 2 & quantile(nlsboot[i,], 0.975, na.rm=TRUE) >= 2)
    nlsright[i] <- 1
  else
    nlsright[i] <- 0
  #Log transform
  if (quantile(logboot[i,], 0.025) <= 2 & quantile(logboot[i,], 0.975) >= 2)
    logright[i] <- 1
  else
    logright[i] <- 0
  #MLE estimation
  if (quantile(mleboot[i,], 0.025) <= 2 & quantile(mleboot[i,], 0.975) >= 2)
    mleright[i] <- 1
  else
    mleright[i] <- 0
}
#What fraction of CIs contained the true value (nominal coverage 95%)?
mean(nlsright)
mean(logright)
mean(mleright)
|
f07f57f098beb21286428fe353dc185d199d8047
|
fd91fd81027df91f03e29138b26e2a1b6e31e054
|
/R/RandomTreeScore.R
|
8458d497da107985cf140e0abc968dce5388ffe5
|
[] |
no_license
|
gitter-badger/TreeSearch
|
77fa06b36d691f942c8ef578f35f3e005cc2f13e
|
5a95195211d980baa6db29260bf929a12c5bf707
|
refs/heads/master
| 2022-04-20T07:40:33.050434
| 2020-04-16T13:47:57
| 2020-04-16T13:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,448
|
r
|
RandomTreeScore.R
|
#' Parsimony score of random postorder tree
#'
#' @param nTip number of tips (minimum 3)
#' @template morphyObjParam
#'
#' @return the parsimony score of a random tree, for the given Morphy object.
#'
#' @export
RandomTreeScore <- function (nTip, morphyObj) {
  if (nTip >= 3) {
    # Delegate random-tree generation and scoring to the compiled routine
    return(.Call('RANDOM_TREE_SCORE', as.integer(nTip), morphyObj))
  }
  # Fewer than three tips admits only one unrooted topology, so score is 0
  warning("nTip < 3 not implemented, as there's only one unrooted topology.")
  0
}
#' Random postorder tree
#'
#' @param nTip Integer specifying the number of tips to include in the tree
#' (minimum 2).
#'
#' @return A list with three elements, each a vector of integers, respectively
#' containing:
#'
#' - The parent of each tip and node, in order
#'
#' - The left child of each node
#'
#' - The right child of each node.
#'
#' @family tree generation functions
#' @export
RandomMorphyTree <- function (nTip) {
  if (nTip >= 2) {
    # Tree construction happens entirely in the compiled routine
    return(.Call('RANDOM_TREE', as.integer(nTip)))
  }
  stop("nTip < 2 not implemented: a tip is not a tree.")
}
#' @importFrom graphics plot
# Plot a Morphy tree (list of parent / left-child / right-child integer
# vectors) by converting it into a "phylo"-classed edge matrix and
# dispatching to plot(). Only the two child vectors are needed to
# reconstruct the edges; the parent vector is not used for plotting.
plot.morphyTree <- function (morphyTree) {
  leftChild <- morphyTree[[2]]
  rightChild <- morphyTree[[3]]
  nTip <- length(leftChild) + 1L
  # Each internal node id appears twice (once per child); the final +1L
  # shifts the 0-based Morphy ids to the 1-based ids "phylo" uses.
  nodeIds <- rep(seq(nTip, len = nTip - 1L), 2)
  edge <- matrix(c(nodeIds, rightChild, leftChild), ncol = 2) + 1L
  phy <- structure(list(edge = edge,
                        Nnode = nTip - 1L,
                        tip.label = seq_len(nTip) - 1L),
                   class = 'phylo')
  plot(phy)
}
|
3d55f8bbe4c83cd86cb3440bf260da00ccc97aa0
|
bb5c3c3af07cbd4230773b1bed5e2f28705077cd
|
/barcode.R
|
a4a320043aa0ce4eb7423c9b3675c6404e5f7ea0
|
[] |
no_license
|
nchintamaneni/strip_channels
|
382fb4cda95a0758a7715670dfc46afd5029fe10
|
196b5d64688fa6b46733b0aed0cd493e2dc838c3
|
refs/heads/master
| 2020-06-30T03:24:44.722641
| 2019-12-01T19:56:03
| 2019-12-01T19:56:03
| 200,707,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,926
|
r
|
barcode.R
|
# Usage: In Rstudio terminal (or command line if RStudio.exe is added to path), type command
# $ Rscript barcode.R [path/to/folder/of/csv/files]
# Description:
# This Rscript will plot a shaded barcode graphs given a folder of csv files.
# The length / max x value is determined by the longest csv in the folder. Files shorter than the longest
# will have trailing white spaces that's indictive of how much shorter it is than the longest.
# This is a *shaded* barcode graph because it will assign different shade of gray depending on
# what category the dB falls into.
# Currently, the categories for this rscript is: -infty ~ -50 ~ -30 ~ -20 ~ + infty
# Notes:
# This Rscript assumes there exists a $time column, provided in seconds (s), and a $Peak.Level column in the input csv files.
# Ideally, the input folder of csvs is the result of running csvGenerator.py
# Author: Jiachen (Amy) Liu
# libraries for plotting
# NOTE(review): library()'s second positional argument is `help`, not a
# second package, so `reshape` is NOT attached here. The code below calls
# reshape2::melt() with an explicit namespace anyway — confirm intent.
library(ggplot2, reshape)
library("cowplot")
theme_set(theme_cowplot())
# First command-line argument: path to the folder of csv files to plot
args = commandArgs(trailingOnly=TRUE)
# Scan a vector of csv paths and return the largest duration found.
#
# fileList: character vector of csv file paths; each is expected to have a
#           `time` column in seconds.
# Returns the maximum duration across all readable files (0 if none).
#
# Files that cannot be read, lack a `time` column, or have fewer than two
# time rows are skipped instead of aborting the whole scan. (The original
# crashed with "argument is of length zero" on such files, because
# durationList[length(durationList) - 1] is numeric(0) there and a
# zero-length value is illegal inside `if`.)
#
# NOTE(review): the duration is taken from the SECOND-to-last time stamp,
# exactly as in the original; plotRMS() uses the last one. Kept as-is —
# confirm whether the final row is deliberately excluded here.
findMaxDuration <- function(fileList, verbose=FALSE, showWarnings=TRUE) {
  max_dur <- 0
  for (file in fileList) {
    durationList <- tryCatch(read.csv(file, header = TRUE)$time,
                             error = function(e) NULL)
    duration <- durationList[length(durationList) - 1]
    # Accept only a single, non-missing value before comparing
    if (length(duration) == 1 && !is.na(duration) && duration > max_dur) {
      max_dur <- duration
    }
  }
  return(max_dur)
}
# Build a shaded barcode ggplot for one csv of audio peak levels.
#
# fileName: path to a csv containing a `time` column (seconds) and a
#           `Peak.Level` column (dB).
# maxDur:   shared x-axis upper limit, so shorter files show trailing white
#           space proportional to how much shorter they are.
# Returns the ggplot object (nothing is written to disk here).
#
# dB categories: Silence < -50 <= Noise < -30 <= LowEnergy < -20 <= HighEnergy
plotRMS <- function (fileName, maxDur, verbose=FALSE, showWarnings=TRUE) {
  fullTable <- read.csv(fileName)
  peaks <- fullTable$Peak.Level
  # 0/1 indicator per category, computed vectorised.
  # (The original re-read the same csv four more times and classified each
  # row in a scalar loop; the values produced are identical.)
  silence <- as.numeric(peaks < -50)
  noise   <- as.numeric(peaks >= -50 & peaks < -30)
  low     <- as.numeric(peaks >= -30 & peaks < -20)
  high    <- as.numeric(peaks >= -20)
  timeStampTemp <- fullTable$time
  # Wide data frame, then melt to long format for the stacked bar chart
  dat <- data.frame(timeStamp = timeStampTemp, Silence = silence,
                    Noise = noise, LowEnergy = low, HighEnergy = high)
  dat.m <- reshape2::melt(dat, id.vars = c("timeStamp"))
  # File title plus an "X min Y sec" duration string for the plot header
  fileTitle <- tail(strsplit(fileName, split = "/")[[1]], 1)
  dur <- timeStampTemp[length(timeStampTemp)]
  min <- trunc(dur / 60)
  sec <- trunc(dur - min * 60)
  durationString <- paste(min, "min", sec, "sec")
  # plot the graph with appropriate titles and aesthetic customization;
  # the four greys map Silence (white) through HighEnergy (near black)
  myplot <- ggplot(dat.m, aes(x = timeStamp, y = value, fill = variable)) +
    geom_bar(stat = "identity") +
    ggtitle(paste("Source:", fileTitle, "//", durationString)) +
    labs(x = "Time (s)", y = "Indicator") +
    scale_fill_manual(values = c("#ffffff", "#DCDCDC", "#888888", "#0d0d0d")) +
    theme_classic() +
    labs(fill = "Category: ") +
    xlim(0, maxDur) +
    theme(legend.position = "top")
  return(myplot)
}
# Render and save the barcode graph for a single csv file.
# The jpg is written next to the input, named <fileName>_barcode_graph.jpg.
barcode <- function(fileName, maxDurBar, verbose=FALSE, showWarnings=TRUE){
  strip <- plotRMS(fileName, maxDurBar)
  # Single-row grid keeps the axes aligned consistently across outputs
  aligned <- plot_grid(strip, nrow = 1, align = 'v', axis = 'l')
  # Adjust `base_height` / `base_width` to change the output proportions.
  save_plot(paste0(fileName, "_barcode_graph.jpg"), aligned,
            base_height = 3, base_width = 10)
}
# Collect csvFiles
# NOTE(review): pattern= takes a regular expression, not a glob — "*.csv"
# matches any file name containing "csv"; "\\.csv$" may be intended.
files <- list.files(path=args[1], pattern="*.csv", full.names=TRUE, recursive=TRUE, include.dirs = TRUE)
# Find maximum duration to set a standard width across all graphs
maxDurBar <- findMaxDuration(files)
# draw the barcode graph
# (maxDurBar is forwarded as barcode()'s second argument via lapply's ...)
lapply(files, barcode, maxDurBar)
|
e10284b6fd0c3092e6c4b3ef084a068d5783ddb1
|
0450d0f6e007e4ed64e2efb90b8265e6df697561
|
/plot1.R
|
e4db1e38e941e1f8651e0a136c2a05ef704dcec6
|
[] |
no_license
|
vaddss/Exploratory-Data-Analysis
|
2b168f1b93273a3d5fe5f89303ee09624268906b
|
2a033ff746cb5b5ec857c7772e236dbb13990f40
|
refs/heads/master
| 2020-05-17T13:46:54.549498
| 2014-07-11T18:08:05
| 2014-07-11T18:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 744
|
r
|
plot1.R
|
# Exploratory Data Analysis, plot 1: histogram of Global Active Power for
# the 1/2/2007 - 2/2/2007 window of the UCI household power dataset.
# Step 1 (getting the dataset; "?" marks missing values):
temp <- tempfile()
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, temp)
dataset <- read.table(unz(temp, "household_power_consumption.txt"),
                      header = TRUE, na.strings = "?", sep = ";")
unlink(temp)  # remove the downloaded archive (original leaked the tempfile)
# Step 2 (subsetting our dataset to the two days of interest):
data <- subset(dataset, Date == "1/2/2007" | Date == "2/2/2007")
# Step 3 (combining date and time into a POSIXlt timestamp):
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- strptime(data$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Step 4 (draw the histogram on screen, then copy it to a 480x480 png):
hist(data$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global active power")
dev.copy(png, "plot1.png", height = 480, width = 480)
dev.off()
|
f8e917bce0d4aa8d6f76893cb08a91244c8947fa
|
0d0638f672c3e02683d4d0dde55e351d5a240cee
|
/ui.R
|
5dbbd3fa545cc29ac954ca1bdd6b0c47086481db
|
[] |
no_license
|
wrightbr/DS9_ShinyApp
|
7545a64c8e062be4a43168528763b4e176609b9d
|
2c3afc0e1de4da36452b9f0637a6a294c86b3ac0
|
refs/heads/master
| 2021-01-15T21:45:17.066960
| 2014-12-12T13:37:02
| 2014-12-12T13:37:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,230
|
r
|
ui.R
|
library(shiny)
# define UI
# Shiny UI for a sample-size calculator: the intro paragraphs explain the
# formula (rendered with MathJax), the sidebar exposes the four inputs
# (alpha, d, p, N), and the main panel shows the two computed sample sizes
# (with and without the finite population correction).
shinyUI(fluidPage(
# application title
tags$h3("Sample size calculator for estimating a population proportion"),
p("Say you want to estimate the proportion of likely voters that are going to vote for a given candidate (e.g., see this recent poll by ", tags$a(href="http://www.politico.com/p/polls/person/latest/hillary-clinton#.VImiWntRJZY", "Politico)"), ". An obvious question is 'how many people should I ask'? Less obvious questions are 'how confident do I want to be in my estimate' and 'how close to the true population value do I want to be'? Next time you read an article citing an opinion poll, pay close attention to the answers to these questions. Most credible polling organizations ususally say something like 'based on a survey of ", tags$i("X"), " voters, our poll had a margin of error of ", tags$i("Y"), " percentage points'. This app can be used to help you reproduce these claims or even design your own poll."),
p("This Shiny app calculates the sample size ", tags$i("n"), " needed to estimate a population proportion " , tags$i("p"), " that has probability at least 1-\\(\\alpha\\) of being no farther than ", tags$i("d"), " from the true (unknown) proportion. The sample size equation (from 'Sampling' by S. K. Thompson, 1st ed.) is as follows:"),
# Sample-size formula rendered via MathJax
withMathJax(helpText("$$n=\\frac{Np(1-p)}{(N-1)\\frac{d^2}{z^2}+p(1-p)},$$")),
p("where ", tags$i("z"), "is the upper \\(\\alpha\\)/2 point of the normal distribution. If no estimate of ", tags$i("p"), " is available prior to the survey then a 'worst case' value of 0.5 can be used in determining sample size. Result are provided for two cases: 1) when the sample size ", tags$i("n"), " is SMALL relative to the population size ", tags$i("N"), " (such as in a national poll), in which case the finite population correction can be ignored; and 2) when the sample size ", tags$i("n"), " is LARGE relative to the population size ", tags$i("N"), " (such as in a poll for a local school board), in which case the finite population correction is advantageous and results in a smaller sample size requirement."),
tags$hr(),
# Sidebar layout
sidebarLayout(
sidebarPanel(
p("See what happens to the sample size requirement as you change the parameters below:"),
hr(),
# The input ids ("alpha", "d", "p", "N") are read by the server function
sliderInput("alpha", p("\\(\\alpha\\) (1-\\(\\alpha\\) = confidence level)"), min = 0.01, max = 0.1, value = 0.05, step = 0.01),
sliderInput("d", p(tags$i("d"), " (margin of error)"), min = 0.01, max = 0.05, value = 0.03, step = 0.001),
sliderInput("p", p(tags$i("p"), " (population proportion)"), min = 0.1, max = 0.9, value = 0.5, step = 0.1),
numericInput('N', p(tags$i("N"), " (population size)"), value = 1000)
),
# show result
mainPanel(
p("Sample size required when the sample is small relative to the population size"),
verbatimTextOutput("sampleSize"),
p("Sample size required when the sample is large relative to the population size"),
verbatimTextOutput("sampleSizeFpc")
)
)
))
|
27a1225bc64e7eb9c7e347e46d3b4a9de80077cc
|
b30e1aa7485835f66523697614a8308a5f202cf7
|
/cachematrix.R
|
aeb8ff1477a7409288632be73e04ca9752888d06
|
[] |
no_license
|
BrodyVogel/ProgrammingAssignment2
|
730d6338bf74599349fcbf62fed89bfb45718269
|
b55bd0412de4c4e61c46c54d11726cfdf7ce45f9
|
refs/heads/master
| 2020-04-19T14:18:20.261914
| 2019-01-29T23:03:30
| 2019-01-29T23:03:30
| 168,240,248
| 0
| 0
| null | 2019-01-29T22:34:48
| 2019-01-29T22:34:47
| null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix, which can save time with
## huge matrices.
## Construct a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inv <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() {
    x
  }
  set_inverse <- function(inverse) {
    cached_inv <<- inverse
  }
  get_inverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    set_inverse = set_inverse,
    get_inverse = get_inverse
  )
}
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## reusing the cached inverse when one is available and computing (and
## caching) it with solve() otherwise. Extra arguments are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  cached <- x$get_inverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and store it for next time
    cached <- solve(x$get(), ...)
    x$set_inverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
### Test
#test <- matrix(c(1,2,3,4),2,2)
#test_1 <- makeCacheMatrix(test)
#cacheSolve(test_1) #inverse returned after computation
# this time returns the cached matrix
#cacheSolve(test_1) #inverse returned from cache
|
73a363172a3c5503b95fb94850f7b600f83f4968
|
deca20f404aa14f95dbb266585e59ea264e12691
|
/IterativeAlgo/tests/testthat/test-make.matrix.R
|
7fd907df719b606bd7b56a1c63533edb7df7235a
|
[] |
no_license
|
TGuillerme/Parsimony_Inapplicable
|
0cea924ffcff59b7cf985260c843553170e3f0c4
|
2710e3c89a9e7d4ee02e8c16b19ca168f99a036c
|
refs/heads/master
| 2021-01-10T15:04:34.638326
| 2016-11-24T16:14:06
| 2016-11-24T16:14:06
| 49,874,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,931
|
r
|
test-make.matrix.R
|
#TEST make.matrix
context("make.matrix")
#Testing sample.distribution
test_that("sample.distribution works", {
#errors
expect_warning(
expect_error(
sample.distribution("a", c(runif,1,2))
)
)
expect_error(
sample.distribution(1, "c(runif,1,2)")
)
expect_error(
sample.distribution(1, c(aov,1,2))
)
#Returns the right number of values
expect_equal(
length(sample.distribution(1, c(runif))), 1
)
expect_equal(
length(sample.distribution(1, c(runif, 1, 2))), 1
)
expect_equal(
length(sample.distribution(1000, c(runif, 1, 2))), 1000
)
#Returns values in the range
expect_equal(
length(sample.distribution(1, c(runif))), 1
)
# expect_less_than()/expect_more_than() were deprecated and then removed
# from testthat; expect_lt()/expect_gt() are the current equivalents.
expect_lt(
max(sample.distribution(1000, c(runif, 1,2))), 2.0000000001
)
expect_gt(
min(sample.distribution(1000, c(runif, 1,2))), 0.9999999999
)
})
#Testing proportional.distribution
test_that("proportional.distribution works", {
#errors
expect_warning(
expect_error(
proportional.distribution("a", runif)
)
)
expect_error(
proportional.distribution(4, "runif")
)
expect_error(
proportional.distribution(4, runif, "a")
)
#sum(results) = 1
expect_equal(
sum(proportional.distribution(4, runif)), 1
)
expect_equal(
sum(proportional.distribution(100, runif)), 1
)
expect_equal(
sum(proportional.distribution(4, runif, 1000, 2000)), 1
)
expect_equal(
sum(proportional.distribution(4, rnorm)), 1
)
})
#Testing gen.seq.HKY.binary
test_that("gen.seq.HKY.binary works", {
#errors
expect_error(
gen.seq.HKY.binary("a", c(runif, 2, 2), c(runif, 1, 1))
)
expect_error(
gen.seq.HKY.binary(5, c(runif, 2, 2), c(runif, 1, 1))
)
expect_error(
gen.seq.HKY.binary(rtree(5), runif, c(runif, 1, 1))
)
expect_error(
gen.seq.HKY.binary(rtree(5), c(runif, 1, 1), runif)
)
#results is a vector of length 5 (characters)
expect_equal(
length(gen.seq.HKY.binary(rtree(5), c(runif, 2, 2), c(runif, 1, 1))), 5
)
# NOTE(review): expect_is() is deprecated in testthat 3e;
# expect_s3_class()/expect_type() are the modern replacements.
expect_is(
gen.seq.HKY.binary(rtree(5), c(runif, 2, 2), c(runif, 1, 1)), "character"
)
set.seed(1)
expect_equal(
unique(as.vector(gen.seq.HKY.binary(rtree(5), c(runif, 2, 2), c(runif, 1, 1)))), c("1", "0")
)
})
#Testing k.sampler
test_that("k.sampler works", {
#binary states (most of the cases)
expect_equal(
k.sampler("a"), 2
)
expect_equal(
k.sampler(1), 2
)
expect_equal(
k.sampler(0.5), 2
)
#multistates (up to 4 states)
set.seed(1)
expect_equal(
sort(unique(replicate(100, k.sampler(c(0.34, 0.33, 0.33))))), c(2,3,4)
)
#Proportion respected
# NOTE(review): these exact proportions depend on the RNG stream seeded
# above; a change in R's sampling algorithm would break them.
set.seed(1)
test <- replicate(10000, k.sampler(c(0.80, 0.15, 0.05)))
expect_equal(
sort(unique(test)), c(2,3,4)
)
expect_equal(
length(which(test == 2))/10000, 0.7932
)
expect_equal(
length(which(test == 3))/10000, 0.1535
)
expect_equal(
length(which(test == 4))/10000, 0.0533
)
})
#Testing rTraitDisc.mk
test_that("rTraitDisc.mk works", {
#errors
expect_error(
rTraitDisc.mk("a", c(runif,1,1), c(runif,2,2), c(0.5, 0.5))
)
expect_error(
rTraitDisc.mk(rtree(5), c(runif,1,1), rates = "a", c(0.5, 0.5))
)
})
#Testing is.invariant
test_that("is.invariant works", {
#errors
expect_error(
is.invariant(mean)
)
#true or false
expect_true(
is.invariant(rep("a", 5))
)
expect_false(
is.invariant(c(rep("A", 5), "b"))
)
})
|
c1e4286a1e4ed260df93ed9ead2d8dace5731324
|
6e9f1b8058a2e5a38768c0185351b9a14d69afe2
|
/R/Misc.R
|
934be86cfd450bee59cfe13ea85d5d27ba516311
|
[] |
no_license
|
cran/DLMtool
|
d4a7cae061a8bcf897d9d01eb913e2584c710a00
|
f42d4e74f8eab28738d9305945c7c3ae57ccd160
|
refs/heads/master
| 2022-07-05T17:08:22.460549
| 2022-06-20T15:20:09
| 2022-06-20T15:20:09
| 24,017,503
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19
|
r
|
Misc.R
|
tiny <- 1E-15  # small positive constant; presumably a numerical-tolerance / divide-by-zero guard used elsewhere in the package -- TODO confirm usage
|
e4a29e8612eb59200bff7d063f18289be5a274ea
|
492f49a78bea9ab16fc99d159653722113afa125
|
/man/separate_header.Rd
|
ea6c535305d79a2f2b06e8e675c1edda05bc51e7
|
[] |
no_license
|
davidgohel/flextable
|
48c34514420e435ca70f65354e94aa69786777bc
|
fc62aaf29c01bbac26fe34ef85240afe4eb201ab
|
refs/heads/master
| 2023-08-23T06:49:13.945566
| 2023-08-20T22:53:39
| 2023-08-20T22:53:39
| 62,127,938
| 502
| 83
| null | 2023-08-20T19:03:11
| 2016-06-28T09:25:11
|
R
|
UTF-8
|
R
| false
| true
| 2,419
|
rd
|
separate_header.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/augment_rows.R
\name{separate_header}
\alias{separate_header}
\title{Separate collapsed colnames into multiple rows}
\usage{
separate_header(
x,
opts = c("span-top", "center-hspan", "bottom-vspan", "default-theme"),
split = "[_\\\\.]",
fixed = FALSE
)
}
\arguments{
\item{x}{a flextable object}
\item{opts}{optional treatments to apply
to the resulting header part as a character
vector with multiple supported values.
The supported values are:
\itemize{
\item "span-top": span empty cells with the
first non empty cell, this operation is made
column by column.
\item "center-hspan": center the cells that are
horizontally spanned.
\item "bottom-vspan": bottom align the cells treated
when "span-top" is applied.
\item "default-theme": apply to the new header part
the theme set in \code{set_flextable_defaults(theme_fun = ...)}.
}}
\item{split}{a regular expression (unless \code{fixed = TRUE})
to use for splitting.}
\item{fixed}{logical. If TRUE match \code{split} exactly,
otherwise use regular expressions.}
}
\description{
If your variable names contain
multiple delimited labels, they will be separated
and placed in their own rows.
\if{html}{\out{
<img src="https://www.ardata.fr/img/flextable-imgs/flextable-016.png" alt="add_header illustration" style="width:100\%;">
}}
}
\examples{
library(flextable)
x <- data.frame(
Species = as.factor(c("setosa", "versicolor", "virginica")),
Sepal.Length_mean = c(5.006, 5.936, 6.588),
Sepal.Length_sd = c(0.35249, 0.51617, 0.63588),
Sepal.Width_mean = c(3.428, 2.77, 2.974),
Sepal.Width_sd = c(0.37906, 0.3138, 0.3225),
Petal.Length_mean = c(1.462, 4.26, 5.552),
Petal.Length_sd = c(0.17366, 0.46991, 0.55189),
Petal.Width_mean = c(0.246, 1.326, 2.026),
Petal.Width_sd = c(0.10539, 0.19775, 0.27465)
)
ft_1 <- flextable(x)
ft_1 <- colformat_double(ft_1, digits = 2)
ft_1 <- theme_box(ft_1)
ft_1 <- separate_header(
x = ft_1,
opts = c("span-top", "bottom-vspan")
)
ft_1
}
\seealso{
Other functions to add rows in a flextable:
\code{\link{add_body_row}()},
\code{\link{add_body}()},
\code{\link{add_footer_lines}()},
\code{\link{add_footer_row}()},
\code{\link{add_footer}()},
\code{\link{add_header_row}()},
\code{\link{add_header}()},
\code{\link{set_header_footer_df}},
\code{\link{set_header_labels}()}
}
\concept{functions to add rows in a flextable}
|
8eba97961f942056fd03549eb2ba275fd5039d1b
|
75f2c23f74b501091cbbbf6ac65a2854fdda9075
|
/bulb_temp/bulb_temp.R
|
e2f0ada008a7250eda292bfe217027fa364cb490
|
[] |
no_license
|
cortrudolph/preschool_measurement
|
daf8f984f07166e53a19148a6f7281ccd6657962
|
3ca97755113b1051dff2aa0d2c247d9aed11e942
|
refs/heads/master
| 2022-07-26T22:50:30.226389
| 2020-05-20T14:50:45
| 2020-05-20T14:50:45
| 263,669,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 461
|
r
|
bulb_temp.R
|
library(tidyverse)
# Bulb temperature readings: Time (minutes?) vs Temperature -- units are
# not stated in the source; confirm before reuse.
data<-tibble::tribble(
~Time, ~Temperature,
0, 56.3,
0.5, 114,
1, 123,
1.5, 137,
2, 169,
2.5, 164,
3, 147,
3.5, 158,
4, 160,
4.5, 157,
5, 171
)
# Scatter plot with a fitted quadratic (y ~ x + x^2) trend line
data %>% ggplot(., aes(x=Time, y=Temperature)) +
geom_point(size=5) +
stat_smooth(method = "lm", formula = y ~ x + I(x^2), size = 5)
|
66583c3ab4909f14d726b4d1a5171f0f478f8ea4
|
e4c4c406e880566f29a56ff4b0899b170563c192
|
/triangles_test.R
|
a0193b1772d748ff1014a0997a16d29a8d6c8ba6
|
[] |
no_license
|
will-r-chase/artinpi
|
cda43cac5f83f176c8ca041473c1e23d6e8f664a
|
d046b2c453388f489ad29addfde7d1a8456485ba
|
refs/heads/master
| 2020-04-08T18:20:50.942852
| 2019-03-26T14:55:17
| 2019-03-26T14:55:17
| 159,604,403
| 0
| 0
| null | 2018-11-29T03:52:31
| 2018-11-29T03:52:31
| null |
UTF-8
|
R
| false
| false
| 1,355
|
r
|
triangles_test.R
|
library(deldir)
library(sf)
library(sp)
library(tidyverse)
#idea sample pi in groups of 3 digits, each digit is an angle, sum is size (or 1st num is size)
piChar <- read.table("data/PI_10000.txt", stringsAsFactors=F, colClasses = c("character"))[1,1]
piVec <- as.numeric(strsplit(piChar, "")[[1]])
################################################
# Exploratory: build a Voronoi tessellation of 20 random points and plot it
data <- data.frame(x = runif(20), y = runif(20))
x <- data[,"x"]
y <- data[,"y"]
pts = SpatialPointsDataFrame(cbind(x, y), data, match.ID = T)
vor_desc = tile.list(deldir(pts@coords[, 1], pts@coords[, 2],
suppressMsge = TRUE))
# Convert each Voronoi tile into a closed sp Polygon (first vertex repeated)
vor_polygons <- lapply(1:(length(vor_desc)), function(i) {
tmp <- cbind(vor_desc[[i]]$x, vor_desc[[i]]$y)
tmp <- rbind(tmp, tmp[1, ])
Polygons(list(Polygon(tmp)), ID = i)
})
rownames(pts@data) = sapply(slot(SpatialPolygons(vor_polygons),
"polygons"), slot, "ID")
vor_spdf = SpatialPolygonsDataFrame(SpatialPolygons(vor_polygons),
data = pts@data)
polys <- fortify(vor_spdf, id = "ID")
ggplot(polys, aes(x = long, y = lat)) + geom_polygon()
# NOTE(review): `points` is not defined in this script — as written this
# passes graphics::points (a function) to deldir() and will error;
# `pts@coords` or the `data` frame is probably intended.
triangles<-triang.list(deldir(points, plotit = TRUE))
for(tri in triangles) {
polygon(as.list(tri))
}
# NOTE(review): rbind_list() was deprecated and removed from dplyr;
# bind_rows() is the replacement.
tri_list <- rbind_list(as.list(triangles))
ggplot(data = tri_list, aes(x = x, y = y)) + geom_polygon()
?tile.list
|
7fd0252115e30333bdcdc00b1eae059645a8a61f
|
5aaf7a4652ec0f338fc9f595d6c296336f0339f7
|
/exercises/04_import_data.R
|
06895ebf4d10c9f151ea9f4f3eab37766a311468
|
[] |
no_license
|
tertiarycourses/ApacheSparkRTraining
|
621a761ab9a9d61936b77cca0b4932f30c086469
|
0422424e53ce46e9198ee5d41a20b0c368815465
|
refs/heads/master
| 2020-03-27T06:13:36.673348
| 2018-08-29T08:02:00
| 2018-08-29T08:02:00
| 146,090,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
04_import_data.R
|
############# copy data to impala ###############
library(DBI)
library(dbplyr)
library(odbc)
library(ggplot2)
library(dplyr)
library(RODBC)
library(readr)
### wait for the hive server to start in your VM > takes about 10 minutes
# $ifconfig
# Open an ODBC connection to Impala (hard-coded VM address), inspect the
# available tables, and drop any stale copy of the "airways" table.
impalacon=dbConnect(drv = odbc::odbc(),
driver = "Cloudera ODBC Driver for Impala",
host = "192.168.1.124",
port = 21050,
database = "default")
dbListTables(impalacon)
dbRemoveTable(impalacon, "airways")
# NOTE(review): this disconnects immediately, yet the loop below keeps
# using `impalacon` — presumably this line is meant only for interactive
# cleanup; confirm.
dbDisconnect(impalacon)
################# airlines dataset ####################
# *** hadoop is designed for large datasets only
# small datasets will not perform well
# usually for datasets 10GB and above only
#### loop
# for dbWriteTable
# the following data types are accepted (from DBI documentation):
#integer
#numeric
#logical for Boolean values (some backends may return an integer)
#NA
#character
#factor (bound as character, with warning)
#Date
#POSIXct timestamps
#POSIXlt timestamps
#lists of raw for blobs (with NULL entries for SQL NULL values)
#objects of type blob::blob
# Download the 2008 airline on-time csv (bz2), then stream it into the
# Impala table `airways` in 250000-row chunks.
air1="http://stat-computing.org/dataexpo/2009/2008.csv.bz2"
library(RCurl)
download.file(air1, destfile = "air1.csv.bz2", method="curl")
air1="air1.csv.bz2"
#specify the column types
spec_csv(air1)
air=c(air1) # specify your files
tbl="airways"
for(i in air){ # WARNING ! -- THIS WILL TAKE 45 minutes
library(R.utils)
# n1 = total row count (including header); n2 = number of full chunks
n1=length(count.fields(i))
n2=floor(n1/250000)
print("bz2 file unzipping")
bunzip2(i, "mydata.csv", remove=F, skip=T) # unzip the bz2 file
print("bz2 file finished unzipped")
print("csv file being read")
df22=read.csv("mydata.csv", header=T, sep=",",
dec=".", stringsAsFactors=F,
na.strings=NA)
print("csv file finished read")
print(paste("there are",n1,"rows in this file"))
colNames=names(df22)
for(j in 0:n2){
jn=j * 250000 # we insert data in chunks of 250000 rows
j1=jn+1
j2=jn+250000
# NOTE(review): two problems here. (1) The error handler assigns dd22
# inside its own function scope, so the outer dd22 is never updated on
# error. (2) df22[j1:j2,] does not actually error when j2 > n1 — R pads
# the result with NA rows — so the final chunk may carry NA rows into
# the database. Verify and consider dd22 <- df22[j1:min(j2, n1), ].
tryCatch({
dd22=df22[j1:j2,]},
error=function(e){dd22=df22[j1:n1,]}
)
# if(nrow(dd22)==0L) return(FALSE)
colnames(dd22)=colNames
# First chunk of the first file creates the table; all others append
if(j==0 & i==air[1]){
dbCreateTable(conn=impalacon,
name=tbl,
fields=dd22,
row.names = NULL,
temporary=F)
print(paste("table",tbl,"created"))
}else{
tryCatch({
dbWriteTable(conn=impalacon,
name=tbl,
value=dd22,
row.names=F,
temporary=F,
append=T)},
error=function(e)NA)
print("250000 rows appended")
}
rm(dd22); gc();
}
rm(df22); file.remove("mydata.csv")
print("mydata.csv file removed")
}
### testing
impalacon %>% tbl("airways") %>%
select(distance, dayofweek) %>%
filter(!is.na(distance)) %>%
filter(!is.na(dayofweek)) %>%
group_by(dayofweek) %>%
summarize(ss=sum(distance)) %>%
as.data.frame() %>%
mutate(dayofweek=as.factor(dayofweek),
ss=as.integer(ss)) %>%
ggplot()+ aes(x=dayofweek, y=ss) + geom_bar(stat="identity")
### retrieve data
# collect() executes the query and returns the results to R.
# compute() executes the query and stores the results in a temporary table in the database.
# compute(name="table1", temporary=FALSE)
# impala doesn't support temporary tables
################# heart dataset ####################
heart=read.csv(file.choose()) # import heart dataset
df22=heart
colNames=names(df22)
dbCreateTable(conn=impalacon,
name="heart",
fields=df22,
row.names = NULL,
temporary=F)
dbWriteTable(conn=impalacon,
name="heart",
value=df22,
row.names=F,
temporary=F,
overwrite=T)
rm(df22); gc()
###################### retrieve data
# collect() executes the query and returns the results to R.
# compute() executes the query and stores the results in a temporary table in the database.
# compute(name="table1", temporary=FALSE)
# impala doesn't support temporary tables
# collapse() turns the query into a table expression.
# generates an SQL query which you can use later on
|
c884e070e9d45a5c36f818858bdc46809ceb4555
|
7da5415ba32fabc012b8b6173ec2e346243f0188
|
/consensus_clustering/src/GDAC_CnmfReports/reports/.svn/text-base/CnmfCNReport.R.svn-base
|
47f46fe8db42f78b6fb4dc837e78273f6d95de98
|
[] |
no_license
|
WANGDI0212/DLBCL_Nat_Med_April_2018
|
472443c164dfda8795e4ccf36cb02ec41ceed36c
|
1c5dcd2f7b859f8b7839f4e1d9725e455b14df4d
|
refs/heads/master
| 2022-10-19T17:30:38.563049
| 2020-06-11T20:34:50
| 2020-06-11T20:34:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,169
|
CnmfCNReport.R.svn-base
|
# Filename:
# Authors:
#
# Purpose:
# Command line calling script:
# <R> <libdir>consensusReport_v5.R writeReport -o<expdata> -v<kclus> -s<bestclu> -u<allcluster> -w<markers> -p<cormatrix> -q<markersP> -r<heatmap> -t<heatmapall> -a<file.gif.2> -b<file.gif.3> -c<file.gif.4> -d<file.gif.5> -e<file.gif.6> -f<file.gif.7> -g<file.gif.8> -h<nozzle.path>
#
#
writenmfCNReport <- function(expdata,kclus,bestclu,allcluster,markers,cormatrix,markersP,heatmap,heatmapall, images){
# Load the relevent information tha tis needed by the report
tgenenumber <- readLines(expdata,n=2)[2]
genenumber <- unlist(strsplit(tgenenumber,"\t"))[1]
bestclufile <- read.delim(bestclu, header = TRUE, sep="\t", as.is = TRUE, skip = 1)
nnclu <- unique(bestclufile[,2])
nclu <- length(nnclu)
samplenum <- dim(bestclufile)[1]
almarkers <- read.delim(markers, header = TRUE, sep = "\t", as.is = TRUE, skip = 1)
markersig <- read.delim(markersP, header = TRUE, sep = "\t",as.is = TRUE)
markergenenumber <- nrow(markersig)
# Prepare the reference to the images and set the names of the members
allfile <- images
consensusm <- allfile[nclu-1]
allmems <- read.delim(allcluster, sep = "\t", header=TRUE, as.is = TRUE, stringsAsFactors = FALSE)
# This is a bit of a hack, but since we normally only calculate up to eight,
# remove any higher or lower names that are not needed
#colnames(allmems) <- paste("K=" , seq(2 + length(which(images == "")), 8), sep = "")
colnames(allmems) <- paste("K=" , seq(2, 8), sep = "")
allmems <- cbind("SampleName" = as.character(rownames(allmems)), allmems)
allmems <- allmems[order(allmems[,2]),]
# Prepare the references for inclusion
fullCitation <- newCitation(
authors = "Brunet, J.P., Tamayo, P., Golub, T.R. & Mesirov, J.P.",
title="Metagenes and molecular pattern discovery using matrix factorization",
publication="Proc Natl Acad Sci U S A", issue="12", number="101", pages="4164-9", year="2004",
url="http://www.ncbi.nlm.nih.gov/pubmed?term=Metagenes%20and%20molecular%20pattern%20discovery%20using%20matrix%20factorization" );
fullCitation2 <- newCitation(
authors="Rousseeuw, P.J.",
title="Silhouettes: A graphical aid to the interpretation and validation of cluster analysis.",
publication="J. Comput. Appl. Math.", issue="20", pages="53-65", year="1987" );
webCitation <- newCitation( title="Broad Genepattern: NMFConsensus", url="http://genepattern.broadinstitute.org/gp/pages/index.jsf" );
webCitation2 <- newCitation( title="R silhouette package", url="http://stat.ethz.ch/R-manual/R-patched/library/cluster/html/silhouette.html" );
#webCitation3 <- newCitation( title="R edgeR package", url="http://www.bioconductor.org/packages/release/bioc/html/edgeR.html" );
# Start a new report
fhReport <- newReport("Clustering of copy number data by focal peak region with log2 ratio: consensus NMF")
# Report overview section
fhReport <- addToIntroduction(fhReport,
newParagraph("This pipeline calculates clusters based on a consensus
non-negative matrix factorization (NMF) clustering method ",
asReference(fullCitation), "," , asReference(webCitation),
". This pipeline has the following features:"),
newList(isNumbered = TRUE,
newParagraph("Convert input data set to a non-negitive matrix by column rank normalization."),
newParagraph("Classify samples into consensus clusters."),
newParagraph("Determine differentially expressed focal events for each subtype.")));
fhReport <- addToSummary(fhReport,
newParagraph("The most robust consensus NMF clustering of ", asParameter(samplenum),
" samples using the ", asParameter(genenumber), " copy number focal regions
was identified for ", asParameter("k"), " = ", asParameter(nclu),
" clusters. We computed the clustering for ", asParameter("k"),
" = 2 to ", asParameter("k"), " = 8 and used the cophenetic
correlation coefficient to determine the best solution."))
# Prepare the table of the best clusters
bestclufile <- bestclufile[1:10,]
tabbestclus <- newTable(bestclufile, file = basename(bestclu),
"List of samples with ", nclu, " subtypes and silhouette width.");
# Prepare the table of the list of samples and their clusters
allmems <- as.matrix(allmems[1:10,])
taballclus <- newTable(allmems, file = basename(allcluster),
"List of samples belonging to each cluster in different k clusters.");
# Prepare the table of gene markers
almarkers <- almarkers[1:10,]
tabmarker <- newTable(almarkers, file = basename(markers),
"List of marker focal events with p <= 0.05.");
# Prepare the figures for inclusion in the results
figconsensu <- newFigure(basename(consensusm), fileHighRes = basename(consensusm),
"The consensus matrix after clustering shows ", asParameter(nclu),
" clusters with limited overlap between clusters." );
figcormatrix <- newFigure(basename(cormatrix), fileHighRes = basename(cormatrix),
"The correlation matrix also shows ",asParameter(nclu)," clusters.");
figsilhouette <- newFigure(basename(kclus), fileHighRes = basename(kclus),
"The silhouette width was calculated for each sample and each value of ",
asParameter("k"), ". The left panel shows the average silhouette width
across all samples for each tested ", asParameter("k"), " (left panel).
The right panels shows assignments of clusters to samples and the
silhouette width of each sample for the most robust clustering.");
# Removed as the figure is currently flawed
figheatmap <- newFigure(basename(heatmap), fileHighRes = basename(heatmap),
"Samples were separated into ", asParameter(nclu), " clusters. Shown are ",
asParameter(samplenum), " samples and ", asParameter(markergenenumber),
" marker focal events. The color bar of the row indicates the marker focal events for
the corresponding cluster.");
figheatmapall <- newFigure(basename(heatmapall), fileHighRes = basename(heatmapall),
"Heatmap with a standard hierarchical clustering for ",
asParameter(samplenum), " samples and the ", asParameter(genenumber), " focal events.");
# Render the main results
fhReport <- addToResults(fhReport,
# Removed as the figure is currently flawed
addTo(newSubSection("Copy number focal events of molecular subtypes"), figheatmap, figheatmapall),
#addTo(newSubSection("Gene expression patterns of molecular subtypes"), figheatmapall),
addTo(newSubSection("Consensus and correlation matrix"), figconsensu, figcormatrix),
addTo(newSubSection("Silhouette widths"), figsilhouette),
addTo(newSubSection("Samples assignment with silhouette width"), tabbestclus, taballclus),
addTo(newSubSection("Marker focal events of each subtype"),
newParagraph("Samples most representative of the clusters, hereby
called ", asParameter("core samples"), " were identified based
on positive silhouette width, indicating higher similarity to
their own class than to any other class member. Core samples
were used to select differentially expressed marker focal events for
each subtype by comparing the subclass versus the other
subclasses, using Student's t-test."), tabmarker));
# Prepare the CNMF method section
cnmfMethod <- addTo(
newSubSection("CNMF Method"),
newParagraph("Non-negative matrix factorization (NMF) is an unsupervised
learning algorithm that has been shown to identify molecular patterns
when applied to gene expression data ",
asReference(fullCitation), ",", asReference(webCitation),
". Rather than separating gene clusters based on distance
computation, NMF detects contextdependent patterns of gene expression
in complex biological systems."));
# Prepare the cophenetic coeefficent method section
copheneticMethod <- addTo(
newSubSection("Cophenetic Correlation Coefficient"),
newParagraph("We use the cophenetic correlation coefficient ",
asReference(fullCitation), " to determine the cluster that yields
the most robust clustering. The cophenetic correlation coefficient
is computed based on the consensus matrix of the CNMF clustering,
and measures how reliably the same samples are assigned to the same
cluster across many iterations of the clustering lgorithm with
random initializations. The cophenetic correlation coefficient lies
between 0 and 1, with higher values indicating more stable cluster
assignments. We select the number of clusters ", asParameter("k"),
" based on the largest observed correlation coefficient for all
tested values of ", asParameter("k"), "."));
# Prepare the silhouette width method section
silhouetteWidth <- addTo(
newSubSection("Silhouette Width"),
newParagraph("Silhouette width is defined as the ratio of average
distance of each sample to samples in the same cluster to the
smallest distance to samples not in the same cluster. If
silhouette width is close to 1, it means that sample is well
clustered. If silhouette width is close to -1, it means that
sample is misclassified ", asReference(fullCitation2), "."));
Input <- addTo(
newParagraph("Copy number data file = All Lesions File actual copy number part (all_lesions.conf_##.txt,
where ## is the confidence level). The all lesions file is from GISTIC pipeline
and summarizes the results from the GISTIC run. It contains data about the
significant regions of amplification and deletion as well as which samples
are amplified or deleted in each of these regions. The identified regions are
listed down the first column, and the samples are listed across the first row, starting in column 10"));
# Add the methods to the report
fhReport <- addToMethods(fhReport, cnmfMethod, copheneticMethod, silhouetteWidth)
#Add the Input to the report
fhReport <- addToInput( fhReport,Input)
# Report citations
fhReport <- addToReferences( fhReport, fullCitation, webCitation, fullCitation2,webCitation2);
# Render the report
writeReport( fhReport);
}
|
|
e58ffacce904db55e326f85d02501d4b8f5efdff
|
0f6f92ec1fcc3a27cfe3dc712f0ce92e57dc0c03
|
/R/encodepeak.to.granges.r
|
38c28ec4195d09ccc6970d029ef5673b48dfbd73
|
[] |
no_license
|
tengmx/toolbox
|
274ec1b967a9c281a6ac61500f49d05800d0e8cc
|
e8e55f357bbba93a108feb7d083d62ea469fb9a6
|
refs/heads/master
| 2021-01-12T16:06:12.905158
| 2016-12-05T16:47:15
| 2016-12-05T16:47:15
| 71,936,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
encodepeak.to.granges.r
|
### Load an ENCODE broad/narrow peak file into a GRanges object.
###
### Args:
###   peakfile: path to a peak file in ENCODE broadPeak/narrowPeak (BED6+)
###             text layout.
###   gz:       logical(1); is the file gzip-compressed?
###   format:   "broad" or "narrow"; narrowPeak files carry an extra 10th
###             column with the point-source peak offset.
###   meta:     if TRUE, attach signalValue/pValue/qValue metadata columns
###             (columns 7-9 of the peak file).
###
### Returns: a GRanges with one range per peak.
encodepeak.to.granges <- function(peakfile, gz = TRUE, format = c("broad", "narrow"),
                                  meta = TRUE) {
    if (!is.logical(gz) || length(gz) != 1L)
        stop("'gz' must be a length-1 logical vector.\n")
    format <- match.arg(format)
    library(GenomicRanges)
    if (gz)
        peakfile <- gzfile(peakfile)
    # sep = "" (any whitespace) and quote = "" match the ENCODE text layout.
    # Spell out FALSE instead of the reassignable shorthand F.
    peaktbl <- read.delim(peakfile, stringsAsFactors = FALSE, quote = "",
                          header = FALSE, sep = "")
    # Peak files are 0-based half-open; GRanges is 1-based, hence start + 1.
    # A "." strand (unstranded in BED) is recoded to GRanges' "*".
    if (meta) {
        peaks <- GRanges(peaktbl$V1,
                         IRanges(start = peaktbl$V2 + 1, end = peaktbl$V3),
                         strand = gsub('\\.', '\\*', peaktbl$V6),
                         signalValue = peaktbl$V7,
                         pValue = peaktbl$V8, qValue = peaktbl$V9)
        if (format == "narrow") mcols(peaks)$peak <- peaktbl$V10
    } else {
        peaks <- GRanges(peaktbl$V1,
                         IRanges(start = peaktbl$V2 + 1, end = peaktbl$V3),
                         strand = gsub('\\.', '\\*', peaktbl$V6))
    }
    peaks
}
|
2c1eb74c2516448bcd802b99b51731104b24692a
|
515c4a095ef3b6d2879d7943e610fa98441f9afe
|
/facial_keypoints.R
|
76e74d5eb7f5e6a4718605ab94345ec32264bd8d
|
[] |
no_license
|
KaranvirSinghJ/Facial-Expression-using-R
|
a3f45c1419b144e7febe0f85e147e068f34b2ab3
|
8c9cc2183db9e27751a4effdafa40f7debfe2fdd
|
refs/heads/master
| 2020-03-20T03:26:28.762526
| 2018-06-13T01:28:27
| 2018-06-13T01:28:27
| 137,146,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,636
|
r
|
facial_keypoints.R
|
# Facial-keypoints exploration script (Kaggle-style data):
# reads training/test CSVs whose 'Image' column holds 96x96 grayscale pixels
# as a space-separated string, decodes the images in parallel, visualizes a
# few keypoints, and builds a naive per-column-mean prediction table.
# NOTE(review): data.dir is a hard-coded local path — adjust before running.
data.dir <- '/home/student/Downloads/'
train.file <- paste0(data.dir, 'training.csv')
test.file <- paste0(data.dir, 'test.csv')
d.train <- read.csv(train.file, stringsAsFactors=F)
# Split the pixel string off from the keypoint coordinate columns.
im.train <- d.train$Image
d.train$Image <- NULL
head(d.train)
im.train[1]
# One image: space-separated pixel intensities -> integer vector.
as.integer(unlist(strsplit(im.train[1], " ")))
# NOTE(review): installing a package from inside a script is an anti-pattern;
# install doMC once interactively instead.
install.packages('doMC')
library(doMC)
registerDoMC()
# Decode every training image in parallel; rbind gives one row per image.
im.train <- foreach(im = im.train, .combine=rbind) %dopar% {
as.integer(unlist(strsplit(im, " ")))
}
str(im.train)
d.test <- read.csv(test.file, stringsAsFactors=F)
im.test <- foreach(im = d.test$Image, .combine=rbind) %dopar% {
as.integer(unlist(strsplit(im, " ")))
}
d.test$Image <- NULL
# Cache decoded data; NOTE(review): '.Rd' is conventionally the extension for
# R documentation files — '.RData'/'.rds' would be less confusing here.
save(d.train, im.train, d.test, im.test, file='model.Rd')
load('model.Rd')
# Render the first image (rev + 96-x flips align image() with pixel order)
# and overlay three annotated keypoints.
im <- matrix(data=rev(im.train[1,]), nrow=96, ncol=96)
image(1:96, 1:96, im, col=gray((0:255)/255))
points(96-d.train$nose_tip_x[1], 96-d.train$nose_tip_y[1], col="red")
points(96-d.train$left_eye_center_x[1], 96-d.train$left_eye_center_y[1], col="blue")
points(96-d.train$right_eye_center_x[1], 96-d.train$right_eye_center_y[1], col="green")
# Scatter every nose-tip annotation over the current image to see the spread.
for(i in 1:nrow(d.train)) {
points(96-d.train$nose_tip_x[i], 96-d.train$nose_tip_y[i], col="red")
}
version
# Inspect the sample with the most extreme nose_tip_x (possible outlier).
idx <- which.max(d.train$nose_tip_x)
im <- matrix(data=rev(im.train[idx,]), nrow=96, ncol=96)
image(1:96, 1:96, im, col=gray((0:255)/255))
points(96-d.train$nose_tip_x[idx], 96-d.train$nose_tip_y[idx], col="red")
# Baseline predictor: use each keypoint's training mean for every test image.
colMeans(d.train, na.rm=T)
p <- matrix(data=colMeans(d.train, na.rm=T), nrow=nrow(d.test), ncol=ncol(d.train), byrow=T)
p
colnames(p) <- names(d.train)
predictions <- data.frame(ImageId = 1:nrow(d.test), p)
head(predictions)
|
67f2591abd52a978becb459ef769bdda04dbebc9
|
3e3adfa146c94c393e69dbd1a02767234b1dc173
|
/draw_multi_probe_ann.R
|
af786beea6d9603608e79dc240e26a0b60520f56
|
[] |
no_license
|
viirya/fastdict
|
1a18ce14ddb18b69531ccaa71bcf2b935d0f777c
|
83a2afc2096f57c49da7048b4a21b63a9b36d0ac
|
refs/heads/master
| 2020-05-02T22:01:25.859446
| 2017-02-11T05:38:03
| 2017-02-11T05:38:03
| 15,826,276
| 15
| 4
| null | 2017-02-11T05:38:04
| 2014-01-11T16:52:45
|
Python
|
UTF-8
|
R
| false
| false
| 1,878
|
r
|
draw_multi_probe_ann.R
|
# Render a line plot of theoretical NN-retrieval percentage versus the number
# of multi-probes, one colored line per Hamming distance, and save it as
# "<dataname>.eps".
#
# Args:
#   dataname:    basename (no extension) for the saved EPS figure.
#   data_frames: data.frame with columns `probe`, `nn`, and `distance`.
draw_figure_ggplot <- function(dataname, data_frames) {
    library(ggplot2)
    print(data_frames)
    p <- ggplot(data_frames, aes(x = probe, y = nn, group = distance)) +
        geom_line(aes(colour = distance)) +
        scale_colour_discrete(h = c(0, 360) + 15, c = 100, h.start = 0, direction = 1) +
        xlab("Number of multi-probe") +
        ylab("NN percentage in theory") +
        theme_bw() +
        theme(axis.title.x = element_text(size = 15),
              axis.title.y = element_text(size = 15),
              legend.text = element_text(size = 15),
              legend.title = element_text(size = 15),
              axis.text = element_text(size = 15))
    # Pass the plot object explicitly: inside a function the plot is never
    # auto-printed, so relying on ggsave()'s default (ggplot2's "last plot"
    # global state) is fragile.
    ggsave(file = paste0(dataname, ".eps"), plot = p)
}
# Command-line driver.
# Usage: Rscript draw_multi_probe_ann.R <b> <s> <outname>
# Computes, for each Hamming distance d = 0..4, the cumulative percentage of
# true nearest neighbors retrievable with m = 0..4 multi-probe levels, then
# plots the curves via draw_figure_ggplot().
arg <- commandArgs(trailingOnly = TRUE)
argLen <- length(arg)
if (argLen == 3) {
arg_b <- as.integer(arg[1]) # total b bits
arg_s <- as.integer(arg[2]) # randomly sampling s bits
dataname <- arg[3] # output filename
print(paste("Draw ANN figure for multi-probe: b = ", arg_b, ", s = ", arg_s, sep = ''))
# combinat::nCm(n, k) is the binomial coefficient "n choose k".
library(combinat)
multi_probe_levels = c(0:4)
distances = c(0:4)
# NOTE(review): data_frames is grown with rbind() inside the loop (O(n^2)
# growth pattern); harmless at this size (25 rows) but worth knowing.
data_frames = rbind()
for (distance in distances) {
accum_nn_percentage = 0
print(paste("d = ", distance, sep = ''))
for (level in multi_probe_levels) {
print(paste("m = ", level, sep = ''))
# Count ways to place the remaining (distance - level) differing bits
# among the b - s unsampled bits; negative indices are filtered out.
i = c((distance - level):0)
rCi = sum(nCm(arg_b - arg_s, i[i >= 0]))
# Ways to probe `level` of the s sampled bits.
sCm = nCm(arg_s, level)
# All neighbors within `distance` over all b bits (normalizer).
bCj = sum(nCm(arg_b, c(distance:0)))
accum_nn_percentage = accum_nn_percentage + sCm * rCi / bCj
# Clamp accumulated probability at 1 to guard against rounding overshoot.
if (accum_nn_percentage > 1.0) {
accum_nn_percentage = 1.0
}
data_frames = rbind(data.frame(probe = level, nn = accum_nn_percentage * 100, distance = paste("d = ", distance, sep = '')), data_frames)
}
}
print(data_frames)
draw_figure_ggplot(dataname, data_frames)
}
|
a3b98435ca9d9175c42e3420a5c8de11c20e613a
|
372a279bd0a9c0a5c5fa5e4cb932ec41db07ce7e
|
/man/amce.Rd
|
5fd5c21b3602aa00d49e7a76f4172d05e4df563e
|
[] |
no_license
|
cran/cregg
|
07dc7e2d3e94797829b845a0585ab3ba0297c032
|
74636ad832430b994b814a48498ad0e00be51a63
|
refs/heads/master
| 2021-07-12T16:53:22.169731
| 2020-06-28T20:20:03
| 2020-06-28T20:20:03
| 145,906,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,734
|
rd
|
amce.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/amce.R
\name{amce}
\alias{amce}
\alias{amce_by_reference}
\title{Tidy estimation of AMCEs}
\usage{
amce(
data,
formula,
id = ~0,
weights = NULL,
feature_order = NULL,
feature_labels = NULL,
level_order = c("ascending", "descending"),
alpha = 0.05,
...
)
amce_by_reference(data, formula, variable, ...)
}
\arguments{
\item{data}{A data frame containing variables specified in \code{formula}. All RHS variables should be factors; the base level for each will be used in estimation and its reported AMCE will be NA (for printing). Optionally, this can instead be an object of class \dQuote{survey.design} returned by \code{\link[survey]{svydesign}}.}
\item{formula}{A formula specifying an AMCE model to be estimated. All variables should be factors; all levels across features should be unique. Two-way constraints can be specified with an asterisk (*) between RHS features. The specific constrained level pairs within these features are then detected automatically. Higher-order constraints are not allowed.}
\item{id}{An RHS formula specifying a variable holding respondent identifiers, to be used for clustering standard errors. By default, data are unclustered.}
\item{weights}{An (optional) RHS formula specifying a variable holding survey weights.}
\item{feature_order}{An (optional) character vector specifying the names of feature (RHS) variables in the order they should be encoded in the resulting data frame.}
\item{feature_labels}{A named list of \dQuote{fancy} feature labels to be used in output. By default, the function looks for a \dQuote{label} attribute on each variable in \code{formula} and uses that for pretty printing. This argument overrides those attributes or otherwise provides fancy labels for this purpose. This should be a list with names equal to variables on the righthand side of \code{formula} and character string values; arguments passed here override variable attributes.}
\item{level_order}{A character string specifying levels (within each feature) should be ordered increasing or decreasing in the final output. This is mostly only consequential for plotting via \code{\link{plot.cj_mm}}, etc.}
\item{alpha}{A numeric value indicating the significance level at which to calculate confidence intervals for the AMCEs (by default 0.05, meaning 95-percent CIs are returned).}
\item{\dots}{For \code{amce}: additional arguments to \code{\link[stats]{glm}} or \code{\link[survey]{svyglm}}, the latter being used if \code{weights} is non-NULL. For \code{amce_by_reference}: additional arguments passed to \code{amce}.}
\item{variable}{An RHS formula containing a single factor variable from \code{formula}. This will be used by \code{amce_by_reference} to estimate AMCEs relative to each possible factor level as a reference category. If more than one RHS variables are specified, the first will be used.}
}
\value{
A data frame of class \dQuote{cj_amce}
}
\description{
Estimate AMCEs for a conjoint analysis and return a tidy data frame of results
}
\details{
\code{amce} provides estimates of AMCEs (or rather, average marginal effects for each feature level). Two-way constraints can be specified with an asterisk (*) between features. The specific constrained level pairs within these features are then detected automatically. The function can also be used for calculating average component interaction effects when combined with \code{interaction}, and for balance testing by specifying a covariate rather than an outcome on the left-hand side of \code{formula}. See examples.
\code{amce_by_reference} provides a tool for quick sensitivity analysis. AMCEs are defined relative to an arbitrary reference category (i.e., feature level). This function will loop over all feature levels (for a specified feature) to show how interpretation will be affected by choice of reference category. The resulting data frame will be a stacked result from \code{amce}, containing an additional \code{REFERENCE} column specifying which level of \code{variable} was used as the reference category. In unconstrained conjoint designs, only AMCEs for \code{variable} will vary by reference category; in constrained designs, AMCEs for any factor constrained by \code{variable} may also vary.
Users may desire to specify a \code{family} argument via \code{\dots}, which should be a \dQuote{family} object such as \code{gaussian}. Sensible alternatives are \code{binomial} (for binary outcomes) and quasibinomial (for weighted survey data). See \code{\link[stats]{family}} for details. In such cases, effects are always reported on the link (not outcome) scale.
}
\examples{
data("taxes")
# estimating AMCEs
amce(taxes, chose_plan ~ taxrate1 + taxrate2 + taxrate3 +
taxrate4 + taxrate5 + taxrate6 + taxrev, id = ~ ID)
\donttest{
data("immigration")
# estimating AMCEs with constraints
amce(immigration, ChosenImmigrant ~ Gender + ReasonForApplication * CountryOfOrigin,
id = ~CaseID)
# estimating average component interaction effects (AMCEs of feature combinations)
immigration$language_entry <- interaction(immigration$LanguageSkills,
immigration$PriorEntry, sep = "_")
amce(immigration,ChosenImmigrant ~ language_entry, id = ~CaseID)
# balance testing example
plot(amce(immigration[!is.na(immigration$ethnocentrism),],
ethnocentrism ~ Gender + Education + LanguageSkills, id = ~ CaseID))
# reference category sensitivity
x <- amce_by_reference(immigration, ChosenImmigrant ~ LanguageSkills + Education,
variable = ~ LanguageSkills, id = ~ CaseID)
# plot
plot(x)
}
}
\seealso{
\code{\link{amce_diffs}} \code{\link{mm}} \code{\link{plot.cj_amce}}
}
|
191362712c92e67607be19230245aba52da3a205
|
826cc17cd51ccbceeb0b33ee23cab81ccee3932f
|
/tests/testthat/test-enrichment_depletion_test.R
|
258a72e28dd83b943f635c70fb06d2d029d13778
|
[
"MIT"
] |
permissive
|
UMCUGenetics/MutationalPatterns
|
9b9d38a7ab69d7e29d8900f11fa9fb7ef328cfb9
|
ca9caf0d0ba3cd1e13cb909009dc5b3b27b84631
|
refs/heads/master
| 2023-04-14T23:28:50.852559
| 2022-11-22T11:37:17
| 2022-11-22T11:37:17
| 53,409,261
| 86
| 37
|
MIT
| 2022-11-22T11:37:18
| 2016-03-08T12:10:11
|
R
|
UTF-8
|
R
| false
| false
| 1,830
|
r
|
test-enrichment_depletion_test.R
|
# Tests for MutationalPatterns::enrichment_depletion_test().
# Exercises grouped ('by' = tissue) and pooled calls, plus single and
# multiple p/FDR cutoffs, then checks return class, dimensions, and the
# number of significance stars produced.
context("test-enrichment_depletion_test")
# Read distribution data (precomputed fixture shipped with the package)
distr <- readRDS(system.file("states/distr_data.rds",
package = "MutationalPatterns"
))
# Set tissue: three samples each of colon, intestine, and liver
tissue <- c(rep("colon", 3), rep("intestine", 3), rep("liver", 3))
## Perform the enrichment/depletion test by tissue type.
output <- enrichment_depletion_test(distr, by = tissue)
## Or without specifying the 'by' parameter.
output_pooled <- enrichment_depletion_test(distr)
## Use different cutoffs for p and fdr
output_strictcutoff <- enrichment_depletion_test(distr,
by = tissue,
p_cutoffs = 0.000001, fdr_cutoffs = 0.000005
)
# Use multiple cutoffs for p and fdr (yields multi-star significance labels)
output_multistars <- enrichment_depletion_test(distr,
by = tissue,
p_cutoffs = c(0.05, 0.01, 0.00000005),
fdr_cutoffs = c(0.1, 0.05, 0.00000001)
)
# All variants should return plain data frames.
test_that("Output has correct class", {
expect_true(inherits(output, c("data.frame")))
expect_true(inherits(output_pooled, c("data.frame")))
expect_true(inherits(output_strictcutoff, c("data.frame")))
expect_true(inherits(output_multistars, c("data.frame")))
})
# Grouped runs: 3 tissues x 5 features = 15 rows; pooled: 5 rows; 13 columns
# either way (values taken from the fixture's expected output).
test_that("Output has correct size", {
expect_equal(dim(output), c(15, 13))
expect_equal(dim(output_pooled), c(5, 13))
expect_equal(dim(output_strictcutoff), c(15, 13))
expect_equal(dim(output_multistars), c(15, 13))
})
# Star counts pin down how many rows pass each p/FDR cutoff combination.
test_that("Number significant is correct", {
expect_equal(sum(output$significant == "*"), 15)
expect_equal(sum(output$significant_fdr == "*"), 15)
expect_equal(sum(output_pooled$significant == "*"), 5)
expect_equal(sum(output_pooled$significant_fdr == "*"), 5)
expect_equal(sum(output_strictcutoff$significant == "*"), 9)
expect_equal(sum(output_strictcutoff$significant_fdr == "*"), 9)
expect_equal(sum(output_multistars$significant == "***"), 8)
expect_equal(sum(output_multistars$significant_fdr == "**"), 9)
})
|
f791f47691c9ed0a5bbeef7e340d13b627270885
|
f9d6ff022b97ff2d299c8927cdb8884d51e51701
|
/R/count_total_missing_values.R
|
af21eca89aafd9bdb7c0e18ae0714c21aaccece4
|
[] |
no_license
|
antchau/glider
|
c0571ef7e69c440ca11f99026e0725ed3e126f2b
|
9deaafc9aaca9c5f1e9d4fd143d8872169824a7e
|
refs/heads/master
| 2023-01-22T03:05:37.216056
| 2020-11-30T22:26:44
| 2020-11-30T22:26:44
| 286,846,018
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 268
|
r
|
count_total_missing_values.R
|
#' Count total number of missing values in a data frame.
#'
#' `is.na()` applied to a data frame yields a logical matrix with one cell
#' per entry; the count of `TRUE` cells is the total number of `NA`s.
#'
#' @param df A data frame.
#'
#' @return Integer count of missing (`NA`) entries across all columns.
#' @export
#'
#' @examples count_total_missing_values(mtcars)
count_total_missing_values <- function(df) {
  length(which(is.na(df)))
}
|
eb85b22aa6e2fcbf6ee2230e09f5df49a7e000ef
|
9b202913ece1e2916e80c913693bc17c0adba768
|
/R/processing/process_forsstrom2015.R
|
70d787a7693a2df89d7a8c67bd71532bd502699b
|
[] |
no_license
|
PMassicotte/cdoc
|
5217750db3e1fdf699a1a8d5a26a8f1b83d52253
|
fef25b848cb1ac1f2c7be171b290ad5d5fef7af7
|
refs/heads/master
| 2021-09-13T21:36:56.273815
| 2018-05-04T12:59:30
| 2018-05-04T12:59:30
| 80,427,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,413
|
r
|
process_forsstrom2015.R
|
#<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# FILE:         process_forsstrom2015.R
#
# AUTHOR:       Philippe Massicotte
#
# DESCRIPTION:  Process raw data from:
#
#               Forsström, L., Rautio, M., Cusson, M., Sorvari, S., Albert, R.,
#               Kumagai, M., et al. (2015). Dissolved organic matter concentration,
#               optical parameters and attenuation of solar radiation in high- latitude
#               lakes across three vegetation zones. Écoscience 6860.
#               doi:10.1080/11956860.2015.1047137.
#<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>

# NOTE(review): the original script began with rm(list = ls()), which wipes
# the caller's entire global environment whenever this file is sourced.
# That destructive side effect has been removed; scripts should not clear
# the workspace.

# Read the literature CSV ("nd" marks missing values), keep DOC and the two
# absorption columns, and reshape to one row per (sample, wavelength).
forsstrom2015 <- read_csv("dataset/raw/literature/forsstrom2015/forsstrom2015.csv", na = "nd") %>%
  select(lake_code = `Lake (code)`,
         doc = DOC,
         a440,
         a320) %>%
  # Lake codes contain accented characters; normalize encoding to UTF-8.
  mutate(lake_code = iconv(lake_code, from = "latin1", to = "UTF-8")) %>%
  na.omit() %>%
  # Unit conversion: / 12 * 1000 — presumably mg C/L -> umol C/L via the
  # molar mass of carbon; TODO confirm against the source paper's units.
  mutate(doc = doc / 12 * 1000) %>%
  mutate(date = as.Date("2004-08-22")) %>% # average
  # Long format: wavelength column ("a320"/"a440") plus absorption value,
  # then strip the "a" prefix to get the numeric wavelength.
  gather(wavelength, absorption, a320, a440) %>%
  mutate(wavelength = parse_number(wavelength)) %>%
  mutate(longitude = 21) %>% # based on Fig. 1
  mutate(latitude = 69) %>%
  mutate(study_id = "forsstrom2015") %>%
  filter(!is.na(doc) & !is.na(absorption)) %>%
  mutate(unique_id = paste("forsstrom2015", 1:nrow(.), sep = "_")) %>%
  mutate(ecosystem = "lake")

write_feather(forsstrom2015, "dataset/clean/literature/forsstrom2015.feather")
|
60ab07730099b3c90ccca4ac732fadb6138bfebc
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/tests/next/test_FLPCA.R
|
1955d65255c1848112ff324b9e0318653364a684
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
test_FLPCA.R
|
# DB-Lytix Example.
# AdapteR PCA test: mirrors an R-side princomp() against the in-database
# prcomp() implementation and compares standard deviations within tolerance.
# Renv holds plain R objects; as.FL() creates FLenv, the in-database mirror
# environment — NOTE(review): semantics of as.FL/eval_expect_equal come from
# the AdapteR test harness, not visible here; confirm against that package.
Renv <- new.env(parent = globalenv())
FLenv <- as.FL(Renv)
# Use iris with Species coerced to numeric so all columns are numeric.
Renv$tbl <- iris
Renv$tbl$Species <- as.numeric(Renv$tbl$Species)
# Push the data frame into a (non-temporary) database table for the FL side.
FLenv$tbl <- as.FLTable(Renv$tbl,tableName = getOption("TestTempTableName"),
temporary=F, drop = TRUE)
#'
#' fliris <- as.FL(rtbl)
#' flirispca <- prcomp(Species~., data = fliris)
# Fit PCA on both sides: base princomp() in R, formula-based prcomp() in-DB.
Renv$mod <- princomp(Renv$tbl[,-1])
FLenv$mod <- prcomp(Species~.,FLenv$tbl)
# Run the same expression in both environments; no result assertion here,
# only that it evaluates (noexpectation = "fit").
eval_expect_equal({
fit <- prcomp(data = tbl)
},Renv,FLenv,
noexpectation = "fit")
##FLexpect_equal(FLenv$mod$rotation, as.matrix(Renv$mod$loading[1:4,1:4]))
# Compare component standard deviations with a 5% tolerance.
FLexpect_equal(FLenv$mod$sdev, as.numeric(Renv$mod$sdev),tolerance = .05)
|
75699b7690e82c7f26f53a15bdb8ddfafefb7a6f
|
30ef24a6c48d15897736f1a1b2bd165344eb6017
|
/docs/build.R
|
3a9ea2551173179467ac2653d7ceaea9d2a8be33
|
[
"MIT"
] |
permissive
|
skhan890/hbgd
|
6527d165c103c04e048713bd495d38ae99d611d4
|
6fc084ff17d6b88afc0724273b03e8e822151071
|
refs/heads/master
| 2021-01-16T20:36:17.479770
| 2016-06-07T19:35:09
| 2016-06-07T19:35:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 758
|
r
|
build.R
|
# Build the packagedocs documentation site for the hbgd package.
# Run from the package root; output is rendered into ./docs.
knitr::opts_knit$set(root.dir = normalizePath("./docs"))
packagedocs::render_docs(
code_path = ".", # location of code directory
docs_path = "./docs", # location of docs directory
package_name = "hbgd", # name of the package
main_toc_collapse = TRUE, # use collapsing toc on main page
rd_toc_collapse = TRUE, # use collapsing toc on rd page
lib_dir = "assets", # put assets in "assets" directory
render_main = TRUE, # render main page
render_rd = TRUE, # render rd page
view_output = TRUE, # look at the output after render
rd_index = "rd_index.yaml" # optional path to rd layout yaml
)
# Manual build steps, kept for reference:
# setwd("docs")
# system("R CMD build ../../hbgd")
# Extract runnable code from the rendered docs into docs/code.
packagedocs::purl_docs("docs", "docs/code")
|
85962396d7e87e094fe0232b23a979e540e5504d
|
13750542b2e5406b948059ae7d2bbe9967bb58bc
|
/initGrid.R
|
37293ab7e4d2b70e83fe31253516952f250d2d97
|
[] |
no_license
|
jrevenaugh/Riquity
|
ce65cd3e9f0f30b92b8e773e723b1cb34793e226
|
64ce1eed78467f6a6849744819829f9aa469f4c7
|
refs/heads/master
| 2020-03-15T01:57:34.876482
| 2018-05-07T21:07:17
| 2018-05-07T21:07:17
| 131,906,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
initGrid.R
|
# Initialize a grid with empty hole positions given by vector "hole".
# hole is in key format (characters "1" through "9" and "A" through "F").
source("global.R")
initGrid <- function(hole){
i <- which(key %in% tolower(hole))
grid <- matrix(TRUE, nrow = 15)
grid[i] <- FALSE
return(grid)
}
|
d1b4fccd359041b7e368d78a9a569efd0b00df9c
|
332b6802a20847807f84817512df1d681275469e
|
/server.r
|
489fed6d263b45a87bc91b298cff65eb41d8f2c1
|
[] |
no_license
|
tischi/shinyHTM
|
abb45aef3514791df06412602bdf3f7dd4e9de1e
|
ea35c9f9dce2fb7d079933e76f8f7bd992f92255
|
refs/heads/master
| 2021-01-09T08:44:31.436833
| 2017-03-14T08:59:14
| 2017-03-14T08:59:14
| 65,203,389
| 0
| 0
| null | 2017-03-13T18:22:09
| 2016-08-08T12:43:49
|
R
|
UTF-8
|
R
| false
| false
| 22,708
|
r
|
server.r
|
# ==========================================================================
# Phylosophy: the 'htm' data.frame is placed in the global enviromnent, where it can be accessed and updated by reactive functions
#
# Heatmaps are built from a reactive data.frame 'htmHM()'
#
# ==========================================================================
# To do:
# Sync negative control across Normalize&Summarize
# Update plot symbols each time QC is applied
# Top-level initialization for the Shiny server: load helpers, attach
# packages, raise the upload limit, and reset the global analysis state.
# loadpackage() is defined in functions.r — NOTE(review): presumably it
# installs-if-missing then attaches; confirm in functions.r.
source("./functions.r")
loadpackage("shiny")
loadpackage("plotly")
loadpackage("ggplot2")
loadpackage("tcltk")
loadpackage("xlsx")
loadpackage("shinyjs")
# Adjust maximum upload size to 2 Gb
options(shiny.maxRequestSize=2*1024^3)
# Initialize variables: drop any stale globals from a previous session.
if(exists("htm")) rm(htm)
if(exists("QCsettings")) rm(QCsettings)
# Empty QC-settings table (one row per configured QC rule). At top level
# `<<-` assigns into the global environment, same as `<-` here; the server
# callbacks below mutate this global in place.
QCsettings <<- data.frame(type = character(),
measurement = character(),
minimum = character(),
maximum = character(),
failed = integer(),
stringsAsFactors = FALSE)
# Name of the logical QC column added to the uploaded data table.
col_QC <- "HTM_QC"
shinyServer(function(input, output){
# File Input
observeEvent(input$file1, {
htm <- read.HTMtable(input$file1$datapath)
htm[[col_QC]] <- TRUE
htm <<- htm
})
# Settings
output$UIcolNameTreatment <- renderUI({
input$file1
input$applyNorm
selectInput("colTreatment", "Treatment:", as.list(names(htm)))
})
output$UIcolNameBatch <- renderUI({
input$file1
input$applyNorm
selectInput("colBatch", "Batch:", as.list(names(htm)))
})
output$UIcolNameWell <- renderUI({
input$file1
input$applyNorm
selectInput("colWell", "Well coordinate:", as.list(names(htm)))
})
output$UIcolNamePos <- renderUI({
input$file1
input$applyNorm
selectInput("colPos", "Sub-position coordinate:", as.list(names(htm)))
})
output$UIfiji_path <- renderUI({
if (Sys.info()['sysname'] == "Windows")
{
fiji_binary_path = "C:/Fiji.app/ImageJ-win64.exe"
}
else
{
fiji_binary_path = "/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx"
}
textInput("fiji_binary", "Path to Fiji ", value = fiji_binary_path)
})
output$UIavailableimages <- renderUI({
input$file1
img_names <- gsub(paste0("^", input$prefixPath, "(.*)"), "\\1", names(htm)[grep(paste0("^", input$prefixPath), names(htm))])
checkboxGroupInput("images2display", "Select images to be viewed upon clicking within a plot", as.list(img_names))
})
# Plot settings
output$UIselectBatch <- renderUI({
input$file1
selectInput("batch", "Show this batch:", as.list(c("All batches",unique(htm[[input$colBatch]]))))
})
observeEvent(input$plotType,{
# Display plot control widgets depending on which plot type is selected
switch(input$plotType,
"Scatter plot" = {
output$UIselectXaxis <- renderUI(selectInput("Xaxis", "X axis:", choices = as.list(names(htm)), width = "200%"))
output$UIselectYaxis <- renderUI(selectInput("Yaxis", "Y axis:", choices = as.list(names(htm)), width = "200%"))
output$UIhighlightQCfailed <- renderUI(checkboxInput("highlightQCfailed", "Show data points that did not pass QC", value = FALSE))
output$UIPointplotsplitBy <- renderUI(selectInput("PointplotsplitBy", "Split plot by", choices = as.list(c("None", names(htm)))))
output$UIPointplotfilterColumn <- renderUI(selectInput("PointplotfilterColumn", "Only show images where column:", choices = as.list(c("None", names(htm))), width = "100%"))
output$UIPointplotfilterValues <- renderUI(selectInput("PointplotfilterValues", "Matches:", choices = as.list(c("All", htm[[input$PointplotfilterColumn]])), width = "100%", multiple = TRUE))
output$UIBoxplothighlightCenter <- renderUI(NULL)
output$UIBoxplotsplitBy <- renderUI(NULL)
},
"Boxplot" = {
output$UIselectXaxis <- renderUI(selectInput("Xaxis", "Categories:", choices = as.list(names(htm)), width = "200%"))
output$UIselectYaxis <- renderUI(selectInput("Yaxis", "Values:", choices = as.list(names(htm)), width = "200%"))
output$UIhighlightQCfailed <- renderUI(checkboxInput("highlightQCfailed", "Hide data points that did not pass QC", value = FALSE))
output$UIPointplotsplitBy <- renderUI(NULL)
output$UIPointplotfilterColumn <- renderUI(NULL)
output$UIPointplotfilterValues <- renderUI(NULL)
output$UIBoxplothighlightCenter <- renderUI(selectInput("BoxplothighlightCenter", "Highlight box center?", choices = list("No", "Mean", "Median")))
output$UIBoxplotsplitBy <- renderUI(selectInput("BoxplotsplitBy", "Split plot by", choices = as.list(c("None", names(htm)))))
},
"Heatmap" = {
output$UIselectXaxis <- renderUI(NULL)
output$UIselectYaxis <- renderUI(selectInput("Yaxis", "Values:", choices = as.list(names(htm)), width = "200%"))
output$UIhighlightQCfailed <- renderUI(checkboxInput("highlightQCfailed", "Show data points that did not pass QC", value = FALSE))
output$UIPointplotsplitBy <- renderUI(NULL)
output$UIPointplotfilterColumn <- renderUI(NULL)
output$UIPointplotfilterValues <- renderUI(NULL)
output$UIBoxplothighlightCenter <- renderUI(NULL)
output$UIBoxplotsplitBy <- renderUI(NULL)
}
)
})
htmHM <- reactive({
if(input$batch == "All batches") return(NULL)
makeHeatmapDataFrame(df = htm,
WellX = input$wells_X,
WellY = input$wells_Y,
PosX = input$npos_X,
PosY = input$npos_Y,
subposjitter = input$squaredodge,
batch_col = input$colBatch,
batch = input$batch,
col_Well = input$colWell,
col_Pos = input$colPos,
col_QC = col_QC)
})
# Plot
output$plot <- renderPlotly({
switch(input$plotType,
"Scatter plot" = pointPlot(htm, input$colBatch, input$batch, input$Xaxis, input$Yaxis, col_QC, input$highlightQCfailed, input$PointplotsplitBy, filterByColumn = input$PointplotfilterColumn, whichValues = input$PointplotfilterValues),
"Boxplot" = boxPlot(htm, input$colBatch, input$batch, input$Xaxis, input$Yaxis, col_QC, input$highlightQCfailed, input$BoxplothighlightCenter, input$BoxplotsplitBy),
"Heatmap" = heatmapPlot(htmHM(), input$Yaxis, input$batch, input$wells_Y, input$wells_X, input$squaresize, col_QC, input$highlightQCfailed)
)
})
# QC-specific settings
approvedExperiments <- reactive({
input$QCAddfailedExperiments
input$QCcheckGroup
unique(as.character(htm[[input$colBatch]]))[!(unique(as.character(htm[[input$colBatch]])) %in% as.character(QCsettings[QCsettings$type == "Failed experiment","minimum"]))]
})
output$UIQCfailedExperiments <- renderUI({
input$file1
input$applyNorm
fluidRow(
column(6,
selectInput("QCfailedExperiment", "Failed experiments:", approvedExperiments(), width = "200%")
),
column(2,
NULL
),
column(2,
NULL
),
column(1,
actionButton("QCAddfailedExperiments", "Add QC", icon = icon("plus-square"), width = "100px")
),
tags$style(type='text/css', "#QCAddfailedExperiments { width:100%; margin-top: 25px;}")
)
})
output$UIQCnumeric <- renderUI({
input$file1
input$applyNorm
fluidRow(
column(6,
selectInput("QCnumMeasurement", "Number-based QC:", as.list(names(htm)), width = "200%")
),
column(2,
numericInput("QCnumMin", "Minimum:", value=1)
),
column(2,
numericInput("QCnumMax", "Maximum:", value=100)
),
column(1,
actionButton("QCAddnumeric", "Add QC", icon = icon("plus-square"), width = "100px")
),
tags$style(type='text/css', "#QCAddnumeric { width:100%; margin-top: 25px;}")
)
})
output$UIQCtext <- renderUI({
input$file1
input$applyNorm
fluidRow(
column(6,
selectInput("QCtxtMeasurement", "Failed images (text-based):", as.list(names(htm)), width = "200%")
),
column(2,
textInput("QCtxtBad", "Value:")
),
column(2,
NULL
),
column(1,
actionButton("QCAddtxt", "Add QC", icon = icon("plus-square"), width = "100px")
),
tags$style(type='text/css', "#QCAddtxt { width:100%; margin-top: 25px;}")
)
})
# The three "Add QC" observers below append one row to the global
# `QCsettings` data frame (type, measurement, minimum, maximum, failed-count)
# and then refresh the QC table, the "disable" checkbox list, and the report.
# NOTE(review): `htm` and `QCsettings` appear to be globals shared across the
# app session (`<<-`) -- confirm this app is single-session before reuse.
observeEvent(input$QCAddfailedExperiments,{
temp <- data.frame(
type = "Failed experiment",
measurement = isolate(input$colBatch),
# For a failed experiment, min and max both store the batch name.
minimum = isolate(input$QCfailedExperiment),
maximum = isolate(input$QCfailedExperiment),
failed = sum(htm[[isolate(input$colBatch)]] == isolate(input$QCfailedExperiment))
)
QCsettings <<- rbind(QCsettings, temp)
# Update show/remove QC
output$QCtable <- renderTable(QCsettings[,1:4])
output$UIQCactive <- renderUI({
checkboxGroupInput("QCcheckGroup",
label = strong("Disable this QC"),
choices = as.list(row.names(QCsettings))
)
})
# Reset QC report
output$QCreport <- renderPrint("")
})
# Numeric QC row: counts rows outside [min, max] (NA values are not counted).
observeEvent(input$QCAddnumeric,{
temp <- data.frame(
type = "Numeric QC",
measurement = isolate(input$QCnumMeasurement),
minimum = as.character(isolate(input$QCnumMin)),
maximum = as.character(isolate(input$QCnumMax)),
failed = sum((htm[[isolate(input$QCnumMeasurement)]] < isolate(input$QCnumMin) | htm[[isolate(input$QCnumMeasurement)]] > isolate(input$QCnumMax)) & !is.na(htm[[isolate(input$QCnumMeasurement)]]))
)
QCsettings <<- rbind(QCsettings, temp)
# Update show/remove QC
output$QCtable <- renderTable(QCsettings[,1:4])
output$UIQCactive <- renderUI({
checkboxGroupInput("QCcheckGroup",
label = strong("Disable this QC"),
choices = as.list(row.names(QCsettings))
)
})
# Reset QC report
output$QCreport <- renderPrint("")
})
# Text QC row: counts rows exactly equal to the rejected string.
observeEvent(input$QCAddtxt,{
temp <- data.frame(
type = "Text QC",
measurement = isolate(input$QCtxtMeasurement),
minimum = isolate(input$QCtxtBad),
maximum = isolate(input$QCtxtBad),
failed = sum(htm[[isolate(input$QCtxtMeasurement)]] == isolate(input$QCtxtBad))
)
QCsettings <<- rbind(QCsettings, temp)
# Update show/remove QC
output$QCtable <- renderTable(QCsettings[,1:4])
output$UIQCactive <- renderUI({
checkboxGroupInput("QCcheckGroup",
label = strong("Disable this QC"),
choices = as.list(row.names(QCsettings))
)
})
# Reset QC report
output$QCreport <- renderPrint("")
})
# Remove every QC whose row name is ticked in the "Disable this QC" list.
# Bug fix: `input$QCcheckGroup` comes from a checkboxGroupInput and may hold
# SEVERAL row names at once; the original `row.names(QCsettings) != input$QCcheckGroup`
# comparison recycled the right-hand vector and could keep/drop the wrong
# rows. `%in%` handles one or many selections correctly.
observeEvent(input$QCcheckGroup, {
QCsettings <<- QCsettings[!(row.names(QCsettings) %in% input$QCcheckGroup), ]
# Update show/remove QC
output$QCtable <- renderTable(QCsettings[,1:4])
output$UIQCactive <- renderUI({
checkboxGroupInput("QCcheckGroup",
label = strong("Disable this QC"),
choices = as.list(row.names(QCsettings))
)
})
# Reset QC report
output$QCreport <- renderPrint("")
})
# Apply all configured QCs to the global `htm` table: writes TRUE/FALSE into
# the column named by `col_QC` and streams a human-readable report into the
# "echo_QC" HTML element. `echo()` emits message() conditions which the
# withCallingHandlers handler forwards to the page without unwinding.
observeEvent(input$applyQC,{
withCallingHandlers({
html("echo_QC", "", add = FALSE)
echo("Performing QCs:")
echo("")
if(nrow(QCsettings) == 0){
# If no QC setting is selected, approve all images
htm[[col_QC]] <- TRUE
htm <<- htm
echo(" No QCs selected. Setting all data to valid.")
echo(" Total measurements: ", nrow(htm))
echo("")
echo("The column ", col_QC, " has been updated.")
} else{
# If QC parameters have been selected, label the htm data.frame accordingly
# (applyQC() is a project helper -- presumably returns a logical vector,
# TRUE = row passes all QCs; confirm in its definition).
temp <- applyQC(htm, QCsettings)
htm[[col_QC]] <- temp
htm <<- htm
echo("QCs:")
# Per-QC reporting; the meaning of "minimum"/"maximum" depends on QC type.
for(i in 1:nrow(QCsettings)){
switch(as.character(QCsettings[i, "type"]),
"Failed experiment" = {
echo("Failed experiment:")
echo(" Batch: ", QCsettings[i, "minimum"])
},
"Numeric QC" = {
echo("Measurement: ", QCsettings[i, "measurement"])
echo(" Allowed range: ", QCsettings[i, "minimum"], " ... ", QCsettings[i, "maximum"], " and not NA.")
},
"Text QC" = {
echo("Measurement: ", QCsettings[i, "measurement"])
echo(" Reject text: ", QCsettings[i, "minimum"])
}
)
echo(" Total: ", nrow(htm))
echo(" Failed: ", QCsettings[i, "failed"])
echo("")
}
echo("Summary of all QCs:")
echo(" Total (all QCs): ", nrow(htm))
echo(" Approved (all Qcs): ", sum(htm[[col_QC]]))
echo(" Failed (all Qcs): ", sum(!htm[[col_QC]]))
echo("")
echo("The column ", col_QC, " has been updated.")
}
},
message = function(m) html("echo_QC", m$message, add = TRUE)
)
})
# Plot-Fiji interaction
# When the user clicks a data point in the plotly plot, resolve the image
# path for that row and open it in Fiji. Path columns are assumed to be
# named <prefixPath><channel> / <prefixFile><channel> in `htm`
# (TODO confirm against the table-loading code).
output$selection <- renderPrint({
s <- event_data("plotly_click")
if (length(s) == 0) {
"Click on a data point to open images!"
} else {
print("You selected:")
print(s)
# plotly point numbers are 0-based; htm rows are 1-based.
i = s[["pointNumber"]] + 1
openTheseImgChannels <- input$images2display
# Normalize Windows backslashes, then remap the path prefix stored in
# the table to the path on this computer.
tempPathInTable <- gsub("\\\\", "/", input$pathInTable)
tempPathInComputer <- gsub("\\\\", "/", input$pathInComputer)
tempFullPathName <- paste0(htm[i, paste0(input$prefixPath, input$images2display)], "/", htm[i, paste0(input$prefixFile, input$images2display)])
tempFullPathName <- gsub("\\\\", "/", tempFullPathName)
FullPathFile <- sub(tempPathInTable, tempPathInComputer, tempFullPathName, ignore.case = TRUE)
#print(paste0("Launching Fiji: ",input$fiji_binary))
#print(FullPathFile)
OpenInFiji(FullPathFile, input$fiji_binary)
}
})
# Normalization settings
# Dynamic selectors for the normalization pipeline; each re-renders when a
# new file is loaded (and, for the feature selector, after a summary run).
output$UINormFeatures <- renderUI({
input$file1
input$applySummary
selectInput("NormFeatures", "Data features to be analyzed", choices = as.list(names(htm)), multiple = FALSE)
})
output$UINormDataTransform <- renderUI({
input$file1
selectInput("NormDataTransform", "Data transformation", choices = list("None selected", "log2"))
})
output$UINormGradientCorr <- renderUI({
input$file1
selectInput("NormGradientCorr", "Batch-wise spatial gradient correction", choices = list("None selected", "median polish", "median 7x7", "median 5x5", "median 3x3", "z-score 5x5"))
})
output$UINormMethod <- renderUI({
input$file1
selectInput("NormMethod", "Batch-wise normalisation against negative control", choices = list("None selected", "z-score", "robust z-score", "subtract mean ctrl", "divide by mean ctrl", "subtract median ctrl", "divide by median ctrl"))
})
output$UINormNegCtrl <- renderUI({
input$file1
selectInput("NormNegCtrl", "Negative control", choices = as.list(c("None selected", sort(htm[[input$colTreatment]]))))
})
# Run the normalization pipeline (project helper htmNormalization) on the
# global `htm` and replace it; progress messages are streamed into the
# "echo_Normalization" element via the message handler.
observeEvent(input$applyNorm,{
withCallingHandlers({
html("echo_Normalization", "", add = FALSE)
htm <<- htmNormalization(data = htm,
measurements = input$NormFeatures,
col_Experiment = input$colBatch,
transformation = input$NormDataTransform,
gradient_correction = input$NormGradientCorr,
normalisation = input$NormMethod,
negcontrol = input$NormNegCtrl,
col_QC = col_QC,
col_Well = input$colWell,
col_Treatment = input$colTreatment,
num_WellX = input$wells_X,
num_WellY = input$wells_Y)
},
message = function(m) html("echo_Normalization", m$message, add = TRUE)
)
})
# Treatment summary
# Selectors for the treatment-summary analysis. Changing any of them clears
# the previously computed table/report (see the four reset observers below)
# so stale results are never shown against new settings.
output$UISummaryMeasurements <- renderUI({
input$file1
input$applyNorm
selectInput("SummaryMeasurements", "Measurements to be analyzed", choices = as.list(names(htm)), multiple = TRUE)
})
output$UISummaryNegCtrl <- renderUI({
input$file1
input$applyNorm
input$NormNegCtrl
selectInput("SummaryNegCtrl", "Negative control", choices = as.list(c("None selected", "All treatments", sort(htm[[input$colTreatment]]))))
})
output$UISummaryPosCtrl <- renderUI({
input$file1
input$applyNorm
selectInput("SummaryPosCtrl", "Positive control", choices = as.list(c("None selected", sort(htm[[input$colTreatment]]))))
})
output$UISummaryNumObjects <- renderUI({
input$file1
input$applyNorm
selectInput("SummaryNumObjects", "Number of objects per image", choices = as.list(names(htm)))
})
# Reset the summary outputs whenever a summary input changes.
observeEvent(input$SummaryMeasurements,{
output$TreatmentSummaryTable <- renderDataTable(data.frame())
output$SummaryReport <- renderPrint("")
})
observeEvent(input$SummaryNegCtrl,{
output$TreatmentSummaryTable <- renderDataTable(data.frame())
output$SummaryReport <- renderPrint("")
})
observeEvent(input$SummaryPosCtrl,{
output$TreatmentSummaryTable <- renderDataTable(data.frame())
output$SummaryReport <- renderPrint("")
})
observeEvent(input$SummaryNumObjects,{
output$TreatmentSummaryTable <- renderDataTable(data.frame())
output$SummaryReport <- renderPrint("")
})
# Compute the per-treatment summary (project helper htmTreatmentSummary),
# render selected columns in the UI, and prompt the user (tcltk save dialog)
# for where to write the full table as CSV.
observeEvent(input$applySummary,{
withCallingHandlers({
html("echo_TreatmentSummary", "", add = FALSE)
temp <- htmTreatmentSummary(data = htm,
measurements = input$SummaryMeasurements,
col_Experiment = input$colBatch,
col_Treatment = input$colTreatment,
col_ObjectCount = input$SummaryNumObjects,
col_QC = col_QC,
negative_ctrl = input$SummaryNegCtrl,
positive_ctrl = input$SummaryPosCtrl,
excluded_Experiments = "")
# Display the summary table in the html page
output$TreatmentSummaryTable <- renderDataTable(temp[,c("treatment", "median__means", "t_test__p_value", "t_test__signCode", "numObjectsOK", "numImagesOK", "numReplicatesOK")])
# Save summary table
echo("")
echo("Please save the summary table using the popup window.")
# tkgetSaveFile returns "" when the user cancels; the write.csv below
# still runs in that case -- NOTE(review): consider guarding it.
path <- tclvalue(tkgetSaveFile(initialfile = paste0("TreatmentSummary--", input$SummaryMeasurements, ".csv")))
write.csv(temp, path, row.names = FALSE)
if(path == ""){
echo("Did not save treatment summary table!")
} else{
echo("Saved summary table to ", path)
}
},
message = function(m) html("echo_TreatmentSummary", m$message, add = TRUE)
)
})
# Data table
# Show the full (possibly normalized) `htm` table whenever a new file is
# loaded; `input$applyNorm` is referenced only to re-render after normalization.
observeEvent(input$file1, {
input$applyNorm
output$valuestable <- renderDataTable(htm)
})
})
|
e36c92f0f0386eccafe19e573c17fe506616a4dd
|
2f2fb1e88289f6f2e85b75a6bed98940597ce6cc
|
/2_phylo_dating/1.4_gene_shop_data_stat.R
|
bcd880be1aa25ecc0aae808ec38794466d3a903c
|
[] |
no_license
|
PiraMew/Maya
|
744493f3504fae9faf3daea8809ac77e9008e5ed
|
30a51ad3bb01243d73cc9cb39d1ad3f7ac5a2a2b
|
refs/heads/master
| 2023-02-28T19:17:05.736971
| 2020-06-16T18:42:41
| 2020-06-16T18:42:41
| 333,092,172
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,893
|
r
|
1.4_gene_shop_data_stat.R
|
#!/usr/bin/env Rscript
###########################################################################
# Project: Orania Phylogeny MT
# Script: gene_shop_data_stat.R
# --- Action: Compares gene trees to Astral tree (one direction) in order to
# ------------ select the genes that have the least "good" (BS > 75%) nodes disagreeing with the tree of individuals (i.e. "species tree"). Also select among these selected genes the most clock-like genes.
# --- Input: species tree, folder with individual rooted gene trees (with outgroup! + collapsed nodes (BS < 10) and collapsed nodes (branch length < 0.00002))
# --- Output: sorted statistic based on the signal_support_stats function of the script gene_shop_fct.R and selected genes for different scenarii
# Author: Maya Schroedl
###########################################################################
# NOTE(review): rm(list=ls()) wipes the calling environment -- acceptable for
# a standalone Rscript, but do not source() this file interactively.
rm(list=ls())
# Functions ------------------------------------------------------------------
# Provides signal_support_stats() (and presumably loads ape's read.tree --
# TODO confirm in 1.4_gene_shop_fct.R).
source(file.path(getwd(), "scripts", "2_phylo_dating","1.4_gene_shop_fct.R"))
# Libraries ---------------------------------------------------------------
if (!require('dplyr')) install.packages('dplyr'); library('dplyr')
# Working directories -----------------------------------------------------
wd = getwd()
# Application ------------------------------------------------------
### Input ---
## Rooted gene trees ----
genetrees_folder = file.path(wd, "2_phylo_dating", "2_astral_gene_trees","gene_trees_drop") #where are the gene trees
gene_list = read.table(file.path(wd, "1_phylo_reconstruction", "genelist_7575.txt"))[,1] #list of all the genes
suffix = "_drop.tree" #how are the gene tree files named
## Rooted species tree ----
sptree_file = file.path(wd, "2_phylo_dating","2_astral_gene_trees","astral_for_dating.tree" ) #where is the species tree
sptree = read.tree(sptree_file) #read species tree
### Output ----
gene_shop_dir = file.path(wd, "2_phylo_dating","3_gene_shop")
if (!dir.exists(gene_shop_dir)){dir.create(gene_shop_dir)}
###
output = file.path(gene_shop_dir,"geneshopping.stats")
#### Get statistics ----
# signal_support_stats appends one row per gene to `output`, so the file is
# removed first to avoid mixing runs.
if (file.exists(output)){file.remove(output)} #delete output file
for (gene in gene_list){
genetree = read.tree(file.path(genetrees_folder,paste0(gene, suffix)))
#genetree$edge.length = NULL #make relations better visible (remove branch lengths for this analysis)
#plot(genetree, main = gene) #have a look how tree looks
#nodelabels(text=genetree$node.label,frame = "none") #add bootstrap support
signal_support_stats(genetree,gene, sptree, output)
}
#### Select genes ----
# get statistics for each gene generated
stats=read.table(output, h=T)
# Sort: fewest good nodes disagreeing with the species tree first, ties
# broken by most good nodes agreeing.
stats_sorted = stats %>%
arrange(gnd_disagree_perc, desc(gnd_agree_perc))
View(stats_sorted)
#######################
# we would like to chose the genes that have:
# - the least good nodes disagreeing with sptree
# - the most clocklike
################
# 1) Genes with 0 disagreeing good nodes
no_gdis = stats$genetree[which(stats$gnd_disagree_perc == 0 & stats$gnd_agree_perc != 0)]
# 2) possible clock-like gene (having 0 disagreeing good nodes). This gene needs to be tested with Bayes factor beast (nested sampling), to see whether it really clock-like and can be run with a strict model or not.
#sort stats by ascending root-tip variance
stats_clock = stats[which(stats$genetree %in% no_gdis),] %>% #only have a look at no_gdis genes
arrange(root_tip_var)
one_clock = stats_clock$genetree[1] #the "best" gene
# Write names of selected gene to files -------------------------------------------
selected_dir = file.path(wd,"2_phylo_dating","3_gene_shop","selected_genes")
if (!dir.exists(selected_dir)){dir.create(selected_dir)}
write.table(no_gdis, file.path(selected_dir,"no_gdis.txt"),col.names=F,row.names = F,quote=F)
write.table(one_clock, file.path(selected_dir,"clock_one.txt"),col.names=F,row.names = F,quote=F)
|
d597ccf45f00dc88c514229e122777814338165a
|
53d0f71c11b9a2cd600e80d9214c013ec6b8b7d3
|
/Functions/scv.bic.R
|
5c4c2c8243f80b94b16a6e7057832610b47a648f
|
[] |
no_license
|
boxinz17/FGM_Neighborhood
|
af480715f76f5e1cff9c741afc7b879d189ff548
|
7a70cf4a699dd42629686e91d36a73af35711bb1
|
refs/heads/main
| 2023-08-03T08:39:16.209314
| 2021-09-15T18:57:30
| 2021-09-15T18:57:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,260
|
r
|
scv.bic.R
|
# Selective-cross-validation BIC for a group-sparse multivariate regression fit.
#
# X:        n x (p*M) design matrix.
# Y:        n x M response matrix.
# beta.hat: length p*M^2 coefficient vector (ignored when empty = TRUE,
#           which denotes the all-zero model).
# empty:    TRUE means "no coefficients selected"; residuals are then Y itself.
#
# Returns list(SCVBIC, term1, term2) with
#   term1 = n * log det(Sigma.hat)   (Gaussian log-likelihood part)
#   term2 = (#selected groups) * log(n)  (BIC penalty)
SCVBIC <- function(X, Y, beta.hat, empty = FALSE){
n <- nrow(X)
M <- ncol(Y)
p <- ncol(X)/M
if(empty){ # all-zero model: residuals are just Y, nothing selected
epsilon.hat <- Y
p.train <- 0
}
else{
# Recover the group-level sparsity pattern: pM^2 -> pM -> p. Each group
# contributes M^2 coefficients; a group counts as selected if its first
# coefficient block is nonzero (mirrors the oneloop function below).
sp.pattern.pM2 <- (beta.hat != 0)
select <- rep(c(rep(TRUE, M), rep(FALSE, M^2 - M)), p)
sp.pattern.pM <- sp.pattern.pM2[select] # From pM^2 to pM
select.A <- rep(c(TRUE, rep(FALSE, M - 1)), p)
sp.pattern.p <- sp.pattern.pM[select.A] # From pM to p
p.train <- sum(sp.pattern.p) # number of selected groups
# Fitted values: Z %*% beta.hat stacks predictions observation-major
# (entry (i-1)*M + j is observation i, response j), so a single
# byrow = TRUE reshape replaces the original O(n*M) double loop.
Z <- X.kprod(X, M)
AX.long <- Z %*% beta.hat
AX.n.by.M <- matrix(AX.long, nrow = n, ncol = M, byrow = TRUE)
epsilon.hat <- Y - AX.n.by.M # n by M residuals
}
Sigma.hat <- emp.cov(epsilon.hat)
# Likelihood term only; the trace term was dropped deliberately because it
# fails at high dimension (see the retained note in the project history).
term1 <- log(det(Sigma.hat)) * n
# BIC penalty on the number of selected groups.
term2 <- p.train * log(n)
return(list(SCVBIC = term1 + term2, term1 = term1, term2 = term2))
}
# A function converting True/False to the sequence of true entries
# (i.e. the 1-based indices at which TF.vec is TRUE).
# Replaces the original c()-growing loop -- which copied the vector on every
# append -- with which(); as.numeric keeps the historical double return type.
TF.to.seq <- function(TF.vec){
as.numeric(which(TF.vec))
}
# A function that returns the minimum choice of lambda that guarantees full
# sparsity: the largest group-wise Frobenius norm of t(X_Gj) %*% Y / n over
# the p coefficient groups (KKT bound for the group lasso).
lambda.sup <- function(X,Y){
n <- nrow(X)
M <- ncol(Y)
p <- ncol(X)/M
# One candidate per group j: columns (j-1)*M+1 .. j*M of X form group j.
group.norms <- vapply(seq_len(p), function(j){
group.cols <- (j - 1) * M + (1:M)
norm(t(X[, group.cols]) %*% Y, "F") / n
}, numeric(1))
max(group.norms)
}
####################################
## Tuning Parameter lambda ##
## Using ##
## Selective Cross Validation ##
####################################
# For each candidate lambda: fit the group lasso by ADMM on the full data,
# extract the selected groups, then run K-fold "selective" cross-validation
# restricted to those groups and score with SCVBIC. Returns the lambda with
# the smallest summed criterion plus the warm-start state (A/B/U) at the
# optimum, for reuse by the multiloop driver below.
group.lasso.SCV.ADMM.oneloop <- function(X, Y, lambdas, K=5, udf.warmstart=F, A.in, B.in, U.in){
# Input: X and Y are both in matrix form. X: n*pM, Y: n*M
#lambdas: a vector of lambdas to be cross validated
#K: K-fold SCV
# Objective: Select a best lambda using SCV-BIC, and output its result
n <- nrow(X)
M <- ncol(Y)
p <- ncol(X)/M
L <- length(lambdas)
# Folds are contiguous blocks of n/K rows, so n must divide evenly.
if(n %% K != 0) stop("K value error: n should be integer times of K")
df <- rep(0,L)
# Init warmstart
if(udf.warmstart){ # if the user applies a user defined warmstart
A <- A.in; B <- B.in; U <- U.in
}
else{
A <- list()
for(j in 1:p) A[[j]] <- matrix(0,M,M)
B <- matrix(0.1, n, M)
U <- matrix(0.01, n, M)
}
# Init warmstart for SCV
p.train.old <- p+1
A.cv.old <- list(); B.cv.old <- list(); U.cv.old <- list()
# Start searching over all lambdas
error <- rep(0,L)
beta.path <- numeric(0)
A.list <- list()
for(l in 1:L){
print(paste("lambda =",lambdas[l]))
# First run ADMM on the full dataset and get a sparsity pattern (group selection)
# NOTE(review): A.ein/B.ein/U.ein are assigned but never read.
A.ein <- A; B.ein <- B; U.ein <- U
fit <- ADMM.grplasso(X,Y,lambdas[l], A.init=A, B.init=B, U.init=U)
beta <- fit$coefficients
beta.path <- cbind(beta.path,beta)
# Reduce the pM^2 coefficient pattern to a per-group indicator (same
# scheme as SCVBIC above).
sp.pattern.Z <- (beta != 0)
select <- rep(c(rep(T,M), rep(F,M^2-M)),p)
sp.pattern <- sp.pattern.Z[select] # From pM^2 to pM
select.A <- rep(c(T, rep(F,M-1)),p)
sp.pattern.A <- sp.pattern[select.A] # From pM to p
seq.A <- TF.to.seq(sp.pattern.A) # a vector with length p.train
# Warm start for next round (will affect the error path. No warm start)
A <- fit$A; B <- fit$B; U <- fit$U
A.list[[l]] <- A
# Cross-Validation with selected groups
X.s <- X[,sp.pattern]
scv.bic <- 0
# Initial ABU for cv fits: warm start as well
n.train <- n - n/K
p.train <- sum(sp.pattern.Z)/M^2 # p.cv has the same value
# NOTE(review): resetting p.train.old to 0 here forces the "sparsity
# changed" re-initialization branch on every lambda, defeating the
# A.cv.old/B.cv.old warm start below; it is then overwritten again at
# the end of the fold loop. Looks intentional per the "No warm start"
# comment above, but confirm before relying on the warm-start path.
p.train.old <- 0
if(p.train!=0){
df[l] <- p.train
if(p.train!= p.train.old) print("Sparsity changed!")
for(k in 1:K){
print(paste("Selected Cross Validation fold:",k,"/",K))
# Dataset Separation
index.cv <- ((n*(k-1)/K+1) : (n*k/K))
X.cv <- X.s[index.cv,]
X.train <- X.s[-index.cv,]
Y.cv <- Y[index.cv,]
Y.train <- Y[-index.cv,]
Z.cv <- X.kprod(X.cv,M)
# Warm Start
if(p.train != p.train.old){ # if sparsity changed
A.cv <- list()
for(j in 1:(p.train)){
A.cv[[j]] <- A[[seq.A[j]]]
} # warm start for A: partition from large A
if(p.train.old==0){ # if A.cv has yet to be non-zero
# Scale B/U to the smaller training set size.
B.cv <- B[-index.cv,] * K/(K-1)
U.cv <- U[-index.cv,] * K/(K-1)
}else{
B.cv <- B.cv.old[[k]]
U.cv <- U.cv.old[[k]]
}
}
else{ # if sparsity not changed
A.cv <- A.cv.old[[k]]
B.cv <- B.cv.old[[k]]
U.cv <- U.cv.old[[k]]
}
# Fit cv model and predict
fit.cv <- ADMM.grplasso(X.train,Y.train,lambdas[l], A.init=A.cv, B.init=B.cv, U.init=U.cv)
beta.cv <- fit.cv$coefficients
pred.cv <- Z.cv %*% beta.cv
# Update warm start for next round, if sparsity not changed
A.cv <- fit.cv$A; B.cv <- fit.cv$B; U.cv <- fit.cv$U
A.cv.old[[k]] <- A.cv; B.cv.old[[k]] <- B.cv; U.cv.old[[k]] <- U.cv
# SCV-BIC Criterion. MSE + BIC term
# Choose either of the two rows below. Edit row 156 as well
# THIS scv.bic <- scv.bic + (norm(pred.cv-Y.vectorize(Y.cv) ,"2")^2 + p.train/p * M^2 * log(n.train))
#scv.bic <- scv.bic + (norm(pred.cv-Y.vectorize(Y.cv) ,"2")^2 + log(n.train)*M^2*p.train)
# SCV.AIC:
#scv.bic <- scv.bic + norm(pred.cv-Y.vectorize(Y.cv) ,"2")^2 + p.train
# SCV.BIC:
#scv.bic <- scv.bic + norm(pred.cv-Y.vectorize(Y.cv) ,"2")^2 + p.train * log(n.train)
# SCV.BIC:
scv.bic <- scv.bic + SCVBIC(X.cv, Y.cv, beta.cv, empty=F)$SCVBIC
}
p.train.old <- p.train
error[l] <- scv.bic
# Track the warm-start state (A/B/U) at the best lambda seen so far.
if(l==1){
A.out <- A; B.out <- B; U.out <- U
error.min <- error[1]
}
if(l>=2){
if(error[l]<error.min){
error.min <- error[l]
A.out <- A; B.out <- B; U.out <- U
}
}
}
else{ # if beta hat is all 0
for(k in 1:K){
index.cv <- ((n*(k-1)/K+1) : (n*k/K))
X.cv <- X.s[index.cv,]
X.train <- X.s[-index.cv,]
Y.cv <- Y[index.cv,]
Y.train <- Y[-index.cv,]
Z.cv <- X.kprod(X.cv,M)
# Choose either in the two rows below. Edit row 136 as well
#scv.bic <- scv.bic + (norm(Y.vectorize(Y.cv) ,"2"))^2
#scv.bic <- scv.bic + (norm(Y.vectorize(Y.cv) ,"2"))^2
scv.bic <- scv.bic + SCVBIC(X.cv, Y.cv, 0, empty=T)$SCVBIC
}
error[l] <- scv.bic
if(l==1){
A.out <- A; B.out <- B; U.out <- U
error.min <- error[1]
}
if(l>=2){
if(error[l]<error.min){
error.min <- error[l]
A.out <- A; B.out <- B; U.out <- U
}
}
}
}
# Best lambda over the grid, its degrees of freedom, and its coefficients.
lambda.opt <- lambdas[which.min(error)]
df.opt <- df[which.min(error)]
beta.hat <- beta.path[,which.min(error)]
print(paste("SCV selects lambda =",lambda.opt,"as optimal in this range, with d.f.",df.opt,"out of",p))
#fit <- ADMM.grplasso(X,Y,lambda.opt, A.init=A, B.init=B, U.init=U)
#beta.hat <- fit$coefficients
return(list(lambda=lambda.opt, beta=beta.hat, error.path=error, df.path=df, df.opt=df.opt,
A.opt=A.out, B.opt=B.out, U.opt=U.out, A.list=A.list))
}
###########################
### LAMBDA SEARCH ###
### based on KKT ###
### ###
###########################
# Run multiple loops #
# Iteratively narrow the lambda search interval: start from the KKT bound
# (lambda.sup) and repeatedly run oneloop on a log-spaced grid, shrinking
# [lambda.min, lambda.max] around the current optimum until the interval is
# smaller than lambda.accuracy.
# NOTE(review): if the initial interval is already <= lambda.accuracy the
# while body never runs and `result` is undefined at the print() below.
group.lasso.SCV.ADMM.multiloop <- function(X, Y, lambda.min=1, lambda.accuracy=1.5, K=5, lambdas.terms=50){
n <- nrow(X)
M <- ncol(Y)
p <- ncol(X)/M
# +1 so the top of the grid sits strictly above the full-sparsity bound.
lambda.max <- lambda.sup(X,Y) + 1
round <- 1
while(lambda.max - lambda.min > lambda.accuracy){
cat(paste("**********************\nlambda range:", lambda.max,"to",lambda.min,"\n"))
lambdas <- exp(seq(log(lambda.max),log(lambda.min),length.out=lambdas.terms))
# After the first round, reuse the optimum's ADMM state as warm start.
if(round==1) result <- group.lasso.SCV.ADMM.oneloop(X,Y,lambdas,K)
else result <- group.lasso.SCV.ADMM.oneloop(X,Y,lambdas,K,udf.warmstart = T, A.in=A, B.in=B, U.in=U)
plot(lambdas, result$error.path, type="l", main="Error path", xlab="lambda",ylab="error")
if((which(lambdas==result$lambda)+1)==length(lambdas)){ # The last lambda gives the smallest error
# Optimum at the small end of the grid: extend the interval downward.
lambda.min <- lambda.min - (lambda.max-lambda.min)/lambdas.terms
lambda.max <- result$lambda
}
else{ # the optimal lambda is not lambda.min
# Bracket the optimum between it and the next-smaller grid point.
lambda.max <- result$lambda
lambda.min <- lambdas[which(lambdas==result$lambda)+1]
}
A <- result$A.opt; B <- result$B.opt; U <- result$U.opt
round <- round + 1
}
print(paste("The final choice of lambda is",lambda.max,"with d.f.", result$df.opt,"out of",p))
return(list(beta=result$beta, lambda=lambda.max))
}
|
d07e41b45cbd9f54d5e78183ea6f1078ab39914d
|
96a399382fb89102ec288e7030e16a8de6079776
|
/plot3.R
|
76110d7632eba9600148dbac6beefa543c68e27a
|
[] |
no_license
|
electromel/ExData_Plotting1
|
a96a57cf583ead6f28753fd7c9c4712e3910b3a4
|
e0c57a77cd90f772401b8e8148c5136d9942072b
|
refs/heads/master
| 2021-01-15T12:49:21.294579
| 2015-04-12T16:39:44
| 2015-04-12T16:39:44
| 33,526,065
| 0
| 0
| null | 2015-04-07T06:26:05
| 2015-04-07T06:26:05
| null |
UTF-8
|
R
| false
| false
| 806
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series for 2007-02-01 and 2007-02-02,
# drawn to the screen device and then copied to plot3.png.

input_path <- "./household_power_consumption.txt"
power <- read.table(input_path, sep = ";", header = TRUE, na.strings = "?")

# Parse dates, keep only the two target days, then build a POSIXct timestamp.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power <- subset(power, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))
power$DateTime <- as.POSIXct(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S")

# Draw the three sub-metering series on one set of axes with a legend.
plot(power$DateTime, power$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(power$DateTime, power$Sub_metering_2, type = "l", col = "red")
lines(power$DateTime, power$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = names(power[7:9]), lty = 1, col = c("black", "red", "blue"))

# Copy the finished screen plot to a PNG file.
dev.copy(png, file = "./plot3.png")
dev.off()
|
1ee7009fd2cd03a27b9e18bfdf840217de9f4fd7
|
c169f200481e291de6229f998c3b74496a11337c
|
/Titanic.R
|
2529d0fc0587439c3bb16a9a34624902eb817b95
|
[] |
no_license
|
meetkholia/Titanic_Survival_Analysis
|
7d3b44cfeebd3b6adbc773d99181b378faf23f19
|
5d35bd00c23f9f0f0a2c3171ed440fc636c5882c
|
refs/heads/master
| 2016-09-12T09:52:13.127899
| 2016-04-15T21:30:31
| 2016-04-15T21:30:31
| 56,350,890
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,053
|
r
|
Titanic.R
|
# Read the data
train <- read.csv(file="train.csv",header=TRUE, sep=",");
test <- read.csv(file="test.csv",header=TRUE, sep=",");
# to combine both dataset make "surived" variable in test
# (placeholder "None" marks test rows whose outcome is unknown)
test.survived <- data.frame(Survived = rep("None",nrow(test)),test[,])
data.combined <- rbind(train,test.survived)
str(data.combined)
#converting pclass into factor rather than integer
data.combined$Pclass <- as.factor(data.combined$Pclass)
data.combined$Survived <- as.factor(data.combined$Survived)
# Distribution of Survival
table(data.combined$Survived)
#Distribution of pclass
table(data.combined$Pclass)
barplot(table(data.combined$Pclass))
##Distribution of sex by pclass
xtabs( ~Pclass+ Sex ,data=train)
table(train$Pclass,train$Sex)
#Hypothesis - Rich people (1st class) survival is higher or not
library(ggplot2)
train$Pclass <- as.factor(train$Pclass)
str(train)
## X has to be continuous to plot this; that is why reading the train data set again is required
ggplot(data=train) +
geom_histogram(aes(x=Pclass,fill=factor(Survived)),binwidth=0.5) +
xlab("Pclass") +
ylab("Count") +
labs(fill="Survived")
## X is factor for the following code
ggplot(train,aes(x=Pclass,fill=factor(Survived))) +
geom_bar(width=0.5) + ##stat_count(width=0.5) also works fine
xlab("Pclass") +
ylab("Count") +
labs(fill="Survived")
# how many unique names are there in combined data
length(unique(as.character(data.combined$Name)))
#find out duplicated names
dup_name<-as.character(data.combined[which(duplicated(as.character(data.combined$Name))),"Name"])
#is the data duplicated or they are different people
data.combined[which(data.combined$Name %in% dup_name),]
library(stringr)
# Do titles (Miss, Mr., Mrs.) correlate with survival?
#get all the data that contains "Miss."
# (str_detect uses a regex here, so "." matches any character)
misses<-data.combined[which(str_detect(data.combined$Name,"Miss.")),]
misses[1:5,1:6]
# 127 misses survived, 55 drowned
table(misses$Survived)
#same for missus
missus<-data.combined[which(str_detect(data.combined$Name,"Mrs.")),]
missus[1:5,1:8]
#check the same with the sex(man)
# Bug fix: the row filter must come from data.combined itself. The original
# indexed the 1309-row data.combined with train$Sex (only 891 rows), so the
# wrong rows were selected and test-set males were dropped.
males<-data.combined[which(data.combined$Sex== "male"),]
head(males[,1:5])
table(males$Survived)
#extracting the title out of the names to see correlation between title name survival and pclass
# Name must be character (not factor) for the grep-based title extraction below.
data.combined$Name <- as.character(data.combined$Name)
#finding Misses in the data with grep function
head(data.combined[grep("Miss.",data.combined$Name),])
#extract function
# Map one passenger name to its honorific: "Miss.", "Master", "Mrs.", "Mr.",
# or "Other". Check order matters: "Mrs." must be tested before "Mr.".
# Bug fix: the original used grep() with unescaped regex dots, so e.g. the
# pattern "Mr." matched "Mr" followed by ANY character; fixed = TRUE matches
# the literal title strings instead.
extract_title<- function(Name) {
  if (grepl("Miss.", Name, fixed = TRUE))
  {
    return("Miss.")
  }
  else if (grepl("Master", Name, fixed = TRUE))
  {
    return("Master")
  }
  else if (grepl("Mrs.", Name, fixed = TRUE))
  {
    return("Mrs.")
  }
  else if (grepl("Mr.", Name, fixed = TRUE))
  {
    return("Mr.")
  }
  else
  {
    return("Other")
  }
}
# Derive a title for every passenger. vapply replaces the original loop that
# grew `titles` with c() on every iteration (quadratic copying) and also
# guarantees a character(1) result per name.
titles <- vapply(data.combined$Name, extract_title, character(1), USE.NAMES = FALSE)
data.combined$title <- as.factor(titles)
#now, creating a graph to see the correlation
#using only 891 rows because we have survived variable in train only which has 891 rows
ggplot(data.combined[1:891,],aes(x=title,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass)+
ggtitle("Pclass")+
xlab("Title") +
ylab("Count") +
labs(fill="Survived")
# table(data.combined$Sex)
# str(data.combined$Sex)
## ggplot for visualization of sex and survived
ggplot(data.combined[1:891,],aes(x=Sex,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass)+
ggtitle("Pclass")+
xlab("Sex") +
ylab("Count") +
labs(fill="Survived")
## for age and survival
## facet_wrap will do for both sex and class
ggplot(data.combined[1:891,],aes(x=Age,fill=Survived)) +
geom_histogram(binwidth=5)+
facet_wrap(~Sex +Pclass)+
ggtitle("Pclass")+
xlab("Age") +
ylab("Count")
## age of all the Master
## Master is a proxy for Male Children
boys<-data.combined[which(data.combined$title == "Master"),]
summary(boys$Age)
## 8 from the 263 missing values belongs to Master title
## same for Miss
miss <- data.combined[which(data.combined$title == "Miss."),]
summary(miss$Age)
## 50 of 263 missing values belongs to Miss title
## more complicated because age min=0.17 and age max=63
# Misses travelling alone (no siblings/spouse, no parents/children):
a<-miss[which(miss$SibSp == 0 & miss$Parch == 0),]
a[which(a$Age <= 14.5),]
## ggplot to see correlation between age and survival for miss title
ggplot(miss[miss$Survived != "None",],aes(x=Age,fill=Survived)) +
facet_wrap(~Pclass) +
geom_histogram(binwidth=5) +
xlab("Age") +
ylab("Count")
##Analysis of Siblings and Parents and child
unique(data.combined$SibSp)
data.combined$SibSp <- as.factor(data.combined$SibSp)
## visualization of siblings and survival
ggplot(data.combined[1:891,],aes(x=SibSp,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass)+
ggtitle("Pclass")+
xlab("Siblings") +
ylab("Count") +
labs(fill="Survived")
data.combined$Parch <- as.factor(data.combined$Parch)
## visualization of parent/children count and survival
ggplot(data.combined[1:891,],aes(x=Parch,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass + title)+
ggtitle("Pclass")+
xlab("Parent/Children") +
ylab("Count") +
ylim(0,300) +
labs(fill="Survived")
## feature engineering
## creating a family size feature
# Family size = siblings/spouse + parents/children + self.
# Bug fix: the original built a factor and then called as.integer() on it,
# which returns LEVEL CODES, not the sizes (e.g. sizes 1, 2, 11 become
# codes 1, 3, 2 because factor levels sort as character). Keep the plain
# numeric sum instead.
temp.SibSp <- c(train$SibSp,test$SibSp)
temp.Parch <- c(train$Parch,test$Parch)
data.combined$FamilySize <- temp.SibSp + temp.Parch + 1
max(data.combined$FamilySize)
## visualization of family size and survival
ggplot(data.combined[1:891,],aes(x=FamilySize,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass )+
ggtitle("Pclass")+
xlab("Family Size") +
ylab("Count") +
ylim(0,300) +
labs(fill="Survived")
## ticket variable
str(data.combined$Ticket)
data.combined$Ticket <- as.character(data.combined$Ticket)
data.combined$Ticket[1:20]
## to see the patterns in the ticket number, use the first character as a proxy
substr(data.combined$Ticket,1,1)
# Empty ticket strings map to " " so the factor has no empty-string level.
ticket.first <- ifelse(data.combined$Ticket == "", " ",substr(data.combined$Ticket,1,1))
unique(ticket.first)
data.combined$ticket.first <- as.factor(ticket.first)
## visualization of ticket first character and survival
ggplot(data.combined[1:891,],aes(x=ticket.first,fill=Survived)) +
geom_bar(width=0.5)+
##facet_wrap(~Pclass )+
ggtitle("Pclass")+
xlab("Ticket Number") +
ylab("Count") +
ylim(0,300) +
labs(fill="Survived")
## visualization of FamilySize and survival
ggplot(data.combined[1:891,],aes(x=FamilySize,fill=Survived)) +
geom_bar(width=0.5)+
facet_wrap(~Pclass )+
ggtitle("Pclass")+
xlab("Family Size") +
ylab("Count") +
ylim(0,300) +
labs(fill="Survived")
## fare variable experiments
max(data.combined$Fare)
# Who paid the (outlier) maximum fare of ~512?
data.combined[which(data.combined$Fare < 515 & data.combined$Fare > 510),1:2]
##fare variable
summary(data.combined$Fare)
length(unique(data.combined$Fare))
## treat fare as numeric
ggplot(data.combined[1:891,],aes(x=Fare)) +
geom_bar(width=5) +
xlab("Fare") +
ylab("Count") +
ylim(0,50)
ggplot(data.combined[1:891,],aes(x=Fare,fill=Survived)) +
geom_histogram(binwidth=5)+
facet_wrap(~Pclass)+
ggtitle("Pclass")+
xlab("Family Size") +
ylab("Count") +
ylim(0,50) +
xlim(0,300) +
labs(fill="Survived")
## Cabin variable
str(data.combined$Cabin)
data.combined$Cabin <- as.character(data.combined$Cabin)
data.combined$Cabin[1:20]
## replace empty cabin number to "U" (unknown)
data.combined[is.na(data.combined$Cabin),"Cabin"] <- ""
data.combined[which(data.combined$Cabin == ""),"Cabin"] <- "U"
## getting first char out of cabin (the deck letter)
cabin.first <- as.factor(substr(data.combined$Cabin,1,1))
levels(cabin.first)
## add it to data.combined
data.combined$cabin.first <- cabin.first
rm(data.combined$cabin.fisrt)
#Visualize cabin first character and survival
ggplot(data.combined[1:891,],aes(x=cabin.first,fill=Survived)) +
geom_bar()+
ggtitle("Pclass")+
xlab("Cabin") +
ylab("Count")
ggplot(data.combined[1:891,],aes(x=cabin.first,fill=Survived)) +
geom_bar()+
facet_wrap(~Pclass)+
ggtitle("Pclass")+
xlab("Cabin code") +
ylab("Count") +
ylim(0,50) +
labs(fill="Survived")
## data modeling
## Linear Regression
str(data.combined)
data.combined$Survived <- as.factor(data.combined$Survived)
## Logistic Regression
# NOTE(review): this fits on all 1309 rows, including the test rows whose
# Survived is the placeholder "None" -- confirm that is intended.
mylogit <- glm(Survived ~ Age + Pclass + Sex + SibSp + Parch + Fare + Embarked, data = data.combined, family = "binomial")
summary(mylogit)
# Odds ratios from the logit coefficients.
exp(coef(mylogit))
## Chi square test of independence between survival and sex
chisq.test(data.combined$Survived,data.combined$Sex)
summary(table(data.combined$Survived,data.combined$Sex))
##mosaic plot of class and survival
mosaicplot(train$Pclass ~ train$Survived,color= c("Red","Blue"),
xlab="Class",ylab="Survival",main="Survival based on Class")
# Bug fix: install.packages() needs the package name as a string; the bare
# symbol `randomForest` raised "object 'randomForest' not found". Only
# install when the package is missing.
if (!requireNamespace("randomForest", quietly = TRUE)) {
  install.packages("randomForest")
}
library(randomForest)
# installing/loading the package:
if(!require(installr)) {
  install.packages("installr"); require(installr)}
updateR()
hist(train$Age)
struct<-lapply(train,class)
##handling null values
sum(is.na(train))
train$Pclass[is.na(train$Pclass)] <-names(which.max(table(train$Pclass)))
train$Age[is.na(train$Age)] <- median(train$Age,na.rm=TRUE)
train$SibSp[is.na(train$SibSp)] <-names(which.max(table(train$SibSp)))
train$Parch[is.na(train$Parch)] <- median(train$Parch,na.rm=TRUE)
train$Fare[is.na(train$Fare)] <- median(train$Fare,na.rm=TRUE)
train$Embarked[is.na(train$Embarked) | train$Embarked == ""]<- names(which.max((table(train$Embarked))))
##data
# train<- train[,!(names(train) %in% c("PassengerId","Name","Ticket","Cabin"))]
# cols <- c("Survived", "Pclass", "Sex", "SibSp","Embarked")
# train[cols] <- lapply(train[cols], factor)
# col2<- c("Survived","Sex","Fare","Pclass")
# train<- train[,col2]
## Random Forest feature-subset comparison (models 1-4).
# Each model is fit on the 891 labelled rows with a different predictor set,
# using the same seed so OOB error rates are comparable across runs.
library(randomForest)
# Model 1: Pclass + title
train.mod1 <- data.combined[1:891,c("Pclass","title")]
mod1.predictor <- as.factor(train$Survived)
# Fixed seed: reproducible output, and comparable across the model variants.
set.seed(1234)
mod1 <- randomForest(x = train.mod1,y=mod1.predictor,importance=TRUE,ntree=1000)
# Print OOB confusion matrix / error rate, then variable importance.
mod1
varImpPlot(mod1)
# Model 2: Pclass + title + SibSp
train.mod2 <- data.combined[1:891,c("Pclass","title","SibSp")]
mod2.predictor <- as.factor(train$Survived)
set.seed(1234)
mod2 <- randomForest(x=train.mod2,y=mod2.predictor,importance=TRUE,ntree=1000)
mod2
varImpPlot(mod2)
# Model 3: Pclass + title + Parch
train.mod3 <- data.combined[1:891,c("Pclass","title","Parch")]
mod3.predictor <- as.factor(train$Survived)
set.seed(1234)
mod3 <- randomForest(x=train.mod3,y=mod3.predictor,importance=TRUE,ntree=1000)
mod3
varImpPlot(mod3)
# Model 4: Pclass + title + Parch + SibSp
train.mod4 <- data.combined[1:891,c("Pclass","title","Parch","SibSp")]
mod4.predictor <- as.factor(train$Survived)
set.seed(1234)
mod4 <- randomForest(x=train.mod4,y=mod4.predictor,importance=TRUE,ntree=1000)
mod4
varImpPlot(mod4)
# Model 5: Pclass + title + FamilySize.
# NOTE: train.mod5 and mod5.predictor are reused by the cross-validation and
# rpart sections below — do not rename them.
train.mod5 <- data.combined[1:891,c("Pclass","title","FamilySize")]
mod5.predictor <- as.factor(train$Survived)
set.seed(1234)
mod5 <- randomForest(x=train.mod5,y=mod5.predictor,importance=TRUE,ntree=1000)
mod5
varImpPlot(mod5)
# Model 6: Pclass + title + FamilySize + Parch
train.mod6 <- data.combined[1:891,c("Pclass","title","FamilySize","Parch")]
mod6.predictor <- as.factor(train$Survived)
set.seed(1234)
mod6 <- randomForest(x=train.mod6,y=mod6.predictor,importance=TRUE,ntree=1000)
mod6
varImpPlot(mod6)
# Model 7: Pclass + title + FamilySize + SibSp
train.mod7 <- data.combined[1:891,c("Pclass","title","FamilySize","SibSp")]
mod7.predictor <- as.factor(train$Survived)
set.seed(1234)
mod7 <- randomForest(x=train.mod7,y=mod7.predictor,importance=TRUE,ntree=1000)
mod7
varImpPlot(mod7)
## Cross Validation
# Keep only the 3 strongest predictors (per mod5) from the held-out rows;
# rows 892:1309 of data.combined are the unlabelled Kaggle test set.
test.final <- data.combined[892:1309,c("Pclass","title","FamilySize")]
# Predict survival on the test set with the mod5 random forest.
mod5.predict <- predict(mod5,test.final)
table(mod5.predict)
# Build and save the submission file.
# FIX: rep(892:1309) with no `times` argument is an identity call — a plain
# sequence is equivalent and clearer.
mod5.csv <- data.frame(PassengerId = 892:1309, Survived = mod5.predict)
write.csv(mod5.csv,file="mod5_predict.csv",row.names=FALSE)
# 10-fold cross-validation, repeated 10 times, of the mod5 feature set.
library(caret)
library(doSNOW)
set.seed(2348)
# Create 10 x 10 stratified random folds of the response.
folds <- createMultiFolds(mod5.predictor,k=10,times=10)
# Stratification sanity check: the survivor ratio in the full data...
table(mod5.predictor)
342/549
# ...should be close to the ratio inside an arbitrary fold (fold 29 here).
table(mod5.predictor[folds[[29]]])
307/494
# Train-control object reusing the folds created above.
train.control1 <- trainControl(method = "repeatedcv",number = 10,repeats =10 ,index =folds)
# Spin up an 8-worker SNOW cluster for parallel training.
cluster <- makeCluster(8,type="SOCK")
registerDoSNOW(cluster)
# Train the random forest under repeated CV.
set.seed(34324)
train.1 <- train(x=train.mod5,y=mod5.predictor,method="rf",tuneLength=3,ntree=1000,
trControl = train.control1)
# Stop the cluster to release the workers.
stopCluster(cluster)
# Inspect the cross-validated accuracy.
train.1
# We're still at ~76% accuracy, slightly lower than mod5's OOB estimate,
# so train on more data per fold by using fewer (5) folds.
folds2 <- createMultiFolds(mod5.predictor,k = 5,times = 10)
train.control2 <- trainControl(method = "repeatedcv",number = 5,repeats =10 ,index =folds2)
cluster <- makeCluster(8,type="SOCK")
registerDoSNOW(cluster)
set.seed(34324)
train.2 <- train(x=train.mod5,y=mod5.predictor,method="rf",tuneLength=3,ntree=1000,
trControl = train.control2)
stopCluster(cluster)
# Inspect the 5-fold CV result.
train.2
# Repeat the experiment with 3-fold CV (even more training data per fold).
folds3 <- createMultiFolds(mod5.predictor,k = 3,times = 10)
train.control3 <- trainControl(method = "repeatedcv",number = 3,repeats =10 ,index =folds3)
cluster <- makeCluster(8,type="SOCK")
registerDoSNOW(cluster)
set.seed(34324)
train.3 <- train(x=train.mod5,y=mod5.predictor,method="rf",tuneLength=3,ntree=1000,
trControl = train.control3)
stopCluster(cluster)
# Inspect the 3-fold CV result.
train.3
## CART / decision tree (rpart)
library("rpart")
library("rpart.plot")
# Based on the random-forest runs, train.2 (5-fold CV) had the highest
# accuracy, so fit rpart under the same 5-fold, 10-repeat scheme.
# NOTE(review): no set.seed() precedes this fold creation, so the folds are
# not reproducible across sessions — confirm whether that matters.
folds2.rpart <- createMultiFolds(mod5.predictor, k = 5, times = 10)
# BUG FIX: the control object previously used `index = folds2` (the folds
# from the random-forest section), so folds2.rpart was created but never
# used. It must reference the freshly created folds2.rpart.
train.control2.rpart <- trainControl(method = "repeatedcv", number = 5, repeats = 10, index = folds2.rpart)
# Parallel training on an 8-worker SNOW cluster.
cluster <- makeCluster(8, type = "SOCK")
registerDoSNOW(cluster)
set.seed(34324)
train.2.rpart <- train(x = train.mod5, y = mod5.predictor, method = "rpart", tuneLength = 30,
trControl = train.control2.rpart)
stopCluster(cluster)
train.2.rpart
# Plot the final pruned tree.
prp(train.2.rpart$finalModel, type = 0, extra = 1, under = TRUE)
# Cross-check the rpart result under 3-fold CV as well.
folds3.rpart <- createMultiFolds(mod5.predictor, k = 3, times = 10)
# BUG FIX: previously `index = folds3` (the random-forest folds) was passed,
# leaving folds3.rpart unused; reference the folds created on the line above.
train.control3.rpart <- trainControl(method = "repeatedcv", number = 3, repeats = 10, index = folds3.rpart)
cluster <- makeCluster(8, type = "SOCK")
registerDoSNOW(cluster)
set.seed(34324)
train.3.rpart <- train(x = train.mod5, y = mod5.predictor, method = "rpart", tuneLength = 30,
trControl = train.control3.rpart)
stopCluster(cluster)
train.3.rpart
# Plot the final pruned tree.
prp(train.3.rpart$finalModel, type = 0, extra = 1, under = TRUE)
# Both rpart and random forest rank title as the most important feature,
# so refine the title feature by re-parsing the Name column.
table(data.combined$title)
# Names look like "Last, Title. First ..." — split on the comma first.
name_split <- str_split(data.combined$Name,",")
name_split[1]
# Element 1 of each split is the last name.
last_name <- sapply(name_split,"[",1)
last_name[1:5]
data.combined$Lastname <- as.factor(last_name)
# Split the remainder (" Title. First ...") on spaces; because it starts
# with a space, element 1 is "" and element 2 is the title token.
name_split <- str_split(sapply(name_split,"[",2)," ")
name_split[1]
title_split <- sapply(name_split,"[",2)
# Inspect the raw title tokens before consolidating them below.
unique(title_split)
table(title_split)
# Updating the titles: map rare/foreign variants onto common equivalents.
# "the" is the token extracted from "the Countess"; it is folded into "Lady.".
title_split[title_split %in% c("Dona.","the")] <- "Lady."
title_split[title_split %in% c("Mlle.","Ms.")] <- "Miss."
title_split[title_split %in% c("Mme.")] <- "Mrs."
title_split[title_split %in% c("Don.","Jonkheer.")] <- "Sir."
title_split[title_split %in% c("Major.","Capt.","Col.")] <- "Officer"
# Attach the consolidated titles to the combined data as a factor.
data.combined$title.split <- as.factor(title_split)
# Survival rate per consolidated title, faceted by class (training rows only).
ggplot(data.combined[1:891,],aes(x=title.split,fill=Survived))+
geom_bar()+
facet_wrap(~Pclass)+
ggtitle("Survival rate for Splitted Titles")
# Since there are few instances of Lady, Sir, Dr, Officer and Rev,
# merge them into Mrs. and Mr. respectively for accuracy.
# This also reduces the chance of overfitting the model.
# NOTE: order matters — "Lady." already absorbed "Dona."/"the Countess" in
# the earlier recoding, so this must run after that block.
title_split[title_split %in% c("Lady.")] <- "Mrs."
title_split[title_split %in% c("Sir.","Dr.","Officer","Rev.")] <- "Mr."
table(title_split)
# Overwrite title.split with the fully consolidated factor.
data.combined$title.split <- as.factor(title_split)
# Visualize survival by the consolidated titles, faceted by class.
ggplot(data.combined[1:891,],aes(x=title.split,fill=Survived))+
geom_bar()+
facet_wrap(~Pclass)+
ggtitle("Survival rate for Splitted Titles")
# Fit an rpart model using the newly created title.split feature.
features <- c("Pclass","title.split","FamilySize")
best.predictor2 <- data.combined[1:891,features]
# 3-fold, 10-repeat CV with rpart on Pclass, title.split and FamilySize.
folds4.rpart <- createMultiFolds(mod5.predictor,k = 3,times = 10)
train.control4.rpart <- trainControl(method = "repeatedcv",number = 3,repeats =10 ,index =folds4.rpart)
# Parallel training on an 8-worker SNOW cluster.
cluster <- makeCluster(8,type="SOCK")
registerDoSNOW(cluster)
set.seed(34324)
train.4.rpart <- train(x=best.predictor2,y=mod5.predictor,method="rpart",tuneLength=30,
trControl = train.control4.rpart)
stopCluster(cluster)
train.4.rpart
# Plot the final pruned tree.
prp(train.4.rpart$finalModel,type=0,extra=1,under=TRUE)
|
803747df8f5256f185afb36344a9bda1bac293f3
|
a44a64837d1cfc4e43251dab59fbe7c1b0dae823
|
/cleaning_data.R
|
2f5c9e8a6d08f7340cb69290c48401f597b48f6f
|
[] |
no_license
|
tanyasarkjain/CaffraMsatAnalysis
|
e772620c6a9b78f0f168de66bcd7ca568a51b369
|
b0838ef4aba2b6e0728acc1d590a2a8c1ac74ac4
|
refs/heads/master
| 2023-06-09T15:46:13.975807
| 2021-06-29T21:33:30
| 2021-06-29T21:33:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
cleaning_data.R
|
# Load dependencies with library() rather than require(): require() returns
# FALSE on a missing package and lets the script limp on, whereas library()
# fails fast with an error at the point of the problem.
library(gstudio)
library(ggplot2)
library(ggmap)
library(popgraph)
library(igraph)
library(maps)
library(raster)
library(fields)
library(dplyr)
# Read sampling-coordinate data.
SADataCoords<-read.csv("/Users/tanyajain/Desktop/SouthernAfricaResearch/Wogan_etal_Cossypha_caffra_SamplingInfoTanya.csv",header=TRUE)
SADataCoords
# Number of unique samples in Coords: 339
length(unique(SADataCoords$Sample))
# Number of unique rows in Coords: 354
nrow(unique(SADataCoords))
# Show the duplicated Sample rows (fromLast = TRUE keeps the *last*
# occurrence as canonical and flags the earlier ones).
# FIX: duplicated() already returns a logical vector, so the redundant
# `== TRUE` / `== FALSE` comparisons are dropped / replaced with negation.
SADataCoords[duplicated(SADataCoords$Sample, fromLast = TRUE), ]
# Delete the duplicates from Coords.
SADataCoords <- SADataCoords[!duplicated(SADataCoords$Sample, fromLast = TRUE), ]
SADataCoords
# Sort by Taxon then Population to create the .gen file with more ease.
SortedSADataCoords <- SADataCoords[
  with(SADataCoords, order(Taxon, Population)),
]
SortedSADataCoords
write.csv(SortedSADataCoords,"Tanya_Wogan_etal_Cossypha_caffra_SamplingInfoSorted.csv", row.names = FALSE)
# Join the coordinate data with the microsatellite genotypes (genepop file).
SADataMsat<-read_population(path="/Users/tanyajain/Desktop/SouthernAfricaResearch/cc2_Msats_GenePop.gen", type="genepop", locus.columns = 3:16, header=TRUE)
SADataMsat
SADataMsat <- as.data.frame(SADataMsat)
# Keep only samples present in both tables.
# FIX: removed the stray trailing comma in the original call (an empty
# argument matched against inner_join()'s `...` triggers an error); also
# use <- for assignment per convention.
SortedSADataMsat <- inner_join(SortedSADataCoords, SADataMsat, by = c("Sample" = 'ID'), copy = FALSE, suffix = c(".x", ".y"))
SortedSADataMsat
# Keep column 1 (Sample) plus columns 40:53 — the 14 locus columns
# (matching locus.columns = 3:16 above) after the join.
SortedSADataMsat <- SortedSADataMsat[, c(1, 40:53)]
SortedSADataMsat
# Append " , " after each sample ID (genepop row format: "ID ,  genotypes").
SortedSADataMsat$Sample <- paste(SortedSADataMsat$Sample, ', ', sep=" ")
# Replace empty cells (missing genotypes) with the genepop missing code.
# FIX: "^$" matches only the empty string — the conventional spelling of the
# original "$^", which behaves the same way but reads as a typo.
SortedSADataMsat[] <- lapply(SortedSADataMsat, function(x) sub("^$", "000:000", x))
SortedSADataMsat[]
# Strip the ":" allele separator so genotypes read e.g. "000000".
SortedSADataMsat[] <- lapply(SortedSADataMsat, function(x) gsub(":", "", x))
SortedSADataMsat
# Write in genepop format (space-separated, unquoted).
# Spell out FALSE/TRUE instead of the reassignable shorthands F/T.
write.table(SortedSADataMsat, file = "SortedSADataMsat2.txt", sep = " ",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
#Relay
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.