blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
630d467dfc5f7b9babd369ab4716d0015355deb5
|
546fe376d00b978637ab9328ec351be8669e76ea
|
/HMM.DM.code/DMR.combine.R
|
5377234f48da129df9f8f82b1f69130603c445ef
|
[] |
no_license
|
Maria-831/HMM-DM
|
a67d7da9767d376ea2a14d43a291a4a8564b5194
|
502b458107e9cecfb3a9df155c796a21513e83e5
|
refs/heads/master
| 2021-06-01T01:32:28.829456
| 2016-03-24T04:06:06
| 2016-03-24T04:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,979
|
r
|
DMR.combine.R
|
# DMR.combine: merge adjacent small DM (differentially methylated) CG regions
# of one type into larger regions, subject to distance / intervening-CG
# constraints. Called by DM.region.combine.ver2() on the output of
# chr.DM.region.by.CG.ver3().
#
# Arguments:
#   1) DM.type: the type of DM regions to summarize, "hyper" or "hypo"
#   2) regions: the output from chr.DM.region.by.CG.ver3; the indexing below
#      uses col 1 = chr, col 2 = start, col 3 = end, col 5 = DM state,
#      col 6 = number of DM CGs, col 11 = mean posterior
#   3) chr.DM.status.matrix: the same input as in chr.DM.region.by.CG.ver3;
#      the indexing below uses col 2 = CG position, col 6 = posterior,
#      col 7 = raw state, col 8 = methylation difference, col 9 = HMM state,
#      col 10 = row index, cols 11/12 = per-group coverage (column meanings
#      inferred from usage -- confirm against the caller)
#   4) raw.CG: a vector of all CG positions on that chromosome
#   5) distance.threshold: CG regions farther apart than this physical
#      distance (bp) are never combined
#   6) num.CG.between: the max number of EM CG sites allowed between any two
#      small DM regions that get combined
#   7) posterior.threshold: the max posterior probability for the EM CGs
#      in between
#   8) empty.CG: the max number of CGs without coverage allowed between two
#      consecutive regions that get combined
# Output:
#   A matrix with 11 columns:
#   chr start end len DM num.CG total.CG meanCov.control meanCov.test meanDiff meanPost
DMR.combine<-function( DM.type, regions, chr.DM.status.matrix, raw.CG, distance.threshold, num.CG.between, posterior.threshold, empty.CG)
{
# accumulator for the combined regions (one row per emitted region)
chr.DM.region.mat<-matrix(NA, nrow=0, ncol=11)
colnames(chr.DM.region.mat)<-c("chr", "start", "end", "len", "DM", "num.CG", "total.CG", "meanCov.control", "meanCov.test", "meanDiff", "meanPost")
# row indices of `regions` whose DM state (col 5) matches the requested type
DM.index<-(1:dim(regions)[1])[as.character(regions[, 5])== DM.type]
if ( length(DM.index)<=1)
{ cat("There are:", length(DM.index), DM.type, " CG regions, we do not need to summarize \n") }
if ( length(DM.index)>1)
{
# physical gap between consecutive DM regions: next region's start minus
# the previous region's end
physical.dis<-regions[DM.index[-1],2] - regions[DM.index[-length(DM.index)],3]
start.index<-1; end.index<-1;
# empty[j]: number of raw CG positions strictly between DM region j and j+1
empty<-rep(0, (length(DM.index)-1))
for (j in 1:(length(DM.index)-1))
{ empty[j]<-length((1:length(raw.CG))[as.numeric(raw.CG)> as.numeric(regions[DM.index[j],3]) & as.numeric(raw.CG)< as.numeric(regions[DM.index[j+1],2]) ])}
# scan the DM regions left to right, growing [start.index, end.index]
# while consecutive regions remain combinable
i<-1
while(i<=(length(DM.index)))
{ # cat("when i is:", i, "start and end index:", c(start.index, end.index), "\n")
add<-0
if ( i < length(DM.index) )
{ if ( physical.dis[i]<= distance.threshold)
{
# the physical distance between regions i and i+1 is small enough:
# inspect the other CG sites lying between them
between.CG.index<-(1:dim(chr.DM.status.matrix)[1])[chr.DM.status.matrix[,2] > regions[DM.index[i],3] & chr.DM.status.matrix[,2] < regions[DM.index[i+1],2] ]
# if both regions consist of a single DM CG site, only allow 1 CG in between
# NOTE(review): this reassigns the num.CG.between argument, so the stricter
# limit persists for all later pairs in this call -- confirm intended
if (regions[DM.index[i],6]==1 && regions[DM.index[i+1],6] == 1 )
{ num.CG.between=1 }
if (length(between.CG.index)>0 && length(between.CG.index) <= num.CG.between && empty[i]<= empty.CG)
{
betweem.CG.state<-chr.DM.status.matrix[between.CG.index,9]
between.CG.raw.state<-chr.DM.status.matrix[between.CG.index,7]
betweem.CG.post<-chr.DM.status.matrix[between.CG.index,6]
vec<-rep(0,length(between.CG.index))
for (j in 1:length(between.CG.index))
{
# a CG in the gap is acceptable if it is an EM site with low posterior,
# or its raw state already matches the DM type
# NOTE(review): && binds tighter than ||, so the posterior test only
# applies to the EM branch -- confirm this precedence is intended
if (betweem.CG.state[j]=="EM" && betweem.CG.post[j]<=posterior.threshold || between.CG.raw.state[j]==DM.type )
{ vec[j]<-1} # this in-between CG satisfies the criteria
}
if ( sum(vec)== length(between.CG.index)) # if all of them satisfy
{ add<-1} # label the pair as combinable
}
}
if (add==1)
{end.index<-i+1; i<-i+1 } # combine these two regions and keep extending
else
{
# close the current combined region [start.index, end.index], compute its
# summary statistics, and append one output row
chrom<-regions[1,1] ; start<-regions[DM.index[start.index], 2]
end<- regions[DM.index[end.index], 3]; leng<-regions[DM.index[end.index], 3] - regions[DM.index[start.index],2] + 1
DM<-DM.type
num.CG<- sum(regions[DM.index[start.index: end.index], 6])
meanPost.prob<-round(mean(regions[DM.index[start.index: end.index], 11]),4)
start.pos.index<- chr.DM.status.matrix[chr.DM.status.matrix[,2]== start, 10]
end.pos.index<- chr.DM.status.matrix[chr.DM.status.matrix[,2]== end, 10]
group1.ave.cov<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 11]),2)
group2.ave.cov<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 12]),2)
meanDiff.mC<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 8]),4)
total.CG.index<-(1:length(raw.CG))[as.numeric(raw.CG)<=as.numeric(end) & as.numeric(raw.CG)>=as.numeric(start)]
total.CG<-length(total.CG.index) # total number of CG sites within this DM region
new.vec<-c(chrom, start, end, leng, DM, num.CG, total.CG, group1.ave.cov, group2.ave.cov, meanDiff.mC, meanPost.prob)
chr.DM.region.mat<-rbind(chr.DM.region.mat, new.vec)
i<-i+1
if ( i<=length(DM.index) ) { start.index<-i; end.index<-i } # start a new search
}
}
else
{ # we reached the last DM region (i == length(DM.index)):
# flush the final combined region using the same summary code, then stop
chrom<-regions[1,1] ; start<-regions[DM.index[start.index], 2]
end<- regions[DM.index[end.index], 3]; leng<-regions[DM.index[end.index], 3] - regions[DM.index[start.index],2] + 1
DM<-DM.type
num.CG<- sum(regions[DM.index[start.index: end.index], 6])
meanPost.prob<-round(mean(regions[DM.index[start.index: end.index], 11]),4)
start.pos.index<- chr.DM.status.matrix[chr.DM.status.matrix[,2]== start, 10]
end.pos.index<- chr.DM.status.matrix[chr.DM.status.matrix[,2]== end, 10]
group1.ave.cov<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 11]),2)
group2.ave.cov<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 12]),2)
meanDiff.mC<-round(mean(chr.DM.status.matrix[start.pos.index:end.pos.index, 8]),4)
total.CG.index<-(1:length(raw.CG))[as.numeric(raw.CG)<=as.numeric(end) & as.numeric(raw.CG)>=as.numeric(start)]
total.CG<-length(total.CG.index) # total number of CG sites within this DM region
new.vec<-c(chrom, start, end, leng, DM, num.CG, total.CG, group1.ave.cov, group2.ave.cov, meanDiff.mC, meanPost.prob)
chr.DM.region.mat<-rbind(chr.DM.region.mat, new.vec)
break
}
}
}
return(chr.DM.region.mat)
}
|
69ece038da6b11b4f298473046097170568f5bf9
|
d4d2d370f8cb50e002a3489d2e2b9186651ef92f
|
/tests/testthat/test-config_file.R
|
dff2cdb383a03e42d526b986da3409d9ceff6bf3
|
[] |
no_license
|
momeara/RosettaFeatures
|
2c45012b042a76b0176a0924f1cc60fe3ba06e8b
|
2700b0735071971bbd2af91a6b1e7454ceeaa2a6
|
refs/heads/master
| 2021-01-19T03:54:05.386349
| 2017-03-24T14:07:21
| 2017-03-24T14:07:21
| 47,008,643
| 1
| 3
| null | 2016-06-16T23:00:32
| 2015-11-28T03:28:34
|
R
|
UTF-8
|
R
| false
| false
| 687
|
r
|
test-config_file.R
|
context("Configuration File")
# Regression test: parse a small JSON analysis configuration end to end and
# check that the configured output directory is picked up.
test_that("A simple configuration file is parsed correctly", {
verbose <- TRUE
config_filename <- "test-config_file__analysis_configuration.json"
# BUG FIX: `verbose=verobse` was a typo; evaluating the misspelled name made
# load_config_file() fail with "object 'verobse' not found".
configuration <- load_config_file(config_filename, verbose=verbose)
configuration <- initialize_output_dir(configuration, verbose)
db_engine <- initialize_db_engine(NULL, configuration, verbose=verbose)
configuration <- initialize_sample_sources(configuration, db_engine, verbose=verbose)
configuration <- initialize_analysis_scripts(configuration, verbose)
configuration <- initialize_output_formats(configuration, verbose)
expect_equal(configuration$output_dir, "build/native_vs_talaris2014")
})
|
f28407f4638f43ec9aed7a3f6ae3cbd07617cfac
|
e37c5cab4bb89439714b24f9f29e4137f0a812fc
|
/Shopee_Shiny/global.R
|
a9688532f6dde938255f0691bef1c0923ff01955
|
[] |
no_license
|
a5347354/OrderAnalysis
|
c3046c74e143a2868cf2e8a94ba00b68b7171edf
|
d531bb26f4d6143915ef51a6d6f2f44f12dd46e7
|
refs/heads/master
| 2020-05-14T14:52:06.361961
| 2019-04-18T03:33:47
| 2019-04-18T03:33:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,979
|
r
|
global.R
|
# Shiny app global setup: load a Shopee order export, clean and sort it, and
# define the product-name vocabulary used to tag order lines.
# NOTE(review): every non-ASCII column name and string literal below is
# mojibake (double-encoded UTF-8, some with embedded newlines). They are kept
# byte-for-byte because the CSV's column names are mangled the same way.
library(rvest)
orders = read.csv("korean10146.shopee-order.csv")
# drop bookkeeping columns that are not needed for the analysis
orders = orders[,-c(1,2,3,4,11,12,13,17)]
# NOTE(review): the result of this coercion is discarded -- it was probably
# meant to be assigned back to the column; confirm intent
as.character.Date(orders$่จๅฎๆ็ซๆ้)
# sort orders by the order-creation-time column
orders = orders[order(orders$่จๅฎๆ็ซๆ้),]
# distinct shipping methods, used as category values
categories = unique(orders$ๅฏ้ๆนๅผ) %>% as.vector()
# product-name substrings used by find_products() to tag items
products = c("้คๆจๅค่ป็ณ",
"ๆช็ธ",
"็ซ้่พฃๆ้บต",
"Enaak",
"่พฒๅฟ็ธ้ฌ้บต",
"่ฟทไฝ ้ฆ่ๅทงๅ
ๅๆฃ",
"้ ๆๆด่็",
"ๆชธๆชฌ็",
"้ปๆ้ขจๅทงๅ
ๅๆฃ",
"้ฆ่ๅทงๅ
ๅๆดพ",
"็ฒ็ด
ๅๅฎข")
# string utilities for the parsing section below
library(stringr)
# find_products: tag each product-description string with the known product
# names it contains (substring match via grepl on each product pattern).
#
# Args:
#   str: character vector of product-description strings.
#   product_list: character vector of product-name patterns to search for.
#     Defaults to the script-level `products` vector, so existing
#     one-argument callers behave as before.
# Returns:
#   A character vector with one entry per (string, matched product) pair, in
#   input order; character(0) when nothing matches.
# BUG FIXES vs the original:
#   * `items` was only created when the FIRST string matched, so any match in
#     a later string (or no match at all) raised "object 'items' not found".
#   * multiple matches inside the first string overwrote each other instead
#     of accumulating.
find_products = function(str, product_list = products){
  items = character(0)
  for(j in seq_along(str)){
    for(i in seq_along(product_list)){
      if(grepl(product_list[i], str[j])){
        items = c(items, product_list[i])
      }
    }
  }
  return(items)
}
# Explode the per-order product-info blob into one row per item by regex.
# The patterns below contain mojibake field labels and embedded newlines;
# they are kept byte-for-byte to match the mangled CSV content.
name = str_match_all(orders$ๅๅ่ณ่จ,'ๅๅๅ็จฑ:(.+); ๅๅ้ธ้
')
name_items = str_match_all(orders$ๅๅ่ณ่จ,'้ธ้
ๅ็จฑ:(.+)ๅนๆ ผ')
price = str_match_all(orders$ๅๅ่ณ่จ,'NT\\$ (.+); ๆธ้')
count = str_match_all(orders$ๅๅ่ณ่จ,'ๆธ้: (.+);')
# build one data.frame per order and stack them
# NOTE(review): rbind inside the loop is O(n^2); consider collecting the
# frames in a list and binding once at the end
for(i in 1:length(name)){
df = data.frame(่จๅฎๆ็ซๆ้ = orders$่จๅฎๆ็ซๆ้[i],
ๅๅ่ณ่จ = name[[i]][,2],
ๅๅๆจ็ฑค = find_products(name[[i]][,2]),
ๅๅ้ธ้
 = name_items[[i]][,2],
ๅนๆ ผ = price[[i]][,2],
ๆธ้ = count[[i]][,2])
if(i==1){
item_orders = df
}else{
item_orders = rbind(item_orders,df)
}
}
# product info: web-scraping experiment, disabled
# library(rvest)
# i = 0
# url = paste0("https://shopee.tw/search/?facet=%257B%252266%2522%253A%255B-1%252C2209%255D%257D&keyword=%E9%80%B2%E5%8F%A3%E9%9B%B6%E9%A3%9F&page=",i)
# items_url = GET(url)
# items_url = content(items_url)
# items_url %>% html_nodes("#div")
|
ed7825e9246dd584dd66d65e3d558e3adc17a5a8
|
b06d3f5fc175450710c81e76370bb710698b7cda
|
/Day5/Shiny package in R/using_renderplot_in_shiny/ui.R
|
4f526f90b66e153272fd6236d17733f087d94747
|
[] |
no_license
|
pritamnegi/Learning-R
|
d496a7463df345bf184ec379c6a44a72f3863288
|
8b3207c9038585457828e944f199ea44ca2cdc17
|
refs/heads/master
| 2021-05-06T07:21:30.187234
| 2017-12-17T07:34:35
| 2017-12-17T07:34:35
| 113,953,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
ui.R
|
# Shiny UI: histogram explorer for the iris dataset.
# The sidebar selects the column (by index), bin count, and bar color; the
# main panel shows the histogram rendered as output$myhist on the server side.
library(shiny)
fluidPage(
titlePanel(h3("Plotting iris dataset using render plot function", align="center")),
sidebarLayout(
sidebarPanel(
# iris column to plot; choice values are column indices (sent as strings)
selectInput("var", "Select the variable name from iris data set", choices = c("Sepal.Length"=1, "Sepal.Width"=2, "Petal.Length"=3, "Petal.Width"=4)),
br(),
# number of histogram bins ("beans" is the original label's typo; the label
# is user-facing text and is kept as-is)
sliderInput("slide", "Select the beans number", min=1, max=150, value=30),
radioButtons("color", "Select the color", list("Green", "Red", "Yellow"))
),
mainPanel(
# filled by renderPlot() for output$myhist in the matching server script
plotOutput('myhist')
)
)
)
|
f6945ff61c5d46c02b00381e51adeb93d24596fd
|
3600d3dfcb7d6ac5b15524977d7ad36c4e94fb3e
|
/Plant_family_key.R
|
ce45619398dd2de086467f3d6f4ae77b06441fe1
|
[] |
no_license
|
HeatherKates/Interactive_plant_key
|
e34ccb19308cf0b982c9e4b84656db064161f9dc
|
300991d4ce41e79e09a0bea3bc58b97a864d9fb8
|
refs/heads/master
| 2021-05-29T02:46:31.212507
| 2015-04-24T16:10:20
| 2015-04-24T16:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,577
|
r
|
Plant_family_key.R
|
# Interactive plant-family key: explore the distribution of characters in a
# reference table, then match a user-described plant against it.
library(stringr)
plant_families <- read.csv(file="Plant_families_for_R.csv")
##Getting information about distribution of plant characteristics##
###################################################################
#What proportion of plant families are woody?
prop <- nrow(subset(plant_families, habit == "woody"))/(nrow(plant_families))
percent <- ((signif(prop, digits=3))*100)
cat(percent, "% of the families are woody")
#What proportion of plant families have stipules?
prop <- nrow(subset(plant_families, stipules == "present"))/nrow(plant_families)
percent <- ((signif(prop, digits=3))*100)
cat(percent, "% of the families have stipules")
#What proportion of the plant families are vines?
prop <- nrow(subset(plant_families, habit == "vine"))/nrow(plant_families)
percent <- ((signif(prop, digits=3))*100)
cat(percent, "% of the families are vines")
#What families are vines without stipules?
Vine_nostipule_families <- subset(plant_families, habit =="vine" & stipules == "absent")
Family <- as.vector(Vine_nostipule_families$Family)
cat("Possible Families:", Family)
##But this would be annoying to re-write for every new plant
# Query the table with user input: the user types all six characters on one
# line, separated by single spaces, in the order listed in the prompt
prompt <- "Describe your plant:
habit(woody/herbaceous/vine)\nstipules(present/absent)\nleaf arrangement(opposite/alternate)
ovary position(superior/inferior)\ninflorescence(umbel/head/cyme/terminal)\nfloral symmetry(radial/zygomorphic/actinomorphic)"
Character_list <- as.vector(strsplit(readline(prompt), " ")[[1]])
#Check to see what families have all the characters entered by the user:
Matching_rows <- subset(plant_families, habit == Character_list[1] & stipules == Character_list[2]
& leaf.arrangement == Character_list[3] & ovary.position== Character_list[4]
& inflorescence == Character_list[5]
& floral.symmetry ==Character_list[6] )
Matching_families <- (Matching_rows[1])
print(Matching_families[1])
##This relies on user following instructions and is very ugly
# fun: interactively prompt the user for the six plant characters and return
# them as a character vector in the order expected by the lookup below:
# habit, leaf arrangement, stipules, ovary position, inflorescence, symmetry.
#
# Args:
#   test: unused legacy argument. It now has a default so fun() also works
#     when no global `test` object exists; callers that pass it (lazily
#     evaluated, never forced) are unaffected.
# Returns:
#   (invisibly) a character vector of length 6 with the user's answers.
fun <- function(test = NULL)
{
  habit <- readline("habit? ")
  leaf <- readline("leaf arrangement? ")
  stipules <- readline("stipules? ")
  ovary <- readline("ovary position? ")
  inflorescence <- readline("inflorescence type? ")
  symmetry <- readline("floral symmetry? ")
  # readline() already returns character, so the original's as.character()
  # round-trips were redundant and have been dropped
  input <- c(habit, leaf, stipules, ovary, inflorescence, symmetry)
}
# NOTE(review): this call's result is discarded, and fun() is invoked again
# two lines below -- the user is prompted twice in interactive sessions;
# confirm whether this first call is intentional
if(interactive()) fun(test)
###Function that takes user input to get information about plant###
##################################################################
#Running the function will generate prompts and store output to a vector
character_suite <- fun(test)
## Same as above but without having to change every time
Matching_rows_2 <- subset(plant_families, habit == character_suite[1] & leaf.arrangement == character_suite[2]
& stipules== character_suite[3] & ovary.position== character_suite[4]
& inflorescence == character_suite[5]
& floral.symmetry ==character_suite[6])
Matching_families_2 <- (Matching_rows_2$Family)
print(Matching_rows_2[1])
######################################################################
# TODO: still working on only printing the "Family" value from the vector
# (BUG FIX: the line above was a bare sentence, not a comment, which made
# the whole script fail to parse)
|
4ddbaf6300a8b2cb514d0836b3adf8309631e035
|
5b86ed92bd925fa05f56dedfdd97f70ae95de984
|
/r/trading.R
|
33cf88bd9cf9ae9186eaf0c456207d95f006619f
|
[
"MIT"
] |
permissive
|
j-a-m-l/.dot
|
c310deaf2c2b748c427d590ac7f20daa489215d2
|
2a0f527c96c3cd7956a81bcb116326480b99bcae
|
refs/heads/master
| 2021-10-22T00:45:18.411681
| 2021-10-21T15:41:52
| 2021-10-21T15:41:52
| 4,156,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 642
|
r
|
trading.R
|
# Trading helpers: exchange-fee constants and net-proceeds arithmetic.

# Price of ARK expressed in BTC, from the two EUR quotes.
arkBtc <- function (arkEur, btcEur) {
  arkEur / btcEur
}

# Per-exchange trading fees, in percent.
binanceFee <- 0.1
bitfinexFee <- 0.1
bl3pFee <- 0.25 # + 0.01 EUR per order
cossFee <- 0.2
krakenFee <- 0.16
livecoinFee <- 0.18

# Net proceeds of selling `amount` units at `price` after a percent fee.
sell <- function (price, amount, fee = krakenFee) {
  (100 - fee) / 100 * price * amount
}

# Total cost of buying `amount` units at `price` including a percent fee.
buy <- function (price, amount, fee = krakenFee) {
  (100 + fee) / 100 * price * amount
}

# Profit of selling and then re-buying: sell proceeds minus re-buy cost.
# buyAmount / buyFee fall back to the sell-side values when left at their
# FALSE sentinel defaults.
# BUG FIX: the original used `if (!buyAmount)` / `if (!buyFee)`, which also
# treated an explicit 0 as "not given" (0 is falsy in R), silently replacing
# a zero buy fee or zero buy amount. isFALSE() matches only the sentinel.
rebuy <- function (sellPrice, buyPrice, sellAmount, buyAmount = FALSE, sellFee = krakenFee, buyFee = FALSE) {
  if (isFALSE(buyAmount))
    buyAmount <- sellAmount
  if (isFALSE(buyFee))
    buyFee <- sellFee
  sell(sellPrice, sellAmount, sellFee) - buy(buyPrice, buyAmount, buyFee)
}
|
545944323b259fba864238fb9e161350198c26aa
|
3556b4217c921637cc7b7566c0d1b4f9bd1316c2
|
/Fonctions/optimFunction.R
|
d95cb16e23211b638b77edf19fe9c39527008dec
|
[] |
no_license
|
ehbihenscoding/MultiFiTimeSeries
|
f24b47c32ec8a456e2739e5963e826a24b5f6f29
|
2300500a0546138ef6318119e05ac2d688baaac8
|
refs/heads/master
| 2023-04-18T16:41:52.488958
| 2022-01-10T08:07:17
| 2022-01-10T08:07:17
| 215,967,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,609
|
r
|
optimFunction.R
|
# Matern 5/2 correlation as a function of (already length-scaled) distance;
# vectorized over `distances`:
#   rho(d) = (1 + sqrt(5) d + 5/3 d^2) * exp(-sqrt(5) d)
# FIX: the original wrote `return = <expr>`, which assigns to a local
# variable literally named `return` (the value was only produced because it
# was the last expression). Replaced with a plain expression.
Matern52 <- function(distances) {
  (1 + sqrt(5) * distances + 5 / 3 * distances^2) * exp(-sqrt(5) * distances)
}
# Derivative-related helper for the Matern 5/2 kernel, used to speed up the
# optimization (consumed by covMaternder).
# NOTE(review): this is not algebraically d/dx of Matern52(x) -- the true
# derivative is (sqrt(5) + 10/3 x) e^{-sqrt(5)x} - sqrt(5) * Matern52(x).
# The formula is kept exactly as the author wrote it (marked "OK" upstream)
# because covMaternder may absorb the difference; confirm before changing.
# FIX: replaced the `return = <expr>` local-variable idiom with a plain
# expression.
Matern52der <- function(x) {
  (sqrt(5) + 5 / 3 * x) * exp(-sqrt(5) * x) + sqrt(5) * (1 + sqrt(5) * x + 5 / 3 * x^2) * exp(-sqrt(5) * x)
}
# covMatern: Matern 5/2 covariance matrix.
# With y = NULL, returns the auto-covariance of the rows of z; otherwise the
# cross-covariance between the rows of z and the rows of y (y is reshaped to
# the same number of columns as z).
# Distances are plain Euclidean -- length scales must already be applied by
# the caller (callers pass z %*% diag(1/l)).
# FIX: replaced the `return = <expr>` idiom with an explicit return(), and
# the unsafe 1:Ny loop with seq_len(Ny).
covMatern <- function(z, y = NULL) {
  d <- dim(z)[2]
  if (is.null(y)) {
    # pairwise distances among the rows of z
    distances <- as.matrix(dist(z, upper = TRUE))
  } else {
    y <- matrix(y, ncol = d)
    Nz <- dim(as.matrix(z))[1]
    Ny <- dim(as.matrix(y))[1]
    distances <- matrix(0, Nz, Ny)
    for (i in seq_len(Ny)) {
      if (d == 1) {
        # one-dimensional inputs: |z - y_i| directly
        distances[, i] <- abs(z - y[i])
      } else {
        # Euclidean distance from every row of z to row i of y
        distances[, i] <- sqrt(apply((z - t(as.numeric(y[i, ]) + matrix(0, d, Nz)))^2, 1, sum))
      }
    }
  }
  return(Matern52(distances))
}
# Derivative of the Matern covariance with respect to the length scales.
# Returns a list with one matrix per scale (one entry per element of l).
# NOTE(review): the i/j loops run over 1:d (number of COLUMNS of z) but index
# the Nz x Nz distance matrix and z[k, i] -- this only covers/addresses the
# full matrices when d == Nz, and distance[i, j] == 0 for duplicated rows
# would divide by zero. Confirm the intended shapes before relying on this.
covMaternder <- function( z, l){
d <- dim(z)[2]
D <- length(l)
distance = as.matrix(dist(z,upper=TRUE))
G <- list() # one matrix per length scale, to generalize to all scales
for (k in 1:D){ G[[k]] = distance}
for( i in 1:d){
for(j in 1:d){
if( i == j) {
# diagonal: zero derivative
for(k in 1:D){ G[[k]][i,j] <-0 }
}else{
for(k in 1:D){ G[[k]][i,j] <-(z[k,i]-z[k,j])^2*l[k]*Matern52der(distance[i,j])/distance[i,j] }
}
}
}
return(lapply(G,as.matrix))
}
# Estimate the temporal covariance matrix for the simple-kriging case
# (equation 43), then symmetrize it to remove floating-point asymmetry.
#
# Args:
#   y: response matrix (rows = time points).
#   iRz: inverse spatial covariance matrix; its dimension gives the number
#        of spatial points used in the normalization.
# Returns:
#   The symmetrized temporal covariance matrix.
covtemp <- function(y, iRz) {
  n_space <- dim(iRz)[1]
  # center each row of y at its own mean (simple-kriging mean estimate)
  centered <- sweep(y, 1, rowMeans(y))
  raw_cov <- (centered %*% iRz %*% t(centered)) / (n_space - 1)
  # average with the transpose so the result is exactly symmetric
  0.5 * (raw_cov + t(raw_cov))
}
# errLOO: leave-one-out (LOO) prediction error in space and time.
#
# Args:
#   z: design matrix, one row per spatial point.
#   y: response matrix, rows matched to the spatial covariance below.
#   l: vector of length scales, applied as z %*% diag(1/l).
# Returns:
#   list(diff2 = sum of squared LOO residuals, sig2 = LOO variance matrix).
# Requires inv() (matrix inverse) from the project environment.
# FIX: replaced the `return = list(...)` idiom (which assigns to a local
# variable named `return`) with an explicit return(); dropped the unused
# locals d and ht.
errLOO <- function(z, y, l) {
  Nz <- dim(z)[1]
  # spatial covariance on the length-scaled inputs;
  # (1/l * diag(l*0 + 1)) builds diag(1/l) safely even when length(l) == 1
  # (a bare diag(scalar) would build an identity matrix instead)
  Rz <- covMatern(z %*% (1 / l * diag(l * 0 + 1)))
  iRz <- inv(Rz + 10^-4 * diag(Nz))  # small jitter for numerical stability
  Rt <- covtemp(y, iRz)
  # nugget term; this approximation follows Baptiste Kerleguer's notes
  tau2 <- 10^-5
  K <- inv(Rz + tau2 * diag(Nz))
  tempsig <- diag(K)^(-1)
  # LOO residuals via the virtual cross-validation formula
  diff <- diag(tempsig) %*% K %*% t(y)
  sig2 <- matrix(tempsig, ncol = 1) %*% t(diag(Rt))
  return(list(diff2 = sum(diff^2), sig2 = sig2))
}
# Prediction conditioned on the hyper-parameters (kriging predictor).
#
# Args:
#   z: training design matrix (one row per spatial point).
#   y: training responses.
#   data: new points at which to predict.
#   l: vector of length scales, applied as inputs %*% diag(1/l).
# Returns:
#   list(mu = predictive mean, C = predictive covariance,
#        sd = predictive standard deviations).
# Requires inv() from the project environment.
predKmFonc <- function( z, y, data, l){
Nz <- dim(z)[1]
#Ndata <- dim(data)[1]
ht <- matrix(1,Nz,1)
# spatial covariances on the length-scaled inputs; (1/l*diag(l*0+1)) builds
# diag(1/l) safely even when length(l) == 1
Rz <- covMatern(z %*% (1/l*diag(l*0+1)))
iRz <- inv(Rz)
rz <- covMatern(z %*% (1/l*diag(l*0+1)), data %*% (1/l*diag(l*0+1)))
Rt <- covtemp( y, iRz)
# compute A* (equation 36, formula 2)
A_star = (y %*% iRz %*% ht)/sum(iRz)
# process mean (equation 36, formulas 3 and 4)
mu_starstar = A_star + (y - A_star%*% t(ht)) %*% iRz %*% rz
# process covariance (equation 33 sub-formula)
c <- covMatern(data %*% (1/l*diag(l*0+1))) - t(rz) %*% iRz %*% rz
# Kronecker product combines temporal and spatial covariances
R_starstar <- Rt %x% as.numeric(c+(1-t(ht)%*%iRz%*%rz)^2/sum(iRz))
return( list( mu = mu_starstar, C = R_starstar, sd = sqrt(diag(R_starstar))))
}
# Objective for length-scale optimization: the LOO squared-error term.
# Relies on X and Y defined in the calling environment; abs(l) makes the
# objective insensitive to the sign of the optimizer's proposal.
# FIX: replaced the `return = <expr>` idiom with an explicit return().
fct_cout <- function(l) {
  temp <- errLOO(X, Y, abs(l))
  return(temp$diff2)
}
# Gradient-like quantity of the LOO cost with respect to each length scale,
# built from the per-scale Matern derivative matrices of covMaternder().
# Relies on X and Y from the calling environment and on inv() from the
# project. Returns a list with one scalar (summed matrix) per length scale.
# NOTE(review): diag(1/abs(l)) misbehaves when length(l) == 1 (diag(scalar)
# builds an identity matrix); other call sites use 1/l * diag(l*0+1).
# FIX: replaced the `return = <expr>` idiom with an explicit return() and
# the unsafe 1:d loop with seq_len(d).
derfct_cout <- function(l) {
  # per-scale derivative matrices of the spatial covariance
  temp <- covMaternder(X, abs(l))
  d <- length(temp)
  Rz <- covMatern(X %*% diag(1 / abs(l)))
  iRz <- inv(Rz)
  # assemble one contribution matrix per length scale
  D <- list()
  for (k in seq_len(d)) {
    D[[k]] <- 2 * Y %*% iRz %*% diag(diag(iRz)^(-2)) %*% diag(diag(iRz %*% temp[[k]] %*% iRz)) %*% diag(diag(iRz)^(-1)) %*% iRz %*% t(Y) -
      2 * Y %*% iRz %*% diag(diag(iRz)^(-2)) %*% iRz %*% temp[[k]] %*% iRz %*% t(Y)
  }
  return(lapply(D, sum))
}
|
24c70fde1a9c880f9811101cde7200aad80d8051
|
768f032aebf66e19997f1d049137e55cafc93c5f
|
/Codes/DESBED.R
|
7da1d2aeeec804882881d36633e23640988ddce7
|
[] |
no_license
|
jairpaulino/TS-DES
|
6dae0f0b62510eebe1346ea22c0195b104a70e7b
|
e9c1c4e5e52431761adad2e04978efc45b54db8d
|
refs/heads/master
| 2023-05-02T23:25:43.801590
| 2021-05-25T18:48:41
| 2021-05-25T18:48:41
| 322,049,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,950
|
r
|
DESBED.R
|
# DESBED: dynamic ensemble selection by Euclidean distance.
# For every test index j, find the nearest training row of X_allData
# (Euclidean distance), pick the ensemble member with the lowest MSE on that
# neighbor, and use that member's forecast at j.
# Relies on globals defined by earlier scripts: y_trainData, y_allData,
# X_allData, resultsAll_w (col 1 = target, cols 2..31 = model forecasts) and
# getMSE().
DESBED = NULL; DESBED_01 = NULL;
DESBED_02 = NULL; DESBED_03 = NULL
DESBED_04 = NULL; DESBED_05 = NULL
m = length(y_trainData) # training length
n = length(y_allData)   # total length; test indices are (m+1):n
for (j in (m+1):n){#j=m+1
allCandidates = X_allData
# nearest-neighbor search: smallest Euclidean distance from row j to any
# training row i in 1:m
distVector = NULL
distMin = 1e10
distMinIndex = NULL
for (i in 1:m){ #i=1
a = allCandidates[i,]
b = allCandidates[j,]
distVector = dist(rbind(a, b), method = 'euclidean')
# other dist() methods: "euclidean", "maximum", "manhattan",
# "canberra", "binary" or "minkowski"
if(distVector < distMin){
distMin = distVector
distMinIndex = i
}
}
print(paste(distMinIndex, round(distMin, 4)))
# select the FIRST best candidate: the model with the lowest MSE on the
# nearest neighbor's row of forecasts
candidate = resultsAll_w[distMinIndex,]
plot(as.numeric(candidate)[2:31])
metricsCandidate = NULL
for(k in 2:31){#k=2
metricsCandidate[k] = getMSE(candidate$target, as.numeric(candidate[k]))
}
plot(metricsCandidate[2:30])
modelCand_01 = which.min(metricsCandidate)
# forecast at index j from the selected model
#DESBED[j-m] = mean(as.numeric(resultsAll_w[distMinIndex, c(modelCand_01, modelCand_02, modelCand_03, modelCand_04, modelCand_05)]))
DESBED_01[j-m] = as.numeric(resultsAll_w[j, c(modelCand_01)])
# mask the used neighbor so the next search finds the 2nd-nearest row
allCandidates[distMinIndex, ] = c(rep(10, 5))
# second nearest-neighbor search
# NOTE(review): the index found here is never consumed -- the code that used
# it (candidates 2..5) is disabled below
distVector = NULL
distMin = 1e10
distMinIndex = NULL
for (i in 1:m){ #i=1
a = allCandidates[i,]
b = allCandidates[j,]
distVector = dist(rbind(a, b), method = 'euclidean')
# "euclidean", "maximum", "manhattan",
# "canberra", "binary" or "minkowski"
if(distVector < distMin){
distMin = distVector
distMinIndex = i
}
}
# (disabled) candidates 2 through 5: the original file carried ~140
# commented-out lines that repeated the same mask + nearest-neighbor +
# best-model selection for the 2nd..5th closest training rows, ending in a
# weighted combination
#   DESBED = 0.5*c1 + 0.2*c2 + 0.15*c3 + 0.1*c4 + 0.05*c5  (p = 0.2 draft)
# Only the single best candidate is currently active.
DESBED[j-m] = DESBED_01[j-m] #+ 0.2*DESBED_02[j-m] + 0.15*DESBED_03[j-m] + 0.1*DESBED_04[j-m] + 0.05*DESBED_05[j-m]
}
# Compute the five forecast-quality metrics (MSE, MAE, MAPE, Theil's U, ARV)
# for a target/forecast pair and return them as a one-row data frame with
# columns MSE, MAE, MAPE, THEIL, ARV.
# NA values are dropped from each vector independently, matching the rest of
# the evaluation scripts.
getMetrics = function(target, forecast){
  clean_target = na.omit(target)
  clean_forecast = na.omit(forecast)
  # evaluate every metric function on the cleaned pair, in column order
  metric_funs = list(MSE = getMSE,
                     MAE = getMAE,
                     MAPE = getMAPE,
                     THEIL = getTheil,
                     ARV = getARV)
  values = lapply(metric_funs, function(f) f(clean_target, clean_forecast))
  return(as.data.frame(values))
}
# Score each combined model (oracle / simple average / single model / DESBED)
# on the out-of-sample window (observations m+1 .. n) and collect the five
# metrics into a labelled 4x5 table.
metricsMatrix = data.frame(matrix(ncol=5, nrow = 4))
colnames(metricsMatrix) = c("MSE", "MAE", "MAPE", "THEIL", "ARV")
rownames(metricsMatrix) = c("Oracle", "SA", "SM", "DESBED")
metricsMatrix[1,] = getMetrics(resultsAll_w$target[(m+1):n], oracleModel[(m+1):n])
metricsMatrix[2,] = getMetrics(resultsAll_w$target[(m+1):n], saModel[(m+1):n])
metricsMatrix[3,] = getMetrics(resultsAll_w$target[(m+1):n], smModel[(m+1):n])
# DESBED is already restricted to the test window, so it is not re-sliced.
metricsMatrix[4,] = getMetrics(resultsAll_w$target[(m+1):n], DESBED)
# Persist one comparison table per country.
write.table(metricsMatrix, file = paste('Results/metricsMatrix_', countryNames, '.csv', sep = ''), sep = ";")
# Overlay plot: columns 2..31 of resultsAll_w (the 30 candidate models) in
# gray, the target in black, and each combined forecast in its own colour.
plot.ts(resultsAll_w$target[(m+1):n], ylim=c(0.6, 1.2))
for (i in 2:31){#i=6
  lines(resultsAll_w[[i]][(m+1):n], col = 'gray', lwd=2)
}
lines(resultsAll_w$target[(m+1):n], lwd=3)
lines(oracleModel[(m+1):n], col=2, lwd=3)
lines(saModel[(m+1):n], col=3, lwd=3)
lines(smModel[(m+1):n], col=4, lwd=3)
lines(DESBED, col = 6, lwd = 3, lty=3)
# getMSE(oracleModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMSE(saModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMSE(smModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMSE(DESBED, resultsAll_w$target[(m+1):n])
#
# getTheil(oracleModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getTheil(saModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getTheil(smModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getTheil(resultsAll_w$target[(m+1):n], DESBED)
#
# getMAE(oracleModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAE(saModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAE(smModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAE(DESBED, resultsAll_w$target[(m+1):n])
#
# getMAPE(oracleModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAPE(saModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAPE(smModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getMAPE(DESBED, resultsAll_w$target[(m+1):n])
#
# getARV(oracleModel[(m+1):n], resultsAll_w$target[(m+1):n])
# getARV(saModel[(m+1):n], resultsAll_w$target[(m+1):n])
# # getARV(smModel[(m+1):n], resultsAll_w$target[(m+1):n])
# # getARV(DESBED, resultsAll_w$target[(m+1):n])
#
# plot.ts(resultsAll_w$target)#, ylim=c(0.2,0.9))
# for (i in 2:31){#i=6
# lines(resultsAll_w[[i]], col = 'gray')
# }
# lines(resultsAll_w$target[(m+1):n], lwd=3)
# lines(oracleModel[(m+1):n], col=2, lwd=3)
# lines(resultsAll_w$model_24, col=4, lwd=3)
# lines(smModel, col=3, lwd=3)
# #lines(DESBED, col = 6, lwd = 2)
# #lines(resultsAll_w$model_16[(m+1):n], col=4, lwd=3)
# abline(v=238, col=2, lwd=2, lty=2)
#
|
ca416dc96b4a4f503aec4466cbbe0bf19b26b555
|
d3c5f3ea6506d8fdadeb04b4182c93803c140e09
|
/code/1-basicmapping.R
|
ace352fcdb136be73c499dac9a05e4acc0311cf9
|
[] |
no_license
|
TravelersLab/ParliamentaryTravel
|
9d80b99675f780e9672fd9c2d8c5686ec1c56329
|
1df17b2edecd1b651fd67689f7e28d5b9807c007
|
refs/heads/master
| 2021-07-22T17:23:35.243452
| 2017-11-03T23:26:23
| 2017-11-03T23:26:23
| 104,917,653
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,310
|
r
|
1-basicmapping.R
|
library(maps)
library(mapdata)
library(ggmap)
library(dplyr)
# NOTE(review): hard-coded user-specific working directory -- breaks on any
# other machine; prefer relative paths or here::here().
setwd("~/MEGA/TravelersLab/Parliamentary\ Travel")
# Map the English boroughs on a toner-background tile map.
# NOTE(review): pBoroughs is assumed loaded elsewhere with long/lat/Name
# columns -- confirm against the data-loading script.
qmplot(long, lat, pBoroughs, maptype="toner-background", data = pBoroughs, color = I("red"),
  zoom = 7, color = TRUE)
# Save the borough map.
ggsave("./figures/burgessplot.png")
# Longitude/latitude of Parliament at Westminster.
plat <- 51.500637
plong<- -0.127499
# Re-plot the boroughs with Parliament marked as a blue point.
dev.off()
qmplot(long, lat, pBoroughs, maptype="toner-background", data = pBoroughs, color = I("red"),
  zoom = 7, color = TRUE) + geom_point(aes(x=plong, y=plat), color = "blue", size = 3)
# Same map with straight segments from each borough to Parliament, making
# the (as-the-crow-flies) travel paths visible.
qmplot(long, lat, pBoroughs, maptype="toner-background", data = pBoroughs, color = I("red"),
  zoom = 7, color = TRUE) +
  geom_segment(x = plong, y = plat,
    xend = pBoroughs$long, yend= pBoroughs$lat,
    color = "pink", size = 1) +
  geom_point(aes(x=plong, y=plat), color = "blue", size = 3)
ggsave("./figures/burgessplot_withpaths.png")
# The next block computes each borough's distance to Parliament, used to
# decide departure order: assuming uniform travel speed, the farthest group
# must leave first so that all parties arrive at the same time.
# CAVEAT (author's own): this treats England as flat and a degree of
# longitude as equal to a degree of latitude; the haversine formula is the
# noted fix.
# Planar Euclidean distance (in degrees) from each borough to Parliament,
# plus each party's departure time: the farthest party leaves at t = 0 and
# nearer parties wait (start_t = max distance - own distance).
# FIX: the original wrote max(pBoroughs$distance) inside mutate(), which
# reads the *pre-mutate* data frame; on a fresh run that column does not
# exist yet, so max(NULL) returned -Inf and every start_t was corrupted.
# Referring to the bare column name uses the distance just computed above.
pBoroughs <- mutate(pBoroughs,
  distance = sqrt( (plong - long)^2 + (plat - lat)^2),
  start_t = max(distance) - distance
)
# Final time = distance of the farthest party (they travel the longest,
# so they define the length of the whole animation).
t_f <- max(pBoroughs$distance)
# Discretize [0, t_f] into num_steps evenly spaced time steps.
num_steps <- 100
t_step <- seq(0, t_f, t_f / (num_steps - 1))
# Position of each party at time t under the straight-line ("as the crow
# flies") travel model. A party that has not yet departed (ts > t) sits at
# its home borough; a departed party is linearly interpolated between home
# and Parliament by the fraction of its journey remaining.
# Fully vectorized over ts / long_s / lat_s. Assumes 0 <= t <= tf.
#   tf           -- final time (distance of the farthest party)
#   ts           -- departure time(s)
#   t            -- current time (one element of t_step)
#   long_s, lat_s -- starting longitude/latitude (the borough)
#   plong, plat  -- destination longitude/latitude (Westminster)
# Returns list(c_long = ..., c_lat = ...) of current positions.
calculateLongLat <- function(tf, ts, t, long_s, lat_s, plong, plat) {
  departed <- as.numeric(ts <= t)
  still_home <- 1 - departed
  # Fraction of the journey remaining for parties already on the road.
  frac_left <- (tf - t) / (tf - ts)
  en_route_long <- frac_left * long_s + (1 - frac_left) * plong
  en_route_lat <- frac_left * lat_s + (1 - frac_left) * plat
  c_long <- departed * en_route_long + still_home * long_s
  c_lat <- departed * en_route_lat + still_home * lat_s
  list("c_long" = c_long, "c_lat" = c_lat)
}
# Hand-checked examples: ex1 is 70% of the way through a 10-unit journey,
# ex2 exactly halfway (expected position (5, 5)).
ex1 <- calculateLongLat(10, 0, 7, 10, 10, 0, 0)
ex2 <- calculateLongLat(10, 0, 5, 10, 10, 0, 0)
# time_locs[[i]] holds every party's position at time step i.
time_locs <- c()
# Loop (not vectorized over time) to compute the positions at each step.
for(i in 1:num_steps) {
  time_locs[[i]] <-calculateLongLat(t_f, pBoroughs$start_t, t_step[i], pBoroughs$long, pBoroughs$lat, plong, plat)
}
# Plot all party locations at time step i (1 <= i <= num_steps): purple
# triangles for the travelling parties, red points for the home boroughs,
# blue for Parliament. Returns the ggmap plot object (used by ggsave below).
pot <- function(i) {
  cl <- time_locs[[i]]
  qmplot(long, lat, pBoroughs, maptype="toner-background", data = pBoroughs, color = I("red"),
    zoom = 7, color = TRUE)+
    geom_point(aes(x=cl$c_long, y=cl$c_lat), color = "purple", shape=17, size = 4)+
    geom_point(aes(x=plong, y=plat), color = "blue", size = 3)
}
# Render one frame per time step; the frames are later stitched into a GIF
# with ImageMagick (see the comment at the end of the file).
for(i in 1:num_steps) {
  ggsave(sprintf("burgess_travel%03d.jpg", i), pot(i),
    device="jpg", path="./figures/travel_over_time/")
}
library(igraph)
# Build a co-travel graph: one vertex per borough, an edge between two
# parties whenever they are within `thresh` degrees of each other at some
# time step; the edge attribute "w" counts how many steps they co-travelled.
g<- make_undirected_graph(c(), n=length(pBoroughs$Name))
V(g)$name <- pBoroughs$Name
for(i in 1:num_steps) {
  thresh <- 0.01
  cd <- time_locs[[i]]
  # O(n^2) pairwise distance check at this time step (k starts at j, so
  # each unordered pair is considered once; j == k is excluded below).
  for(j in 1:length(cd$c_long)) {
    long_j <- time_locs[[i]]$c_long[j]
    lat_j <- time_locs[[i]]$c_lat[j]
    for(k in j:length(cd$c_long)){
      long_k <- time_locs[[i]]$c_long[k]
      lat_k <- time_locs[[i]]$c_lat[k]
      distance <- sqrt( (long_j - long_k)^2 + (lat_j - lat_k)^2)
      if (distance < thresh ) {
        if(are.connected(g, j, k)) {
          # Pair already met before: bump the co-travel counter.
          e_id <- get.edge.ids(g, c(j, k))
          c_w <- get.edge.attribute(g, "w", index=e_id)
          g <- set_edge_attr(g, "w", index=e_id, value=c_w + 1)
        } else {
          # First encounter: create the edge with weight 1 (no self-loops).
          if (j != k ) {
            g <- add_edges(g, c(j,k), "w" = 1)
            print(paste(as.character(j), as.character(k)))
          }
        }
      }
    }
  }
}
plot(g)
# Prune weak ties: keep only pairs that co-travelled for at least 5 steps.
g <- delete.edges(g, E(g)[w < 5])
library(ggplot2)
# Quick look at the surviving edge-weight distribution.
qplot(E(g)$w)
library(d3Network)
# Export the graph as an interactive D3 force-directed layout (d3rep.html).
df_nodes <- as_data_frame(g, "vertices")
df_links <- as_data_frame(g, "edges")
# Scale weights up so link distances are visually distinguishable.
df_links <- mutate(df_links, w = 3*w)
# d3Network uses 0-based node indices, hence the -1.
df_links <- mutate(df_links,
  from_index = match(from, df_nodes$name)-1,
  to_index = match(to , df_nodes$name)-1)
# Colour groups = connected components of the pruned co-travel graph.
df_nodes$group <- components(g)$membership
d3rep <- d3ForceNetwork(Links = df_links,
  Nodes = df_nodes,
  Source="from_index",
  Target="to_index",
  Value="w",
  NodeID="name",
  file="d3rep.html",
  linkWidth = 1,
  linkDistance = "function(d){return d.value }",
  charge = -100,
  fontsize = 15,
  Group = "group"
)
# Map view of the same groups, coloured by component membership.
# NOTE(review): as.color() is not base R -- presumably from an attached
# package; verify it is in scope when this script runs.
pBoroughs$group <- df_nodes$group
g_colors <- as.color(pBoroughs$group)
gbplot <- qmplot(long, lat, pBoroughs, maptype="toner-background", data = pBoroughs, color = g_colors,
  zoom = 7, color = TRUE, legend="none") + geom_point(aes(x=plong, y=plat), color = I("blue"), size = 3)+
  theme(legend.position="none")
ggsave("./figures/groups_burgesses.png", gbplot, device="png")
# And then, to produce the GIF,
# we run `convert -delay 10 -loop 1 *jpg ../burgess_travel.gif`
# (This produces our GIF using ImageMagick on the command line)
|
9c6a14f5cf9bd5b424b22734c14fbbf9e0e5937f
|
529802384b1e60fda0652aaa8c95d849751bb372
|
/R/make_ggridges_and_get_urbanness_score_per_month.R
|
bf400bae151c57270d31b63bfb864e06c84467df
|
[] |
no_license
|
coreytcallaghan/intra_annual_urbanness
|
3095bfc224f2bca2f4fdaf09b4fcf8eb1ff75ed2
|
3270927fa90cc1195c174f61e69e61ac18c2dec4
|
refs/heads/master
| 2023-02-26T23:59:31.780168
| 2021-01-29T15:21:20
| 2021-01-29T15:21:20
| 226,186,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,212
|
r
|
make_ggridges_and_get_urbanness_score_per_month.R
|
# This is an R script to read in
# a dataset for each species
# and make a ggridges plot by month
# and export that ggridges plot to a folder
# and at the same time make a small dataframe of
# species-urban scores by month
# and a species-urban scores by day
# the RDS files were too large to push all of them
# so I pushed 30 'example RDSs'
# which would allow someone to reproduce this code for the sake of completeness
# to know what is happening
# if you wanted to reproduce this, below where it says "species_RDS", you would replace with "species_RDS_examples"
# which would loop through the 30 example species
# but, as noted this code currently is on a list of files that are not pushed to the repository
# packages
library(dplyr)
library(lubridate)
library(ggplot2)
library(ggridges)
library(mgcv)
library(scales)
# get list of file names
file_names <- list.files("Data/species_RDS/")
# loop through the filenames
# and do some summaries
# For every species RDS file: draw a monthly ggridges plot of night-light
# radiance ("urbanness"), then compute resampled monthly AND daily urbanness
# summaries and save each as an RDS named after the species.
# FIX: in the original, the for-loop closed immediately after the monthly
# saveRDS(), leaving the whole day-of-year section at top level (where `df`
# and `title_name` do not exist) and a stray unmatched `}` at the end of the
# file -- a parse error. The day section clearly belongs inside the loop
# (it uses the loop-local `df` and `title_name`), so it is moved there and
# the file now has exactly one closing brace for the loop.
for (i in file_names) {
  df <- readRDS(paste0("Data/species_RDS/", i)) %>%
    mutate(MONTH=month(OBSERVATION_DATE, abbr=TRUE, label=TRUE)) %>%
    dplyr::filter(OBSERVATION_DATE > "2014-01-01")

  title_name <- unique(df$COMMON_NAME)

  # Ridgeline plot of the radiance distribution per month (log10 x axis),
  # saved under Figures/species_ggridges/.
  ggplot(df, aes(x=avg_rad, y=MONTH, height=..density..))+
    geom_density_ridges(stat="density", fill="skyblue2")+
    scale_x_log10(labels=comma)+
    theme_classic()+
    theme(axis.text=element_text(color="black"))+
    ggtitle(paste0(title_name))+
    ylab("")+
    xlab(bquote('Average radiance ('* 'nW' ~cm^-2~sr^-1*')'))+
    scale_y_discrete(labels=c("Dec", "Nov", "Oct", "Sep", "Aug",
                              "Jul", "Jun", "May", "Apr", "Mar", "Feb", "Jan"),
                     limits=c("Dec", "Nov", "Oct", "Sep", "Aug",
                              "Jul", "Jun", "May", "Apr", "Mar", "Feb", "Jan"))

  ggsave(filename = paste0("Figures/species_ggridges/", title_name, ".png"),
         width=4.6, height=3.8, units="in")

  # One resample of the monthly urbanness score: 100 random observations per
  # month, summarized by the median radiance.
  resample_urbanness_month_function <- function(draw){
    dat <- df %>%
      group_by(MONTH) %>%
      sample_n(100) %>%
      group_by(COMMON_NAME, MONTH) %>%
      summarise(urban_score=median(avg_rad, na.rm=TRUE)) %>%
      mutate(month_numeric=1:12) %>%
      mutate(resample=draw)
  }

  # 1000 resamples to converge on a mean monthly urbanness (and its sd).
  list_of_resamples <- lapply(c(1:1000), function(x){resample_urbanness_month_function(x)})
  resample_month_dfs <- bind_rows(list_of_resamples)

  resampled_urbanness_month <- resample_month_dfs %>%
    group_by(MONTH) %>%
    summarize(mean_urbanness=mean(urban_score),
              sd_urbanness=sd(urban_score))

  # Static (non-resampled) monthly score joined with the resampled summary.
  urbanness_month <- df %>%
    group_by(COMMON_NAME, MONTH) %>%
    summarise(urban_score=median(avg_rad, na.rm=TRUE),
              number_obs=n()) %>%
    mutate(month_numeric=1:12) %>%
    left_join(., resampled_urbanness_month, by="MONTH")

  saveRDS(urbanness_month, file = paste0("Data/species_monthly_summaries/", title_name, ".RDS"))

  # Same procedure by day of year. Only 10 draws per day, with replacement,
  # since many days have few observations for some species (replacement
  # mainly affects low-observation species, which have low variance anyway).
  resample_urbanness_day_function <- function(draw){
    dat <- df %>%
      group_by(DAY) %>%
      sample_n(10, replace=TRUE) %>%
      group_by(COMMON_NAME, DAY) %>%
      summarise(urban_score=median(avg_rad, na.rm=TRUE)) %>%
      mutate(resample=draw)
  }

  list_of_resamples <- lapply(c(1:1000), function(x){resample_urbanness_day_function(x)})
  resample_day_dfs <- bind_rows(list_of_resamples)

  resampled_urbanness_day <- resample_day_dfs %>%
    group_by(DAY) %>%
    summarize(mean_urbanness=mean(urban_score),
              sd_urbanness=sd(urban_score))

  urbanness_day <- df %>%
    group_by(COMMON_NAME, DAY) %>%
    summarise(urban_score=median(avg_rad, na.rm=TRUE),
              number_obs=n()) %>%
    left_join(., resampled_urbanness_day, by="DAY")

  saveRDS(urbanness_day, file = paste0("Data/species_daily_summaries/", title_name, ".RDS"))
}
|
a90978f2d3af8bd2ff7cced5973b1eeb1104b6c0
|
8866f2576324045f7f57bf02b87433bd3ed34145
|
/man/get_childCodeIds.Rd
|
452903080c14b56529f1527c5e64bc21c2344df3
|
[] |
no_license
|
cran/rock
|
31ba91c6be5bff97c1659b3a8c3e5fbe6644f285
|
61999cb18c02680719a96b8ec3d0f33010849270
|
refs/heads/master
| 2022-12-26T21:02:05.960658
| 2022-12-13T11:30:02
| 2022-12-13T11:30:02
| 236,884,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,035
|
rd
|
get_childCodeIds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_childCodeIds.R, R/get_descendentCodeIds.R
\name{get_childCodeIds}
\alias{get_childCodeIds}
\alias{get_descendentCodeIds}
\title{Get the code identifiers a code's descendents}
\usage{
get_childCodeIds(
x,
parentCodeId,
returnNodes = FALSE,
includeParentCode = FALSE
)
get_descendentCodeIds(x, parentCodeId, includeParentCode = FALSE)
}
\arguments{
\item{x}{The parsed sources object}
\item{parentCodeId}{The code identifier of the parent code}
\item{returnNodes}{For \code{get_childCodeIds()}, set this to \code{TRUE} to return
a list of nodes, not just the code identifiers.}
\item{includeParentCode}{Whether to include the parent code
identifier in the result}
}
\value{
A character vector with code identifiers (or a list of nodes)
}
\description{
Get the code identifiers of all children, or all descendents (i.e. including
grand-children, grand-grand-children, etc) of a code with a given identifier.
}
|
52b346b80a7d4843d00154a61281119925173566
|
dd3694903aea3932387264e36e4f59e6deaa817b
|
/Code/OldCode/build_adjacency_matrix.R
|
e3113469cbd2d8aaf1ed6b11f56c6748624beaab
|
[] |
no_license
|
alunmeredith/MSC-Project
|
7abe183a43ba53ecf8b7b1b9b9fa7fc5ad080fc7
|
496a55d07a9c18c3ee758406b954431b999c2526
|
refs/heads/master
| 2020-04-12T09:44:35.778375
| 2016-09-16T13:06:57
| 2016-09-16T13:06:57
| 60,303,501
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
build_adjacency_matrix.R
|
library(data.table)
library(dplyr)
library(readr)
# Load the patent index into memory; it is queried for every citation when
# building the adjacency matrix. data.table with a (Patent, Date) key gives
# O(log n) binary-search lookups instead of linear scans.
pat_index <- read_csv("../DataFiles/Cleaned/patent_cat.csv")
pat_index <- data.table(pat_index, key = c("Patent", "Date"))

# Look up one cited patent number in the keyed index.
#   Pat_no -- the citation to resolve
#   index  -- keyed data.table to search (defaults to the global pat_index)
# Returns c(matched_patent, Pat_no), or c(NA, Pat_no) when the citation is
# not in the index (nomatch = 0 drops non-matching keys, so nrow(pat) == 0).
# NOTE(review): if the key ever matches more than one row, the returned
# vector is longer than 2, which would mis-align the downstream
# matrix(..., ncol = 2) reshape -- verify Patent numbers are unique.
find_adjacency <- function(Pat_no, index = pat_index) {
  pat <- index[.(Pat_no), nomatch = 0]
  if (nrow(pat) > 0) {
    return(c(as.character(pat$Patent), as.character(Pat_no)))
  }
  else return(c(NA,as.character(Pat_no)))
}
# Stream a citation CSV in 10,000-row chunks, resolve each citation against
# the patent index via find_adjacency(), and append the resulting edge list
# to `write.file`.
#   file       -- citation CSV (columns Patent, Citation, Date, Date2)
#   write.file -- output CSV the adjacency rows are appended to
# FIX: the original looped `while (!is.null(read_csv(...)))`, but readr's
# read_csv() never returns NULL -- past the end of the file it returns a
# zero-row tibble -- so the loop never terminated. We now stop on an empty
# (or short, i.e. final) chunk.
adjacency_parse_file <- function(file = "../DataFiles/Cleaned/citation/1996.csv", write.file = "../DataFiles/Cleaned/adjacency.csv") {
  chunk_size <- 10000
  cursor = 0
  repeat {
    cit_df <- read_csv(file, skip = cursor, n_max = chunk_size, col_types = "ccdD",
                       col_names = c("Patent", "Citation", "Date", "Date2"))
    # Empty chunk: we have read past the end of the file.
    if (is.null(cit_df) || nrow(cit_df) == 0) break
    print(cursor)
    adjacency <- lapply(cit_df$Citation, find_adjacency) %>% t
    adjacency <- as.data.frame(matrix(unlist(adjacency), ncol = 2, byrow = TRUE))
    colnames(adjacency) <- c("Patent", "Citation")
    print(head(adjacency,3))
    write_csv(adjacency, write.file, append = TRUE)
    # A short chunk means this was the last one -- no need to read again.
    if (nrow(cit_df) < chunk_size) break
    cursor <- cursor + chunk_size
  }
}
# Process every yearly citation file in the directory, appending all edges
# into the single adjacency CSV (adjacency_parse_file's default write.file).
file.list <- list.files("../DataFiles/Cleaned/citation/", full.names = TRUE)
for(file in file.list) {
  print(file)
  adjacency_parse_file(file)
}
|
30364ee5c0bd11cafbc8d863b6a374a539402203
|
70fc84a220fff7fc26d6610abb2c5995e613947d
|
/Scripts/PlottingScripts/PlotAccentDistribution.r
|
d7c3ccb003ab70aa205571e293bd3e00600bad8f
|
[] |
no_license
|
mohriner/flowBook
|
b8b33a90a3cc9ae52b66ae49abec8f15adeb4bfd
|
faaec23176acf7af339fdda5b119f4aa586bbbda
|
refs/heads/master
| 2020-04-01T21:57:44.605875
| 2020-02-13T15:29:46
| 2020-02-13T15:29:46
| 153,684,015
| 13
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
PlotAccentDistribution.r
|
# Bar plot of how often each of the 16 sixteenth-note metric positions in a
# measure carries an accent, for one verse of the global `corpora` table.
#   v       -- verse identifier (matched against corpora$verse)
#   m.range -- optional vector of measure indices to restrict to (NA = all)
# NOTE(review): `mod` is not base R (presumably a package helper for %%),
# and quartz() is macOS-only -- this function will not plot on other OSes.
PlotAccentDistribution = function(v,m.range=NA){
  dat = corpora %>% filter(verse==v) %>%
    mutate(meas = beatIndex %/% 4)
  if(!is.na(m.range[1])){
    dat = dat %>% filter(meas %in% m.range)
  }
  # Accented beat indices, reduced to their position within the measure.
  BIs = dat %>% filter(accent==1) %>%
    .[["beatIndex"]] %>% mod(4)
  # Seed the table with one dummy count per position (seq(0, 3.75, .25))
  # so all 16 positions appear, then subtract the seed back out.
  tab = table(c(BIs,seq(0,3.75,.25)))-1
  names(tab) = 0:15
  # Normalize counts by the number of distinct measures considered.
  n.meas = dat$meas %>% unique %>% length
  tab = round(tab/n.meas,digits=2)
  quartz(width=4.5,height=1.5)
  par(mar=c(3.5,3.5,1,1),mgp = c(2.5,1,0),cex = .65,las=1,bty='n',family="Times New Roman")
  # Beat-initial positions in black, off-beats in gray.
  barplot(tab,col = rep(c("black",rep("gray",3)),4),cex.names=.5,ylim=c(0,1),
    xlab = "metric position",ylab="% of measure accenting position")
}
|
fea7a1f4b02c9555a6f794f87c22a5263896e5e2
|
7f77551f86a4b5b9e6bacd39cacd6d170141c1fa
|
/homeworks/mid_exam_home.R
|
75b0ad1a31824752a2d22da1ef570cec2d37b0ad
|
[] |
no_license
|
ocowchun/R_Computing-_for_Business_Data_Analytics
|
b96722ccd04c3d2a5c507694548c5764afb354e4
|
66276ec423f0cddf53215743019202546beef8dd
|
refs/heads/master
| 2021-01-10T20:29:05.086711
| 2015-01-16T10:40:38
| 2015-01-16T10:40:38
| 24,643,703
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,259
|
r
|
mid_exam_home.R
|
#Q1
# Upper boundary of the heart curve on [-2, 2]: a pair of unit semicircles
# centred at x = -1 and x = 1.
heart_up=function(x){
  offset <- abs(x) - 1
  sqrt(1 - offset^2)
}
# Lower boundary of the heart curve on [-2, 2]: an inverse-cosine arc
# shifted down by pi, reaching its minimum -pi at x = 0.
heart_lo=function(x){
  folded <- 1 - abs(x)
  acos(folded) - pi
}
x=seq(-2,2,0.05)
# plot(x,heart_lo(x),ylim=c(heart_lo(0),1),type='l')
# lines(x,heart_up(x))
n=100000
# Monte Carlo ("machine gun") estimate of the heart's area: fire n uniform
# shots into the bounding box and scale the box area by the hit fraction.
# NOTE(review): relies on the sibling functions heart_up()/heart_lo() and
# the global grid `x` (used only to size the bounding box).
#   n -- number of random shots
machine_gun=function(n){
  top <- max(heart_up(x))
  bottom <- min(heart_lo(x))
  # Draw x first, then y, to keep the RNG stream identical to the original.
  shot_x <- runif(n, -2, 2)
  shot_y <- runif(n, bottom, top)
  hits <- sum(shot_y <= heart_up(shot_x) & shot_y >= heart_lo(shot_x))
  box_area <- 4 * (top - bottom)
  box_area * hits / n
}
#Q2
# Single term of the generalized Bessel-type series:
#   (z/2)^(2m + a) / (gamma(m + a + 1) * m!)^v
bessel.element=function(a,v,z,m){
  term_top <- (z/2)^(2*m + a)
  term_bottom <- (gamma(m + a + 1) * factorial(m))^v
  term_top / term_bottom
}
# Recursive partial sum of the series whose terms come from
# bessel.element(a, v, z, m). Accumulates terms from index m upward and
# stops when the index exceeds `max` or when a term is <= `tolerance`
# (that first sub-tolerance term is itself discarded).
#   a, v, z   -- series parameters, passed through to bessel.element()
#   max       -- hard cap on the term index (guards against non-termination)
#   tolerance -- smallest term magnitude worth accumulating
#   m         -- current term index (basselI_Gen starts the recursion at 0)
fn=function(a,v,z,max,tolerance,m){
  if(m>max){
    return (0)
  }
  i=bessel.element(a,v,z,m)
  # Recurse only while terms stay above the tolerance; for a convergent
  # series the terms eventually shrink, so this terminates early.
  if(i>tolerance)
  {
    return (i+fn(a,v,z,max,tolerance,m+1))
  }
  else{
    return (0)
  }
}
# Generalized Bessel-I style series sum: kicks off the recursive summation
# in fn() at term index m = 0.
#   a, v, z   -- series parameters
#   max       -- maximum term index
#   tolerance -- stop once a term falls to this magnitude or below
basselI_Gen=function(a,v,z,max,tolerance){
  fn(a, v, z, max, tolerance, 0)
}
#Q3
# 95% Wald confidence interval for a binomial proportion.
# Vectorized over theta.hat.
#   size      -- number of trials
#   theta.hat -- observed proportion(s)
# Returns list(upper_bound = ..., lower_bound = ...).
wald.interval=function(size,theta.hat){
  half_width <- 1.96 * sqrt(theta.hat * (1 - theta.hat) / size)
  result <- list(upper_bound = theta.hat + half_width,
                 lower_bound = theta.hat - half_width)
  return (result)
}
# Adjusted Wald (Agresti-Coull style) 95% interval: shrink the estimate by
# adding 2 pseudo-successes and 2 pseudo-failures before building the
# interval. Vectorized over theta.hat.
#   size      -- number of trials
#   theta.hat -- observed proportion(s)
# Returns list(upper_bound = ..., lower_bound = ...).
adjustwald.interval=function(size,theta.hat){
  theta.tilde <- (size * theta.hat + 2) / (size + 4)
  # NOTE: matches the original -- the standard error divides by `size`,
  # not by size + 4.
  half_width <- 1.96 * sqrt(theta.tilde * (1 - theta.tilde) / size)
  result <- list(upper_bound = theta.tilde + half_width,
                 lower_bound = theta.tilde - half_width)
  return (result)
}
# Simulate the empirical 95% coverage of the Wald and adjusted-Wald
# intervals: draw 5000 binomial samples, build both intervals from each
# sample's theta-hat, and count how often the true theta falls inside.
#   size  -- number of Bernoulli trials per sample
#   theta -- true success probability
# Returns list(wald = coverage fraction, adjust = coverage fraction).
coverage.sim=function(size,theta){
  n=5000
  y=rbinom(n,size,theta)
  thetas=rep(theta,n)
  theta.hat=y/size
  # Both interval helpers are vectorized over theta.hat.
  w=wald.interval(size,theta.hat)
  a=adjustwald.interval(size,theta.hat)
  # Keep only the replicates whose interval contains the true theta.
  w.overlap=thetas[thetas>=w$lower_bound&thetas<=w$upper_bound]
  a.overlap=thetas[thetas>=a$lower_bound&thetas<=a$upper_bound]
  wald.coverage=length(w.overlap)/n
  adjust.coverage=length(a.overlap)/n
  return (list(wald=wald.coverage,adjust=adjust.coverage))
}
# Plot simulated coverage of the Wald (blue) and adjusted-Wald (red)
# intervals across theta in (0, 0.5), for samples of 20 trials, against the
# nominal 0.95 line. Called for its plotting side effect; returns the last
# lines() result invisibly.
coverage.graph=function(){
  size=20
  thetas=seq(0.01,0.49,0.02)
  wald=c()
  adjust=c()
  for(i in 1:length(thetas)){
    theta=thetas[i]
    coverage=coverage.sim(size,theta)
    wald[i]=coverage$wald
    adjust[i]=coverage$adjust
  }
  # Empty plot frame sized by recycling c(0, 0.5) against the y grid, then
  # the coverage curves are layered on top.
  plot(cbind(c(0,0.5),seq(0.65,1,0.05)),type="n",ylab='Coverage',xlab=expression(theta))
  abline(h=0.95)
  lines(thetas,wald,col='blue')
  lines(thetas,adjust,col='red')
}
# coverage.graph()
|
f5e1e180620c773ebe5172898f7ef6cd74139db1
|
3ffe086b3037fb330a1d48921e2b3669310f5f94
|
/R/fontawesome.R
|
c4fb8170d98fe5b55da81c31476f1b5bf30eac02
|
[] |
no_license
|
leeper/waffle
|
bcbe9c873d4783ecc1ee8f461d8b8bb9c7486741
|
93c34bbac3ee827fba5e860a86deab0f1ccb911b
|
refs/heads/master
| 2021-01-13T08:51:44.556594
| 2016-10-25T10:12:47
| 2016-10-25T15:38:00
| 71,884,593
| 4
| 0
| null | 2016-10-25T10:10:28
| 2016-10-25T10:10:28
| null |
UTF-8
|
R
| false
| false
| 16,516
|
r
|
fontawesome.R
|
fa_unicode <- structure(c("\uf042", "\uf170", "\uf037", "\uf039", "\uf036",
"\uf038", "\uf0f9", "\uf13d", "\uf17b", "\uf209", "\uf103",
"\uf100", "\uf101", "\uf102", "\uf107", "\uf104", "\uf105",
"\uf106", "\uf179", "\uf187", "\uf1fe", "\uf0ab", "\uf0a8",
"\uf01a", "\uf190", "\uf18e", "\uf01b", "\uf0a9", "\uf0aa",
"\uf063", "\uf060", "\uf061", "\uf062", "\uf047", "\uf0b2",
"\uf07e", "\uf07d", "\uf069", "\uf1fa", "\uf1b9", "\uf04a",
"\uf05e", "\uf19c", "\uf080", "\uf080", "\uf02a", "\uf0c9",
"\uf236", "\uf0fc", "\uf1b4", "\uf1b5", "\uf0f3", "\uf0a2",
"\uf1f6", "\uf1f7", "\uf206", "\uf1e5", "\uf1fd", "\uf171",
"\uf172", "\uf15a", "\uf032", "\uf0e7", "\uf1e2", "\uf02d",
"\uf02e", "\uf097", "\uf0b1", "\uf15a", "\uf188", "\uf1ad",
"\uf0f7", "\uf0a1", "\uf140", "\uf207", "\uf20d", "\uf1ba",
"\uf1ec", "\uf073", "\uf133", "\uf030", "\uf083", "\uf1b9",
"\uf0d7", "\uf0d9", "\uf0da", "\uf150", "\uf191", "\uf152",
"\uf151", "\uf0d8", "\uf218", "\uf217", "\uf20a", "\uf1f3",
"\uf1f2", "\uf1f1", "\uf1f4", "\uf1f5", "\uf1f0", "\uf0a3",
"\uf0c1", "\uf127", "\uf00c", "\uf058", "\uf05d", "\uf14a",
"\uf046", "\uf13a", "\uf137", "\uf138", "\uf139", "\uf078",
"\uf053", "\uf054", "\uf077", "\uf1ae", "\uf111", "\uf10c",
"\uf1ce", "\uf1db", "\uf0ea", "\uf017", "\uf00d", "\uf0c2",
"\uf0ed", "\uf0ee", "\uf157", "\uf121", "\uf126", "\uf1cb",
"\uf0f4", "\uf013", "\uf085", "\uf0db", "\uf075", "\uf0e5",
"\uf086", "\uf0e6", "\uf14e", "\uf066", "\uf20e", "\uf0c5",
"\uf1f9", "\uf09d", "\uf125", "\uf05b", "\uf13c", "\uf1b2",
"\uf1b3", "\uf0c4", "\uf0f5", "\uf0e4", "\uf210", "\uf1c0",
"\uf03b", "\uf1a5", "\uf108", "\uf1bd", "\uf219", "\uf1a6",
"\uf155", "\uf192", "\uf019", "\uf17d", "\uf16b", "\uf1a9",
"\uf044", "\uf052", "\uf141", "\uf142", "\uf1d1", "\uf0e0",
"\uf003", "\uf199", "\uf12d", "\uf153", "\uf153", "\uf0ec",
"\uf12a", "\uf06a", "\uf071", "\uf065", "\uf08e", "\uf14c",
"\uf06e", "\uf070", "\uf1fb", "\uf09a", "\uf09a", "\uf230",
"\uf082", "\uf049", "\uf050", "\uf1ac", "\uf182", "\uf0fb",
"\uf15b", "\uf1c6", "\uf1c7", "\uf1c9", "\uf1c3", "\uf1c5",
"\uf1c8", "\uf016", "\uf1c1", "\uf1c5", "\uf1c5", "\uf1c4",
"\uf1c7", "\uf15c", "\uf0f6", "\uf1c8", "\uf1c2", "\uf1c6",
"\uf0c5", "\uf008", "\uf0b0", "\uf06d", "\uf134", "\uf024",
"\uf11e", "\uf11d", "\uf0e7", "\uf0c3", "\uf16e", "\uf0c7",
"\uf07b", "\uf114", "\uf07c", "\uf115", "\uf031", "\uf211",
"\uf04e", "\uf180", "\uf119", "\uf1e3", "\uf11b", "\uf0e3",
"\uf154", "\uf1d1", "\uf013", "\uf085", "\uf1db", "\uf06b",
"\uf1d3", "\uf1d2", "\uf09b", "\uf113", "\uf092", "\uf184",
"\uf000", "\uf0ac", "\uf1a0", "\uf0d5", "\uf0d4", "\uf1ee",
"\uf19d", "\uf184", "\uf0c0", "\uf0fd", "\uf1d4", "\uf0a7",
"\uf0a5", "\uf0a4", "\uf0a6", "\uf0a0", "\uf1dc", "\uf025",
"\uf004", "\uf08a", "\uf21e", "\uf1da", "\uf015", "\uf0f8",
"\uf236", "\uf13b", "\uf20b", "\uf03e", "\uf01c", "\uf03c",
"\uf129", "\uf05a", "\uf156", "\uf16d", "\uf19c", "\uf208",
"\uf033", "\uf1aa", "\uf157", "\uf1cc", "\uf084", "\uf11c",
"\uf159", "\uf1ab", "\uf109", "\uf202", "\uf203", "\uf06c",
"\uf212", "\uf0e3", "\uf094", "\uf149", "\uf148", "\uf1cd",
"\uf1cd", "\uf1cd", "\uf1cd", "\uf0eb", "\uf201", "\uf0c1",
"\uf0e1", "\uf08c", "\uf17c", "\uf03a", "\uf022", "\uf0cb",
"\uf0ca", "\uf124", "\uf023", "\uf175", "\uf177", "\uf178",
"\uf176", "\uf0d0", "\uf076", "\uf064", "\uf112", "\uf122",
"\uf183", "\uf041", "\uf222", "\uf227", "\uf229", "\uf22b",
"\uf22a", "\uf136", "\uf20c", "\uf23a", "\uf0fa", "\uf11a",
"\uf223", "\uf130", "\uf131", "\uf068", "\uf056", "\uf146",
"\uf147", "\uf10b", "\uf10b", "\uf0d6", "\uf186", "\uf19d",
"\uf21c", "\uf001", "\uf0c9", "\uf22c", "\uf1ea", "\uf19b",
"\uf03b", "\uf18c", "\uf1fc", "\uf1d8", "\uf1d9", "\uf0c6",
"\uf1dd", "\uf0ea", "\uf04c", "\uf1b0", "\uf1ed", "\uf040",
"\uf14b", "\uf044", "\uf095", "\uf098", "\uf03e", "\uf03e",
"\uf200", "\uf1a7", "\uf1a8", "\uf0d2", "\uf231", "\uf0d3",
"\uf072", "\uf04b", "\uf144", "\uf01d", "\uf1e6", "\uf067",
"\uf055", "\uf0fe", "\uf196", "\uf011", "\uf02f", "\uf12e",
"\uf1d6", "\uf029", "\uf128", "\uf059", "\uf10d", "\uf10e",
"\uf1d0", "\uf074", "\uf1d0", "\uf1b8", "\uf1a1", "\uf1a2",
"\uf021", "\uf00d", "\uf18b", "\uf0c9", "\uf01e", "\uf112",
"\uf122", "\uf079", "\uf157", "\uf018", "\uf135", "\uf0e2",
"\uf01e", "\uf158", "\uf09e", "\uf143", "\uf158", "\uf158",
"\uf156", "\uf0c7", "\uf0c4", "\uf002", "\uf010", "\uf00e",
"\uf213", "\uf1d8", "\uf1d9", "\uf233", "\uf064", "\uf1e0",
"\uf1e1", "\uf14d", "\uf045", "\uf20b", "\uf20b", "\uf132",
"\uf21a", "\uf214", "\uf07a", "\uf090", "\uf08b", "\uf012",
"\uf215", "\uf0e8", "\uf216", "\uf17e", "\uf198", "\uf1de",
"\uf1e7", "\uf118", "\uf1e3", "\uf0dc", "\uf15d", "\uf15e",
"\uf160", "\uf161", "\uf0de", "\uf0dd", "\uf0dd", "\uf162",
"\uf163", "\uf0de", "\uf1be", "\uf197", "\uf110", "\uf1b1",
"\uf1bc", "\uf0c8", "\uf096", "\uf18d", "\uf16c", "\uf005",
"\uf089", "\uf123", "\uf123", "\uf123", "\uf006", "\uf1b6",
"\uf1b7", "\uf048", "\uf051", "\uf0f1", "\uf04d", "\uf21d",
"\uf0cc", "\uf1a4", "\uf1a3", "\uf12c", "\uf239", "\uf0f2",
"\uf185", "\uf12b", "\uf1cd", "\uf0ce", "\uf10a", "\uf0e4",
"\uf02b", "\uf02c", "\uf0ae", "\uf1ba", "\uf1d5", "\uf120",
"\uf034", "\uf035", "\uf00a", "\uf009", "\uf00b", "\uf08d",
"\uf165", "\uf088", "\uf087", "\uf164", "\uf145", "\uf00d",
"\uf057", "\uf05c", "\uf043", "\uf150", "\uf191", "\uf204",
"\uf205", "\uf152", "\uf151", "\uf238", "\uf224", "\uf225",
"\uf1f8", "\uf014", "\uf1bb", "\uf181", "\uf091", "\uf0d1",
"\uf195", "\uf1e4", "\uf173", "\uf174", "\uf195", "\uf1e8",
"\uf099", "\uf081", "\uf0e9", "\uf0cd", "\uf0e2", "\uf19c",
"\uf127", "\uf09c", "\uf13e", "\uf0dc", "\uf093", "\uf155",
"\uf007", "\uf0f0", "\uf234", "\uf21b", "\uf235", "\uf0c0",
"\uf221", "\uf226", "\uf228", "\uf237", "\uf03d", "\uf194",
"\uf1ca", "\uf189", "\uf027", "\uf026", "\uf028", "\uf071",
"\uf1d7", "\uf18a", "\uf1d7", "\uf232", "\uf193", "\uf1eb",
"\uf17a", "\uf159", "\uf19a", "\uf0ad", "\uf168", "\uf169",
"\uf19e", "\uf1e9", "\uf157", "\uf167", "\uf16a", "\uf166"
), .Names = c("adjust", "adn", "align-center", "align-justify",
"align-left", "align-right", "ambulance", "anchor", "android",
"angellist", "angle-double-down", "angle-double-left", "angle-double-right",
"angle-double-up", "angle-down", "angle-left", "angle-right",
"angle-up", "apple", "archive", "area-chart", "arrow-circle-down",
"arrow-circle-left", "arrow-circle-o-down", "arrow-circle-o-left",
"arrow-circle-o-right", "arrow-circle-o-up", "arrow-circle-right",
"arrow-circle-up", "arrow-down", "arrow-left", "arrow-right",
"arrow-up", "arrows", "arrows-alt", "arrows-h", "arrows-v", "asterisk",
"at", "automobile", "backward", "ban", "bank", "bar-chart", "bar-chart-o",
"barcode", "bars", "bed", "beer", "behance", "behance-square",
"bell", "bell-o", "bell-slash", "bell-slash-o", "bicycle", "binoculars",
"birthday-cake", "bitbucket", "bitbucket-square", "bitcoin",
"bold", "bolt", "bomb", "book", "bookmark", "bookmark-o", "briefcase",
"btc", "bug", "building", "building-o", "bullhorn", "bullseye",
"bus", "buysellads", "cab", "calculator", "calendar", "calendar-o",
"camera", "camera-retro", "car", "caret-down", "caret-left",
"caret-right", "caret-square-o-down", "caret-square-o-left",
"caret-square-o-right", "caret-square-o-up", "caret-up", "cart-arrow-down",
"cart-plus", "cc", "cc-amex", "cc-discover", "cc-mastercard",
"cc-paypal", "cc-stripe", "cc-visa", "certificate", "chain",
"chain-broken", "check", "check-circle", "check-circle-o", "check-square",
"check-square-o", "chevron-circle-down", "chevron-circle-left",
"chevron-circle-right", "chevron-circle-up", "chevron-down",
"chevron-left", "chevron-right", "chevron-up", "child", "circle",
"circle-o", "circle-o-notch", "circle-thin", "clipboard", "clock-o",
"close", "cloud", "cloud-download", "cloud-upload", "cny", "code",
"code-fork", "codepen", "coffee", "cog", "cogs", "columns", "comment",
"comment-o", "comments", "comments-o", "compass", "compress",
"connectdevelop", "copy", "copyright", "credit-card", "crop",
"crosshairs", "css3", "cube", "cubes", "cut", "cutlery", "dashboard",
"dashcube", "database", "dedent", "delicious", "desktop", "deviantart",
"diamond", "digg", "dollar", "dot-circle-o", "download", "dribbble",
"dropbox", "drupal", "edit", "eject", "ellipsis-h", "ellipsis-v",
"empire", "envelope", "envelope-o", "envelope-square", "eraser",
"eur", "euro", "exchange", "exclamation", "exclamation-circle",
"exclamation-triangle", "expand", "external-link", "external-link-square",
"eye", "eye-slash", "eyedropper", "facebook", "facebook-f", "facebook-official",
"facebook-square", "fast-backward", "fast-forward", "fax", "female",
"fighter-jet", "file", "file-archive-o", "file-audio-o", "file-code-o",
"file-excel-o", "file-image-o", "file-movie-o", "file-o", "file-pdf-o",
"file-photo-o", "file-picture-o", "file-powerpoint-o", "file-sound-o",
"file-text", "file-text-o", "file-video-o", "file-word-o", "file-zip-o",
"files-o", "film", "filter", "fire", "fire-extinguisher", "flag",
"flag-checkered", "flag-o", "flash", "flask", "flickr", "floppy-o",
"folder", "folder-o", "folder-open", "folder-open-o", "font",
"forumbee", "forward", "foursquare", "frown-o", "futbol-o", "gamepad",
"gavel", "gbp", "ge", "gear", "gears", "genderless", "gift",
"git", "git-square", "github", "github-alt", "github-square",
"gittip", "glass", "globe", "google", "google-plus", "google-plus-square",
"google-wallet", "graduation-cap", "gratipay", "group", "h-square",
"hacker-news", "hand-o-down", "hand-o-left", "hand-o-right",
"hand-o-up", "hdd-o", "header", "headphones", "heart", "heart-o",
"heartbeat", "history", "home", "hospital-o", "hotel", "html5",
"ils", "image", "inbox", "indent", "info", "info-circle", "inr",
"instagram", "institution", "ioxhost", "italic", "joomla", "jpy",
"jsfiddle", "key", "keyboard-o", "krw", "language", "laptop",
"lastfm", "lastfm-square", "leaf", "leanpub", "legal", "lemon-o",
"level-down", "level-up", "life-bouy", "life-buoy", "life-ring",
"life-saver", "lightbulb-o", "line-chart", "link", "linkedin",
"linkedin-square", "linux", "list", "list-alt", "list-ol", "list-ul",
"location-arrow", "lock", "long-arrow-down", "long-arrow-left",
"long-arrow-right", "long-arrow-up", "magic", "magnet", "mail-forward",
"mail-reply", "mail-reply-all", "male", "map-marker", "mars",
"mars-double", "mars-stroke", "mars-stroke-h", "mars-stroke-v",
"maxcdn", "meanpath", "medium", "medkit", "meh-o", "mercury",
"microphone", "microphone-slash", "minus", "minus-circle", "minus-square",
"minus-square-o", "mobile", "mobile-phone", "money", "moon-o",
"mortar-board", "motorcycle", "music", "navicon", "neuter", "newspaper-o",
"openid", "outdent", "pagelines", "paint-brush", "paper-plane",
"paper-plane-o", "paperclip", "paragraph", "paste", "pause",
"paw", "paypal", "pencil", "pencil-square", "pencil-square-o",
"phone", "phone-square", "photo", "picture-o", "pie-chart", "pied-piper",
"pied-piper-alt", "pinterest", "pinterest-p", "pinterest-square",
"plane", "play", "play-circle", "play-circle-o", "plug", "plus",
"plus-circle", "plus-square", "plus-square-o", "power-off", "print",
"puzzle-piece", "qq", "qrcode", "question", "question-circle",
"quote-left", "quote-right", "ra", "random", "rebel", "recycle",
"reddit", "reddit-square", "refresh", "remove", "renren", "reorder",
"repeat", "reply", "reply-all", "retweet", "rmb", "road", "rocket",
"rotate-left", "rotate-right", "rouble", "rss", "rss-square",
"rub", "ruble", "rupee", "save", "scissors", "search", "search-minus",
"search-plus", "sellsy", "send", "send-o", "server", "share",
"share-alt", "share-alt-square", "share-square", "share-square-o",
"shekel", "sheqel", "shield", "ship", "shirtsinbulk", "shopping-cart",
"sign-in", "sign-out", "signal", "simplybuilt", "sitemap", "skyatlas",
"skype", "slack", "sliders", "slideshare", "smile-o", "soccer-ball-o",
"sort", "sort-alpha-asc", "sort-alpha-desc", "sort-amount-asc",
"sort-amount-desc", "sort-asc", "sort-desc", "sort-down", "sort-numeric-asc",
"sort-numeric-desc", "sort-up", "soundcloud", "space-shuttle",
"spinner", "spoon", "spotify", "square", "square-o", "stack-exchange",
"stack-overflow", "star", "star-half", "star-half-empty", "star-half-full",
"star-half-o", "star-o", "steam", "steam-square", "step-backward",
"step-forward", "stethoscope", "stop", "street-view", "strikethrough",
"stumbleupon", "stumbleupon-circle", "subscript", "subway", "suitcase",
"sun-o", "superscript", "support", "table", "tablet", "tachometer",
"tag", "tags", "tasks", "taxi", "tencent-weibo", "terminal",
"text-height", "text-width", "th", "th-large", "th-list", "thumb-tack",
"thumbs-down", "thumbs-o-down", "thumbs-o-up", "thumbs-up", "ticket",
"times", "times-circle", "times-circle-o", "tint", "toggle-down",
"toggle-left", "toggle-off", "toggle-on", "toggle-right", "toggle-up",
"train", "transgender", "transgender-alt", "trash", "trash-o",
"tree", "trello", "trophy", "truck", "try", "tty", "tumblr",
"tumblr-square", "turkish-lira", "twitch", "twitter", "twitter-square",
"umbrella", "underline", "undo", "university", "unlink", "unlock",
"unlock-alt", "unsorted", "upload", "usd", "user", "user-md",
"user-plus", "user-secret", "user-times", "users", "venus", "venus-double",
"venus-mars", "viacoin", "video-camera", "vimeo-square", "vine",
"vk", "volume-down", "volume-off", "volume-up", "warning", "wechat",
"weibo", "weixin", "whatsapp", "wheelchair", "wifi", "windows",
"won", "wordpress", "wrench", "xing", "xing-square", "yahoo",
"yelp", "yen", "youtube", "youtube-play", "youtube-square"))
#' Search FontAwesome names for a pattern
#'
#' @param pattern regular expression matched against the names of the
#'   FontAwesome fonts
#' @param ... further arguments passed on to \code{\link[base]{grep}},
#'   e.g. \code{ignore.case = TRUE} or \code{fixed = TRUE}
#' @return a character vector of matching FontAwesome names
#' @export
fa_grep <- function(pattern, ...) {
  # value = TRUE returns the matching names themselves rather than indices
  grep(pattern, names(fa_unicode), value = TRUE, ...)
}
#' List all FontAwesome names
#'
#' Prints the full set of FontAwesome icon names known to this package.
#'
#' @export
fa_list <- function() {
  icon_names <- names(fa_unicode)
  print(icon_names)
}
|
a54f43ded7f61051c1b6f7a0e8f1e525e68ed8f7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EcoGenetics/examples/eco.NDVI.post.Rd.R
|
7d43e2e77be67ab1df3f6eb8b54ecb2e99009777
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,637
|
r
|
eco.NDVI.post.Rd.R
|
library(EcoGenetics)
### Name: eco.NDVI.post
### Title: Postprocessing for NDVI and MSAVI2 temporal series of Landsat 5
### and 7
### Aliases: eco.NDVI.post
### ** Examples
## Not run:
##D require(raster)
##D
##D data(tab)
##D data(eco3)
##D temp <- list()
##D
##D # we create 4 simulated rasters for the data included in the object tab:
##D
##D for(i in 1:4) {
##D temp[[i]] <- runif(19800, 0, 254)
##D temp[[i]] <- matrix(temp[[i]], 180, 110)
##D temp[[i]] <- raster(temp[[i]], crs="+proj=utm")
##D extent(temp[[i]])<-c(3770000, 3950000, 6810000, 6920000)
##D }
##D
##D writeRaster(temp[[1]], "20040719b4.tif", overwrite = T)
##D writeRaster(temp[[2]], "20040719b3.tif", overwrite = T)
##D writeRaster(temp[[3]], "20091106b4.tif", overwrite = T)
##D writeRaster(temp[[4]], "20091106b3.tif", overwrite = T)
##D
##D # Computing NDVI images:
##D
##D eco.NDVI(tab, "COST", "NDVI", "LT5")
##D
##D # Mean NDVI image computed over the NDVI images that we calculated:
##D
##D eco.NDVI.post(tab, "COST", "NDVI", what = c("mean", "var"))
##D mean.ndvi <- raster("NDVI.COST.mean.tif")
##D plot(mean.ndvi)
##D
##D # Extraction of the mean NDVI for each point in the object eco and plot
##D # of the data:
##D
##D ndvi <- extract(mean.ndvi, eco3[["XY"]])
##D ndvi<- aue.rescale(ndvi)
##D plot(eco3[["XY"]][, 1], eco3[["XY"]][, 2], col=rgb(ndvi, 0, 0),
##D pch=15, main = "Mean NDVI", xlab = "X", ylab = "Y")
## End(Not run)
file.remove(c("NDVICOST20040719.tif", "NDVICOST20091106.tif",
"20040719b4.tif", "20040719b3.tif", "20091106b4.tif",
"20091106b3.tif", "NDVI.COST.mean.tif", "NDVI.COST.var.tif",
"NDVICOSTtime.tif"))
|
94742b5940b07eecba368e720bc14bfc9baf9f56
|
e29b10858b9b2d8995401e464f9ba4462b15a006
|
/code/analysis/summaries/count_control_summaries.R
|
b62bb34b5d6c820ca0be6fa74a8453a2caf71900
|
[
"MIT"
] |
permissive
|
PeterNilssonBio/NilssonPernet2022
|
5de5d957b0ca80bc7bcf852a14933f0310feb27c
|
e0bc008eabb4005a4d3d959048c59311f80d8f08
|
refs/heads/main
| 2023-04-17T09:09:52.011512
| 2022-07-12T21:05:12
| 2022-07-12T21:05:12
| 512,929,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
count_control_summaries.R
|
# Calculate per-treatment count control summary statistics.
#
# Parameters
# ----------
# count_control_beakers: data.frame
#   Data frame of per-beaker larval counts. Must contain the columns
#   `counted`, `expected`, `spawn_date`, `species`, `experiment` and
#   `density` (accessed via the rlang `.data` pronoun below).
#
# Returns
# -------
# data.frame
#   A data frame with a row for each treatment (per experiment).
#   NOTE(review): with a four-variable group_by(), dplyr::summarize()
#   returns a tibble still grouped by the first three variables unless
#   `.groups` is specified -- downstream code appears to rely on this
#   default; confirm before changing.
summarize_count_control <- function(count_control_beakers) {
  count_control_beakers %>%
    # per-beaker survival proportion: observed count over expected count
    dplyr::mutate(proportion = .data$counted / .data$expected) %>%
    dplyr::group_by(.data$spawn_date, .data$species, .data$experiment, .data$density) %>%
    # mean proportion across the beakers of each treatment
    dplyr::summarize(proportion = mean(.data$proportion))
}
|
c28e073192e7c3d63e1ffb2993dc16e56cae2685
|
dfaa86a4d560c27f656c2e4ea5537413945006c6
|
/competitions/caterpillar-tube-pricing/Exploratory_1.R
|
d1c832d15bd74709419e68f6583f09dfefa3f300
|
[
"MIT"
] |
permissive
|
fxcebx/fast-furious
|
c6cfa6637c1e93156e5ae8c9ef695c818d7c1037
|
b974e6b71be92ad8892864794af57631291ebac1
|
refs/heads/master
| 2020-07-05T16:57:08.106938
| 2019-04-12T01:57:33
| 2019-04-12T01:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,679
|
r
|
Exploratory_1.R
|
library(caret)
library(Hmisc)
library(data.table)
library(kernlab)
library(subselect)
library(plyr)
library(binhf)
library(fBasics)
library(lattice)
require(xgboost)
require(methods)
# Resolve the machine-specific base directory for a given location type.
#
# type: one of "data", "submission", "elab", "code", "process";
#       anything else aborts with an error.
#
# Returns the Windows root (with a "/" appended) when that directory
# exists on this machine, otherwise the Mac root, which already ends
# in "/". Either way the result can be prefixed directly to a filename.
getBasePath <- function(type = "data") {
  # Windows roots carry no trailing slash (one is appended below).
  win_root <- switch(type,
    data       = "C:/docs/ff/gitHub/fast-furious/dataset/caterpillar-tube-pricing/competition_data",
    submission = "C:/docs/ff/gitHub/fast-furious/dataset/caterpillar-tube-pricing",
    elab       = "C:/docs/ff/gitHub/fast-furious/dataset/caterpillar-tube-pricing/elab",
    code       = "C:/docs/ff/gitHub/fast-furious/competitions/caterpillar-tube-pricing",
    process    = "C:/docs/ff/gitHub/fast-furious/data_process",
    stop("unrecognized type.")
  )
  # Mac roots already end in "/".
  mac_root <- switch(type,
    data       = "/Users/gino/kaggle/fast-furious/gitHub/fast-furious/dataset/caterpillar-tube-pricing/competition_data/",
    submission = "/Users/gino/kaggle/fast-furious/gitHub/fast-furious/dataset/caterpillar-tube-pricing/",
    elab       = "/Users/gino/kaggle/fast-furious/gitHub/fast-furious/dataset/caterpillar-tube-pricing/elab/",
    code       = "/Users/gino/kaggle/fast-furious/gitHub/fast-furious/competitions/caterpillar-tube-pricing/",
    process    = "/Users/gino/kaggle/fast-furious/gitHub/fast-furious/data_process/"
  )
  if (file.exists(win_root)) {
    paste0(win_root, "/")
  } else {
    mac_root
  }
}
################# DATA
sample_submission = as.data.frame( fread(paste(getBasePath("data") ,
"sample_submission.csv" , sep='')))
train_set = as.data.frame( fread(paste(getBasePath("data") ,
"train_set.csv" , sep='')))
test_set = as.data.frame( fread(paste(getBasePath("data") ,
"test_set.csv" , sep='')))
tube = as.data.frame( fread(paste(getBasePath("data") ,
"tube.csv" , sep='')))
bill_of_materials = as.data.frame( fread(paste(getBasePath("data") ,
"bill_of_materials.csv" , sep='')))
################ ANALYSIS
####### train_set , test_set
sum(is.na(train_set)) ## 0
sum(is.na(test_set)) ## 0
## >> quindi nel trainset and testset non ci sono NAs
tube_assembly_id.quantity = ddply(train_set , .(tube_assembly_id , quantity) , function(x) c(num=length(x$tube_assembly_id) ))
sum( tube_assembly_id.quantity$num > 1) ## 378
## >> che significa che (tube_assembly_id , quantity) e' una quasi-chiave primaria di train_set and test_set
tube_assembly_id.quantity = ddply(train_set , .(tube_assembly_id , quantity,annual_usage) , function(x) c(num=length(x$tube_assembly_id) ))
sum( tube_assembly_id.quantity$num > 1) ## 230
## >> perche' non esiste una chiave primaria del train set,
# e.g. due quotazioni fatte dalle stesso fornitore in date diverse (e costi !=)
train_set[train_set$tube_assembly_id=='TA-00178',]
# tube_assembly_id supplier quote_date annual_usage min_order_quantity bracket_pricing quantity cost
# 425 TA-00178 S-0072 2014-07-15 300 1 Yes 1 6.245723
# 426 TA-00178 S-0072 2014-07-12 300 1 Yes 1 6.663031
## nel test set idem con patate ...
tube_assembly_id.quantity = ddply(test_set , .(tube_assembly_id , quantity,annual_usage) , function(x) c(num=length(x$tube_assembly_id) ))
sum( tube_assembly_id.quantity$num > 1)
head(tube_assembly_id.quantity[tube_assembly_id.quantity$num>1 , ])
head(test_set[test_set$tube_assembly_id == 'TA-00340' , ])
length(unique(train_set[train_set$bracket_pricing == 'No' , ]$tube_assembly_id)) / length(train_set[train_set$bracket_pricing == 'No' , ]$tube_assembly_id) ##1
## >> which means that for every Non-Bracket tube there is one and only one case to quote;
#  in that case the field <min_order_quantity> holds the minimum number of pieces to order
#  and the field <quantity> the minimum quantity per order
#
#  NOTE that <quantity> can differ from <min_order_quantity>
#  e.g. tube_assembly_id = TA-00048
#      min_order_quantity = 20
#      quantity = 1
#
# >>>>>> it follows that, for Non-Bracket tubes as well, the meaningful field is <quantity> and not <min_order_quantity>
####### tube
sum(is.na(tube)) ##279
sum(is.na(tube$material_id)) ##279
tube[is.na(tube$material_id) , ]
length(unique(intersect(tube[is.na(tube$material_id) , ]$tube_assembly_id , train_set$tube_assembly_id))) ## 101 NAs nel train_set
length(unique(intersect(tube[is.na(tube$material_id) , ]$tube_assembly_id , test_set$tube_assembly_id))) ## 97 NAs nel test_set
## >> in tube ci sono 279 NAs tutti relativi al campo material_id associati a 101 tube nel train set (su ~30.000)
# e 97 tubi nel test set (su ~30.000)
## >> cambiamo il valore di NA dei material_id NA da NA a UNKNOWN
tube[is.na(tube$material_id) , 'material_id'] = 'UNKNOWN'
##
tube.mat.tubid = ddply(tube , .(tube_assembly_id ) , function(x) c(num=length(x$tube_assembly_id)))
sum(tube.mat.tubid$num>1) # 0
## >> il che signiifica che (tube_assembly_id) e' una chiave primaria di tube <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
## >> a questo punto i prezzi associati allo stessa coppia (material_id, quantity)
# dovrebbero essere abbastanza uniformi (zeta >> 1)
tube_train = merge(x = train_set , y = tube , by = 'tube_assembly_id' , all = F)
tube_test = merge(x = test_set , y = tube , by = 'tube_assembly_id' , all = F)
cost.material_id = ddply(tube_train , .(material_id , quantity ) ,
function(x) c( cost.mean = mean(x$cost)
, cost.var = sd(x$cost)
, num = length(x$cost)))
cost.material_id$zeta = cost.material_id$cost.mean / cost.material_id$cost.var
cost.material_id = cost.material_id[order(cost.material_id$zeta , decreasing = T) , ]
## tranne le dovute eccezzioni , es. material_id = SP-0037
# dove pero' si nota una differenze di specifiche tecniche, i.e. diameter wall length num_bends ...
tube_train[tube_train$material_id == 'SP-0037' & tube_train$bracket_pricing == 'Yes' , ]
########>>>>>>>>>>>>>>> ci chiediamo a questo punto che performance puo' avere un semplice modello in
# cui il costo del tubo e' dato semplicemente dalla media del cluster di appartenenza in cost.material_id
cluter_test = merge(x = tube_test , y = cost.material_id , by = c('material_id','quantity') , all.x = T , all.y = F)
if (dim(cluter_test)[1] != dim(test_set)[1]) stop('something wrong')
# abbiamo 141 missing (cluter_test$cost.mean)
# per questi 141 lavoreremo in seguito. Al momento li imputiamo con la media dei prezzi in prediction (~13.52663)
cluter_test[is.na(cluter_test$cost.mean) , ]$cost.mean = mean(cluter_test[! is.na(cluter_test$cost.mean) , ]$cost.mean)
# estraiamo la nostra prediction e la salviamo su disco ...
sub = cluter_test[ , c('id' , 'cost.mean')]
sub = sub[order(sub$id,decreasing = F) , ]
colnames(sub) = colnames(sample_submission)
cat(">> writing prediction on disk ... \n")
write.csv(sub,quote=FALSE,
file=paste(getBasePath("submission"),'sub_base.csv',sep='') ,
row.names=FALSE)
## >>>>>> otteniamo 0.625195 (769/869) dove il miglior modello performa come 0.210458
id_qty = ddply(tube_train , .(tube_assembly_id,quantity,material_id) , function(x) c(num = length(unique(x$material_id))) )
sum(id_qty$num>1) # 0
## >> per ogni coppia (tube_assembly_id,quantity) e' associato al piu' un e un solo <material_id>
# tuttavia per ogni tripletta (tube_assembly_id,quantity,material_id) possono esistere diversi costi
# correlati essenzialmente a diversi <annual_usage> , <date_quotation>, etc.
id_qty.var = ddply(tube_train , .(tube_assembly_id,quantity,material_id) , function(x) c(num = length(x$material_id)) )
describe(id_qty.var$num)
# n missing unique Info Mean
# 29815 0 5 0.04 1.013
#
# 1 2 3 4 6
# Frequency 29437 363 12 2 1
# % 99 1 0 0 0
####################################################################
## CONCLUSIONS
####################################################################
#
# 1) <tube_assembly_id> is a primary key of <tube>, so it is convenient to build a dataset of technical
#    characteristics of each tube, <tube_tech>, whose primary key is <tube_assembly_id>
#
# 2) <tube_assembly_id , quantity> is a quasi-primary key of train_set and test_set, i.e. except for roughly 1%
#    of the cases (in both the train set and the test set) it behaves as a primary key. In the 1% where it does not,
#    there can be several quotes for the same <tube_assembly_id , quantity> depending on the <supplier>,
#    <quote_date>, <annual_usage>, etc.
#    It is therefore convenient to build a final dataset <tube_info> as the join between <tube_tech> and <train_set>/<test_set>
#
# 3) The field <tube_assembly_id> is associated with one and only one <material_id> in the <tube> dataset except in 1% of the cases.
#    This field really looks like the SAP material_id, so it can be used to cluster the data, i.e.
#    if a tube with a given <tube_assembly_id> to be quoted in the <test_set> is associated
#    with a given <material_id> and the zeta-score of that <material_id> is above a certain threshold,
#    then it is convenient to train the model on all tubes associated with the same
#    <material_id> and the same <quantity> in the <tube_info> dataset, excluding the others.
#    If instead a tube with a given <tube_assembly_id>
#    to be quoted in the <test_set> is not associated with any <material_id> (1% of the cases),
#    or its zeta score is below the threshold, then
#    it is convenient to train the model on all tubes in the <tube_info> dataset.
#
#    In short, the train set / test set is clustered by <material_id,quantity> pair, i.e.
#    within the same cluster the <material_id,quantity> values are unique.
#
# 4) Point (3) also suggests a 3-level architecture in which the first level consists of a number of models.
#    The second level takes as features the predictions of the first level plus <material_id> and <quantity>, and uses another set
#    of models. An optional third level can consist of weighted averages of the second-level predictions.
|
2992bb22640ee380293dfdfe6d3c2bf3052c304c
|
aedb3dd2f909bc0f06becd80ca3bbdc92a11bd09
|
/man/iMUBAC.Rd
|
de9c23ba2829496ed13d827f18174dede18b6e23
|
[
"MIT"
] |
permissive
|
jaredmychal/iMUBAC
|
ec5d5a42da6439729b36c561049f33f90a244f04
|
8fb25172ecfaf2f7c6c20c2367e93b2710e2bd84
|
refs/heads/master
| 2023-02-05T13:15:06.171932
| 2020-12-25T17:25:45
| 2020-12-25T17:25:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 785
|
rd
|
iMUBAC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iMUBAC.R
\docType{package}
\name{iMUBAC}
\alias{iMUBAC}
\alias{package-iMUBAC}
\title{iMUBAC: Integration of Multi-Batch Cytometry datasets.}
\description{
High-dimensional cytometry, including spectral flow cytometry and mass cytometry (CyTOF), enables deep immunophenotyping at a single-cell resolution.
Analysis of cytometry data from multiple batches of experiments remains challenging due to the high dimensionality and batch effects.
We designed iMUBAC (Integration of Multi-Batch Cytometry datasets) to enable a rational and streamlined inter-batch comparisons through i) preprocessing, ii) batch-correction, iii) unsupervised clustering, and iv) batch-specific cell-type identification.
}
|
232799215b7c70c1a5ee00ad096b799182d02291
|
dfdc779161b1802767b883d0f57db4a352db3377
|
/R/afrilandcover.r
|
9b482e84e1a2204b2ce323fda52c9d2e94abe28e
|
[
"CC-BY-4.0"
] |
permissive
|
afrimapr/afrilearndata
|
fbe881ae421dd6f92d84f88482643909ae748ad9
|
d8510ed4a732418c4cf1a385867604d341528ad0
|
refs/heads/master
| 2023-08-28T11:00:51.372188
| 2021-11-09T15:16:39
| 2021-11-09T15:16:39
| 307,471,995
| 14
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
afrilandcover.r
|
#' @name afrilandcover
#' @aliases afrilandcover
#' @title landcover raster for Africa, categorical, 20km resolution
#'
#' @description a \code{raster} object storing the majority landcover in 2019 for all 20km squares in Africa.
#' Categorical, 20km resolution from [MODIS](https://lpdaac.usgs.gov/products/mcd12c1v006/).
#' Cell values are numeric, landcover type names are stored in Raster Attribute Table (RAT) that can be accessed via `levels(afrilandcover)`
#' See data-raw/afrilearndata-creation.R for how the data object is created.
#'
#' @format Formal class 'raster';
#'
#' Geographical coordinates WGS84 datum (CRS EPSG 4326)
#'
#' @seealso
#' Friedl, M., D. Sulla-Menashe. MCD12C1 MODIS/Terra+Aqua Land Cover Type Yearly L3 Global 0.05Deg CMG V006. 2015, distributed by NASA EOSDIS Land Processes DAAC, https://doi.org/10.5067/MODIS/MCD12C1.006. Accessed 2021-06-07.#'
#' @source \url{https://lpdaac.usgs.gov/products/mcd12c1v006/}
#' @docType data
#' @keywords datasets sf
#' @examples
#' if (requireNamespace("raster", quietly = TRUE)) {
#' library(raster)
#' data(afrilandcover)
#' # or
#' filename <- system.file("extdata","afrilandcover.grd", package="afrilearndata", mustWork=TRUE)
#' afrilandcover <- raster::raster(filename)
#'
#' plot(afrilandcover)
#' }
#'
#' # interactive plotting with mapview
#' if (requireNamespace("mapview", quietly = TRUE) &
#' requireNamespace("rgdal", quietly = TRUE)) {
#' library(mapview)
#' mapview(afrilandcover,
#' att="landcover",
#' col.regions=levels(afrilandcover)[[1]]$colour)
#' }
#'
#'
"afrilandcover"
|
371a9b4bcb1b86ac14fc52caf3d0a5a7406706c2
|
70bd03a1f80969e8a30dc0ef4c29c2579a788a81
|
/man/read.resp.Rd
|
c2c0361a1365cbd0507c7b4ef293f882eb190c35
|
[] |
no_license
|
lebebr01/irtoys-2
|
68f2aa34b35a51f0fe32dc94b34c4be3cd05ff25
|
c8e33faeadb83aaf3a6937a57e16b4d205dc24ce
|
refs/heads/master
| 2021-01-23T13:29:58.776561
| 2014-09-04T21:08:36
| 2014-09-04T21:08:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,456
|
rd
|
read.resp.Rd
|
\name{read.resp}
\alias{read.resp}
\title{Read responses from a file}
\usage{
read.resp(file, na = ".")
}
\arguments{
\item{file}{File name}
\item{na}{The symbol used to represent missing data}
}
\value{
A matrix, typically of zeroes and ones, representing the
correct or wrong responses given by persons (rows) to
items (columns).
}
\description{
Reads responses to a questionnaire from a text file
}
\details{
Included for those who are too faint-hearted to write
\code{as.matrix(read.table(file, head=F))}. Of course,
data can be entered into R in many other ways.
The data values in the \code{file} must be separated with
blanks.
Responses are the empirical data used in IRT. Note that
\code{irtoys} deals with models for dichotomous data, and
typically expects data consisting of zeroes and ones,
without any missing values (non-responses are considered
as wrong responses). In fact, there are only two commands
in \code{irtoys} that accept other kinds of data:
\code{sco} and \code{tgp}.
\code{read.resp} does accept missing data and values
other than 0 and 1. Use \code{sco} and a key to score
multiple choice responses to 0/1. If you have dichotomous
data that contains NAs, you can use \code{sco} without a
key to change all NA to 0.
}
\examples{
\dontrun{
r <- read.resp("c:/myfiles/irt.dat")
}
}
\author{
Ivailo Partchev
}
\seealso{
\code{\link{sco}}, \code{\link{tgp}},
}
\keyword{IO}
|
03fcec9ae11db7e2ed1208ee23a15fd3a366fca7
|
bd55cede267ba7a9bb5737d669547971f2158a16
|
/ololo.R
|
b7eb7a513ac242d1110454afe438a2418b9242ab
|
[] |
no_license
|
llemish/reproduceable1
|
53594b704dd38b2f8399b45bc73a90555e7352b5
|
048bb2f1a204b41bf1142b20d5b4cc209d5afe95
|
refs/heads/master
| 2023-02-22T02:52:31.479837
| 2021-01-28T13:57:45
| 2021-01-28T13:57:45
| 332,663,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,023
|
r
|
ololo.R
|
# Fuzzy-match one EVTYPE group of rows against the global `dict` of
# canonical event-type names.
#
# ds   : data.frame slice (one EVTYPE group, as produced by by()) with
#        columns EVTYPE and event_type (logical "already classified" flag)
# dist : maximum string distance forwarded to stringdist::amatch()
#        NOTE(review): the default `dist = dist` is self-referential and
#        errors if the caller omits the argument -- always pass `dist`.
#
# Returns `ds`; EVTYPE is replaced by its dictionary stereotype and
# event_type set to TRUE only when EVERY row of the group matched.
get_stereotype <- function(ds, dist = dist) {
  # print(str(ev[[1]]))  # leftover debugging, kept disabled
  # stop()
  if (!any(ds$event_type)){
    # amatch() returns, per element, the index of the closest dictionary
    # entry within maxDist, or NA when nothing is close enough
    ster <- amatch(ds$EVTYPE, dict, maxDist = dist)
    if (!any(is.na(ster))){
      # browser()
      ds$EVTYPE <- dict[ster]
      ds$event_type <- T
    }
  }
  return(ds)
}
# Classify every EVTYPE group of `dataset` by fuzzy-matching it against the
# global `dict` (see get_stereotype()).
#
# dataset  : data.frame with columns EVTYPE and event_type
# dist     : maximum string distance passed to get_stereotype()
# max_dist : kept for backward compatibility; the recursive widening of the
#            search distance is currently disabled (commented out upstream)
#
# Returns the `by` object holding one (possibly re-labelled) data.frame
# slice per EVTYPE level.
find_matches <- function(dataset, dist, max_dist = 4){
  # get_stereotype() relies on stringdist::amatch(), so make sure the
  # package is attached before dispatching over the groups
  library(stringdist)
  # BUGFIX: a browser() breakpoint left between the by() call and the
  # return halted every run; it has been removed along with the dead
  # commented-out scalar loop that followed the return.
  by(dataset, dataset$EVTYPE, get_stereotype, dist = dist)
}
# Map every factor level of dataset$EVTYPE onto its dictionary stereotype.
#
# For each level the match is attempted with an increasing string distance
# (1 .. max_dist). Levels containing parentheses are skipped, because the
# level is later used as a regex pattern by get_stereotype2(); skipped and
# never-matched levels are labelled "unclassified".
#
# dataset  : data.frame whose EVTYPE column is a factor
# max_dist : largest string distance tried before giving up
#
# Returns `dataset` with a new character column `event_type`.
find_matches2 <- function(dataset, max_dist = 4){
  library(stringdist)  # get_stereotype2() needs amatch()
  dataset$event_type <- rep(NA, length(dataset$EVTYPE))
  ev_lvls <- levels(dataset$EVTYPE)
  for (i in seq_along(ev_lvls)) {  # seq_along(): safe when there are no levels
    lvl <- ev_lvls[i]
    matched_lvl <- NA
    # the parenthesis check is loop-invariant, so test it once per level
    # instead of on every pass of the distance-widening loop
    if (!grepl("[()]", lvl)) {
      dist <- 1
      while (is.na(matched_lvl) && dist <= max_dist) {
        matched_lvl <- get_stereotype2(lvl, dist)
        dist <- dist + 1
      }
    }
    if (is.na(matched_lvl)) {
      matched_lvl <- "unclassified"
    }
    # direct column-subset assignment replaces the fragile nested
    # replacement form dataset[cond, ]['event_type'] <- value
    dataset$event_type[dataset$EVTYPE == lvl] <- matched_lvl
  }
  return(dataset)
}
# Find the dictionary stereotype for a single event-type label.
#
# Two strategies, in order:
#   1. pattern search: the first entry of the global `dict` that `old_lvl`
#      matches via grep() (NOTE: old_lvl is interpreted as a regular
#      expression, which is why callers must filter out metacharacters);
#   2. fuzzy match via stringdist::amatch() within `dist`.
#
# old_lvl : the raw event-type label to classify
# dist    : maximum string distance for the fuzzy match
#
# Returns the matching dictionary entry, or NA when neither strategy hits.
get_stereotype2 <- function(old_lvl, dist){
  a <- grep(old_lvl, dict)
  if (length(a) > 0){
    # BUGFIX: the stray `break` that followed this return was dead code
    # (a break outside any loop) and has been removed
    return(dict[a[1]])
  }
  ster <- amatch(old_lvl, dict, maxDist = dist)
  if (!is.na(ster)){
    return(dict[ster])
  } else {
    return(NA)
  }
}
|
c09cb59e78e2b23ea28337f1f9daa0ee6181eb4d
|
3d9170ab762070ea7f7dd66663b134929b225076
|
/colonyR/R/colParseResults.R
|
9228b9c4fcd22a3a6a3f564a6c3322aa16de8b34
|
[] |
no_license
|
mdjbru-R-packages/colonyR
|
cb6dd5d2f49605059660e54f37fa21ef6edfaf4f
|
bdfdb810f1d1d3b6ba4d063490a4d4a307bf4ce9
|
refs/heads/master
| 2020-05-17T02:40:20.585893
| 2014-10-09T13:49:33
| 2014-10-09T13:49:33
| 24,934,659
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 782
|
r
|
colParseResults.R
|
#' @title Parse results from a Colony run
#'
#' @description Parse the monitoring information and final best configuration
#' for a Colony run
#' @param file basename of the output file name given to Colony
#' @param dir directory where the run took place
#'
#' @return A list with two elements: \code{run} with the monitoring
#' information and \code{pedigree} with the final pedigree
#'
#' @export
#'
# Parse the monitoring information and final best configuration for a
# Colony run (see the roxygen block above for the exported docs).
#
# file : basename of the output file name given to Colony
# dir  : directory where the run took place ("" = current directory)
#
# Returns a list with elements `run` (monitoring time series read from
# <file>.MidResult) and `pedigree` (final pedigree read from
# <file>.BestConfig). Raises the usual read.table error if either file
# is missing or malformed.
colParseResults <- function(file, dir = "") {
  base_path <- file.path(dir, file)
  # monitoring time series written by Colony during the run;
  # header = TRUE spelled out (T is reassignable and unsafe)
  run_monitor <- read.table(paste0(base_path, ".MidResult"),
                            header = TRUE, comment.char = "")
  # best pedigree configuration found at the end of the run
  # (local renamed from `c`, which shadowed base::c)
  best_config <- read.table(paste0(base_path, ".BestConfig"),
                            header = TRUE, comment.char = "")
  list(run = run_monitor, pedigree = best_config)
}
|
b16c8ccf8b3218f5f38f9fb0a941fe125f4c3805
|
6870dbf72162b6a0be218579e60feb229771b67d
|
/colocWrapper/man/importGWASCatalogSummary.Rd
|
2501e07a3a97803d3b6fbb3daae7ec0a75751312
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/colocWrapper
|
49c0e73dea3a64784385520cabe6229136e9bab3
|
02a814cc3148ee94c134abfe8d97d573a7c396a2
|
refs/heads/master
| 2021-06-24T20:34:53.518411
| 2020-10-14T10:53:55
| 2020-10-14T10:53:55
| 133,420,295
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 298
|
rd
|
importGWASCatalogSummary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_data.R
\name{importGWASCatalogSummary}
\alias{importGWASCatalogSummary}
\title{Import GWAS Catalog summary stats}
\usage{
importGWASCatalogSummary(summary_path)
}
\description{
Import GWAS Catalog summary stats
}
|
aa3e69ea7be85eaf6430e86956999c8f19988076
|
40509fc494148bc2e7ddfe89884146199b308e53
|
/man/filterWideDF.Rd
|
8f036931e2ac7f143b9396e8aab74cea29d3fa41
|
[
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
srcorsi-USGS/GLRItcl
|
f85889cff82c98d5597f78c65cc0eeca17859405
|
c4250bb93f680346fa90f00abd37a36c9ca78a1c
|
refs/heads/master
| 2020-02-26T13:46:53.490898
| 2013-12-03T04:31:18
| 2013-12-03T04:31:18
| 14,752,688
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
rd
|
filterWideDF.Rd
|
\name{filterWideDF}
\alias{filterWideDF}
\title{Filter the wide dataframe by parameter code}
\usage{
filterWideDF(wideDF, PCode)
}
\arguments{
\item{wideDF}{data.frame}
\item{PCode}{string 5 digit USGS parameter code (can be a
vector)}
}
\value{
DF dataframe
}
\description{
Filter the wide dataframe by parameter code, codes can
come in individually as a simple string, or in a
character vector
}
\examples{
genericCensoringValue <- function(qualifier,value, detectionLimit){
valueToUse <- ifelse("<" == qualifier, detectionLimit, value)
return(valueToUse)
}
wideDF <- wideGLRIData(filterGLRIData(QWPortalGLRI,genericCensoringValue))
temperature <- filterWideDF(wideDF,"00010")
multipleData <- filterWideDF(wideDF,c("00010","62818"))
}
\keyword{stat}
\keyword{summary}
|
a1a495b2906c9c7f1504a33ec0eeb74475bc9862
|
a3a49f8e285c685c2a80997bda0d214c53d28b18
|
/man/SDG-package.Rd
|
3f49e09bc089fb93e7a989e16947a2aafac61e36
|
[] |
no_license
|
jasenjov/SDG
|
c733aba91a74d61b8b03890e8a93750b19aa8e71
|
a7c84fc21acf350eaeed7a9b60079977bdc91a24
|
refs/heads/master
| 2020-12-24T13:16:28.567005
| 2015-08-28T07:23:56
| 2015-08-28T07:23:56
| 41,532,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,702
|
rd
|
SDG-package.Rd
|
\name{SDG-package}
\alias{SDG-package}
\alias{SDG}
\docType{package}
\title{
Synthetic Data Generation for the Educational Data Mining.
SDG
}
\description{
The goal of this package is to generate synthetic data. The package is very flexible while introducing parameters and very adaptive depending on the information the user owns. The structure of the library is based on a collection of functions that generate students' performance data according to different models. Then, there are also functions to generate different types of matrices and functions to plot the information in a really useful and understandable way.
}
\details{
\tabular{ll}{
Package: \tab SDG\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-17\cr
}
}
\author{
Javier Asenjo Villamayor\cr
Supervisor: Michel C. Desmarais
Maintainer: Who to complain to <javier.asenjo.villamayor@gmail.com>
}
\references{
- Beheshti, B. and Desmarais, M.C. (2014). Assessing Model Fit With Synthetic vs. Real Data, Polytechnique Montreal.\cr\cr
- Beheshti, B. and Desmarais, M.C. (2014). Predictive performance of prevailing approaches to skills assessment techniques: Insights from real vs. synthetic data sets 7th Conference on Educational Data Data Mining (EDM 2014), London, England, p. 409-410.\cr\cr
-Desmarais, M.C. and Pelczer, I. (2010). On the Faithfullness of Simulated Student Performance Data. In Proceedings of Educational Data Mining 2010 (EDM2010). Pittsburg, PA, Jun. 11-13, p. 21-30.
}
\keyword{ package }
\seealso{
CDM - package : \url{http://cran.r-project.org/web/packages/CDM/}\cr
Partitions - package : \url{http://cran.r-project.org/web/packages/partitions/index.html}
}
\examples{
library(SDG)
}
|
3df47fb584e627f69b9b32650cc5df770d5baf50
|
c5c7454ae3cbdf33f0635a14b0146cc9dc9e4ca2
|
/man/is_infected_in_free_stall.Rd
|
ccb320af0a8f963162cad1cc65535df521f6f527
|
[
"MIT"
] |
permissive
|
fmsan51/blvibmjp
|
cc3cd507aac19903509557d66f82fb9795d7d287
|
91f7486d2bf6e96c0d731f8f1471205459524885
|
refs/heads/master
| 2022-12-22T15:46:22.482530
| 2020-09-02T20:21:45
| 2020-09-02T20:21:45
| 221,131,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 725
|
rd
|
is_infected_in_free_stall.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probability_function.R
\name{is_infected_in_free_stall}
\alias{is_infected_in_free_stall}
\title{Whether cows are infected in free pastures}
\usage{
is_infected_in_free_stall(n_noinf, n_inf, month, param_sim)
}
\arguments{
\item{n_noinf}{The number of non-infected cows in a barn.}
\item{n_inf}{The number of infected cows in the barn.}
\item{month}{The current month (1, 2, ..., 12).}
\item{param_sim}{A list which combined \link{param}, a result of \code{\link[=process_param]{process_param()}} and a result of \code{\link[=calc_param]{calc_param()}}.}
}
\value{
A logical vector.
}
\description{
Whether cows are infected in free pastures
}
|
b4d8107c2a2efedf624a21701f60f81d2000c456
|
46d7e12a345a404d1ff20fea5ff5a872c45afa3b
|
/R/state.R
|
2353429a7dc9dee15d1b03f5fa4f0b4f13311c1d
|
[] |
no_license
|
cran/lifx
|
3d35b870b931eea683fb28cddb0f60db492cb4b3
|
d3398d410c7bb09224c52b749c3e049f0b533675
|
refs/heads/master
| 2022-11-13T00:18:59.746866
| 2020-06-24T11:10:06
| 2020-06-24T11:10:06
| 276,693,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,538
|
r
|
state.R
|
# generic state modifiers ----------------------------------------------------
#' set light state (lifx API endpoint PUT set state)
#'
#' Thin wrapper around lx_PUT() for the LIFX "set state" endpoint.
#' Arguments left at NULL are forwarded as-is; presumably lx_PUT() drops
#' NULL body fields so only the supplied aspects of the light's state
#' change -- TODO confirm against lx_PUT()'s implementation.
#'
#' @template param_power
#' @template param_color_name
#' @param brightness set the brightness (0-1)
#' @template param_infrared
#' @template param_duration
#' @template param_fast
#' @template param_selector
#' @template param_token
#' @return an 'httr' response object (see \code{\link[httr]{response}}),
#'   returned invisibly
#' @references \url{https://api.developer.lifx.com/docs/set-state}
lx_state<-function(power=NULL,
                   color_name=NULL,
                   brightness=NULL,
                   infrared=NULL,
                   duration=0,
                   fast=FALSE,
                   selector="all",
                   token = lx_get_token()){
  # first three arguments are the ones lx_PUT() declares; everything
  # after travels to it through ... and ends up in the request body
  response <- lx_PUT(endpoint="state", # expected params
                     selector=selector, # expected params
                     token=token, # expected params
                     power=power, # query body as ... (dots):
                     color=color_name,
                     brightness=brightness,
                     infrared=infrared,
                     duration=duration,
                     fast=fast
  )
  invisible(response)
}
#' Change light state relative to current state (wrapper for POST state delta)
#'
#' All supplied arguments are interpreted as offsets from the light's
#' current state; arguments left at NULL are forwarded unchanged.
#'
#' @template param_colors
#' @template param_duration
#' @template param_infrared
#' @template param_power
#' @template param_selector
#' @template param_token
#' @return an 'httr' response object (see \code{\link[httr]{response}}),
#'   returned invisibly
#' @references \url{https://api.developer.lifx.com/docs/state-delta}
lx_delta<-function(hue = NULL,
                   saturation = NULL,
                   brightness=NULL,
                   kelvin = NULL,
                   infrared=NULL,
                   duration=0,
                   power=NULL,
                   selector="all",
                   token = lx_get_token() ){
  # CLEANUP: dropped the dead locals `api_call <- lx_POST` and
  # `endpoint <- "state/delta"` -- api_call was never used and
  # lx_POST() is now called directly with the endpoint inlined.
  response <- lx_POST(endpoint="state/delta", # expected params
                      selector=selector, # expected params
                      token=token, # expected params
                      power=power, # body params as ... (dots):
                      duration=duration,
                      infrared=infrared,
                      hue = hue,
                      saturation = saturation,
                      brightness=brightness,
                      kelvin = kelvin
  )
  invisible(response)
}
|
dfe8d14ce54298609351425a84277a85ec7c85d6
|
cf09008185b813e272bbe208120852ebfb277fe8
|
/tidyr_script9.R
|
db81d05a2ba675c238d69662e30e6a6c75612f2c
|
[] |
no_license
|
AnkurDesai11/datascienceJHUcoursera
|
efd1eedd5ab29c8835ac0cf129fa1b9e68b88719
|
4a36448fb2827d4f5048c8b21c210684cf363746
|
refs/heads/master
| 2023-02-09T21:29:26.288860
| 2021-01-11T04:26:39
| 2021-01-11T04:26:39
| 255,146,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
tidyr_script9.R
|
# Append two more function calls to accomplish the following:
#
# 1. Use group_by() (from dplyr) to group the data by part and
# sex, in that order.
#
# 2. Use mutate to add two new columns, whose values will be
# automatically computed group-by-group:
#
# * total = sum(count)
# * prop = count / total
#
# score_range part sex count
# <chr> <chr> <chr> <int>
#1 700-800 read male 40151
#2 600-690 read male 121950
#3 500-590 read male 227141
#4 400-490 read male 242554
#5 300-390 read male 113568
#6 200-290 read male 30728
#7 700-800 read fem 38898
#8 600-690 read fem 126084
#9 500-590 read fem 259553
#10400-490 read fem 296793
# ... with 26 more rows
# Reshape SAT score-band counts to long form, split "part_sex" (e.g.
# "read_male") into part and sex, then compute each band's share of its
# (part, sex) group.
# NOTE(review): gather()/separate() are superseded (pivot_longer() /
# separate_wider_delim() in current tidyr) but are kept here because the
# exercise explicitly targets them.
sat %>%
  select(-contains("total")) %>%
  gather(part_sex, count, -score_range) %>%
  separate(part_sex, c("part", "sex")) %>%
  ### <Your call to group_by()> %>%
  group_by(part, sex) %>%
  # total and prop are computed group-by-group under the grouping above
  mutate(total = sum(count),
         prop = count / total
  ) %>% print
# score_range part sex count total prop
# <chr> <chr> <chr> <int> <int> <dbl>
#1 700-800 read male 40151 776092 0.0517
#2 600-690 read male 121950 776092 0.157
#3 500-590 read male 227141 776092 0.293
#4 400-490 read male 242554 776092 0.313
#5 300-390 read male 113568 776092 0.146
#6 200-290 read male 30728 776092 0.0396
#7 700-800 read fem 38898 883955 0.0440
#8 600-690 read fem 126084 883955 0.143
#9 500-590 read fem 259553 883955 0.294
#10400-490 read fem 296793 883955 0.336
# ... with 26 more rows
|
aa0f347d5136e52d5a58e43ccac0b4df1e8d54ad
|
d43ee2a310dccad76d62f696fe4562e55a138b0f
|
/revisions/supplementary_code/lottery_model.R
|
d06bdb49c7dcb393a97ae6a23160abff8cbd33de
|
[
"BSD-2-Clause"
] |
permissive
|
DominiqueGravel/ms_neutral_theory
|
d7444d2c2e9a19ac250db110b15bfc7f7dd75e8e
|
7a060780002cc0bc9aeb5f42f65c4d6762663b66
|
refs/heads/master
| 2016-09-03T07:31:12.058294
| 2013-09-25T14:17:22
| 2013-09-25T14:17:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,877
|
r
|
lottery_model.R
|
#######################################################################
# Individual based lottery model with spatially explicit
# dispersal and environmental heterogeneity
#
# The model collapses to a neutral model when the niche breadth tends to infinity
#
# Store the results in a time series of occupancy, a presence-absence matrix
# and a local abundance matrix
#
# By: Dominique Gravel (dominique_gravel@uqar.ca)
# Last edited September 2013
# For latest version:
# https://github.com/DominiqueGravel/ms_neutral_theory
########################################################################
# Main function
lottery_model = function(m,M,d,S,J,sdE,sdN,spatial_graph,nsteps) {
  # Args:
  #   m: immigration probability from the neighbourhood
  #   M: immigration probability from outside the metacommunity
  #   d: local death rate
  #      (FIX: previously documented as "k", and the kill step below
  #      referenced an undefined `k`; the parameter is `d`.)
  #   S: number of species
  #   J: local community size
  #   sdE: within patch standard deviation of the environment
  #   sdN: niche breadth
  #   spatial_graph: a spatial graph object (element [[2]] is the adjacency matrix)
  #   nsteps: number of time steps to run the simulation
  #
  # Returns:
  #   A list with a site presence-absence matrix, a time series of occupancy
  #   for each species, and a site relative-abundance matrix from the last
  #   time step: list(pres, Series, P)
  #
  ########################################
  # Prepare the simulation
  ####################
  # Adjacency matrix
  adjMat = spatial_graph[[2]] # The original matrix
  n = nrow(adjMat) # Number of nodes
  degrees = apply(adjMat,2,sum) # Number of degrees for each node
  # Weighted adjacency matrix (each column scaled by the node's degree)
  w = adjMat*matrix(degrees^-1, nrow = n, ncol = n, byrow = TRUE)
  ####################
  # Local environmental conditions: J microsites per patch
  Env = matrix(nrow = n, ncol = J)
  for(i in 1:n) Env[i,] = rnorm(J,mean = runif(1,0,100), sd = sdE) # For a random distribution of environments
  # for(i in 1:n) Env[i,] = rnorm(J,mean = i/n*100, sd = 0) # For a uniform distribution of environments
  ####################
  # Niche optimums
  u = runif(S, 0, 100) # For a random distribution of optimums
  # u = 100*c(1:S)/S # For a uniform distribution of optimums
  ####################
  # Initialization of the metacommunity
  # Starts with uniform abundance at each location
  localC = list()
  P = matrix(0, nrow = n, ncol = S)
  for(i in 1:n) {
    # J individuals, each drawn uniformly among the S species
    localC[[i]] = t(rmultinom(n = J,size = 1, prob = numeric(S)+J^-1))
    P[i,] = apply(localC[[i]],2,sum)/J
  }
  ####################
  # Matrix in which we record the occupancy over time
  Series = matrix(nrow = nsteps, ncol = S+1)
  ####################
  # Loop over all time steps
  for(time in 1:nsteps) {
    # Calculate the weighted relative abundance in the neighboring communities
    wP = w%*%P/apply(w,1,sum)
    ####################
    # Loop across the patches
    for(i in 1:n) {
      # Kill individuals at random with probability d
      rand = runif(J,0,1)
      localC[[i]][rand < d,] = 0   # FIX: was `rand < k` with `k` undefined
      ####################
      # Calculate recruitment probability
      # Relative abundance in the seed rain
      rel_seed = M*S^-1 + m*wP[i,] + (1-m-M)*apply(localC[[i]],2,sum)/sum(localC[[i]])
      # Weighting by local environmental conditions (Gaussian niche)
      surv = exp(-(matrix(Env[i,], nrow = J, ncol = S, byrow = FALSE) - matrix(u, nrow = J, ncol = S, byrow = TRUE))^2/2/sdN^2)
      recruitProb = surv*matrix(rel_seed, nrow = J, ncol = S, byrow = TRUE)/apply(surv*matrix(rel_seed, nrow = J, ncol = S, byrow = TRUE),1,sum)
      ####################
      # Replace dead individuals (same `rand` draw as the kill step)
      recruit = t(apply(recruitProb,1,recruit_fn))
      localC[[i]][rand<d,] = recruit[rand<d,]
      # Record local relative abundance
      P[i,] = apply(localC[[i]],2,sum)/sum(localC[[i]])
    }
    ####################
    # Transform abundance in presence/absence
    pres = matrix(0, nrow = n, ncol = S)
    pres[P>0] = 1
    occ = apply(pres,2,mean)
    # Record occupancy
    Series[time,] = c(time,occ)
  }
  # Output
  return(list(pres,Series,P))
}
# Convenient function used in the simulation
recruit_fn = function(prob) rmultinom(n = 1, size = 1, prob = prob)
|
7bd5f5995b5196c3a382c9ab27f2249c97993175
|
afde039a000379e7acb65de4e73b8ac175cb01ea
|
/tests/testthat/test-textmodel.R
|
1c8e4f9493b8734c6e98eb70cfbf1f8eacd63805
|
[] |
no_license
|
cran/newsmap
|
7002da70e06e282919747c250ba9aab38ecd4bab
|
dbebdb0b0393edf0d7c87b3a9569fdb92e3b7a9f
|
refs/heads/master
| 2023-04-08T10:30:21.596432
| 2023-03-16T07:50:02
| 2023-03-16T07:50:02
| 147,195,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,128
|
r
|
test-textmodel.R
|
# Shared fixtures for the textmodel tests below.
# FIX: library() instead of require() -- quanteda is a hard dependency and
# a missing package should fail loudly, not return FALSE and continue.
library(quanteda)
toks_test <- tokens(data_corpus_inaugural, remove_punct = TRUE)
dfmt_test <- dfm(toks_test) %>%
  dfm_remove(stopwords("en"))
# FIX: `level = 3` relied on partial matching of tokens_lookup()'s
# `levels` argument; spelled out.
toks_dict_test <- tokens_lookup(toks_test, data_dictionary_newsmap_en, levels = 3)
# NOTE(review): this dfm is built from toks_test, not toks_dict_test --
# confirm that is intended.
dfmt_dict_test <- dfm(toks_test)
test_that("textmodel_newsmap() works with different inputs", {
  # Fit the model from both a sparse (xtabs) and a dense (matrix) label
  # matrix; coefficients must be named after the label levels either way,
  # and no entropy weight is stored by default.
  toks <- tokens(data_corpus_inaugural, remove_punct = TRUE) %>%
    tokens_remove(stopwords())
  dfmt <- dfm(toks)
  dfmt$Party <- factor(dfmt$Party)
  smat <- xtabs( ~ docid(dfmt) + dfmt$Party, sparse = TRUE)
  map1 <- textmodel_newsmap(dfmt, smat)
  expect_equal(names(coef(map1)), levels(dfmt$Party))
  expect_null(map1$weight)
  mat <- as.matrix(smat)
  map2 <- textmodel_newsmap(dfmt, mat)
  expect_equal(names(coef(map2)), levels(dfmt$Party))
  expect_null(map2$weight)
  # invalid x/y types must error; an unknown argument only warns
  expect_error(textmodel_newsmap(list(), smat))
  expect_error(textmodel_newsmap(dfmt, list()))
  expect_error(textmodel_newsmap(dfmt, NULL))
  expect_warning(textmodel_newsmap(dfmt, mat, aaa = 10),
                 "aaa argument is not used")
  # use entropy weighting
  # "local": weights differ between labels (rows of the weight matrix)
  map_loc <- textmodel_newsmap(dfmt, mat, entropy = "local")
  expect_identical(dim(map_loc$weight), dim(map_loc$model))
  expect_false(all(map_loc$weight[1,] == map_loc$weight[2,]))
  # coef() returns the top weight * model scores per label
  expect_equal(coef(map_loc, 10)[[1]],
               head(sort(map_loc$weight[1,] * map_loc$model[1,], decreasing = TRUE), 10))
  # "average" and "global": the same weights apply to every label
  map_avg <- textmodel_newsmap(dfmt, mat, entropy = "average")
  expect_identical(dim(map_avg$weight), dim(map_avg$model))
  expect_true(all(map_avg$weight[1,] == map_avg$weight[2,]))
  expect_equal(coef(map_avg, 10)[[1]],
               head(sort(map_avg$weight[1,] * map_avg$model[1,], decreasing = TRUE), 10))
  map_glo <- textmodel_newsmap(dfmt, mat, entropy = "global")
  expect_identical(dim(map_glo$weight), dim(map_glo$model))
  expect_true(all(map_glo$weight[1,] == map_glo$weight[2,]))
  expect_equal(coef(map_glo, 10)[[1]],
               head(sort(map_glo$weight[1,] * map_glo$model[1,], decreasing = TRUE), 10))
  # the three entropy schemes must yield distinct weightings
  expect_false(all(map_glo$weight[1,] == map_loc$weight[1,]))
  expect_false(all(map_loc$weight[1,] == map_avg$weight[1,]))
  expect_false(all(map_avg$weight[1,] == map_glo$weight[1,]))
})
test_that("methods for textmodel_newsmap works correctly", {
  # Small fixture: four sentences mentioning Irish and Indian entities;
  # labels come from the newsmap dictionary, features are capitalized
  # words only (regex on the un-lowercased dfm).
  txt <- c("Ireland is famous for Guinness.",
           "Guinness began retailing in India in 2007.",
           "Cork is an Irish coastal city.",
           "Titanic departed Cork Harbour in 1912.")
  toks <- tokens(txt)
  label_toks <- tokens_lookup(toks, data_dictionary_newsmap_en, levels = 3)
  label_dfm <- dfm(label_toks)
  feat_dfm <- dfm(toks, tolower = FALSE) %>%
    dfm_select('^[A-Z][A-Za-z1-2]+', selection = "keep",
               valuetype = 'regex', case_insensitive = FALSE)
  map <- textmodel_newsmap(feat_dfm, label_dfm)
  # fitted object exposes exactly the documented components
  expect_equal(
    names(map),
    c("model", "entropy", "data", "weight", "feature", "call", "version")
  )
  # class association is calculated correctly
  # note: both Guinness and Cork occur in IE only once
  expect_equivalent(map$model['ie', c('Ireland', 'Guinness')],
                    map$model['ie', c('Irish', 'Cork')] )
  expect_identical(map$feature, featnames(feat_dfm))
  # rank argument is working (rank = 2 gives the second-best label)
  expect_equal(unname(predict(map)),
               factor(c("ie", "in", "ie", "ie"), levels = c("in", "ie")))
  expect_equal(unname(predict(map, rank = 2)),
               factor(c("in", "ie", "in", "in"), levels = c("in", "ie")))
  expect_error(predict(map, rank = 0))
  # different prediction outputs agree: top confidence equals the row
  # maximum of the all-label score matrix
  pred_top <- predict(map, confidence = TRUE)
  pred_all <- predict(map, type = 'all')
  expect_equivalent(pred_top$confidence.fit, apply(pred_all, 1, max))
  expect_equivalent(pred_top$confidence.fit[1], pred_top$confidence.fit[3])
  # deprecated argument name still works but warns
  expect_warning(
    predict(map, confidence.fit = TRUE),
    "'confidence.fit' is deprecated; use 'confidence'"
  )
  # verbose fitting reports each label as it is fitted
  expect_output(
    textmodel_newsmap(feat_dfm, label_dfm, verbose = TRUE),
    'Fitting textmodel_newsmap.*label = "ie".* label = "in"'
  )
  # print
  expect_output(
    print(map),
    paste0('(\n)',
           'Call:(\n)',
           'textmodel_newsmap\\(.*\\)(\n)')
  )
  expect_output(
    print(summary(map)),
    paste0('(\n)',
           'Call:(\n)',
           'textmodel_newsmap\\(.*\\)(\n)',
           '\n',
           'Labels:(\n)',
           '\\[1\\] "in" "ie"(\n)',
           '(\n)',
           'Data Dimension:(\n)',
           '\\[1\\] 4 7(\n)')
  )
})
test_that("textmodel_newsmap() raises error if dfm is empty", {
  # Trimming with a high min_termfreq empties the dfm; the model must
  # reject an all-zero x or y with an informative error.
  dfmt_x <- dfm(tokens("a b c"))
  dfmt_y <- dfm(tokens("A"))
  empty_x <- dfm_trim(dfmt_x, min_termfreq = 10)
  empty_y <- dfm_trim(dfmt_y, min_termfreq = 10)
  expect_error(textmodel_newsmap(empty_x, dfmt_y),
               "x must have at least one non-zero feature")
  expect_error(textmodel_newsmap(dfmt_x, empty_y),
               "y must have at least one non-zero feature")
})
test_that("label and drop_label are working", {
  # Four sentences whose dictionary hits cover six countries; exercise the
  # default labelling, label = "max", and drop_label = FALSE.
  texts <- c("American and Japanese leaders met in Tokyo.",
             "Paris Hilton visited British museum in London.",
             "India and Pakistan are neighbours.",
             "A man went to the Moon.")
  tok <- tokens(texts)
  tok_lab <- tokens_lookup(tok, data_dictionary_newsmap_en, levels = 3)
  x <- dfm(tok)
  y <- dfm(tok_lab)
  # default: every label that occurs is kept
  map_default <- textmodel_newsmap(x, y)
  expect_equal(names(coef(map_default)), c("us", "jp", "in", "pk", "gb", "fr"))
  # label = "max": only the dominant label per document survives
  map_max <- textmodel_newsmap(x, y, label = "max")
  expect_equal(names(coef(map_max)), c("jp", "in", "pk", "gb"))
  # drop_label = FALSE: all dictionary labels are retained
  map_keep <- textmodel_newsmap(x, y, drop_label = FALSE)
  expect_equal(names(coef(map_keep)), colnames(y))
})
test_that("accuracy() is correct", {
  # Per-class confusion counts for a 10-item, 3-class prediction with one
  # NA prediction and one misclassification.
  predicted <- c("c", NA, "b", "a", "b", "c", "b", "b", "a", "c")
  truth <- c("c", "b", "a", "a", "b", "c", "b", "b", "a", "c")
  result <- accuracy(predicted, truth)
  expect_equal(result$tp, c(2, 3, 3))
  expect_equal(result$fp, c(0, 1, 0))
  expect_equal(result$tn, c(6, 5, 6))
  expect_equal(result$fn, c(1, 0, 0))
  # the order of observations must not matter
  expect_identical(result, accuracy(rev(predicted), rev(truth)))
})
test_that("afe() is working", {
  # afe() is scored on a fixed fixture; the expected value is pinned with
  # a loose tolerance, and non-dfm inputs must error.
  txt <- c("American and Japanese leaders met in Tokyo.",
           "Paris Hilton visited British museum in London.",
           "India and Pakistan are neighbours.",
           "A man went to the Moon.")
  toks <- tokens(txt)
  toks_label <- tokens_lookup(toks, data_dictionary_newsmap_en, levels = 3)
  dfmt <- dfm(toks)
  dfmt_label <- dfm(toks_label)
  expect_equal(afe(dfmt, dfmt_label),
               7.90, tolerance = 0.1)
  # invalid argument types
  expect_error(afe(dfmt, matrix()))
  expect_error(afe(list(), dfmt_label))
})
|
f2845933ffc0beeee29abf007e8c2fa0df347534
|
bde0a116ee7c08e1006683dcc70e0db43150baa7
|
/man/ptME.Rd
|
13a52aae47f7a855ec4a0e50b74079ab70dfa894
|
[] |
no_license
|
matloff/partools
|
bea3382d444d3048c4522eac31601dfe378e7ebd
|
14a7a8c701167280d71b2991cb03f8232ddf4f19
|
refs/heads/master
| 2022-10-14T13:46:31.102122
| 2022-10-13T22:31:03
| 2022-10-13T22:31:03
| 29,333,839
| 43
| 14
| null | 2022-10-13T22:31:04
| 2015-01-16T05:14:45
|
R
|
UTF-8
|
R
| false
| false
| 1,856
|
rd
|
ptME.Rd
|
\name{ptMEinit,ptMEinitSrvrs,ptMEinitCons,ptMEsend,ptMErecv,ptMEclose, ptMEtest,ptMEtestWrkr}
\alias{ptMEinit}
\alias{ptMEinitSrvrs}
\alias{ptMEinitCons}
\alias{ptMEsend}
\alias{ptMErecv}
\alias{ptMEclose}
\alias{ptMEtest}
\alias{ptMEtestWrkr}
\title{
Message-passing utilities.
}
\description{Simple MPI-like functions.}
\usage{
ptMEinit(cls)
ptMEinitSrvrs()
ptMEinitCons(srvr)
ptMEsend(obj,dest)
ptMErecv(src)
}
\arguments{
\item{cls}{A cluster for the \pkg{parallel} package.}
\item{srvr}{A server, one of the worker nodes.}
\item{src}{A worker node from which to receive a message.}
\item{dest}{A worker node to which a message is to be sent.}
\item{obj}{An R object.}
}
\details{
This system of functions implements a message-passing system, similar to
MPI/Rmpi but much simpler and without the need for configuration.
Functions:
\itemize{
\item \code{ptMEinit}: General system initialization.
\item \code{ptMEinitSrvrs}: Called by \code{ptMEinit}. Sets up
socket connections for each pair of worker nodes. Each worker node
hosts a server for use by all nodes having \code{ partoolsenv$myid}
less than the server. Returns the server port.
\item \code{ptMEinitCons}: Also called by \code{ptMEinit}. Each worker
node, acting as a client, makes a connection with all servers having
\code{partoolsenv$myid} greater than the client.
\item \code{ptMEsend}: Send the given object to the given
destination.
\item \code{ptMErecv}: Receive an object from the given
source. Returns the received object.
\item \code{ptMEclose}: Close all worker-worker connections.
}
}
\value{
The function \code{ptMErecv()} returns the received value. The
intermediate function \code{ptMEinitSrvrs} returns a randomly chosen
server port number.
}
\examples{
}
\author{
Robin Yancey, Norm Matloff
}
|
bba0694affacfbc3dcd0130ee23d5b962bd5a0db
|
0257145576232d6fff7330d7bba434a31839b912
|
/R/jaws_pca.R
|
c9dd221235404d2351c512106d948d9eb9b3d2dd
|
[] |
no_license
|
ncchung/jaws
|
4176c77aa53489749c88f5b59d77954e209693c8
|
6325b32b97ea2af806168ec986b3ebfc8b304696
|
refs/heads/master
| 2021-01-10T07:02:53.645761
| 2016-11-17T11:50:15
| 2016-11-17T11:50:15
| 49,068,052
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,416
|
r
|
jaws_pca.R
|
#' The Jackstraw Weighted Shrinkage Estimation Method for Sparse Loadings in Principal Component Analysis
#'
#' Estimates sparse/shrunken loadings of principal component analysis.
#' Based on statistical significance of association between variables and principal components,
#' the sample loadings of principal components are shrunken towards zero, which improves their accuracy.
#' The only required inputs are the data matrix \code{dat} and the number of principal components \code{r} whose loadings you would like to estimate.
#'
#' By default, \code{jaws.pca} computes two canonical jackstraw weighted shrinkage estimators, namely \code{PIP} and \code{PNV}.
#' Additionally, other extra shrinkage techniques may apply, such as combining two canonical estimaotrs by setting \code{extra.shrinkage="PIPhard"}
#' and applying soft-thresholding to local fdr by setting \code{extra.shrinkage} to numerical threshold values between 0 and 1.
#' Please provide \code{r} numerical threshold values to be applied to \code{r} principal components.
#'
#' It is strongly advised that you take a careful look at your data and use appropriate graphical and statistical criteria
#' to determine a number of significant PCs, \code{r}. For example, see a contributed R package called `nFactors'.
#' In a case when you fail to specify \code{r}, \code{r} will be estimated from permutation Parallel Analysis (Buja and Eyuboglu, 1992)
#' via a function \link{permutationPA}, with a very liberal threshold.
#'
#' If \code{s} is not supplied, \code{s} is set to about 10\% of \code{m} variables.
#' If \code{B} is not supplied, \code{B} is set to \code{m*10/s}.
#'
#' @param dat a data matrix with \code{m} rows as variables and \code{n} columns as observations.
#' @param p a \code{m * r} matrix of p-values for association tests between variables and \code{r} principal components, generally computed from the jackstraw method. If \code{p} is not given, \code{jackstraw.PCA} is automatically applied.
#' @param r a number (a positive integer) of significance principal components.
#' @param s a number (a positive integer) of ``synthetic'' null variables (optional).
#' @param B a number (a positive integer) of resampling iterations (optional).
#' @param stat.shrinkage PNV shrinkage may be applied to "F-statistics" or "loadings" (default: F-statistics).
#' @param extra.shrinkage extra shrinkage methods may be used; see details below (optional).
#' @param verbose a logical specifying to print the progress (default: TRUE).
#' @param save.all a logical specifying to save all objects, including a large SVD object (default: FALSE).
#' @param seed a seed for the random number generator (optional).
#'
#' @return \code{jaws.pca} returns a list consisting of
#' \item{p}{p-values for association tests between variables and each of \code{r} principal components}
#' \item{pi0}{proportion of variables not associated with \code{r} principal components, individually}
#' \item{svd}{SVD object from decomposing \code{dat}}
#' \item{PIP}{a list of outputs derived from the posterior inclusion probabilities method (including \code{pr}, \code{u}, \code{var}, \code{PVE})}
#' \item{PNV}{a list of outputs derived from the proportion of null variables method (including \code{pi0}, \code{u}, \code{var}, \code{PVE})}
#' @return With appropriate \code{extra.shrinkage} options (for details, see the Supplementary Information of Chung and Storey (2013), the output may also include
#' \item{PIPhard}{a list of outputs from hard-threshoding the \code{PIP} loadings (including \code{u}, \code{var}, \code{PVE})}
#' \item{PIPsoft}{a list of outputs from soft-threshoding the \code{PIP} loadings (including \code{pr}, \code{u}, \code{var}, \code{PVE})}
#'
#' @section Detailed explanation of the output contained in \code{PIP} and \code{PNV}:
#' \describe{
#' \item{pr}{a matrix of posterior inclusion probabilities (equivalent to 1-lfdr) for \code{m} coefficients and \code{r} PCs.}
#' \item{pi0}{a vector of estimated proportion of null variables for \code{r} PCs.}
#' \item{u}{a \code{m*r} matrix of shrunken loadings.}
#' \item{var}{a vector of shrunken variances explained by \code{r} PCs.}
#' \item{PVE}{a vector of shrunken percent variances explained by \code{r} PCs.}
#' }
#'
#' @export jaws.pca
#' @importFrom corpcor fast.svd
#' @import jackstraw
#' @importFrom qvalue pi0est lfdr qvalue
#' @author Neo Chung \email{nchchung@@gmail.com}
#' @references Chung and Storey (2015) Forthcoming
#'
#' @seealso \link{jackstraw.PCA} \link{jaws.cov}
#'
#' @examples
#' set.seed(1234)
#' ## simulate data from a latent variable model: Y = BX + E
#' B = c(rep(1,50),rep(-1,50), rep(0,900))
#' X = rnorm(20)
#' E = matrix(rnorm(1000*20), nrow=1000)
#' dat = B %*% t(X) + E
#' dat = t(scale(t(dat), center=TRUE, scale=FALSE))
#'
#' ## estimate sparse loadings in PCA
#' jaws.pca.out = jaws.pca(dat, r=1)
#'
jaws.pca <- function(dat, p=NULL, r=NULL, s=NULL, B=NULL, stat.shrinkage="F-statistics", extra.shrinkage=NULL, verbose=TRUE, seed=NULL, save.all=TRUE) {
  if(!is.null(seed)) set.seed(seed)
  m=dim(dat)[1]; n=dim(dat)[2]
  # Validate user-supplied soft thresholds: one per PC, each in [0, 1].
  if(is.numeric(extra.shrinkage)) {
    if(length(extra.shrinkage) != r) stop(paste0("For r=",r," principal components, provide r threshold values."))
    if(min(extra.shrinkage) < 0 | max(extra.shrinkage) > 1) { stop(paste0("For soft-thresholding for local FDRs , threshold values must be in the valid range between 0 and 1.")) }
  }
  if(is.null(r)) {
    warning("The number of significant PCs (r) is missing; this is strongly advised to manually determine r using various statistical and graphical criteria.")
    r = permutationPA(dat=dat, threshold=.05, verbose=verbose)$r
    # FIX: the message previously said "0.5" although the threshold
    # actually passed to permutationPA() above is 0.05.
    message(paste0("Permutation Parallel Analysis, with a liberal threshold of 0.05, estimated r = ", r, "."))
  }
  if(!(r > 0 && r < n)) { stop("A number of significant PCs is not in valid range between 1 and n."); }
  if(!is.null(p)) p = as.matrix(p)
  if(is.null(p) & verbose==TRUE) message("\nThe association significance between variables and principal components are computed by the jackstraw.")
  if(stat.shrinkage == "F-statistics") {
    # compute p-values when stat.shrinkage=="F-statistics"
    # NOTE(review): this branch always re-runs the jackstraw, even when a
    # p-value matrix was supplied, because the observed F-statistics
    # (Fstat, needed for PNV shrinkage below) are only available from a
    # fresh run; a user-supplied p is therefore ignored in this mode --
    # confirm this is intended.
    p = matrix(NA, nrow=m, ncol=r)
    Fstat = matrix(NA, nrow=m, ncol=r)
    for(i in 1:r) {
      jackstraw.output = jackstraw.PCA(dat, r1=i, r=r, s=s, B=B, verbose=verbose)
      p[,i] = jackstraw.output$p.value
      Fstat[,i] = jackstraw.output$obs.stat
      rm(jackstraw.output)
    }
  } else if(is.null(p) & stat.shrinkage == "loadings") {
    # compute p-values when stat.shrinkage=="loadings"
    p = matrix(NA, nrow=m, ncol=r)
    for(i in 1:r) {
      p[,i] = jackstraw.PCA(dat, r1=i, r=r, s=s, B=B, verbose=verbose)$p.value
    }
  }
  if(ncol(p) != r | nrow(p) != m) stop("The p-value matrix must match m (nrow) and r (ncol).")
  # compute pi0
  # averaging two methods to calculate pi0 seems to be stable and accurate (esp. at a very high pi0)
  pi0 = matrix(0, nrow=2, ncol=r)
  for(i in 1:r) {
    pi0[1,i] = tryCatch(pi0est(p[,i], pi0.method="smoother")$pi0, error=function(tmp) NA)
    pi0[2,i] = tryCatch(pi0est(p[,i], pi0.method="bootstrap")$pi0, error=function(tmp) NA)
  }
  pi0 = apply(pi0, 2, function(x) mean(x, na.rm=TRUE))
  svd.raw = fast.svd(dat)
  var.total = sum(svd.raw$d^2) # =sum(diag(dat %*% t(dat))) may not be computed when m is large
  r2 = r2.est(dat, svd.raw$u[,1:r,drop=FALSE] %*% diag(svd.raw$d[1:r], r) %*% t(svd.raw$v[,1:r,drop=FALSE]))
  ##############################################################################################################################
  ## Posterior Inclusion Probabilities: Empirical Bayes Shrinkage
  if(verbose==TRUE) message(paste0("Computing the PIP Shrunken Loadings for the PC (r=", r, ") : "))
  PIP = list(pr=matrix(NA, nrow=m, ncol=r), u=matrix(NA, nrow=m, ncol=r), var=vector("numeric", r), PVE=vector("numeric", r))
  for(i in 1:r) {
    # pr = 1 - local fdr: posterior probability that the variable is non-null
    PIP$pr[,i] = 1-lfdr(p[,i], pi0=pi0[i])
    PIP$u[,i] = PIP$pr[,i] * svd.raw$u[,i]
    PIP$var[i] = svd.raw$d[i]^2 * sum(PIP$u[,i]^2) #=sum(diag(e.PIP %*% t(e.PIP)))
    PIP$PVE[i] = PIP$var[i] / var.total
    if(verbose==TRUE) cat(paste(i," "))
  }
  PIP$r2 = r2.est(dat, PIP$u %*% diag(svd.raw$d[1:r], r) %*% t(svd.raw$v[,1:r,drop=FALSE]))
  ## proportion of null variables: Pi0 Hard Thresholding
  if(verbose==TRUE) message(paste0("Computing the PNV Shrunken Loadings for the PC (r=", r, ") : "))
  PNV = list(pi0=pi0, u=as.matrix(svd.raw$u[,1:r]), var=vector("numeric", r), PVE=vector("numeric", r))
  for(i in 1:r) {
    # Zero out the m*pi0 least-significant loadings, ranked either by the
    # observed F-statistics or by the loading magnitudes themselves.
    if(stat.shrinkage == "F-statistics") {
      PNV$u[which(rank(abs(Fstat[,i])) <= m*pi0[i]),i] = 0
    } else if(stat.shrinkage == "loadings") {
      PNV$u[which(rank(abs(PNV$u[,i])) <= m*pi0[i]),i] = 0
    }
    PNV$var[i] = svd.raw$d[i]^2 * sum(PNV$u[,i]^2) #=sum(diag(e.PNV %*% t(e.PNV)))
    PNV$PVE[i] = PNV$var[i] / var.total
    if(verbose==TRUE) cat(paste(i," "))
  }
  PNV$r2 = r2.est(dat, PNV$u %*% diag(svd.raw$d[1:r], r) %*% t(svd.raw$v[,1:r,drop=FALSE]))
  if(save.all == FALSE) {
    out = list(call=match.call(), p=p, pi0=pi0, PIP=PIP, PNV=PNV, r2=r2)
  } else {
    out = list(call=match.call(), p=p, pi0=pi0, svd=svd.raw, PIP=PIP, PNV=PNV, r2=r2)
  }
  ##############################################################################################################################
  ## Posterior Inclusion Probabilities + proportion of null variables (Pi0)
  if("PIPhard" %in% extra.shrinkage) {
    # FIX: progress message is printed only when PIPhard is computed
    # (previously it was emitted unconditionally).
    if(verbose==TRUE) message(paste0("Computing the PIPhard Shrunken Loadings for the PC (r=", r, ") : "))
    PIPhard = list(u=matrix(NA, nrow=m, ncol=r), var=vector("numeric", r), PVE=vector("numeric", r))
    for(i in 1:r) {
      PIPhard$u[,i] = PIP$pr[,i] * PNV$u[,i]
      PIPhard$var[i] = svd.raw$d[i]^2 * sum(PIPhard$u[,i]^2)
      PIPhard$PVE[i] = PIPhard$var[i] / var.total
      if(verbose==TRUE) cat(paste(i," "))
    }
    out = c(out, list(PIPhard=PIPhard))
  }
  ## Posterior Inclusion Probabilities with Additional Soft Thresholding
  if("PIPsoft" %in% extra.shrinkage) {
    # FIX: progress message moved inside the branch (see PIPhard above).
    if(verbose==TRUE) message(paste0("Computing the PIPsoft Shrunken Loadings for the PC (r=", r, ") : "))
    PIPsoft = list(threshold=vector(mode="numeric", length=r), pr=matrix(NA, nrow=m, ncol=r), u=matrix(NA, nrow=m, ncol=r), var=vector("numeric", r), PVE=vector("numeric", r))
    for(i in 1:r) {
      PIPsoft$threshold[i] = max(PIP$pr[which(rank(PIP$pr[,i]) <= round(pi0[i]*m)),i])
      if(PIPsoft$threshold[i]<1) PIPsoft$pr[,i] = sapply(PIP$pr[,i], function(x) max((x - PIPsoft$threshold[i]), 0) / (1-PIPsoft$threshold[i]))
      if(PIPsoft$threshold[i]==1) PIPsoft$pr[,i] = rep(0, m)
      PIPsoft$u[,i] = PIPsoft$pr[,i] * svd.raw$u[,i]
      PIPsoft$var[i] = svd.raw$d[i]^2 * sum(PIPsoft$u[,i]^2)
      PIPsoft$PVE[i] = PIPsoft$var[i] / var.total
      if(verbose==TRUE) cat(paste(i," "))
    }
    out = c(out, list(PIPsoft=PIPsoft))
  }
  if(is.numeric(extra.shrinkage)) {
    PIPsoft = list(threshold=extra.shrinkage, lfdr=matrix(NA, nrow=m, ncol=r), u=matrix(NA, nrow=m, ncol=r), var=vector("numeric", r), PVE=vector("numeric", r))
    for(i in 1:r) {
      # FIX: PIP stores pr = 1 - lfdr, so the local fdr is 1 - PIP$pr; the
      # original code referenced the non-existent PIP$lfdr (NULL) and
      # always used column 1 instead of column i.
      if(PIPsoft$threshold[i]>0) PIPsoft$lfdr[,i] = sapply(1 - PIP$pr[,i], function(x) (PIPsoft$threshold[i] - max((PIPsoft$threshold[i] - x), 0)) / PIPsoft$threshold[i])
      if(PIPsoft$threshold[i]==0) PIPsoft$lfdr[,i] = rep(1, m)
      PIPsoft$u[,i] = (1-PIPsoft$lfdr[,i]) * svd.raw$u[,i]
      PIPsoft$var[i] = svd.raw$d[i]^2 * sum(PIPsoft$u[,i]^2)
      PIPsoft$PVE[i] = PIPsoft$var[i] / var.total
    }
    out = c(out, list(PIPsoft=PIPsoft))
  }
  return(out)
}
|
ac0e18845016e516d4062101d4bba7f48f4a8f31
|
483a05fd21e1cd199346d5045ae16060057501d2
|
/inst/tinytest/test_tiledbarray_extra.R
|
01c1121442d1eeadf1fe2f4537c50375215ca7e7
|
[
"MIT"
] |
permissive
|
TileDB-Inc/TileDB-R
|
34862c95815a77ba9b925f8d6e24dd362b3c617a
|
71901f8c9d5de860dbe078a0e708dda8f1b9b407
|
refs/heads/master
| 2023-09-04T05:28:09.468320
| 2023-08-23T13:24:33
| 2023-08-23T13:24:33
| 91,851,655
| 90
| 20
|
NOASSERTION
| 2023-09-14T20:48:10
| 2017-05-19T23:07:10
|
R
|
UTF-8
|
R
| false
| false
| 2,871
|
r
|
test_tiledbarray_extra.R
|
library(tinytest)
library(tiledb)
# NOTE(review): this exit_file() unconditionally skips everything below;
# remove it to re-enable the tests in this file.
exit_file("Skip for now")
isOldWindows <- Sys.info()[["sysname"]] == "Windows" && grepl('Windows Server 2008', osVersion)
if (isOldWindows) exit_file("skip this file on old Windows releases")
isMacOS <- (Sys.info()['sysname'] == "Darwin")
if (tiledb_version(TRUE) < "2.7.0") exit_file("Needs TileDB 2.7.* or later")
ctx <- tiledb_ctx(limitTileDBCores())
hasDataTable <- requireNamespace("data.table", quietly=TRUE)
hasTibble <- requireNamespace("tibble", quietly=TRUE)
## GitHub Actions had some jobs killed on the larger data portion so we dial mem use down
if (Sys.getenv("CI") != "") set_allocation_size_preference(1024*1024*5)
## this test tickles a valgrind issue 'Conditional jump or move depends on uninitialized value'
## test encrypted arrays via high-level accessor
## (lower-level tests in test_densearray and test_arrayschema)
tmp <- tempfile()
dir.create(tmp)
encryption_key <- "0123456789abcdeF0123456789abcdeF"
## create 4x4 with single attribute
dom <- tiledb_domain(dims = c(tiledb_dim("rows", c(1L, 4L), 4L, "INT32"),
                              tiledb_dim("cols", c(1L, 4L), 4L, "INT32")))
schema <- tiledb_array_schema(dom, attrs=c(tiledb_attr("a", type = "INT32")), sparse = TRUE)
invisible( tiledb_array_create(tmp, schema, encryption_key) )
## write three cells into the encrypted sparse array
I <- c(1, 2, 2)
J <- c(1, 4, 3)
data <- c(1L, 2L, 3L)
A <- tiledb_array(uri = tmp, encryption_key = encryption_key)
A[I, J] <- data
## read back a sub-rectangle as a data.frame and check the two cells in it
A <- tiledb_array(uri = tmp, return_as="data.frame", encryption_key = encryption_key)
chk <- A[1:2, 2:4]
expect_equal(nrow(chk), 2)
expect_equal(chk[,"rows"], c(2L,2L))
expect_equal(chk[,"cols"], c(3L,4L))
expect_equal(chk[,"a"], c(3L,2L))
unlink(tmp, recursive = TRUE)
## delete fragment (2.12.0 or later)
if (tiledb_version(TRUE) < "2.12.0") exit_file("Remainder needs 2.12.0 or later")
# Write N fragments, recording a timestamp per write so a time range can
# be used below to delete the middle three fragments.
N <- 5
ts <- rep(Sys.time(), N)
tmp <- tempfile()
dir.create(tmp)
uri <- file.path(tmp, "array")
D <- data.frame(index = paste0("A", format(trunc(runif(10)*1000))), value = cumsum(runif(10)))
fromDataFrame(D, uri, col_index=1, sparse=TRUE)
ts[1] <- Sys.time()
for (i in 2:N) {
  # sleep keeps the fragment timestamps distinct
  Sys.sleep(0.25)
  D <- data.frame(index = paste0(LETTERS[i], format(trunc(runif(10)*1000))), value = cumsum(runif(10)))
  fromDataFrame(D, uri, col_index=1, mode="append", sparse=TRUE)
  ts[i] <- Sys.time()
}
fraginfo <- tiledb_fragment_info(uri)
expect_equal(tiledb_fragment_info_get_num(fraginfo), N) # N (ie 5) before deletion
# fragment deletion requires the array opened in "MODIFY_EXCLUSIVE" mode
arr <- tiledb_array(uri)
arr <- tiledb_array_open(arr, "MODIFY_EXCLUSIVE")
expect_true(tiledb_array_is_open(arr))
expect_true(is(arr, "tiledb_array"))
expect_true(tiledb_array_delete_fragments(arr, ts[2]-0.1, ts[4]+0.1))
arr <- tiledb_array_close(arr)
fraginfo <- tiledb_fragment_info(uri)
expect_equal(tiledb_fragment_info_get_num(fraginfo), 2) # 2 after three deleted
unlink(tmp, recursive = TRUE)
|
aaa578dfacfc6adfe24f5fd46396fa724e62adc7
|
a9ec02af10525990255641f66ef82b24c6ae7502
|
/man/soil_erosion.Rd
|
cb528644e17f802aad923d784d5fd9aba87aefb5
|
[] |
no_license
|
tcobian/SoilCarbon
|
e37bb7ff87e0edd538928f1e99a8b11e8d3c9f19
|
1eb96793ec84a3b8d6da2a3c1dbf945d6e3524c9
|
refs/heads/master
| 2021-02-16T04:53:18.454825
| 2020-03-16T17:13:25
| 2020-03-16T17:13:25
| 244,967,971
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
soil_erosion.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soil_erosion.R
\name{soil_erosion}
\alias{soil_erosion}
\title{Soil Erosion Rates}
\usage{
soil_erosion(area, time)
}
\arguments{
\item{area}{Land area, in units of hectares.}

\item{time}{Time period, in years.}
}
\value{
This function will return the amount of topsoil lost, in kg, after the specified time period.
}
\description{
Soil Erosion Rates
}
\references{
}
\author{
Tyler D. Cobian
}
|
04fe91807c94de51a4b7b88b87a17ca70b2d493c
|
7a5810ea96d123ed70891a64a39104406a1e8429
|
/190815_hichip_statistics.R
|
77f6c6617f6cb33cf1f4335b8cd4c994fee7f95a
|
[] |
no_license
|
wesleylcai/bmcmedgenomics2020_metastasis
|
24ee04028028bcbb292f69f6cee42f8b04b4281a
|
16c9a013567a08c242e2c18ee58e57bf5e4235b9
|
refs/heads/master
| 2020-11-24T15:13:43.451580
| 2020-08-18T11:40:32
| 2020-08-18T11:40:32
| 228,210,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,635
|
r
|
190815_hichip_statistics.R
|
# Last Updated:
# Author: Wesley Cai
# Purpose: summary statistics on replicated HiChIP interactions
# (uniqueness, cross-chromosome fraction, promoter/enhancer filtering).
library(data.table)
library(ggplot2)
library(reshape2)
# load gsea
# source("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/resources/gsea/gmt/load_gmt.R")
# load for revamp
source("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/190720_load_files.R")
mainwd <- "/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/hichip/"
inputfolder <- "input/"
outputfolder <- "annotation/"
dir.create(file.path(mainwd, inputfolder), recursive = TRUE, showWarnings = FALSE)
dir.create(file.path(mainwd, outputfolder), recursive = TRUE, showWarnings = FALSE)
# NOTE(review): hard-coded absolute paths plus setwd() make this script
# machine-specific; consider parameterizing the working directory.
setwd(file.path(mainwd, outputfolder))
# Loads the interaction data.tables (inter.final.all.dt is used below)
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/hichip/output/inter.final.dt.RData")
load("/Users/wcai/Google_Drive/_Lab/Data/Bioinformatics/190720_revamp/analysis/hichip/output/inter.final.dt.all.RData")
## Total number of unique, replicated interactions
# Count peak-id pairs duplicated in either orientation; 0 means every
# interaction is unique.
length(which(duplicated(c(paste0(inter.final.all.dt$atac1.peakid, inter.final.all.dt$atac2.peakid), paste0(inter.final.all.dt$atac2.peakid, inter.final.all.dt$atac1.peakid)))))
# Since ^ is 0 that means all interactions are unique and total inter is defined as nrow
nrow(inter.final.all.dt)
# 1659221 interactions
tmp <- inter.final.all.dt#[intercount.sum > 20]
# Percent of cross-chromosome interactions (only 0.652%)
# The chromosome is the prefix of the peak id before the first "-".
tmp2 <- tmp[,.(atac1.peakid, atac2.peakid)]
tmp2[,atac1.chr := gsub("-.*", "", atac1.peakid)]
tmp2[,atac2.chr := gsub("-.*", "", atac2.peakid)]
nrow(tmp2[atac1.chr != atac2.chr])/nrow(tmp2)
# Remove prom-prom interactions of same genes
# NOTE(review): `tmp.filter <- inter.final.all.dt` does not copy a
# data.table, so the `:=` updates below also modify inter.final.all.dt;
# use copy() if the original must stay untouched.
tmp.filter <- inter.final.all.dt
tmp.filter[is.na(annote1), annote1 := "unknown"]
tmp.filter[is.na(annote2), annote2 := "unknown"]
tmp.filter[, gene1 := gsub("_.*", "", gene1)]
tmp.filter[, gene2 := gsub("_.*", "", gene2)]
tmp.filter <- tmp.filter[!(gene1 == gene2 & annote1 == "promoter" & annote2 == "promoter"),]
#### Plot fractions with respect to connection strength ####
max.inter.count <- 50
inter.count.res <- list()
for(line in c("lm", "brm", "all")){
for(direction in c("up", "down")){
if(line == "all"){
direction <- "all"
tmp.filter.line <- tmp.filter
} else {
# Only keep changed peaks
if(direction == "up"){
changed.peaks <- res.list[["atac"]][get(paste0(line, "_l2fc")) > 0 & get(paste0(line, "_padj")) < 0.05,get("peakid")]
} else {
changed.peaks <- res.list[["atac"]][get(paste0(line, "_l2fc")) < 0 & get(paste0(line, "_padj")) < 0.05,get("peakid")]
}
tmp.filter.line <- tmp.filter[atac1.peakid %in% changed.peaks | atac2.peakid %in% changed.peaks,]
}
fract.dt <- data.table(inter.count = 0:max.inter.count)
for(i in fract.dt$inter.count){
message(i)
tmp <- tmp.filter.line[intercount.sum > i]
total.n <- nrow(tmp)
prom.prom.n <- nrow(tmp[annote1 == "promoter" & annote2 == "promoter",])
prom.enh.n <- nrow(tmp[(annote1 == "enhancer" & annote2 == "promoter") | (annote2 == "enhancer" & annote1 == "promoter"),])
enh.enh.n <- nrow(tmp[annote1 == "enhancer" & annote2 == "enhancer",])
unkown.unknown.n <- nrow(tmp[annote1 == "unknown" & annote2 == "unknown",])
enh.unknown.n <- nrow(tmp[(annote1 == "enhancer" & annote2 == "unknown") | (annote2 == "enhancer" & annote1 == "unknown"),])
prom.unknown.n <- nrow(tmp[(annote1 == "promoter" & annote2 == "unknown") | (annote2 == "promoter" & annote1 == "unknown"),])
fract.dt[inter.count == i, prom.prom := prom.prom.n/total.n]
fract.dt[inter.count == i, prom.enh := prom.enh.n/total.n]
fract.dt[inter.count == i, enh.enh := enh.enh.n/total.n]
fract.dt[inter.count == i, unkown.unknown := unkown.unknown.n/total.n]
fract.dt[inter.count == i, enh.unknown := enh.unknown.n/total.n]
fract.dt[inter.count == i, prom.unknown := prom.unknown.n/total.n]
}
fract.dt.melt <- melt(fract.dt, id.vars = "inter.count", measure.vars = colnames(fract.dt)[2:ncol(fract.dt)])
mp <- ggplot(fract.dt.melt, aes(inter.count, value, fill = variable)) +
geom_area() +
xlim(c(0,max.inter.count)) +
scale_fill_viridis_d()
mp
ggsave(paste0(max.inter.count, ".intercount.v.annote.fract.", line, ".", direction, ".png"), mp, width = 3, height = 5)
inter.count.res[[line]][[direction]] <- fract.dt.melt
if(line == "all") break
}
}
#### Plot fractions with respect to connection strength ####
#### Plot all fractions combined ####
inter.count.select <- 2
fract.dt.melt.combined <- data.table()
for(line in names(inter.count.res)){
for(direction in names(inter.count.res[[line]])){
tmp <- inter.count.res[[line]][[direction]][inter.count == inter.count.select,]
tmp[, group := paste(line, direction, sep = ".")]
fract.dt.melt.combined <- rbind(fract.dt.melt.combined, tmp)
}
}
mp <- ggplot(fract.dt.melt.combined, aes(group, value, fill = variable)) +
geom_bar(stat = "identity") +
scale_fill_viridis_d()
mp
ggsave(paste0(inter.count.select, ".intercount.all_fractions.png"), mp, width = 6, height = 5)
#### Plot all fractions combined ####
## Total number of promoter-promoter interactions: 156763 (9.45%)
tmp.count <- nrow(tmp[annote1 == "promoter" & annote2 == "promoter",])
tmp.count/nrow(tmp)
## Total number of promoter-enhancer interactions: 230804 (13.9%)
tmp.count <- nrow(tmp[(annote1 == "enhancer" & annote2 == "promoter") | (annote2 == "enhancer" & annote1 == "promoter"),])
tmp.count/nrow(tmp)
## Total number of enhancer-enhancer interactions: 364870 (22.0%)
tmp.count <- nrow(tmp[annote1 == "enhancer" & annote2 == "enhancer",])
tmp.count/nrow(tmp)
## Total number of NA-NA interactions: 173308 (10.4%)
tmp.count <- nrow(tmp[is.na(annote1) & is.na(annote2),])
tmp.count/nrow(tmp)
## Total number of enhancer-NA interactions: 442582 (27%)
tmp.count <- nrow(tmp[(annote1 == "enhancer" & is.na(annote2)) | (annote2 == "enhancer" & is.na(annote1)),])
tmp.count/nrow(tmp)
## Total number of promoter-NA interactions: 290894 (17.5%)
tmp.count <- nrow(tmp[(annote1 == "promoter" & is.na(annote2)) | (annote2 == "promoter" & is.na(annote1)),])
tmp.count/nrow(tmp)
## Gene interactions (all): 11961 (92.1%)
length(unique(c(tmp.filter$gene1, tmp.filter$gene2)))/
length(unique(res.list[["rna"]][,get("ensembl")]))
## Gene interactions (enh/prom): 11399 (87.7%)
tmp.filter.annote <- tmp.filter[annote1 %in% c("promoter", "enhancer") & annote2 %in% c("promoter", "enhancer")]
length(unique(c(tmp.filter.annote$gene1, tmp.filter.annote$gene2)))/
length(unique(res.list[["rna"]][,get("ensembl")]))
|
f479aa88e96e270dbd3403dc8eb96e126bf63081
|
73fca4174b7163e4e98899f416692f0ad85ed389
|
/code.R
|
ae0f378463990f8b469b12b86f64fbf8f79b85ab
|
[] |
no_license
|
akhil93/ExData_Plotting2
|
78c77a21cef54f6e2bc262760370f8c5625fb9f3
|
5c511636d5c9037d7aa2eedeb33d20db973e4571
|
refs/heads/master
| 2020-06-04T04:30:37.762001
| 2014-06-02T05:49:04
| 2014-06-02T05:49:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,804
|
r
|
code.R
|
#Plot 1
# Load the given data sets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Total emissions per year: sum the Emissions observations by year.
total_emissions <- aggregate(Emissions ~ year, data = NEI, sum, na.rm = TRUE)
# Capture the plot using the png() graphics device.
png("plot1.png", height = 600, width = 480)
# Fixed: the original referenced an undefined object `a`; the aggregate above
# is stored in `total_emissions`.
barplot((total_emissions$Emissions/1000000), total_emissions$year,
        names.arg = c("1999","2002","2005","2008"), # scaled down the Emissions
        col = "steelblue", xlab = "Years",
        ylab = expression("Total "* PM[2.5]*" Emissions in Mega-Tons"),
        main = expression("Emissions of "* PM[2.5]*" by each year(all sources)"))
dev.off()
#Plot 2
# Let's load the given data set's
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Here we need to find the total emissions in the Baltimore City, Maryland. So lets subset only the data with fips == "24510".
baltimore = subset(NEI, NEI$fips == 24510)
# To plot the total emissions, we need to sum all the observations of Emissions by Year. I used Aggregate() function to calculate the Total #Emissions.
total_emissions_baltimore <- aggregate(Emissions ~ year, data = baltimore, sum, na.rm=TRUE)
#Let's capture the plot using the png() Graphics Device.
png("plot2.png", height = 600, width = 600)
# NOTE(review): the second positional argument to barplot() is `width` (bar
# widths), so passing the year vector here sets relative bar widths, not the
# x positions -- probably unintended; confirm the desired appearance.
barplot(total_emissions_baltimore$Emissions,total_emissions_baltimore$year,
names.arg = c("1999","2002","2005","2008"),col = "steelblue",
xlab = "Years",ylab = expression("Total "* PM[2.5]*" Emissions in Tons"),
main = expression("Emissions of "* PM[2.5]*" by each year in Baltimore City, Maryland (all sources)"))
dev.off()
#plot 3
# Load the given data sets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# qplot() comes from ggplot2, which was never attached in the original script.
library(ggplot2)
# Keep only Baltimore City, Maryland observations (fips == "24510").
baltimore <- subset(NEI, NEI$fips == 24510)
# Total emissions by year AND source type for the faceted plot.
total_emissions_baltimore_type <- aggregate(Emissions ~ year + type, data = baltimore, sum, na.rm = TRUE)
# Capture the plot using the png() graphics device.
# Fixed: the original line had a stray "cc" after the png() call (syntax error).
png("plot3.png", height = 300, width = 800)
# Fixed: plot the by-type aggregate (the original referenced
# `total_emissions_baltimore`, which is not defined in this section), and
# print() explicitly so the plot renders when the script is source()d.
print(qplot(year, Emissions, data = total_emissions_baltimore_type, facets = . ~ type) +
  scale_x_continuous("Year", breaks = total_emissions_baltimore_type$year) +
  labs(title = expression("Total "* PM[2.5]*" Emissions by each Year and Type in Baltimore")))
dev.off()
#Plot 4
# Load the given data sets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Identify coal-combustion-related source codes, then total the nationwide
# emissions from those sources for each year.
coal_source_codes <- SCC$SCC[grepl("Coal", SCC$EI.Sector)]
coal_emissions <- subset(NEI, NEI$SCC %in% coal_source_codes)
coal_by_year <- aggregate(Emissions ~ year, data = coal_emissions, sum, na.rm = TRUE)
# Capture the plot using the png() graphics device.
png("plot4.png", height = 300, width = 800)
barplot((coal_by_year$Emissions/1000), # scale to kilotons for readability
        names.arg = c("1999","2002","2005","2008"),
        col = "steelblue", xlab = "Years",
        ylab = expression("Total "* PM[2.5]*" Emissions in Kilo-tons"),
        main = expression("Emissions of "* PM[2.5]*" by each year for Coal Cumbustion related Sources"))
dev.off()
#Plot 5
# Load the given data sets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep only motor-vehicle emission sources.
vehicles <- SCC[grepl("Vehicles",SCC$EI.Sector),]$SCC
# Subset NEI to those sources, then to Baltimore City (fips == 24510),
# and total emissions per year.
total_vehicles <- subset(NEI, NEI$SCC %in% vehicles)
total_vehicles_baltimore <- subset(total_vehicles, total_vehicles$fips == 24510)
total_vehicles_baltimore_year <- aggregate(Emissions ~ year, data = total_vehicles_baltimore, sum, na.rm=TRUE)
# Capture the plot using the png() graphics device.
# Fixed: the original call was `png("plot5.png" heigt = 400, ...)` -- missing
# comma and misspelled `height` argument.
png("plot5.png", height = 400, width = 800)
barplot(total_vehicles_baltimore_year$Emissions, names.arg = c("1999","2002","2005","2008"),
        col = "steelblue", xlab = "Years", ylab = expression("Total "* PM[2.5]*" Emissions tons"),
        main = expression("Emissions of "* PM[2.5]*" by each year from motor vehicle Sources in Baltimore"))
dev.off()
#Plot 6
# Load the given data sets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Compare motor-vehicle emissions between Baltimore City (fips 24510) and
# Los Angeles County (fips "06037"), totalled by year.
vehicle_codes <- SCC$SCC[grepl("Vehicles", SCC$EI.Sector)]
vehicle_nei <- subset(NEI, NEI$SCC %in% vehicle_codes)
baltimore_vehicles <- subset(vehicle_nei, vehicle_nei$fips == 24510)
la_vehicles <- subset(vehicle_nei, vehicle_nei$fips == "06037")
baltimore_by_year <- aggregate(Emissions ~ year, data = baltimore_vehicles, sum, na.rm = TRUE)
la_by_year <- aggregate(Emissions ~ year, data = la_vehicles, sum, na.rm = TRUE)
# Stack the two bar charts vertically in a single device.
png("plot6.png", height = 800, width = 480)
par(mfrow = c(2,1))
barplot(baltimore_by_year$Emissions, names.arg = c("1999","2002","2005","2008"),
        col = "steelblue", xlab = "Years", ylab = expression("Total "* PM[2.5]*" Emissions tons"),
        main = expression("Emissions of "* PM[2.5]*" by each year from motor vehicle in Baltimore"))
barplot(la_by_year$Emissions, names.arg = c("1999","2002","2005","2008"),
        col = "steelblue", xlab = "Years", ylab = expression("Total "* PM[2.5]*" Emissions tons"),
        main = expression("Emissions of "* PM[2.5]*" by each year from motor vehicle Los Angeles"))
dev.off()
|
2b99563cb08f7a46600131181c3c17050aece354
|
123c73a55e217bc720516f8a0a672e303c347b85
|
/lib/TammData.r
|
aca00f7deff564c6b4310bd5e79e858d1470f5e7
|
[
"MIT"
] |
permissive
|
PSC-CoTC/PSC-FRAM-Admin
|
6ee00d6ac50695df105ff5fd1d7c075ffb5e4970
|
0e25a69cf8f9fb98fac52f6b0a0f4d841c43b8e4
|
refs/heads/master
| 2021-01-10T16:06:38.000374
| 2020-02-27T23:51:06
| 2020-02-27T23:51:06
| 51,110,110
| 1
| 2
|
MIT
| 2019-02-11T23:05:33
| 2016-02-04T22:24:05
|
HTML
|
UTF-8
|
R
| false
| false
| 5,092
|
r
|
TammData.r
|
################
#
# Common methods to deal with a TAMM Spreadsheet Model
#
# Nicholas Komick
# nicholas.komick@dfo-mpo.gc.ca
# January 12, 2017
# Using: http://google-styleguide.googlecode.com/svn/trunk/google-r-style.html
#
################
# Regular expression matching a non-negative decimal number, with optional
# scientific-notation exponent (e.g. "1", "0.5", "1.2e-3").
kDecimalRegEx <- "^[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?$"
# Package dependencies for this module. InstallRequiredPackages() is assumed
# to be defined by the surrounding project (not visible in this file).
required.packages <- c("dplyr", "readxl")
InstallRequiredPackages(required.packages)
#' A helper function loading the total mortalities from the TAMM excel spreadsheet
#' based on a the worksheet name, column, and row of the cell value defined in the tamm.ref
#' data frame
#'
#' @param tamm.ref A data frame of cell references to load from the TAMM Excel document
#' @param tamm.filename: The file name of the TAMM Excel document
#'
#' NOTE: If a cell reference is NA for the worksheet, column, and row, a zero value is
#' automatically filled.
#'
#' @return A dataframe with the FRAM fisheries and associated TAMM mortalties
#'
#' Exceptions:
#' The method checks that values read for the Excel spreadsheet are numeric values.
#'
GetTammValues <- function (tamm.ref, tamm.filename) {
  # Sort by worksheet name so each worksheet is loaded from Excel only once.
  tamm.ref <- arrange(tamm.ref, tamm.worksheet.name)
  tamm.ref$tamm.value <- NA_character_
  prev.worksheet.name <- ""
  worksheet.data <- NA
  # seq_len() (not 1:nrow) is safe when tamm.ref has zero rows.
  for (ref.idx in seq_len(nrow(tamm.ref))) {
    if (is.na(tamm.ref$tamm.worksheet.name[ref.idx]) &
        is.na(tamm.ref$tamm.cell.row[ref.idx]) &
        is.na(tamm.ref$tamm.cell.col[ref.idx])) {
      #No Cell Reference provided, so zero out the original FRAM value
      tamm.ref$tamm.value[ref.idx] <- "0"
    } else {
      # Only re-read the workbook when the worksheet changes from the
      # previous row (rows are sorted by worksheet above).
      if (tamm.ref$tamm.worksheet.name[ref.idx] != prev.worksheet.name) {
        worksheet.data <- read_excel(tamm.filename,
                                     tamm.ref$tamm.worksheet.name[ref.idx],
                                     col_names = FALSE)
        cat(sprintf("Loading data from excel worksheet: %s\n",
                    tamm.ref$tamm.worksheet.name[ref.idx]))
        prev.worksheet.name <- tamm.ref$tamm.worksheet.name[ref.idx]
      }
      # Extract the single referenced cell from the cached worksheet.
      tamm.ref$tamm.value[ref.idx] <- worksheet.data[tamm.ref$tamm.cell.row[ref.idx],
                                                     tamm.ref$tamm.cell.col[ref.idx]][[1]]
    }
  }
  # Every extracted value must look like a decimal number before conversion.
  decimal.check <- grepl(kDecimalRegEx, tamm.ref$tamm.value)
  if (any(!decimal.check)) {
    # Fixed typos in the user-facing messages ("do no", "returing",
    # "fixed continue").
    cat("The following TAMM References do not return decimal values:\n\n")
    cat(paste(names(tamm.ref), collapse=","))
    cat("\n")
    cat(paste(tamm.ref[!decimal.check,], collapse=",", sep="\n"))
    cat("\n")
    stop("A TAMM cell reference is not returning a decimal value, this must be fixed to continue generating the report.")
  }
  tamm.ref$tamm.value <- as.numeric(tamm.ref$tamm.value)
  return(tamm.ref)
}
#' Reads the specific Stock/Fishery mortality values from a TAMM model defined in the
#' provided file.
#'
#' @param tamm.filename The file name of TAMM excel spreadsheet
#' @param tamm.fishery.ref.filename The file name containing
#'
#' @return A dataframe with the FRAM fisheries/stock combination and associated TAMM mortalties
#'
#' @note The method checks that values read for the Excel spreadsheet are numeric values.
#'
GetTammFisheryMortality <- function (tamm.filename,
                                     tamm.fishery.ref.filename) {
  # Load the stock/fishery cell-reference table, normalize the worksheet
  # name to character, resolve each reference against the TAMM workbook,
  # and keep only the identifying keys plus the extracted value.
  ref <- ReadCsv(tamm.fishery.ref.filename, NA,
                 unique.col.names = c("fram.stock.id", "fram.fishery.id"))
  ref$tamm.worksheet.name <- as.character(ref$tamm.worksheet.name)
  ref <- GetTammValues(ref, tamm.filename)
  select(ref, fram.stock.id, fram.fishery.id, tamm.value)
}
#' Reads the specific Escapement values from a TAMM model defined in the
#' tamm.esc.ref.filename
#'
#' @param tamm.filename The file name of TAMM excel spreadsheet
#' @param tamm.esc.ref.filename The file name of TAMM excel spreadsheet
#'
#' @result A dataframe with the FRAM stock ID and associated TAMM escapement
#'
#' @note The method checks that values read for the Excel spreadsheet are numeric values.
#'
GetTammEscapement <- function (tamm.filename,
                               tamm.esc.ref.filename) {
  # Load the escapement cell-reference table, normalize the worksheet name,
  # resolve each reference against the TAMM workbook, and return only the
  # stock id with its extracted escapement value.
  ref <- ReadCsv(tamm.esc.ref.filename, NA,
                 unique.col.names = c("fram.stock.id"))
  ref$tamm.worksheet.name <- as.character(ref$tamm.worksheet.name)
  ref <- GetTammValues(ref, tamm.filename)
  select(ref, fram.stock.id, tamm.value)
}
#' Reads the various values of the TAMM spreadsheet and packages data into a list
#'
#' @param tamm.filename The file name of TAMM excel spreadsheet
#' @param data.dir Directory that TAMM reference files are defined
#'
#' @return A list with a dataframe for fishery mortalities and escapement values
#'
GetTammData <- function (tamm.filename, tamm.fishery.ref.filename, tamm.esc.ref.filename) {
  # Extract both TAMM components and package them into one named list.
  fishery.mortalities <- GetTammFisheryMortality(tamm.filename, tamm.fishery.ref.filename)
  escapement <- GetTammEscapement(tamm.filename, tamm.esc.ref.filename)
  list(tamm.fishery.mortalities = fishery.mortalities,
       tamm.escapement = escapement)
}
|
019ef2907aef477fe83ea65e5d93cac374c2fded
|
41bc3ac7457527c3abd38891a7a5c4dc7c09b874
|
/R/rpivotTable.R
|
22789673fe303a2cc448aafda0a882905ae4139a
|
[
"MIT"
] |
permissive
|
jcizel/rpivotTable
|
282616e117b38e979afd04f0f6219f5a09cd74a8
|
d23afb10c8a355f0ace664c7c8ec465c5f25e669
|
refs/heads/master
| 2021-01-18T07:47:12.458895
| 2015-01-30T18:41:21
| 2015-01-30T18:41:21
| 30,082,383
| 4
| 1
| null | 2015-01-30T17:15:42
| 2015-01-30T17:15:42
| null |
UTF-8
|
R
| false
| false
| 1,321
|
r
|
rpivotTable.R
|
#' <Add Title>
#'
#' <Add Description>
#'
#' @import htmlwidgets
#'
#' @export
##'
##' @title
##' @param data
##' @param rows
##' @param cols
##' @param vals
##' @param aggregatorName
##' @param width
##' @param height
##' @return
##' @author Enzo Martoglio
rpivotTable <- function(
  data = NULL,
  rows = NULL,
  cols = NULL,
  vals = NULL,
  aggregatorName = NULL,
  width = NULL,
  height = NULL) {

  # Pivot configuration forwarded to the JavaScript widget; each entry is
  # wrapped in list() so it serializes as a JSON array.
  pivot_params <- list(
    rows = list(rows),
    cols = list(cols),
    vals = list(vals),
    aggregatorName = list(aggregatorName)
  )

  # Payload passed to the widget: the raw data plus the pivot configuration.
  payload <- list(
    data = data,
    param = pivot_params
  )

  htmlwidgets::createWidget(
    name = 'rpivotTable',
    payload,
    width = width,
    height = height,
    package = 'rpivotTable'
  )
}
#' Widget output function for use in Shiny
#'
#' @export
rpivotTableOutput <- function(outputId, width = '100%', height = '400px'){
  # Namespace-qualify so the call resolves even when htmlwidgets is not
  # attached or imported into the calling namespace (the original relied on
  # an unqualified `shinyWidgetOutput`).
  htmlwidgets::shinyWidgetOutput(outputId, 'rpivotTable', width, height, package = 'rpivotTable')
}
#' Widget render function for use in Shiny
#'
#' @export
renderRpivotTable <- function(expr, env = parent.frame(), quoted = FALSE) {
  if (!quoted) { expr <- substitute(expr) } # force quoted
  # Namespace-qualify so the call resolves even when htmlwidgets is not
  # attached or imported into the calling namespace.
  htmlwidgets::shinyRenderWidget(expr, rpivotTableOutput, env, quoted = TRUE)
}
|
a665f809abf0d0d285d5995a7d5d66e21951c660
|
73e638dc549babb1034d2c103aa9b6fcdc5d7322
|
/examples/dl.R
|
94b90a31dfc8b2c5443a80d1d1ec4acb31f3b31e
|
[] |
no_license
|
yandorazhang/R2D2
|
b11e8c46902949a55ab4a1a0fe4f65687c915cc0
|
e734639929abb60e616c114ac7fe4e2beb5c7f9d
|
refs/heads/master
| 2023-01-14T08:39:13.803273
| 2020-11-18T13:43:30
| 2020-11-18T13:43:30
| 282,605,509
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 821
|
r
|
dl.R
|
# Simulated regression example for the Dirichlet-Laplace (dl) Gibbs sampler.
# NOTE(review): no set.seed() call, so results are not reproducible; the exact
# output also depends on the order of the random draws below.
# AR(1) correlation between adjacent predictors.
rho <- 0.5
# Number of predictors
p <- 25
# Number of observations
n <- 60
# Construct beta: sparse coefficient vector with 5 nonzero t-distributed
# entries in positions 11-15, scaled so the signal variance is controlled.
n_nonzero <- 5
beta <- rep(0, p)
beta[11:(10 + n_nonzero)] <- stats::rt(n_nonzero, df = 3) * sqrt(0.5/(3 * n_nonzero/2))
# Construct x: rows drawn from N(0, V) where V is an AR(1) covariance matrix
# (V[i, j] = sigma * rho^|i - j|), then column-centered.
sigma <- 1
times <- 1:p
H <- abs(outer(times, times, "-"))
V <- sigma * rho^H
x <- mvtnorm::rmvnorm(n, rep(0, p), V)
x <- scale(x, center = TRUE, scale = FALSE)
# Construct y: linear signal plus standard normal noise.
y <- x %*% beta + stats::rnorm(n)
# Gibbs sampling hyperparameters; a.prior is the Dirichlet concentration.
a.prior0 <- p/n
c.prior0 <- a.prior0/p
a.dl.prior0 <- 2 * c.prior0
mcmc.n <- 10000
# dl() is presumably provided by the R2D2 package this example ships with --
# TODO confirm it is loaded before running.
fit.dl <- dl(x = x, y = y, hyper = list(a1 = 0.001, b1 = 0.001),
a.prior = a.dl.prior0, mcmc.n = mcmc.n, print = FALSE)
# Discard the early samples (burn-in) and average the retained beta draws.
burnIn <- 5000
beta.dl = fit.dl$beta[burnIn:mcmc.n, ]
mean.beta = apply(beta.dl, 2, mean)
|
d8c980169b105deaf1c3697f3bdd0216705250aa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ddalpha/examples/depth.space.halfspace.Rd.R
|
f3f0c084e147cc3b153239f3f20158dd99d2c76a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
depth.space.halfspace.Rd.R
|
library(ddalpha)
### Name: depth.space.halfspace
### Title: Calculate Depth Space using Halfspace Depth
### Aliases: depth.space.halfspace
### Keywords: robust multivariate nonparametric
### ** Examples
# Generate a bivariate normal location-shift classification task
# containing 20 training objects
# NOTE(review): mvrnorm() comes from MASS; confirm ddalpha attaches it,
# otherwise library(MASS) is needed first. No set.seed(), so the simulated
# classes differ between runs.
class1 <- mvrnorm(10, c(0,0),
matrix(c(1,1,1,4), nrow = 2, ncol = 2, byrow = TRUE))
class2 <- mvrnorm(10, c(1,1),
matrix(c(1,1,1,4), nrow = 2, ncol = 2, byrow = TRUE))
# Stack both classes into one 20 x 2 training matrix and visualize them.
data <- rbind(class1, class2)
plot(data, col = c(rep(1,10), rep(2,10)))
# Get depth space using the random Tukey depth
# (c(10, 10) gives the cardinality of each class within `data`).
dhA = depth.space.halfspace(data, c(10, 10))
(dhA)
# Get depth space using default exact method - "recursive"
dhE = depth.space.halfspace(data, c(10, 10), exact = TRUE)
(dhE)
# Repeat on the bundled hemophilia data set, splitting by the `gr` label.
data <- getdata("hemophilia")
cardinalities = c(sum(data$gr == "normal"), sum(data$gr == "carrier"))
depth.space.halfspace(data[,1:2], cardinalities)
|
12ed1d2dc38897a2de6afd3bb54ede737823a231
|
19cee5848f8a9bbc2d7d236258483719f49d5b97
|
/scripts/archive/cell_count_3var_analysis/poststratify_3var.R
|
8e1eee0f60fb97c57a0dd6ab5d7e08412e9f8799
|
[] |
no_license
|
AnthonyRentsch/DPsurveyweighting
|
dac945f36d99b591c4af8625607befed05dc81bc
|
0bf62262f88f6187f1d27f8c003e940785ded526
|
refs/heads/master
| 2020-05-17T20:34:45.777499
| 2019-05-17T01:56:20
| 2019-05-17T01:56:20
| 183,946,616
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,840
|
r
|
poststratify_3var.R
|
###########
# Perform computation of post-stratification weights
###########
#### Set up
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged (they
# clobber the caller's session); require() silently returns FALSE when a
# package is missing -- library() would fail loudly instead.
rm(list = ls());
setwd("~/Desktop/Bhaven/Harvard/Classes/CS208/DPsurveyweighting/"); #move working directory to the root of our DPsurveyweighting repo
# install.packages("survey")
require(plyr); require(dplyr); require(readr); require(survey)
# dp_utils.R defines rlap() (Laplace sampler) used by the noisy release below.
source("scripts/dp_utils.R");
acs_cell_counts <- read.csv("data/cell_counts_3var.csv"); # load ACS cell counts
state_weights <- read.csv("data/state_weights.csv"); #load the state weights for normalization
acs_cell_counts <- acs_cell_counts[,-1]; #get rid of index column
cces16 <- read_tsv("data/CCES16_Common_OUTPUT_Jul2017_VV.tab", col_names = TRUE); #load the CCES data
# process CCES data
# Candidate weighting variables; the commented entries were considered but
# dropped for this three-variable analysis.
weight_vars_cces <- c(#"gender", # sex
#"birthyr", # age
"educ", # education
"race", # race
#"employ", # employment status
#"marstat", # marital status
#"faminc", # family income
#"child18num", # number of children under 18
#"immstat", # citizenship status
"inputstate" # state
# metropolitan area
)
#create new columns in CCES data with education and race recoded
# `%in% c(..., NA)` deliberately catches NA responses too, since %in% never
# returns NA (unlike ==).
cces16 <- cces16 %>%
mutate(education = case_when(educ %in% c(1,8,9,NA) ~ 1,
educ == 2 ~ 2,
educ == 3 ~ 3,
educ == 4 ~ 4,
educ == 5 ~ 5,
educ == 6 ~ 6),
race = case_when(race == 1 ~ 1,
race == 2 ~ 2,
race == 3 ~ 3,
race == 4 ~ 4,
race %in% c(5,6,7,8,98,99,NA) ~ 5)
)
# FIPS-style state codes 1-56; empty strings fill the code numbers that are
# not assigned to a state.
states <- data.frame(inputstate = seq(1:56),
state = tolower(c("AL","AK","","AZ","AR","CA","","CO","CT","DE","DC",
"FL","GA","","HI","ID","IL","IN","IA","KS","KY","LA",
"ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH",
"NJ","NM","NY","NC","ND","OH","OK","OR","PA","","RI","SC",
"SD","TN","TX","UT","VT","VA","","WA","WV","WI","WY")))
cces16 <- left_join(cces16, states, by = "inputstate")
# Keep only the weighting variables plus the outcome questions
# (CC16_364c = vote preference, CC16_301a retained for later use).
cces16_slim <- cces16 %>% select(state, education, race, CC16_364c, CC16_301a);
# process ACS data
# rescale weights by dividing by the max weight for any individual in any state
# acs_cell_counts$rescaled_n <- acs_cell_counts$n / max(state_weights$max_weight)
acs_cell_counts_slim <- acs_cell_counts %>% select(state, education, race, n) #***
# create svydesign object
# assume SRS for illustrative purposes
cces16.des <- svydesign(ids = ~ 1, data = cces16_slim)
# Post-stratify the SRS design to the true ACS cell counts;
# partial = TRUE tolerates strata present in the sample but not the population.
cces16.des.ps <- postStratify(design = cces16.des,
strata = ~state+race+education,
population = acs_cell_counts_slim,
partial = TRUE)
# weighted to true ACS
# Weighted vote-preference shares by race, normalized within each race group.
# NOTE(review): svytable() returns factor levels, so `preference == 1` relies
# on the factor labels being the numeric codes -- confirm against the data.
# The final mutate leaves the result grouped by race (no ungroup()).
voteshare.weighted <- as.data.frame(svytable(~CC16_364c+race, design=cces16.des.ps)) %>%
rename(preference=CC16_364c, share=Freq) %>%
mutate(preference = case_when(preference == 1 ~ "Trump",
preference == 2 ~ "Clinton",
preference == 3 ~ "Johnson",
preference == 4 ~ "Stein",
preference == 5 ~ "Other",
preference == 6 ~ "Won't vote",
preference == 7 ~ "Not sure",
preference %in% c(8,9, NA) ~ "Skipped/not asked"),
race = case_when(race == 1 ~ "White",
race == 2 ~ "Black",
race == 3 ~ "Hispanic",
race == 4 ~ "Asian",
race == 5 ~ "Other")) %>%
group_by(race) %>% mutate(share = share/sum(share))
voteshare.weighted %>% View()
# unweighted
# Same tabulation without post-stratification, as a baseline for comparison.
cces16.unweighted.des <- svydesign(ids = ~ 1, data = cces16_slim)
unweighted.res <- as.data.frame(svytable(~CC16_364c+race, design=cces16.unweighted.des)) %>%
rename(preference=CC16_364c, share=Freq) %>%
mutate(preference = case_when(preference == 1 ~ "Trump",
preference == 2 ~ "Clinton",
preference == 3 ~ "Johnson",
preference == 4 ~ "Stein",
preference == 5 ~ "Other",
preference == 6 ~ "Won't vote",
preference == 7 ~ "Not sure",
preference %in% c(8,9, NA) ~ "Skipped/not asked"),
race = case_when(race == 1 ~ "White",
race == 2 ~ "Black",
race == 3 ~ "Hispanic",
race == 4 ~ "Asian",
race == 5 ~ "Other")) %>%
group_by(race) %>% mutate(share = share/sum(share))
unweighted.res %>% View()
##### weighted to noisy ACS
# For each simulated private release: perturb the ACS cell counts with Laplace
# noise, recompute the post-stratification weights, and record the weighted
# vote shares by race alongside the non-private ("true") shares.
num_sims <- 5 # number of releases to perform for each epsilon
epsilon <- 0.5
# Laplace scale = sensitivity / epsilon; the sensitivity of a cell count is
# the largest person-weight observed in any state.
scale <- max(state_weights$max_weight)/epsilon
# Results accumulator, seeded with the non-private weighted shares.
# (Fixed: the original also ran `noisy_acs <- noisy_acs %>% select(...)` here,
# before `noisy_acs` existed -- that line was removed.)
combined <- voteshare.weighted
colnames(combined) <- c("preference", "race", "share_true")
for(i in seq_len(num_sims)){
  # Add Laplace noise to ACS cell counts.
  # (Fixed: the original read `rescaled_n`, a column that is never created
  # because the rescaling step above is commented out; the count column is `n`.)
  noisy_acs <- acs_cell_counts_slim
  noisy_acs$noisy_n <- noisy_acs$n + rlap(mu=0, b=scale, size=nrow(noisy_acs))
  # Clamp non-positive noisy counts to 1 so every stratum keeps a valid count.
  noisy_acs$noisy_n <- ifelse(noisy_acs$noisy_n < 0, 1, noisy_acs$noisy_n)
  noisy_acs <- noisy_acs %>% select(state, education, race, noisy_n) #trim noisy_acs df to only the relevant columns
  # Survey design object for the CCES sample we want weights for.
  cces16.noisy.des <- svydesign(ids = ~ 1, data = cces16_slim)
  # Post-stratification weights against the noisy population counts.
  # (Fixed: the original passed `cces16.des` here; use the design built above.)
  cces16.noisy.des.ps <- postStratify(design = cces16.noisy.des,
                                      strata = ~state+race+education,
                                      population = noisy_acs,
                                      partial = TRUE)
  #calculate the vote share for each candidate based on race
  voteshare.noisy.weighted <- as.data.frame(svytable(~CC16_364c+race, design=cces16.noisy.des.ps)) %>%
    rename(preference=CC16_364c, share=Freq) %>%
    mutate(preference = case_when(preference == 1 ~ "Trump",
                                  preference == 2 ~ "Clinton",
                                  preference == 3 ~ "Johnson",
                                  preference == 4 ~ "Stein",
                                  preference == 5 ~ "Other",
                                  preference == 6 ~ "Won't vote",
                                  preference == 7 ~ "Not sure",
                                  preference %in% c(8,9, NA) ~ "Skipped/not asked"),
           race = case_when(race == 1 ~ "White",
                            race == 2 ~ "Black",
                            race == 3 ~ "Hispanic",
                            race == 4 ~ "Asian",
                            race == 5 ~ "Other")) %>%
    group_by(race) %>% mutate(share = share/sum(share)) %>%
    ungroup()
  # Label this release's share column and merge it into the accumulator.
  colnames(voteshare.noisy.weighted) <- c("preference", "race", paste("share_noisy",i, sep=""))
  combined <- merge(combined, voteshare.noisy.weighted,
                    by=c("preference", "race"))
}
# RMSE of the Clinton-minus-Trump margin between the private releases and the
# non-private estimate, computed separately for each race group.
combined_trim <- combined[combined$preference == "Clinton" | combined$preference == "Trump", ];
races <- c("White", "Black", "Hispanic", "Asian", "Other");
# NOTE(review): c(race, rmse) mixes character and numeric, so the whole matrix
# is coerced to character; a data.frame would preserve the numeric RMSE.
rmse_results <- matrix(NA, nrow = length(races), ncol = 2)
for(r in 1:length(races) ){
race <- races[r];
#get clinton data
# Column 3 is the non-private share; columns 4..(3+num_sims) are the releases.
clinton <- combined_trim[combined_trim$preference== "Clinton" & combined_trim$race == race, ];
clinton_true <- clinton[, 3];
clinton_noisy <- as.numeric(clinton[, 4:(4+num_sims-1)]);
#get trump data
trump <- combined_trim[combined_trim$preference== "Trump" & combined_trim$race == race, ];
trump_true <- trump[, 3];
trump_noisy <- as.numeric(trump[, 4:(4+num_sims-1)]);
#calculate the rmse for difference
true_dif <- clinton_true - trump_true; #calculate true difference
noisy_difs <- clinton_noisy - trump_noisy;
mse <- mean( (true_dif - noisy_difs)^2 );
rmse <- sqrt(mse);
rmse_results[r, ] <- c(race, rmse);
}
rmse_results
# combine to compare private versus non-private release
# combined <- merge(voteshare.weighted, voteshare.noisy.weighted,
#                   by=c("preference", "race"), suffixes=c("_true", "_noisy"))
|
ead624884e91114be2686553de3a381d9ed6a6ca
|
9210c5d9b5805b2dfc9393070793b38b7905f3d7
|
/bibliography.R
|
6fdaba621b353394367921744d0dc6e0c28a06b6
|
[] |
no_license
|
prodakt/learning
|
4d40c2666d61bb9e93cf52a5e6ea2fb3568b8077
|
69d4e931cea12da159a8b7fb7a377546cd81a2ba
|
refs/heads/main
| 2023-02-28T17:36:36.714211
| 2021-02-01T10:13:48
| 2021-02-01T10:13:48
| 320,534,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,274
|
r
|
bibliography.R
|
# Exploratory script comparing R packages for bibliography / PubMed retrieval:
# bibtex, RISmed, RefManageR, pubmed.mineR. Every section performs network I/O
# (package installs, PubMed E-utilities queries) and reads local .bib files.
source("http://bioconductor.org/biocLite.R")
# bibtex --------------
biocLite("bibtex")
library(bibtex)
# Read a local BibTeX file and inspect the first entries.
a <- read.bib("L:/Praca/Publikacje_Konferencje/TCDD.bib")
head(a)
# RISmed --------------
install.packages("RISmed")
library(RISmed)
# Query PubMed for "copd" articles published in 2012 (at most 100 hits).
search_topic <- 'copd'
search_query <- EUtilsSummary(search_topic, retmax=100, mindate=2012,maxdate=2012)
summary(search_query)
# see the ids of our returned query
QueryId(search_query)
# get actual data from PubMed
records<- EUtilsGet(search_query)
class(records)
# store it
pubmed_data <- data.frame('Title'=ArticleTitle(records),'Abstract'=AbstractText(records))
head(pubmed_data,1)
pubmed_data$Abstract <- as.character(pubmed_data$Abstract)
# Strip commas from abstracts (presumably to keep later CSV export clean).
pubmed_data$Abstract <- gsub(",", " ", pubmed_data$Abstract, fixed = TRUE)
# see what we have
str(pubmed_data)
# Author search: collect PubMed IDs for one author's publications 2000-2019.
pyt <- "Jastrzebski JP[Author]"
kwer <- EUtilsSummary(pyt, retmax=100, mindate=2000, maxdate=2019)
records<- EUtilsGet(kwer)
id <- PMID(records)
records
# RefManageR ------------
biocLite("RefManageR")
library(RefManageR)
bib <- ReadBib("L:/Praca/Publikacje_Konferencje/TCDD.bib")
bib
lista_pub <- GetPubMedByID(id)
# NOTE(review): bare `write` below just prints the function object — looks
# like an unfinished statement (perhaps WriteBib was intended); confirm.
write
# pubmed.mineR ---------
install.packages("pubmed.mineR")
library(pubmed.mineR)
|
a7ec0d507674941ef359f69d73b05062261dd6c3
|
6f528d1394c665ccef7f5a1d39744dfd0b2568a7
|
/dssat_cornCV_calib.R
|
fe8e60552a10ac466f5aa4530ae60d80fff7fe81
|
[] |
no_license
|
Murilodsv/DSSATcorncalibration
|
6dfc39376a951606acb7cdf44461da82a05aa7da
|
2c1786a86be2c70da304a1b8fe8ad39fb8591064
|
refs/heads/master
| 2021-09-25T18:34:48.682874
| 2021-09-14T11:55:18
| 2021-09-14T11:55:18
| 176,268,443
| 1
| 0
| null | 2019-03-18T11:24:25
| 2019-03-18T11:24:25
| null |
UTF-8
|
R
| false
| false
| 8,682
|
r
|
dssat_cornCV_calib.R
|
# DSSAT CERES-Maize calibration script: loads observed field data and the
# parameter search ranges used by the optimizers below.
library(sirad)
library(Dasst)
library(hydroGOF)
library(optimr)
library(dfoptim)
library(FME)
library(optimx)
wd = "D:/ISmalia_DSSAT"
Obs_data = read.csv(paste(wd,"/OBS_Calib-Data_.csv",sep=""))
#-------------------------SC10 cultivar------------------------------
# Observed totals for cultivar SC10 (treatments 1-20, 190 kg N):
# total dry weight at 60/75 days after planting, grain yield at 110 DAP.
OBStotalDW <- round(Obs_data$Value[Obs_data$Vari=="totaldryweight" & Obs_data$Nit=="190" & Obs_data$DAP %in% c("60","75") & Obs_data$Trt %in% c(1:20)],digits=0)
OBSGY <- round(Obs_data$Value[Obs_data$Vari=="GrainYield" & Obs_data$Nit=="190" & Obs_data$DAP=="110" & Obs_data$Trt %in% c(1:20)],digits=0)
# Initial values and box constraints for the 15 cultivar/ecotype coefficients.
par <- read.csv(paste(wd,"/Corn_calibration.csv",sep=""))
par_initia <- par$initial_values
par_min <- par$Calib_range_min
par_max <- par$Calib_range_max
# Objective function for the SC10 calibration.
# X: vector of 15 coefficients (X[1:6] -> cultivar .CUL file, X[7:15] ->
#    ecotype .ECO file). Optfig: 1 to plot observed-vs-simulated grain yield.
# Side effects: overwrites the DSSAT genotype files, changes the working
# directory, and runs the DSSAT executable. Returns a weighted RMSE score
# (lower is better) combining total dry weight and grain yield fit.
myfunction <- function(X,Optfig){
  #-----edit the CUL&ECO files----------
  # Fixed-width header and coefficient row for the cultivar file; the exact
  # spacing in these literals matters to DSSAT's fixed-format parser.
  out_string <- paste0("!MAIZE CULTIVAR COEFFICIENTS: MZCER047 MODEL\n",
                       "!AHMED ATTIA (MAR-2019)\n",
                       "@VAR# VRNAME.......... EXPNO ECO# P1 P2 P5 G2 G3 PHINT")
  CalibP <- c(formatC(X[1],format="f",digits=1),formatC(X[2],format="f",digits=3),formatC(X[3],format="f",digits=1),
              formatC(X[4],format="f",digits=1), formatC(X[5],format="f",digits=2),
              formatC(X[6],format="f",digits=2))
  CalibP[5] <- paste(" ",CalibP[5],sep="")
  cat(out_string,
      "990001 LONG SEASON . IB0001",CalibP,file="C:/DSSAT47/Genotype/MZCER047.CUL",fill=T,append = F)
  # Ecotype file: same pattern for the nine ecotype coefficients X[7:15].
  out_string2 <- paste0("*MAIZE ECOTYPE COEFFICIENTS: MZCER047 MODEL\n",
                        "@ECO# ECONAME......... TBASE TOPT ROPT P20 DJTI GDDE DSGFT RUE KCAN TSEN CDAY")
  CalibP2 <- c(formatC(X[7],format="f",digits=1),formatC(X[8],format="f",digits=1),formatC(X[9],format="f",digits=1),
               formatC(X[10],format="f",digits=1),formatC(X[11],format="f",digits=1),
               formatC(X[12],format="f",digits=1),formatC(X[13],format="f",digits=1),formatC(X[14],format="f",digits=1),
               formatC(X[15],format="f",digits=2))
  # Manual column padding so each value lands in its fixed-width field.
  CalibP2[2] <- paste(" ",CalibP2[2],sep="")
  CalibP2[3] <- paste("",CalibP2[3],sep="")
  CalibP2[4] <- paste(" ",CalibP2[4],sep="")
  CalibP2[5] <- paste(" ",CalibP2[5],sep="")
  CalibP2[6] <- paste(" ",CalibP2[6],sep="")
  CalibP2[7] <- paste(" ",CalibP2[7],sep="")
  CalibP2[8] <- paste(" ",CalibP2[8],sep="")
  CalibP2[9] <- paste(" ",CalibP2[9],sep="")
  cat(out_string2,
      "IB0001 GENERIC MIDWEST1 ",CalibP2,file="C:/DSSAT47/Genotype/MZCER047.ECO",fill=T,append = F)
  # NOTE(review): setwd() inside a function is a global side effect and is
  # never restored (no on.exit) — confirm this is intentional.
  setwd(paste("C:/DSSAT47/Maize",sep = ""))
  #--- write paramters used on the screen
  message("")
  message("Running DSSAT-MaizeCERES...")
  #--- Call DSSAT047.exe and run X files list within DSSBatch.v47
  system("C:/DSSAT47/DSCSM047.EXE MZCER047 B DSSBatch.v47",show.output.on.console = F)
  # Parse the DSSAT plant growth output: one element per simulated treatment.
  plantgro <- read.dssat("C:/DSSAT47/Maize/PlantGro.OUT")
  SIMtotalDW60 <- 0
  SIMtotalDW75 <- 0
  SIMtotalDW <- 0
  SIMGY <- 0
  for(i in 1:length(plantgro)){
    data=as.data.frame(plantgro[[i]])
    SIMtotalDW60[i] <- data$CWAD[data$DAP==60]   # canopy weight at 60 DAP
    SIMtotalDW75[i] <- data$CWAD[data$DAP==75]   # canopy weight at 75 DAP
    SIMGY[i] <- tail(data$GWAD,n=1)              # final grain weight
  }
  simtotalDW=c(SIMtotalDW60,SIMtotalDW75)
  simGY=c(SIMGY)
  # hydroGOF::rmse(sim, obs) — presumably; confirm which rmse() is attached.
  totalDW_rmse <- rmse(simtotalDW,OBStotalDW)
  GY_rmse <- rmse(simGY,OBSGY)
  # Weighted objective: dry-weight RMSE scaled by 1/100, yield RMSE by 1/50.
  y <- totalDW_rmse/100+
    GY_rmse/50
  if(Optfig==1){
    # Diagnostic plot: observed vs simulated grain yield (t/ha) with 1:1 line.
    plot(OBSGY/1000,simGY/1000,xlim=c(0,6),ylim=c(0,6))
    SimregGY <- simGY/1000
    ObsregGY <- OBSGY/1000
    reg1<- lm(SimregGY~ObsregGY)
    abline(reg1,pch=4,col=2,lwd=2, lty=2)
    abline(0:1)
    modeleval_GY <- modeval(simGY,OBSGY)
    text(1,5,label=bquote("R"^2~":" ~ .(round(modeleval_GY$R2[[1]],digits=2))),cex=0.7)
    text(5,2,label=noquote(paste0("RMSE: ",round(modeleval_GY$RMSE[[1]],digits=2))),cex=0.7)
  }
  print(c(X,GY_rmse,y))
  return(y)
}
#------------------
# Run the SC10 calibration: evaluate once with plotting, then minimize the
# objective with several optimizers (Hooke-Jeeves, FME pseudo-random, optimx).
par(mfrow=c(4,2),mar=c(4,4,3,2)+0.1,mgp=c(2.5,0.7,0))
myfunction(par_initia,Optfig=1)
resSC10=hjkb(par=par_initia,myfunction,Optfig=0,
             lower=par_min,upper=par_max,control=list(maxfeval=100000))
res=modFit(f=myfunction,p=par_initia,Optfig=1,
           lower=par_min,upper=par_max,method="Pseudo", control=list(numiter=50000))
resoptimr=optimx::optimx(par=par_initia,myfunction,Optfig=0,itnmax=100000,
                         lower=par_min,upper=par_max,method=c("Nelder-Mead","hjkb","L-BFGS-B"),
                         control=list(maxit=100000,all.methods=T,follow.on=T))
# NOTE(review): simtotalDW/simGY are local to myfunction() and are not
# assigned in the global environment, so the plot below likely errors unless
# these objects were created interactively — confirm.
plot(OBStotalDW,simtotalDW,xlim=c(0,8000),ylim=c(0,8000))
reg1<- lm(simtotalDW~OBStotalDW)
abline(reg1,pch=4,col=2,lwd=2, lty=2)
abline(0:1)
modeleval_GY <- modeval(simtotalDW,OBStotalDW)
text(7000,50,label=bquote("R"^2~":" ~ .(round(modeleval_GY$R2[[1]],digits=2))),cex=0.7)
text(50,7000,label=noquote(paste0("RMSE: ",round(modeleval_GY$RMSE[[1]],digits=2))),cex=0.7)
#-------------------------TC310 cultivar------------------------------
# Observed data for cultivar TC310 (treatments 21-40); same layout as SC10.
OBStotalDW <- round(Obs_data$Value[Obs_data$Vari=="totaldryweight" & Obs_data$Nit=="190" & Obs_data$DAP %in% c("60","75") & Obs_data$Trt %in% c(21:40)],digits=0)
OBSGY <- round(Obs_data$Value[Obs_data$Vari=="GrainYield" & Obs_data$Nit=="190" & Obs_data$DAP=="110" & Obs_data$Trt %in% c(21:40)],digits=0)
par <- read.csv(paste(wd,"/Corn_calibration.csv",sep=""))
par_initia <- par$initial_values
par_min <- par$Calib_range_min
par_max <- par$Calib_range_max
# Objective function for the TC310 calibration.
# NOTE(review): this is token-for-token identical to the SC10 version above —
# only the OBStotalDW/OBSGY globals it reads differ. Consider factoring the
# shared body into one function to avoid the duplication.
# X: 15 coefficients (X[1:6] -> .CUL, X[7:15] -> .ECO); Optfig: 1 to plot.
# Side effects: rewrites DSSAT genotype files, changes working directory,
# runs the DSSAT executable. Returns the weighted RMSE objective.
myfunction <- function(X,Optfig){
  #-----edit the CUL&ECO files----------
  # Fixed-width header/row for the cultivar file; spacing matters to DSSAT.
  out_string <- paste0("!MAIZE CULTIVAR COEFFICIENTS: MZCER047 MODEL\n",
                       "!AHMED ATTIA (MAR-2019)\n",
                       "@VAR# VRNAME.......... EXPNO ECO# P1 P2 P5 G2 G3 PHINT")
  CalibP <- c(formatC(X[1],format="f",digits=1),formatC(X[2],format="f",digits=3),formatC(X[3],format="f",digits=1),
              formatC(X[4],format="f",digits=1), formatC(X[5],format="f",digits=2),
              formatC(X[6],format="f",digits=2))
  CalibP[5] <- paste(" ",CalibP[5],sep="")
  cat(out_string,
      "990001 LONG SEASON . IB0001",CalibP,file="C:/DSSAT47/Genotype/MZCER047.CUL",fill=T,append = F)
  # Ecotype file for the nine ecotype coefficients X[7:15].
  out_string2 <- paste0("*MAIZE ECOTYPE COEFFICIENTS: MZCER047 MODEL\n",
                        "@ECO# ECONAME......... TBASE TOPT ROPT P20 DJTI GDDE DSGFT RUE KCAN TSEN CDAY")
  CalibP2 <- c(formatC(X[7],format="f",digits=1),formatC(X[8],format="f",digits=1),formatC(X[9],format="f",digits=1),
               formatC(X[10],format="f",digits=1),formatC(X[11],format="f",digits=1),
               formatC(X[12],format="f",digits=1),formatC(X[13],format="f",digits=1),formatC(X[14],format="f",digits=1),
               formatC(X[15],format="f",digits=2))
  # Manual padding so values land in their fixed-width columns.
  CalibP2[2] <- paste(" ",CalibP2[2],sep="")
  CalibP2[3] <- paste("",CalibP2[3],sep="")
  CalibP2[4] <- paste(" ",CalibP2[4],sep="")
  CalibP2[5] <- paste(" ",CalibP2[5],sep="")
  CalibP2[6] <- paste(" ",CalibP2[6],sep="")
  CalibP2[7] <- paste(" ",CalibP2[7],sep="")
  CalibP2[8] <- paste(" ",CalibP2[8],sep="")
  CalibP2[9] <- paste(" ",CalibP2[9],sep="")
  cat(out_string2,
      "IB0001 GENERIC MIDWEST1 ",CalibP2,file="C:/DSSAT47/Genotype/MZCER047.ECO",fill=T,append = F)
  # NOTE(review): global setwd() with no restore — confirm intentional.
  setwd(paste("C:/DSSAT47/Maize",sep = ""))
  #--- write paramters used on the screen
  message("")
  message("Running DSSAT-MaizeCERES...")
  #--- Call DSSAT047.exe and run X files list within DSSBatch.v47
  system("C:/DSSAT47/DSCSM047.EXE MZCER047 B DSSBatch.v47",show.output.on.console = F)
  plantgro <- read.dssat("C:/DSSAT47/Maize/PlantGro.OUT")
  SIMtotalDW60 <- 0
  SIMtotalDW75 <- 0
  SIMtotalDW <- 0
  SIMGY <- 0
  for(i in 1:length(plantgro)){
    data=as.data.frame(plantgro[[i]])
    SIMtotalDW60[i] <- data$CWAD[data$DAP==60]   # canopy weight at 60 DAP
    SIMtotalDW75[i] <- data$CWAD[data$DAP==75]   # canopy weight at 75 DAP
    SIMGY[i] <- tail(data$GWAD,n=1)              # final grain weight
  }
  simtotalDW=c(SIMtotalDW60,SIMtotalDW75)
  simGY=c(SIMGY)
  totalDW_rmse <- rmse(simtotalDW,OBStotalDW)
  GY_rmse <- rmse(simGY,OBSGY)
  # Weighted objective combining dry-weight and grain-yield RMSE.
  y <- totalDW_rmse/100+
    GY_rmse/50
  if(Optfig==1){
    # Observed vs simulated grain yield (t/ha) with fitted and 1:1 lines.
    plot(OBSGY/1000,simGY/1000,xlim=c(0,6),ylim=c(0,6))
    SimregGY <- simGY/1000
    ObsregGY <- OBSGY/1000
    reg1<- lm(SimregGY~ObsregGY)
    abline(reg1,pch=4,col=2,lwd=2, lty=2)
    abline(0:1)
    modeleval_GY <- modeval(simGY,OBSGY)
    text(1,5,label=bquote("R"^2~":" ~ .(round(modeleval_GY$R2[[1]],digits=2))),cex=0.7)
    text(5,2,label=noquote(paste0("RMSE: ",round(modeleval_GY$RMSE[[1]],digits=2))),cex=0.7)
  }
  print(c(X,GY_rmse,y))
  return(y)
}
#--------------------------------------
# TC310 calibration: one plotted evaluation, then Hooke-Jeeves minimization.
myfunction(par_initia,Optfig=1)
resTC310=hjkb(par=par_initia,myfunction,Optfig=0,
              lower=par_min,upper=par_max,control=list(maxfeval=100000))
|
b660ee24a3d899a06694c231aa0970fa42c558cc
|
a0dce4c3d980e8d9678509b0278a466ca9cb5677
|
/R/as.R
|
34d25a941fa9015136128c198ac7b78ec20c4023
|
[
"MIT"
] |
permissive
|
TobCap/importas
|
925a8799a14c0d429ceff69c5a20297ceb7972fa
|
1a709aa773818a6044d712f4819d833b5fb66732
|
refs/heads/master
| 2020-11-27T16:04:52.460940
| 2019-12-08T14:59:33
| 2019-12-08T14:59:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
as.R
|
#' @rdname importas
#' @param pkg A name of package
#' @param nm An abbreviated name of the package
#'
#' @examples
#' graphics %as% gr
#' gr$plot
#'
#' @aliases as
#' @export
`%as%` <- function(pkg, nm) {
  # Capture both unevaluated symbols as strings, then bind the package's
  # namespace environment to the alias in the caller's frame.
  alias_name <- deparse(substitute(nm))
  pkg_name <- deparse(substitute(pkg))
  assign(alias_name, getNamespace(pkg_name), envir = parent.frame())
}
|
e979044c7fc769b5abe868e2ceee046715912c2d
|
a888137b4504ef8ce8ea170d5d13ab973853e81f
|
/numericalAlgorithms/SecantMethod.R
|
461b56ba7e481bcf35c2c4bc036fbaab3eedc927
|
[] |
no_license
|
indrag49/Comp-phy
|
e4e24636945bca311c20617a7cbad62b421e53f7
|
d1265044073682d4066395ad9e3e89350da6c599
|
refs/heads/master
| 2021-09-16T20:21:33.526597
| 2018-06-24T16:24:26
| 2018-06-24T16:24:26
| 111,316,266
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
SecantMethod.R
|
## Author: Indranil Ghosh, Jadavpur University, Physics Department
## Secant method

# Target function whose root is sought: f(x) = x * exp(x) - cos(x).
f <- function(x) x*exp(x)-cos(x)

# Find a root of `fun` by the secant method.
#   x0, x1:  two starting guesses
#   Max:     maximum number of iterations
#   epsilon: convergence tolerance on |x2 - x1|
#   fun:     function to solve (defaults to f above, so existing calls
#            Secant(x0, x1, Max, epsilon) behave exactly as before)
# Returns the numeric root on convergence; on iteration exhaustion it keeps
# the original behavior of returning an error string (mixed return type is a
# pre-existing design choice of this script).
Secant <- function(x0, x1, Max, epsilon, fun = f) {
  k <- 1
  while (k <= Max) {
    f0 <- fun(x0)
    f1 <- fun(x1)
    if (f1 == f0) {
      # The secant update divides by f1 - f0; a flat secant made the original
      # code produce Inf/NaN silently. Fail loudly instead.
      stop("secant method failed: f(x0) == f(x1), division by zero")
    }
    x2 <- x1 - f1 * (x1 - x0) / (f1 - f0)
    if (abs(x2 - x1) < epsilon) return(x2)
    x0 <- x1
    x1 <- x2
    k <- k + 1
  }
  return("error: maximum limit reached")
}

Secant(0.5, 1, 20, 0.0001)
|
e2904cc3e5deb41777a582189062a6c07d821bdb
|
983f36b7bd9ea298b0553c8f9cf976d993d93e88
|
/addLocationCsv.r
|
c894842c12561a6caf0c9113c33bd3bde1069e08
|
[] |
no_license
|
zupino/estate-analysis
|
71a1f5515e6e3f2d89376e3c928c723adf53871f
|
aad3d7b1318aa4e035b5d2d97cafce31dae2122c
|
refs/heads/master
| 2021-01-21T16:53:12.496709
| 2017-05-20T20:06:37
| 2017-05-20T20:06:37
| 91,913,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
addLocationCsv.r
|
#!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
# Strip leading and trailing whitespace from every element of x.
trim <- function(x) {
  gsub("\\s+$", "", gsub("^\\s+", "", x))
}

# Turn a full address into a clean "district,city" key (e.g. "XXX,Stuttgart"),
# taken from the last two comma-separated components. Used later to group
# rows and compute the average price per area.
beautyLocation <- function(addr) {
  parts <- unlist(strsplit(as.character(addr), ","))
  last_two <- trim(tail(parts, 2))
  paste(last_two[1], last_two[2], sep = ",")
}
# Read the input file named by the first CLI argument; terminate cleanly on
# failure instead of continuing with an undefined `d`.
tryCatch({
  d <- read.csv(file = args[1], header = TRUE)  # spell out `header` (was partial-matched as `head`)
},
warning = function(w) {
  # BUG FIX: the original handler called print() with no argument, which is
  # itself an error — any warning from read.csv crashed the script. Report
  # the warning instead. NOTE(review): when this handler fires, tryCatch
  # unwinds and `d` is never assigned — confirm that is acceptable here.
  print(w)
},
error = function( err ) {
  print("An error occurred while reading the input file, terminating.")
  message( err )
  print(" \n ")
  quit()
})

# Nothing to do if the location column already exists, or the file is too
# small to aggregate meaningfully.
if ('location' %in% names(d))
  quit()
if (nrow(d) < 4)
  quit()
# Add the 'location' field by cleaning the address
d$location <- lapply(d$address, beautyLocation )
d$location <- as.character( d$location )
# Mean price per square meter, grouped by cleaned location
cc <- aggregate(d$price/d$meters, list(d$location), mean)
# Remove rows with value Inf (rows where meters was 0)
cc <- cc[apply(cc[c(2)],1,function(z) !any(z==Inf)),]
# Change column names
colnames(cc) <- c("location", "avgPrice")
# Derive the output filename from the input: <stem>_priceByLocation.csv
ifile <- as.character( sub(".*/", "", args[1]) )
ofile <- as.character( paste( strsplit(ifile, "[.]")[[1]], ".csv", sep="_priceByLocation")[[1]] )
write.csv(cc, file = ofile)
|
003d51dce62217f641625465506141e27cb81fc5
|
146b3f63e30de7b6cccd35b27d9a830a7d3ca364
|
/man/drawNormalValues.Rd
|
17e0608dffbf12eed446c3d23f4ac0d32423b0a5
|
[] |
no_license
|
codeForReviewer/kMajorityRule
|
cc749239f3de6c883cb17847a88eba8a8794e23f
|
307b53aa961a6f981c1d7776fb1ac3729ea29c03
|
refs/heads/master
| 2021-01-21T12:50:11.424292
| 2016-04-15T20:46:30
| 2016-04-15T20:46:30
| 30,224,968
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
rd
|
drawNormalValues.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/drawNormalValues.R
\name{drawNormalValues}
\alias{drawNormalValues}
\title{drawNormalValues
Draws ui or ei values from N(groupMean, groupStandardDeviation).}
\usage{
drawNormalValues(sizeOfDraw, groupMean, groupStandardDeviation)
}
\arguments{
\item{sizeOfDraw}{The number of elements that need to be drawn.}
\item{groupMean}{The mean of the normal distribution used to draw ui or ei for a group.}
\item{groupStandardDeviation}{The standard deviation of the normal distribution used to draw ui or ei for a group.}
}
\description{
drawNormalValues
Draws ui or ei values from N(groupMean, groupStandardDeviation).
}
|
5fbfce7dbdd846e2c095b6fcdb05aced489b52e4
|
9da7ff9073e6775a34e9432ae33f6dac36891a42
|
/man/detect.change.Rd
|
03078da7c0e5d24b94bca5719d1fbc42d1e87758
|
[] |
no_license
|
eik4862/Subway
|
9f2cb24e611769226275aa209589042c257c42c0
|
64c672132a8619afe30338942bca560d5646a6c7
|
refs/heads/master
| 2023-08-15T05:16:10.235607
| 2020-05-25T17:35:02
| 2020-05-25T17:35:02
| 266,563,864
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,511
|
rd
|
detect.change.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis.R
\name{detect.change}
\alias{detect.change}
\title{Change point detection}
\usage{
detect.change(data, type = c("mean", "var"), ..., plot = TRUE)
}
\arguments{
\item{data}{Time series data whose change point is to be detected.
The data should be either \code{ts} object or \code{numeric} vector.}
\item{type}{Type of change to be detected.
Should be one of \code{"mean"} and \code{"var"}. (default = "mean")}
\item{...}{Extra arguments for change point detection.
Refer to \code{\link{cpt.mean}} and \code{\link{cpt.var}}.}
\item{plot}{If \code{TRUE}, the change points will be plotted.
Otherwise, not. (default = TRUE)}
}
\value{
A list containing the following elements:\tabular{ll}{
\code{change.point} \tab Change point detection result(\code{cpt} object).\cr
\tab\cr
\code{plot} \tab \code{ggplot} object of the plot. (Returned only if \code{plot} is TRUE.)\cr
}
}
\description{
Take time series data and perform change point detection.
It uses \code{\link{cpt.mean}} or \code{\link{cpt.var}} from \code{changepoint} package for change point detection.
If \code{plot} is TRUE, it also plots the point where the changes take place.
}
\examples{
# 228 is the station code for SNU
data <- get.subway(228)
# Change point detection in mean
detect.change(data$total, type = "mean")
# Change point detection in variance
detect.change(data$total, type = "var")
}
\author{
Sanghyun Park, Daun Jeong, and Sehun Kim
}
|
66c97569fcf839bad3e481a76571eaae0b8417f2
|
ad4a497bec38cb4ad0e9f865d87005b9bece12a5
|
/Dissertation Code 2 Imputation.R
|
187ea8dd745f663253d9698557069f8ad352eef9
|
[] |
no_license
|
bartekbursa/Dissertation
|
f1abfa1fd4dc316ab87702957bc74425b5168a04
|
61a80d5ef401ec23aaf1a49c92a94acf02bd58eb
|
refs/heads/master
| 2020-03-18T19:37:45.430964
| 2015-09-27T18:41:21
| 2015-09-27T18:41:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,450
|
r
|
Dissertation Code 2 Imputation.R
|
#######################################################################
## R Code for Dissertation: Imputation and Setup for the Logit Model ##
#######################################################################
############# Things to do ##############
# Fix the multicollinear imputation regressions DONE
# Keep the predicted output required to reduce the size of the workspace
# Remove aliased variables to improve predictions - removal won't diminish predictive power
# List of individuals who made no journeys
# individuals.no.journeys <- outersect(unique(individual.household$IndividualID), unique(all.data$IndividualID))
library(clusterSEs)
library(data.table)
library(ggplot2)
library(car)
# i can move all of this stuff over to the base merge file for cleanup purposes
# as it is just renaming variables and turning them into factor class types
# Creating the real choice set
# Build the feasible choice set: every alternative starts feasible, then
# non-chosen alternatives are ruled out when the individual cannot use them
# (no bicycle owned; no driving licence, no household car, or no vehicle record).
all.data.long[,FeasibleAlternative:=1]
all.data.long[IsChosenAlternative==0 & Alternative=="Bicycle" & BicycleOwner==0, FeasibleAlternative:=0]
all.data.long[IsChosenAlternative==0 & Alternative=="Car" & (DrivingLicense ==0 | NumCarVan==0 | is.na(VehicleID)==TRUE), FeasibleAlternative:=0]
################
## Imputation ##
################
# Root-mean-square error of a fitted model's residuals.
rmse <- function(model) {
  res <- resid(model)
  sqrt(mean(res^2))
}
clusterSENB <- function (mod, dat, cluster) {
form <- mod$formula
variables <- all.vars(form)
clust.name <- all.vars(cluster)
used.idx <- which(rownames(dat) %in% rownames(mod$model))
dat <- dat[used.idx, ]
clust <- as.vector(unlist(dat[[clust.name]]))
G <- length(unique(clust))
ind.variables <- names(coefficients(mod))
cl <- function(dat, fm, cluster) {
M <- length(unique(cluster))
N <- length(cluster)
K <- fm$rank
dfc <- (M/(M - 1))
uj <- apply(estfun(fm), 2, function(x) tapply(x, cluster,
sum))
vcovCL <- dfc * sandwich(fm, meat. = crossprod(uj)/N)
coeftest(fm, vcovCL)
}
se.clust <- cl(dat, mod, clust)[ind.variables, 2]
beta.mod <- coefficients(mod)[ind.variables]
w <- beta.mod/se.clust
}
## Some PSUIDFactors do not exist for the long data set for each regression
##############
## Bus Time ##
##############
#
# length(all.data$TripPurpose)
# length(na.omit(all.data$BusRel))
# length(na.omit(all.data$BusStopWalkDist))
# length(na.omit(all.data$BusDifficulties))
# length(na.omit(all.data$WalkDifficulties))
# Imputation model for bus travel time: regress log travel time of observed
# bus trips on trip, person, and service characteristics.
# NOTE(review): lm() has no `family` argument (that belongs to glm()); the
# extra argument is passed through harmlessly but should be removed or the
# call switched to glm() — confirm intent.
bus.time.reg <- lm(formula=LogTravelTime~TripPurpose+Month+DayType+DayOfWeek+LogDistance+LogDistance2+BusRel+
BusStopWalkDist+BusDifficulties+WalkDifficulties+Sex+JourneyTime+BusFreq+PSUIDFactor,
family=gaussian, data=all.data[Mode=="Bus"])

# Nonparametric bandwidth selection (np package) for the same specification.
# NOTE(review): the npregbw argument is `ckertype`; `ckert` presumably works
# via partial matching — confirm and spell it out.
imputereg1 <- npregbw(formula=LogTravelTime~TripPurpose+Month+DayType+DayOfWeek+LogDistance+LogDistance2+BusRel+
BusStopWalkDist+BusDifficulties+WalkDifficulties+Sex+JourneyTime+BusFreq+PSUIDFactor,
data=all.data[Mode=="Bus"], ckert="epanechnikov")

# In-sample RMSE of the log-time model (used as the lognormal smearing term).
rmse.bus.time <- rmse(bus.time.reg)
# all.data[Mode=="Bus",PredictedTime:=Time]
# all.data[Mode=="Bus" & is.na(PredictedBusTime)==FALSE,PredictedTime:=exp(PredictedBusTime)*exp(0.5*rmse.bus.time^2)]
# all.data[Mode=="Bus",MPH:=6*Distance/PredictedBusTime]
# all.data[Mode=="Bus" & MPH>60, PredictedBusTime:=6*Distance/60]
# all.data[,MPH:=NULL]
plot(density(exp(fitted.values(bus.time.reg))*exp(0.5*rmse.bus.time^2)))
lines(density(all.data[Mode=="Bus",Time],to=200))
# cor(fitted.values(bus.time.reg), model.frame(bus.time.reg)$LogTravelTime)
# fitted.values.bus.time <- data.frame(exp(fitted.values(bus.time.reg))*exp(0.5*rmse.bus.time^2))
# View(fitted.values.bus.time)
#
# ctf <- data.frame(exp(fitted.values(bus.time.reg))*exp(0.5*rmse.bus.time^2))
# btf <- data.frame(exp(fitted.values(car.time.reg))*exp(0.5*car.time.rmse^2))
# colnames(ctf) <- "Travel Time (Minutes)"
# colnames(btf) <- "Travel Time (Minutes)"
#
# library(ggplot2)
#
# ggplot() + aes(x=`Travel Time (Minutes)`) + labs(title="Predicted Travel Time Distribution") + ylab("Density") + geom_density(data=ctf, linetype=4) + geom_density(data=btf)
#
# ggplot(data=all.data) + aes(x=Time) + geom_density(data=all.data[Mode=="Bus"]) + geom_density(data=all.data[Mode=="Car"])
# Some of the factors are not present in the top
# Impute log bus travel time for non-chosen bus alternatives, restricted to
# PSUs that appear in the estimation sample (prediction would fail for
# factor levels the model never saw).
all.data.long[PSUIDFactor %in% model.frame(bus.time.reg)$PSUIDFactor & Alternative=="Bus" & IsChosenAlternative==0,
LogTravelTime:=predict.glm(object=bus.time.reg, newdata=all.data.long[PSUIDFactor %in% model.frame(bus.time.reg)$PSUIDFactor & Alternative=="Bus" & IsChosenAlternative==0,])]
# bus.time.reg2 <- lm(formula=LogTravelTime~TripPurpose+Month+DayType+DayOfWeek+LogDistance+LogDistance2+BusRel+
# BusStopWalkDist+BusDifficulties+WalkDifficulties+Sex+JourneyTime+BusFreq+Region,
# family=gaussian, data=all.data[Mode=="Bus"])
#
#
# all.data.long[!(PSUIDFactor %in% model.frame(bus.time.reg)$PSUIDFactor) & Alternative=="Bus" & IsChosenAlternative==0,
# LogTravelTime:=predict.glm(object=bus.time.reg2, newdata=all.data.long[!(PSUIDFactor %in% model.frame(bus.time.reg)$PSUIDFactor) & Alternative=="Bus" & IsChosenAlternative==0,])]
#
# rm(bus.time.reg2)
# all.data.long[PSUIDFactor %in% model.frame(bus.time.reg)$PSUIDFactor & Alternative=="Bus" & IsChosenAlternative==0,]
# Check for multicollinearity between variables
# library(car)
# vif(bus.time.reg)
# na.omit(all.data[Mode=="Bus", .(LogTravelTime,TripPurpose,Month,DayType,DayOfWeek,LogDistance,JtPO,JtChem,JtGP,JtGroc,BusStopWalkDist,Sex,JourneyTime,BusFreq,BusRel,BusDifficulties,PSUIDFactor,WalkDifficulties, IndividualID)])
# bus.time.cse <- cluster.bs.glm(mod=bus.time.reg, dat=all.data[Mode=="Bus"], cluster=~IndividualID)
###############
## Bus Price ##
###############
# Imputation model for bus fares: estimated on observed full-fare bus trips
# only (no concession, single/return ticket types 0-4).
bus.price.reg <- glm(formula=Price~Month+DayOfWeek+BusFreq+JourneyTime+LogDistance+LogDistance2+AgeCategory+WorkStatus+PSUIDFactor,
data=all.data[Mode=="Bus" & Concession==0 & TicketType %in% 0:4],family=gaussian)

# Adding predicted values to the long form matrix — impute fares for
# non-chosen bus alternatives in PSUs present in the estimation sample.
all.data.long[PSUIDFactor %in% model.frame(bus.price.reg)$PSUIDFactor & Alternative=="Bus" & IsChosenAlternative==0,
Price:=predict.glm(object=bus.price.reg,
newdata=all.data.long[PSUIDFactor %in% model.frame(bus.price.reg)$PSUIDFactor & Alternative=="Bus" & IsChosenAlternative==0,])]
# Some issue with PSUID factor not appearing in the regression as it has been NAed out, could rectify this by fixing missing values
# unique(all.data.long[PSUIDFactor %in% model.frame(bus.price.reg)$PSUIDFactor, PSUID])
# unique(all.data[ ,PSUID])
# bus.price.cse <- cluster.bs.glm(mod=bus.time.reg,dat=all.data[Mode=="Bus" & TicketType %in% c("Annual Bus", "Weekly Bus", "Monthly Bus", "Annual Bus & Rail", "Weekly Bus & Rail", "Monthly Bus & Rail") & Concession==0], cluster=~IndividualID, report=T)
# summary(na.omit(all.data[Mode=="Bus" & TicketType %in% 0:4 & Concession==0,)]))
# all.data[Mode=="Bus" & TicketType %in% 0:4 & is.na(StageCost)==TRUE]
# View(all.data[Mode=="Bus" & TicketType %in% 0:4, .(StageCost,Month,DayType,TravDay,BusFreq,BusRel,JourneyTime,LogDistance,LogDistance2,AgeCategory,TicketType,WorkStatus,Concession)])
rmse.bus.price <- rmse(bus.price.reg)
# all.data.bus <- cbind(all.data[Mode=="Bus" & TicketType %in% 0:4 & Concession==0,.(IndividualID, TripID, StageID)][as.numeric(names(bus.price.reg$fitted.values))], fitted.values(bus.price.reg))
# names(all.data.bus)[4] <- "PredictedBusPrice"
# all.data <- merge(all.data, all.data.bus, by=c("IndividualID", "StageID", "TripID"), all.x=TRUE)
#all.data[is.na(PredictedBusPrice)==FALSE, Price:=PredictedBusPrice ]
# all.data[PredictedBusPrice<0,PredictedBusPrice:=0]
# all.data[Mode=="Bus" & Concession=="1", PredictedBusPrice:=0]
# all.data[Mode=="Bus" & as.numeric(TicketType)>=4, PredictedBusPrice:=0]
plot(density(all.data[Mode=="Bus" & Concession==0 & as.numeric(TicketType)<4 & Price>0 ,na.omit(Price)]))
# lines(density(exp(bus.time.reg.fitted), kernel="epanechnikov"))
# bus.price.cse <- cluster.bs.glm(mod=bus.price.reg, dat=all.data[Mode=="Bus"], cluster=~IndividualID, boot.reps=1)
# If price is negative set to zero not many cases less than 1%
# all.data.long[Alternative=="Bus" & IsChosenAlternative==0 & Price<=0,Price:=0]
# bus.price.reg2 <- glm(formula=Price~Month+DayOfWeek+BusFreq+JourneyTime+LogDistance+LogDistance2+AgeCategory+WorkStatus+Region,
# data=all.data[Mode=="Bus" & Concession==0 & TicketType %in% 0:4],family=gaussian)
#
# all.data.long[!(PSUIDFactor %in% model.frame(bus.price.reg)$PSUIDFactor) & Alternative=="Bus" & IsChosenAlternative==0,
# Price:=predict.glm(object=bus.price.reg2,
# newdata=all.data.long[!(PSUIDFactor %in% model.frame(bus.price.reg)$PSUIDFactor) & Alternative=="Bus" & IsChosenAlternative==0,])]
#
# rm(bus.price.reg2)
###############
## Walk Time ##
###############
# GLM for log walking time: trip purpose, distance (with age/sex interactions
# on both distance terms), reported walking difficulties, and PSU fixed effects.
walk.time.reg <- glm(formula=LogTravelTime~TripPurpose+LogDistance+AgeCategory*LogDistance
                     +Sex*LogDistance+AgeCategory*LogDistance2+Sex*LogDistance2+WalkDifficulties+PSUIDFactor, family=gaussian, data=all.data[Mode=="Walk"])
# Residual spread, used below in the lognormal back-transformation bias
# correction exp(0.5*sigma^2).
walk.time.rmse <-rmse(walk.time.reg)
# Fitted values keyed by individual/trip/stage (the merge-back into all.data
# is currently disabled below).
reg.data.walk <- cbind(all.data[Mode=="Walk",.(IndividualID, TripID, StageID)][as.numeric(names(walk.time.reg$fitted.values))], fitted.values(walk.time.reg))
names(reg.data.walk)[4] <- "PredictedWalkTime"
# all.data <- merge(all.data, reg.data.walk, by=c("IndividualID", "StageID", "TripID"), all.x=TRUE)
# all.data[is.na(PredictedWalkTime)==FALSE,Time:=exp(PredictedWalkTime)*exp(0.5*walk.time.rmse^2)]
#
# all.data[Mode=="Walk", MPH:=6*Distance/Time]
# all.data[MPH>25, Time:=6*Distance/25]
# all.data[,MPH:=NULL]
# Visual check: density of back-transformed fitted times vs observed times.
plot(density(exp(fitted.values(walk.time.reg))*exp(0.5*walk.time.rmse^2), kernel="epanechnikov", bw=4))
lines(density(all.data[Mode=="Walk",Time],bw=4))
# walk.time.cse <- cluster.bs.glm(mod=walk.time.reg, dat=all.data[Mode=="Walk"], cluster=~IndividualID, report=T, boot.reps=1)
## use the row names provided
## tomorrow, use lars code to check predicted walk times
# Impute log walk times for the non-chosen alternatives, restricted to PSUs
# that appear in the estimation sample (their fixed effect is otherwise
# undefined, so predict.glm would fail on unseen factor levels).
all.data.long[PSUIDFactor %in% model.frame(walk.time.reg)$PSUIDFactor & Alternative=="Walk" & IsChosenAlternative==0,
              LogTravelTime:=predict.glm(object=walk.time.reg,
                                         newdata=all.data.long[PSUIDFactor %in% model.frame(walk.time.reg)$PSUIDFactor & Alternative=="Walk" & IsChosenAlternative==0,])]
# walk.time.reg2 <- glm(formula=LogTravelTime~TripPurpose+LogDistance+AgeCategory*LogDistance
# +Sex*LogDistance+AgeCategory*LogDistance2+Sex*LogDistance2+WalkDifficulties+Region, family=gaussian, data=all.data[Mode=="Walk"])
#
# all.data.long[!(PSUIDFactor %in% model.frame(walk.time.reg)$PSUIDFactor) & Alternative=="Walk" & IsChosenAlternative==0,
# LogTravelTime:=predict.glm(object=walk.time.reg2,
# newdata=all.data.long[!(PSUIDFactor %in% model.frame(walk.time.reg)$PSUIDFactor) & Alternative=="Walk" & IsChosenAlternative==0,])]
#
# rm(walk.time.reg2)
################
## Cycle Time ##
################
bicycle.time.reg <- glm(formula=LogTravelTime~TripPurpose+Month+DayOfWeek+JourneyTime+PavementCond+CycleLanes+AgeCategory+Sex
+PSUIDFactor+AgeCategory*LogDistance+Sex*LogDistance+AgeCategory*LogDistance2+Sex*LogDistance2
, family=gaussian, data=all.data[Mode=="Bicycle",])
rmse.bicycle.time <- rmse(bicycle.time.reg)
# reg.data.walk <- cbind(all.data[Mode=="Bicycle",.(IndividualID, StageID, TripID)][as.numeric(names(bicycle.time.reg$fitted.values))], fitted(bicycle.time.reg))
# names(reg.data.walk)[4] <- "PredictedBicycleTime"
# all.data <- merge(all.data, reg.data.walk, by=c("IndividualID", "StageID", "TripID"), all.x=TRUE)
# all.data[,PredictedBicycleTime:=exp(PredictedBicycleTime)*exp(0.5*rmse.bicycle.time^2)]
# all.data[(6*Distance)/Time>35,PredictedBicycleTimeL:=(6*Distance/35)]
plot(density(all.data[Mode=="Bicycle",Time], to=200))
lines(density(exp(fitted.values(bicycle.time.reg))*exp(0.5*rmse.bicycle.time^2)))
all.data.long[PSUIDFactor %in% model.frame(bicycle.time.reg)$PSUIDFactor & Alternative=="Bicycle" & IsChosenAlternative==0,
LogTravelTime:=predict.glm(bicycle.time.reg, newdata=all.data.long[PSUIDFactor %in% model.frame(bicycle.time.reg)$PSUIDFactor & Alternative=="Bicycle" & IsChosenAlternative==0,])]
# bicycle.time.reg2 <- glm(formula=LogTravelTime~TripPurpose+Month+DayOfWeek+JourneyTime+PavementCond+CycleLanes+AgeCategory+Sex
# +Region+AgeCategory*LogDistance+Sex*LogDistance+AgeCategory*LogDistance2+Sex*LogDistance2
# , family=gaussian, data=all.data[Mode=="Bicycle",])
#
#
# all.data.long[!(PSUIDFactor %in% model.frame(bicycle.time.reg)$PSUIDFactor) & Alternative=="Bicycle" & IsChosenAlternative==0,
# LogTravelTime:=predict.glm(bicycle.time.reg2, newdata=all.data.long[!(PSUIDFactor %in% model.frame(bicycle.time.reg)$PSUIDFactor) & Alternative=="Bicycle" & IsChosenAlternative==0,])]
#
# rm(bicycle.time.reg2)
# bicycle.time.cse <- cluster.bs.glm(mod=bicycle.time.reg, dat=all.data[Mode=="Bicycle"], cluster=~IndividualID, report=TRUE)
##################
## Driving time ##
##################
# Linear model for log car-driving time; PSUID*distance interactions let the
# distance gradient vary by area (e.g. congestion differences).
car.time.reg <- lm(formula=LogTravelTime~TripPurpose*LogDistance+Month*LogDistance+DayOfWeek+DayType+JourneyTime*LogDistance+
                     JourneyTime*LogDistance2+DisabledDriver+PSUIDFactor*LogDistance, data=all.data[Mode=="Car"])
car.time.rmse <- rmse(car.time.reg)
# Fitted values keyed by individual/stage/trip so they can be merged back on.
reg.car.data <- cbind(all.data[Mode=="Car",.(IndividualID, StageID, TripID)][as.numeric(names(car.time.reg$fitted.values))], fitted(car.time.reg))
# BUG FIX: the car fitted values live in reg.car.data; the original code
# renamed and merged reg.data.walk (the walk-time object from the earlier
# section), so walk fits were merged in as "PredictedCarDriverTime".
names(reg.car.data)[4] <- "PredictedCarDriverTime"
all.data <- merge(all.data, reg.car.data, by=c("IndividualID", "StageID", "TripID"), all.x=TRUE)
## This is for cleaning up the imputations
# all.data[Mode=="Car", TimeTest:=Time]
# all.data[is.na(PredictedCarDriverTime)==FALSE,TimeTest:=exp(PredictedCarDriverTime)*exp(0.5*car.time.rmse^2)]
# all.data[6*Distance/Time>70, TimeTest:=6*Distance/70]
# Visual check: back-transformed fitted densities (lognormal bias correction
# exp(0.5*sigma^2)) against observed car travel times.
plot(density(exp(fitted.values(car.time.reg))*exp(0.5*car.time.rmse^2),bw=2 ,to=100))
lines(density(all.data[Mode=="Car",Time]))
# DisabledDriver levels 3 and 4 mean the person no longer drives at all, so
# they will never choose driving as a mode; restricting to levels 0-2 also
# avoids an issue with aliased coefficients.
all.data.long[Alternative=="Car" & DisabledDriver %in% c(0,1,2) & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(car.time.reg)$PSUIDFactor ,
              LogTravelTime:=predict.glm(car.time.reg, newdata=all.data.long[Alternative=="Car" & DisabledDriver %in% c(0,1,2) & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(car.time.reg)$PSUIDFactor,])]
# car.time.cse <- cluster.bs.glm(mod=car.time.reg, data=all.data.car, cluster=~IndividualID, report=TRUE)
###################
## Driving Price ##
###################
## Predicted litres/100km
all.data.long[Alternative=="Car" & is.na(VehicleID)==FALSE, FuelConsumption:=predict.lm(size.efficiency.reg, newdata=all.data.long[Alternative=="Car" & is.na(VehicleID)==FALSE,]) ]
all.data.long[Alternative=="Car" & FuelType=="Petrol", Price:=exp(LogDistance)*1.609334/100*FuelConsumption*(PetrolPrice)]
all.data.long[Alternative=="Car" & FuelType=="Diesel", Price:=exp(LogDistance)*1.609334/100*FuelConsumption*(DieselPrice)]
plot(density(all.data.long[FuelType=="Petrol" & Alternative == "Car", Price/100], to=10))
lines(density(all.data.long[FuelType=="Diesel" & Alternative == "Car", Price/100], to=10))
## we know full consumption per l/100km
## we know the cost per litre
# Add to data.long as well.
#
# fuel.pred <- all.data[Mode=="Car" & is.na(Price)==FALSE, .(Price, VehicleID, TripID)]
# setnames(fuel.pred, "Price", "JourneyPrice")
# all.data.long <- merge(all.data.long, fuel.pred, by=c("VehicleID", "TripID"), all.x=TRUE)
# all.data.long[Alternative=="Car" & IsChosenAlternative==1, Price:=JourneyPrice]
# all.data.long[,JourneyPrice:=NULL]
#
# all.data.long[Alternative=="Car" & is.na(VehicleID)==FALSE & IsChosenAlternative==0, Price:=predict.lm(size.efficiency.reg, newdata=all.data.long[Alternative=="Car" & is.na(VehicleID)==FALSE & IsChosenAlternative==0,])]
########################
## Car Passenger Time ##
########################
## can justify bandwidth smoothing due to people recording travel time in a non-continous manner
carp.time.reg <- glm(formula=LogTravelTime~TripPurpose*LogDistance+Month*LogDistance+DayOfWeek+DayType+JourneyTime*LogDistance+
JourneyTime*LogDistance2+Region*LogDistance+Region*LogDistance2+PSUIDFactor, dat=all.data[Mode=="Passenger"], family=gaussian)
rmse.carp <- rmse(carp.time.reg)
plot(density(exp(fitted.values(carp.time.reg))*exp(0.5*rmse.carp^2),bw=4, to=200))
lines(density(all.data[Mode=="Passenger" & Time>0,Time], to=200, bw=4))
## Rank deficient
all.data.long[Alternative=="Passenger" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(carp.time.reg)$PSUIDFactor,
LogTravelTime:=predict.glm(carp.time.reg, newdata=all.data.long[PSUIDFactor %in% model.frame(carp.time.reg)$PSUIDFactor & Alternative=="Passenger" & IsChosenAlternative==0,])]
# carp.time.reg2 <- glm(formula=LogTravelTime~TripPurpose*LogDistance+Month*LogDistance+DayOfWeek+DayType+JourneyTime*LogDistance+
# JourneyTime*LogDistance2+Region*LogDistance+Region*LogDistance2+Region, dat=all.data[Mode=="Passenger"], family=gaussian)
#
# all.data.long[Alternative=="Passenger" & IsChosenAlternative==0 & !(PSUIDFactor %in% model.frame(carp.time.reg)$PSUIDFactor),
# LogTravelTime:=predict.glm(carp.time.reg2, newdata=all.data.long[!(PSUIDFactor %in% model.frame(carp.time.reg)$PSUIDFactor) & Alternative=="Passenger" & IsChosenAlternative==0,])]
#
# rm(carp.time.reg2)
# carp.time.cse <- cluster.bs.glm(mod=carp.time.reg, data=all.data.carp, cluster=~IndividualID, report=TRUE)
######################
## Rail Travel Time ##
######################
rail.time.reg <- glm(formula=LogTravelTime~TripPurpose+Month+DayOfWeek+DayType+JourneyTime*LogDistance+Region*LogDistance
+WalkTimeRail+BusTimeRail+RailReliability+PSUIDFactor, data=all.data[Mode=="Rail",], family=gaussian)
rmse.rail.time <- rmse(rail.time.reg)
plot(density(all.data[Mode=="Rail", Time]))
lines(density(exp(fitted.values(rail.time.reg))*exp(0.5*rmse.rail.time^2)))
all.data.long[Alternative=="Rail" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(rail.time.reg)$PSUIDFactor,
LogTravelTime:=predict.glm(rail.time.reg,
newdata=all.data.long[Alternative=="Rail"
& IsChosenAlternative==0
& PSUIDFactor %in% model.frame(rail.time.reg)$PSUIDFactor,,])]
#
# rail.time.reg2 <- glm(formula=LogTravelTime~TripPurpose+Month+DayOfWeek+DayType+JourneyTime*LogDistance+Region*LogDistance
# +WalkTimeRail+BusTimeRail+RailReliability+Region, data=all.data[Mode=="Rail",], family=gaussian)
#
# all.data.long[Alternative=="Rail" & IsChosenAlternative==0 & !(PSUIDFactor %in% model.frame(rail.time.reg)$PSUIDFactor),
# LogTravelTime:=predict.glm(rail.time.reg2,
# newdata=all.data.long[Alternative=="Rail"
# & IsChosenAlternative==0
# & !(PSUIDFactor %in% model.frame(rail.time.reg)$PSUIDFactor),,])]
#
#
# rm(rail.time.reg2)
###############
## Rail Cost ##
###############
rail.price.reg <- glm(formula=Price~Month+DayOfWeek+JourneyTime*LogDistance+JourneyTime*LogDistance2
+RailReliability+RailFreq+AgeCategory+PSUIDFactor,
family=gaussian, data=all.data[Mode=="Rail",])
table(all.data.long$RailFreq)
rmse.rail.price <- rmse(rail.price.reg)
# table(all.data$RailFreq)
plot(density(all.data[Mode=="Rail" & is.na(Price)==FALSE, Price]))
lines(density(fitted.values(rail.price.reg)))
all.data.long[Alternative=="Rail"
& IsChosenAlternative==0
& PSUIDFactor %in% model.frame(rail.price.reg)$PSUIDFactor,
Price:=predict.glm(rail.price.reg,
newdata=all.data.long[Alternative=="Rail"
& IsChosenAlternative==0
& PSUIDFactor %in% model.frame(rail.price.reg)$PSUIDFactor,])]
# rail.price.reg2 <- glm(formula=Price~Month+DayOfWeek+JourneyTime*LogDistance+JourneyTime*LogDistance2
# +RailReliability+RailFreq+AgeCategory+Region,
# family=gaussian, data=all.data[Mode=="Rail",])
#
# all.data.long[Alternative=="Rail"
# & IsChosenAlternative==0
# & !(PSUIDFactor %in% model.frame(rail.price.reg)$PSUIDFactor),
# Price:=predict.glm(rail.price.reg2,
# newdata=all.data.long[Alternative=="Rail"
# & IsChosenAlternative==0
# & !(PSUIDFactor %in% model.frame(rail.price.reg)$PSUIDFactor),])]
######################
## Taxi Travel Time ##
######################
taxi.time.reg <- glm(formula=LogTravelTime~TripPurpose*LogDistance+Month*LogDistance+DayOfWeek+JourneyTime*LogDistance
+JourneyTime*LogDistance2+Region*LogDistance+Region*LogDistance2+PSUIDFactor, data=all.data[Mode=="Taxi",], family=gaussian)
rmse.tax.time <- rmse(taxi.time.reg)
plot(density(exp(fitted.values(taxi.time.reg))*exp(0.5*rmse.tax.time^2)))
lines(density(all.data[Mode=="Taxi", Time]))
all.data.long[Alternative=="Taxi" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(taxi.time.reg)$PSUIDFactor,
LogTravelTime:=predict.glm(taxi.time.reg,
newdata=all.data.long[Alternative=="Taxi" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(taxi.time.reg)$PSUIDFactor ,])]
################
## Taxi Price ##
################
taxi.price.reg <- glm(formula=Price~Month+DayOfWeek+JourneyTime*LogDistance+
JourneyTime*LogDistance2+Region*LogDistance+Region*LogDistance2+PSUIDFactor,
data=all.data[Mode=="Taxi" & Price>0,])
rmse.taxi.price <- rmse(taxi.price.reg)
plot(density(fitted.values(taxi.price.reg)))
lines(density(all.data[Mode=="Taxi" & Price>0, Price]))
all.data.long[Alternative=="Taxi" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(taxi.price.reg)$PSUIDFactor,
Price:=predict.glm(taxi.price.reg,
newdata=all.data.long[Alternative=="Taxi" & IsChosenAlternative==0 & PSUIDFactor %in% model.frame(taxi.price.reg)$PSUIDFactor,])]
## Set the price of these alternatives to zero
all.data.long[Alternative %in% c("Walk", "Bicycle", "Passenger"), Price:=0]
## Keep the feasible set of alternatives
all.data.long <- all.data.long[FeasibleAlternative==1]
## Keep the fitted values for density plots (the model objects themselves are
## removed below to shrink the saved workspace image).
## BUG FIX: bus.price.reg.fitted was taken from bus.time.reg (copy/paste
## slip); it must come from bus.price.reg before that object is rm()'d.
bus.price.reg.fitted <- fitted.values(bus.price.reg)
bus.time.reg.fitted <- fitted.values(bus.time.reg)
walk.time.reg.fitted <- fitted.values(walk.time.reg)
bicycle.time.reg.fitted <- fitted.values(bicycle.time.reg)
car.time.reg.fitted <- fitted.values(car.time.reg)
carp.time.reg.fitted <- fitted.values(carp.time.reg)
rail.price.reg.fitted <- fitted.values(rail.price.reg)
rail.time.reg.fitted <- fitted.values(rail.time.reg)
taxi.price.reg.fitted <- fitted.values(taxi.price.reg)
taxi.time.reg.fitted <- fitted.values(taxi.time.reg)
## Keep the model frames as well so the fitted models can be dropped before
## saving a new image.
## BUG FIX: bus.price.reg.frame likewise came from bus.time.reg.
bus.price.reg.frame <- model.frame(bus.price.reg)
bus.time.reg.frame <- model.frame(bus.time.reg)
walk.time.reg.frame <- model.frame(walk.time.reg)
bicycle.time.reg.frame <- model.frame(bicycle.time.reg)
car.time.reg.frame <- model.frame(car.time.reg)
carp.time.reg.frame <- model.frame(carp.time.reg)
rail.price.reg.frame <- model.frame(rail.price.reg)
rail.time.reg.frame <- model.frame(rail.time.reg)
taxi.price.reg.frame <- model.frame(taxi.price.reg)
taxi.time.reg.frame <- model.frame(taxi.time.reg)
## Clean up the work space: the model objects embed the data and are large,
## so drop them now that fitted values and model frames have been captured.
rm(carp.time.reg,
   car.time.reg,
   bus.price.reg,
   bus.time.reg,
   rail.price.reg,
   rail.time.reg,
   taxi.price.reg,
   taxi.time.reg,
   walk.time.reg,
   bicycle.time.reg)
rm(reg.car.data, reg.data.walk)
## Saving a copy of the workspace image ##
##
## could possibly move this all to the .Rnw file
# all.data[PSUID==2012000002 & IndividualID==2012000015 & HouseholdID==2012000007 ,.(Mode,Price, TicketType, TicketTripCost)]
# vehicle.data[HouseholdID==2012000007,]
# rm(a, all.data.bus, diesel.data, diesel.data.melt, modes, petrol.data, petrol.data.melt, car.time.reg, carp.time.reg, bus.time.reg, bus.price.reg)
# rm(reg.car.data, reg.data.walk)
# rm(fuel.pred, regions.merge, vehicle.data, vehicle.efficiency.data)
# rm(walk.time.reg)
# Convert car driving price from pence to pounds to get it in line with the other prices
all.data.long[Price<=0, Price:=0]
all.data.long[Alternative=="Car", Price:=Price/100]
all.data.long[Alternative=="Car", Price:=Price + exp(LogDistance)*VehFixedCosts]
# all.data.long <- all.data.long[TripID %in% all.data.long[IsChosenAlternative==1,TripID],]
# Test of replacing with means brekaing down on category - this is the best for the moment until the replace PSUID code works
#
# all.data.long[is.na(LogTravelTime)==FALSE, LTTDecile:=findInterval(LogTravelTime, quantile(LogTravelTime, probs=seq(0.1,1,0.1)))]
# all.data.long[is.na(Price)==FALSE, PriceDecile:=findInterval(Price, quantile(Price, probs=seq(0.1,1,0.1)))]
# all.data.long[,MeanTravelTime:=mean(LogTravelTime, na.rm=TRUE), by=.(Alternative, Region, TripPurpose, AgeCategory, DayOfWeek)]
# all.data.long[is.na(LogTravelTime)==TRUE, LogTravelTime:=MeanTravelTime]
# all.data.long[,MeanPrice:=mean(Price), by=.(Alternative, Region, TripPurpose, AgeCategory, DayOfWeek)]
# all.data.long[is.na(Price)==TRUE, Price:=MeanPrice]
#### Creating the sample ####
# this removes all trip IDs that do not have price and travel time data for each alternative.
incomplete.sets <- all.data.long[is.na(Price)==TRUE | is.na(LogTravelTime)==TRUE, length(is.na(LogTravelTime)==TRUE), by=TripID]
all.data.long <- all.data.long[!(TripID %in% incomplete.sets$TripID)]
# all.data.long.ss <- unique(all.data.long[!(TripID %in% all.data.long.cc$TripID), TripID])
# rm(all.data.long.cc)
# Taking a subsample of the complete set to train the model
# Removing unneccesary columns
## This converts the dataset into the format required
## length(unique(all.data.longss$TripID))
all.data.long[,IsChosenAlternative:=as.logical(IsChosenAlternative)]
all.data.long[,TravelTime:=exp(LogTravelTime)]
all.data.long[,Distance:=exp(LogDistance)]
setnames(all.data.long, "Social class of individuals", "SocialClass")
setnames(all.data.long, "Household Income Quintiles - Diary Sample 2012", "IncomeQuintiles")
setnames(all.data.long, "Mobility difficulties summary", "Mobility")
all.data.long[,SocialClass:=factor(SocialClass)]
all.data.long[,IncomeQuintiles:=factor(IncomeQuintiles)]
all.data.long[,Mobility:=factor(Mobility)]
all.data.long[,Alternative:=factor(Alternative)]
# Select 5000 Individuals
all.data.long.ss <- all.data.long[!(AgeCategory %in% c("5-10", "11-16", "Pensioner")) & Region %in% c(12,13),.(IndividualID, TripID, IncomeQuintiles, TripPurpose, AgeCategory, Sex, TravelTime, Price, IsChosenAlternative, DrivingLicense, BicycleOwner, Minority, HHoldNumPeople, Alternative, Region)]
all.data.long.ss[,IID:=c(1:length(unique(IndividualID))), by=IndividualID]
# all.data.long.ss <- data.table(all.data.long.ss)
# sampleindividuals <- sample(unique(all.data.long.ss$IndividualID), size=432)
# all.data.long.ss <- all.data.long.ss[IndividualID %in% sampleindividuals,]
# sampletrips <- sample(unique(all.data.long.ss$TripID), size=5000)
# all.data.long.ss <- all.data.long.ss[TripID %in% sampletrips,]
# rm(sampleindividuals, sampletrips)
library(mlogit)
all.data.long.ss <- mlogit.data(data=all.data.long.ss,
choice="IsChosenAlternative",
shape="long",
alt.var = "Alternative",
#alt.levels=c("Bus", "Walk", "Bicyle", "Car", "Passenger", "Rail", "Taxi"),
id.var = "IndividualID",
chid.var="TripID")
all.data.long.ss2 <- mlogit.data(data=all.data.long.ss,
choice="IsChosenAlternative",
shape="long",
alt.var = "Alternative",
#alt.levels=c("Bus", "Walk", "Bicyle", "Car", "Passenger", "Rail", "Taxi"),
id.var = "IndividualID",
chid.var="TripID")
all.data.long.ss$IndividualID <- as.numeric(all.data.long.ss$IndividualID)
save.image("Data.Rda")
|
6c6f5ce56c6dc6bee14ca3c5cd77999827420cae
|
b2eb1052fe4e9a8ce458f3c786c8449330e0c09f
|
/man/formatCode.Rd
|
7d66d4bfbcf9e77b50ade606caffa259379e532d
|
[] |
no_license
|
duncantl/XDynDocs
|
9245e9c25ef5cc4b575c4a11e85552c09c1bcd43
|
6612b35323a195c2b9646a2591163092935f328f
|
refs/heads/master
| 2023-01-09T02:51:24.797126
| 2022-12-22T17:01:08
| 2022-12-22T17:01:08
| 40,777,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
rd
|
formatCode.Rd
|
\name{formatCode}
\alias{formatCode}
\title{Format R code for displaying in a document}
\description{
  This function is used to take code as text and format it
  for display with the appropriate width (number of characters),
  etc.
It is exported so that it can be called from XSL code that performs
the formatting.
}
\usage{
formatCode(node, defaultWidth = options()$width, ...)
}
\arguments{
\item{node}{the XML node that contains the R code to be parsed and
formatted.
The node can have attributes that control the formatting, e.g.
    \code{width} which is passed to \code{\link[base]{deparse}}.}
\item{defaultWidth}{the width to use in the call to \code{deparse} if the node does not have an explicit
r:width attribute.}
\item{\dots}{additional parameters that are currently
ignored. Intended for methods.}
}
\value{
A character string.
}
\author{
Duncan Temple Lang
}
\note{
We may enhance this to format the results in HTML or FO or LaTeX.
In fact there is a collection of methods in highlight.R that are
overridden but are much more general.
}
\seealso{
\code{\link{dynDoc}}
}
\examples{
}
\keyword{programming}
\keyword{output}
|
5efb6818a32d874e08f2bbee312e1bddbcc20d9e
|
a0e7e2030de5f57222c393fb31638559954496fb
|
/sample.R
|
61cb8d313f64eef00470afbc57467a66961e2d55
|
[] |
no_license
|
Amelia820/softcarp_report
|
c6436ad21e22cbb193113004e06eb6364f92906a
|
03f9029efbf71b5d12e7561d13f5d4b836a2360d
|
refs/heads/master
| 2020-08-04T04:41:33.278771
| 2019-10-01T05:00:24
| 2019-10-01T05:00:24
| 212,010,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45
|
r
|
sample.R
|
# comment on Steph's humour
print("hahaha")
|
fdcb0a47e9e1e3159c6ca1294845f3068829801a
|
a94876abe870a118f7ec2eb42362a8d05c8e049d
|
/raleigh/code/01-pullYelpData.R
|
dfef8614afd309f97b2bcedecf651e584bae3d27
|
[] |
no_license
|
CityofSyracuse/restaurant_inspections
|
dec5d7e0a8a0823b62cfc33fbb97f9da48a6396e
|
09c870888f67dce2361104a4fb6cce62244f64c0
|
refs/heads/master
| 2021-01-17T15:32:32.524096
| 2017-03-02T22:22:13
| 2017-03-02T22:24:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,862
|
r
|
01-pullYelpData.R
|
library(httr)
library(jsonlite)
library(dplyr)
library(plyr)
library(readr)
library(data.table)
# Flush accumulated Yelp results to disk (side effects only).
#
# yelpList:       list of per-restaurant tables of business metadata
# yelpCategories: list of per-restaurant logical category-membership tables
# writeColNames:  whether to write a header row; the caller passes TRUE only
#                 on the first flush, since both files are opened in append
#                 mode.
writeData <- function(yelpList, yelpCategories, writeColNames){
  # Stack the per-restaurant tables; fill=TRUE because Yelp does not return
  # every column (e.g. price) for every business.
  yelpDF <- rbindlist(yelpList, use.names=TRUE, fill=TRUE, idcol=NULL)
  yelpDF <- unique(yelpDF) # delete duplicates
  write.table(yelpDF, file = "yelpData.csv", append = TRUE, row.names = TRUE, col.names = writeColNames, sep = ",")
  # Same treatment for the category-membership tables.
  categoryDF <- rbindlist(yelpCategories, use.names=TRUE, fill=TRUE, idcol=NULL)
  categoryDF <- unique(categoryDF)
  write.table(categoryDF, file = "yelpRestaurantCategories.csv", append = TRUE, row.names = TRUE, col.names = writeColNames, sep = ",")
}
accessToken <- "sDLP7PHbl53ruD4taAbSUl3kezQED4blRTEuSRPvf0w5a7C9nrndLl1R8sl4_FzoFZDQoN_Jhl1YuU-EmIdg_lh9zQZrx-pVpEOXV9tKsWmzIrdVsu9jJ_KPeN0kWHYx"
yelpUrl <- "https://api.yelp.com/v3/businesses/search?"
features = c("name",
"id",
"is_closed",
"rating",
"review_count",
"location.address1",
"location.zip_code",
"coordinates.latitude",
"coordinates.longitude",
"price",
"phone")
restaurant_names <- read_csv("raleigh/data/Restaurants_in_Wake_County.csv") %>%
data.table() %>%
dplyr::select(Name,
X,
Y)
restaurant_categories <- read_csv("yelpWork/foodCategories.csv")$CATEGORIES
numCats <- length(restaurant_categories)
yelpList <- list()
yelpCategories <- list()
WriteColNames <- TRUE
num_restaurants <- nrow(restaurant_names)
# Query the Yelp search API once per county-registered restaurant, matching by
# name fragment plus coordinates, and accumulate results for periodic flushes.
for(i in 1:num_restaurants){
  print(paste("Processing", i, "of", num_restaurants))
  row <- restaurant_names[i,]
  #Tricky -doesn't handle spaces but we need to ensure we get the actual restaurant. Using first word.
  #You can't merely delete the spaces because if there's a number after the name (indicating one of multiple
  #versions of this restaurant, ie Starbucks 54), it won't work.
  restaurantName <-sub(" [0-9]", "", sub("\\#.*", "", row$Name))
  # URL-encode spaces for the query string.
  paramName <- gsub("\\ ", "%20", restaurantName)
  params <- paste0("term=restaurants,", paramName, "&",
                   "latitude=", row$Y, "&",
                   "longitude=", row$X, "&",
                   "sort_by=", "best_match", "&",
                   "limit=", "20")
  location_data <- GET(paste0(yelpUrl, params),
                       add_headers(Authorization = paste0("Bearer ",accessToken)))
  location_content <- content(location_data, type = "text", encoding = "UTF-8");
  jsondat <- fromJSON(location_content);
  if("businesses" %in% names(jsondat) && length(jsondat$businesses) > 0){
    bus_df <- flatten(data.frame(jsondat$businesses));
    #sometimes not all of the columns get returned back (ie price).
    availableFeatures <- intersect(features, colnames(bus_df))
    df <- bus_df %>%
      dplyr::select(one_of(availableFeatures));
    # Extract categories as comma-separated string (will be split later).
    df$categories <- unlist(lapply(bus_df$categories,
                                   function(x) paste0(x$alias, collapse=",")))
    bus_cats <- bus_df[,c("id","categories")]
    # One logical column per known food category, plus the business id.
    bus_categoryDF <- setNames(data.frame(matrix(FALSE, ncol = (numCats + 1), nrow = length(bus_cats$id))), c("bus_id", restaurant_categories))
    row.names(df) <- bus_df$id
    row.names(bus_categoryDF) <- bus_df$id
    # BUG FIX: the inner loop previously used `i` as its index, clobbering the
    # outer restaurant counter (so yelpList[[i]] stored at the wrong slot),
    # and referenced an undefined variable `id`. Use a separate index `j` and
    # take the business id from bus_cats.
    for(j in seq_len(nrow(bus_cats))){
      bus_id <- bus_cats$id[j]
      bus_categoryDF[j, "bus_id"] <- bus_id
      # Flag the categories this business belongs to (rows are keyed by id).
      cats <- intersect(bus_cats[j,]$categories[[1]]$alias, restaurant_categories)
      bus_categoryDF[bus_id, cats] <- TRUE
    }
    yelpList[[i]] <- df;
    yelpCategories[[i]] <- bus_categoryDF;
  }
  # BUG FIX: the flush condition was `if(i %% 50)`, which is truthy for every
  # i that is NOT a multiple of 50. Flush the accumulated results every 50th
  # restaurant instead, writing the header only on the first flush.
  if(i %% 50 == 0){
    writeData(yelpList, yelpCategories, WriteColNames);
    yelpList <- list();
    yelpCategories <- list();
    WriteColNames <- FALSE;
  }
}
# Final flush of any remaining (un-flushed) results.
writeData(yelpList, yelpCategories, WriteColNames);
|
f1314903dc6faf9f334c67bbbc83db46e88420b0
|
4d5cd37123da9f827a46f4b20f7dd4e4a4d1c2a0
|
/construction-notes.R
|
5e1d867cc606d6e43db13cffa3276805b684bc81
|
[] |
no_license
|
conjugateprior/bara-data
|
2b624dea25aa0b7447331bd823edcd31948545e6
|
d5673cbf2120ec97bee46464439f0cb637a2ca02
|
refs/heads/master
| 2020-03-29T02:18:29.156386
| 2017-06-17T19:34:39
| 2017-06-17T19:34:39
| 94,643,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,746
|
r
|
construction-notes.R
|
# Processing notes
library(rvest)
deb <- read_html("abortion-debate-hansard.html")
mes <- html_nodes(deb, "div[class='hentry member_contribution']")
speakers <- html_attr(html_node(mes, "cite a"), "title")
voting <- list("abs"=c("Mr William Deedes", "Dr Horace King", "Sir John Hobson"),
"no"=c("Mr Kevin McNamara", "Mr Norman St John-Stevas",
"Mr Peter Mahon", "Mr William Wells", "Mrs Jill Knight"))
# rest voted yes
# by speaker turn
turns <- html_text(mes)
turn_data <- data.frame(speaker=speakers, text=turns,
stringsAsFactors=FALSE)
turn_data$vote <- ifelse(turn_data$speaker %in% voting$abs, "abs",
ifelse(turn_data$speaker %in% voting$no, "no", "yes"))
# by paragraph
paras <- list()
# Turn one member-contribution node into a data frame with one row per
# paragraph, each tagged with the contributing speaker's name.
make_block <- function(m){
  who <- html_attr(html_node(m, "cite a"), "title")
  paragraphs <- html_text(html_nodes(m, "p"))
  data.frame(speaker = who,
             text = paragraphs,
             stringsAsFactors = FALSE)
}
para_data <- do.call(rbind, lapply(mes, make_block))
para_data$vote <- ifelse(para_data$speaker %in% voting$abs, "abs",
ifelse(para_data$speaker %in% voting$no, "no", "yes"))
# by speaker
by_speaker <- split(turn_data$text, turn_data$speaker)
speaker_contribs <- unlist(lapply(by_speaker, paste, collapse="\n"))
speaker_data <- data.frame(speaker=names(by_speaker),
text=speaker_contribs,
stringsAsFactors=FALSE)
speaker_data$vote <- ifelse(speaker_data$speaker %in% voting$abs, "abs",
ifelse(speaker_data$speaker %in% voting$no, "no", "yes"))
corpus_bara_speaker <- corpus(speaker_data)
corpus_bara_para <- corpus(para_data)
corpus_bara_turn <- corpus(turn_data)
|
4d988c2746d90c340d4c9c62411a25ee85f40927
|
68dcbb1717f26c91fb9b4f7c1a573381ba709bfb
|
/R/auth.r
|
a2df81d27fcf33c0db5d4afacf1299ec7b3545df
|
[
"BSD-2-Clause"
] |
permissive
|
wrathematics/rotp
|
253645a55fac2282c1760ecf226f9a8ab6165ba8
|
df3da2da5160e81e996c128e6ff842c3c1550d63
|
refs/heads/master
| 2021-06-06T21:56:17.676750
| 2020-06-28T17:12:47
| 2020-06-28T17:12:47
| 136,544,990
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,420
|
r
|
auth.r
|
# Interactive TOTP display loop for a single decrypted key.
#
# key: the decrypted TOTP secret.
#
# Repeatedly prints the current 6-digit one-time password with a per-second
# countdown of the time remaining in the current window, then generates a
# fresh password when the window rolls over.
#
# NOTE(review): the outer loop is `while (TRUE)` and only exits via a user
# interrupt (Ctrl+c), so the scrubbing code after the loop is unreachable in
# normal use -- confirm whether an interrupt handler was intended.
auth_from_key = function(key)
{
  cat("\nCtrl+c to exit\n")
  while (TRUE)
  {
    rem = get_remaining_time()
    p = totp_wrapper(key)
    # Zero-pad to the conventional 6-digit OTP display.
    p_str = sprintf("%06d", p)
    while (rem > 0)
    {
      # \r rewrites the same console line each second.
      cat('\r', paste0(p_str, " (", sprintf("%2d", rem), " seconds remaining ", progress_bar(rem), ") "))
      utils::flush.console()
      rem = rem - 1L
      Sys.sleep(1)
    }
  }
  # Scrub the key and force garbage collection so the secret does not linger
  # in memory (unreachable -- see NOTE above).
  rm(key)
  invisible(gc(verbose=FALSE, reset=TRUE))
  invisible(NULL)
}
#' auth
#'
#' Interactive authenticator interface. Similar in scope to Google
#' Authenticator. To use, you must first set up your database of keys
#' using \code{otpdb()}.
#'
#' @details
#' Only the requested database key will be decrypted in memory. Keys are never
#' decrypted on disk.
#'
#' @seealso \code{\link{otpdb}}
#' @references \url{https://en.wikipedia.org/wiki/Google_Authenticator}
#' @export
auth = function()
{
  # Preconditions: must be an interactive session with an encryption key set up.
  check.is.interactive()
  check.has.pubkey()
  # Refuse to run without a populated key database.
  if (!file.exists(db_path()) || db_len() == 0)
    stop("No keys stored in db! Add some by first running otpdb()\n")
  choices = db_list()
  utils::flush.console()
  # Let the user pick a stored key (masked menu), or bail out on Q/q.
  selection = otpdb_getchoice(choices, "$ ", "Pick a key or enter Q/q to exit", mask=TRUE)
  if (selection %in% c("Q", "q"))
    return(invisible())
  # Decrypt only the chosen key in memory, then hand it to the display loop.
  keyname = choices[selection]
  auth_from_key(decrypt(db_getkey(keyname)$encrypted_key))
  invisible(NULL)
}
|
63a3baeeff835199b03895ccd042e8e859564bea
|
1f29a40d0d2bb1a165dc8606f04d7bac1de795b8
|
/DNC_Toolbox/Prune_Tree.R
|
bd27515a2722fa54a42678d92b097529df44d2e7
|
[] |
no_license
|
Gene-Weaver/Testing-Darwin-s-Naturalization-Conundrum
|
c23cc6a96fda3a751be6a51a77b9c5a00d263995
|
f5d5023f433a5090b901020fe98395bb7baf7e86
|
refs/heads/master
| 2020-03-30T08:31:36.726417
| 2018-10-22T05:01:55
| 2018-10-22T05:01:55
| 151,022,058
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
Prune_Tree.R
|
###
### Prune tree tips based on an imported .csv file
###
# Drop the tips indexed by SPECIES_TO_REMOVE from TREE, write the pruned tree
# to SAVE_FILE in Newick format, and return the pruned tree.
Prune.Tree <- function(SPECIES_TO_REMOVE, TREE, SAVE_FILE){
  tips_to_drop <- TREE$tip.label[SPECIES_TO_REMOVE]
  pruned_tree <- drop.tip(TREE, tips_to_drop)
  write.tree(pruned_tree, file = SAVE_FILE)
  return(pruned_tree)
}
|
3901eae15a77e1e7cd44040fcb8954dcc80cf4cf
|
a1e0c1014d6de2ee4034accb9cd29cacb1533b08
|
/global.R
|
83c1e2264edd8c6044e99cf9710200af81f75db8
|
[] |
no_license
|
NickPyll/Prime
|
97e16c1a218176660c5c2fbbb17579224c13c9b6
|
5f861990b095bdf60a9a88a0c22aaae119ca4ff9
|
refs/heads/master
| 2021-10-21T22:02:11.242799
| 2021-10-11T13:09:48
| 2021-10-11T13:09:48
| 123,444,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
global.R
|
library(shiny)
library(shinythemes)
|
5a2bf4a23dbe5298b3417c7e64e638fc6604cb61
|
9f958bfe7662e0cc3da82697003ab0970822535b
|
/man/ac_get_campaigns.Rd
|
26c6f76d905f26e14c6f6ef015c6057691f7cc3b
|
[] |
no_license
|
mdancho84/ractivecampaign
|
23656aea4765988bfffe94eb29877414b8c09a80
|
17b14b518d95a485eabb1592ef73ed131490e3a7
|
refs/heads/master
| 2023-08-29T23:38:28.754582
| 2021-11-12T09:20:02
| 2021-11-12T09:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 593
|
rd
|
ac_get_campaigns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ac_get_campaigns.R
\name{ac_get_campaigns}
\alias{ac_get_campaigns}
\title{Retrieve all campaigns}
\usage{
ac_get_campaigns()
}
\value{
tibble with campaigns metadata
}
\description{
Campaigns are broadcast emails sent out to a list of contacts.
}
\examples{
\dontrun{
Sys.setenv('ACTIVECAMPAGN_API_TOKEN' = "YOUR_TOKEN")
Sys.setenv('ACTIVECAMPAGN_API_URL' = "https://<your-account>.api-us1.com")
camps <- ac_get_campaigns()
}
}
\seealso{
\href{https://developers.activecampaign.com/reference#test-1}{Campaigns}
}
|
371b93229b228769fbe9f1eb5284f8ff9f4fdcf4
|
9d1ca53656d3b6d76c839eef335c82679dbc5067
|
/Calculations with dataframes - 20200407.R
|
a224bc495f8d7bfbe9727e24d28445c78e25a774
|
[] |
no_license
|
shirewoman2/HandsOnR
|
559fa449ab56e8ccec7f49ff38374a2c00a1f35e
|
aa3b3eca3f985e6487f93df3282b76192822414b
|
refs/heads/master
| 2021-06-30T18:01:52.650937
| 2020-10-24T17:53:08
| 2020-10-24T17:53:08
| 165,004,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,622
|
r
|
Calculations with dataframes - 20200407.R
|
# Calculations with dataframes - 20200407
# This script is for playing around with data.frames.
# Housekeeping -----------------------------------------------------------
library(tidyverse)
library(LaurasHelpers)
# Structures and classes of objects ---------------------------------------
data(iris)
str(iris)
unique(iris$Species)
data(Students)
head(Students)
unique(Students$Cookie)
as.numeric(unique(iris$Species))
iris$Species %>% unique() %>% as.numeric()
as.numeric(Students$Gender)
class(iris$Species)
class(Students$Gender)
# Sort on the column "Gender" in the Students data.frame and make the order 1. "M", 2. "F".
Students <- Students %>%
mutate(Gender = factor(Gender, levels = c("M", "F"))) %>%
arrange(Gender)
Students <- mutate(Students, Gender = factor(Gender, levels = c("M", "F")))
Students <- arrange(Students, Gender)
Students$Gender <- factor(Students$Gender, levels = c("M", "F"))
# Students <- sort(Students, Students$Gender)
nrow(Students)
# What are the ranges of values for the hours of sleep in the Students data.frame?
range(Students$Sleep.hr)
range(Students[, "Sleep.hr"])
# Mathematical calculations on data.frames -----------------------------------
# 1. For the Students data.frame, let's assume that they spread out their TV
# watching evenly over the week. Make a new column "VampTV.hr.perday" that is
# VampTV.hr divided by 7.
Students <- Students %>%
mutate(VampTV.hr.perday = VampTV.hr / 7)
# 2. Let's assume that people need about 5 hours per day to just take care of
# things like eating, bathing, doing laundry, working out, etc. Let's make a
# separate variable called BasicLivingTime and set it equal to 5.
BasicLivingTime = 5
# 3. Now that we know how much time per day students are spending on basic
# living, trashy vampire TV watching, and sleeping, let's see how much time they
# each have left for working in the lab! How would you do that?
Students <- Students %>%
mutate(LabTime.perday = 24 - Sleep.hr - VampTV.hr.perday - BasicLivingTime)
sum(Students$LabTime.perday)
# Regular expressions: Matching patterns in text -----------------------------
# With the Students data.frame, find all the students whose name ends in "a"
# using RegEx.
Students$Name[str_detect(Students$Name, "a$")]
Students$Name[str_detect(Students$Name, "a$|e$")]
# 3. With the Candidates data.frame, what are all the possible values for the
# column "jurisdiction"?
data(Candidates)
str(Candidates)
unique(Candidates$jurisdiction)
# 4. Make a new column in the Candidates data.frame called "WhichHouse" and, if
# the jurisdiction contains the word "SENATE" extract that word and put it in
# the column "WhichHouse", and if it contains "HOUSE", put that in the column
# "WhichHouse".
Candidates <- Candidates %>%
mutate(WhichHouse = str_extract(jurisdiction, "SENATE|HOUSE"))
House <- Candidates %>%
filter(WhichHouse == "HOUSE")
# 5. For some reason (I honestly don't know it), one of the entries in
# jurisdiction, "CITY OF BOTHELL *", includes that asterisk. Count how many
# instances there are in which "jurisdiction" contains the special character
# "*".
Candidates <- Candidates %>%
mutate(LookForAsterisk = str_extract(jurisdiction, "\\*"))
# Subsetting with Boolean operators ----------------------------------------
# 1. Make a new data.frame that is all the students in the Pharmaceutics
# Department.
# 2. Make a new data.frame that is all the students who spent more
# than 3 hours watching trashy vampire TV
MyNum <- c(5, 2, NA, 10)
MyNum[MyNum > 2]
MyNum[which(MyNum > 2)]
|
5e8b00818fd2dc43411535ca338197ff9f96718f
|
75f8a0d750aa880f5eaf72aafe9acba8746d9656
|
/tutorial/scripts/graphics/plot_sin.R
|
c13bce42368f5e08b665c75a42c1779fa61f6764
|
[] |
no_license
|
dkhramov/iad_2020
|
abe75b34c5fb422b1eb7ad320827a7253a7fb03d
|
701b9eb08f65c0262808717549c369b270883a14
|
refs/heads/master
| 2021-02-06T22:43:27.924767
| 2020-03-20T12:19:24
| 2020-03-20T12:19:24
| 243,954,164
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,423
|
r
|
plot_sin.R
|
# ะัะฐัะธะบ, ัะพััะฐะฒะปะตะฝะฝัะน ะธะท ัะปะตะผะตะฝัะพะฒ
x <- seq(-pi,pi,.1)
y <- sin(x)
plot.new()
plot.window(xlim = c(-pi,pi), ylim = c(-1,1))
points(x,y)
axis(1)
axis(2)
box()
title(xlab = "x")
title(ylab = "y")
# ะขะพั ะถะต ะณัะฐัะธะบ, ะฟะพัััะพะตะฝะฝัะน ั ะฟะพะผะพััั ััะฝะบัะธะธ ะฒััะพะบะพะณะพ ััะพะฒะฝั
plot(x,y)
# ะะพะฑะฐะฒะธะผ ะทะฐะณะพะปะพะฒะพะบ ะธ ะธะทะผะตะฝะธะผ ะผะตัะบะธ ะพัะตะน ะบะพะพัะดะธะฝะฐั
plot(x, y,
main = "2d-Graph",
xlab = "Axis X",
ylab = "Axis Y"
)
# ะฃะฟัะฐะฒะปะตะฝะธะต ัะธะฟะพะผ ะผะฐัะบะตัะฐ ะธ ัะฒะตัะพะผ ะณัะฐัะธะบะฐ
plot(x,y,pch="*",col="red")
# ะกะบะพะปัะบะพ ัะฒะตัะพะฒ ะธะผะตัั ัะฒะพะธ ะธะผะตะฝะฐ
length(colors())
# ะะพัััะพะตะฝะธะต ะณัะฐัะธะบะพะฒ ะบัะธะฒัั
, ัะพััะพััะธั
ะธะท ัะพัะตะบ ะธ ะปะธะฝะธะน, ัะพะปัะบะพ ะธะท ะปะธะฝะธะน.
x <- seq(-pi,pi,.3)
y <- sin(x)
plot(x,y,
col="#00FF00",
type="b" # ัะธะฟ ะพัะพะฑัะฐะถะตะฝะธั ะบัะธะฒะพะน
# "p" - ัะพัะบะธ (ะฟะพ ัะผะพะปัะฐะฝะธั),
# "l" - ะปะธะฝะธั,
# "b" - ัะพัะบะธ ะธ ะปะธะฝะธั (both))
)
lines(x+.3, y, col="#0000FF")
# ะฃะฟัะฐะฒะปะตะฝะธะต ัะธะฟะพะผ ะธ ัะพัะธะฝะพะน ะปะธะฝะธะธ
x <- seq(-pi,pi,.1)
y <- sin(x)
plot(x,y,
type="l", # ะปะธะฝะธั
lty="dashed", # ัััะธั
ะพะฒะฐั
lwd=2 # ัะพะปัะธะฝะพะน 2 ะฟั.
)
# ะกะพะทะดะฐะฝะธะต ะฟัััะพะณะพ ะณัะฐัะธะบะฐ
plot(x, y, type="n")
#### ะะปะพะฑะฐะปัะฝัะต ะธ ะปะพะบะฐะปัะฝัะต ะฟะฐัะฐะผะตััั ะณัะฐัะธะบะพะฒ
# par() ะฟะพะทะฒะพะปัะตั ัะทะฝะฐัั/ัััะฐะฝะพะฒะธัั ะฟะฐัะฐะผะตััั ะณัะฐัะธะบะฐ
par("col") # ัะฒะตั, ะธัะฟะพะปัะทัะตะผัะน ะดะปั ัะธัะพะฒะฐะฝะธั
# ะ ะฒะฐัะตะน ะฒะตััะธะธ R
paste0(version$major,".",version$minor)
# ัะธัะปะพ ะณัะฐัะธัะตัะบะธั
ะฟะฐัะฐะผะตััะพะฒ ัะฐะฒะฝะพ:
length(par())
# ะะพะบะฐะปัะฝัะต ะทะฝะฐัะตะฝะธั ะณัะฐัะธัะตัะธั
ะฟะฐัะฐะผะตััะพะฒ ะฟะตัะตะบััะฒะฐัั ะณะปะพะฑะฐะปัะฝัะต,
# ะทะฐะดะฐะฝะฝัะต par()
x <- seq(-pi,pi,.3)
y <- sin(x)
par(col="red")
plot(x,y,type="b")
lines(x+.3, y)
points(x-.3, y, col="blue")
# ะะพัััะฐะฝะพะฒะปะตะฝะธะต ะฟัะตะดัะดััะธั
ะทะฝะฐัะตะฝะธะน ะณัะฐัะธัะตัะบะธั
ะฟะฐัะฐะผะตััะพะฒ
par() # ัะทะฝะฐัั ัะตะบััะธะต ะฝะฐัััะพะนะบะธ
old_par <- par() # ัะดะตะปะฐัั ะธั
ะบะพะฟะธั
par(col = "red") # ัะพะทะดะฐัั ะฝะพะฒัะต ะฝะฐัััะพะนะบะธ
plot(x,y) # ะฟะพัััะพะธัั ะฝะตะพะฑั
ะพะดะธะผัะต ะณัะฐัะธะบะธ
par(old_par) # ะฒะพัััะฐะฝะพะฒะธัั ะฟัะตะถะฝะธะต ะฝะฐัััะพะนะบะธ
plot(x,y) # ะฟะพัััะพะธัั ัะพั ะถะต ะณัะฐัะธะบ, ะดะปั ะฟัะพะฒะตัะบะธ
## ะะพัััะพะตะฝะธะต ะณัะฐัะธะบะพะฒ ั ะปะตะณะตะฝะดะพะน
# ะกััะพะธะผ ััะธ ะณัะฐัะธะบะฐ
x <- seq(-pi, pi, len=20)
plot(x, sin(x), type="l", ylim=c(-1.2, 2.1), col=2, lty=4, lwd=2)
points(x, cos(x), pch=8, col=3)
lines(x, tan(x), type="b", lty=1, pch=4, col=4)
# ะกััะพะธะผ ะปะตะณะตะฝะดั
legend(-1.5, 2.1, # ะบะพะพัะดะธะฝะฐัั ะปะตะฒะพะณะพ ะฒะตัั
ะฝะตะณะพ ัะณะปะฐ
c("sin", "cos", "tan"), # ะฟะพะดะฟะธัะธ
col = c(2, 3, 4), # ัะฒะตั ะปะธะฝะธะธ ะฒ ะปะตะณะตะฝะดะต
text.col = "green4", # ัะฒะตั ะฟะพะดะฟะธัะธ
lty = c(4, -1, 1), # ัะธะฟ ะปะธะฝะธะธ ะฒ ะปะตะณะตะฝะดะต
pch = c(NA, 8, 4), # ะผะฐัะบะตั ะฒ ะปะตะณะตะฝะดะต
bg = "gray90" # ัะฒะตั ัะพะฝะฐ ะฒ ะปะตะณะตะฝะดะต
)
|
5eca603231075c7eb6c2d34c40aa3a47e8ff292e
|
3761011142bbaf90bfdfb60125eee5fb540cfe61
|
/man/vanmaps.Rd
|
99f95bdbcbab2c61370e04c96deaff8f2a41152f
|
[] |
no_license
|
tony-stone/vanmaps
|
5da82df1a2be3365f22ee435ce4a0a6f19b270fd
|
e63a1984377fc554e69f06935015f65206339187
|
refs/heads/master
| 2020-07-06T06:52:52.684386
| 2016-11-22T11:48:30
| 2016-11-22T11:48:30
| 74,054,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
vanmaps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vanmaps.R
\docType{package}
\name{vanmaps}
\alias{vanmaps}
\alias{vanmaps-package}
\title{vanmaps: A package for producing choropleth maps at the English County
or Ambulance Service level.}
\description{
Contains National Statistics data: Crown copyright and database right 2016;
Contains OS data: Crown copyright and database right 2016
}
|
8d017377611d1f70ab43a571a402fd720fdf2d05
|
653b8ba356ed50f74a442455e409f62976b4464d
|
/modelAnalyzeR/man/exec_alert_counts.Rd
|
78802cc7caef63c44ab4339c7e5e50eedfae917d
|
[
"MIT"
] |
permissive
|
kiran1984/SCOPE-Anomaly-Detection-Case-Study
|
e5bcfaf981b78695f7ebebdfb8b40ed7871244c5
|
21a0bb9e16a200ba1fcf29354c544524cec9a154
|
refs/heads/master
| 2020-06-22T11:09:53.603581
| 2018-06-30T21:53:38
| 2018-06-30T21:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
exec_alert_counts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/case_study_results.R
\name{exec_alert_counts}
\alias{exec_alert_counts}
\title{Exec Alert Counts}
\usage{
exec_alert_counts(data, beginning_dates, middle_dates, end_dates)
}
\arguments{
\item{data}{Dataframe of territory RexT results}
\item{beginning_dates}{Vector of dates for beginning of the month}
\item{middle_dates}{Vector of dates for middle of the month}
\item{end_dates}{Vector of dates for end of the month}
}
\value{
A list containing dataframe of TS alert counts, a graph of the results, and a boxplot of the distribution
}
\description{
Calculate territory RexT alert counts for case study.
}
\author{
Stefanie Molin
}
|
f0d32bdceca30d837bf6fc58b5588b4b291d1cc8
|
f126f954a239b4bc4e6db2ced2957cfc558983a8
|
/reproducibilityChallenge Q4.R
|
e9a046a0288f617b0622b8621caec83bde218baf
|
[] |
no_license
|
dpaukner/Reproducibility-Challenge-QBio5
|
7c6b774a0247098b6c331f3f17deaecd456b92b1
|
6234270ee561095beb60c16d2a98fdc7c36c61d3
|
refs/heads/master
| 2020-07-23T11:57:13.727471
| 2019-09-10T22:05:13
| 2019-09-10T22:05:13
| 207,549,847
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
reproducibilityChallenge Q4.R
|
library(tidyverse)
setwd("~/Documents/BSD-QBio5/tutorials/reproducibility/data")
arth <- read.csv("cole_arthropod_data_1946.csv", stringsAsFactors = FALSE)
weev_data <- read.csv("cole_arthropod_data_1946.csv", stringsAsFactors = FALSE)
|
976caf736edcd9b52a093cb3a9221cd1ad6db425
|
82b1c5655856b660c053d18ec7ad94f3aa30a964
|
/tests/testthat/test-function-boxplot_main_extensions.R
|
4d252a878eb6e2580357ec9bd5ce0a663169339b
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.fakin
|
9792dfa732a8dd1aaa8d2634630411119604757f
|
17ab0e6e9a63a03c6cb40ef29ee3899c2b2724a0
|
refs/heads/master
| 2022-06-09T22:25:09.633343
| 2022-06-08T21:24:14
| 2022-06-08T21:24:14
| 136,065,795
| 1
| 0
|
MIT
| 2021-03-15T10:55:17
| 2018-06-04T18:21:30
|
R
|
UTF-8
|
R
| false
| false
| 201
|
r
|
test-function-boxplot_main_extensions.R
|
test_that("boxplot_main_extensions() works", {
f <- kwb.fakin:::boxplot_main_extensions
expect_error(f())
f(data.frame(
extension = c("docx", "xls", "docx"),
size = c(1, 2, 3)
))
})
|
ca3df25ba70299112e51e5474007b6e39cfd025b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/billboarder/examples/bb_lollipop.Rd.R
|
c892b527ac4dd7d5b3eec4f0136239037f974d80
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
bb_lollipop.Rd.R
|
library(billboarder)
### Name: bb_lollipop
### Title: Helper for creating a lollipop chart
### Aliases: bb_lollipop
### ** Examples
# From wikipedia
sw <- data.frame(
film = c("The Force Awakens", "The Phantom Menace",
"Revenge of the Sith", "A New Hope",
"Attack of the Clones", "The Empire Strikes Back",
"Return of the Jedi"
),
worldwide_gross = c(2068178225, 1027044677, 848754768,
775398007, 649398328, 538375067,
475106177)
)
# Simple example
billboarder() %>%
bb_lollipop(data = sw)
# Fancy example
billboarder() %>%
bb_lollipop(data = sw, rotated = TRUE)%>%
bb_y_grid(show = TRUE) %>%
bb_y_axis(tick = list(
values = c(0, 5e+08, 1e+09, 1.5e+09, 2e+09),
outer = FALSE,
format = htmlwidgets::JS("d3.formatPrefix('$,.0', 1e6)")
)) %>%
bb_x_axis(tick = list(centered = TRUE)) %>%
bb_labs(
title = "Star Wars - Total Lifetime Grosses",
caption = "Data source : wikipedia"
)
# With mapping
billboarder(data = sw) %>%
bb_lollipop(mapping = bbaes(x = film, y = worldwide_gross))
|
232e3741a7a4e21aaeb9992149c9378fb840c93e
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/spartan/spartan_eg_fire_cal_out.R
|
5a6fd85d6c9f37304ff668dae0ea637f75a7416d
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,974
|
r
|
spartan_eg_fire_cal_out.R
|
source("R/spartan/spartan_settings.R")
library(dplyr)
library(tibble)
library(raster)
library(rgeos)
library(sf)
library(tidyr)
source("R/functions/source.functions.R")
source.functions("R/functions")
ntimesteps <- 14
proj_path <- "/data/gpfs/projects/punim1340/eg_fc_out/"
###
eg_mask <- raster(x = "/data/gpfs/projects/punim1340/eg_fire_calibration/EG19_fc_3a_0/Ecoregion_EG.img") %>%
round.extent
#eg_mask <- raster(x = "junk/Ecoregion_EG.img")
eg_mask[!is.na(eg_mask)] <- 1
eg_mask <- mask(x = eg_mask,
mask = eg_mask,
filename = sprintf(
"%s/eg_mask.grd",
proj_path
),
overwrite = TRUE)
###
dl <- list.dirs(
path = "/data/gpfs/projects/punim1340/eg_fire_calibration/",
recursive = FALSE
)
scn_id <- sub(".*//", "", dl)
fi <- mapply(
FUN = get.landis.fire,
scn_path = dl,
scn_id = scn_id,
MoreArgs = list(
proj_path = proj_path,
out_path = "",
proj_mask = eg_mask,
timesteps = ntimesteps
),
SIMPLIFY = FALSE
)
saveRDS(
object = fi,
file = sprintf(
"%s/fire_sevs.Rds",
proj_path
)
)
# fi <- readRDS(
# file = sprintf(
# "%s/fire_sevs.Rds",
# proj_path
# )
# )
fip <- lapply(
X = fi,
FUN = function(z){
result <- lapply(
X = z,
FUN = function(x){
result <- rasterToPolygons(
x = x,
fun = function(y){y > 0},
dissolve = TRUE
)
if(!is.null(result)){
result <- result %>%
st_as_sf %>%
st_cast(to = "POLYGON")
}
return(result)
}
)
return(result)
}
)
saveRDS(
object = fip,
file = sprintf(
"%s/fire_polys.Rds",
proj_path
)
)
# fip <- readRDS(
# file = sprintf(
# "%s/fire_polys.Rds",
# proj_path
# )
# )
fia <- lapply(
X = fip,
FUN = function(y){
lapply(
X = y,
FUN = function(x){
if(is.null(x)){
NA
}else{
st_area(x)
}
}
)
}
)
saveRDS(
object = fia,
file = sprintf(
"%s/fire_area_list.Rds",
proj_path
)
)
# fia <- readRDS(
# file = sprintf(
# "%s/fire_area_list.Rds",
# proj_path
# )
# )
fiat <- lapply(
X = fia,
FUN = function(x){
rbind(x) %>%
t %>%
as_tibble %>%
mutate(
yr = 1:length(x)
)
}
) %>%
rbind %>%
t %>%
as_tibble %>%
mutate(
rep = names(fia)
) %>%
mutate(
rep = sub(
pattern = ".*/",
replacement = "",
x = rep
)
)
names(fiat) <- c("dat", "rep")
fire_area <- fiat %>%
unnest(dat) %>%
unnest(x) %>%
rename(area_m2 = x) %>%
mutate(area_ha = area_m2/10000) %>%
filter(!is.na(area_ha))
saveRDS(
object = fire_area,
file = sprintf(
"%s/fire_area.Rds",
proj_path
)
)
# fire_area <- readRDS(
# file = sprintf(
# "%s/fire_area.Rds",
# proj_path
# )
# )
|
ce0a19951240132a3952a2a45e657ed4b8b053b7
|
df33630313df583aa2a48614925dc524d982f27b
|
/16S/spatial_analysis/NEON_sampling_effort/NEON_sampling_effort_site.level_16S.r
|
f1f5731c5c2e34817c8771ebc7b4c4591e19b58a
|
[
"MIT"
] |
permissive
|
saracg-forks/NEFI_microbe
|
fa0e28587288059908a6ab144854e352eeae7443
|
e59ddef4aafcefdf0aff61765a8684859daad6e0
|
refs/heads/master
| 2023-03-22T03:21:57.634599
| 2021-03-15T09:13:16
| 2021-03-15T09:13:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,570
|
r
|
NEON_sampling_effort_site.level_16S.r
|
#testing for observation uncertainty at the site level for bacterial groups.
rm(list=ls())
source('paths.r')
source('NEFI_functions/tic_toc.r')
library(doParallel)
library(DirichletReg)
library(RCurl)
script <- getURL("https://raw.githubusercontent.com/colinaverill/NEFI_microbe/master/paths.r", ssl.verifypeer = FALSE)
eval(parse(text = script))
#set output path.----
output.path <- HARV_sampling_effort_analysis_16S.path
#detect and register cores.----
n.cores <- detectCores()
registerDoParallel(cores=n.cores)
#load data - focus on Harvard Forest (HARV) - 50 unique soil cores.----
dat <- readRDS(NEON_16S_phylo_fg_abundances.path)
dat <- as.data.frame(dat$phylum$abundances)
dat$site <- substr(rownames(dat), 1, 4)
all.dat <- dat
sites <- unique(dat$site)
dat <- dat[dat$site == 'HARV',]
dat$site <- NULL
#dat <- dat[,c('other','Ectomycorrhizal','Arbuscular','Saprotroph','Pathogen')]
dat <- (dat + 1)
dat <- dat/rowSums(dat)
#sampling depths and number of trials.
potential.n.samp <- c(3,5,6, 8, 10, 15, 20, 30, 40, 50)
n.trial <- 1000
#run simulation.----
cat(paste0('Running bootstrap simulation for ',n.trial,' iterations across ',length(sites),' sites...\n'))
tic()
super.super.out <- list()
for(k in 1:length(sites)){
dat <- all.dat[all.dat$site == sites[k],]
dat$site <- NULL
# dat <- dat[,c('other','Ectomycorrhizal','Arbuscular','Saprotroph','Pathogen')]
dat <- (dat + 1)
dat <- dat/rowSums(dat)
n.samp <- potential.n.samp[potential.n.samp <= nrow(dat)]
super.out <-
foreach(j = 1:n.trial) %dopar% {
output <- list()
for(i in 1:length(n.samp)){
#sample your dataset.
sample <- data.frame(dat[sample(nrow(dat), size = n.samp[i], replace = F),])
sample$Y <- DR_data(sample[,1:ncol(sample)])
#fit dirichlet interecept.
mod <- DirichReg(Y ~ 1, data = sample)
mult <- sum(mod$fitted.values$alpha[1,])
output[[i]] <- mult
}
output <- unlist(output)
return(output)
}
super.out <- do.call(rbind, super.out)
colnames(super.out) <- n.samp
#expand super.out into 2 column- y and n.samp.
y <- as.vector(super.out)
lab <- list()
for(i in 1:length(n.samp)){
lab[[i]] <- rep(n.samp[i],n.trial)
}
lab <- unlist(lab)
super.out2 <- data.frame(y, lab)
colnames(super.out2) <- c('mu','n.samp')
super.super.out[[k]] <- super.out2
cat(paste0(k,' of ',length(sites),' sites fitted.\n'))
}
names(super.super.out) <- sites
cat('Simulation complete.');toc()
#Save output for downstream analysis.----
saveRDS(super.super.out, output.path)
|
9ea1782f5491fd0baace6893a4fa2c46b061dc8c
|
727e96e85a03cf01d46c132225e171218c8dd1e5
|
/man/assessLakeProfiles.Rd
|
8e3485350d2204d2455a26d1e91d5bd3d81a4b8a
|
[
"MIT"
] |
permissive
|
utah-dwq/irTools
|
6faf9da88514cf72b2166f48074348c00a6f2137
|
da34c77f363a00767563d76112ea87004c3be0d4
|
refs/heads/master
| 2023-08-31T23:58:52.981989
| 2023-08-25T15:31:22
| 2023-08-25T15:31:22
| 147,577,302
| 2
| 0
|
MIT
| 2022-08-30T21:54:40
| 2018-09-05T20:44:04
|
R
|
UTF-8
|
R
| false
| true
| 2,042
|
rd
|
assessLakeProfiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assessLakeProfiles.r
\name{assessLakeProfiles}
\alias{assessLakeProfiles}
\title{Run lake profile assessments}
\usage{
assessLakeProfiles(
data,
do_crit = list(`3A` = 4, `3B` = 3),
temp_crit = list(`3A` = 20, `3B` = 27),
uses_assessed = c("3A", "3B")
)
}
\arguments{
\item{data}{Lake profiles object returned by dataPrep step.}
\item{do_crit}{List of beneficial use classes and associated dissolved oxygen criteria to use for assessment. Defaults to list("3A"=5, "3B"=3). This excludes chronic & ELS present criteria per assessment methods. Sites with site specific criteria will be assessed regardless of criteria specified in this argument. Objects in this list should match the uses_assessed argument.}
\item{temp_crit}{List of beneficial use classes and associated water temperature criteria to use for assessment. Defaults to list("3A"=20, "3B"=27). This excludes chronic & ELS present criteria per assessment methods. Sites with site specific criteria will be assessed regardless of criteria specified in this argument. Objects in this list should match the uses_assessed argument.}
\item{uses_assessed}{Vector of beneficial uses to be assessed for lake profiles. Defaults to 3A & 3B uses.}
}
\value{
Returns a list of lake profile assessment dataframes. profile_asmnts_mlid_param contains site/parameter level profile assessments, profile_asmnts_individual contains assessments for each individual profile,
profile_criteria contains the criteria used for the profile assessment (excluding any site-specific criteria that may have occurred in the input dataset),
profiles_long contains profile data in long format including the numeric criterion associated with each parameter, profiles_wide contains profile data cast to wide format.
}
\description{
Performs lake profile assessments per IR assessment methods. This includes identifying stratified profiles & applying appropriate assessment methods for stratified & non stratified profiles.
}
|
dd839320833576e430819573161a7e0cf5a11e5f
|
05884bd8afb3222aec86c6a2b363e67ed3c64590
|
/toolbox/R/safeRbind.R
|
ddb285f2c51001a354c4bfddf378b5d98f35d9ab
|
[] |
no_license
|
nmarticorena/mineria_datos
|
bcfbea31e6de6f292e4404068b360638ab8a3cbb
|
6e3f22c2fb79fe551a5d8c94136f495638088813
|
refs/heads/master
| 2020-03-09T00:36:28.806062
| 2018-06-14T03:12:35
| 2018-06-14T03:12:35
| 128,492,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
safeRbind.R
|
#' Safe Rbind
#'
#' Recieve a list of data tables or data frames and combine them into one by columns. It fills the missing columns in a dataframe with \code{NA\'s}
#' @usage safeRbind(dataframes,\cr
#' cols = unique(do.call(c, llply(dataframes, colnames))))
#' @param dataframes a list of dataframes.
#' @param cols the columns to keep after combination, by default it takes all the columns of every dataframe
#' @return Return a unique dataframe made of all dataframes in \code{dataframes}, they appends in order, so the first row
#' belongs to the first dataframe, and the last to the last.
#' @author Martin Vicencio
#' @export
safeRbind = function(dataframes, cols = unique(do.call(c, llply(dataframes, colnames)))) {
safeLibrary(plyr)
safeLibrary(data.table)
dataframes = llply(dataframes, function(x) {
newcols = setdiff(cols, colnames(x))
if (length(newcols) > 0) {
x[, newcols] = NA
}
setcolorder(x, cols)
})
dataframes = do.call(rbind, dataframes)
return(dataframes)
}
|
1135a8fa6c78576e9d253c6dd93b7799a4e00cc4
|
ec4286470afad909542c230c7ad5563e00eca93f
|
/set_vars.R
|
556fafbae73ca0999278fe671d569066339eb90a
|
[] |
no_license
|
ether8unny/hopappmodule
|
20ac85ce9d9c8a907ca5e0d24e8f4f031ade9c61
|
15d5f87fd223a23ab611b56d9875b3200c306a8b
|
refs/heads/master
| 2020-04-03T18:01:19.122072
| 2018-10-30T22:51:29
| 2018-10-30T22:51:29
| 155,468,025
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,188
|
r
|
set_vars.R
|
#function for dummy inventory
make_inv_db <- function(){
load("~/hopappmodule/data/inv_classes")
load("~/hopappmodule/data/prod_list")
load("~/hopappmodule/data/prod_num_list")
inv_qty <- data.table(prod_name = prod_list, prod_num = prod_num_list)
inv_qty[,eval(inv_classes[1]) := as.integer(3)]
inv_qty[,eval(inv_classes[2:length(inv_classes)]) := as.integer(0)]
write.csv(inv_qty,file = "~/hopappmodule/data/inv_qty.csv")
save(inv_qty,file = "~/hopappmodule/data/inv_qty")
return(inv_qty)
}
init_timeline <- function(){
}
make_sales_log <- function(){
salesbydatebynumbers <- as.data.table(read.csv("~/hopappmodule/data/salesbydatebynumbers.csv",header = TRUE, stringsAsFactors = FALSE))
salesbydatebynumbers[,Date:=as.Date(Date,"%m/%d/%Y")]
tmp <- as.data.table(lapply(salesbydatebynumbers, function(x) stri_replace_all_fixed(x,"(","-")))
tmp <- as.data.table(lapply(tmp, function(x) stri_replace_all_fixed(x,")","")))
tmp <- as.data.table(lapply(tmp[,4:ncol(tmp)], function(x) as.numeric(x)))
sales_log <- data.table(salesbydatebynumbers[,1:3],tmp)
rm(tmp,salesbydatebynumbers)
write.csv(sales_log,file = "~/hopappmodule/data/sales_log.csv")
data_con <- antidb()
save(sales_log,file = "~/hopappmodule/data/sales_log")
dbWriteTable(data_con, "sales_log", sales_log, overwrite=TRUE)
dbDisconnect(data_con)
}
load_list <- c("full_clip_prod_dt","inv_qty","periods_by","periods_list","inv_classes","prod_list
","comp_info","comp_col_names","sales_log","comp_info_names","proc_list","tl_events_list",
"tl_range_list")
if(file.mtime("~/hopappmodule/data/comp_info") < file.mtime("~/hopappmodule/data/compinfo2.csv")){
load("~/hopappmodule/data/comp_col_names")
comp_info <- as.data.table((read.csv("~/hopappmodule/data/compinfo2.csv",header = TRUE,stringsAsFactors=FALSE)),keep.rownames = FALSE)
names(comp_info) <- comp_col_names
comp_info[,main_phone:= character()]
comp_info[,contact_name := character()]
comp_info[,contact_phone := character()]#
comp_info[,contact_email := character()]
comp_info[,lon := numeric()]
comp_info[,lat := numeric()]
for(i in 1: nrow(comp_info)){
if(is.na(comp_info[i,lon])){
tmp_df <- geocode(unlist(comp_info[i,3]),override_limit = TRUE,output = "latlon")
if(!is.na(tmp_df$lon)){
comp_info$lon[i] <- tmp_df$lon
comp_info$lat[i] <- tmp_df$lat
}
}
}
write.csv(comp_info,file = "~/hopappmodule/data/comp_info.csv")
save(comp_info,file = "~/hopappmodule/data/comp_info")
print("comp_info file updated from new download file and saved.")
}else{
print("comp_info file up to date.")
}
if(file.mtime("~/hopappmodule/data/sales_log") < file.mtime("~/hopappmodule/data/salesbydatebynumbers.csv")){
salesbydatebynumbers <- as.data.table((read.csv("~/hopappmodule/data/salesbydatebynumbers.csv",header = TRUE,stringsAsFactors=FALSE)),keep.rownames = FALSE)
salesbydatebynumbers[,Date:=as.Date(Date,"%m/%d/%Y")]
tmp <- as.data.table(lapply(salesbydatebynumbers, function(x) stri_replace_all_fixed(x,"(","-")))
tmp <- as.data.table(lapply(tmp, function(x) stri_replace_all_fixed(x,")","")))
tmp <- as.data.table(lapply(tmp[,3:length(salesbydatebynumbers)], function(x) as.numeric(x)))
sales_log <- data.table(salesbydatebynumbers[,1:2],tmp)
rm(salesbydatebynumbers,tmp)
data_con <- antidb()
dbWriteTable(data_con, "sales_log", sales_log,overwrite=TRUE)
dbDisconnect(data_con)
write.csv(sales_log,file = "~/hopappmodule/data/sales_log.csv")
save(sales_log,file = "~/hopappmodule/data/sales_log")
print("sales_log file updated from new download file and saved.")
}else{
print("sales_log file up to date.")
}
for(i in 1:length(load_list)){
# if(exists(load_list[i])){
# print(paste(load_list[i]," already exists. removing.",sep=""))
# rm(list = load_list[i])
# }
if(file.exists(paste("~/hopappmodule/data/",load_list[i],sep=""))){
load(paste("~/hopappmodule/data/",load_list[i],sep=""))
if(exists(load_list[i])){
print(paste(load_list[i]," loaded from file.",sep=""))
}
}
}
#variables
|
dccd0c232aa00025ccf60d7ea0a40d632cdb49dd
|
3c5e3f9a4c5db7b26a0a2cf4c286aa8fd4edd051
|
/man/shiny_rot.Rd
|
fd8012038fb0a5730f0e7a8ee65de30f474c4978
|
[
"MIT"
] |
permissive
|
leahpom/MATH5793POMERANTZ
|
f8f8f639e1f522da200f57cd242173e58d11ab1d
|
b8357d19142f15c9d0d7fa27c03af4a64083a352
|
refs/heads/master
| 2023-04-20T06:21:21.447810
| 2021-05-04T04:37:17
| 2021-05-04T04:37:17
| 335,433,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,321
|
rd
|
shiny_rot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_rot.R
\name{shiny_rot}
\alias{shiny_rot}
\title{Rotations and Data Plots}
\usage{
shiny_rot()
}
\value{
Table with the first six entries of the uploaded data
A scatterplot that is a ggplot2 object
A second ggplot2 scatterplot
}
\description{
User uploads any multivariate data set with at least two continuous variables and it \emph{must}
be a .csv file. User then specifies which columns of data they are interested in, and a table and two scatterplots
are created. The table is the first six entries of the data set, for user reference. The first scatterplot
allows the user to create an aesthetic graph and look at the drop-one correlation. The second allows the user
to rotate the axes by theta (from user input) and gives the correlation corresponding to that theta and the
theta at which the correlation will be zero.
}
\section{shiny}{
The shiny web server will run once this function is invoked and will open a web browser. You should learn how this is implemented.
The web server can be studied \url{https://shiny.rstudio.com/tutorial/}
}
\section{First Plot}{
This scatterplot responds to user input to do the following things:
\enumerate{
\item The user uploads the data set
\item The user selects their variables of interest by inputing the column number - default columns 1 and 2
\item The user sets the color of the scatterplot using hexadecimal color codes - default to black
\item The user sets the size of the points using a slider - default to 2.5
\item The user can update the labels of the axes - default to xLab and yLab
\item The user can click on a point on the graph and it will return the drop-1 correlation (correlation \emph{without} the clicked point)
}
}
\section{Second Plot}{
This scatterplot also uses the uploaded data and variables of interest and responds to user input to do the following things:
\enumerate{
\item The user selects a theta value that is then used to rotate the axes by theta degrees
\item The selected theta value is also used to calculate the sample correlation
\item The user also selects how long the rotated axes are
\item The sample correlation and the first-quadrant solution (calculated using the uniroot() function) are printed
}
}
\examples{
\dontrun{ shiny_rot()}
}
|
75310e7010d5664cf7186a4ce3c7f58eeb74e9aa
|
b3bcb9305cb597b1840e3c09bb76b440ea769ffb
|
/part3.R
|
8bdfd523e25d57c37a1f37e4fae0e0cc267e5144
|
[] |
no_license
|
aejb22122/sel_academy
|
a9b3dc75a2fa0274d954d54ba2a3f6748bc5ff4f
|
4f272d60452e86598bb5fb72b80743536a2a8b9b
|
refs/heads/master
| 2020-07-27T11:47:29.318862
| 2019-09-17T14:47:05
| 2019-09-17T14:47:05
| 209,079,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,585
|
r
|
part3.R
|
# Author: Annick Eudes JB Research Manager
# Date: June 14, 2018

library(readxl)
# NOTE(review): mutate(), ggplot() etc. are used below, but dplyr/ggplot2
# are never attached in this script — presumably loaded by a companion
# script or the session; confirm before running this file standalone.
my_data <- read_excel("my_data.xlsx")
View(my_data)

# Define the survey columns we want to convert to numeric:
numerizeCol <- c("Q10:1", "Q10:2", "Q10:3", "Q10:4", "Q10:5", "Q10:6", "Q10:7", "Q10:8")
my_data[numerizeCol] <- sapply(my_data[numerizeCol], as.numeric)

# Use mutate() from the dplyr package to compute the total number of
# SEL Academy sessions attended (row-wise sum of the Q10 indicators):
my_data <- mutate(my_data, attendance = `Q10:1`+`Q10:2`+`Q10:3`+`Q10:4`+`Q10:5`+`Q10:6`+`Q10:7`+`Q10:8`)
str(my_data)
# attach() exposes the columns (attendance, Q11, ...) as bare names for
# the plotting calls below; masking risks apply if same-named globals exist.
attach(my_data)

# Responses by attendance:
barplot(table(attendance), xlab = "Qualified Responses by session attended") # Base R
ggplot(data = my_data, aes(x = attendance)) + geom_bar(position = "dodge") +
# geom_text(aes(label = ..count..), stat = "count", position=position_dodge(0.9),vjust=-0.4)+
labs(x = "Number of SEL Academy sessions attended")+
labs(title = "Qualified Responses")
# Q11 How often have you been able to use any self-awareness practices and tools?
# ggplot(data = my_data) +
# geom_bar(mapping = aes(x = attendance, fill = Q11), position = "dodge")+
# labs(x = "Number of SEL Academy sessions attended") +
# labs(title = "How often have you been able to use any \nself-awareness practices and tools?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q11),
position = "fill")+
labs(x = "Number of SEL Academy sessions attended") +
labs(y = "freq")+
labs(title = "How often have you been able to use any \nself-awareness practices and tools?")
# Q14 How confident are you in your ability to use self-regulation practices and tools in your work or personal life?
# ggplot(data = my_data) +
# geom_bar(mapping = aes(x = attendance, fill = Q14), position = "dodge") +
# labs(x = "Number of SEL Academy session attended")+
# labs(title = "How confident are you in your ability to use \nself-regulation practices and tools in your work or personal life?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q14),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(y = "freq")+
labs(title = "How confident are you in your ability to use \nself-regulation practices and tools in your work or personal life?")
# Q17 How would you describe your social awareness in your personal and work relationships?-Moderately good
# ggplot(data = my_data) +
# geom_bar(mapping = aes(x = attendance, fill = Q17), position = "dodge") +
# labs(x = "Number of SEL academy session attended")+
# labs(title = "How would you describe your \nsocial awareness in your personal and work relationships?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q17),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(y = "Freq.")+
labs(title = "How would you describe your \nsocial awareness in your personal and work relationships?")
# Q18 Have you noticed an increase in your ability to be empathetic in your personal life and at work since attending the trainings?-A lot
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q18), position = "dodge") +
labs(x = "Number of SEL Academy session attended")+
labs(title = "Have you noticed an increase in your ability to be \nempathetic in your personal life and at work since attending the trainings?")
barplot(table(Q18, attendance),
main = "Have you noticed an increase in your ability to be empathetic \nin your personal life and at work since \nattending the trainings?",
xlab = "SEL Academy session attended")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q18),
position = "fill")+
labs(x = "Number of SEL academy session attended")+
labs(y = "Freq.")+
labs(title = "Have you noticed an increase in your ability to be \nempathetic in your personal life and at work since attending the trainings?")
# Q20 Have you noticed an increase in your ability to effectively build healthy and rewarding relationships in your personal life and at work since attending the trainings?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q20), position = "dodge") +
labs(x = "Number of SEL academy session attended")+
labs(title = "Have you noticed an increase in your ability to effectively build \nhealthy and rewarding relationships in your personal life and at work \nsince attending the trainings?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q20),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(y = "Freq.")+
labs(title = "Have you noticed an increase in your ability to effectively build \nhealthy and rewarding relationships in your personal life and at work \nsince attending the trainings?")
# Q24 Have you been able to promote equity in your relationships with coworkers or youth?-A lot
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q24), position = "dodge") +
labs(x = "Number of SEL academy session attended")+
labs(title = "Have you been able to promote equity \nin your relationships with coworkers or youth?")
# ---- Cross tables ----
options(digits = 3)
table("Promoting equity" = Q24, "Number of SEL academy session attended" = attendance)/67*100
# ---- Responsive classroom strategies ----
# Q26 Have you been able to apply Responsive Classroom strategies in your program?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q26)) +
labs(x = "Number of SEL Academy session attended")+
labs(title = "Have you been able to apply \nResponsive Classroom strategies in your program?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q26),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(title = "Have you been able to apply \nResponsive Classroom strategies in your program?")+
labs(y = "Freq.")
# Plot with the labels in the bars :
ggplot(mtcars,aes(x=factor(cyl),fill=factor(gear)))+
geom_bar(position="fill")+
geom_text(aes(label=scales::percent(..count../sum(..count..))),
stat='count',position=position_fill(vjust=0.5))
# Q28 How confident are you in your ability to implement Afternoon Meetings?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q28)) +
labs(x = "Number of SEL Academy session attended")+
labs(title = "How confident are you in your ability \nto implement Afternoon Meetings?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q28),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(title = "How confident are you in your ability \nto implement Afternoon Meetings?") +
labs(y = "Freq.")
# Q30 How confident are you in your ability to implement Second Step activities?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q30)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement Second Step activities?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q30),
position = "fill")+
labs(x = "Number of SEL Academy session attended")+
labs(title = "How confident are you in your ability to implement Second Step activities?") +
labs(y = "Freq.")
# Q32 How frequently are Afternoon Meetings conducted in your program?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q32)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How frequently are Afternoon Meetings conducted in your program?")
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q32),
position = "fill")+
labs(x = "Number of SEL academy session attended")+
labs(title = "How frequently are Afternoon Meetings conducted in your program?")+
labs(y = "Freq.")
# Q36a How confident are you in your ability to implement SEL tools and practices throughout your program with coworkers and/or youth?
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36a)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \nSEL tools and practices \nthroughout your program with coworkers and/or youth?") + coord_flip()
# Q36b How confident are you in your ability to implement SEL tools and practices throughout your program with coworkers and/or youth?-Group Agreements
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36b)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Group Agreements'")
# Q36c How confident are you in your ability to implement SEL tools and practices throughout your program with coworkers and/or youth?-Welcome Ritual
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36c)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Welcome Ritual'")
# Q36d -Transition Pauses/Activities
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36d)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Transition Pauses/Activities'")
# Q36e -Attention Cues
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36e)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Attention Cues'")
# Q36f -Brain Breaks
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36f)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Brain Breaks' \nthroughout your program with coworkers and/or youth?")
# Q36g -Optimistic Closure
ggplot(data = my_data) +
geom_bar(mapping = aes(x = attendance, fill = Q36g)) +
labs(x = "Number of SEL academy session attended")+
labs(title = "How confident are you in your ability to implement \n'Optimistic Closure' \nthroughout your program with coworkers and/or youth?")
# ---- Continue to script "part4.R" ----
# # ---- Attendance and SEL competencies ----
# # Colors
# library(RColorBrewer)
# display.brewer.all()
#
#
# spectral <- brewer.pal(5, "Spectral")
# set2 <- brewer.pal(8, "Set2")
# blues <- brewer.pal(6, "Blues")
#
# # Box plots by groups (sel self-awareness practices)
# #boxplot(attendance ~ Q11, data = df2, frame = FALSE, horizontal = F, notch = F,
# # names = c("Not at all", "A little", "Somewhat often", "Very often"),
# # border = "steelblue",
# # xlab = "'How often have you been able to use any self-awareness practices and tools?'",
# # ylab = "SEL academy session attended")
#
# # Barplots
# # Q11 How often have you been able to use any self-awareness practices and tools?
# #barplot(table(attendance, Q11))
# barplot(table(Q11, attendance),
# #legend.text = c("Not at all", "A little", "Somewhat often", "Very often"),
# border = "gray",
# col = blues,
# #col = c("red", "pink", "lightcyan","blue"),
# main = "How often have you been able to use any self-awareness practices and tools?",
# xlab = "SEL academy session attended"
# )
# legend("topleft",
# c("Not at all", "A little", "Somewhat often", "Very often"),
# fill = blues
# #c("red", "pink", "lightcyan","blue")
# )
#
# # Q14 How confident are you in your ability to use self-regulation practices and tools in your work or personal life?
# tq14 <- table(Q14)
# ggplot(data = my_data) +
# geom_bar(mapping = aes(x = attendance, fill = Q14)) +
# labs(x = "Number of SEL academy session attended")+
# labs(title = "How confident are you in your ability to use self-regulation practices and tools in your work or personal life?")
#
# #
# # barplot(table(Q14, attendance),
# # #legend.text = c("Not at all", "A little", "Somewhat often", "Very often"),
# # border = "gray",
# # col = set2,
# # #col = c("red", "pink", "lightcyan","blue"),
# # main = "How confident are you in your ability to use self-regulation practices and tools in your work or personal life?",
# # xlab = "SEL academy session attended"
# # )
# # legend("topleft",
# # #c("Not applicable", "I don't know", "Not confident at all", "Slightly confident",
# # # "Somewhat confident", "Confident", "Very confident"),
# # fill = set2
# # #c("red", "pink", "lightcyan","blue")
# # )
# #
# # # Q17 How would you describe your social awareness in your personal and work relationships?-Moderately good
# #
# # set1 <- brewer.pal(4, "Set1")
# # barplot(table(Q17, attendance),
# # border = "gray",
# # col = set1,
# # main = "How would you describe your social awareness in your personal and work relationships?",
# # xlab = "SEL academy session attended"
# # )
# # legend("topleft",
# # c("Very good", "Moderately good", "Not very good","I don't know"),
# # fill = set1
# # #c("red", "pink", "lightcyan","blue")
# # )
# #
# #
# # # Q17 How would you describe your social awareness in your personal and work relationships?-Moderately good
# #
# #
#
#
|
57ed2489e9311bd1f7ff90f9329fd2fafbf879a3
|
02cb1a96eb55e557c624acc55dbedba51ad13851
|
/man/table_selectBy_Status.Rd
|
f2fe4947affd3a103b57eb3e294d01ba22ae6b2f
|
[] |
no_license
|
takewiki/mdmpkg
|
ee7beecf4d2f5df37c7f0a43d42d2130861a3ae7
|
65b86a2d823f9f803adf935ac0f38cccfa571f79
|
refs/heads/main
| 2023-05-09T22:01:06.222887
| 2021-06-08T09:11:21
| 2021-06-08T09:11:21
| 305,067,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 513
|
rd
|
table_selectBy_Status.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{table_selectBy_Status}
\alias{table_selectBy_Status}
\title{ๆ็ถๆๆฅ่ฏข่กจ}
\usage{
table_selectBy_Status(
config = cfg_edge,
table_name = "t_icitem_view",
FStatus = "FIsDo",
FValue = 0
)
}
\arguments{
\item{config}{้
็ฝฎๆไปถ}
\item{table_name}{่กจๅ}
\item{FStatus}{็ถๆๅญๆฎตๅ}
\item{FValue}{็ถๆๅผ}
}
\value{
่ฟๅๅผ
}
\description{
ๆ็ถๆๆฅ่ฏข่กจ
}
\examples{
table_selectBy_Status()
}
|
d27de4746538f3d5184f5beb7cdbf57861e0b966
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/evolqg/man/TestModularity.Rd
|
d00910145917a3a38574cd3ba1165fe49e67d64a
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,210
|
rd
|
TestModularity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TestModularity.R
\name{TestModularity}
\alias{TestModularity}
\title{Test modularity hypothesis}
\usage{
TestModularity(
cor.matrix,
modularity.hypot,
permutations = 1000,
MHI = FALSE,
...,
landmark.dim = NULL,
withinLandmark = FALSE
)
}
\arguments{
\item{cor.matrix}{Correlation matrix}
\item{modularity.hypot}{Matrix of hypothesis. Each line represents a trait and each column a module.
if modularity.hypot[i,j] == 1, trait i is in module j.}
\item{permutations}{Number of permutations, to be passed to MantelModTest}
\item{MHI}{Indicates if test should use Modularity Hypothesis Index instead of AVG Ratio}
\item{...}{additional arguments passed to MantelModTest}
\item{landmark.dim}{Used if permutations should be performed maintaining landmark structure in geometric morphometric data. Either 2 for 2d data or 3 for 3d data. Default is NULL for non geometric morphometric data.}
\item{withinLandmark}{Logical. If TRUE within-landmark correlations are used in the calculation of matrix correlation. Only used if landmark.dim is passed, default is FALSE.}
}
\value{
Returns mantel correlation and associated probability for each modularity hypothesis, along with AVG+, AVG-, AVG Ratio for each module.
A total hypothesis combining all hypotesis is also tested.
}
\description{
Tests modularity hypothesis using cor.matrix matrix and trait groupings
}
\examples{
cor.matrix <- RandomMatrix(10)
rand.hypots <- matrix(sample(c(1, 0), 30, replace=TRUE), 10, 3)
mod.test <- TestModularity(cor.matrix, rand.hypots)
cov.matrix <- RandomMatrix(10, 1, 1, 10)
cov.mod.test <- TestModularity(cov.matrix, rand.hypots, MHI = TRUE)
nosize.cov.mod.test <- TestModularity(RemoveSize(cov.matrix), rand.hypots, MHI = TRUE)
}
\references{
Porto, Arthur, Felipe B. Oliveira, Leila T. Shirai, Valderes Conto, and Gabriel Marroig. 2009. "The Evolution of Modularity in the Mammalian Skull I: Morphological Integration Patterns and Magnitudes." Evolutionary Biology 36 (1): 118-35. doi:10.1007/s11692-008-9038-3.
}
\seealso{
\code{\link{MantelModTest}}
}
\author{
Diogo Melo, Guilherme Garcia
}
\keyword{mantel}
\keyword{modularity}
|
315d8e6df9e27f865a26de6e84f52a3ca740ad60
|
c1afead7b902e231afb0939f80ab3e26e434ab8c
|
/fourier.R
|
dcb13f9fab55d4daf88090e77252568aa960ce60
|
[
"MIT"
] |
permissive
|
SantoroPablo/economic_time_series
|
23af86f1eca94995935347e57b64836599cb353e
|
cef67f1273f2c39b642401edc5c581a8ede9f05c
|
refs/heads/master
| 2020-04-11T20:01:44.619462
| 2018-12-11T19:58:22
| 2018-12-11T19:58:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,097
|
r
|
fourier.R
|
#### Librerias ####
library(tidyverse)
library(lubridate)
set.seed(1)
#### Datos ####
gold = read_csv("data/GOLD_1791-2018.csv",skip = 3)
interest_rate = read_csv("data/INTERESTRATE_1857-2018.csv",skip = 1)
cpi = read_csv("data/USCPI_1774-2018.csv", skip=4)
gdp = read_csv("data/USGDP_1790-2018.csv", skip=2)
wage = read_csv("data/USWAGE_1774-2018.csv", skip=3)
#### Funciones ####
# Replace a data frame's column names with `nuevos_enc`, preserving each
# original header as a "label" attribute on its column (metadata), so the
# original CSV headers remain recoverable after renaming.
#
# tabla:      a data.frame (anything else raises an error)
# nuevos_enc: character vector of replacement column names
# Returns the relabelled data frame.
change_header = function(tabla, nuevos_enc) {
  if (!is.data.frame(tabla)) {
    stop("Se tiene que pasar un data frame o similar objeto al campo tabla")
  }
  # Stash every original header on its own column before renaming.
  for (col_name in names(tabla)) {
    attr(tabla[[col_name]], "label") = col_name
  }
  names(tabla) = nuevos_enc
  return(tabla)
}
# Coefficient of determination (R^2) between observed data and a prediction:
#   1 - SS_res / SS_tot, computed pairwise over `datos` and `prediccion`.
#
# Fix: the original passed na.rm = TRUE to both sums but computed
# mean(datos) WITHOUT na.rm, so a single NA in `datos` made the mean —
# and therefore the whole result — NA (or degenerate) despite the na.rm
# in the sums. NA handling is now consistent throughout.
#
# datos:      numeric vector of observed values (may contain NA)
# prediccion: numeric vector of predicted values, same length as `datos`
# Returns a single numeric R^2 value.
calculo_r2 = function(datos, prediccion) {
  ss_res <- sum((datos - prediccion) ^ 2, na.rm = TRUE)
  ss_tot <- sum((datos - mean(datos, na.rm = TRUE)) ^ 2, na.rm = TRUE)
  1 - (ss_res / ss_tot)
}
# Programa
cpi = change_header(cpi, c("year", "cpi"))
gold = change_header(gold, c("year", "value"))
interest_rate = change_header(interest_rate, c("year", "short_term_ord", "short_term_surp", "long_term"))
gdp = change_header(gdp, c("year", "nominal", "real_2012_base", "gdp_deflator", "pop", "nominal_per_cap", "real_per_cap_2012_base"))
wage = change_header(wage, c("year", "cost_unsk", "prod_work_hourly_comp"))
#### Tasa de interes de largo plazo de los EE UU ####
# Visualizacion de la serie original
plot(interest_rate$long_term, type = "l", ylim = c(-3, 14), ylab = "Tasa de interes")
abline(h = mean(interest_rate$long_term), lty = 3)
interest_rate$long_term_cntr = interest_rate$long_term - mean(interest_rate$long_term)
fft_ir = fft(interest_rate$long_term_cntr)
# Visualizacion de la transformada de Fourier de la serie original
plot(Mod(fft_ir), type = "l", ylab = "Modulo", xlab = "Frecuencias")
plot(interest_rate$long_term_cntr, type = "l")
abline(h = 0, lty = 3)
# Frecuencias explicativas de la tasa de interes de largo plazo
ir_fft_freqs_explicativas6 = fft_ir * c(0, rep(1, 6), rep(0, length(fft_ir) - 13), rep(1, 6))
ir_antifft6 = Re(fft(ir_fft_freqs_explicativas6, inverse = TRUE) / nrow(interest_rate))
lines(ir_antifft6, col = "blue", type = "l") # En el รบltimo extremo parece que ajusta raro.
ir_rsq_antifft6 = calculo_r2(na.omit(interest_rate$long_term_cntr), ir_antifft6)
ir_rsq_antifft6
# Con las primeras 6 frecuencias logro un coeficiente de determinacion en el orden del 91.14%
ir_fft_freqs_explicativas4 = fft_ir * c(0, rep(1, 4), rep(0, length(fft_ir) - 9), rep(1, 4))
ir_antifft4 = Re(fft(ir_fft_freqs_explicativas4, inverse = TRUE) / nrow(interest_rate))
lines(ir_antifft4, col = "red", type = "l") # En el รบltimo extremo parece que ajusta raro.
# Con las primeras 4 frecuencias tambien se encuentra un R cuadrado aceptable.
ir_rsq_antifft4 = calculo_r2(na.omit(interest_rate$long_term_cntr), ir_antifft4)
ir_rsq_antifft4
# Residuos explicativos usando las 4 primeras frecuencias
ir_resid =
|
8dcb4ac54b4762e8b7639f05728a994556935cdd
|
8527c725baba37bc75031f40093183462db7073f
|
/scripts/create_templates.R
|
cbbd33782b578ee6f993bddc5847731dadcae9ad
|
[
"MIT"
] |
permissive
|
davidski/evaluator
|
776aafae717d68b017e2db8ee3fef8fc98342e66
|
27d475bb06ecda7ba11feef3e219519f2d6ce404
|
refs/heads/main
| 2023-01-20T20:15:17.259065
| 2022-01-25T04:55:13
| 2022-01-25T04:55:19
| 57,260,994
| 142
| 47
|
NOASSERTION
| 2022-12-21T05:01:15
| 2016-04-28T01:36:16
|
R
|
UTF-8
|
R
| false
| false
| 154
|
r
|
create_templates.R
|
#!/usr/local/bin/r

## initialize templates
message("Creating templates...")
library(evaluator)  # non-stdlib package that provides create_templates()
create_templates("/data")  # write the starter template files into /data
message("Templates created.")
|
522b0836ada79d890967068b5e74f2eb2e8f7f08
|
c6eca3d4330fa1560ada90c5dbb83264e7f04595
|
/man/consensus.Rd
|
e4e9659502846b800ac4035a313db8a4d4ff2d57
|
[] |
no_license
|
richelbilderbeek/BALCONY
|
7248a94ddfd7588a2822f33613f070efdc1a3667
|
7d26159aa93d1f56427b88e26fb5448b82246ce3
|
refs/heads/master
| 2022-12-31T18:55:02.309956
| 2020-10-26T09:57:29
| 2020-10-26T09:57:29
| 307,326,223
| 0
| 1
| null | 2020-10-26T09:48:50
| 2020-10-26T09:48:50
| null |
UTF-8
|
R
| false
| true
| 1,239
|
rd
|
consensus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conservationFuncs.R
\name{consensus}
\alias{consensus}
\title{Determine consensus sequence}
\usage{
consensus(alignment, threshold)
}
\arguments{
\item{alignment}{output of of \code{\link[seqinr]{read.alignment}} function or grouped alignment created with: \code{\link{align_seq_mtx2grs}} and \code{\link{alignment2matrix}}}
\item{threshold}{minimal fraction of amino acids on the certain position in all sequences of the alignment to be taken for consensus letter on this position; number in range 0-100.}
}
\value{
A character vector of length of the aligned sequence containing the consensus sequence based on the input alignment
}
\description{
Function calculates consensus sequence for given alignment with a threshold of user's choice.
}
\details{
If maximum fraction of any amino acid on the certain position is lower than a threshold then "*" is printed instead.
}
\note{
Please note that this function masks the seqinr package function \code{\link[seqinr]{consensus}}
}
\examples{
data("alignment")
alignment = delete_isoforms(alignment)
threshold=80 # Set the consensus threshold
consensus_sequence=consensus(alignment, threshold)
}
\keyword{consensus}
|
e087a2b5435a112b37c250d2bf66743fa0e73979
|
879f8cc2e4662c68c2b1c0043237463c78c5ec5f
|
/package_management/load_package_locally.R
|
e77c7560b06a0303f27a214711f3d521c30f87d5
|
[] |
no_license
|
ClaraMarquardt/huhn
|
f063962e8ce578c2b640530de83b02268069d0f7
|
23a71e44f5e5ec62ba4b7acde8c738ab1c47559f
|
refs/heads/master
| 2021-08-23T22:11:35.977202
| 2017-12-06T20:10:52
| 2017-12-06T20:10:52
| 101,329,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
load_package_locally.R
|
#----------------------------------------------------------------------------#
# Purpose: Load scripts locally
# Author: Clara Marquardt
# Date: 2017
#----------------------------------------------------------------------------#
#----------------------------------------------------------------------------#
# CONTROL #
#----------------------------------------------------------------------------#
print(sprintf("package_path: %s", package_path))
print(sprintf("package_name: %s", package_name))
# dependencies
# paths
setwd(package_path)
#----------------------------------------------------------------------------#
# CODE #
#----------------------------------------------------------------------------#
# load scripts locally
#--------------------------------------#
# Source every file found in the package's R/ directory so its functions
# are available in the session without installing the package.
# Fix: the original line was missing the closing parenthesis of the
# for() clause, which made the whole script a syntax error.
# NOTE(review): the path is built by plain concatenation — assumes
# `package_path` ends with "/" (it is printed as-is above); confirm.
for (x in list.files(paste0(package_path, package_name, "/R"))) {
  print(sprintf("source: %s", x))
  source(paste0(package_path, package_name, "/R/", x))
}
#----------------------------------------------------------------------------#
# END #
#----------------------------------------------------------------------------#
|
ab72f16b7abb2d3b4b453bed22811ccc14c12253
|
f3405c30760de1e2e72b397e847921f8b4d96803
|
/src/process.r
|
1abafcc6dee62b3497e545f469bd32e2db6e3419
|
[] |
no_license
|
humodz-trabalhos-icmc/nlp.2018.1
|
84421e9976fd7d0e61224781c45c6d098f6e8263
|
ce8ffc9bfe5d5db53ceaee1834ba3dd840c7cfaa
|
refs/heads/master
| 2020-03-20T22:57:25.667318
| 2018-07-05T00:55:19
| 2018-07-05T00:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,893
|
r
|
process.r
|
# process.r INPUT_DIR OUTPUT_DIR
# Reads udpipe annotated tables from INPUT_DIR and writes processed text to OUTPUT_DIR
keywords = c(
"indicado", "indicada", "recomendado", "recomendada",
"utilizado", "utilizada", "destinado", "destinada")
# Convert a udpipe token id of the form "A-B" (a contraction span) to the
# numeric value of its first component A; a plain id "A" passes through
# unchanged as numeric A.
fix.token.id <- function(token_id) {
  first.part <- strsplit(as.character(token_id), '-')[[1]][1]
  as.numeric(first.part)
}
# Usage example:
# sentence = c('comi', 'no', 'em', 'o', 'bar', 'do', 'de', 'o', 'zรฉ')
# token.ids = c('1', '2-3', '2', '3', '4', '5-6', '5', '6', '7')
# result = remove.expanded.contractions(sentence, token.ids)
# remove.expanded.contractions <- function(words, ids) {
# contraction.ids = as.character(ids[grep('-', ids, fixed=TRUE)])
# expanded.ids = unlist(strsplit(contraction.ids, '-', fixed=TRUE))
# unwanted.indices = match(expanded.ids, ids)
# return(words[-unwanted.indices])
# }
# Collapse the space-separated expansions of Portuguese contractions that
# udpipe inserts (e.g. " de o " for "do") back to a single space, applied
# sequentially in a fixed order over the whole string.
remove.expanded.contractions <- function(text) {
  expansions <- c(
    ' de a ', ' de o ', ' de as ', ' de os ',
    ' em a ', ' em o ', ' em as ', ' em os ',
    ' a a ', ' a o ', ' a as ', ' a os ')
  # Fold each substitution over the text, same order as the original loop.
  Reduce(function(txt, pattern) gsub(pattern, ' ', txt), expansions, text)
}
# Extract, from a udpipe annotation table, the text that follows the first
# indication keyword (see the module-level `keywords` vector) within that
# keyword's sentence, then collapse expanded contractions.
#
# udpipe.table: a udpipe-style data frame; this function reads the
#   doc_id, sentence_id, token_id and token columns.
# Returns a single character string (the "what is it for" fragment).
process.annotated.table <- function(udpipe.table) {
  bula = udpipe.table
  # Numeric token positions; contraction ids like "2-3" collapse to 2.
  token_id = sapply(bula$token_id, fix.token.id)
  # Row index of the first keyword occurrence anywhere in the table.
  # NOTE(review): if no keyword is present this is NA, and the masks below
  # produce an empty result — confirm inputs always contain a keyword.
  condition = which(bula$token %in% keywords)[1]
  select = bula[condition,]
  select_token_id = token_id[condition]
  # Tokens of the same document AND same sentence that come strictly
  # after the keyword token.
  wanted.words = (bula$doc_id == select$doc_id &
  bula$sentence_id == select$sentence_id &
  token_id > select_token_id)
  ids = bula[wanted.words, "token_id"]
  words = bula[wanted.words, "token"]
  # words = remove.expanded.contractions(words, ids)
  # Join the selected tokens into one string, then undo contraction
  # expansion on the joined text (string-level variant of the helper).
  para.que = paste(words, collapse=' ')
  para.que = remove.expanded.contractions(para.que)
  return (para.que)
}
# Read every udpipe-annotated table (space-separated CSV with header) in
# `from_dir`, extract its indication text with process.annotated.table(),
# and write the result as UTF-8 plain text to `to_dir` under the same
# base name with a .txt extension.
process.dir <- function(from_dir, to_dir) {
  # Ensure the destination exists; recursive=TRUE also creates parents.
  dir.create(to_dir, showWarnings=FALSE, recursive=TRUE)
  fnames = list.files(from_dir)
  for(fname in fnames) {
    no_extension = tools::file_path_sans_ext(fname)
    fin = paste0(from_dir, '/', fname)
    fout = paste0(to_dir, '/', no_extension, '.txt')
    udpipe.table = read.csv(fin, sep=' ', encoding='utf-8')
    processed.text = process.annotated.table(udpipe.table)
    # Use an explicit connection so the output encoding is forced to UTF-8.
    fileConn = file(fout, encoding='utf-8')
    writeLines(processed.text, fileConn)
    close(fileConn)
  }
}
# Command-line entry point: expects exactly two arguments
# (INPUT_DIR OUTPUT_DIR); otherwise prints usage and does nothing.
process.r.main <- function(args) {
  if(length(args) == 2) {
    process.dir(from_dir=args[1], to_dir=args[2])
  } else {
    cat('Usage:\n')
    cat(' process.r INPUT_DIR OUTPUT_DIR\n')
  }
}
}
source('src/annotate.r')
annotate.r.main(c('data/filtered', 'data/annotated'))
process.r.main(c('data/annotated', 'data/final'))
|
0a53675569cac1d400ef96d7907d4e18a89b803b
|
07993f771b8a0cd6a83f3e7d1ea9c58cc17d602f
|
/Archive/Accessing CCII netCDF files.R
|
b5d65535bd5166ae1a30033e43eed60e89547ecb
|
[] |
no_license
|
SallyFreanOwen/insurance-and-climate
|
817be05881f9bd1fcb3df1bc94ebff0a11620187
|
fff9575347fe5a0d72f921f913949eebe6d4ea76
|
refs/heads/master
| 2021-07-08T22:21:18.497805
| 2020-07-28T03:29:35
| 2020-07-28T03:29:35
| 150,044,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,525
|
r
|
Accessing CCII netCDF files.R
|
### Beginning to think about extracting data from a netCDF using R
# Sally Owen
# Public Insurance Project
# November 2017
# Using the ncdf4 package:
install.packages("ncdf4")
install.packages("ncdf.tools")
install.packages("ncdf.tools")
install.packages("RNetCDF")
install.packages("chron")
install.packages("RColorBrewer")
install.packages("lattice")
install.packages("reshape2")
install.packages("dplyr")
install.packages("raster") # package for raster manipulation
install.packages("rgdal") # package for geospatial analysis
install.packages("ggplot2")
install.packages("RSAGA")
install.packages("GISTools")
library(ncdf.tools)
library(ncdf4)
library(ncdf4.helpers)
library(RNetCDF)
#Loading other helpful things:
library(chron)
library(RColorBrewer)
library(lattice)
library(reshape2)
library(dplyr)
# More helpful things
library(raster) # package for raster manipulation
library(rgdal) # package for geospatial analysis
library(RSAGA)
library(ggplot2) # package for plotting
library(GISTools)
# Setting working directory
#setwd("/Downloads")
# Reading in a first file:
raindata <- nc_open("Mon_TotalPrecipCorr_VCSN_BCC-CSM1.1_1971-2005_RCPpast.nc", write=FALSE, readunlim=TRUE, verbose=FALSE, auto_GMT=TRUE, suppress_dimvals=FALSE)
#raindata <- nc_open("Mon_TotalPrecipCorr_VCSN_BCC-CSM1.1_2006-2120_RCP8.5.nc", write=FALSE, readunlim=TRUE, verbose=FALSE, auto_GMT=TRUE, suppress_dimvals=FALSE)
print(raindata)
print(paste("The file has",raindata$nvars,"variables"))
# Grab the variable object and the coordinate vectors.
rain <- raindata$var[[3]] # naming the "rain" variable, which has three dimensions (lat long time)
lon <- ncvar_get(raindata, "longitude")
lat <- ncvar_get(raindata, "latitude")
t <- ncvar_get(raindata, "time")
rain.array <- ncvar_get(raindata, "rain") # store the data in a 3-dimensional array
dim(rain.array)
fillvalue <- ncatt_get(raindata, "rain", "_FillValue")
fillvalue
# NOTE(review): nc_close() used to be called here, but the per-timestep loop
# below still reads from `raindata` via ncvar_get(), which fails on a closed
# file. The close is now deferred until after the loop.
# Tidying:
rain.array[rain.array == fillvalue$value] <- NA # replacing fill value of -9999 for NA
# Pulling out one timeslice (or trying to:)
rain.tslice <- rain.array[, , 1]
dim(rain.tslice)
# Saving as a raster (t() here is the transpose *function*, not the time vector)
r <- raster(t(rain.tslice), xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat), crs=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs+ towgs84=0,0,0"))
plot(r)
# Exporting raster as ascii
write.ascii.grid(rain.tslice, "raster", header = NULL, write.header = TRUE)
# Exporting as Polygon attempt
rainpolygons <- rasterToPolygons(r)
writeOGR(obj=rainpolygons, dsn="tempdir", layer="TotalPrecip", driver="ESRI Shapefile") # this is in geographical projection
varsize <- rain$varsize
ndims <- rain$ndims
nt <- varsize[ndims] # Remember timelike dim is always the LAST dimension!
library(arrayhelpers)
test<-array2df(rain.array,matrix=TRUE)
# Read the variable one timestep at a time (seq_len avoids the 1:0 trap
# when nt == 0).
for( i in seq_len(nt) ) {
  # Initialize start and count to read one timestep of the variable.
  start <- rep(1,ndims)   # begin with start=(1,1,1,...,1)
  start[ndims] <- i       # change to start=(1,1,1,...,i) to read timestep i
  count <- varsize        # begin w/count=(nx,ny,nz,...,nt), reads entire var
  count[ndims] <- 1       # change to count=(nx,ny,nz,...,1) to read 1 tstep
  data <- ncvar_get( raindata, rain, start=start, count=count )
  # Now read in the value of the timelike dimension
  timeval <- ncvar_get( raindata, rain$dim[[ndims]]$name, start=i, count=1 )
  #print(paste("Data for variable",rain$name,"at timestep",i,"(time value=",timeval,rain$dim[[ndims]]$units,"):"))
  #print(data)
}
# Close the file only after every read from it is done.
nc_close(raindata)
|
d2ea1331672b45f3022b557ec88ef886d5cc547d
|
15e71e39f2ddf6ad7fae8e749419142573852104
|
/R/binarizer.R
|
0baa736db422db68d9930df6eb6125712c6d36e5
|
[] |
no_license
|
LudvigOlsen/LRO.utilities
|
a61eb0239f2fac91818cfd9f4a639fa2df16bd5d
|
b21e5d8f1491807b85a1522db149c5b4e14476b8
|
refs/heads/master
| 2021-01-22T18:37:50.827997
| 2020-11-05T21:57:31
| 2020-11-05T21:57:31
| 85,097,753
| 1
| 2
| null | 2017-05-14T21:33:31
| 2017-03-15T16:47:56
|
R
|
UTF-8
|
R
| false
| false
| 2,926
|
r
|
binarizer.R
|
# Binarizer
#
#' @title Binarize multiple columns at once
#' @description Binarize multiple columns of a dataframe based on a given threshold.
#'
#' \strong{binarizer} is designed to work with \link[magrittr]{\%>\%} pipelines.
#'
#' \strong{binarizer_} is a standard evaluation version.
#' @param data Dataframe, tbl, vector
#' @param ...,cols Variables to include/exclude.
#'
#' ... :
#' You can use same specifications as in dplyr's \link[dplyr]{select}.
#'
#' cols :
#' character vector
#'
#' If missing, defaults to all non-grouping variables.
#' @param thresh Threshold (Numeric).
#' @return Tibble where selected columns have been binarized.
#'
#' Above thresh is 1; below or equal to thresh is 0.
#' @details Binarizes each specified column and converts to tibble.
#' @aliases binarizer_
#' @export
#' @examples
#' # Attach package
#' library(LRO.utilities)
#'
#' # Create dataframe
#' df <- data.frame('a' = c(1,2,3,4,5,6,7),
#' 'b' = c(2,3,4,5,6,7,8))
#'
#' # First center both columns
#' centered_df <- scaler(df, scale = FALSE)
#'
#' # Binarizing multiple columns
#' binarizer(centered_df)
#' binarizer(centered_df, a, b)
#' binarizer(centered_df, 1:2)
#' binarizer(centered_df, c(a,b))
#'
#' # Binarize 'a'
#' binarizer(centered_df, a)
#'
#' # Binarize all but 'a'
#' binarizer(centered_df, -a)
#'
#' ## Standard evalutation versions
#'
#' binarizer_(centered_df, cols = c('b'))
#'
#' @importFrom dplyr '%>%'
# NSE front-end: resolves dplyr::select-style column specs passed in `...`
# and delegates the actual binarization to binarizer_().
binarizer <- function(data, ..., thresh = 0){
# If `data` is a bare vector, convert it to a one-column tibble named
# after the passed object, or "x" if the object has no usable name.
if (is.vector(data)){
# Capture the caller's expression *before* `data` is reassigned.
# If it is c(...) it will be set to "x" inside convert_and_name_vector()
# (a package helper defined elsewhere in this file/package).
vector_name <- deparse(substitute(data))
data <- convert_and_name_vector(data, vector_name)
}
# Resolve the tidyselect specs in `...` to a character vector of column
# names (get_dots_cols() is a package helper defined elsewhere).
cols <- get_dots_cols(data, ...)
binarizer_(data = data,
cols = cols,
thresh = thresh)
}
#' @rdname binarizer
#' @export
binarizer_ <- function(data, cols = NULL, thresh = 0){
  # Standard-evaluation work-horse: binarize the columns named in `cols`
  # (all columns when NULL) of `data`, returning a tibble. Values strictly
  # above `thresh` become 1, everything else 0 (NA stays NA).

  # If `data` is a bare vector, convert it to a one-column tibble named
  # after the passed object ("x" when the object has no usable name).
  if (is.vector(data)){
    # Capture the caller's expression *before* `data` is reassigned;
    # convert_and_name_vector() (package helper) handles the "x" fallback.
    vector_name <- deparse(substitute(data))
    data <- convert_and_name_vector(data, vector_name)
  }

  # Default to binarizing every column. (After the vector branch above,
  # `data` is always tabular and `cols` is always non-NULL from here on,
  # so no further vector special-casing is needed.)
  if (is.null(cols)){
    cols <- colnames(data)
  }

  # mutate_each_() is defunct in modern dplyr; across()/all_of() is the
  # supported equivalent and applies binarize_single() to each named column.
  data %>%
    tibble::as_tibble() %>%
    dplyr::mutate(dplyr::across(dplyr::all_of(cols),
                                ~ binarize_single(.x, thresh = thresh)))
}
# Map a single numeric vector to 0/1: values strictly above `thresh`
# become 1, all others 0. NA inputs propagate as NA.
binarize_single <- function(col, thresh = 0){
  above_threshold <- col > thresh
  ifelse(above_threshold, 1, 0)
}
|
8533e09b651dc3e6c451ba605d29a3bdf392d9c9
|
460a928062977465774d982516ecb2b6fa5997bf
|
/man/slider-package.Rd
|
8d11242ae9e32c92370277c44230f9c837542815
|
[
"MIT"
] |
permissive
|
erhard1/slider
|
ba73550842db4cadbf1248d7adcf86204273f3eb
|
bb0501f00918bb52f326a54434600a29cdafd190
|
refs/heads/master
| 2023-06-13T07:33:28.330909
| 2021-07-01T19:49:44
| 2021-07-01T19:49:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 812
|
rd
|
slider-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slider-package.R
\docType{package}
\name{slider-package}
\alias{slider-package}
\alias{_PACKAGE}
\title{slider: Sliding Window Functions}
\description{
Provides type-stable rolling window functions over any R data
type. Cumulative and expanding windows are also supported. For more
advanced usage, an index can be used as a secondary vector that
defines how sliding windows are to be created.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/DavisVaughan/slider}
\item Report bugs at \url{https://github.com/DavisVaughan/slider/issues}
}
}
\author{
\strong{Maintainer}: Davis Vaughan \email{davis@rstudio.com}
Other contributors:
\itemize{
\item RStudio [copyright holder]
}
}
\keyword{internal}
|
c255aba50d11729733c2f8b1aade268aece974e3
|
4b258f2282aa412dedaa5b5d89d33a77c2bdf430
|
/man/summary.intELtest.Rd
|
50249f052d464dc16da6a478f281735bad667344
|
[] |
no_license
|
news11/survELtest
|
6ada624ef47bc513230398c10683be0c68cb0c24
|
578335a28dcfcee04c24a44dd031d025422afc80
|
refs/heads/master
| 2020-09-20T02:37:17.001049
| 2020-01-16T03:32:50
| 2020-01-16T03:32:50
| 224,186,255
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,748
|
rd
|
summary.intELtest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.intELtest.R
\name{summary.intELtest}
\alias{summary.intELtest}
\title{Summary function for intELtest object}
\usage{
\method{summary}{intELtest}(object, digits = max(3L, getOption("digits") - 3L), quiet = FALSE, ...)
}
\arguments{
\item{object}{the result of a call to the \code{intELtest} function}
\item{digits}{significant digits to print, the default value is \code{max(3L, getOption("digits") - 3L)}}
\item{quiet}{a logical indicating whether to reduce the amount of output or not, the default value is \code{FALSE}}
\item{...}{for future method}
}
\value{
\code{summary.intELtest} returns a list with following components:
\itemize{
\item \code{call} the statement used to create the \code{intELtest} object
\item \code{teststat} the resulting integrated EL statistics
\item \code{critval} the critical value based on bootstrap
\item \code{pvalue} the p-value of the test
\item \code{sided} the value of the input argument of intELtest
\item \code{alpha} the value of the input argument of intELtest
}
}
\description{
Returns a list containing the integrated EL statistics, the critical value based on bootstrap,
and the p-value of the test.
}
\examples{
library(survELtest)
result = intELtest(survival::Surv(hepatitis$time, hepatitis$censor) ~ hepatitis$group)
summary(result)
## OUTPUT:
## Call:
## intELtest(formula = survival::Surv(hepatitis$time, hepatitis$censor) ~
## hepatitis$group)
##
## Two-sided integrated EL test statistic = 1.42, p = 0.007,
## critical value based on bootstrap = 0.875 at a significance level of 0.05
}
\seealso{
\code{\link{hepatitis}}, \code{\link{intELtest}}, \code{\link{print.intELtest}}
}
|
f6d37bda62f3728fa10573dae4e163f609cdb88d
|
7eacb63dc1dad1483f8ca1bafd317f256f5ad860
|
/Carpentry2017.R
|
a09026ce57261025becf1bb2f84dd77d33109ac7
|
[] |
no_license
|
espinozav/Carpentry2017
|
4f6f8a80348a34a5f00f9054be870e34b7cee165
|
125dc8a5d72edb51fffa25b67bfb83e2c6c3100a
|
refs/heads/master
| 2021-01-22T17:39:27.916472
| 2017-08-18T23:05:46
| 2017-08-18T23:05:46
| 100,730,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,323
|
r
|
Carpentry2017.R
|
###DATA VIZ
# Workshop script: ggplot2 scatter, boxplot and time-series examples using
# the Portal surveys dataset (expects data_output/surveys_complete.csv).
library(tidyverse)
surveys_complete <- read.csv('data_output/surveys_complete.csv')
##ggplot2 ----
ggplot(data= surveys_complete, aes(x=weight, y=hindfoot_length)) + #data + aesthetics go here; "+" adds further plotting instructions
geom_point(alpha =0.8, aes(color=species_id)) #the geom decides what kind of plot is drawn
##Challenge: create a scatterplot of weight over species_id with this plot type,
##showing in different colors. Is this a good way to show this type of data?
ggplot(data= surveys_complete, aes(x=weight, y=species_id)) + #data + aesthetics go here; "+" adds further plotting instructions
geom_point(alpha =0.8, aes(color=plot_type))
##NO!!!! USE BOXPLOT INSTEAD
ggplot(data= surveys_complete, aes(y=weight, x=species_id))+
geom_boxplot(aes(color=plot_type))+
facet_grid(sex ~.)+
labs(x="Species",
y="Weight",
title="plot",
caption="This is a test boxplot...cool to caption within the code")+
theme(legend.position="bottom")
##TIME SERIES ----
#total number of individuals observed per species per year
yearly_counts <- surveys_complete %>%
group_by(year, species_id) %>%
tally
ggplot(data=yearly_counts,
aes(x=year, y=n, group=species_id, color=species_id)) +
geom_line()+
facet_wrap(~ species_id) # split into one panel per species_id
##now for yearly sex counts
yearly_sex_counts <- surveys_complete %>%
group_by(year, species_id, sex) %>%
tally
ggplot(data = yearly_sex_counts,
aes(x = year, y = n, color = sex)) +
geom_line()+
facet_wrap(~ species_id) # split into one panel per species_id
##Challenge (weight)
#create a plot that depicts how the average weight of each species changes through the years
# FIX(review): the original pipeline grouped by `weight` itself (making
# mean(weight) a no-op per row), appended tally(), and then plotted the
# row count `n` against an axis labelled "Mean Weight (g)". Group by year
# and species only and plot the computed mean weight instead.
yearly_weight <- surveys_complete %>%
  group_by(year, species_id) %>%
  summarise(avg_weight = mean(weight, na.rm = TRUE))
##assigning plot to myplot object
myplot <- ggplot(data = yearly_weight,
                 aes(x = year, y = avg_weight, color = species_id)) +
  geom_line() +
  facet_wrap(~ species_id) +
  labs(x = "year",
       y = "Mean Weight (g)") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90),
        legend.position = "none")
ggsave("my_plot.png", myplot, width = 15, height = 10) #saving ggplot
|
a4a99349f2689d151b560721bcdd9e21f94fad48
|
520aa74848464c786cff396ad53748ee9b4b598a
|
/code/0_import_ebsco.R
|
8ba8a43960ea984d47ee905d1abdbea952560e23
|
[] |
no_license
|
gastonbecerra/bigdata_humanidades_biblio
|
1d61d2eb8a4828e6d6b4325966902e6ded1e3945
|
49e16fe63b85389499511f86676ea9c48841605f
|
refs/heads/main
| 2023-03-10T17:02:14.063927
| 2021-03-01T23:59:35
| 2021-03-01T23:59:35
| 323,950,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,588
|
r
|
0_import_ebsco.R
|
# import dataset ebsco
# - importar metadata
# - separar / importar ocr
# - separar / importar referencias
library(tidyverse)
library(foreach)
library(xml2)
library(rvest)
library(doParallel)
library(jsonlite)
# import ebsco metadata y ocr -------------------
path <- './data/raw/ebsco_full/'
global_metadata <- data.frame()
global_content <- data.frame()
html_todos <- list.files(path = path, recursive = TRUE)
html_metadata_list <- foreach::foreach(html_file=html_todos, .combine = list) %do% {
html_content <- xml2::read_html(paste0(path,html_file))
# metatags
registros <- xml2::xml_find_all(x=html_content ,xpath='//*[@id="records"]/dl')
i=0
for (i in 1:length(registros)) {
tags <- xml2::xml_children(registros[[i]]) %>% xml_name()
for (j in 1:length(tags)) {
# busco secuencia
if ((tags[j] == "dt") && (tags[j+1] == "dd")) {
x<-data.frame(
registro = i ,
tag = xml2::xml_child(registros[[i]], search = j) %>% xml_text() ,
value = xml2::xml_child(registros[[i]], search = j+1) %>% xml_text() %>% as.character() ,
html = xml2::xml_child(registros[[i]], search = j+1) %>% xml_contents() %>% as.character() ,
file = html_file
)
global_metadata <- rbind(global_metadata,x)
}
}
print(paste(html_file,i))
}
# full-content
content <- xml2::xml_find_all(x=html_content, xpath='//*[@id="records"]/div[@class="print-citation"]')
for (j in 1:length(content)) {
title <- xml2::xml_find_all(x=content[[j]], xpath='h1') %>% xml2::xml_text() %>% as.character()
x<-data.frame(
title = title,
tag = 'ocr',
value = as.character(content[j]),
file = html_file
)
global_content <- rbind(global_content,x)
print(paste("content: ",title))
}
}
rm(html_todos, html_metadata_list, html_content, html_file, registros, x, i, j,
tags, content, title, path)
global_metadata <- global_metadata %>%
mutate_if(is.factor, .funs = as.character) %>%
mutate(id= paste0("ebsco_",gsub("[^[:alnum:][:space:]]","",file),"_",registro)) %>%
select(id,tag,value,html)
## matcheo ocr y metatags
## Normalise a title string for matching: strip everything that is not a
## letter, digit or whitespace, lower-case it, and trim surrounding blanks.
limpiar <- function(txt) {
  sin_puntuacion <- gsub("[^[:alnum:][:space:]]","",txt)
  stringr::str_trim(tolower(sin_puntuacion))
}
global_content2 <- global_content %>%
mutate(title=limpiar(title)) %>%
inner_join(
global_metadata %>% filter(tag=="Tรญtulo:") %>% mutate(title=limpiar(value)) %>%
select(title,id),
by="title") %>%
select(id,fulltext=value)
rm(limpiar)
## limpiar ocr rapido
global_content2$fulltext <- gsub("<.*?>", "", global_content2$fulltext)
global_content2 %>% readr::write_csv(x = ., path = './data/ebsco_ocr.csv')
glimpse(global_refs)
glimpse(global_content2)
glimpse(global_metadata)
## limpieza
# junto metadata y referencias
data <- rbind(global_metadata , global_refs %>% mutate(html=""))
glimpse(data)
# limpio los tags
data$tag <- gsub("[^[:alnum:][:space:]]","",data$tag) %>% tolower() %>% trimws()
table(data$tag)
# divido los que tienen mas de 1 valor
separar <- c("materias","palabras clave proporcionadas por el autor",
"tรฉrminos temรกticos","author supplied keywords", "tรฉrminos geogrรกficos", "empresaentidad",
"gente", "revisiones y productos")
# Replace every keyword delimiter found in the scraped EBSCO HTML (the
# <span> wrapper is stripped first; <br>, ";", ":" and two odd bytes are
# turned into "$"), then pad each "$" with spaces so the later
# str_split_fixed(..., pattern = "$") call can cut the string into
# individual keywords.
# NOTE(review): the last two replaced patterns are mis-encoded characters
# ("â€ " and a space-like byte) — presumably mojibake for a dagger and a
# non-breaking space in the source HTML; verify against the raw data
# before touching them.
separadores <- function(x) {
y <- x
y <- gsub('<span class="medium-normal">', "", y, fixed = TRUE)
y <- gsub('</span>', "", y, fixed = TRUE)
y <- gsub("<br>" , "$", y, fixed = TRUE)
#y <- gsub("." , "$", y, fixed = TRUE)
#y <- gsub("," , "$", y, fixed = TRUE)
y <- gsub(";" , "$", y, fixed = TRUE)
y <- gsub(":" , "$", y, fixed = TRUE)
y <- gsub("â€ " , "$", y, fixed = TRUE)
y <- gsub(" " , "$", y, fixed = TRUE)
y <- gsub("$" , " $ ", y, fixed = TRUE)
return(y)
}
sep <- data %>% filter(tag %in% separar) # primero los filtro. mismo criterio para reemplazar
sep <- sep %>% mutate(value3=separadores(html)) # los proceso, divido, y rejunto
kps <- as.data.frame(str_split_fixed(sep$value3, pattern = fixed("$"), n=50),stringsAsFactors = FALSE)
sep <- cbind(sep,kps)
rm(kps)
sep2 <- sep %>%
select(-value,-value3) %>%
tidyr::pivot_longer(cols = c(-tag, -id, -html), names_to = "v", values_to = "value") %>%
transform(value = gsub("[^[:alnum:][:space:]]","",value) %>% tolower() %>% trimws() ) %>%
filter(value != "") %>%
select(id,tag,value,html)
sep2 <- sep2 %>% mutate(tag="keyword")
data <- data %>% anti_join( data %>% filter(tag %in% separar) )
data <- rbind(data,sep2)
sep2 %>% select(id) %>% distinct(id) %>% pull(id) %>% length()
rm(sep,separar,separadores,sep2)
# fuentes
# Extract the journal name from a "fuente" (source) string: everything
# before the first "." or ";". Vectorised replacement for the original
# element-by-element loop, which grew its result with c() (O(n^2)) and
# mishandled zero-length input via 1:length(fuente).
extraer_fuente <- function(fuente) {
  sub("[.;].*", "", fuente)
}
sep <- data %>% filter(tag == "fuente")
sep <- sep %>% mutate(value = extraer_fuente(sep$value), tag = "journal")
data <- rbind(data,sep)
rm(sep, extraer_fuente)
# anios
# Extract the publication year from a "fuente" (source) string, trying
# three patterns in order: "Sep2019"-style, "/2015", then " 2015".
# Returns an integer vector (NA where no year pattern matches).
# Rewritten with vapply() to avoid growing the result with c() and the
# 1:length() trap that made the original error on zero-length input.
extraer_anio <- function(fuente) {
  vapply(fuente, function(src) {
    y <- stringr::str_extract(src, "[a-zA-Z]{3,}\\d{4}") # e.g. Sep2019
    if (is.na(y)){
      y <- stringr::str_extract(src, "/\\d{4}")          # e.g. /2015
      if (is.na(y)){
        y <- stringr::str_extract(src, " \\d{4}")        # e.g. " 2015"
      }
    }
    # parse_number() (readr, attached via tidyverse) strips any residual
    # non-digits; NA propagates through to NA_integer_.
    as.integer(parse_number(y))
  }, integer(1), USE.NAMES = FALSE)
}
sep <- data %>% filter(tag == "fuente")
sep <- sep %>% mutate(value = extraer_anio(sep$value), tag = "year")
# data <- data %>% anti_join( data %>% filter(tag == "fuente") )
table(sep$value)
data <- rbind(data,sep)
rm(sep, extraer_anio)
# resumen
data <- data %>% mutate(tag = ifelse(test = tag == "resumen inglรฉs", yes = "resumen", no = tag))
sep <- data %>% filter(tag == "resumen")
sep <- sep %>% mutate(tag = "abstract")
# data <- data %>% anti_join( data %>% filter(tag == "fuente") )
data <- rbind(data,sep)
rm(sep)
# titulo
sep <- data %>% filter(tag == "tรญtulo") %>% mutate(tag="titulo")
data <- data %>% anti_join( data %>% filter(tag == "tรญtulo") )
data <- rbind(data,sep)
rm(sep)
separar <- c("abstract", "keyword", "references", "journal", "year", "autor", "aff", "doi", "orcid", "titulo")
data2 <- data %>% filter(tag %in% separar) %>% select(id,tag,value)
table(data2$tag)
glimpse(data2)
readr::write_csv(x = data2, path = './data/ebsco_metadata.csv')
# importar autores -------------------------
# Extract author names from the scraped EBSCO author HTML. For each input
# element: strip the span wrapper and "(AUTHOR)" markers, mark superscript
# affiliation indices with "|", split on <br> (one author per line), and
# keep only the text before the first "|". The per-record authors are
# re-joined into one "$"-separated string, later cut apart again with
# str_split_fixed(..., "$").
# NOTE(review): `x` starts as "" and paste() uses sep = "$", so every
# result begins with a leading "$" — the downstream split/filter appears
# to tolerate the resulting empty first field; confirm before changing.
extraer_autores <- function(autores) {
retTag <- c()
for (i in 1:length(autores)) {
y <- autores[i]
y <- gsub('<span class="medium-normal">', "", y, fixed = TRUE)
y <- gsub('</span>', "", y, fixed = TRUE)
y <- gsub('(AUTHOR)', "", y, fixed = TRUE)
y <- gsub('<sup>', "|", y, fixed = TRUE)
z <- str_split(string = y, pattern = "<br>", simplify = TRUE)
x <- ""
for (j in 1:length(z)) {
# Keep only the name portion, i.e. the text before the first "|"
# (the superscript affiliation index).
x <- paste( x , sub("\\|.*", "", trimws(z[j])), sep = "$")
}
retTag <- c(retTag, x)
}
return( retTag )
}
sep <- data %>% filter(tag == "autores")
sep <- sep %>% mutate(value = extraer_autores(sep$html))
kps <- as.data.frame(str_split_fixed(sep$value, pattern = fixed("$"), n=25),stringsAsFactors = FALSE)
sep <- cbind(sep,kps)
rm(kps)
sep2 <- sep %>%
select(-value) %>%
tidyr::pivot_longer(cols = c(-tag, -id, -html), names_to = "v", values_to = "value") %>%
# transform(value = gsub("[^[:alnum:][:space:]]","",value) %>% tolower() %>% trimws() ) %>%
transform(value = value %>% tolower() %>% trimws() ) %>%
filter(value != "") %>%
select(id,tag,value)
sep2 <- sep2 %>% mutate(tag="autor")
data <- rbind(data,sep2 %>% mutate(html=""))
autores <- sep2 %>% select(id,autor=value) %>%
mutate(orden=NA, aff=NA, aid=NA, pais=NA, orcid=NA)
autores %>% readr::write_csv(path = "./data/ebsco_autores.csv")
rm(sep, sep2, extraer_autores)
data %>% filter(tag=="autor") %>% select(id) %>% distinct(id) %>% pull(id) %>% length()
# afiliaciones -------------------------
# al borrar las posiciones, los autores y las afiliaciones no estan matcheadas
# Extract author affiliations from the scraped EBSCO affiliation HTML.
# Each <br>-separated line is assumed to start with a superscript marker
# like "<sup>1</sup>"; the marker is dropped by taking a fixed-width
# substr() from just past str_length("<sup>1</sup>") to the end, and the
# remaining affiliation strings are joined with "$" (split apart later).
# NOTE(review): the fixed-width skip assumes a single-digit superscript —
# "<sup>10</sup>" would leave a stray character; verify against the data.
# As the file-level comment above notes, positional author/affiliation
# matching is lost by this step.
extraer_afiliaciones <- function(autores) {
retTag <- c()
for (i in 1:length(autores)) {
y <- autores[i]
y <- gsub('<span class="medium-normal">', "", y, fixed = TRUE)
y <- gsub('</span>', "", y, fixed = TRUE)
y <- gsub('(AUTHOR)', "", y, fixed = TRUE)
#y <- gsub('<sup>', "|", y, fixed = TRUE)
z <- str_split(string = y, pattern = "<br>", simplify = TRUE)
x <- ""
for (j in 1:length(z)) {
# x <- paste( x , sub("\\|.*", "", trimws(z[j])), sep = "$")
# Drop the leading "<sup>N</sup>" marker by fixed-width offset.
f <- substr(trimws(z[j]), start = str_length("<sup>1</sup>")+1, stop=str_length(trimws(z[j])))
x <- paste( x , f, sep = "$")
}
retTag <- c(retTag, x)
}
return( retTag )
}
sep <- data %>% filter(tag == "afiliaciones del autor")
sep <- sep %>% mutate(value = extraer_afiliaciones(sep$html))
kps <- as.data.frame(str_split_fixed(sep$value, pattern = fixed("$"), n=25),stringsAsFactors = FALSE)
sep <- cbind(sep,kps)
rm(kps)
sep2 <- sep %>%
select(-value) %>%
tidyr::pivot_longer(cols = c(-tag, -id, -html), names_to = "v", values_to = "value") %>%
# transform(value = gsub("[^[:alnum:][:space:]]","",value) %>% tolower() %>% trimws() ) %>%
transform(value = value %>% tolower() %>% trimws() ) %>%
filter(value != "") %>%
select(id,tag,value)
sep2 <- sep2 %>% mutate(tag="aff")
sep2
rm(sep, sep2, extraer_afiliaciones)
data %>% filter(tag=="aff") %>% select(id) %>% distinct(id) %>% pull(id) %>% length()
# importar referencias ---------------------
global_refs <- data.frame()
for (i in 1:length(global_content2$fulltext)) {
print(i)
p <- xml2::read_html(global_content2$fulltext[i]) %>%
xml2::xml_find_all(xpath = '//div[@class="print-citation"]') %>%
xml2::xml_children()
id <- global_content2$id[i]
first <- last <- NULL
j=0
for(pp in p) {
j=j+1
if (grepl( "ref_toc", as.character(pp), fixed = TRUE)) {first=j}
if (xml2::xml_text(pp)=="~~~~~~~~") {last=j}
}
if (!is.null(first) && !is.null(last)) {
global_refs <- rbind(global_refs ,
data.frame(cbind( id = id,
tag = "references",
value = xml_text(p[(first+1):(last-1)]) %>%
trimws()
)))
}
}
rm(p,first,last,j,i,pp,id)
global_refs %>% select(id) %>% distinct(id) %>% pull(id) %>% length()
|
7e6971618031cc49929690f9cdf2208bd02ff3cd
|
686ee8bbc2e754c22304261be5ce96f58b89a079
|
/plot4.R
|
f2b4929adde865d50bafabf8c3c37ba0b14ce6da
|
[] |
no_license
|
danielstallworthmit/ExData_Plotting1
|
ea10e2c90cd5c52142bbd35e2e3fde7a32bbe7cc
|
fd0a9c6a4c9d67a2d050081d03ac9855b094c57b
|
refs/heads/master
| 2021-01-19T09:03:48.813817
| 2017-04-09T18:21:57
| 2017-04-09T18:21:57
| 87,720,653
| 0
| 0
| null | 2017-04-09T16:00:04
| 2017-04-09T16:00:04
| null |
UTF-8
|
R
| false
| false
| 1,540
|
r
|
plot4.R
|
# plot4.R — draw a 2x2 panel (global active power, voltage, energy
# sub-metering, global reactive power) over time and save it as plot4.png.
# Expects household_consumption_filtered.csv (produced for plot1) with
# Date, Time, Global_active_power, Voltage, Sub_metering_1..3 and
# Global_reactive_power columns.
library(data.table)
# Set up data using filtered data from plot1
# (header = TRUE spelled out: T is a reassignable variable, not a keyword)
data_filtered <- fread("household_consumption_filtered.csv",sep = ",",header = TRUE)
# Add DateTime column as combination of Date and Time columns
data_filtered <- within(data_filtered, DateTime <- paste(Date, Time, sep = " "))
data_filtered$DateTime <- as.POSIXct(data_filtered$DateTime,tz = "GMT")
# Plotting week day and Sub metering in 480x480 png
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
# Plot1: global active power over time
with(data_filtered, plot(DateTime, Global_active_power, type = "l",
xlab="",
ylab="Global Active Power (kilowatts)"))
# Plot2: voltage over time
with(data_filtered, plot(DateTime, Voltage, type = "l",
xlab="datetime",
ylab="Voltage"))
# Plot3: empty frame first (type = "n"), then overlay the three sub-meters
with(data_filtered, plot(DateTime, Sub_metering_3,
xlab="", ylim = c(0,38),
ylab="Energy sub metering",
type="n"))
points(data_filtered$DateTime,data_filtered$Sub_metering_1,type="l")
points(data_filtered$DateTime,data_filtered$Sub_metering_2,type="l",col="red")
points(data_filtered$DateTime,data_filtered$Sub_metering_3,type="l",col="blue")
legend("topright",legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c("black","red","blue"),pch = "-")
# Plot4: global reactive power over time
with(data_filtered, plot(DateTime, Global_reactive_power, type = "l",
xlab="datetime",
ylab="Global_reactive_power"))
# Close the device so the png is flushed to disk
dev.off()
|
febf1f5f77eaceae17c33338fc35be4a473d68b3
|
1aefcc070be0f20b0d87a961041a4655c12d3a65
|
/homework2/ui.r
|
0a4990efb72d3d0b04c306c3f7b038a511713de1
|
[] |
no_license
|
swang114/msan622
|
d6289573b05d788c729348e52d544e0088a993ee
|
34a9a7e10c76bdf8a3ed5215e9da3d91ad53a8e1
|
refs/heads/master
| 2021-01-20T07:20:38.603255
| 2014-05-16T14:49:06
| 2014-05-16T14:49:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,789
|
r
|
ui.r
|
# ui.r — Shiny UI for the IMDB movie-ratings explorer: a sidebar of filter
# and appearance controls, and a main panel with a scatter-plot tab, a
# genre-by-rating table tab, and model-summary panels that appear only
# when the matching checkbox is ticked.
library(shiny)
shinyUI(
pageWithSidebar(
headerPanel("IMDB Movie Ratings"),
sidebarPanel(
# Filter: single MPAA rating (input$mpaar)
radioButtons(
"mpaar",
"MPAA Rating:",
c('All','NC-17','PG','PG-13','R')
),
br(),
# Filter: any combination of genres (input$gen)
checkboxGroupInput(
"gen",
"Movie Genres:",
c("Action","Animation","Comedy","Drama","Documentary","Romance","Short")
),
# Appearance: RColorBrewer-style palette name (input$colorScheme)
selectInput(
"colorScheme",
"Color Scheme:",
choices = c("Default", "Accent", "Set1", "Set2", "Set3", "Dark2", "Pastel1","Pastel2", "Color-Blind Friendly")
),
# Appearance: point size (input$dsize) and transparency (input$alp)
sliderInput(
"dsize",
"Dot Size:",
min = 1,
max = 10,
value = 2,
step = 1
),
sliderInput(
"alp",
"Dot Alpha:",
min = 0.1,
max = 1,
value = 0.5,
step = 0.1
),
br(),
# Toggle overlaying fitted model curves on the scatter plot
wellPanel(
p(strong("Model predictions")),
checkboxInput(inputId = "mod_linear", label = "Linear (dot-dash)"),
checkboxInput(inputId = "mod_quadratic", label = "Quadratic (dashed)")
)
),
mainPanel(
tabsetPanel(
tabPanel("Scatter Plot",plotOutput("scatterPlot", height="600px")),
tabPanel("Genre by MPAA rating",tableOutput("table"))
),
# Model summaries are shown only while their checkbox is ticked
conditionalPanel("input.mod_linear == true",
p(strong("Linear model")),
verbatimTextOutput(outputId = "mod_linear_text")
),
conditionalPanel("input.mod_quadratic == true",
p(strong("Quadratic model")),
verbatimTextOutput(outputId = "mod_quadratic_text")
)
)
)
)
|
0421e85aff236d51ce463da17788ae7df88dc3b4
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/man/rake_cell_pred.Rd
|
7833b3d2c095c092e1cc0cfbb72fde096103f80a
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,554
|
rd
|
rake_cell_pred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rake_cell_pred.R
\name{rake_cell_pred}
\alias{rake_cell_pred}
\title{Standard Raking Function}
\usage{
rake_cell_pred(cell_pred, rake_to, reg, year_list, pop_measure,
rake_method = "linear", rake_subnational = T, crosswalk = F,
shapefile_path = get_admin_shapefile(admin_level = 0, raking = T,
version = modeling_shapefile_version), field = "loc_id",
zero_heuristic = F, approx_0_1 = F, simple_raster = NULL,
simple_polygon = NULL, pop_raster = NULL,
modeling_shapefile_version = "current",
raking_shapefile_version = "current", if_no_gbd = "return_na",
custom_raking_shapefile = NULL, countries_not_to_subnat_rake = NULL,
...)
}
\arguments{
\item{cell_pred}{Cell pred object to be raked.}
\item{rake_to}{Df with `name`, `year`, and `mean` columns. values in name must match up with values in `field`.}
\item{reg}{character string - Region used to produce cell pred object.}
\item{year_list}{integer vector - vector of years}
\item{pop_measure}{character string - population measure (can be found in config of model run)}
\item{rake_method}{character string - must be either `linear` or `logit`}
\item{rake_subnational}{boolean default `T`. If true, uses the subnational raking shapefile, otherwise uses the adm0 shapefile. Make sure that values in `rake_to` contain the codes for level of raking chosen.}
\item{crosswalk}{Boolean default `T`, for models run before the new gaul shapefiles ready on 7/6/18, the cell_pred needs to be crosswalked to match the subnational raking raster. Introduces NAs into raster where the new simple raster has values but the original does not.}
\item{shapefile_path}{character string -Path to shapefile that will be used for raking. Preset to subnational raking shapefile, don't change.}
\item{field}{character string - Field in shapefile that has admin identifiers that match with rake_to. Preset to ihme_loc_ids at the lowest level in the shapefile. Don't change.}
\item{zero_heuristic}{Boolean default `F`. If logit raking, this will automatically set rf = -9999 for any country-year with a target value of 0. This produces a raked value of 0 for all pixels. Raking to a target of zero in logit space is very time-consuming and the algorithm
can only approach zero asymptotically. For situations where the target is truly zero (most useful for, say, an intervention pre-introduction) this will both speed up the process and ensure that zeros are returned.}
\item{approx_0_1}{Boolean default `F`. If logit raking, any values of zero will be replaced with 1e-10 and values of 1 will be replaced with (1-(1e-10)). Otherwise, logit transformations will fail in `NewFindK`. Useful if some areas have very low or high predicted values in `cell_pred`,
such that some draws are either 0 or 1 (or extremely close to these values).}
\item{simple_raster}{default `NULL`, option to pass in simple raster to function if it's been loaded already. NOTE: if the pop raster is not being passed in as well, the simple_polygon needs to be supplied as well. There is a check to ensure this.}
\item{simple_polygon}{default `NULL`, option to pass in simple polygon if its been loaded already. This is necessary if the simple raster is being passed in, but the pop raster is not. The covariate loading function requires a simple polygon.}
\item{pop_raster}{default `NULL`, option to pass in pop raster if its been loaded already.
Additional Parameters (...)}
\item{modeling_shapefile_version}{string identifying version of of shapefile used in modeling}
\item{raking_shapefile_version}{string identifying version of of shapefile to use in raking}
\item{if_no_gbd}{default `return_na`, other option `return_unraked`. If return_na, any location-years without gbd estimates will return NA raking factors. if return_unraked, will any location-years without gbd estimates will return 1 for linear raking, 0 for logit raking.}
\item{custom_raking_shapefile}{SPDF object -shapefile that will be used for raking. Used for passing in custom raking shapefile for choosing subnational countries to rake to. See `make_custom_raking_shapefile()`}
\item{countries_not_to_subnat_rake}{as it sounds. Used for constructing raking raster. Default: NULL}
\item{MaxJump}{default `10`. Maximum size of a jump to the answer (for logit raking).}
\item{MaxIter}{default `80`. Number of jumps towards the solution (for logit raking)}
\item{FunTol}{default `1e-5`. Maximum allowed difference between the raking target and raked results (for logit raking)}
\item{iterate}{default `F`. If logit raking for a location-year fails, try again with `MaxJump` and `MaxIter` times 10. If that fails, try again times 100. For circumstances where raking target is very far from estimate and raking does not converge.}
\item{if_no_gbd}{default `return_na`, other option `return_unraked`. If return_na, any location-years without gbd estimates will return NA raking factors. if return_unraked, will any location-years without gbd estimates will return 1 for linear raking, 0 for logit raking.}
}
\value{
Returns a named list with a raked cell pred object, simple raster used for raking, raking factors, and (optional) rasters of mean, lower, upper, and cirange for years in year list
}
\description{
A function used to rake mbg ouputs to GBD estimates. Can rake to either national level or subnational level (where available). Supports linear and logit raking. Optionally outputs summary rasters of raked cell pred object.
}
\author{
Michael Collison
}
|
9592eb0357177cb4b21cf46643413676e373c85b
|
5bd83f74cd2c7e88c0b56e25d3b9b415dcb18c06
|
/man/map_edge_attributes.Rd
|
2b720461cee028fe6bae9919b34368cb9069dc8b
|
[] |
no_license
|
meerapatelmd/chariotViz
|
123c04e6fc6b09b2ffdc9ef1eb9fa94d227ee846
|
c45947a963b23f75237fe4417dd03b6f27c620d5
|
refs/heads/master
| 2023-07-19T21:44:18.089969
| 2021-09-04T17:30:09
| 2021-09-04T17:30:09
| 394,073,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,568
|
rd
|
map_edge_attributes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.R
\name{map_edge_attributes}
\alias{map_edge_attributes}
\title{FUNCTION_TITLE}
\usage{
map_edge_attributes(
nodes_and_edges,
fontsize = 26,
len = 5,
label_from = relationship_name,
style_from = defines_ancestry,
style_map = defines_ancestry_styles,
style_map_other = "dotted",
penwidth_from = NULL,
penwidth_map = NULL,
penwidth_map_other = NULL,
color_from = relationship_source,
color_map = relationship_source_colors,
color_map_other = "black",
arrowsize_from = defines_ancestry,
arrowsize_map = c(`0` = 2, `1` = 2),
arrowsize_map_other = 2,
arrowhead_from = is_hierarchical,
arrowhead_map = c(`1` = "vee", `0` = "vee"),
arrowhead_map_other = NULL,
arrowtail_from = NULL,
arrowtail_map = NULL,
arrowtail_map_other = NULL,
fontname_from = NULL,
fontname_map = NULL,
fontname_map_other = NULL,
fontsize_from = NULL,
fontsize_map = NULL,
fontsize_map_other = NULL,
fontcolor_from = NULL,
fontcolor_map = NULL,
fontcolor_map_other = NULL,
tooltip_from = NULL,
tooltip_map = NULL,
tooltip_map_other = NULL,
URL_from = NULL,
URL_map = NULL,
URL_map_other = NULL,
edgetooltip_from = NULL,
edgetooltip_map = NULL,
edgetooltip_map_other = NULL,
edgeURL_from = NULL,
edgeURL_map = NULL,
edgeURL_map_other = NULL,
dir_from = NULL,
dir_map = NULL,
dir_map_other = NULL,
headtooltip_from = NULL,
headtooltip_map = NULL,
headtooltip_map_other = NULL,
headURL_from = NULL,
headURL_map = NULL,
headURL_map_other = NULL,
headclip_from = NULL,
headclip_map = NULL,
headclip_map_other = NULL,
headlabel_from = NULL,
headlabel_map = NULL,
headlabel_map_other = NULL,
headport_from = NULL,
headport_map = NULL,
headport_map_other = NULL,
tailtooltip_from = NULL,
tailtooltip_map = NULL,
tailtooltip_map_other = NULL,
tailURL_from = NULL,
tailURL_map = NULL,
tailURL_map_other = NULL,
tailclip_from = NULL,
tailclip_map = NULL,
tailclip_map_other = NULL,
taillabel_from = NULL,
taillabel_map = NULL,
taillabel_map_other = NULL,
tailport_from = NULL,
tailport_map = NULL,
tailport_map_other = NULL,
decorate_from = NULL,
decorate_map = NULL,
decorate_map_other = NULL
)
}
\arguments{
\item{nodes_and_edges}{PARAM_DESCRIPTION}
\item{fontsize}{PARAM_DESCRIPTION, Default: 26}
\item{len}{PARAM_DESCRIPTION, Default: 5}
\item{label_from}{PARAM_DESCRIPTION, Default: relationship_name}
\item{style_from}{PARAM_DESCRIPTION, Default: NULL}
\item{style_map}{PARAM_DESCRIPTION, Default: NULL}
\item{penwidth_from}{PARAM_DESCRIPTION, Default: NULL}
\item{penwidth_map}{PARAM_DESCRIPTION, Default: NULL}
\item{color_from}{PARAM_DESCRIPTION, Default: NULL}
\item{color_map}{PARAM_DESCRIPTION, Default: NULL}
\item{arrowsize_from}{PARAM_DESCRIPTION, Default: NULL}
\item{arrowsize_map}{PARAM_DESCRIPTION, Default: NULL}
\item{arrowhead_from}{PARAM_DESCRIPTION, Default: is_hierarchical}
\item{arrowhead_map}{PARAM_DESCRIPTION, Default: c(\code{1} = "vee", \code{0} = "vee")}
\item{arrowtail_from}{PARAM_DESCRIPTION, Default: NULL}
\item{arrowtail_map}{PARAM_DESCRIPTION, Default: NULL}
\item{fontname_from}{PARAM_DESCRIPTION, Default: NULL}
\item{fontname_map}{PARAM_DESCRIPTION, Default: NULL}
\item{fontsize_from}{PARAM_DESCRIPTION, Default: NULL}
\item{fontsize_map}{PARAM_DESCRIPTION, Default: NULL}
\item{fontcolor_from}{PARAM_DESCRIPTION, Default: NULL}
\item{fontcolor_map}{PARAM_DESCRIPTION, Default: NULL}
\item{tooltip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{tooltip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{URL_from}{PARAM_DESCRIPTION, Default: NULL}
\item{URL_map}{PARAM_DESCRIPTION, Default: NULL}
\item{edgetooltip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{edgetooltip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{edgeURL_from}{PARAM_DESCRIPTION, Default: NULL}
\item{edgeURL_map}{PARAM_DESCRIPTION, Default: NULL}
\item{dir_from}{PARAM_DESCRIPTION, Default: NULL}
\item{dir_map}{PARAM_DESCRIPTION, Default: NULL}
\item{headtooltip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{headtooltip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{headURL_from}{PARAM_DESCRIPTION, Default: NULL}
\item{headURL_map}{PARAM_DESCRIPTION, Default: NULL}
\item{headclip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{headclip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{headlabel_from}{PARAM_DESCRIPTION, Default: NULL}
\item{headlabel_map}{PARAM_DESCRIPTION, Default: NULL}
\item{headport_from}{PARAM_DESCRIPTION, Default: NULL}
\item{headport_map}{PARAM_DESCRIPTION, Default: NULL}
\item{tailtooltip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{tailtooltip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{tailURL_from}{PARAM_DESCRIPTION, Default: NULL}
\item{tailURL_map}{PARAM_DESCRIPTION, Default: NULL}
\item{tailclip_from}{PARAM_DESCRIPTION, Default: NULL}
\item{tailclip_map}{PARAM_DESCRIPTION, Default: NULL}
\item{taillabel_from}{PARAM_DESCRIPTION, Default: NULL}
\item{taillabel_map}{PARAM_DESCRIPTION, Default: NULL}
\item{tailport_from}{PARAM_DESCRIPTION, Default: NULL}
\item{tailport_map}{PARAM_DESCRIPTION, Default: NULL}
\item{decorate_from}{PARAM_DESCRIPTION, Default: NULL}
\item{decorate_map}{PARAM_DESCRIPTION, Default: NULL}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
\seealso{
\code{\link[dplyr]{tidyeval-compat}},\code{\link[dplyr]{mutate}},\code{\link[dplyr]{distinct}}
}
|
9cab303d2f13363b427053d0df07c77d7e20d2ec
|
45bc4ef53de6d474dc28fc803b3879af6fe77e56
|
/assets/R/06202015_datacience_circles.R
|
2b28baf16679f30c35026ea945116b137dd53a83
|
[
"MIT"
] |
permissive
|
sumendar/simpleblog
|
224c293d9d8507ac4ff82058c3246fa9ea2ad683
|
8d411491dc2fe937bf56562539a34cc2e5f8a837
|
refs/heads/gh-pages
| 2021-08-19T07:56:39.262150
| 2017-11-25T10:17:46
| 2017-11-25T10:17:46
| 111,040,135
| 0
| 0
| null | 2017-11-17T01:01:00
| 2017-11-17T01:00:59
| null |
UTF-8
|
R
| false
| false
| 356
|
r
|
06202015_datacience_circles.R
|
# Demo: draw ten translucent red circles jittered around (0.5, 0.5), then
# overlay one translucent black circle exactly at the centre.
library('ggplot2')
# Empty plot with both axes fixed to [0, 1].
gg <- ggplot() + scale_x_continuous(limits=c(0,1)) + scale_y_continuous(limits=c(0,1))
# Each point gets a random centre (sd 0.04) and a random size in [40, 100].
for(i in 1:10) gg <- gg + geom_point(aes(x=rnorm(1, 0.5, 0.04), y=rnorm(1, 0.5, 0.04)), size=runif(1, 40, 100), color='red', alpha=.15)
plot(gg)
# Reference circle at the exact centre.
gg <- gg + geom_point(aes(x=.5, y=.5), size=100, color='black', alpha=.15)
plot(gg)
|
c549c7a46f2f58e75d2a0c73c01d007cf40457ed
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MNS/R/gen.Network.R
|
2ea27188166d3e101ed9809001bb5bd42defd8c7
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,057
|
r
|
gen.Network.R
|
gen.Network <-
function(method="cohort", p, Nobs, Nsub, sparsity, REsize, REprob, REnoise){
  # Wrapper for generating random networks (and, optionally, Gaussian data).
  #
  # INPUT:
  #  - method: either "cohort" (networks as in Monti et al. 2015) or
  #    "Danaher" (networks as in Danaher et al. 2013)
  #  Parameters relevant only to "cohort":
  #  - p: number of nodes
  #  - Nsub: number of subjects
  #  - sparsity: sparsity level in population network
  #  - REsize: number of edges with additional random effects (must be smaller than choose(p,2)!)
  #  - REprob: probability of re-wiring
  #  - REnoise: variance of random effects for edges
  #  For "Danaher" the component count is fixed at 10 and the subject count at 3.
  #  - Nobs: if supplied, Nobs rows of zero-mean Gaussian data per subject are
  #    also drawn from each subject's precision matrix (via mvrnorm)
  #
  # OUTPUT: an object of class "MNS" containing the networks (and the data,
  # population network and random-effect network where applicable).
  if (method=="cohort"){
    Networks = genREnetworksEnforceTrans(p = p, Nsub = Nsub, sparsity = sparsity, REsize=REsize, REprob=REprob, REnoise = REnoise)
  } else if (method=="Danaher"){
    compNum = 10 # number of independent components, fixed following the Danaher example
    compSize = floor(p/compNum)
    if (p != compNum * compSize){
      warning(paste("number of nodes p must be divisible by 10 (number of components). Number of nodes has been changed from", p,"to", compSize*compNum))
    }
    if (!missing(Nsub)){
      if (Nsub != 3){
        warning("This method only generates networks for 3 subjects.")
      }
    }
    Nsub = 3 # arbitrarily fixed at 3 subjects (recreates the Danaher et al. 2014 simulation)
    p = compSize * compNum
    Networks = generateJGLnetworks(compSize = compSize, compNum = compNum, popNum = Nsub, pow=1, m=1, strUpper =.4, strLower = .1)
  } else {
    stop("Unrecognized network generation method. \n Networks must be generated according to the cohort model or the Danaher model")
  }
  if (!missing(Nobs)){
    # also generate data: one multivariate-normal sample per subject, with
    # covariance equal to the inverse of that subject's precision matrix
    Dat = lapply(Networks$SubPres, FUN=function(x){ mvrnorm(n = Nobs, mu = rep(0,p), Sigma = solve(x))})
    if (method=="cohort"){
      SimDat = list(Networks=Networks$SubPres, Data=Dat, PopNet=Networks$K, RanNet=Networks$RE)
      class(SimDat) = "MNS"
      return(SimDat)
    } else {
      SimDat = list(Networks=Networks$SubPres, Data=Dat)
      class(SimDat) = "MNS"
      return(SimDat)
    }
  } else {
    if (method=="cohort"){
      SimNet = list(Networks=Networks$SubPres, PopNet=Networks$K, RanNet=Networks$RE)
      class(SimNet) = "MNS"
      return(SimNet)
    } else {
      SimNet = list(Networks=Networks$SubPres)
      class(SimNet) = "MNS"
      return(SimNet)
    }
  }
}
|
89d47d8f59eac1262256ac188e17f235c1a0b16f
|
289b70ac6d95d7f4585b1ac61439dfefd786fc77
|
/R/estUtil.R
|
084760c147630ed6606071823ec5159feac5594e
|
[] |
no_license
|
syerramilli/R-sysid
|
f8ede18883a691e363b5ca3110c2583a5d7a426c
|
be2928b20b5f3e1230f292ea45166ae95cc71a23
|
refs/heads/master
| 2023-06-08T17:55:07.929065
| 2023-06-07T03:29:14
| 2023-06-07T03:29:14
| 29,390,663
| 3
| 2
| null | 2023-06-07T03:29:15
| 2015-01-17T12:38:48
|
R
|
UTF-8
|
R
| false
| false
| 6,422
|
r
|
estUtil.R
|
# Implementation of the Levenberg-Marquardt algorithm for nonlinear least
# squares.
#   ...    : extra data forwarded (as a list) to the objective function
#   obj    : function(theta, e, dots) returning list(fn = residuals,
#            grad = Jacobian[, e = initial residuals])
#   theta0 : initial parameter vector (column matrix)
#   N      : number of observations; degrees of freedom df = N - npar
#   opt    : options list as produced by optimOptions()
# Returns the estimated parameters, residuals, parameter covariance,
# residual standard deviation and termination diagnostics.
levbmqdt <- function(...,obj,theta0,N,opt){
  dots <- list(...)
  # Optimization parameters
  tol <- opt$tol; maxIter <- opt$maxIter
  d <- opt$adv$LMinit; mu <- opt$adv$LMstep
  df <- N - dim(theta0)[1]
  # Initialize algorithm
  i <- 0
  l <- obj(theta=theta0,e=NULL,dots)
  e <- l$e; grad <- l$grad
  sumsq0 <- sum(e^2)/df
  # variable to count the number of times objective function is called
  countObj <- 0
  sumSqDiff <- 9E-3*sumsq0
  repeat{
    i=i+1
    # Scaled gradient of the cost; its 2-norm is the termination criterion.
    g <- 1/df*t(grad)%*%e
    termPar <- norm(g,"2")
    repeat{
      # Update parameters: damped Gauss-Newton step with damping d.
      H <- 1/df*t(grad)%*%grad + d*diag(dim(theta0)[1])
      Hinv <- solve(H);
      theta <- theta0 + Hinv%*%g
      # Evaluate sum square error at the candidate parameters
      l <- obj(theta,e,dots)
      sumsq <- sum(l$fn^2)/df
      sumSqDiff <- sumsq0-sumsq
      countObj <- countObj + 1
      if(termPar < tol) break
      # no major improvement
      if(abs(sumSqDiff) < 0.01*sumsq0) break
      # If sum square error with the updated parameters is less than the
      # previous one, the updated parameters become the current parameters
      # and the damping coefficient is reduced by a factor of mu
      if(sumSqDiff > 0){
        d <- d/mu
        theta0 <- theta
        sumsq0 <- sumsq
        e <- l$fn; grad <- l$grad
        break
      } else{ # increase damping coefficient by a factor of mu
        d <- d*mu
      }
    }
    if(termPar < tol) {
      WhyStop <- "Tolerance"
      break
    }
    if(abs(sumSqDiff) < 0.01*sumsq0){
      WhyStop <- "No significant change"
      break
    }
    if(i == maxIter){
      WhyStop <- "Maximum Iteration Limit"
      break
    }
  }
  # theta <- theta0
  # Residual variance estimate and parameter covariance matrix.
  sigma2 <- sum(e^2)/df
  vcov <- 1/df*Hinv*sigma2
  list(params=theta,residuals=e,vcov=vcov,sigma = sqrt(sigma2),
       termination=list(WhyStop=WhyStop,iter=i,FcnCount = countObj,
                        CostFcn=sumsq0))
}
#' Create optimization options
#'
#' Specify optimization options that are to be passed to the
#' numerical estimation routines.
#'
#' @param tol Minimum 2-norm of the gradient (Default: \code{1e-2})
#' @param maxIter Maximum number of iterations to be performed
#' @param LMinit Starting value of search-direction length
#' in the Levenberg-Marquardt method (Default: \code{0.01})
#' @param LMstep Size of the Levenberg-Marquardt step (Default: \code{2})
#' @param display Argument whether to display iteration details or not
#' (Default: \code{"off"})
#'
#' @export
optimOptions <- function(tol=1e-2,maxIter=20,LMinit=0.01,LMstep=2,
                         display=c("off","on")[1]){
  # The Levenberg-Marquardt tuning constants are grouped under `adv`.
  advanced <- list(LMinit = LMinit, LMstep = LMstep)
  list(tol = tol, maxIter = maxIter, adv = advanced, display = display)
}
#' Parameter covariance of the identified model
#'
#' Obtain the parameter covariance matrix of the linear, identified
#' parametric model.
#'
#' @param sys a linear, identified parametric model
#'
#' @export
getcov <- function(sys){
  # The covariance estimate is stored under the model's `stats` element.
  model_stats <- sys$stats
  model_stats$vcov
}
# Residuals and gradient for an ARMAX model A(q)y = B(q)u + C(q)e.
# theta stacks c(a_1..a_na, b_1..b_nb, c_1..c_nc); dots = list(y, u,
# order = c(na, nb, nc, nk)[, e]). On the first call e is NULL, taken from
# dots[[4]] and echoed back in l$e. Returns list with $fn (prediction
# errors) and $grad (regressors filtered through 1/C(q)), truncated to N rows.
armaxGrad <- function(theta,e,dots){
  y <- dots[[1]]; u <- dots[[2]]; order <- dots[[3]];
  na <- order[1];nb <- order[2]; nc <- order[3]; nk <- order[4]
  nb1 <- nb+nk-1 ; n <- max(na,nb1,nc);N <- dim(y)[1]
  l <- list()
  if(is.null(e)){
    e <- dots[[4]]; l$e <- e
  }
  # Zero-pad the signals on both ends so lagged indexing stays in range.
  yout <- apply(y,2,padZeros,n=n)
  uout <- apply(u,2,padZeros,n=n)
  eout <- apply(e,2,padZeros,n=n)
  # One regression row at time i: negated lagged outputs, lagged inputs
  # (shifted by the delay nk) and lagged residuals.
  reg <- function(i) {
    if(nk==0) v <- i-0:(nb-1) else v <- i-nk:nb1
    matrix(c(-yout[i-1:na,],uout[v,],eout[i-1:nc,]))
  }
  X <- t(sapply(n+1:(N+n),reg))
  Y <- yout[n+1:(N+n),,drop=F]
  fn <- Y-X%*%theta
  # Gradient: filter the regressors through 1/C(q).
  filt1 <- signal::Arma(b=1,a=c(1,theta[(na+nb+1:nc)]))
  grad <- apply(X,2,signal::filter,filt=filt1)
  l$grad <- grad[1:N,,drop=F];l$fn <- fn[1:N,,drop=F]
  return(l)
}
# Residuals and gradient for an output-error (OE) model y = B(q)/F(q) u + e.
# theta stacks c(b_1..b_nb, f_1..f_nf); dots = list(y, u,
# order = c(nb, nf, nk)[, iv]). On the first call e is NULL and the
# instrumental/simulated output iv comes from dots[[4]]; the initial
# residuals are stored in l$e. Returns $fn (residuals) and $grad
# (regressors filtered through 1/F(q)).
oeGrad <- function(theta,e,dots){
  y <- dots[[1]]; u <- dots[[2]]; order <- dots[[3]];
  nb <- order[1];nf <- order[2]; nk <- order[3];
  nb1 <- nb+nk-1 ; n <- max(nb1,nf)
  N <- dim(y)[1]
  l <- list()
  if(is.null(e)){
    iv <- dots[[4]]
    fn <- y-iv; l$e <- fn
  } else{
    # Reconstruct the noise-free output from the current residuals.
    iv <- y-e
  }
  # Left-pad so lagged indexing stays in range.
  uout <- apply(u,2,leftPadZeros,n=n)
  ivout <- apply(iv,2,leftPadZeros,n=n)
  # One regression row: delayed inputs and negated lagged noise-free outputs.
  reg <- function(i) {
    if(nk==0) v <- i-0:(nb-1) else v <- i-nk:nb1
    matrix(c(uout[v,],-ivout[i-1:nf,]))
  }
  # Compute new regressor matrix and residuals
  X <- t(sapply(n+1:N,reg))
  fn <- y-X%*%theta
  # Compute gradient: filter the regressors through 1/F(q).
  filt1 <- signal::Arma(b=1,a=c(1,theta[nb+1:nf,]))
  grad <- apply(X,2,signal::filter,filt=filt1)
  l$fn <- fn; l$grad<-grad
  return(l)
}
# Residuals and gradient for a Box-Jenkins model
#   y = B(q)/F(q) u + C(q)/D(q) e.
# theta stacks c(b, c, d, f) coefficients; dots = list(y, u,
# order = c(nb, nc, nd, nf, nk)[, zeta, e]). On the first call e is NULL
# and the process output zeta plus residuals come from dots[[4]]/dots[[5]].
# Returns $fn (residuals) and $grad (regressors filtered by D(q)/(C(q)F(q))).
bjGrad <- function(theta,e,dots){
  y <- dots[[1]]; u <- dots[[2]]; order <- dots[[3]];
  nb <- order[1];nc <- order[2]; nd <- order[3];
  nf <- order[4]; nk <- order[5];nb1 <- nb+nk-1 ; n <- max(nb1,nc,nd,nf);
  N <- dim(y)[1]
  l <- list()
  if(is.null(e)){
    zeta <- dots[[4]]
    w <- y-zeta
    e <- dots[[5]]; l$e <- e
  } else{
    # Reconstruct the noise contribution w = C(q)/D(q) e and the process
    # part zeta = y - w from the current residuals.
    filt_ts <- signal::Arma(b=c(1,theta[nb+1:nc]),
                            a=c(1,theta[nb+nc+1:nd]))
    w <- matrix(signal::filter(filt_ts,e))
    zeta <- y-w
  }
  # Left-pad all signals so lagged indexing stays in range.
  uout <- apply(u,2,leftPadZeros,n=n)
  zetaout <- apply(zeta,2,leftPadZeros,n=n)
  eout <- apply(e,2,leftPadZeros,n=n)
  wout <- apply(w,2,leftPadZeros,n=n)
  # One regression row: delayed inputs, lagged residuals (if nc > 0),
  # lagged noise terms and negated lagged process outputs.
  reg <- function(i) {
    if(nk==0) v <- i-0:(nb-1) else v <- i-nk:nb1
    ereg <- if(nc==0) NULL else eout[i-1:nc,]
    matrix(c(uout[v,],ereg,wout[i-1:nd,],-zetaout[i-1:nf,]))
  }
  # Compute new regressor matrix and residuals
  X <- t(sapply(n+1:N,reg))
  fn <- y-X%*%theta
  # Computing gradient: denominator polynomial is C(q)*F(q).
  C_params <- if(nc==0) NULL else theta[nb+1:nc]
  den <- as.numeric(polynom::polynomial(c(1,C_params))*
                      polynom::polynomial(c(1,theta[nb+nc+nd+1:nf])))
  filt1 <- signal::Arma(b=c(1,theta[nb+nc+1:nd]),
                        a=den)
  grad <- apply(X,2,signal::filter,filt=filt1)
  l$fn <- fn; l$grad <- grad
  return(l)
}
# Verify that an initial model matches the estimation routine it was passed
# to. The expected structure name is recovered from the *caller's* call via
# sys.call(which = -1): e.g. when invoked inside armax(...), z == "armax".
# Stops with an informative error when init_sys$type differs.
checkInitSys <- function(init_sys){
  # First comma-separated token of the deparsed caller call is its name.
  z <- strsplit(toString(sys.call(which=-1)),split = ",")[[1]][1]
  if(init_sys$type!=z){
    errMes <- paste("An idpoly model of",toupper(z),"structure expected for the",z,"command.")
    stop(errMes)
  }
}
leftPadZeros <- function(x,n) c(rep(0,n),x)
padZeros <- function(x,n) c(rep(0,n),x,rep(0,n))
# First-difference filter: y[i] = x[i] - x[i-1], computed by one-sided
# convolution. `circular = TRUE` wraps around, so y[1] = x[1] - x[n].
# Fix: spell out TRUE rather than the reassignable shorthand T.
integfilter <- function(x){
  as.numeric(stats::filter(x, filter = c(1, -1), "convolution", sides = 1,
                           circular = TRUE))
}
|
6ebd8c8482260cfffbde2c8717acb87493fda173
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PAFit/examples/get_statistics.Rd.R
|
4943809ac4a8b0aad3b59c86a7766bd8bf573b9e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
get_statistics.Rd.R
|
# Example script from the PAFit package documentation: simulate a
# Barabasi-Albert network and summarise its statistics.
library(PAFit)
### Name: get_statistics
### Title: Getting summarized statistics from input data
### Aliases: get_statistics PAFit_data
### ** Examples
# (The second library() call below is redundant but harmless; it comes from
# the extracted example block.)
library("PAFit")
# Generate a Barabasi-Albert network with N = 100 nodes, adding m = 1 edge
# at each step, then compute and print its summary statistics.
net  <- generate_BA(N = 100 , m = 1)
net_stats <- get_statistics(net)
summary(net_stats)
|
f193ba32994bd842772f8d827917de6167aab4bf
|
ae987d62aee7832e529ff888f05f2886ef1634a5
|
/R/dcm_multiple2singlechoice.R
|
08e9ae99a632f3c3da2da696acc6ecaf598063a0
|
[] |
no_license
|
mbonoli/funcionesMBO
|
febcd54cd0284e645df26c91ff0318e5ecd8e043
|
5c371d0b30fdfa262c2d66495c476b61e1e2b914
|
refs/heads/master
| 2021-01-20T18:16:21.755333
| 2016-07-23T21:31:48
| 2016-07-23T21:31:48
| 63,553,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,777
|
r
|
dcm_multiple2singlechoice.R
|
# Expand a discrete-choice dataset in long format so that every choice set
# (chid) has exactly one chosen alternative: a chid with k chosen rows is
# duplicated into k single-choice chids. Task numbers are renumbered per
# individual and chids are recomputed at the end via dcm_add_chid().
#   datalong : long-format data, one row per alternative
#   choicevar: name of the 0/1 choice column (default "choice")
#   chidvar  : name of the choice-set id column (default "chid")
#   taskvar  : name of the column receiving the per-individual task number
#   indvar   : name of the individual id column
# Progress messages are printed in Spanish (kept verbatim).
dcm_multiple2singlechoice<- function (datalong, choicevar="choice", chidvar="chid", taskvar, indvar) {
  require(dplyr)
  # Temporarily standardise the choice/chid column names.
  names(datalong)[names(datalong)==choicevar] <- "choice"
  names(datalong)[names(datalong)==chidvar] <- "chid"
  # Number of chosen alternatives per choice set.
  choiceXchid <- summarise(group_by(datalong, chid), freq=sum(choice))
  # print(head(choiceXchid))
  chids <- choiceXchid$chid
  nchids <- length(chids)
  cat(paste0("\nCantidad de chids: ", nchids,"\n"))
  dl <- data.frame()
  dlist <- list()
  k<-1
  chid <- 1        # running id for the rebuilt choice sets
  task <- 0        # running task number within the current individual
  indant <- 0      # individual id seen on the previous iteration
  for (i in 1:nchids){  #for (i in 1:nchids
    if (i/1000==floor(i/1000)) cat(paste0("\nCalculando chid ",i))
    # Every 500 chids, stash the accumulated frame to keep rbind cheap.
    if (i/500==floor(i/500)){
      dlist[[k]] <- dl
      k<-k+1
      dl <- data.frame()
    }
    dlaux <- filter(datalong, chid==i)
    indcurr <- dlaux[1, indvar]
    if (choiceXchid$freq[i]==1){
      # Already a single choice: just renumber task and chid.
      if (indcurr==indant) task <- task+1 else task <- 1
      dlaux$chid <- chid
      chid <- chid+1
      dlaux[,taskvar] <- task
      dl <- rbind(dl, dlaux)
    } else {
      # Multiple choices: emit one copy of the set per chosen alternative,
      # with only that alternative marked as chosen.
      choices <- which(dlaux$choice==1)
      # print(choices)
      dlaux$choice <- 0
      for (j in choices){
        if (indcurr!=indant & j==choices[1]) task <- 1 else task <- task+1
        dlaux2 <- dlaux
        dlaux2[j,"choice"] <- 1
        dlaux2$chid <- chid
        chid <- chid+1
        dlaux2[,taskvar] <- task
        dl <- rbind(dl, dlaux2)
      }
    }
    indant <- indcurr
  }
  # Reassemble the stashed frames plus the tail still held in dl.
  dlf <- data.frame()
  for (i in 1:(k-1)){
    print(i)
    dlf <- rbind(dlf, dlist[[i]])
  }
  dl <- rbind(dlf, dl)
  # print(dim(dl))
  # print(1)
  cat("\nRecalculando variable chid")
  # Restore the caller's column names before recomputing chids.
  names(dl)[names(dl)=="choice"] <- choicevar
  names(dl)[names(dl)=="chid"] <- chidvar
  dl <- dcm_add_chid(dl, indvar=indvar, taskvar=taskvar, chidvarname="chid")
  dl
}
|
4362cd8341fe3a451824891b50d9be8d93b8b909
|
093cf42c7d4bee93e30a19382f3748d1bddc5adc
|
/week05/datetime.R
|
ec08b65721e3bcf61b57e2df9c37ccd9ef01060f
|
[] |
no_license
|
Huride/radsp
|
262dfa0e1888c240a59771d3390c14188e778ee7
|
23e3713d1f9b3891cdae5f89b8021905b4feedbb
|
refs/heads/master
| 2022-12-05T16:02:11.174257
| 2020-08-19T09:58:44
| 2020-08-19T09:58:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,621
|
r
|
datetime.R
|
# Tutorial script: working with dates and times in R.
# "2020-03-03" "2020-03-04 23:11:34"
# "04/11/2020" "20-06-11" "2020/05/11"
# Dates read from files arrive as character strings; convert them to R's
# Date class to do date arithmetic (weekdays, +N days, ...), then format
# them back to strings in whatever company layout is required.
# NOTE(review): the original comments were Korean and the Korean text inside
# the format strings below is mojibake from a lost encoding -- the string
# literals are left byte-for-byte untouched.
d <- Sys.Date()
d
class(d)
d + 137                 # Date arithmetic: 137 days from today
weekdays(d)
weekdays(d+137)
# 2020,07,22
# Convert a Date back to a character string with a custom layout.
format(d, format="%Y,%m,%d")
# 2020==07?22
format(d, format="%Y==%m?%d")
format(d, format="%a")  # abbreviated weekday name
format(d, format="%B")  # full month name
format(d, format="%Y๋
%m์ ์ ๋๋ฉด ์ข๊ฒ ๋ค.")
some.day <- d + 100
today <- "2020-07-11"
today + 100             # errors: arithmetic on a character string, not a Date
class(d)
class(today)
some.day
weekdays(some.day)
t <- Sys.time()         # current date-time
t
class(t)
format(t, format = "%H์ %M๋ถ")
format(t, format = "%H์ ํ๊ณ %M๋ถ %S์ด")
# Build a small stock table with string dates, then convert the column.
my.date <- c("2020-07-02", "2020-08-12", "2020-07-14",
             "2020-09-22")
stock <- c(234, 567, 123, 333)
my.stock <- data.frame(my.date, stock)
str(my.stock)
stock.date <- my.stock[ 1 , 1 ]
stock.date <- as.Date(stock.date)   # parse an ISO yyyy-mm-dd string
weekdays(stock.date)
class(my.stock$my.date)
my.stock$my.date <- as.Date( my.stock$my.date )
str(my.stock)
my.stock$days <- weekdays(my.stock$my.date)
my.stock[ my.stock$days == "ํ์์ผ" , ]
# (original Korean comment; presumably about extracting the month)
str(my.stock)
my.stock$month <- format( my.stock$my.date , format = "%m์")
# Same exercise with non-ISO date strings: parse with an explicit format.
my.date <- c("20๋
07์02์ผ", "20๋
08์12์ผ",
"20๋
07์14์ผ", "20๋
09์22์ผ")
stock <- c(234, 567, 123, 333)
new.df <- data.frame(my.date, stock)
new.df$iso <- as.Date( new.df$my.date , format = "%y๋
%m์%d์ผ")
|
c6cc4e41e22b42ba164423168a7fc94b99d546ad
|
e5ac9d127cbb9dcba76d2f68a252380be2835942
|
/community.r
|
49e9f0733b15ff36d60dc5ab673e67f22cf04bb4
|
[] |
no_license
|
Arthur-Valance/Qualitative-Modeling-BarentsRISK
|
0f42d3ea144c3107468954349ec0d8c342295ddf
|
32de97a486e8081f56ee48b83a9cf601f339cfab
|
refs/heads/master
| 2023-07-15T07:51:25.173983
| 2021-07-27T09:34:40
| 2021-07-27T09:34:40
| 369,253,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,242
|
r
|
community.r
|
## Need util for URLencoding
library(utils)
## Extract the node labels of a web: the factor levels of the From column.
node.labels <- function(edges) {
  lvls <- levels(edges[["From"]])
  lvls
}
## Convert edge descriptions to an adjacency matrix. Only edges whose Group
## is in required.groups contribute; every other entry stays 0. Edge types
## map to weights: N -> -1, P -> +1, U -> NA (unknown sign), Z -> 0.
## Rows are indexed by To and columns by From, i.e. A[to, from].
## NOTE(review): the `labels` argument appears unused in this body --
## confirm before relying on it.
adjacency.matrix <- function(edges,labels=F,required.groups=c(0)) {
  # 1 for edges in a required group, 0 otherwise (masks the weight below).
  z <- ifelse(edges$Group %in% required.groups,1,0)
  labs <- node.labels(edges)
  n <- length(labs)
  A <- matrix(data = 0L,nrow = n, ncol = n,dimnames= list(labs,labs))
  type <- c("N","P","U","Z")
  weight <- c(-1,1,NA,0)
  # Factor codes of To/From index directly into the matrix.
  A[cbind(edges$To,edges$From)] <- z*weight[match(edges$Type,type)]
  A
}
## Encode the edge descriptions as a URL for the loop-analysis web site:
## the adjacency matrix is serialised as a Maple-style array and the node
## labels are URL-encoded into the query string.
loop.url <- function(edges,base="http://www.ent.orst.edu/loop/loopanalysis.aspx",
                     required.groups=c(0)) {
  labels <- node.labels(edges)
  A <- adjacency.matrix(edges,required.groups=required.groups)
  # Rows are joined with "," and wrapped in "[...]" row by row.
  paste(base,"?",
        "matrix=n:=",length(labels),":",
        "A:=array(1..n,1..n,[[",paste(apply(A,1,paste,collapse=","),collapse="],["),"]]);",
        "[",URLencode(paste(labels,collapse=",")),"]",
        sep="")
}
## Compute the (classical) adjugate by the Faddeev-LeVerrier recurrence.
## If A has integer elements the result is exact. Two fixes relative to the
## original:
##  - the recurrence as seeded here (B0 = -I) yields (-1)^n * adj(A), so the
##    result must be negated for odd n (the original silently returned
##    -adj(A) for odd dimensions);
##  - n == 1 is handled directly (the loop bound 1:(n-1) degenerates to 1:0).
adjoint <- function(A) {
  n <- nrow(A)
  if (n == 1L) {
    # The adjugate of any 1x1 matrix is the 1x1 identity.
    return(matrix(1, 1, 1))
  }
  B <- diag(-1, n, n)
  for (k in seq_len(n - 1)) {
    B <- A %*% B
    p <- sum(diag(B)) / k
    diag(B) <- diag(B) - p
  }
  # Correct the parity-dependent sign of the recurrence.
  if (n %% 2 == 1) B <- -B
  B
}
## Coefficients of the characteristic polynomial det(lambda*I - A), highest
## degree first (leading coefficient 1), via the Faddeev-LeVerrier recurrence.
charpoly <- function(A) {
  n <- nrow(A)
  B <- diag(-1, n, n)
  coefs <- rep(1, n + 1)
  for (k in 1:n) {
    B <- A %*% B
    ck <- sum(diag(B)) / k
    coefs[k + 1] <- ck
    diag(B) <- diag(B) - ck
  }
  coefs
}
## Add a negative self loop ("self limitation") for every node that does not
## already have one. New edges go in group 0 with type "N".
## Fixes relative to the original:
##  - the `labels` assignment was commented out, so the references below
##    accidentally resolved to base::labels (a function) and setdiff() then
##    failed at runtime;
##  - seq_len(n) replaces 1:n so the function is a no-op (instead of an
##    error) when every node already has a self loop.
enforce.limitation <- function(edges) {
  # Rows that are already self loops.
  loops <- which(edges$To == edges$From)
  labels <- levels(edges$From)  # equivalent to node.labels(edges)
  # Nodes lacking a self loop.
  limit <- setdiff(labels, edges$From[loops])
  n <- length(limit)
  rbind(edges,
        data.frame(From = factor(limit, levels = labels),
                   To = factor(limit, levels = labels),
                   Group = rep(0, n),
                   Type = factor(rep("N", n), levels(edges$Type)),
                   Pair = max(edges$Pair) + seq_len(n),
                   Dir = rep(1, n),
                   Start = rep(4, n),
                   End = rep(1, n)))
}
## Subset the edge table to the rows whose Group is in `groups`.
## (Despite the dot, this is a plain function, not an S3 method.)
subset.groups <- function(edges, groups) {
  keep <- edges[["Group"]] %in% groups
  edges[keep, ]
}
## Create functions to generate random community matrices given the edge
## list describing the web topology. Returns a list of closures:
##   community() - draws a random community matrix,
##   select(p)   - re-samples (prob. p) which optional edge pairs are kept,
##   weights(W)  - extracts the edge weights from a matrix,
## plus `optional`, the labels of the omissible edges. Edge groups listed in
## required.groups are always retained.
## Fix: the `labels` assignment was commented out, so length(labels) below
## accidentally measured base::labels (a function); also F -> FALSE.
community.sampler <- function(edges,required.groups=c(0)) {
  labels <- node.labels(edges)
  n.nodes <- length(labels)
  n.edges <- nrow(edges)
  W <- matrix(0,n.nodes,n.nodes)
  ## Ranges and indices of non-zero matrix entries: N edges draw from
  ## [-1, 0], P from [0, 1], U from [-1, 1]; Z edges stay 0.
  lower <- ifelse(edges$Type=="U" | edges$Type=="N",-1L,0L)
  upper <- ifelse(edges$Type=="U" | edges$Type=="P",1L,0L)
  # Linear (column-major) index of each edge: row = To, column = From.
  k.edges <- as.vector(unclass(edges$To)+(unclass(edges$From)-1)*n.nodes)
  ## The indices of the matrix entries that can be omitted (zeroed), the
  ## expansion index that relates matching edges of a pair, and the
  ## number of edge pairs that can be omitted.
  required <- edges$Group %in% required.groups
  k.optional <- k.edges[!required]
  optional <- factor(edges$Pair[!required])
  expand <- as.vector(unclass(optional))
  n.omit <- max(0,expand)
  optional.labels <- edge.labels(edges,FALSE)[!required]
  # Retention indicators for the optional edge pairs (1 = keep, 0 = drop).
  zs <- rep(1,n.omit)
  if(n.omit > 0) {
    community <- function() {
      W[k.edges] <- runif(n.edges,lower,upper)
      # Zero out the optional edges currently switched off.
      W[k.optional] <- W[k.optional]*zs[expand]
      W
    }
    select <- function(p) {
      # Both edges of a pair share one Bernoulli(p) retention draw.
      zs <<- rbinom(n.omit,1,p)
      zs
    }
  } else {
    community <- function() {
      W[k.edges] <- runif(n.edges,lower,upper)
      W
    }
    select <- function(p=0) {
      zs
    }
  }
  weights <- function(W) {
    W[k.edges]
  }
  list(community=community,
       select=select,
       weights=weights,
       optional=optional.labels)
}
## Check the stability of a simulated community matrix W: the equilibrium is
## stable iff every eigenvalue of W has strictly negative real part.
## Fix: only.values=T -> TRUE (T is a reassignable shorthand).
stable.community <- function(W) {
  all(Re(eigen(W, symmetric = FALSE, only.values = TRUE)$values) < 0)
}
## Sign of s, except that values of magnitude at most epsilon are rounded
## down to zero. Returns an integer vector in {-1, 0, 1}.
signum <- function(s, epsilon = 1.0E-3) {
  above <- s > epsilon
  below <- s < -epsilon
  above - below
}
## Mutual information (in bits) between two discrete vectors x and y,
## estimated from their empirical joint distribution. Empty joint cells
## contribute zero (the 0 * log 0 convention).
mutual.info <- function(x, y) {
  counts <- table(factor(x), factor(y))
  joint <- counts / sum(counts)
  indep <- rowSums(joint) %o% colSums(joint)
  terms <- ifelse(counts == 0, 0, joint * log2(joint / indep))
  sum(terms)
}
## Generate a function to check a press condition. `perturb` is a named
## vector giving the relative magnitudes of the press perturbation;
## `monitor` is a named vector whose signs are the expected directions of
## change at the monitored nodes. The returned closure takes a community
## matrix W and reports TRUE when the steady-state response matches every
## expected sign (and FALSE when W cannot be solved).
press.validate <- function(edges,perturb,monitor,epsilon=1.0E-3) {
  labels <- node.labels(edges)
  # Map node names to indices, warning about any unknown names.
  index <- function(name) {
    k <- match(name,labels)
    if(any(is.na(k)))
      warning("Unknown nodes:",paste(name[is.na(k)],collapse=" "))
    k
  }
  ## Indices of perturb
  k.perturb <- index(names(perturb))
  k.monitor <- index(names(monitor))
  # Press vector: negated so that solve(W, S.press) gives the response.
  S.press <- double(length(labels))
  S.press[k.perturb] <- -perturb
  monitor <- sign(monitor)
  ## Return function to check condition
  function(W) {
    # Steady-state response to the press; NULL when W is singular.
    s <- tryCatch(solve(W,S.press),error=function(e) NULL)
    !is.null(s) && all(signum(s[k.monitor],epsilon)==monitor)
  }
}
## Generate a function to determine the impact of a press perturbation.
## `perturb` is a named vector of press magnitudes. If `monitor` is given,
## the returned closure reports only the responses of those nodes;
## otherwise it returns the full response vector.
press.impact <- function(edges,perturb,monitor=NULL) {
  labels <- node.labels(edges)
  # Map node names to indices, warning about any unknown names.
  index <- function(name) {
    k <- match(name,labels)
    if(any(is.na(k)))
      warning("Unknown nodes:",paste(name[is.na(k)],collapse=" "))
    k
  }
  ## Indices of perturb
  k.perturb <- index(names(perturb))
  # Press vector: negated so that solve(W, S.press) gives the response.
  S.press <- double(length(labels))
  S.press[k.perturb] <- -perturb
  if(length(monitor)==0) {
    impact <- function(W) solve(W,S.press)
  } else {
    k.monitor <- index(names(monitor))
    impact <- function(W) solve(W,S.press)[k.monitor]
  }
  ## Return function to compute impact
  impact
}
|
28e041b402d230471fb090a3555862a9ba4b264d
|
b02e46514d93b138756c0a5c73e79215e99995ae
|
/WorldTrade.R
|
ba6a49136699726434f6dfc28af6ab283cd6c454
|
[] |
no_license
|
AvijeetBose/WorldTradeProject
|
474d97edd29ea8c8ded43397280bcfc86e6d9bcd
|
2e811c3bb12d5634380e6ff46b98c641797b8900
|
refs/heads/master
| 2022-11-27T06:14:59.774148
| 2020-08-07T07:35:23
| 2020-08-07T07:35:23
| 285,767,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,176
|
r
|
WorldTrade.R
|
# Clean a raw world-trade CSV: rename columns, drop rows with missing/zero
# Weight or Qty, and write the cleaned file for use in Tableau.
library(dplyr)
# Load the dataset (hard-coded local path).
WorldTradeRecord <- read.csv("D:/R-Code/Project/Raw data.csv")
# Work on a copy so the raw frame stays untouched.
TradeBusinessWorldwide <- WorldTradeRecord
# Assign readable column names (order must match the raw file's columns).
names(TradeBusinessWorldwide) <- c("CountryName", "Year", "Commoditycode", "Commodity", "Flow",
                                   "DollarValue", "Weight", "QtyName", "Qty", "Category")
# Summary of data
summary(TradeBusinessWorldwide)
# Missing values appear in Weight and Qty as either 0 or NA.
# Flag such rows with "1" (note: the flag is a character column).
TradeBusinessWorldwide$MissingData <- ifelse(TradeBusinessWorldwide$Weight < 1 | is.na(TradeBusinessWorldwide$Weight)
                                             | TradeBusinessWorldwide$Qty < 1 | is.na(TradeBusinessWorldwide$Qty), "1", "0")
table(TradeBusinessWorldwide$MissingData)
# Keep only the unflagged rows. NOTE(review): MissingData is "0"/"1"
# (character) while the comparison uses numeric 0; this works because 0 is
# coerced to "0", but comparing against "0" would be clearer.
Cleaned_Data<-filter(TradeBusinessWorldwide,MissingData==0)
summary(Cleaned_Data)
# Drop the helper column.
Cleaned_Data$MissingData<-NULL
summary(Cleaned_Data)
# Write the cleaned data to CSV for use in Tableau.
write.csv(Cleaned_Data, "D:/R-Code/Project/Cleaned data.csv",row.names = FALSE)
|
b0d774d8b98a40e8b9579bc67c8e01bcece5b10f
|
e3e3843080406b83178105f59b2f366643f545a6
|
/fixChangelogLinks.R
|
2811b969a4e0ad7426df75b9a8a9c12dff8029ab
|
[] |
no_license
|
nickmckay/lipdverseR
|
c1dcdb9e6752a41a00d7f9386cf684f24f3f1f46
|
56b01e8ce33dc9f71e39bdf1c5c978f3b33d1db7
|
refs/heads/master
| 2023-06-07T19:16:43.787187
| 2023-05-31T18:20:31
| 2023-05-31T18:20:31
| 182,172,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
fixChangelogLinks.R
|
# Fix summary links: rewrite every "../changelogSummary.html" reference to
# "changelogSummary.html" in each file listed in `allIndex`, in place.
# NOTE(review): `allIndex` (a character vector of file paths) must already
# exist in the calling environment -- it is not defined here.
library(stringr)
#"../changelogSummary.html" to "changelogSummary.html"
for(i in allIndex){
  ad <- readLines(i)
  # NOTE(review): the pattern is treated as a regex, so each "." matches any
  # character; in practice it still only matches the intended links.
  if(any(str_detect(ad,pattern = "../changelogSummary.html" ))){
    adf <- str_replace_all(ad,pattern = "../changelogSummary.html",replacement = "changelogSummary.html")
    print(i)          # report which file is being rewritten
    writeLines(adf,i) # overwrite the file in place
  }
}
|
bf3eea2d96fa2181e88809dbc38a76122d6d51c9
|
5db34fe55462f237703358e5ead7c80299de3d02
|
/R/powerTransform.R
|
4d7c6b65ed6945426149d5d8b8271d7aa943393e
|
[] |
no_license
|
cran/tlm
|
687fe4cb6d25a1086f46e61afb5faa898037f9e2
|
4a399dc84a6b38f8681ef4709c14115d89505f27
|
refs/heads/master
| 2021-01-17T07:11:00.175043
| 2017-04-10T12:15:19
| 2017-04-10T12:15:19
| 23,803,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 98
|
r
|
powerTransform.R
|
# Power transform of x: x^power for power != 0, and log(x) for power == 0
# (the Box-Cox limiting case).
powerTransform <- function(x, power) {
  if (power == 0) {
    log(x)
  } else {
    x^power
  }
}
|
9648916b5bf9b0e2b1cdec9e4184747c09369bda
|
294da1183cd4ac9feac38cea527f10b9e4ea8378
|
/test.R
|
b59b4635f1d211fcf0b87331a88266c6a74cf984
|
[] |
no_license
|
M-Atsuhiko/Result_Graphs
|
a6c4eb913ef1c611ce138fbd204d291dbdde0be8
|
b6d92ca02b106738032ce2cd1474d09cec7e8c3c
|
refs/heads/master
| 2021-01-01T20:05:51.186799
| 2015-02-05T15:02:43
| 2015-02-05T15:02:43
| 28,659,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88
|
r
|
test.R
|
# Tiny demo: build a 10-row data frame and print the row(s) where a == 2.
a <- 1:10
b <- 11:20
c <- 21:30  # a variable named c is fine: function calls c() still find base::c
test <- data.frame(a,b,c)
print(test[test$a == 2,])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.