blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0daa388ee0e36a38b880bd9609829493dee1e1b
|
9cbc066d59ff6101c1674c775eb8aba0bff11feb
|
/Conditional independence testing/low_dim_type1/simulate.R
|
57a8475f869977ccbb9c93e568df7c7d37bcf85c
|
[] |
no_license
|
xiangyh/Nonparametric-Graphical-Modeling
|
28e42d8388d15df4033e47853f840e5b18b87bae
|
7d0953507a1ff0e26c2755966231b477e0fd185f
|
refs/heads/master
| 2020-06-26T09:07:14.861953
| 2019-07-30T07:37:29
| 2019-07-30T07:37:29
| 199,591,539
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,260
|
r
|
simulate.R
|
# simulation: conditional independent testing
require(CondIndTests)
library(MASS)
library(bootstrap)
library(ranger)
library(cdcsis)
library(bootstrap)
# Fit a loess smooth of y on x, choosing the span by k-fold cross-validation.
# Each candidate span is scored by CV mean squared error (bootstrap::crossval)
# and the winner is refit on the full data with surface = "direct" so the
# model can extrapolate beyond the observed x range.
loess_wrapper_extrapolate <- function (x, y, span.vals = seq(0.08, 0.8, by = 0.05), folds = 5){
  # Fitting / prediction helpers handed to crossval(); the span argument is
  # forwarded to fit_loess through crossval's "..." mechanism.
  fit_loess <- function(x, y, span){
    loess(y ~ x, span = span)
  }
  predict_loess <- function(fit, newdata) {
    predict(fit, newdata = newdata)
  }
  # Cross-validated MSE for every candidate span.
  cv_mse <- vapply(span.vals, function(s) {
    cv_pred <- crossval(x, y, theta.fit = fit_loess, theta.predict = predict_loess,
                        span = s, ngroup = folds)$cv.fit
    ok <- !is.na(cv_pred)  # some points may have no CV prediction
    mean((y[ok] - cv_pred[ok])^2)
  }, numeric(1))
  # Refit on all data using the error-minimizing span.
  best_span <- span.vals[which.min(cv_mse)]
  best_fit <- loess(y ~ x, span = best_span, control = loess.control(surface = "direct"))
  return(list(model = best_fit))
}
# True conditional mean of Y given Z: E[Y | Z = z] = sin(3 z). Vectorized in z.
True_Y <- function(z){
  sin(3 * z)
}
# True conditional mean of X given Z: E[X | Z = z] = cos(2 z). Vectorized in z.
True_X <- function(z){
  cos(2 * z)
}
# Monte-Carlo study: empirical type-I error of three conditional-independence
# tests under the null X _||_ Y | Z (x and y share only the confounder z; the
# additive noises e[,1] and e[,2] are independent standard normals).
Iter = 200                        # number of Monte-Carlo replications
size = c(50,100,200,300,400,500)  # sample sizes examined
# Type I error: rejection indicators, one row per replication and one column
# per sample size, at nominal levels 1% (_1) and 5% (_5).
KCI_type1_1 <- matrix(NA, ncol = length(size), nrow = Iter)
KCI_type1_5 <- matrix(NA, ncol = length(size), nrow = Iter)
CDI_type1_1 <- matrix(NA, ncol = length(size), nrow = Iter)
CDI_type1_5 <- matrix(NA, ncol = length(size), nrow = Iter)
Ecov_type1_1 <- matrix(NA, ncol = length(size), nrow = Iter)
Ecov_type1_5 <- matrix(NA, ncol = length(size), nrow = Iter)
# Bivariate standard-normal error distribution (identity covariance, so the
# two noise components are independent and H0 holds by construction).
e_mu <- c(0,0)
e_sigma <- matrix(c(1,0,0,1),2,2)
for (i in 1:Iter) {
  for (j in 1:length(size)) {
    n <- size[j]
    # simulate data
    z <- runif(n, 0, 2)
    e <- mvrnorm(n, e_mu, e_sigma)
    mu_X <- True_X(z); x <- mu_X + e[,1]
    mu_Y <- True_Y(z); y <- mu_Y + e[,2]
    # KCI: kernel conditional independence test (CondIndTests package)
    KCI_p <- KCI(y, x, z)$pvalue
    KCI_type1_1[i,j] <- KCI_p <= 0.01
    KCI_type1_5[i,j] <- KCI_p <= 0.05
    # conditional distance covariance test (cdcsis package)
    CDI_p <- cdcov.test(y, x, z)$p.value
    CDI_type1_1[i,j] <- CDI_p <= 0.01
    CDI_type1_5[i,j] <- CDI_p <= 0.05
    # Ecov: plug-in estimate of the expected conditional covariance, with the
    # conditional means muY(z), muX(z) estimated by CV-tuned loess fits.
    Hat.muY <- loess_wrapper_extrapolate(z, y)$model$fitted
    Hat.muX <- loess_wrapper_extrapolate(z, x)$model$fitted
    ECov2 <- mean((y-Hat.muY)*(x-Hat.muX))
    EVarY2 <- mean((y-Hat.muY)^2)
    EVarX2 <- mean((x-Hat.muX)^2)
    # Per-observation influence values of the standardized covariance; their
    # mean square s2 estimates the asymptotic variance of the statistic.
    Dp = (y-Hat.muY)*(x-Hat.muX)/sqrt(EVarY2*EVarX2) - ECov2/(2*sqrt(EVarY2*EVarX2))*((y-Hat.muY)^2/EVarY2 + (x-Hat.muX)^2/EVarX2)
    s2 <- mean((Dp)^2)
    onestep <- ECov2/sqrt(EVarY2*EVarX2)
    # Two-sided normal test of the root-n standardized statistic.
    test_stat <- sqrt(n)*onestep/sqrt(s2)
    Ecov_p <- 2*pnorm(-abs(test_stat))
    Ecov_type1_1[i,j] <- Ecov_p <= 0.01
    Ecov_type1_5[i,j] <- Ecov_p <= 0.05
  }
}
# Persist the rejection matrices.
# NOTE(review): the ".../" prefix looks like a placeholder path -- point these
# at a real output directory before running.
save(KCI_type1_1,file=".../low_dim_typeI/KCI_type1_1.rda")
save(KCI_type1_5,file=".../low_dim_typeI/KCI_type1_5.rda")
save(CDI_type1_1,file=".../low_dim_typeI/CDI_type1_1.rda")
save(CDI_type1_5,file=".../low_dim_typeI/CDI_type1_5.rda")
save(Ecov_type1_1,file=".../low_dim_typeI/Ecov_type1_1.rda")
save(Ecov_type1_5,file=".../low_dim_typeI/Ecov_type1_5.rda")
|
8055ed338537abe38de8ab21fcf1cd0b97f49852
|
1a9f72bb42cc02d8c63de30b90a93cbe69549d9d
|
/shannon_n_htz.R
|
a03b8c8c0ee03394611933187a49fcd43a95de81
|
[] |
no_license
|
Redsiana/GPG
|
3b5251c0ad79ece564ca5e77b68639470c0d9ca5
|
ec6617224a9e8c0e0f6170454fda7018b2a2c632
|
refs/heads/master
| 2018-12-08T07:12:53.727600
| 2018-10-12T17:00:26
| 2018-10-12T17:00:26
| 117,999,784
| 0
| 0
| null | 2018-01-18T15:35:22
| 2018-01-18T15:20:46
|
R
|
UTF-8
|
R
| false
| false
| 3,533
|
r
|
shannon_n_htz.R
|
# -----------
# Shannon-diversity profile of genotypes along the X axis.
# For each occupied patch (a unique newbabyX,newbabyY coordinate pair) a
# normalized Shannon index over whole-genome genotype strings is computed,
# then averaged over all patches sharing an X coordinate, and finally spread
# onto a length-Xdim vector (NA where no patch is occupied).
# Assumes newbabyX/newbabyY are integer patch coordinates starting at 0 and
# popgenome has one row per individual, one column per locus with genotype
# codes 0/1/2 -- TODO confirm against the caller.
analysis_shannon <- function( newbabyX = newbabyX,
                              newbabyY = newbabyY,
                              popgenome = popgenome,
                              G = G, Xdim = Xdim ){
  #### PHENOTYPIC DIVERSITY -> Shannon index based on RESOURCE CONSUMPTION
  # some kind of shannon index based on the resource consumed. Of the total resource consumed,
  # how much was eaten of each category of the 2*G. Pop of total heteroz would consume 1/2G of each.
  # Patch key "x,y" for every individual; occupied patches are the unique keys.
  newbabyXY = paste(newbabyX, ",", newbabyY, sep="")
  occupiedpatches <- sort( unique( newbabyXY ) )
  # X coordinate of each occupied patch (every other token of the split key).
  X_position <- as.numeric( unlist( strsplit(occupiedpatches, ",") )[ c(TRUE,FALSE) ] )
  # patch_compet <- matrix( nrow = length(occupiedpatches) , ncol= 3*G )
  # rownames( patch_compet ) = occupiedpatches
  # for (g in 1:G){
  #   Tgenepatch <- table( factor( popgenome[,g], levels = 0:2), newbabyXY )
  #   #indivperpatch <- colSums( Tgenepatch )
  #   patch_compet[,(g*3-2)] <- Tgenepatch[1,] # vector with number of 00 genotypes on the patch
  #   patch_compet[,(g*3-1)] <- Tgenepatch[2,] # vector with number of 01 genotypes on the patch
  #   patch_compet[,(g*3)] <- Tgenepatch[3,]
  # }
  # freq_sp <- ( patch_compet / (apply(patch_compet, 1, sum)/G) )
  # log_freq_sp <- log2( freq_sp )
  # log_freq_sp[ is.infinite( log_freq_sp ) ] <- 0
  #
  # shannon <- - apply( freq_sp * log_freq_sp, 1, sum )
  # std_shannon <- shannon / ( - log2 (3) )
  #
  # X_position <- as.numeric( unlist( strsplit(occupiedpatches, ",") )[ c(TRUE,FALSE) ] )
  # Y_position <- as.numeric( unlist( strsplit(occupiedpatches, ",") )[ c(FALSE, TRUE) ] )
  #
  # shannon_gradient <- tapply(std_shannon, X_position, mean)
  # res_shannon <- rep(NA, Xdim)
  # res_shannon[ as.numeric( names(shannon_gradient)) + 1] <- shannon_gradient
  #
  #
  # Normalized Shannon index over the genotype strings found on one patch.
  shannon_function = function(x) {
    ## BEWARE this is not a true Shannon Index. The normalizing factor is if
    ## all individuals on the patch were different, or if all possible genotypes are represented
    ( - sum( ( table(x) / length(x) ) * log2 ( table(x) / length(x) ) ) ) / min( log2( length(x) ), log2( 3^G ) )
  }
  genome <- apply(popgenome, 1, paste, collapse="") # collapses loci in 1 string
  # Index per patch, then averaged over patches at each X coordinate.
  shannon_per_patch <- tapply( genome, newbabyXY, shannon_function )
  shannon_per_X <- tapply( shannon_per_patch, X_position, mean )
  # Spread onto the full gradient; X coordinates are 0-based, R is 1-based.
  res_shannon <- rep(NA, Xdim)
  res_shannon[ as.numeric( names(shannon_per_X)) + 1] <- shannon_per_X
  return(res_shannon)
}
# ind_shannon_a <- std_shannon[ popRB[ repro == 'a' & popsurvival == 1 ] ]
# ind_shannon_s <- std_shannon[ popRB[ repro == 's' & popsurvival == 1 ] ]
# ---------
# Mean heterozygosity profile along the X axis.
# Computes, for each individual, the fraction of its G loci that are
# heterozygous (genotype code 1), averages this over individuals sharing an
# X coordinate, and spreads the averages onto a length-Xdim vector (NA for
# X positions with no individuals). X coordinates are 0-based.
analysis_htz <- function( popgenome = popgenome,
                          G = G,
                          newbabyX = newbabyX, Xdim = Xdim){
  # Fraction of heterozygous loci per individual (rows of popgenome).
  het_per_ind <- rowSums(popgenome == 1) / G
  # Average heterozygosity among individuals at each X coordinate.
  het_by_x <- tapply(het_per_ind, newbabyX, mean)
  # Map onto the 0..(Xdim-1) gradient; unoccupied positions stay NA.
  res_htz <- rep(NA, Xdim)
  res_htz[as.numeric(names(het_by_x)) + 1] <- het_by_x
  return(res_htz)
}
# Byte-compile both analysis functions for a modest speed-up when they are
# called repeatedly (e.g. once per generation inside a simulation loop).
library(compiler)
analysis_shannon <- cmpfun(analysis_shannon)
analysis_htz <- cmpfun(analysis_htz)
|
9b36b25cd10628e4561bba6628cb946e4e7b3bee
|
72166929998ef4ae822b411da9f11b6c375a09ed
|
/Scripts_Teste/Classes e métodos S3 e S4.R
|
2a2ab5fc72fd1e063b255e41281ada462f5d927e
|
[] |
no_license
|
rceratti/Dissertacao
|
03699c499ce435c32361c79450fc1d615a9a3def
|
7dcd06a2a6a983943b282fb816326d6a1b1032c2
|
refs/heads/master
| 2021-01-22T13:47:05.330564
| 2014-05-03T18:45:57
| 2014-05-03T18:45:57
| 19,409,309
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,485
|
r
|
Classes e métodos S3 e S4.R
|
# Exemplo S3:
# S3 constructor: bundle a name, weight (kg) and height (m) into a "Person".
Person <- function(name, weight, height){
  fields <- list(name = name, weight = weight, height = height)
  structure(fields, class = "Person")
}
# Generic body-mass index; dispatches on the class of `x`.
bmi <- function(x) UseMethod('bmi', x)

# BMI for a Person: weight (kg) divided by squared height (m).
bmi.Person <- function(x) x$weight / x$height^2
# Print method for "Person": a small formatted card with name, weight,
# height, and the BMI derived via bmi(). Invoked automatically when a
# Person object is autoprinted at the console.
print.Person <- function(x) {
  cat(" Name: ", x$name, "\n",
      "Weight: ", x$weight, "Kg\n",
      "Height: ", x$height, "m\n",
      " BMI: ", round(bmi(x), 2), "\n")
}

# Demo: construct a Person and autoprint it (dispatches to print.Person).
bob <- Person("Bob", 70, 1.76)
bob
# S4 example: a formal class with typed slots, a validity check,
# a user-defined generic, and a show() method.
setClass("PersonS4",
  # Slots: person's name, weight in kg, height in m.
  representation(name = 'character',
                 weight = 'numeric',
                 height = 'numeric'),
  # Defaults: typed NAs until real values are supplied.
  prototype(name = NA_character_,
            weight = NA_real_,
            height = NA_real_),
  # Validity: returning TRUE accepts the object; returning a string makes
  # new()/validObject() raise that string as an error.
  # NOTE(review): the check also rejects weight == height although the
  # message says "smaller" -- confirm the intended boundary.
  validity = function(object) {
    if(object@weight > object@height)
      TRUE
    else
      "Weight cannot be smaller than height!"
  })

# Generic + method computing BMI from the S4 slots.
setGeneric("bmiS4", function(object) standardGeneric("bmiS4"))
setMethod("bmiS4", signature("PersonS4"),
  function(object) {
    object@weight/object@height^2
  })

# show() is S4's analogue of print(); called on autoprint.
setMethod("show", signature("PersonS4"),
  function(object) {
    cat(" Name: ", object@name, "\n",
        "Weight: ", object@weight, "Kg\n",
        "Height: ", object@height, "m\n",
        " BMI: ", round(bmiS4(object), 2), "\n")
  })

# Demo: instantiate (runs the validity check) and autoprint via show().
bobS4 <- new("PersonS4", name = "Bob", weight = 70, height = 1.76)
bobS4
|
0d12c3c0e6e4a1a4f5960383e8159dae12a10b81
|
300661ccbbc892eb51789fef4458df72f2724da3
|
/Random_Forest.r
|
4ccabb5a3d5088b3c3a2d94c6cd65470c8d4e248
|
[] |
no_license
|
pratikshaborkar/Weather_Prediction
|
73ce6705b9fd455988d1ae38906586950fef3b56
|
f987c530240561914a166502afafda8b502315c1
|
refs/heads/master
| 2020-07-28T01:57:30.123114
| 2019-09-18T11:14:46
| 2019-09-18T11:14:46
| 209,274,685
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
Random_Forest.r
|
# Random-forest classifier: predict whether a day in the Istanbul weather
# dataset is "Sunny" (1) vs not (0) from numeric weather measurements.
library('Metrics')
library('randomForest')
library('ggplot2')
library('ggthemes')
library('dplyr')

# Fixed seed so the forest (and hence all results below) are reproducible.
set.seed(100)

# Load the dataset; drop the first column (assumed a date/identifier --
# TODO confirm against the CSV header).
data <- read.csv("Istanbul Weather Data.csv", stringsAsFactors = T)
data <- data[, -1]
data
str(data)
# Dimensions of the modelling table.
dim(data)

# Binary target as a two-level factor: 1 = Sunny, 0 = everything else.
data$Condition <- ifelse(data$Condition == "Sunny", 1, 0)
data$Condition
data$Condition <- factor(data$Condition, levels = c(0, 1))
table(data$Condition)

# Coerce the predictors to integer. NOTE(review): for factor columns
# (e.g. sunrise/sunset times read as strings) this yields factor level
# codes, not parsed times -- confirm that is intended.
data$AvgWind <- as.integer(data$AvgWind)
data$MoonRise <- as.integer(data$MoonRise)
data$MoonSet <- as.integer(data$MoonSet)
data$SunRise <- as.integer(data$SunRise)
data$SunSet <- as.integer(data$SunSet)
data$Rain <- as.integer(data$Rain)
data$MinTemp <- as.integer(data$MinTemp)
data$AvgHumidity <- as.integer(data$AvgHumidity)
data$MaxTemp <- as.integer(data$MaxTemp)
data$AvgPressure <- as.integer(data$AvgPressure)

# Chronological train/test split.
train <- data[1:3000, ]
test <- data[3001:3854, ]

# Fit the random forest on all predictors.
model_rf <- randomForest(Condition ~ ., data = train)
model_rf

# Class predictions on the held-out rows.
# (Fixed: the original called predict() twice, discarding the first result.)
preds <- predict(model_rf, newdata = test[, -1], type = 'class')

# Predict a single hand-made observation.
new <- data.frame("Rain" = 0, "MaxTemp" = 29, "MinTemp" = 23, "AvgWind" = 19, "AvgHumidity" = 57, "AvgPressure" = 1017, "SunRise" = 20, "SunSet" = 175, "MoonRise" = 934, "MoonSet" = 165)
spred3 <- predict(model_rf, new)
spred3
if (spred3 == 1) {
  print('Sunny')
} else {
  print('Rainy')
}

# Distribution of predicted classes.
table(preds)

# Accuracy check via AUC. Metrics::auc(actual, predicted).
# (Fixed: the original passed the arguments in reversed order.)
auc(test$Condition, preds)
str(model_rf)
importance(model_rf)
|
8c13d3066bbc0e9ceb0605ecd4b0dac7929d1c38
|
169a6494a475f42d0452d3ade4622bde1eb939cc
|
/tests/testthat/test-gnr_datasources.R
|
1a8bd7bd86c605001831869d5432cbd095140e46
|
[
"MIT"
] |
permissive
|
ropensci/taxize
|
d205379bc0369d9dcdb48a8e42f3f34e7c546b9b
|
269095008f4d07bfdb76c51b0601be55d4941597
|
refs/heads/master
| 2023-05-25T04:00:46.760165
| 2023-05-02T20:02:50
| 2023-05-02T20:02:50
| 1,771,790
| 224
| 75
|
NOASSERTION
| 2023-05-02T20:02:51
| 2011-05-19T15:05:33
|
R
|
UTF-8
|
R
| false
| false
| 482
|
r
|
test-gnr_datasources.R
|
# Tests for gnr_datasources() (Global Names Resolver data-source listing).
context("gnr_datasources")

test_that("gnr_datasources returns the correct class", {
  skip_on_cran()
  # Replay a recorded HTTP fixture so the test never hits the live API.
  vcr::use_cassette("gnr_datasources", {
    tmp <- gnr_datasources()
  })
  # Contract: a 12-column data.frame with character title/updated_at
  # and an integer id column.
  expect_is(tmp, "data.frame")
  expect_equal(NCOL(tmp), 12)
  expect_is(tmp$title, "character")
  expect_is(tmp$updated_at, "character")
  expect_type(tmp$id, "integer")
})

test_that("gnr_datasources fails well", {
  skip_on_cran()
  # The todf argument was removed; passing it must raise a defunct error.
  expect_error(gnr_datasources(todf = 5),
               "todf is defunct")
})
|
be2672b33f40ecff1eccfcbfc6e3f6f87fc6dbb9
|
0ff8bdeb6c6fbd53b58975cee95b81bae9259cbe
|
/run_analysis.R
|
c6aea862ad5f50f40bb877eb67fa6d642b33d174
|
[] |
no_license
|
sambitkumohanty183/Getting-and-Cleaning-Data-Course-Project
|
1f2e091444934723eddf6f960b801627af4dedd8
|
8d8c9ff546a95dc61e0fe676c6153d7464b2b9b1
|
refs/heads/master
| 2021-01-17T05:21:38.007348
| 2014-10-25T17:14:58
| 2014-10-25T17:14:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,185
|
r
|
run_analysis.R
|
# Tidy-data pipeline over the UCI HAR (smartphone activity) dataset:
# merge train/test sets, keep only mean()/std() features, attach readable
# activity labels, and write two outputs (full tidy table + per-subject,
# per-activity averages).
require(data.table)  # provides the %like% operator used below
require(plyr)        # provides join() and ddply()
#Feature table: feature index (V1) and feature name (V2)
features<-read.table("UCI HAR Dataset/features.txt")
#Activity labels: numeric code -> human-readable description
activitylabels<-read.table("UCI HAR Dataset/activity_labels.txt")
names(activitylabels)<-c("activity","description")
#Training labels (one activity code per training row)
traininglabels<-read.table("UCI HAR Dataset/train/y_train.txt")
names(traininglabels)<-c("activity")
#Training Data
trainingdata<-read.table("UCI HAR Dataset/train/X_train.txt",header=F)
#Training labels mapped to their descriptions via join()
trainlabels<-data.frame(activity=join(traininglabels,activitylabels)[,"description"])
#Test labels
testlabels<-read.table("UCI HAR Dataset/test/y_test.txt")
names(testlabels)<-c("activity")
#Test data
testdata<-read.table("UCI HAR Dataset/test/X_test.txt",header=F)
#Test labels mapped to descriptions (overwrites the raw codes)
testlabels<-data.frame(activity=join(testlabels,activitylabels)[["description"]])
#Test subject ids
subjecttest<-read.table("UCI HAR Dataset/test/subject_test.txt")
names(subjecttest)<-c("subject")
#Training subject ids
subjecttrain<-read.table("UCI HAR Dataset/train/subject_train.txt")
names(subjecttrain)<-c("subject")
#Keep only the features we need: mean() and std() measurements
#(the regex escapes match the literal "mean()" / "std()" in feature names)
relevantfeatures<-features[features[["V2"]] %like% c("mean\\(\\)") | features[["V2"]] %like% c("std\\(\\)"),]
testdata<-testdata[,relevantfeatures[["V1"]]]
trainingdata<-trainingdata[,relevantfeatures[["V1"]]]
#Clean column names: lowercase, strip "-", "(" and ")"
names(testdata)<-gsub("\\)","",gsub("\\(","",tolower(gsub("-","",relevantfeatures[["V2"]]))))
names(trainingdata)<-gsub("\\)","",gsub("\\(","",tolower(gsub("-","",relevantfeatures[["V2"]]))))
#Prepend subject and descriptive activity columns to each set
testdata<-cbind(subjecttest,testlabels,testdata)
trainingdata<-cbind(subjecttrain,trainlabels,trainingdata)
#Merge test and training rows into one tidy dataset
finaldata<-rbind(testdata,trainingdata)
#Write the first tidy data set as CSV
write.csv(finaldata,file="output_activity_table.csv",row.names=FALSE)
#Second tidy set: mean of every feature per (subject, activity) pair
#(columns 3:68 are the 66 mean/std feature columns)
finaltidy<-ddply(finaldata,.(subject,activity),function(x){
  colMeans(x[,c(3:68)])
})
#Write the second tidy data set
#NOTE(review): write.csv() to a .txt filename -- confirm CSV is intended.
write.csv(finaltidy,file="output_activity_text.txt",row.names=FALSE)
|
66ca9423786e85c7a69b1f74f71618d019f511dd
|
d2b60fc3b7411dea33ac32160383312e3015d9eb
|
/ggplot2_01.R
|
45b170fe6b810edfc92e0030bc1df951482cf88b
|
[] |
no_license
|
coin723/R_dreaming
|
0787dc3f6f88622872b26d33355a2e5949837040
|
3d96cf1b2989113f2e65b7208bcad798b87bd705
|
refs/heads/master
| 2021-01-10T13:15:17.894565
| 2015-10-04T03:59:31
| 2015-10-04T03:59:31
| 43,624,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
ggplot2_01.R
|
# Quick tour of base-R plotting on built-in datasets.
# NOTE(review): despite the ggplot2_01.R filename, only base graphics appear.
plot(iris)            # scatterplot matrix of all iris variables
plot(Nile)            # time-series plot (Nile is a ts object)
data()                # list datasets available in attached packages
plot(BOD)             # demand vs. time, default point plot
plot(BOD, type = 'l') # same data drawn as a line
# NOTE(review): barplot() expects a vector or matrix; calling it on the BOD
# data frame may error -- confirm (barplot(as.matrix(BOD)) would be safe).
barplot(BOD)
|
bc7dcf6675267637dd18fd4ebcdae467a9034ffe
|
90d1821464cd074cb161d31fe4d0b18a22afd722
|
/server.R
|
417c3d9d350d48872e67997bbbe845e06c041b70
|
[] |
no_license
|
abmish/startup-funding
|
6420fe269e5037eadb18116c4e4c4e333207681c
|
6d46cce175a7fb15f9e341415ea2f7412fd9a8a5
|
refs/heads/master
| 2021-01-15T21:44:23.011879
| 2015-02-22T18:54:36
| 2015-02-22T18:54:36
| 31,161,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
r
|
server.R
|
# Shiny server: predicts the funding amount a startup raises from its
# employee count, category and funding round, using a linear model fit
# once at startup on TechCrunch continental-US funding data.
library(UsingR)

# Load the CSV keeping only numEmps, category, round and raisedAmt
# ("NULL" entries in colClasses drop the unused columns; NA keeps the
# reader's default type for that column).
raw_funding <- data.frame(read.csv("TechCrunchcontinentalUSA.csv", header = T,
                                   colClasses = c("NULL", "NULL", "numeric", NA, "NULL", "NULL", "NULL", NA, "NULL", NA)))

# Keep rows with a known employee count, in the three main categories.
funding <- raw_funding[complete.cases(raw_funding[, c('numEmps')]), ]
funding <- funding[(funding$category %in% c('mobile', 'software', 'web')), ]

# Linear model: raised amount ~ employees + category + round.
# (Fixed: the original contained a redundant double assignment
#  `model <- model <- lm(...)`.)
model <- lm(raisedAmt ~ numEmps + category + round, data = funding)

shinyServer(
  function(input, output) {
    # Predicted amount for the user's inputs. abs() guards against the
    # linear model producing a negative fitted value.
    output$raisedFund <- renderText({
      newcase <- data.frame(matrix(rep(NA, 3), nrow = 1))
      colnames(newcase) <- c("numEmps", "category", "round")
      newcase$numEmps <- input$emps
      newcase$category <- input$cat
      newcase$round <- input$f_round
      abs(predict(model, newcase))
    })
    # Echo the raw inputs back to the UI.
    output$emps <- renderText({ input$emps })
    output$category <- renderText({ input$cat })
    output$round <- renderText({ input$f_round })
  })
|
debe111dee1744f43c1a0b3a6bc4304f139d44d0
|
8c1b4d741ba97c9882bbd994d04e196fff6c09c6
|
/R/measoshapes-package.R
|
4ffe5b139bfb6e09d99e79aec1b33e504c4abcbe
|
[] |
no_license
|
AustralianAntarcticDivision/measoshapes
|
3dcc20944603508fb5fd20db95fda67852295b69
|
5e53849561c05df2acef9e587a31d6f0a312cee4
|
refs/heads/master
| 2020-05-09T20:00:34.777139
| 2020-04-06T02:06:56
| 2020-04-06T02:06:56
| 181,392,566
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
measoshapes-package.R
|
#' @keywords internal
"_PACKAGE"
#' Sectors and zones for MEASO.
#'
#' Data is stored in 'sf' data frame form, in a native polar projection. Longitude,latitude
#' version is copied with a '_ll' suffix. Versions with the Antarctic coastline baked-in
#' are '_coastline' suffix.
#'
#' Longer names and decomposition of each region is in 'measo_names'.
#' @name measo_regions05
#' @aliases measo_regions05_coastline measo_regions05_ll measo_regions05_ll_coastline measo_names
#' @docType data
NULL
|
dd5e1e6f79fe8a5f73e51376fa9d2528dc3b7db5
|
23434e36a1e8501f544a2122a54ce4b030226f3c
|
/R/simulate.white.noise.R
|
a70766b1bd9cbdaadea44143c2f471df23a54bc6
|
[] |
no_license
|
sebastianduchene/NELSI
|
da2c08038964be58a53e71d8a8f06a803ed6c1f4
|
1a9365e335e28289df8677eaf986459b252d7582
|
refs/heads/master
| 2022-08-31T05:35:09.056560
| 2022-08-15T11:30:23
| 2022-08-15T11:30:23
| 17,787,440
| 5
| 6
| null | 2016-08-02T00:45:07
| 2014-03-15T23:00:37
|
HTML
|
UTF-8
|
R
| false
| false
| 653
|
r
|
simulate.white.noise.R
|
# Simulate a white-noise (uncorrelated-rates) molecular clock on a phylogeny:
# each branch's substitution count is drawn independently around the strict
# clock expectation, and branch lengths are replaced by those substitutions.
# params[[1]] (rate): mean substitution rate; params[[2]] (var): controls the
# lognormal noise scale per branch.
simulate.white.noise <-
function(tree, params = list(rate = 0.006, var = 0.000001)){
  # Per-branch bookkeeping matrix. NOTE(review): get.tree.data.matrix() is
  # defined elsewhere in the package; the code below assumes col 5 = rate,
  # col 6 = substitutions, col 7 = branch time -- confirm the column layout.
  data.matrix <- get.tree.data.matrix(tree)
  # Expected substitutions per branch under the strict clock.
  clocksubst <- tree$edge.length * params[[1]]
  # Standardized expectations shifted to mean 1, so the lognormal sd scales
  # with relative branch length.
  clocksubstscaled <- as.numeric(scale(clocksubst)) + 1
  # Draw realized substitutions per branch from a lognormal centred (on the
  # log scale) at the clock expectation.
  for(i in 1:length(clocksubst)) data.matrix[, 6][i] <- rlnorm(1, log(clocksubst[i]), abs(params[[2]] * clocksubstscaled[i]))
  # Realized rate = substitutions / branch time.
  branch.rates <- data.matrix[, 6] / data.matrix[, 7]
  data.matrix[, 5] <- branch.rates
  # Return a phylogram whose edge lengths are in substitution units, plus
  # the bookkeeping matrix, wrapped in a "ratesim" object.
  tree$edge.length <- data.matrix[, 6]
  res <- list(tree, tree.data.matrix = data.matrix)
  names(res) <- c("phylogram", "tree.data.matrix")
  class(res) <- "ratesim"
  return(res)
}
|
5e7489f8cd343169a342f3e2ded30171757eb5f8
|
3645cc9ec4fc326a79c0f91e111f275c80980f08
|
/man/EvalCov.Rd
|
a6e60bc020ce3cfbc7df57f43c3425eebf637beb
|
[] |
no_license
|
cran/spatsurv
|
4ad176e1dba92957a5f97b1dd8f2c4c8e2c2c01b
|
530b040149bf08d8f94cb2ee6240a87e84144daa
|
refs/heads/master
| 2022-12-05T14:25:39.144853
| 2022-11-22T13:30:02
| 2022-11-22T13:30:02
| 17,699,980
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 482
|
rd
|
EvalCov.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covarianceFunctions.R
\name{EvalCov}
\alias{EvalCov}
\title{EvalCov function}
\usage{
EvalCov(cov.model, u, parameters)
}
\arguments{
\item{cov.model}{an object of class covmodel}
\item{u}{vector of distances}
\item{parameters}{vector of parameters}
}
\value{
method EvalCov
}
\description{
This function is used to evaluate the covariance function within the MCMC run. Not intended for general use.
}
|
69aece5472ed7f5ee884a1821de0dbb681ceb89f
|
47b3f50a476b3990f505c45e68fce595c5991210
|
/062-u2-world-tour/readme.R
|
cae735f27b3d30f8fbef4300ec4f73036c8c2496
|
[
"Apache-2.0"
] |
permissive
|
baifengbai/r-posts
|
611235c514cbfbd14926fbde825d48c4f5ee7452
|
e694c45fb639b4a7bcf1b307084cc2bc1ddf5ea0
|
refs/heads/master
| 2020-03-11T19:21:00.671871
| 2018-02-05T03:04:30
| 2018-02-05T03:04:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,106
|
r
|
readme.R
|
# packages ----------------------------------------------------------------
# (Fixed: a stray leading "+" before this header made the parser apply unary
# plus to the invisible NULL returned by rm(), which errors at run time.)
rm(list = ls())
library(dplyr)
library(tidyr)
library(rvest)
library(stringr)
library(highcharter)
library(lubridate)
library(ggmap)
library(purrr)
library(viridisLite)
options(highcharter.debug = TRUE)
# data --------------------------------------------------------------------
# Scrape the concert table from the U2 360° Tour Wikipedia page.
html <- read_html("https://en.wikipedia.org/wiki/U2_360%C2%B0_Tour")
df <- html_table(html, fill = TRUE)[[2]]
df <- tbl_df(df)
# Lowercase names with the first space replaced by an underscore.
names(df) <- str_replace(str_to_lower(names(df)), " ", "_")
# English month names so the "%d %B %Y" date parsing below works regardless
# of the session locale.
Sys.setlocale("LC_TIME","English")
df <- df %>%
  # "Leg N" rows are section headers: carry the label down onto the show
  # rows with fill(), then drop the header and TOTAL rows.
  mutate(leg = ifelse(str_detect(date, "Leg "), date, NA)) %>%
  fill(leg) %>%
  filter(!str_detect(date, "Leg "),
         date != "TOTAL") %>%
  # attendance looks like "12,345 / 12,345": keep the first number, strip
  # commas/spaces/slashes, and parse as numeric. Revenue similarly, with
  # missing values coalesced to 0.
  mutate(attendance2 = str_extract(attendance, ".* "),
         attendance2 = str_replace_all(attendance2, ",| |/", ""),
         attendance2 = as.numeric(attendance2),
         revenue2 = str_replace_all(revenue, ",|\\$", ""),
         revenue2 = as.numeric(revenue2),
         revenue2 = coalesce(revenue2, 0),
         date2 = as.Date(date, format = "%d %B %Y"),
         ym = date2 - day(date2) + days(1),  # first day of the show's month
         leg = str_replace(leg, "\\[\\d+\\]\\[\\d+\\]", ""))  # drop footnote refs
glimpse(df)
# One row per (leg, month, venue): concert count, attendance, revenue.
dfym <- df %>%
  group_by(leg, ym, country, city, venue) %>%
  summarise(concerts = n(),
            attendance = first(attendance2),
            revenue = first(revenue2)) %>%
  mutate(location = paste0(country, ", ", city),
         z = revenue) %>%   # z drives the highcharts bubble size
  ungroup()
# Geocode every location (one network call each, via ggmap).
gcodes <- map(dfym$location, geocode)
gcodes <- map_df(gcodes, as_data_frame)
dfym <- bind_cols(dfym, gcodes)
glimpse(dfym)
# first chart -------------------------------------------------------------
# Dark world basemap; bubbles added per series below.
world <- hcmap(nullColor = "#424242", borderColor = "gray") %>%
  hc_chart(backgroundColor = "#161C20") %>%
  hc_plotOptions(series = list(showInLegend = FALSE))
world
world %>%
  hc_add_series(data = dfym, type = "mapbubble", name = "Concerts",
                maxSize = "2%", color = "white",
                tooltip = list(valueDecimals = 0,
                               valuePrefix = "$",
                               valueSuffix = " USD")
  )
# motion ------------------------------------------------------------------
# http://jsfiddle.net/gh/get/jquery/1.9.1/larsac07/Motion-Highcharts-Plugin/tree/master/demos/map-australia-bubbles-demo/
# Animated version: one value per month so the Motion plugin can play time.
dateseq <- seq(min(dfym$ym), max(dfym$ym), by = "month")
# Per venue: 0 for months before its show, its z value afterwards.
sequences <- map2(dfym$ym, dfym$z, function(x, y){ ifelse(x > dateseq, 0, y)})
cols <- inferno(10, begin = 0.2)
scales::show_col(cols)
dfym <- mutate(dfym, sequence = sequences, color = colorize(revenue, cols))
dateslabels <- format(dateseq, "%Y/%m")
world %>%
  hc_add_series(data = dfym, type = "mapbubble", name = "Concerts",
                minSize = 0, maxSize = 15, animation = FALSE,
                tooltip = list(valueDecimals = 0, valuePrefix = "$", valueSuffix = " USD")) %>%
  hc_motion(enabled = TRUE, series = 1, labels = dateslabels,
            loop = TRUE, autoPlay = TRUE,
            updateInterval = 1000, magnet = list(step = 1))
|
c28e60d93e5d3bf35d19bc26253b16f3d2fc5c2b
|
f3b552e07781d7092de9b1334538245fa930922e
|
/01-IIM/iitb2.R
|
ba52dfe82fe5619fae680d7fee021a5d6c4125a6
|
[] |
no_license
|
uc4444/rAnalytics
|
247a7e228073a403e3d22042f308de8a22caeb0c
|
95d74152855d6bdfffd0f12f738d440dee0498ba
|
refs/heads/master
| 2021-01-01T00:54:19.865058
| 2020-02-08T09:58:34
| 2020-02-08T09:58:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
iitb2.R
|
# Scratch notes: fitting and predicting with several model families on data5
# (data5, x1, x2, y and ndata1 are assumed to exist in the session --
#  TODO confirm).
data5

# Linear regression (continuous response); evaluate with RMSE.
model1 <- lm(y ~ x1 + x2, data = data5)
#rmse

# "Logistic" model. NOTE(review): glm() without family = binomial fits a
# gaussian model, i.e. ordinary least squares -- add family = binomial for
# true logistic regression.
model2 <- glm(y ~ x1 + x2, data = data5)
#confusion matrix

# Classification decision tree (rpart); evaluate with a confusion matrix.
model3 <- rpart(y ~ x1 + x2, data = data5, method = 'class') #class
#confusion matrix

# Regression tree for a numeric response.
model3b <- rpart(y ~ x1 + x2, data = data5, method = 'anova') #num

# Prediction templates.
# (Fixed: the original `predict(model, newdata = ndata1, type =
#  'response'/'class')` is not valid R -- '/' cannot combine two strings,
#  and `model` was never defined. Pick the type matching the model:)
# predict(model2, newdata = ndata1, type = 'response')  # glm: fitted values
# predict(model3, newdata = ndata1, type = 'class')     # tree: class labels
|
2330739e9dfb53aabc3a756858689eafc7860435
|
f6f694ac585304c9acd13fec13393d2d0aa63391
|
/Limpieza_ParticipacionPolitica.R
|
2e2e85bba81bcaf7e4f1fc2624c13f7c26f9dff3
|
[] |
no_license
|
jesuszaratev/TrabajoGrupal
|
b5c91e6c6644524a4ed791892fb7616b852a739a
|
d5154bb8e3b092471450007fa6930488c104fc78
|
refs/heads/master
| 2022-08-22T01:38:49.243138
| 2020-05-30T16:23:03
| 2020-05-30T16:23:03
| 268,102,030
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 965
|
r
|
Limpieza_ParticipacionPolitica.R
|
# Inspect the raw PoliticalParticipation table before cleaning.
View(PoliticalParticipation)
str(PoliticalParticipation)
# Replace the decimal comma with a decimal point in each yearly column.
for (yr in c(2006, 2010, 2015, 2019)) {
  colname <- paste0("PoliticalParticipation(", yr, ")")
  PoliticalParticipation[[colname]] <- gsub(",", ".", PoliticalParticipation[[colname]])
}
# Convert the four yearly columns (2:5) to numeric.
PoliticalParticipation[2:5] = lapply(PoliticalParticipation[2:5], as.numeric)
# Rename the columns to the Spanish labels used downstream.
names(PoliticalParticipation) = c("Pais", "Participacion_Politica(2006)","Participacion_Politica(2010)","Participacion_Politica(2015)","Participacion_Politica(2019)")
str(PoliticalParticipation)
|
365941af4090b7b60984317612181e9733b69bdd
|
ed17b93b41ebc74b11ef01963dd3374968aa51ec
|
/man/Get.Def.Par.RNN.Rd
|
9bdfff7c76b1d55995af7346fe52394cb5e0b619
|
[] |
no_license
|
MartinKies/RLR
|
64fe378ebad8e5a83628efe7bde7f8a628375cb6
|
3b8b1bfd4b4766b1e612de85a96c8d24e95d33e6
|
refs/heads/master
| 2022-03-24T09:32:00.219749
| 2019-12-17T16:23:26
| 2019-12-17T16:23:26
| 108,415,469
| 1
| 1
| null | 2018-03-19T13:48:34
| 2017-10-26T13:30:58
|
R
|
UTF-8
|
R
| false
| true
| 2,354
|
rd
|
Get.Def.Par.RNN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RNN.R
\name{Get.Def.Par.RNN}
\alias{Get.Def.Par.RNN}
\title{Define default Parameters of the RNN Function}
\usage{
Get.Def.Par.RNN(setting = "ThesisOpt")
}
\arguments{
\item{setting}{Specefies the to be used setting. Currently the following settings are available: \itemize{
\item "ThesisOpt" - Default setting. The same parameters are used as for the final results of the thesis of Martin Kies.
\item "Legacy.v.0.1.6" - Uses the default settings of version 0.1.6 of this package
}}
}
\description{
Returns a List which may be used as \code{model.par} of e.g. the function \code{Setup.QLearning()} with the following parameters:\itemize{
\item \strong{name} - Identifier of Model. Per Default \"RNN\".
\item \strong{setup} - Function which should be used to setup the RNN. Per Default \code{Setup.RNN}
\item \strong{predict} - Function which should be used to predict the RNN. Per Default \code{Predict.RNN}
\item \strong{train} - Function which should be used to train/calibrate the RNN. Per Default \code{Train.RNN}
\item \strong{hidden.nodes} - A Vector consisting of the number of Neurons in each hidden layer - e.g. c(25,10) to have two hidden layers with the first layer having 25 Neurons.
\item \strong{layer.type} - A vector consisting of the names of the type of the hidden layer. Supported are "lstm", "gru", "dense". If lstm or gru are used in a deep layer the sequence is returned.
\item \strong{activation.hidden} - A Vector defining the activation functions of the hidden layers, e.g. c(\"relu\",\"relu\"). Has to have the same number of items as \code{hidden.nodes}. Supported are e.g. relu, tanh, sigmoid and linear
\item \strong{activation.output}. Activiation function of the output layer. Supported are e.g. relu, tanh, sigmoid and linear.
\item \strong{loss}. Specifies the loss function, e.g. \'mse\'
\item \strong{optimizer}. Specifies the used optimizer. By Default Adam Optimization is used with a Learning rate of 0.001.
\item \strong{mask.value}. Which value should be used for masking?
\item \strong{epochs}. How many epochs should the RNN be trained?
\item \strong{batch.size}. Batch Size of RNN.
\item \strong{verbose}. Should the RNN give an output? 0 for no output, 1 for output for each epoch, 2 for aggregate output every other epoch.
}
}
|
a71a60413c1c343a6b05aa9ce5181f936f68c6af
|
c37710e45d05176d20252a414512496acfb9d14f
|
/man/datasetLiverBrainLung.Rd
|
ca0920e81640f82450fca6b56e1458449f2e1a48
|
[
"MIT"
] |
permissive
|
ftmlik/LinSeed
|
7d9b5abef7ec93e526adecdaa6baf049d3a7ed84
|
1ab41330aee1a62d7643b9c109d84160b53fa7d9
|
refs/heads/master
| 2022-04-10T21:51:29.871351
| 2019-08-09T15:03:53
| 2019-08-09T15:03:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,528
|
rd
|
datasetLiverBrainLung.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasetLiverBrainLung.R
\docType{data}
\name{datasetLiverBrainLung}
\alias{datasetLiverBrainLung}
\title{GSE19830 dataset}
\format{An object of class \code{'data.frame'}}
\source{
\href{http://qtlarchive.org/db/q?pg=projdetails&proj=moore_2013b}{QTL Archive}
}
\usage{
data(datasetLiverBrainLung)
}
\description{
Gene Expression data from GSE19830 experiment:
3 tissues (liver, brain and lung) were mixed in different proportions:
}
\details{
GSM495209-GSM495211 pure lung samples
GSM495212-GSM495214 pure brain samples
GSM495215-GSM495217 pure liver samples
GSM495218-GSM495220 5 % Liver / 25 % Brain / 70 % Lung
GSM495221-GSM495223 70 % Liver / 5 % Brain / 25 % Lung
GSM495224-GSM495226 25 % Liver / 70 % Brain / 5 % Lung
GSM495227-GSM495229 70 % Liver / 25 % Brain / 5 % Lung
GSM495230-GSM495232 45 % Liver / 45 % Brain / 10 % Lung
GSM495233-GSM495235 55 % Liver / 20 % Brain / 25 % Lung
GSM495236-GSM495238 50 % Liver / 30 % Brain / 20 % Lung
GSM495239-GSM495241 55 % Liver / 30 % Brain / 15 % Lung
GSM495242-GSM495244 50 % Liver / 40 % Brain / 10 % Lung
GSM495245-GSM495247 60 % Liver / 35 % Brain / 5 % Lung
GSM495248-GSM495250 65 % Liver / 34 % Brain / 1 % Lung
}
\examples{
}
\references{
Shen-Orr SS, Tibshirani R, Khatri P, et al. cell type-specific gene expression differences in complex tissues. Nature methods. 2010;7(4):287-289. doi:10.1038/nmeth.1439.
(\href{http://www.ncbi.nlm.nih.gov/pubmed/20208531}{PubMed})
}
\keyword{datasets}
|
454ac4434c207b9938166282ac1af937e6f50f56
|
c332694aed4a74cf0e5802e8f7ff6b82dbf88acb
|
/tests/testthat/teardown.R
|
c08c455c477fc669553c7f10edb2153c45c552ca
|
[] |
no_license
|
mdlincoln/storrmap
|
82b9a11b7c618ef5dbefa6a21440263f5eab0354
|
7c7436696589ce69e8fb2554bfff96da8c252989
|
refs/heads/master
| 2020-03-29T07:53:17.283705
| 2018-09-24T11:33:03
| 2018-09-24T11:33:03
| 149,683,808
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
teardown.R
|
# Remove the storr directory used durnig testing
test_storr$destroy()
|
785ba7282c92b3119eb4650e7b833d0aa8bfeb69
|
f995139d5c4ff0c6dc2785edfebb84df43a6bb3f
|
/R/0280-extract-bcfishpass-rd-data-modelled_closed_bottom.R
|
71b83071670f66c6a781f3ee8d2b0e1f3bb40a89
|
[
"Apache-2.0"
] |
permissive
|
Mateo9569/fish_passage_bulkley_2020_reporting
|
4633713f012edc331552f2ef367b2f3456f14bae
|
49e25c50cef023c3f93654816618f714431da94a
|
refs/heads/master
| 2023-04-12T20:39:05.124435
| 2021-05-04T21:43:43
| 2021-05-04T21:43:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,771
|
r
|
0280-extract-bcfishpass-rd-data-modelled_closed_bottom.R
|
source('R/packages.R')
source('R/functions.R')
source('R/private_info.R')
source('R/0255-load-pscis.R')
##get the road info from the database
conn <- DBI::dbConnect(
RPostgres::Postgres(),
dbname = dbname,
host = host,
port = port,
user = user,
password = password
)
#
# ##listthe schemas in the database
dbGetQuery(conn,
"SELECT schema_name
FROM information_schema.schemata")
##see the size of each schema in the database (https://www.postgresonline.com/journal/archives/110-Determining-size-of-database,-schema,-tables,-and-geometry.html)
dbGetQuery(conn,
"SELECT schema_name,
pg_size_pretty(sum(table_size)::bigint),
(sum(table_size) / pg_database_size(current_database())) * 100
FROM (
SELECT pg_catalog.pg_namespace.nspname as schema_name,
pg_relation_size(pg_catalog.pg_class.oid) as table_size
FROM pg_catalog.pg_class
JOIN pg_catalog.pg_namespace ON relnamespace = pg_catalog.pg_namespace.oid
) t
GROUP BY schema_name
ORDER BY schema_name")
##size of tables
tab_size <- dbGetQuery(conn,
"SELECT
schema_name,
relname,
pg_size_pretty(table_size) AS size,
table_size
FROM (
SELECT
pg_catalog.pg_namespace.nspname AS schema_name,
relname,
pg_relation_size(pg_catalog.pg_class.oid) AS table_size
FROM pg_catalog.pg_class
JOIN pg_catalog.pg_namespace ON relnamespace = pg_catalog.pg_namespace.oid
) t
WHERE schema_name NOT LIKE 'pg_%'
ORDER BY table_size DESC;") %>%
arrange(table_size)
# #
# #
# # # ##list tables in a schema
dbGetQuery(conn,
"SELECT table_name
FROM information_schema.tables
WHERE table_schema='bcfishpass'")
# # # # #
# # # # # ##list column names in a table
dbGetQuery(conn,
"SELECT column_name,data_type
FROM information_schema.columns
WHERE table_name='fwa_stream_networks_sp'")
dbGetQuery(conn,
"DROP TABLE whse_basemapping.fwa_linear_boundaries_sp;")
# test <- dbGetQuery(conn, "SELECT * FROM bcfishpass.waterfalls")
dat <- pscis_all %>%
sf::st_as_sf(coords = c("easting", "northing"),
crs = 26909, remove = F) %>% ##don't forget to put it in the right crs buds
sf::st_transform(crs = 3005)
# add a unique id - we could just use the reference number
dat$misc_point_id <- seq.int(nrow(dat))
# dbSendQuery(conn, paste0("CREATE SCHEMA IF NOT EXISTS ", "test_hack",";"))
# load to database
sf::st_write(obj = dat, dsn = conn, Id(schema= "ali", table = "misc"))
# sf doesn't automagically create a spatial index or a primary key
res <- dbSendQuery(conn, "CREATE INDEX ON ali.misc USING GIST (geometry)")
dbClearResult(res)
res <- dbSendQuery(conn, "ALTER TABLE ali.misc ADD PRIMARY KEY (misc_point_id)")
dbClearResult(res)
dat_info <- dbGetQuery(conn, "SELECT
a.misc_point_id,
b.*,
ST_Distance(ST_Transform(a.geometry,3005), b.geom) AS distance
FROM
ali.misc AS a
CROSS JOIN LATERAL
(SELECT *
FROM fish_passage.modelled_crossings_closed_bottom
ORDER BY
a.geometry <-> geom
LIMIT 1) AS b")
##join the modelled road data to our pscis submission info
dat_joined <- left_join(
dat,
# dat_info,
select(dat_info,misc_point_id:fcode_label, distance, crossing_id), ##geom keep only the road info and the distance to nearest point from here
by = "misc_point_id"
)
# sf::st_drop_geometry() ##distinct will pick up geometries!!!!!!
dbDisconnect(conn = conn)
##we also need to know if the culverts are within a municipality so we should check
##get the road info from our database
conn <- DBI::dbConnect(
RPostgres::Postgres(),
dbname = "postgis",
host = "localhost",
port = "5432",
user = "postgres",
password = "postgres"
)
# load to database
sf::st_write(obj = dat, dsn = conn, Id(schema= "working", table = "misc"))
dat_info <- dbGetQuery(conn,
"
SELECT a.misc_point_id, b.admin_area_abbreviation, c.map_tile_display_name
FROM working.misc a
INNER JOIN
whse_basemapping.dbm_mof_50k_grid c
ON ST_Intersects(c.geom, ST_Transform(a.geometry,3005))
LEFT OUTER JOIN
whse_legal_admin_boundaries.abms_municipalities_sp b
ON ST_Intersects(b.geom, ST_Transform(a.geometry,3005))
")
dbDisconnect(conn = conn)
##add the municipality info
dat_joined2 <- left_join(
dat_joined,
dat_info,
by = "misc_point_id"
)
##we already did this but can do it again I guess. you cut and paste the result into kable then back
##into here using addin for datapasta
# tab_rd_tenure_xref <- unique(dat_joined2$client_name) %>%
# as_tibble() %>%
# purrr::set_names(nm = 'client_name') %>%
# mutate(client_name_abb = NA)
tab_rd_tenure_xref <- tibble::tribble(
~client_name, ~client_name_abb,
"DISTRICT MANAGER NADINA (DND)", "FLNR Nadina",
"CANADIAN FOREST PRODUCTS LTD.", "Canfor",
"WET'SUWET'EN VENTURES LTD", "Wetsuweten Ventures"
)
##add that to your dat file for later
dat_joined3 <- left_join(
dat_joined2,
tab_rd_tenure_xref,
by = 'client_name'
)
##make a dat to make it easier to see so we can summarize the road info we might want to use
# dat_rd_sum <- dat_joined3 %>%
# select(pscis_crossing_id, my_crossing_reference, crossing_id, distance, road_name_full,
# road_class, road_name_full, road_surface, file_type_description, forest_file_id,
# client_name, client_name_abb, map_label, owner_name, admin_area_abbreviation)
##make a dat to make it easier to see so we can summarize the road info we might want to use
dat_joined4 <- dat_joined3 %>%
mutate(admin_area_abbreviation = case_when(
is.na(admin_area_abbreviation) ~ 'MoTi',
T ~ admin_area_abbreviation),
my_road_tenure =
case_when(!is.na(client_name_abb) ~ paste0(client_name_abb, ' ', forest_file_id),
!is.na(road_class) ~ paste0(admin_area_abbreviation, ' ', road_class),
!is.na(owner_name) ~ owner_name)) %>%
mutate(my_road_tenure =
case_when(distance > 100 ~ 'Unknown', ##we need to get rid of the info for the ones that are far away
T ~ my_road_tenure))
# rename(geom_modelled_crossing = geom)
# test <- dat_joined4 %>% filter(pscis_crossing_id == 197656)
##build tables to populate the pscis spreadsheets
pscis1_rd_tenure <- left_join(
select(pscis_phase1, my_crossing_reference),
select(dat_joined4, my_crossing_reference, my_road_tenure),
by = 'my_crossing_reference'
) %>%
distinct(my_crossing_reference, my_road_tenure) %>%
tibble::rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
mutate(rowname = rowname + 4) %>%
readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/pscis1_rd_tenure.csv'))
pscis_reassessmeents_rd_tenure <- left_join(
select(pscis_reassessments, pscis_crossing_id),
select(dat_joined4, pscis_crossing_id, my_road_tenure),
by = 'pscis_crossing_id'
) %>%
distinct(pscis_crossing_id, my_road_tenure) %>%
tibble::rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
mutate(rowname = rowname + 4) %>%
readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/pscis_reassessmeents_rd_tenure.csv'))
pscis_phase2_rd_tenure <- left_join(
select(pscis_all %>% filter(source %like% 'phase2'), pscis_crossing_id), # %>% st_drop_geometry()
select(dat_joined4, pscis_crossing_id, my_road_tenure),
by = 'pscis_crossing_id'
) %>%
distinct(pscis_crossing_id, my_road_tenure) %>%
tibble::rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
mutate(rowname = rowname + 4) %>%
readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/pscis_phase2_rd_tenure.csv'))
pscis_rd <- dat_joined4 %>%
# sf::st_drop_geometry() %>%
dplyr::mutate(my_road_class = case_when(is.na(road_class) & !is.na(file_type_description) ~
file_type_description,
T ~ road_class)) %>%
dplyr::mutate(my_road_class = case_when(is.na(my_road_class) & !is.na(owner_name) ~
'rail',
T ~ my_road_class)) %>%
dplyr::mutate(my_road_surface = case_when(is.na(road_surface) & !is.na(file_type_description) ~
'loose',
T ~ road_surface)) %>%
dplyr::mutate(my_road_surface = case_when(is.na(my_road_surface) & !is.na(owner_name) ~
'rail',
T ~ my_road_surface)) %>%
readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/pscis_rd.csv'))
####----tab cost multipliers for road surface-----
tab_cost_rd_mult <- pscis_rd %>%
select(my_road_class, my_road_surface) %>%
# mutate(road_surface_mult = NA_real_, road_class_mult = NA_real_) %>%
mutate(road_class_mult = case_when(my_road_class == 'local' ~ 4,
my_road_class == 'collector' ~ 4,
my_road_class == 'arterial' ~ 10,
my_road_class == 'highway' ~ 10,
my_road_class == 'rail' ~ 5,
T ~ 1)) %>%
mutate(road_surface_mult = case_when(my_road_surface == 'loose' |
my_road_surface == 'rough' ~
1,
T ~ 2)) %>%
# mutate(road_type_mult = road_class_mult * road_surface_mult) %>%
mutate(cost_m_1000s_bridge = road_surface_mult * road_class_mult * 12.5,
cost_embed_cv = road_surface_mult * road_class_mult * 25) %>%
# mutate(cost_1000s_for_10m_bridge = 10 * cost_m_1000s_bridge) %>%
distinct( .keep_all = T) %>%
tidyr::drop_na() %>%
arrange(cost_m_1000s_bridge, my_road_class) %>%
readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/tab_cost_rd_mult.csv'))
#######################################################################################################
#######-----------------add the update real bcfishpass information-----------
##if we run into issues we can come back and rebiuld from here
##this sucks and is super hacky but we are going to grab all the info from bcfishpass and add it
##connect again
##get the new bcfishpass info from the database
conn <- DBI::dbConnect(
RPostgres::Postgres(),
dbname = dbname,
host = host,
port = port,
user = user,
password = password
)
# dat_unique <- dat %>% distinct(pscis_crossing_id, .keep_all = T)
# load to database
sf::st_write(obj = dat, dsn = conn, Id(schema= "ali", table = "misc"))
##this is the way we did it before but seems brittle as bcfishpass is new than fish_passage schema
##this time we will pull bcfishpass raw and try to join based on the id and see if they are all the same
# dat_info <- dbGetQuery(conn,
# "SELECT a.crossing_id, b.*
# FROM ali.misc a
# LEFT OUTER JOIN
# bcfishpass.crossings b
# ON a.crossing_id = b.modelled_crossing_id")
dat_info <- dbGetQuery(conn, "SELECT
a.misc_point_id, a. pscis_crossing_id,
b.*,
ST_Distance(ST_Transform(a.geometry,3005), b.geom) AS distance
FROM
ali.misc AS a
CROSS JOIN LATERAL
(SELECT *
FROM bcfishpass.barriers_anthropogenic
ORDER BY
a.geometry <-> geom
LIMIT 1) AS b")
##test the join to see if we get the same matched points each time
test <- left_join(
select(dat_joined4, misc_point_id, crossing_id, distance),
select(dat_info, misc_point_id, modelled_crossing_id, distance2 = distance),
by = c('misc_point_id', 'crossing_id' = 'modelled_crossing_id')
) %>%
mutate(distance_diff = distance - distance2) %>%
filter(distance_diff > 10)
##this looks good with all the same match and only 1 crossing more than 10m away but it is a good match regardless
##here we find identical columns because we will want to remove them from one of the lists.
columns_to_remove <- intersect(names(dat_joined4), names(dat_info))
columns_to_keep <- c("misc_point_id", "pscis_crossing_id") #crossing_id
columns_to_remove <- setdiff(columns_to_remove, columns_to_keep) ##make sure to keep the joining column
##Lets keep the newest ones and remove from the old dataframe
dat_joined5 <- dat_joined4 %>%
# sf::st_drop_geometry() %>% ##the distinct was picking up the geometry!!!!!!!
# distinct(pscis_crossing_id, .keep_all = T) %>% ##need to get rid of duplicates
select(-all_of(columns_to_remove)) %>%
select(misc_point_id, everything())
# identical(dat_joined5[21,3],dat_joined5[22,3])
##join together the old and the new
dat_joined6 <- left_join(
dat_joined5,
select(dat_info, -pscis_crossing_id),
by = 'misc_point_id' #c('crossing_id' = 'modelled_crossing_id')
)
# distinct(pscis_crossing_id, my_crossing_reference, source, .keep_all = T)
dups <- dat_joined6 %>% group_by(pscis_crossing_id) %>%
mutate(duplicated = n()>1) %>%
filter(duplicated == T & !is.na(pscis_crossing_id)) ##this is not connected bc its an error with the geometry when its empty - feeds the top input though!!!
# distinct(pscis_crossing_id, .keep_all = T) %>%
# pull(my_crossing_reference)
##burn it all to a file we can use later
dat_joined6 %>% readr::write_csv(file = paste0(getwd(), '/data/extracted_inputs/bcfishpass.csv'))
|
2a90cb850ed516cbafc7f47a0bb07dd8f2dd08b0
|
b642c2729dffc40b700036e0bd27c121d8703df7
|
/pollutantmean.R
|
4bf413fcc627bceb3613a48c7661cc823fb342cb
|
[] |
no_license
|
OlesyaZ/R-codes
|
d1c5c339f435feb422e10d58766e2b4726812bb0
|
747cc71146320f4163e8a4653d0958d2e50af360
|
refs/heads/master
| 2016-09-06T07:41:47.233943
| 2014-05-23T02:37:54
| 2014-05-23T02:37:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 946
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
filepath<-paste(c("/Users/shurik/Documents/Olesya/RProjects/",directory,"/"),collapse="")
setwd(filepath)
filename<-dir()
n<-0
sum_pol<-0
num_pol<-0
for (i in id) {
datas <- read.csv(filename[i])
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
sum_pol<-sum_pol+ sum(datas[[pollutant]][!is.na(datas[[pollutant]])])
m<-mean(datas[[pollutant]],na.rm=TRUE)
num_pol<-num_pol+length(datas[[pollutant]][!is.na(datas[[pollutant]])])
mean_pol<-sum_pol/num_pol
##print(m)
## print(mean_pol)
}
return(mean_pol,digi?ts=4)
}
|
a09bcbe9a104769b23785e6d629657fab8a0a424
|
f77394c2e20a1bb0d35d4dc28aec334d37086f0d
|
/man/nChoose2Inv.Rd
|
a5e642b625114b319a8e384f7216b090fe992684
|
[] |
no_license
|
stevencarlislewalker/lme4ord
|
350a55dff1d224b2681428912bf18876594a5c86
|
5f6266459ba752aba0ce02ebe13d37e1ae4c7e19
|
refs/heads/master
| 2021-01-18T22:21:38.385881
| 2016-05-09T13:41:13
| 2016-05-09T13:41:13
| 22,126,172
| 9
| 4
| null | 2015-03-22T19:31:55
| 2014-07-23T00:38:20
|
R
|
UTF-8
|
R
| false
| false
| 378
|
rd
|
nChoose2Inv.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{nChoose2Inv}
\alias{nChoose2Inv}
\title{Inverse of the n-choose-2 function}
\usage{
nChoose2Inv(m)
}
\arguments{
\item{m}{a vector coercible to integer}
}
\description{
Inverse of the n-choose-2 function
}
\note{
No checks are made for non-integer input or output.
}
\examples{
nChoose2Inv(choose(1:10, 2)) # = 1:10
}
|
58bce9df0671bf901b8a567e64af172cd3bef66d
|
424a109c5f16ab0417c7f9ecc4fded3c0f38ae14
|
/thermal_profiles/scheduler/test_cvx_from_r.r
|
89675f84d5d6e2fd3de8e59f6ed332d7f0506783
|
[] |
no_license
|
adrianalbert/EnergyAnalytics
|
f784aca1e549be96b865db89f2190d2dd7566f83
|
39a5d5a6ee05a643ab723d4ef8d864282870cec8
|
refs/heads/master
| 2020-05-29T08:51:31.888860
| 2016-03-21T15:03:47
| 2016-03-21T15:03:47
| 7,062,053
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
test_cvx_from_r.r
|
n <- 50
p <- 10
x <- matrix(rnorm(n * p), n, p)
theta.true <- rnorm(p)
y <- x %*% theta.true + 0.1 * rnorm(n)
cvx.setup.dir <- "/usr/local/MATLAB/cvx/"
cvxcode <- paste("variables theta(p)",
"minimize(square_pos(norm(y - x * theta, 2)) / 2 + lam * norm(theta, 1))",
sep=";")
lasso <- CallCVX(cvxcode, const.vars=list(p=p, y=y, x=x, lam=2),
opt.var.names="theta", setup.dir=cvx.setup.dir)
# res = solve.QP(R1, dvec, Amat, bvec, meq=0, factorized=T)
# u = res$solution
# u = matrix(u, ncol = tau, byrow=T)
# val = as.numeric(res$value + t(g) %*% Q %*% g)
# if (is.null(.Object@SOLVER$cvx.setup.dir)) return(0)
#
# cvx.setup.dir <- "/usr/local/MATLAB/cvx/"
# cvxcode <- paste("variables u(n)",
# "minimize(-dvec' * u + u' * H * u / 2 + norm(u,1))",
# "subject to\n u >= -1; u <= 1",
# sep=";")
# res <- CallCVX(cvxcode, const.vars=list(dvec = dvec, H = as.matrix(H), n = npars),
# opt.var.names="u", setup.dir=cvx.setup.dir)
|
555c5fba61da92e515e0778f562228394117867a
|
0d0d0a8baa83af3ad38ea2e419544db094d4b9fd
|
/R/mort.al.R
|
1f7d530448501ddecbd009de6b21de31df49c174
|
[] |
no_license
|
cran/fishmethods
|
81162cf5bf35201c7ce85dd6e9815c4bca6b7646
|
ac49e77d9f2b5ee892eb5eae1807e802cddd4ac8
|
refs/heads/master
| 2023-05-17T13:53:37.128033
| 2023-04-27T07:33:01
| 2023-04-27T07:33:01
| 17,696,062
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,436
|
r
|
mort.al.R
|
mort.al<-function(relyr=NULL,tal=NULL,N=NULL, method=c(1,2,3),np=0,stper=NULL,nboot=500){
if(is.null(tal)) stop("No times-at-large data")
if(is.null(N)) stop("N data is missing")
if(length(relyr)!=length(tal)) stop("Length of year and tal differ")
if(length(N)!=length(tal)) stop("Length of N and tal differ")
if(length(N)!=length(relyr)) stop("Length of N and tal differ")
if(any(is.na(tal))) stop("Missing atlarge values are not allowed.")
if(any(is.na(N))) stop("Missing N values are not allowed.")
if(any(is.na(relyr))) stop("Missing relyr values are not allowed.")
if(is.null(np)|is.na(np)) stop("np must be numeric")
#Default setting
datar<-as.data.frame(cbind(relyr,tal,N))
datar$period<-"NA"
if(np==0) datar$period<-as.character(datar$relyr)
if(np>0){
if(length(stper)!=np) stop("The number of speriods does not match np")
if(length(stper)==np){
if(min(datar$relyr)!=stper[1]) stop("The first year in the dataset must be the first year in speriods ")
styrs<-c(stper,max(datar$relyr))
for(y in 1:np){
if(y<np){
for(l in 1:c(length(datar[,1]))) datar$period[l]<-ifelse(datar$relyr[l]>=styrs[y] & datar$relyr[l]<styrs[y+1],
c(paste(styrs[y],"-",c(styrs[y+1]-1),sep="")),datar$period[l])
}
if(y==np){
for(l in 1:c(length(datar[,1]))) datar$period[l]<-ifelse(datar$relyr[l]>=styrs[y] & datar$relyr[l]<=styrs[y+1],
c(paste(styrs[y],"-",styrs[y+1],sep="")),datar$period[l])
}
}
}
}
out<-data.frame(Method=NA,Period=NA,M=NA,MSE=NA,F=NA,FSE=NA,Z=NA,ZSE=NA)
cnt<-0
if(any(method==1)){
cnt<-cnt+1
getstats<-function(x){
c(((length(x)-1)/length(x))/mean(x),
((((length(x)-1)/length(x))/mean(x))/sqrt(length(x)-2))^2)
}
mcg<-aggregate(datar$tal,list(datar$period,datar$relyr),getstats)
mcg<-cbind(mcg[,1],mcg[,2],as.data.frame(mcg[,3]))
names(mcg)<-c("period","relyr","Z","VAR")
mcg<-aggregate(cbind(mcg$Z,mcg$VAR),list(mcg$period),mean,na.rm=T)
out[cnt:c(length(mcg[,1])),1]<-"McGarvey"
out[cnt:c(length(mcg[,1])),2]<-as.character(mcg[,1])
out[cnt:c(length(mcg[,1])),7]<-mcg$V1
out[cnt:c(length(mcg[,1])),8]<-sqrt(mcg$V2)
cnt<-c(length(mcg[,1]))
}
if(any(method==2)){
cnt<-cnt+1
bigN<-aggregate(datar$tal,list(datar$period,datar$relyr,datar$N),length)
names(bigN)<-c("period","relyr","N","n")
bigN$N<-as.numeric(as.character(bigN$N))
N1<-aggregate(cbind(bigN$N,bigN$n),list(bigN$period,bigN$relyr),sum)
names(N1)<-c("period","relyr","N","n")
npers<-length(unique(N1$period))
pers<-unique(N1$period)
st<-aggregate(datar$tal,list(datar$period,datar$relyr),sum,na.rm=T)
st$FF<-(N1$n)^2/(N1$N*st$x)
st$M<-((N1$N-N1$n)*N1$n)/(N1$N*st$x)
st$Z<-st$FF+st$M
stats<-NULL
for(t in 1:nboot){
temp3<-NULL
for(y in 1:c(length(N1[,1]))){
ntem<-data.frame(n=N1[y,"n"],N=N1[y,"N"])
newn<-rbinom(1,size=ntem$N,prob=ntem$n/ntem$N)
newn<-ifelse(newn==0,1,newn)
if(newn>0) {
storep<-data.frame(tal=rexp(newn,rate=st$Z[y]),period=st[y,1],relyr=st[y,2])
storep$tal<-ifelse(storep$tal<=0,min(datar$tal),storep$tal)
}
if(newn==0) storep<-data.frame(tal=0,period=st[y,1],relyr=st[y,2])
temp3<-rbind(temp3,storep)
}
bootst<-aggregate(temp3$tal,list(temp3$period,temp3$relyr),sum,na.rm=T)
bootn<-aggregate(temp3$tal,list(temp3$period,temp3$relyr),length)
bootst$FF<-(bootn$x)^2/(N1$N*bootst$x)
bootst$M<-((N1$N-bootn$x)*bootn$x)/(N1$N*bootst$x)
bootst$Z<-bootst$FF+bootst$M
stats<-rbind(stats,aggregate(cbind(bootst$FF,bootst$M,bootst$Z),list(bootst[,1],bootst[,2]),mean,na.rm=T))
}
bootse<-aggregate(cbind(stats[,3],stats[,4],stats[,5]),list(stats[,1],stats[,2]),sd,na.rm=T)
bootse<-aggregate(cbind(bootse[,3]^2,bootse[,4]^2,bootse[,5]^2),list(as.character(bootse[,1])),mean,na.rm=T)
bootse[,2:4]<-sqrt(bootse[,2:4])
means<-aggregate(cbind(st$FF,st$M,st$Z),list(st[,1]),mean,na.rm=T)
out[cnt:c(npers+cnt-1),1]<-"Gulland"
out[cnt:c(npers+cnt-1),2]<-as.character(bootse[,1])
out[cnt:c(npers+cnt-1),3]<-means[,3]
out[cnt:c(npers+cnt-1),4]<-bootse[,3]
out[cnt:c(npers+cnt-1),5]<-means[,2]
out[cnt:c(npers+cnt-1),6]<-bootse[,2]
out[cnt:c(npers+cnt-1),7]<-means[,4]
out[cnt:c(npers+cnt-1),8]<-bootse[,4]
cnt<-(npers+cnt-1)
}
return(out)
}
|
15ffe3d7d9ddd3456723bc48085306fc134e73d9
|
30f6bd1c17b55828b00c2e35ef89ea37a8dec03a
|
/man/dp.Rd
|
1158fd7bed63b9473efcf136969d2e94cfc7c58b
|
[] |
no_license
|
cran/gmvalid
|
011504e32a9ed7bbc30fc425fce48fa548ed38de
|
8d697d6d331b1f919ccb19cbc2690494feb42d91
|
refs/heads/master
| 2021-01-19T20:18:43.393160
| 2012-10-14T00:00:00
| 2012-10-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,081
|
rd
|
dp.Rd
|
\name{dp}
\alias{dp}
\docType{data}
\title{ Death penalty example of Simpson's paradox }
\description{
Well known example to illustrate Simpson's paradox.
The data set shows that ignoring victim's race lead to a different conclusion
than including victim's race in the analysis.
}
\usage{data(dp)}
\format{
A data frame with 326 observations on the following 3 variables.
\describe{
\item{\code{Defendants.Race}}{A factor with levels \code{white} and \code{black}}
\item{\code{Victims.Race}}{A factor with levels \code{white} and \code{black}}
\item{\code{Death.Penalty}}{A factor with levels \code{yes} and \code{no}}
}
}
\source{
Radelet ML (1981) \emph{Racial characteristics and the imposition of the Death penalty.}
American Sociological Review, 46(6):918-927.
}
\examples{
data(dp)
## Graphical model analysis shows that 'defendant's race' is
## independent from 'death penalty' given 'victim's race'.
\dontrun{
## until CoCo is available again ...
gm.analysis(dp,program="coco",recursive=TRUE)
}
}
\keyword{datasets}
|
a7e6f60735d330101364025ebc8dcb58cd3bc22f
|
c5477cf703076f8fab57215a76d7dfceadbe78f7
|
/codes/R/preprocessing/getgene_trratios_excUnexpr.R
|
3e1d26645d77de9d4ccf20ce01bb6147e87f6bf5
|
[
"MIT"
] |
permissive
|
PROBIC/diffsplicing
|
79ea61c7ca5bdedaee00159b801c22a889f56c6e
|
09b5c846de8834696c15459816e0a1916efa8b44
|
refs/heads/master
| 2021-01-22T14:25:27.216150
| 2017-01-11T13:43:40
| 2017-01-11T13:43:40
| 46,413,932
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,322
|
r
|
getgene_trratios_excUnexpr.R
|
getgene_trratios_excUnexpr <-
function(indexFile,mcmcFileName,noSkip,start_line,end_line,sep="\t") {
source("repmat.R")
#
source("readData.R")
source("readVarData.R")
dataFileNames=list('t0000.rpkmwrong_new_MeanTecVar_abstr','t0005.rpkmwrong_new_MeanTecVar_abstr','t0010.rpkmwrong_new_MeanTecVar_abstr','t0020.rpkmwrong_new_MeanTecVar_abstr','t0040.rpkmwrong_new_MeanTecVar_abstr\
','t0080.rpkmwrong_new_MeanTecVar_abstr','t0160.rpkmwrong_new_MeanTecVar_abstr','t0320.rpkmwrong_new_MeanTecVar_abstr','t0640.rpkmwrong_new_MeanTecVar_abstr','t1280.rpkmwrong_new_MeanTecVar_abstr')
meanInd=list(1)
varInd=list(2)
s1=1
s2=95309
dat=readData(dataFileNames,s1,s2,meanInd,varInd)
M=dat$mean
maxM=as.matrix(apply(M,1,max))
exc_ind=as.matrix(which(maxM<(-1.2)))
#
noLines=end_line-start_line+1
I=read.table(indexFile,nrows=noLines)
I=as.matrix(I)
N=max(I) # number of genes: 3811
mcmc_data=read.table(mcmcFileName,skip=noSkip,nrows=noLines)
mcmc_data=as.matrix(mcmc_data)
#
mcmc_data[exc_ind,]=0
#
J=ncol(mcmc_data) # number of MCMC samples in the data file: 500.
tr_ratios=matrix(0,noLines,J)
tr_levels=matrix(0,noLines,J)
gene_levels=matrix(0,N,J)
for (i in 1:N) {
tr_inds=which(I %in% i)
no_tr=length(tr_inds)
if (no_tr>1) {
tr_expr=mcmc_data[tr_inds,]
gene_expr=matrix(colSums(tr_expr),1,J)
} else {
tr_expr=as.matrix(mcmc_data[tr_inds,])
gene_expr=as.matrix(tr_expr)
}
tr_ratios[tr_inds,]=tr_expr/(repmat(gene_expr,no_tr,1))
tr_levels[tr_inds,]=tr_expr
gene_levels[i,]=gene_expr
}
genelevelsFileName=paste(mcmcFileName,"_gene_excUnexpr",sep="")
trratiosFileName=paste(mcmcFileName,"_trratios_excUnexpr",sep="")
trlevelsFileName=paste(mcmcFileName,"_abstr_excUnexpr",sep="")
write.table(gene_levels,file=genelevelsFileName,quote=F,sep='\t',col.names=FALSE,row.names=FALSE)
write.table(tr_ratios,file=trratiosFileName,quote=F,sep='\t',col.names=FALSE,row.names=FALSE)
write.table(tr_levels,file=trlevelsFileName,quote=F,sep='\t',col.names=FALSE,row.names=FALSE)
}
|
db5733fd5b0b12a5f7e05d4e114c910bb17dd567
|
39dc612b162490cba77b0a05003c5713c1f98765
|
/man/mgFind.Rd
|
7ae13b274c2560a6e94a6dec6c0e32fcd768a84b
|
[] |
no_license
|
cran/mugnet
|
2865fc0811f224bad965b72ba67c62035c77d8e8
|
0d1e7617faa2d3c3e3949bbdf1a25015b14da18a
|
refs/heads/master
| 2021-01-10T19:34:06.179290
| 2013-12-11T00:00:00
| 2013-12-11T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
rd
|
mgFind.Rd
|
\name{mgFind-method}
\alias{mgFind}
\alias{mgFind,catNetworkEvaluate-method}
\title{Find Network by Complexity}
\description{
This is a model selection routine that finds a network in a set of networks for a given complexity.
}
\usage{
mgFind(object, complexity = 0, alpha=0, factor=1)
}
\arguments{
\item{object}{\code{catNetworkEvaluate}}
\item{complexity}{an \code{integer}, target complexity}
\item{alpha}{a \code{character} or \code{numeric}}
\item{factor}{a \code{numeric}}
}
\details{The complexity must be at least the number of nodes of the networks. If no network with the requested complexity exists in the list, then the one with the closest complexity is returned.
Alternatively, one can apply some standard model selection with \code{alpha}="BIC" and \code{alpha}=AIC.
}
\value{A \code{mgNetwork} object.}
\author{N. Balov}
\keyword{methods}
|
9af652d17e43d2165c2f54b76f2a67b2b4db6b62
|
8919f1a0cdbfb0cea6c7f8ba518df14c54f70d44
|
/man/file_duration.Rd
|
7df9f11a0dd15303b71d1417a8de12d4abaa52f0
|
[] |
no_license
|
rethomics/tempaural
|
328846eead266244f24465eca637a32eb717f01e
|
4b76848c7119497637d42453316901532641038c
|
refs/heads/master
| 2022-12-23T08:30:01.734034
| 2020-09-29T08:31:52
| 2020-09-29T08:31:52
| 270,756,079
| 0
| 0
| null | 2020-09-29T08:31:53
| 2020-06-08T17:12:55
|
R
|
UTF-8
|
R
| false
| true
| 361
|
rd
|
file_duration.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/audio_utils.R
\name{file_duration}
\alias{file_duration}
\title{Retrieve total audio file duration}
\usage{
file_duration(path)
}
\arguments{
\item{path}{the path to the file}
}
\value{
a number of seconds
}
\description{
Uses ffprobe to estimate the duration of an audio resource
}
|
366ef9e8ea17dc98b0f84107a7fc5395d75a83f9
|
d69a4bc84e9a9a3fcc58d9e117f9f43b08be52d2
|
/4-fst_calc_and_plot-lmlc.R
|
53f1ed9a66c278b5add513b358ab35e591c900e8
|
[] |
no_license
|
ChristieLab/sea_lamprey_adaptation
|
a8c66db089c348b40cd52b72f52c27db901954d6
|
713a923d7d6a1db8d21b7d2634483445b46bdd2a
|
refs/heads/master
| 2021-02-14T12:13:01.307589
| 2020-12-07T00:39:53
| 2020-12-07T00:39:53
| 244,803,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,299
|
r
|
4-fst_calc_and_plot-lmlc.R
|
#########################################################
## calculate Fst between Lake Michigan and Lake Champlain
#########################################################
## Per-locus Weir & Cockerham-style F-statistics computed from genotype
## counts (G11..G44, a 4-allele genotype matrix) and allele frequencies
## (freq1..freq4) of the two populations:
##   lm = Lake Michigan, lc = Lake Champlain.
install.packages("splitstackshape",repos="https://repo.miserver.it.umich.edu/cran/",INSTALL_opts=c('--no-lock'))
library(splitstackshape)
# Per-population genotype/frequency tables, one row per locus
flm<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fh-lm.csv")
flc<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fh-lc.csv")
# Keep only the genotype counts and allele frequencies, then join the two
# populations on locus so all per-locus quantities line up column-wise
flmtemp<-data.frame(flm$locus,flm$G11,flm$G21,flm$G22,flm$G31,flm$G32,flm$G33,flm$G41,flm$G42,flm$G43,flm$G44,flm$freq1,flm$freq2,flm$freq3,flm$freq4)
flctemp<-data.frame(flc$locus,flc$G11,flc$G21,flc$G22,flc$G31,flc$G32,flc$G33,flc$G41,flc$G42,flc$G43,flc$G44,flc$freq1,flc$freq2,flc$freq3,flc$freq4)
flmlc<-merge(flmtemp,flctemp,by.x="flm.locus",by.y="flc.locus")
# Sample sizes (number of genotyped individuals) per population per locus
flmlc$nlm<-(flmlc$flm.G11+flmlc$flm.G21+flmlc$flm.G22+flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G33+flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43+flmlc$flm.G44)
flmlc$nlc<-(flmlc$flc.G11+flmlc$flc.G21+flmlc$flc.G22+flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G33+flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43+flmlc$flc.G44)
# r = number of populations compared
r<-2
# nbar = mean sample size; nc = variance-corrected sample size
flmlc$nbar<-(flmlc$nlm+flmlc$nlc)/r
flmlc$nc<-(r*flmlc$nbar-((flmlc$nlm^2+flmlc$nlc^2)/(r*flmlc$nbar)))/(r-1)
# pbar_k = weighted mean frequency of allele k across the two populations
flmlc$pbar1<-(flmlc$nlm*flmlc$flm.freq1+flmlc$nlc*flmlc$flc.freq1)/(r*flmlc$nbar)
flmlc$pbar2<-(flmlc$nlm*flmlc$flm.freq2+flmlc$nlc*flmlc$flc.freq2)/(r*flmlc$nbar)
flmlc$pbar3<-(flmlc$nlm*flmlc$flm.freq3+flmlc$nlc*flmlc$flc.freq3)/(r*flmlc$nbar)
flmlc$pbar4<-(flmlc$nlm*flmlc$flm.freq4+flmlc$nlc*flmlc$flc.freq4)/(r*flmlc$nbar)
# ssq_k = sample variance of allele-k frequency over populations
flmlc$ssq1<-(flmlc$nlm*(flmlc$flm.freq1-flmlc$pbar1)^2+flmlc$nlc*(flmlc$flc.freq1-flmlc$pbar1)^2)/((r-1)*flmlc$nbar)
flmlc$ssq2<-(flmlc$nlm*(flmlc$flm.freq2-flmlc$pbar2)^2+flmlc$nlc*(flmlc$flc.freq2-flmlc$pbar2)^2)/((r-1)*flmlc$nbar)
flmlc$ssq3<-(flmlc$nlm*(flmlc$flm.freq3-flmlc$pbar3)^2+flmlc$nlc*(flmlc$flc.freq3-flmlc$pbar3)^2)/((r-1)*flmlc$nbar)
flmlc$ssq4<-(flmlc$nlm*(flmlc$flm.freq4-flmlc$pbar4)^2+flmlc$nlc*(flmlc$flc.freq4-flmlc$pbar4)^2)/((r-1)*flmlc$nbar)
# h_k = per-population observed heterozygote frequency for allele k
# (counts of genotypes carrying allele k once, divided by sample size)
flmlc$flm.h1<-(flmlc$flm.G21+flmlc$flm.G31+flmlc$flm.G41)/(flmlc$flm.G11+flmlc$flm.G21+flmlc$flm.G22+flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G33+flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43+flmlc$flm.G44)
flmlc$flc.h1<-(flmlc$flc.G21+flmlc$flc.G31+flmlc$flc.G41)/(flmlc$flc.G11+flmlc$flc.G21+flmlc$flc.G22+flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G33+flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43+flmlc$flc.G44)
flmlc$flm.h2<-(flmlc$flm.G21+flmlc$flm.G32+flmlc$flm.G42)/(flmlc$flm.G11+flmlc$flm.G21+flmlc$flm.G22+flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G33+flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43+flmlc$flm.G44)
flmlc$flc.h2<-(flmlc$flc.G21+flmlc$flc.G32+flmlc$flc.G42)/(flmlc$flc.G11+flmlc$flc.G21+flmlc$flc.G22+flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G33+flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43+flmlc$flc.G44)
flmlc$flm.h3<-(flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G43)/(flmlc$flm.G11+flmlc$flm.G21+flmlc$flm.G22+flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G33+flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43+flmlc$flm.G44)
flmlc$flc.h3<-(flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G43)/(flmlc$flc.G11+flmlc$flc.G21+flmlc$flc.G22+flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G33+flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43+flmlc$flc.G44)
flmlc$flm.h4<-(flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43)/(flmlc$flm.G11+flmlc$flm.G21+flmlc$flm.G22+flmlc$flm.G31+flmlc$flm.G32+flmlc$flm.G33+flmlc$flm.G41+flmlc$flm.G42+flmlc$flm.G43+flmlc$flm.G44)
flmlc$flc.h4<-(flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43)/(flmlc$flc.G11+flmlc$flc.G21+flmlc$flc.G22+flmlc$flc.G31+flmlc$flc.G32+flmlc$flc.G33+flmlc$flc.G41+flmlc$flc.G42+flmlc$flc.G43+flmlc$flc.G44)
# hbar_k = weighted mean heterozygote frequency across populations
flmlc$hbar1<-(flmlc$nlm*flmlc$flm.h1+flmlc$nlc*flmlc$flc.h1)/(r*flmlc$nbar)
flmlc$hbar2<-(flmlc$nlm*flmlc$flm.h2+flmlc$nlc*flmlc$flc.h2)/(r*flmlc$nbar)
flmlc$hbar3<-(flmlc$nlm*flmlc$flm.h3+flmlc$nlc*flmlc$flc.h3)/(r*flmlc$nbar)
flmlc$hbar4<-(flmlc$nlm*flmlc$flm.h4+flmlc$nlc*flmlc$flc.h4)/(r*flmlc$nbar)
# a, b, c = the among-population, among-individual-within-population, and
# within-individual variance components, one set per allele
flmlc$a1<-(flmlc$nbar/flmlc$nc)*(flmlc$ssq1-(1/(flmlc$nbar-1))*(flmlc$pbar1*(1-flmlc$pbar1)-(r-1)*flmlc$ssq1/r-1/4*flmlc$hbar1))
flmlc$a2<-(flmlc$nbar/flmlc$nc)*(flmlc$ssq2-(1/(flmlc$nbar-1))*(flmlc$pbar2*(1-flmlc$pbar2)-(r-1)*flmlc$ssq2/r-1/4*flmlc$hbar2))
flmlc$a3<-(flmlc$nbar/flmlc$nc)*(flmlc$ssq3-(1/(flmlc$nbar-1))*(flmlc$pbar3*(1-flmlc$pbar3)-(r-1)*flmlc$ssq3/r-1/4*flmlc$hbar3))
flmlc$a4<-(flmlc$nbar/flmlc$nc)*(flmlc$ssq4-(1/(flmlc$nbar-1))*(flmlc$pbar4*(1-flmlc$pbar4)-(r-1)*flmlc$ssq4/r-1/4*flmlc$hbar4))
flmlc$b1<-(flmlc$nbar/(flmlc$nbar-1))*(flmlc$pbar1*(1-flmlc$pbar1)-(r-1)*flmlc$ssq1/r-(2*flmlc$nbar-1)*flmlc$hbar1/(4*flmlc$nbar))
flmlc$b2<-(flmlc$nbar/(flmlc$nbar-1))*(flmlc$pbar2*(1-flmlc$pbar2)-(r-1)*flmlc$ssq2/r-(2*flmlc$nbar-1)*flmlc$hbar2/(4*flmlc$nbar))
flmlc$b3<-(flmlc$nbar/(flmlc$nbar-1))*(flmlc$pbar3*(1-flmlc$pbar3)-(r-1)*flmlc$ssq3/r-(2*flmlc$nbar-1)*flmlc$hbar3/(4*flmlc$nbar))
flmlc$b4<-(flmlc$nbar/(flmlc$nbar-1))*(flmlc$pbar4*(1-flmlc$pbar4)-(r-1)*flmlc$ssq4/r-(2*flmlc$nbar-1)*flmlc$hbar4/(4*flmlc$nbar))
flmlc$c1<-1/2*flmlc$hbar1
flmlc$c2<-1/2*flmlc$hbar2
flmlc$c3<-1/2*flmlc$hbar3
flmlc$c4<-1/2*flmlc$hbar4
# F-statistics: components summed over the four alleles
flmlc$FST<-((flmlc$a1+flmlc$a2+flmlc$a3+flmlc$a4)/((flmlc$a1+flmlc$b1+flmlc$c1)+(flmlc$a2+flmlc$b2+flmlc$c2)+(flmlc$a3+flmlc$b3+flmlc$c3)+(flmlc$a4+flmlc$b4+flmlc$c4)))
flmlc$FIT<-(((flmlc$a1+flmlc$b1)+(flmlc$a2+flmlc$b2)+(flmlc$a3+flmlc$b3)+(flmlc$a4+flmlc$b4))/((flmlc$a1+flmlc$b1+flmlc$c1)+(flmlc$a2+flmlc$b2+flmlc$c2)+(flmlc$a3+flmlc$b3+flmlc$c3)+(flmlc$a4+flmlc$b4+flmlc$c4)))
flmlc$FIS<-((flmlc$b1+flmlc$b2+flmlc$b3+flmlc$b4)/((flmlc$b1+flmlc$c1)+(flmlc$b2+flmlc$c2)+(flmlc$b3+flmlc$c3)+(flmlc$b4+flmlc$c4)))
write.csv(flmlc,file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/flmlc.csv")
# Sanity check: scatter-plot the hand-computed FST against vcftools'
# Weir & Cockerham FST for the same loci
fst_lmlc_vcf<-read.table(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fst_lmlc.weir.fst",head=TRUE)
fst_lmlc_vcf$locus<-paste(fst_lmlc_vcf$CHROM,"-",fst_lmlc_vcf$POS)
fst_lmlc_vs<-merge(flmlc,fst_lmlc_vcf,by.x="flm.locus",by.y="locus")
pdf("Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fst_lmlc_vs.pdf")
plot(fst_lmlc_vs$FST,fst_lmlc_vs$WEIR_AND_COCKERHAM_FST,xlab="fst_lmlc_calc",ylab="fst_lmlc_vcf")
dev.off()
#####################################################################################
## Restrict to loci passing HWE filtering in both populations, then split
## the locus id ("scaf_XXXXX - POS") back into scaffold and position
data4_lm_hwe<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/data4-lm-hwe.csv")
data4_lc_hwe<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/data4-lc-hwe.csv")
hwe_lmlc<-merge(data4_lm_hwe,data4_lc_hwe,by.x="locus",by.y="locus")
flmlc_hwe<-merge(flmlc,hwe_lmlc,by.x="flm.locus",by.y="locus")
fst_lmlc_hwe<-data.frame(flmlc_hwe$flm.locus,flmlc_hwe$FST)
colnames(fst_lmlc_hwe)<-c("locus","FST")
#################################################################################################
# Original row-by-row strsplit loop, replaced by the vectorized cSplit calls below:
# for (i in 1:dim(fst_lmlc_hwe)[1]){
#   fst_lmlc_hwe$CHROM[i]<-strsplit(as.character(fst_lmlc_hwe$locus[i]),"-",fixed=TRUE)[[1]][1]
#   fst_lmlc_hwe$SCAF[i]<-strsplit(as.character(fst_lmlc_hwe$CHROM[i]),"_",fixed=TRUE)[[1]][2]
#   fst_lmlc_hwe$POS[i]<-strsplit(as.character(fst_lmlc_hwe$locus[i]),"-",fixed=TRUE)[[1]][2]
# }
#################################################################################################
fst_lmlc_hwe_1<-cSplit(fst_lmlc_hwe,"locus",sep = "-")
fst_lmlc_hwe_2<-cSplit(fst_lmlc_hwe_1,"locus_1",sep = "_")
# Rebuild zero-padded scaffold names and the "CHROM - POS" locus id
fst_lmlc_hwe_2$CHROM<-sprintf("scaf_%05d",fst_lmlc_hwe_2$locus_1_2)
fst_lmlc_hwe_2$locus<-paste(fst_lmlc_hwe_2$CHROM,"-",fst_lmlc_hwe_2$locus_2)
fst_lmlc_hwe_2$SCAF<-sprintf("%05d",fst_lmlc_hwe_2$locus_1_2)
fst_lmlc_hwe_3<-data.frame(fst_lmlc_hwe_2$locus,fst_lmlc_hwe_2$FST,fst_lmlc_hwe_2$CHROM,fst_lmlc_hwe_2$SCAF,fst_lmlc_hwe_2$locus_2)
colnames(fst_lmlc_hwe_3)<-c("locus","FST","CHROM","SCAF","POS")
# Sort by scaffold then position, drop loci with undefined FST, and save
fstlmlchwe<-fst_lmlc_hwe_3[order(fst_lmlc_hwe_3$SCAF,fst_lmlc_hwe_3$POS),]
fstlmlchwe<-fstlmlchwe[fstlmlchwe$FST!="NaN",]
write.csv(fstlmlchwe,file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe.csv")
#######################
## plot Fst by scaffold
#######################
## ZFST: standardize per-locus FST to z-scores, then plot against a
## cumulative genomic coordinate so scaffolds appear side by side.
fstlmlchwe<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe.csv")
avgFst_lmlc_hwe<-colMeans(as.matrix(fstlmlchwe$FST))
sdFst_lmlc_hwe<-sd(fstlmlchwe$FST)
fstlmlchwe$ZFST<-(fstlmlchwe$FST-avgFst_lmlc_hwe)/sdFst_lmlc_hwe
# Keep only the first 90 scaffolds for plotting
fstlmlchwe90<-fstlmlchwe[fstlmlchwe$SCAF<91,]
# Cumulative offset per scaffold: offset of scaffold k is the summed max
# positions of scaffolds 1..k-1, plus k to leave a 1-unit gap between them.
# NOTE(review): assumes every scaffold 1..89 has at least one SNP left
# after filtering; max() over an empty subset would return -Inf — confirm.
max.pos<-sapply(1:89,function(i){(max(fstlmlchwe90$POS[fstlmlchwe90$SCAF==i]))})
add1<-c(0,cumsum(max.pos))
add2<-c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90)
added.length<-(add1+add2)
fstlmlchwe90$gp<-fstlmlchwe90$POS+added.length[fstlmlchwe90$SCAF]
#########################################
# Original per-row loop for alternating scaffold colors, replaced by the
# vectorized modulo assignment below:
# for (i in 1:dim(fstlmlchwe90)[1]){
#   if((fstlmlchwe90$SCAF[i] %% 2) == 0) {
#     fstlmlchwe90$color[i]<-"darkorchid3"
#   } else {
#     fstlmlchwe90$color[i]<-"black"
#   }}
#########################################
# Alternate point color by scaffold parity so adjacent scaffolds are visible
fstlmlchwe90$color<-((fstlmlchwe90$SCAF%%2)==0)
fstlmlchwe90$color[fstlmlchwe90$color==TRUE]<-"darkorchid3"
fstlmlchwe90$color[fstlmlchwe90$color==FALSE]<-"black"
## plot(fstlmlchwe90$gp,fstlmlchwe90$ZFST,xlab="Genomic Position",ylab="Z(FST)",xlim=c(0,900000000),ylim=c(-2,10),col=fstlmlchwe90$color,pch=20)
## abline(h=5,col="red",lty=2)
write.csv(fstlmlchwe90,file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe90.csv")
write.csv(fstlmlchwe,file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe_zfst.csv")
##############################
## plot ZFST and ZFST-window
##############################
## Final Manhattan-style plot of Z(FST) with an outlier threshold at Z = 5
fstlmlchwe90<-read.csv(file="Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe90.csv")
pdf("Z:/gatksep/rnavdna/pmarinus1/dp5af0025VF/popmuscle-wo408/fstlmlchwe90.pdf")
plot(fstlmlchwe90$gp,fstlmlchwe90$ZFST,xlab="Genomic Position",ylab="Z(FST)",xlim=c(0,900000000),ylim=c(-2,10),col=fstlmlchwe90$color,pch=20)
abline(h=5,col="red",lty=2)
dev.off()
|
c2ccc4b93846071306bb4b3eeaffc12f99bb4093
|
27c6fd520f63e430400077f3cf5ccd355e3901a9
|
/man/comethyl.Rd
|
80cd977baab094acb6fc0dc02eccfdb9d11236a4
|
[
"MIT"
] |
permissive
|
cemordaunt/comethyl
|
cc6e1eb3328711a688553298fcf3e40b6d64b8d8
|
9e6616732093743a64e508e1bd2cf753472bc4d5
|
refs/heads/master
| 2023-05-01T19:21:16.619414
| 2023-04-16T22:55:52
| 2023-04-16T22:55:52
| 232,185,438
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 933
|
rd
|
comethyl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comethyl.R
\docType{package}
\name{comethyl}
\alias{comethyl}
\title{comethyl: An R package for weighted region comethylation network analysis}
\description{
Comethyl builds upon the \pkg{WGCNA} package to identify and interpret modules
of comethylated regions from whole-genome bisulfite sequencing data. Regions
are defined from clusters of CpG sites or from genomic annotations, and then
percent methylation values are used to identify comethylation modules.
Interesting modules are identified and explored by comparing with sample
traits and examining functional enrichments. Results are then visualized with
high-quality, editable plots from \pkg{ggplot2}.
}
\seealso{
\itemize{
\item GitHub: \url{https://github.com/cemordaunt/comethyl}
\item Documentation and vignettes: \url{https://cemordaunt.github.io/comethyl/}
}
}
\author{
Charles Mordaunt
}
|
e112667b341c51ecc0f50736911376aae2e995cf
|
263d98677c166025ad5b76ad1adc00f45e2c0a74
|
/matrix-visualize-afterCOVID19.R
|
93cfdc3346b5ba7d93d3d20c0ac699fa3c22bb6e
|
[] |
no_license
|
htb2m/Mid-TN-STEM-Hub-survey
|
9bc9c9a04d41b74105fd7c746e2cc0f3d5db3706
|
e1289a223cd2ea8fdbc1975fa095c8f104635c48
|
refs/heads/master
| 2022-11-20T11:03:17.892216
| 2020-07-08T21:07:15
| 2020-07-08T21:07:15
| 275,248,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,816
|
r
|
matrix-visualize-afterCOVID19.R
|
## Diverging-stacked-bar visualization of Likert-scale agreement items
## from the "Returning to School After COVID-19" workshop survey.
library(tidyverse)
library(likert)
library(xtable)
library(sjPlot)
library(HH)          # provides the likert() plotting function used below
library(data.table)
library(wesanderson)
library(here)
library(knitr)
### Loading data — keep only completed survey responses
raw <- read.csv(here::here("data", "Mid-TN-STEM-Hub-afterCOVID19.csv")) %>%
filter(Finished == TRUE)
### Gathering columns: long format with one row per (item, response)
### (gather() is superseded by pivot_longer(), kept here as-is)
data <- raw %>%
dplyr::select(AGREE_1:AGREE_6) %>%
gather(measure, response)
### Creating the contingency table: items as rows, response levels as columns
contingencytable <- table(data$measure, data$response) %>% as.data.frame.matrix()
# Reorder columns from most negative to most positive agreement level
contingencytable <- contingencytable %>%
data.table::setcolorder(c("Strongly disagree",
"Disagree",
"Neither agree nor disagree",
"Agree",
"Strongly agree"))
# Changing row name: replace AGREE_1..AGREE_6 with the item wording
# (embedded \n breaks keep the y-axis labels compact; order must match
# the row order of the contingency table)
rownames(contingencytable) <- c("The workshop was an \neffective use of my time",
"The workshop was understand-\nable, clear, and professional" ,
"The workshop leaders \nwere knowledgeable",
"The workshop was relevant \nto my professional context",
"The workshop addressed \na need I have as an educator",
"The workshop will help me to \nimprove my professional practice")
# adding rownames to columns so HH::likert can use "Measure" in the formula
contingencytable <- tibble::rownames_to_column(contingencytable, var = "Measure")
### Visualization: diverging stacked bars centered on the neutral level
### (ReferenceZero=3), shown as percentages
likert(Measure ~ ., data=contingencytable, ylab=NULL,
ReferenceZero=3, as.percent=TRUE,
positive.order=FALSE,
main = list("Returning to School After COVID-19 Workshop Evaluation",x=unit(.5, "npc")),
sub= list("Agreement level",x=unit(.5, "npc")),
xlim=c(-10,0,20,40,60,80,100))
|
3b73b9f5f325aa00c84ced4b562542b1e3bbfa14
|
6da149100fc6ad2c8cfd6cd11369f72da7390ecf
|
/Plot2.R
|
e079526781ca5b6cfa950387da42a80ffc9f3ac1
|
[] |
no_license
|
corinnehelman/Datasciencecoursera
|
9dfa3f868f4718d858a10d3c469fedee490268c0
|
8e8913dce804a26e5f6775e8f059b844fcfdad51
|
refs/heads/master
| 2021-01-11T09:30:10.408331
| 2017-04-27T23:14:42
| 2017-04-27T23:14:42
| 81,219,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
Plot2.R
|
## Plot 2: line plot of Global Active Power for 2007-02-01 and 2007-02-02,
## saved as plot2.png (480x480).
setwd("C:/Users/helmac1/Documents/Personal/Coursera/Exploratory Data Program 1")
# read the file given in the assignment (semicolon-separated)
data <- read.csv('household_power_consumption.txt', header = TRUE, sep = ';')
# merge column 1 and column 2 to create a single character timestamp
data$Datetime <- paste(as.character(data[, 1]), data[, 2])
# reformat the first column as a Date so it can be filtered on
data[, 1] <- as.Date(data$Date, '%d/%m/%Y')
# Only use the data collected between 1-2-2007 and 2-2-2007
data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
# make sure the power column is numeric (non-numeric entries become NA)
data[, 3] <- as.numeric(as.character(data[, 3]))
# create a POSIXlt datetime object for the x axis
datetime <- strptime(data$Datetime, "%d/%m/%Y %H:%M:%S")
# Open the png device directly instead of dev.copy(): dev.copy() errors when
# no screen device is open, and copying is unnecessary when we can plot
# straight into the file device.
png(file = "plot2.png", width = 480, height = 480)
# line graph: time on the x axis, Global Active Power on the y axis
# (type must be "l"; the original "line" only worked via partial matching)
plot(datetime, data$Global_active_power,
     ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
dev.off()
|
86ee3134ed606b7a69bd51b05716b4da0806348b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/flatxml/examples/fxml_importXMLFlat.Rd.R
|
11ee03398d0c80d998963ccfa4a52659746cf209
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
fxml_importXMLFlat.Rd.R
|
library(flatxml)
### Name: fxml_importXMLFlat
### Title: Handling flat XML files
### Aliases: fxml_importXMLFlat

### ** Examples

# Locate the bundled example file (UN Statistics Division population data)
example <- system.file("worldpopulation.xml", package="flatxml")
# Flatten the XML document into a data frame
xml.dataframe <- fxml_importXMLFlat(example)
|
e6cbfdf3762e1d16033ac472fc798840b6c232ea
|
3997fbf41072fac9f272cf8b235fb48f992a7b68
|
/GAP/Phase3/v2/Clustering_v2_Cor.R
|
71d29f59ad7de2eb8c30b0ca9c1b9d98bd8779b8
|
[] |
no_license
|
haoybl/Data-Analysis
|
873ebc76caed64d40dffeae87b119e6bd6fe2f57
|
a51117cf8e40b303a87972b1ef1cd58bd1d38159
|
refs/heads/master
| 2021-06-13T11:35:50.770320
| 2017-02-08T12:09:24
| 2017-02-08T12:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,088
|
r
|
Clustering_v2_Cor.R
|
# --------------------------------------------------------------------------------------------------------------
# Clustering_v2_Cor.R — build page-by-page correlation matrices for four web
# metrics (bounce rate, average time on page, visits, product page views).
# Each section reads a transposed metric table (pages as columns, weeks as
# rows), correlates the page columns, and writes the matrix to CSV.
# --------------------------------------------------------------------------------------------------------------
# || Bounce Rate ||
# --------------------------------------------------------------------------------------------------------------
Bounce_Rate_Raw <- read.table("C:/Yashwanth/Clustering/Bounce_rate.csv",
                              header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
colnames(Bounce_Rate_Raw)[1] <- "Pages"
# Pre-transposed copy of the same data (pages as columns) used for cor()
Bounce_Rate_Raw_T <-read.table("C:/Yashwanth/Clustering/Bounce_Trans.csv",
                               header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# --------------------------------------------------------------------------------------------------------------
# || Transpose data (exploratory; Demo is only inspected, not used later) ||
# --------------------------------------------------------------------------------------------------------------
Bounce_Rate <- data.frame(t(Bounce_Rate_Raw))
rownames(Bounce_Rate)[1] <- "Weeks"
Demo <- Bounce_Rate[1:10,1:5]
Demo <- cbind(Weeks=rownames(Demo),Demo)
rownames(Demo) <- NULL
#rm(list=ls())
# --------------------------------------------------------------------------------------------------------------
# || Correlation (drop the first, non-numeric identifier column) ||
# --------------------------------------------------------------------------------------------------------------
Bounce_Rate_Cor <- cor(Bounce_Rate_Raw_T[,-1])
write.csv(Bounce_Rate_Cor,"C:/Yashwanth/Clustering/Bounce_Rate_Cor.csv")
# --------------------------------------------------------------------------------------------------------------
# || Average Time on Page ||
# --------------------------------------------------------------------------------------------------------------
Avg_Time_On_Page <- read.table("C:/Yashwanth/Clustering/AvgTimeOnPage.csv",
                               header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# NOTE(review): this in-memory transpose is dead code — it is immediately
# overwritten by the read.table() of the pre-transposed CSV below.
Avg_Time_On_Page_T <- data.frame(t(Avg_Time_On_Page))
Avg_Time_On_Page_T <- read.table("C:/Yashwanth/Clustering/Avg_Time_On_Page_T.csv",
                                 header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# --------------------------------------------------------------------------------------------------------------
# || Correlation ||
# --------------------------------------------------------------------------------------------------------------
Avg_Time_On_Page_Cor <- cor(Avg_Time_On_Page_T[,-1])
write.csv(Avg_Time_On_Page_Cor,"C:/Yashwanth/Clustering/Avg_Time_On_Page_Cor.csv")
# --------------------------------------------------------------------------------------------------------------
# || Visits ||
# --------------------------------------------------------------------------------------------------------------
Visits <- read.table("C:/Yashwanth/Clustering/Visits.csv",
                     header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# NOTE(review): dead code — overwritten by the read.table() below, as above.
Visits_T <- data.frame(t(Visits))
Visits_T <- read.table("C:/Yashwanth/Clustering/Visits_T.csv",
                       header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# --------------------------------------------------------------------------------------------------------------
# || Correlation ||
# --------------------------------------------------------------------------------------------------------------
Visits_Cor <- cor(Visits_T[,-1])
write.csv(Visits_Cor,"C:/Yashwanth/Clustering/Visits_Cor.csv")
# --------------------------------------------------------------------------------------------------------------
# || Product Page Views ||
# --------------------------------------------------------------------------------------------------------------
Prod_Page_Views <- read.table("C:/Yashwanth/Clustering/ProductPageViews.csv",
                              header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# NOTE(review): dead code — overwritten by the read.table() below, as above.
Prod_Page_Views_T <- data.frame(t(Prod_Page_Views))
Prod_Page_Views_T <- read.table("C:/Yashwanth/Clustering/Prod_Page_Views_T.csv",
                                header = TRUE, sep = ",", quote = "\"", dec = "." ,fill=TRUE, comment.char="", as.is=TRUE)
# --------------------------------------------------------------------------------------------------------------
# || Correlation ||
# --------------------------------------------------------------------------------------------------------------
Prod_Page_Views_Cor <- cor(Prod_Page_Views_T[,-1])
write.csv(Prod_Page_Views_Cor,"C:/Yashwanth/Clustering/Prod_Page_Views_Cor.csv")
# --------------------------------------------------------------------------------------------------------------
|
e0a175b2247706ccb8118e12f9c3c5e41763b51e
|
9f014a3161bcf8a4dc5971e67c536c82210571a2
|
/script_logs/plotting_pca.R
|
e1babaccbe7315b2fb88ea56ae8067b04e5065a1
|
[] |
no_license
|
mas-agis/ch_at_selsig
|
c296681b9facd10492d219b3bffbeb5722be2f0a
|
3b1c7c2a22ff3e3511a676d75dfdf812562a1394
|
refs/heads/main
| 2023-08-27T16:44:39.507590
| 2021-11-11T08:06:17
| 2021-11-11T08:06:17
| 361,705,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,894
|
r
|
plotting_pca.R
|
## Plot PCA (PLINK eigenvectors) for Chinese, Austrian, and other cattle breeds
setwd("D:/maulana/third_project/pca/")
library(ggplot2)
library(dplyr)
# load dataset: one row per sample; FID column holds the breed name
data = read.table("filtered_multi_merged_pca.eigenvec",header=T, sep="\t")
data$Breed = data$FID
# adding group_id: numeric id per breed, kept as a factor
data$group_id = data %>% group_indices(Breed)
data$group_id = as.factor(data$group_id)
# remove several breeds due to max. 30 combinations of shape and color
# (equivalent to the four sequential subset() calls in the original)
data = subset(data, !(Breed %in% c("Hereford", "Bohai", "Boran", "Shorthorn")))
# reset rownames so the outlier can be dropped by positional row index
rownames(data) = 1:nrow(data)
# remove Brahman outlier (row 108 after the resets above) and Ogaden
data = data[-c(108),]
data = subset(data, Breed!="Ogaden")
unique(data$Breed)
## Plot PC1 and PC2 of all breeds
# (bare column names inside aes() instead of data$PC1 — same mapping,
# but robust to later faceting/subsetting)
gp <- ggplot(data, aes(x=PC1, y=PC2, group=Breed, color=Breed, shape=Breed)) +
  # NOTE(review): nlevels() returns 0 unless Breed is a factor — confirm
  # Breed is read as a factor, otherwise this manual scale is empty
  scale_shape_manual(values=1:nlevels(data$Breed)) +
  geom_hline(yintercept = 0, linetype="dotted") +
  geom_vline(xintercept = 0, linetype="dotted") +
  labs(x="Component 1", y=" Component 2", title = "pca of chinese and austrian")+
  geom_point(size=3)
gp
# plot color based on taurine/indicine/admixed data
data1 = read.table("breeds_group", col.names = c("Breed", "Subspecies"), sep = "\t")
#data1$Breed = as.factor(data1$Breed)
test = left_join(data, data1, by="Breed")
test = subset(test, Breed!="Dabieshan")
# BUG FIX: the joined column is "Subspecies" (capital S, set via col.names
# above); the original referenced test$subspecies, which does not exist and
# errors when assigned back into the data frame.
test$Subspecies = as.factor(as.character(test$Subspecies))
## Plot PC1 and PC2 of all breeds, colored by subspecies, shaped by breed
gp <- ggplot(test, aes(x=PC1, y=PC2, group=Subspecies, color=Subspecies, shape=Breed)) +
  scale_shape_manual(values=1:nlevels(data$Breed)) +
  geom_hline(yintercept = 0, linetype="dotted") +
  geom_vline(xintercept = 0, linetype="dotted") +
  labs(x="Component 1", y=" Component 2") + #, title = "pca of chinese and austrian")+
  geom_point(size=3)
gp + scale_color_manual(values=c("firebrick3", "yellow3", "green3"))
|
fb39a010bb86e22ab41012a75b15e52503b90052
|
40c486fff25ed3fcfe28bca1f7b3b85c768ff017
|
/misc/create_hex.R
|
ed4578e863cbc57b15a4922767d884e17c1f1af1
|
[
"MIT"
] |
permissive
|
baifengbai/carbonate
|
18264f1b6a71ed79315fbcca851abe3d97e4afb4
|
4d6007da6ef090255cbae92e788b3f211519fb67
|
refs/heads/master
| 2020-03-25T03:57:02.149021
| 2018-08-03T02:50:03
| 2018-08-03T02:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
create_hex.R
|
## Build hex stickers for the carbonate package: one from the source photo,
## and a second "dark" variant after negating/oil-painting the image.
imgurl <- 'https://www.mouthhealthy.org/~/media/MouthHealthy/Images/Articles/article_sparkling_water.jpg?h=307&la=en&w=460&hash=C5B22EB96AB5D4C5BFBDF360A745D108E2AF053F'
# Light sticker straight from the remote image
hexSticker::sticker(imgurl, package="carbonate", p_size=8, s_x=1, s_y=.75, s_width=.6,
                    p_color = "grey",
                    h_color = "grey",
                    h_fill = '#FFFFFF',filename="misc/hex.png")
library(magick)
# Dark variant: negate + oil-paint the image first, save, then sticker it
# (the %>% pipe here comes from magick's re-export)
magick::image_read(imgurl)%>%image_negate()%>%image_oilpaint()%>%
  magick::image_write('misc/carbonate_black.png')
hexSticker::sticker('misc/carbonate_black.png', package="carbonate", p_size=8, s_x=1, s_y=.75, s_width=.6,
                    p_color = "white",
                    h_color = "white",
                    h_fill = 'black',filename="misc/hex_black.png")
# Downscale the dark sticker to a 150px thumbnail for the package docs
magick::image_read('misc/hex_black.png')%>%magick::image_scale('150')%>%magick::image_write('inst/figures/hex_black_small.png')
|
3779e8c53ad52a0fbfda11b6dff6ee8c81bab18f
|
ebbd0565053ea3fb0518f2a720bc20d5c76b0f1b
|
/R/utils-select.R
|
3cfbf02fecb470a4efa2560aa321301729c91522
|
[
"Apache-2.0"
] |
permissive
|
steffilazerte/bcdata
|
1b7ace9585ac77905510cc725dd85607879758c1
|
4be2a3005126b542349db8d8963f4dbc76d9b79d
|
refs/heads/master
| 2020-08-15T16:37:03.895918
| 2019-10-11T21:23:24
| 2019-10-11T21:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
utils-select.R
|
#' select
#'
#' Re-export of the dplyr verb so users of this package can call
#' \code{select()} without attaching dplyr themselves.
#'
#' See \code{dplyr::\link[dplyr]{select}} for details.
#'
#' @name select
#' @rdname select
#' @keywords internal
#' @export
#' @importFrom dplyr select
NULL
|
6d96d522d22c60a19cad2f006e7e7d248378d193
|
077ea46024a43957a2ea613b84317ff21cb7e21a
|
/R/idlist.r
|
7b4906ecea81ee207b33e9b3660dcf56d288c2c0
|
[
"MIT"
] |
permissive
|
gschofl/rentrez
|
789832d8654d75871b3fbd17dc217b4c41da933d
|
e604fa13f6eb2d138ba78156d0aaa45383ed4cb8
|
refs/heads/master
| 2020-06-04T08:29:22.741764
| 2013-11-29T15:02:21
| 2013-11-29T15:02:21
| 3,693,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,375
|
r
|
idlist.r
|
#' @include utils.r
#' @include eutil.r
NULL
# idList ------------------------------------------------------------
#' A list of Entrez UIDs that match a text query
#'
#' Holds either the UIDs themselves or, when stored on the Entrez History
#' server, the (queryKey, webEnv) pair identifying them (see the show
#' method below).
#'
#' @slot database Database from which the UIDs were retrieved.
#' @slot retstart The index of the first UID that is returned.
#' @slot retmax The number of UIDs out of the total number of
#' records that is returned.
#' @slot count The total number of records matching a query.
#' @slot queryTranslation The search term as translated by the
#' Entrez search system.
#' @slot queryKey Query key identifying the result set on the
#' Entrez History server.
#' @slot webEnv Web environment string for the Entrez History server.
#' @slot idList A list of primary UIDs.
#'
#' @classHierarchy
#' @classMethods
setClass("idList",
         representation(database = "character",
                        retmax = "numeric",
                        retstart = "numeric",
                        count = "numeric",
                        queryTranslation = "character",
                        queryKey = "numeric",
                        webEnv = "character",
                        idList = "character"),
         prototype(database = NA_character_,
                   retmax = NA_integer_,
                   retstart = NA_integer_,
                   count = NA_integer_,
                   queryTranslation = NA_character_,
                   queryKey = NA_integer_,
                   webEnv = NA_character_,
                   idList = NA_character_))
# accessor methods -------------------------------------------------------
setMethod("database", "idList", function(x) x@database)
setMethod("retmax", "idList", function(x) x@retmax)
setMethod("retstart", "idList", function(x) x@retstart)
setMethod("count", "idList", function(x) x@count)
setMethod("queryTranslation", "idList", function(x) x@queryTranslation)
setMethod("queryKey", "idList", function(x) x@queryKey)
setMethod("webEnv", "idList", function(x) x@webEnv)
setMethod("idList", "idList", function(x, db = TRUE) {
if (db) {
structure(x@idList, database = database(x))
} else {
x@idList
}
})
# length method ----------------------------------------------------------
setMethod("length", "idList", function (x) {
if (has_webenv(x)) {
retmax(x)
} else {
count(x)
}
})
# subsetting method ------------------------------------------------------
#' @autoImports
setMethod("[", c("idList", "numeric"),
function (x, i, j, ..., drop = TRUE) {
ids <- x@idList[i]
initialize(.Object=x, retmax = length(compactNA(ids)),
retstart = retstart(x) + i[1L] - 1L,
idList = ids)
})
# show methods ------------------------------------------------------------
setMethod("show", "idList",
function (object) {
if (has_webenv(object)) {
cat(sprintf("Web Environment for the %s database.\n",
sQuote(database(object))))
cat(sprintf("Number of UIDs stored on the History server: %s\n",
count(object)))
cat(sprintf("Query Key: %s\nWebEnv: %s\n",
queryKey(object), webEnv(object)))
} else {
cat(sprintf("List of UIDs from the %s database.\n",
sQuote(database(object))))
print(idList(object))
}
invisible(NULL)
})
|
e218efac8d2c1c81cf72771ddd328205a68df99d
|
7ac133f9871f201f7a956f3b239d8f0030907c06
|
/man/bounds_to_sf.Rd
|
25fbbc83f73511efa07934cf9852489bba10d3ce
|
[
"MIT"
] |
permissive
|
gopalpenny/anem
|
1029318ca01a6172b365ddb7d2181135d909d92c
|
f2ba63622e0e67a08423b20c5f09a34b6433f6d0
|
refs/heads/master
| 2021-07-09T21:43:07.502292
| 2020-12-16T15:25:36
| 2020-12-16T15:25:36
| 219,404,991
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 406
|
rd
|
bounds_to_sf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anem_geoprocessing.R
\name{bounds_to_sf}
\alias{bounds_to_sf}
\title{Get bounds as sf object}
\usage{
bounds_to_sf(bounds, crs)
}
\description{
Convert bounds object with bID, x1, y1, x2, y2 to sf object
}
\examples{
\dontrun{
bounds <- data.frame(bID=1,x1=1,y1=1,x2=3,y2=3)
bounds_to_sf(bounds,crs=4326)
}
}
\keyword{internal}
|
90067368a477f0b8339e86b201e91ded8287b21c
|
2e153ae8555be87036cab0d3b51c1beb60f531d7
|
/Scripts/05-purrr.R
|
4f35aa1dc035d6fba99af8751fe77ab529694c9b
|
[] |
no_license
|
brunolucian/CursoIntermediarioR
|
c721d817fe8b296645a8d54dfee69a816c564d42
|
f2c2ae0cd75a3e24f98eb89ade31996c95ae8b85
|
refs/heads/main
| 2023-06-21T12:44:17.238391
| 2021-07-29T19:49:28
| 2021-07-29T19:49:28
| 380,760,876
| 0
| 0
| null | 2021-07-29T19:49:29
| 2021-06-27T14:29:41
|
R
|
UTF-8
|
R
| false
| false
| 2,005
|
r
|
05-purrr.R
|
## Installing packages ---------------------------------
# NOTE: "Rtools" is not a CRAN package -- it is the Windows build
# toolchain, installed from its own installer -- so it was removed from
# the install.packages() call, which otherwise fails on it.
install.packages(c("devtools", "purrr"))
devtools::install_github("jennybc/repurrrsive")
library(repurrrsive)
library(tidyverse) # Or
library(purrr)
## 01 - map -------------------------------------------
length(sw_people)
sw_people[[1]]
sw_people[1]
# number of starships per character
map(sw_people, ~length(.x$starships))
## ---------------------------------------------------
## 02 - map_*() --------------------------------------
# typed variants return atomic vectors instead of lists
map_int(sw_people, ~ length(.x[["starships"]]))
map_chr(sw_people, ~ .x[["hair_color"]])
map_lgl(sw_people, ~ .x[["gender"]] == "male")
map_dbl(sw_people, ~ .x[["mass"]])
map(sw_people, ~ .x[["mass"]])
map_dbl(sw_people, ~ as.numeric(.x[["mass"]]))
map_chr(sw_people, ~ .x[["mass"]]) %>%
readr::parse_number(na = "unknown")
## 03 - challenges --------------------------------------
map(sw_films, "characters") %>%
map_int(length) %>%
set_names(map_chr(sw_films, "title")) %>%
sort()
sw_species[[1]]$eye_colors
map_chr(sw_species, "eye_colors") %>%
strsplit(", ") %>%
map_int(length) %>%
set_names(map_chr(sw_species, "name"))
map_lgl(sw_planets[[61]], ~ "unknown" %in% .x) %>%
sum()
map_int(sw_planets,
~ map_lgl(.x, ~ "unknown" %in% .x) %>% sum()) %>%
set_names(map_chr(sw_planets, "name")) %>%
sort(decreasing = TRUE)
## 04 - map2() ---------------------------------------
x <- list(1, "a", 3)
x %>%
walk(print)
x <- list(1, 2, 3)
modify(x, ~.+2)
# ---------------------------------------
gap_split_small <- gap_split[1:10]
countries <- names(gap_split_small)
ggplot(gap_split_small[[1]], aes(year, lifeExp)) +
geom_line() +
labs(title = countries[[1]])
# one plot per country, titled by country name
plots <- map2(gap_split_small, countries,
~ ggplot(.x, aes(year, lifeExp)) +
geom_line() +
labs(title = .y))
plots[[1]]
walk(plots, print)
walk2(.x = plots, .y = countries,
~ ggsave(filename = paste0(.y, ".pdf"), plot = .x))
file.remove(paste0(countries, ".pdf"))
|
77e7cb32259f3cd268715d0320f1ec9930e91c64
|
f5f142e469ba0526a2768a509630c8b5156b1fcb
|
/man/shuffle.Rd
|
88b91dd3f78c812ae97c8b7a71ff61db76541aef
|
[] |
no_license
|
JohnMCMa/dendextend
|
350ca633b439b8964eec739ba9247c9527ae37f4
|
1e25e5bf786d943b3aa651f4257336462187d43c
|
refs/heads/master
| 2021-01-18T16:05:01.686085
| 2017-03-30T14:15:29
| 2017-03-30T14:15:29
| 86,709,713
| 0
| 0
| null | 2017-03-30T14:06:03
| 2017-03-30T14:06:03
| null |
UTF-8
|
R
| false
| true
| 1,742
|
rd
|
shuffle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/untangle.R
\name{shuffle}
\alias{shuffle}
\alias{shuffle.default}
\alias{shuffle.dendlist}
\alias{shuffle.dendrogram}
\alias{shuffle.hclust}
\alias{shuffle.phylo}
\title{Random rotation of trees}
\usage{
shuffle(dend, ...)
\method{shuffle}{default}(dend, ...)
\method{shuffle}{dendrogram}(dend, ...)
\method{shuffle}{dendlist}(dend, which, ...)
\method{shuffle}{hclust}(dend, ...)
\method{shuffle}{phylo}(dend, ...)
}
\arguments{
\item{dend}{a tree object (\link{dendrogram}/\link{hclust}/\link[ape]{phylo})}
\item{...}{Ignored.}
\item{which}{an integer vector for indicating
which of the trees in the dendlist object should be plotted
default is missing, in which case all the dends in dendlist
will be shuffled}
}
\value{
A randomly rotated tree object
}
\description{
'shuffle' randomly rotates ("shuffles") a tree, changing its presentation
while preserving its topology.
'shuffle' is based on \link[dendextend]{rotate} and through its methods can
work for any of the major tree objects in R (\link{dendrogram}/\link{hclust}/\link[ape]{phylo}).
This function is useful in combination with \link{tanglegram} and \link{entanglement}.
}
\details{
'shuffle' is a function that randomly rotates ("shuffles") a tree.
a dendrogram leaves order (by means of rotation)
}
\examples{
dend <- USArrests \%>\% dist \%>\% hclust \%>\% as.dendrogram
set.seed(234238)
dend2 <- shuffle(dend)
tanglegram(dend, dend2, margin_inner=7)
entanglement(dend, dend2) # 0.3983
# although these ARE the SAME tree:
tanglegram(sort(dend), sort(dend2), margin_inner=7)
}
\seealso{
\code{\link{tanglegram}}, \code{\link{entanglement}},
\code{\link[dendextend]{rotate}}
}
|
c55f6c433dd8b384574c0cbe501a7c2376500e12
|
8f25e74f97a270225c270f87d1cc48e8fc3cfd51
|
/proyecto numerico/pruebas.R
|
408f6624cad8a3db713393583426106ed58c7ba3
|
[] |
no_license
|
camiloserr/numerico
|
e7d6f96b3bf396604ccab6db01e5643e23049263
|
323caf00cf94e5b89d57c023df3389841e553390
|
refs/heads/master
| 2020-03-24T05:37:11.732799
| 2018-11-14T16:51:11
| 2018-11-14T16:51:11
| 142,495,415
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
pruebas.R
|
# Examples for the secant-method function (secante, defined elsewhere)
f <- function(x){
return (x*x -1)
}
g <- function(x){
return (sin(x))
}
h <- function(x){
return (x^3 - 4*x^2 + 5*x +88 )
}
secante(f , 0 , 2)
secante(g , 0.6 , 50)
# using the optional error-tolerance parameter
secante(h , -5 , 6 , 0.0000001)
###############################################
# Examples for the bisection-method function (bisec, defined elsewhere)
# more than one root exists in this interval
bisec(f , -2 , 2)
# no root exists in this interval
bisec(f , -200 , -100)
# using the optional error-tolerance parameter
bisec( g , 0.5 , 5 , 0.000001)
bisec(h , -5 , 6)
|
528fd8e48fad92775db0a961c12d364d31d59b4e
|
ab8389cf60a6e9bec54b90641ed517f459a09e42
|
/hw04/code/functions.R
|
212fcb424c33c28bb107bef0a16db12a63f513e6
|
[] |
no_license
|
joshasuncion/stat133-hws-fall17
|
16ebf3655c86d57fb993ac338a2ad06919d3759a
|
ae173045a3a15f4cd62765bbee5616e23e096b5c
|
refs/heads/master
| 2021-03-27T13:13:48.367023
| 2017-12-04T05:11:06
| 2017-12-04T05:11:06
| 103,681,067
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,404
|
r
|
functions.R
|
# load required library
library("stringr")
# title: remove_missing
# description: drop every NA entry from a numeric vector
# arguments: numeric vector
# output: numeric vector without missing values
remove_missing <- function(x) {
  x[!is.na(x)]
}
# title: check_missing
# description: optionally strip missing (NA) values from a numeric vector
# arguments: numeric vector, na.rm flag
# output: numeric vector (unchanged unless na.rm is TRUE)
check_missing <- function(x, na.rm = FALSE) {
  if (na.rm == TRUE) {
    # inline NA removal (same effect as remove_missing())
    return(x[!is.na(x)])
  }
  x
}
# title: check_numeric
# description: signal an error when the input is not numeric
# arguments: any object
# output: NULL invisibly, or an error for non-numeric input
check_numeric <- function(x) {
  if (!is.numeric(x)) {
    stop("non-numeric argument")
  }
}
# title: get_minimum
# description: smallest value of a numeric vector (via sorting; note
# that sort() itself drops NAs)
# arguments: numeric vector, na.rm flag
# output: minimum value
get_minimum <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  sort(cleaned)[1]
}
# title: get_maximum
# description: largest value of a numeric vector (via sorting; note
# that sort() itself drops NAs)
# arguments: numeric vector, na.rm flag
# output: maximum value
get_maximum <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  sort(cleaned, decreasing = TRUE)[1]
}
# title: get_range
# description: spread (max - min) of a numeric vector
# arguments: numeric vector, na.rm flag
# output: range value
get_range <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  get_maximum(cleaned) - get_minimum(cleaned)
}
# title: get_median
# description: median of a numeric vector (midpoint of the two middle
# values for even lengths)
# arguments: numeric vector, na.rm flag
# output: median value
get_median <- function(x, na.rm = FALSE) {
  check_numeric(x)
  sorted <- sort(check_missing(x, na.rm))
  n <- length(sorted)
  if (n %% 2 == 1) {
    sorted[(n + 1) / 2]
  } else {
    (sorted[n / 2] + sorted[n / 2 + 1]) / 2
  }
}
# title: get_average
# description: arithmetic mean of a numeric vector
# arguments: numeric vector, na.rm flag
# output: average value (NaN for empty input; NA when NAs are kept)
get_average <- function(x, na.rm = FALSE) {
  check_numeric(x)
  x = check_missing(x, na.rm)
  # vectorized sum() replaces the original element-by-element
  # accumulation loop; behavior is identical (NA propagates, empty
  # input yields 0/0 = NaN)
  sum(x) / length(x)
}
# title: get_stdev
# description: sample standard deviation of a numeric vector
# (denominator n - 1)
# arguments: numeric vector, na.rm flag
# output: standard deviation value
get_stdev <- function(x, na.rm = FALSE) {
  check_numeric(x)
  x = check_missing(x, na.rm)
  deviations <- x - get_average(x)
  # vectorized sum of squared deviations replaces the original
  # element-by-element loop; same result
  sqrt(sum(deviations^2) / (length(x) - 1))
}
# title: get_quartile1
# description: first quartile (25th percentile) of a numeric vector
# arguments: numeric vector, na.rm flag
# output: first quartile value
get_quartile1 <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  # quantile() returns c(0%, 25%, 50%, 75%, 100%); take the 25% entry
  unname(quantile(cleaned))[2]
}
# title: get_quartile3
# description: third quartile (75th percentile) of a numeric vector
# arguments: numeric vector, na.rm flag
# output: third quartile value
get_quartile3 <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  # quantile() returns c(0%, 25%, 50%, 75%, 100%); take the 75% entry
  unname(quantile(cleaned))[4]
}
# title: get_percentile10
# description: tenth percentile of a numeric vector
# arguments: numeric vector, na.rm flag
# output: tenth percentile value
get_percentile10 <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  unname(quantile(cleaned, probs = 0.10))
}
# title: get_percentile90
# description: ninetieth percentile of a numeric vector
# arguments: numeric vector, na.rm flag
# output: ninetieth percentile value
get_percentile90 <- function(x, na.rm = FALSE) {
  check_numeric(x)
  cleaned <- check_missing(x, na.rm)
  unname(quantile(cleaned, probs = 0.90))
}
# title: count_missing
# description: number of missing (NA) values in a vector
# arguments: numeric vector
# output: integer count of NAs
count_missing <- function(x) {
  # summing the logical NA mask replaces the original subset + length()
  # round-trip; sum() over a logical returns an integer, matching the
  # original return type
  sum(is.na(x))
}
# title: summary_stats
# description: collect the summary statistics of a numeric vector into
# a named list (the NA count is taken before any NA removal)
# arguments: numeric vector, na.rm flag
# output: named list of summary statistics
summary_stats <- function(x, na.rm = FALSE) {
  check_numeric(x)
  n_missing <- count_missing(x)
  x <- check_missing(x, na.rm)
  list(
    minimum = get_minimum(x),
    percent10 = get_percentile10(x),
    quartile1 = get_quartile1(x),
    median = get_median(x),
    mean = get_average(x),
    quartile3 = get_quartile3(x),
    percent90 = get_percentile90(x),
    maximum = get_maximum(x),
    range = get_range(x),
    stdev = get_stdev(x),
    missing = n_missing
  )
}
# title: print_stats
# description: print a list of summary statistics, one aligned
# "name     : value" line per entry, values rounded to 4 decimals
# arguments: named list of statistics (as produced by summary_stats)
# output: printed lines (relies on stringr::str_pad/str_c, loaded at
# the top of this file)
print_stats <- function(x) {
cat(str_c(str_pad(names(x), 9, "right"),
":",
" ",
format(round(as.numeric(x), digits = 4), nsmall = 4)),
sep = "\n")
}
# title: rescale100
# description: linearly rescale values onto a 0-100 scale given the
# reference minimum and maximum
# arguments: numeric vector, minimum value, maximum value
# output: rescaled numeric vector
rescale100 <- function(x, xmin, xmax) {
  scaled <- (x - xmin) / (xmax - xmin)
  scaled * 100
}
# title: drop_lowest
# description: remove the first occurrence of the smallest value
# arguments: numeric vector
# output: numeric vector with one fewer element (input returned
# unchanged when empty or all-NA)
drop_lowest <- function(x) {
  # which.min() returns the index of the FIRST minimum, matching the
  # original "break after first match" loop; unlike the original, it
  # also behaves sanely for empty input (the 1:len loop ran backwards)
  # and when an NA precedes the minimum (NA == lowest broke the if).
  idx <- which.min(x)
  if (length(idx) == 0) {
    return(x)
  }
  x[-idx]
}
# title: score_homework
# description: average a vector of homework scores, optionally after
# dropping the single lowest score
# arguments: numeric vector, drop flag
# output: average value
score_homework <- function(x, drop = FALSE) {
  scores <- if (drop == TRUE) drop_lowest(x) else x
  get_average(scores)
}
# title: score_quiz
# description: average a vector of quiz scores, optionally after
# dropping the single lowest score
# arguments: numeric vector, drop flag
# output: average value
score_quiz <- function(x, drop = FALSE) {
  scores <- if (drop == TRUE) drop_lowest(x) else x
  get_average(scores)
}
# title: score_lab
# description: map a lab attendance count to a lab score
# arguments: numeric value (number of labs attended)
# output: lab score: 100 for 11 or 12 labs, then 20-point steps down
# to 20 for 7 labs; anything else (including < 7) scores 0
score_lab <- function(x) {
  # lookup table replaces the original if/else ladder; unlisted
  # attendance values fall through to 0, matching the original else
  score_table <- c(`12` = 100, `11` = 100, `10` = 80,
                   `9` = 60, `8` = 40, `7` = 20)
  key <- as.character(x)
  if (key %in% names(score_table)) {
    score_table[[key]]
  } else {
    0
  }
}
|
e015a9c2e2c09d5ca1f1f9d8977b0360cd43d213
|
fc087468a9a228a59f819285eb0fc4c2b726934c
|
/week 2 notes.R
|
67a7da4759415c8e8ceaf33f1e8b6550395e36d3
|
[] |
no_license
|
jmsahakian/R_DAVIS_in_class_project
|
16475f15c78bb20ffb27d61bcd01feb04fc9f468
|
312481656b104706c692c59c501e658ff020eca3
|
refs/heads/master
| 2020-11-25T05:17:13.101369
| 2020-05-01T02:33:35
| 2020-05-01T02:33:35
| 228,517,141
| 0
| 0
| null | 2019-12-17T02:36:09
| 2019-12-17T02:36:09
| null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
week 2 notes.R
|
#R week 2
#assign value
x <- 99
x <- 5
y <- x*2
x <- 20
y
# we are evaluating and storing output
# we are not creating a link!
# y keeps the value computed from the old x (y = 10); reassigning x
# afterwards does not change y
# can also use = for assignment, but use the <-
# use = to set arguments within functions
#how R talks back
log_of_word <- log("word")
#error: log() requires numeric input
log_of_word
# there is no object because there was an error
# errors stop the whole process and don't give outputs other than the error
log_of_negative <- log(-2)
log_of_negative
#warning occurred, but an object was created (NaN)
#r likes tidy data
#store null values as blank or NA
#avoid spaces, numbers, and special characters in column headers
#record metadata in text file
#excel is insane
#windows - days since jan 1 1900 but mac - days since jan 1 1904
#(Excel's Windows date system starts in 1900, the legacy Mac one in 1904)
#do not touch the raw data
#data validation in excel
#we love .csv
getwd()
setwd()
# NOTE(review): setwd() needs a path argument; called bare it errors
# ~ refers to your user name/home directory
# paths are either relative or absolute
# relative file path - "data/portal_data_joined.csv"
# use tab inside "" to search
# "../" go up a level in your directory
#treat generated output as disposable
|
42dcf45ac17ded4555aa4b7c459c050a42c8e2ff
|
67f1a42214608b4178521e88082021142c470142
|
/Practica 3/Practica3/PRACTICA3.2_BD_danishuni.R
|
477d67875391463bb3d7f075bfab0ce745ad5eb0
|
[] |
no_license
|
JORGECASAN/Gestion_Riesgo_Operativo
|
b49d20a63900c7ad89bf91814709971d9676c987
|
c3ed31cf1db7ebc03c9dca08454881550eeebc07
|
refs/heads/master
| 2020-06-01T00:05:35.998003
| 2019-06-06T09:05:51
| 2019-06-06T09:05:51
| 190,550,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,611
|
r
|
PRACTICA3.2_BD_danishuni.R
|
##################### VALORES EXTREMOS #######################
#Incluir
library(MASS)
library(CASdatasets)
library(car)
library(actuar)
library(fitdistrplus)
library(ggplot2)
#Los datos con los que trabajaremos son los de danishuni
data(danishuni)
head(danishuni)
x <-danishuni$Loss
summary(x)
var(x)
quantile(x,probs=c(0.05, 0.95))
quantile(x,seq(0,1, 0.20))
quantile(x,seq(0.9,1, 0.01))
#Comentario: La media y la mediana difieren, existe asimetr?a muy apuntada en torno al 4 cuartil.
# Datos de perdidas
danish.claim <- danishuni[,2]
danish.claim
#Block Maxima: Extrac. de los valores max de cada año
#Extrar las cuatro primeros valores de la variable
years <- as.numeric(substr(danishuni[,1], 1, 4))
years
danish.max <- aggregate(danish.claim, by=list(years), max, na.rm=TRUE)[,2]
danish.max
# Exceso sobre umbral. Extrac. de Valores que exceden un umbral
u <- 50
danish.exc <- danishuni[danishuni[,2] > u, 2]
danish.exc
#Visualizacion de las colas
n.u <- length(danish.exc) #n? de casos que superan u
surv.prob <- 1 - rank(danish.exc)/(n.u + 1) #rank ofrece el n? de orden
surv.prob
plot(danish.exc, surv.prob, log = "xy", xlab = "Excesos",
ylab = "Probabilidades", ylim=c(0.01, 1))
#Añadimos las prob teoricas de la D.Pareto
#Determinamos los parámetros necesarios
alpha <- - cov(log(danish.exc), log(surv.prob)) / var(log(danish.exc))
alpha
x = seq(u, max(danish.exc), length = 100) #divide de u a max() 100 interv.
y = (x / u)^(-alpha)
lines(x, y)
#Funci?n de distribuci?n acumulada
prob <- rank(danish.exc) / (n.u + 1)
plot(danish.exc, prob, log = "x", xlab= "Excesos", ylab = "Probabilidades de no excesos")
y = 1 - (x / u)^(-alpha)
lines(x, y)
######## ESTIMACION
# Negative log-likelihood of the generalized extreme value (GEV)
# distribution.
#   par  - c(mu, sigma, xi): location, scale, shape
#   data - vector of block maxima
# Invalid parameters (sigma <= 0, xi <= -1) or data outside the
# distribution's support return the large penalty 1e6 so optimizers
# such as nlm() steer away from them instead of erroring.
nllik.gev <- function(par, data){
mu <- par[1]
sigma <- par[2]
xi <- par[3]
if ((sigma <= 0) | (xi <= -1))
return(1e6)
n <- length(data)
if (xi == 0)
# Gumbel limit (xi == 0) has its own closed form
n * log(sigma) + sum((data - mu) / sigma) +
sum(exp(-(data - mu) / sigma))
else {
# support constraint: 1 + xi * (x - mu) / sigma must stay positive
if (any((1 + xi * (data - mu) / sigma) <= 0))
return(1e6)
n * log(sigma) + (1 + 1 / xi) *
sum(log(1 + xi * (data - mu) / sigma)) +
sum((1 + xi * (data - mu) / sigma)^(-1/xi))
}
}
sigma.start <- sqrt(6) * sd(danish.max) / pi
mu.start <- mean(danish.max) + digamma(1) * sigma.start
fit.gev <- nlm(nllik.gev, c(mu.start, sigma.start, 0),
hessian = TRUE, data = danish.max)
fit.gev
fit.gev$estimate #par.posici?n, escala y forma
sqrt(diag(solve(fit.gev$hessian)))
# Poisson-generalized Pareto model: negative log-likelihood of the
# generalized Pareto distribution (GPD) for exceedances over u.
#   par  - c(tau, xi): scale and shape
#   u    - threshold
#   data - observations exceeding u
# Invalid parameters or unsupported data return the penalty 1e6.
nllik.gp <- function(par, u, data){
tau <- par[1]
xi <- par[2]
if ((tau <= 0) | (xi < -1))
return(1e6)
m <- length(data)
if (xi == 0)
# exponential limit (xi == 0)
m * log(tau) + sum(data - u) / tau
else {
if (any((1 + xi * (data - u) / tau) <= 0))
return(1e6)
m * log(tau) + (1 + 1 / xi) *
sum(log(1 + xi * (data - u) / tau))
}
}
u <- 10
tau.start <- mean(danish.exc) - u
fit.gp <- nlm(nllik.gp, c(tau.start, 0), u = u, hessian = TRUE,
data = danish.exc)
fit.gp
fit.gp$estimate
#El parametro eta m/n=0,005
# Confidence interval for the tail index via the profile likelihood
# (here around xi = 0.50, at the 95% level).
# prof.nllik.gp fixes the shape xi and exposes only the scale to the
# optimizer; prof.fit.gp maximizes over the scale (starting from
# tau.start) and returns the profiled log-likelihood (negated minimum).
# NOTE(review): prof.fit.gp relies on the globals tau.start, u and
# danish.exc defined earlier in this script.
prof.nllik.gp <- function(par,xi, u, data)
nllik.gp(c(par,xi), u, data)
prof.fit.gp <- function(x)
-nlm(prof.nllik.gp, tau.start, xi = x, u = u, hessian = TRUE,
data = danish.exc)$minimum
vxi = seq(0,1.8,by=.025)
prof.lik <- Vectorize(prof.fit.gp)(vxi)
plot(vxi, prof.lik, type="l", xlab = expression(xi),
ylab = "Profile log-likelihood")
opt <- optimize(f = prof.fit.gp, interval=c(0,3), maximum=TRUE)
opt
up <- opt$objective
abline(h = up, lty=2)
abline(h = up-qchisq(p = 0.95, df = 1), col = "grey")
I <- which(prof.lik >= up-qchisq(p = 0.95, df = 1))
lines(vxi[I], rep(up-qchisq(p = 0.95, df = 1), length(I)),
lwd = 5, col = "grey")
abline(v = range(vxi[I]), col = "grey", lty = 2)
abline(v = opt$maximum, col="grey")
# Point-process representation of extremes: negative log-likelihood of
# the Poisson process model for observations above threshold u.
#   par - c(mu, sigma, xi): location, scale, shape
#   n.b - number of blocks (here, years of data)
# Returns the penalty 1e6 for invalid parameters or unsupported data.
nllik.pp <- function(par, u, data, n.b){
mu <- par[1]
sigma <- par[2]
xi <- par[3]
if ((sigma <= 0) | (xi <= -1))
return(1e6)
if (xi == 0)
poiss.meas <- n.b * exp(-(u - mu) / sigma)
else
poiss.meas <- n.b * max(0, 1 + xi * (u - mu) / sigma)^(-1/xi)
exc <- data[data > u]
m <- length(exc)
if (xi == 0)
poiss.meas + m * log(sigma) + sum((exc - mu) / sigma)
else {
if (any((1 + xi * (exc - mu) / sigma) <= 0))
return(1e6)
poiss.meas + m * log(sigma) + (1 + 1 / xi) *
sum(log(1 + xi * (exc - mu) / sigma))
}
}
n.b <- 1991 - 1980
u <- 10
sigma.start <- sqrt(6) * sd(danish.exc) / pi
mu.start <- mean(danish.exc) + (log(n.b) + digamma(1)) *
sigma.start
fit.pp <- nlm(nllik.pp, c(mu.start, sigma.start, 0), u = u,
hessian = TRUE, data = danishuni[,2], n.b = n.b)
fit.pp
#Estimacion otros indices de cola
#Intervalo confianza asintotico del ?ndice de cola
logXs <- log(sort(danishuni[,2], decreasing=TRUE))
n <- length(logXs)
xi <- 1/1:n * cumsum(logXs) - logXs
ci.up <- xi + qnorm(0.975) * xi / sqrt(1:n)
ci.low <- xi - qnorm(0.975) * xi / sqrt(1:n)
matplot(1:n, cbind(ci.low, xi, ci.up),lty = 1, type = "l",
col = c("blue", "black", "blue"), ylab = expression(xi),
xlab = "Numero valores extremos")
#Intervalo confianza de alfa=1/indice de cola
alpha <- 1 / xi
alpha.std.err <- alpha / sqrt(1:n)
ci.up <- alpha + qnorm(0.975) * alpha / sqrt(1:n)
ci.low <- alpha - qnorm(0.975) * alpha / sqrt(1:n)
matplot(1:n, cbind(ci.low, alpha, ci.up), lty = 1, type = "l",
col = c("blue", "black", "blue"), ylab = expression(alpha),
xlab = "Numero valores extremos")
# Mean excess (mean residual life) plot: for a grid of thresholds u,
# plot the average exceedance above u with pointwise normal 95%
# confidence bands. An approximately linear region suggests a GPD fit.
#   data    - numeric vector of observations
#   u.range - range of thresholds to scan (default: range of the data)
#   n.u     - number of grid points
# NOTE(review): inside the loop the argument name n.u is reused for
# the number of exceedances at the current threshold; the loop bound
# 1:n.u was evaluated before the first reassignment, so iteration is
# unaffected, but the shadowing is confusing and worth renaming.
meanExcessPlot <- function(data, u.range = range(data),n.u = 100){
mean.excess <- ci.up <- ci.low <- rep(NA, n.u)
all.u <- seq(u.range[1], u.range[2], length = n.u)
for (i in 1:n.u){
u <- all.u[i]
excess <- data[data > u] - u
n.u <- length(excess)
mean.excess[i] <- mean(excess)
var.mean.excess <- var(excess)
ci.up[i] <- mean.excess[i] + qnorm(0.975) *
sqrt(var.mean.excess / n.u)
ci.low[i] <- mean.excess[i] - qnorm(0.975) *
sqrt(var.mean.excess / n.u)
}
matplot(all.u, cbind(ci.low, mean.excess, ci.up), col = 1,
lty = c(2, 1, 2), type = "l", xlab = "u", ylab = "Exc.sobre media")
}
meanExcessPlot(danish.exc)
#### MODEL VALIDATION
# Q-Q plot for a fitted generalized Pareto distribution: compares the
# sorted exceedances over u with the GPD quantiles implied by scale
# tau and shape xi at the plotting positions i/(m+1). Points near the
# identity line indicate a good fit.
qqgpd <- function(data, u, tau, xi){
excess <- data[data > u]
m <- length(excess)
prob <- 1:m / (m + 1)
# theoretical GPD quantile function evaluated at the plotting positions
x.hat <- u + tau / xi * ((1 - prob)^-xi - 1)
ylim <- xlim <- range(x.hat, excess)
plot(sort(excess), x.hat, xlab = "Quantiles en la muestra",
ylab = "Quantiles ajustados", xlim = xlim, ylim = ylim)
abline(0, 1, col = "grey")
}
qqgpd(danishuni[,2], 10, 7, 0.5)#u=10, tau=7 y indice cola=0,5
# P-P plot for a fitted generalized Pareto distribution: plots the
# fitted GPD CDF values of the sorted exceedances against the uniform
# plotting positions i/(m+1). Points near the identity line indicate a
# good fit.
ppgpd <- function(data, u, tau, xi){
excess <- data[data > u]
m <- length(excess)
emp.prob <- 1:m / (m + 1)
prob.hat <- 1 - (1 + xi * (sort(excess) - u) / tau)^(-1/xi)
plot(emp.prob, prob.hat, xlab = "Probabilidades empiricas",
ylab = "Probabilidades ajustadas", xlim = c(0, 1),
ylim = c(0, 1))
abline(0, 1, col = "grey")
}
############## VAR ###################################
library(vars)
library(sn)
# 5% quantile of returns assumed ~ N(mean = 0.05, sd = 0.18)
modelo <- qnorm(0.05, 0.05, 0.18)
modelo
# monetary result on a 20,000 position at that quantile
R <- modelo * 20000
R
# Value-at-Risk reported as a positive loss amount
var <- -R
var
# NOTE(review): a stray `dnorm()` call with no arguments stood here;
# dnorm() requires at least its quantile argument, so the bare call
# always errored and has been removed.
|
4a68bbadf1c01713677221e41b31ccc1db4e24e2
|
3f20308cfc97d01fb54ea04d1dfd96dd5ae531c7
|
/analysis/analyse-simulation-results.R
|
b72da876f4347b43db9e178cd7b2e9d4b8222cb9
|
[] |
no_license
|
plsm/mds
|
083f290b4d2515dd6f007df0903ed2a7c5f718b3
|
323daf2f447f3fd7cfde5909ba60f24a5b16159e
|
refs/heads/master
| 2020-03-16T09:02:42.826319
| 2018-05-16T07:46:06
| 2018-05-16T07:46:06
| 132,607,519
| 0
| 0
| null | 2018-05-08T12:43:34
| 2018-05-08T12:43:34
| null |
UTF-8
|
R
| false
| false
| 2,802
|
r
|
analyse-simulation-results.R
|
library ("data.table")
library ("igraph")
# Read the CSV results produced by the simulator for graph
# `index.graph`, derive the distinct solutions (sets of selected
# nodes), and plot each solution -- plus a per-node participation
# count -- over the graph topology read from "graph_<index>.csv".
#   number.nodes - number of nodes; solution values are assumed to
#                  live in CSV columns 8..(7 + number.nodes)
#   interactive  - TRUE: plot to the active device, pausing for ENTER
#                  after each solution; FALSE: write a PNG grid
# Returns 0; the plots / PNG file are the real output.
analyse.simulation.results <- function (
number.nodes,
index.graph,
interactive = FALSE
) {
## read simulation results ####
data <- fread (
input = sprintf (
"results-graph_%d.csv",
index.graph
),
header = FALSE
)
## compute solutions ####
# binarize: a node is part of a solution when its value exceeds 35
# (NOTE(review): magic threshold from the simulator -- confirm meaning)
graph.solution <- data [
,
lapply (
X = .SD,
FUN = function (x) return (ifelse (x > 35, 1, 0))
),
.SDcols = 8:(7 + number.nodes)
]
print (graph.solution)
# group identical 0/1 rows to find distinct solutions and how often
# each occurred (count lands in column N)
columns <- sprintf (
"V%d",
c (8 : (7 + number.nodes))
)
columns <- paste (columns, collapse = ",")
intermediate <- graph.solution [
,
.N,
by = columns
]
print (intermediate)
## compute how many times each node appears ####
node.appearance <- data.table (t (graph.solution))
node.appearance [
,
node := .I
]
node.appearance <- node.appearance [
,
.(
count = base::sum (.SD)
),
by = .(node)
]
node.appearance <- node.appearance [, count]
print (node.appearance)
## read graph topology ####
graph.data <- fread (
input = sprintf (
"graph_%d.csv",
index.graph
),
header = FALSE
)
graph.data <- data.matrix (graph.data)
graph.data <- graph_from_adjacency_matrix (
adjmatrix = graph.data,
mode = "undirected"
)
# compute the layout to use in all graphs, igraph tends to compute different node placement...
graph.layout <- layout_with_kk (graph.data)
print (graph.layout)
if (!interactive) {
# near-square grid: one panel per distinct solution plus one for
# the per-node participation counts
number.plots <- nrow (intermediate) + 1
dimx <- ceiling (sqrt (number.plots))
dimy <- dimx - (dimx * dimx - number.plots) %/% dimx
png (
filename = sprintf (
"graph-%d-stats.png",
index.graph
),
width = 300 * dimx,
height = 225 * dimy
)
par (
mfrow = c (dimx, dimy),
mar = c (0, 0, 2, 0)
)
}
# helper: highlight one solution's selected nodes on the topology
# plot; number.repeats is how many times this solution was observed
plot.solution <- function (
nodes.id,
number.repeats
) {
nodes.id <- data.table (t (nodes.id))
nodes.id <- nodes.id [, V1 := .I * V1]
nodes.id <- nodes.id [V1 > 0, V1]
nodes.id <- as.list (nodes.id)
plot.igraph (
x = graph.data,
vertex.label = NA,
mark.groups = nodes.id,
mark.col = "blue",
layout = graph.layout,
main = sprintf (
"This solution was found %d time(s)",
number.repeats
)
)
if (interactive)
readline ("Press ENTER to continue")
return (0)
}
intermediate [
,
plot.solution (.SD, N),
by = columns,
.SDcols = 1:number.nodes
]
plot.igraph (
x = graph.data,
layout = graph.layout,
vertex.label = node.appearance,
main = "Number of times each node was in a solution"
)
if (!interactive) {
dev.off ()
}
return (0)
}
|
42d58d89c9ce2f032cb12be6cfc14e54bcfac780
|
c916b2c777d04f4b4697035d8a4a58626e953f8b
|
/run_analysis.R
|
31bb0f82c0b214c78b834c22677657c2a96d4f6f
|
[] |
no_license
|
frapgi/GettingAndCleaningData
|
72b485be5669d067f093a3d8d7acdfbf9f3cde80
|
86e3c3ae9d87bced103cd3912ef685401fcc8a04
|
refs/heads/master
| 2021-01-02T09:44:20.709922
| 2015-08-23T13:49:19
| 2015-08-23T13:49:19
| 41,250,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
r
|
run_analysis.R
|
#Reshape lets you flexibly restructure and aggregate data using just two functions: melt and cast.
library(reshape2)
filename <- "getdata_dataset.zip"
## Download and unzip the data
# NOTE: the URL string previously ended with a stray trailing space
# inside the quotes, which corrupts the HTTP request; it was trimmed.
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, mode="wb")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
# load activity labels and feature names
AcLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
AcLabels[,2] <- as.character(AcLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
# keep only mean and standard deviation features, and clean the names
FeaturesSelected <- grep(".*mean.*|.*std.*", features[,2])
FeaturesSelected.names <- features[FeaturesSelected,2]
FeaturesSelected.names = gsub('-mean', 'Mean', FeaturesSelected.names) #'-mean'is replaced by 'Mean'
FeaturesSelected.names = gsub('-std', 'Std', FeaturesSelected.names)
FeaturesSelected.names <- gsub('[-()]', '', FeaturesSelected.names)
# Load the datasets (selected feature columns + subject + activity)
train <- read.table("UCI HAR Dataset/train/X_train.txt")[FeaturesSelected]
TActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
TSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(TSubjects, TActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[FeaturesSelected]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge datasets and add labels
Datos <- rbind(train, test)
colnames(Datos) <- c("subject", "activity", FeaturesSelected.names)
# turn activities & subjects into factors
Datos$activity <- factor(Datos$activity, levels = AcLabels[,1], labels = AcLabels[,2])
Datos$subject <- as.factor(Datos$subject)
# tidy output: average of each variable per subject and activity
Datos.melted <- melt(Datos, id = c("subject", "activity"))
Datos.mean <- dcast(Datos.melted, subject + activity ~ variable, mean)
write.table(Datos.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
f58254b63f23d50719f69c1a962c77d1287ddbf0
|
ec94dddf45e332663da3e37db2feeb709221d763
|
/man/gen_gumbel.Rd
|
2d98f3a308c0791ce1af23b78a0dbf0dabbcfd79
|
[
"Apache-2.0"
] |
permissive
|
AntoineDubois/sdcv2
|
44687ab28a1c7aa3c82702ee2506257a20475994
|
53041ecc32698089a66a0df7911dd7c0f461cc34
|
refs/heads/master
| 2023-07-16T20:07:11.525114
| 2021-09-06T15:27:46
| 2021-09-06T15:27:46
| 386,579,310
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 546
|
rd
|
gen_gumbel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{gen_gumbel}
\alias{gen_gumbel}
\title{gen_gumbel}
\usage{
gen_gumbel(n, m, param)
}
\arguments{
\item{n}{The number of rows}
\item{m}{The number of columns}
\item{param}{An optional named list of distribution parameters:
\code{location} (a number) and \code{scale} (a positive number)}
}
\description{
A function generating a random matrix with gumbel distribution
}
\examples{
gen_gumbel(10, 5, param=list(location=3))
gen_gumbel(10, 5, param=list(location=3, scale=4))
}
|
9018325bae6168720b09707b567547c6dd64b6ba
|
a728853b176f767dcec0f43936dcf49835b8c7d4
|
/code/1october_cpg.R
|
c07d81ede56c22286188d6849b5edfa9e068a695
|
[] |
no_license
|
caleblareau/immgen_final
|
263a171d0e5f35f66f3eb457a3a78432bb62ff09
|
8a852a9b40b9709e043bb4e8190f8bf930e80c3b
|
refs/heads/master
| 2021-09-15T04:38:58.604526
| 2018-05-26T00:46:28
| 2018-05-26T00:46:28
| 100,637,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,959
|
r
|
1october_cpg.R
|
library(data.table)
library(GenomicRanges)
library(BSgenome.Mmusculus.UCSC.mm10)
library(tidyverse)
library(motifmatchr)
library(BuenColors)
library(chromVARmotifs)
library(magrittr)
library(SummarizedExperiment)
cpg <- read.table("../data/mm10_cpgIslandExt.txt")
expressedGenes <- read.table("../data/expressed_genes_SM.txt", header = FALSE, stringsAsFactors = FALSE)[,1]
cpgg <- makeGRangesFromDataFrame(cpg, seqnames.field = "V2", start.field = "V3", end.field = "V4")
gtf <- "../data/mm10.refGenes.2016.1018.csv"
gdf <- fread(gtf)
gdf <- gdf[gdf[["gene.name"]]%in% expressedGenes, ]
# For each gene in `gdf`, build a strand-aware promoter window of
# width `far` bp upstream of the TSS and flag whether it overlaps a
# CpG island.
#   gdf - data frame/table with columns chrom, TSS, strand, gene.name
#   far - upstream extent of the promoter window, in bp
# Returns a data.frame with columns genes (gene name) and CPGi
# (logical overlap flag), ordered positive-strand genes first.
# NOTE(review): depends on the global GRanges object `cpgg` built in
# the surrounding script.
motifPromoter <- function(gdf, far ){
# Define promoter shape
near <- 00
# Process positive strand: window extends upstream (towards smaller
# coordinates) of the TSS
posdf <- gdf[gdf$strand == "+", c("chrom", "TSS", "gene.name")]
posdf$start <- posdf$TSS - far
posdf$end <- posdf$TSS - near
posg <- makeGRangesFromDataFrame(posdf, keep.extra.columns = TRUE)
# Process negative strand: upstream is towards larger coordinates
negdf <- gdf[gdf$strand == "-", c("chrom", "TSS", "gene.name")]
negdf$start <- negdf$TSS + near
negdf$end <- negdf$TSS + far
negg <- makeGRangesFromDataFrame(negdf, keep.extra.columns = TRUE)
bothg <- c(posg, negg)
# overlap flag follows the c(posg, negg) ordering, matching the
# gene-name concatenation below; 1:length() works here only because
# bothg is non-empty (seq_along would be more robust)
inCpg <- 1:length(bothg) %in% queryHits(findOverlaps(bothg, cpgg))
# Compile final results
resdf <- data.frame(genes = c(mcols(posg)$gene.name,mcols(negg)$gene.name),
CPGi = inCpg,
stringsAsFactors = FALSE)
return(resdf)
}
farr <- 1
resdf <- motifPromoter(gdf, far = farr)
qdf <- read.table("../output/RNA_varianceComponents.txt", header = TRUE)
qdf$UnexplainedGene <- qdf$Unexplained > 90
qdf$PromoterGene <- qdf$Promoter > 90
qdf$DistalGene <- qdf$Distal > 90
bdf <- merge(qdf, resdf, by.x = "gene", by.y = "genes")
df <- data.frame(
hasCPGi = c(sum(bdf[,"DistalGene"] & bdf[,"CPGi"]), sum(bdf[,"PromoterGene"] & bdf[,"CPGi"]), sum(bdf[,"UnexplainedGene"] & bdf[,"CPGi"]),
sum((!bdf[,"DistalGene"] & !bdf[,"PromoterGene"] & !bdf[,"UnexplainedGene"])& bdf[,"CPGi"])),
noCPGi = c(sum(bdf[,"DistalGene"] & !bdf[,"CPGi"]), sum(bdf[,"PromoterGene"] & !bdf[,"CPGi"]), sum(bdf[,"UnexplainedGene"] & !bdf[,"CPGi"]),
sum((!bdf[,"DistalGene"] & !bdf[,"PromoterGene"] & !bdf[,"UnexplainedGene"])& !bdf[,"CPGi"]))
)
rownames(df) <- c("Distal", "Promoter", "Unexplained", "Mixed")
df
plotdf <- data.frame(CpGiFreq = c(df[,1] / (df[,1] + df[,2])))
plotdf$GeneType<- c("Distal", "Promoter", "Unexplained", "Mixed")
p1 <- ggplot() + pretty_plot() +
geom_bar(data = plotdf, aes(x=GeneType, y=CpGiFreq*100, fill=GeneType),stat = "identity", color = "black") +
scale_fill_manual(values=c("dodgerblue", "black", "green3", "red")) +
theme( legend.position="bottom")+labs(colour = "Variance Component", fill = "Variance Component") +
theme(axis.text.x = element_text(vjust = 0.25, angle = 90)) +
labs(x = "Gene Type", y = "% TSS Overlapping CpG Island")
p1
ggsave(p1, file = paste0("../output/cpgi_overlap.pdf"))
|
64ba8591efd5fe9e65848a3d4849cf8dee80fa4b
|
5f6fca5dcf1331f0f0ecba1af68d062612c9c7d3
|
/Projekt_2/IFSZuzanna/R/sierpinskiTriangle.R
|
06cb22e146c655555f3e0f5a76111a652b2d3b55
|
[] |
no_license
|
ultramargarine/ProgramowanieWizualizacja2017
|
421666d332c3ff3ffea40b6d12be10537ef41654
|
fcfd5b393717ec6ca96919656b44d3c4bcbd7d93
|
refs/heads/master
| 2021-09-15T09:01:58.860448
| 2018-03-08T22:00:50
| 2018-03-08T22:00:50
| 105,997,439
| 0
| 0
| null | 2018-03-08T21:58:51
| 2017-10-06T11:32:12
|
R
|
UTF-8
|
R
| false
| false
| 752
|
r
|
sierpinskiTriangle.R
|
# Sierpinski triangle IFS maps: three affine contractions that halve
# distances towards each corner of an equilateral triangle.

# Contraction towards the bottom-left corner (0, 0).
sierpinski_triangle_f1 <- function(point) {
  c(0.5 * point[1], 0.5 * point[2])
}

# Contraction towards the bottom-right corner (1, 0).
sierpinski_triangle_f2 <- function(point) {
  c(0.5 * point[1] + 0.5, 0.5 * point[2])
}

# Contraction towards the top corner (1/2, sqrt(3)/2).
sierpinski_triangle_f3 <- function(point) {
  c(0.5 * point[1] + 0.25, 0.5 * point[2] + sqrt(3) / 4)
}
#' Sierpinski triangle fractal
#'
#' Specification of the Sierpinski triangle fractal, which can be plotted using the plot function.
#' \href{https://en.wikipedia.org/wiki/Sierpinski_triangle}{Wikipedia}
#' @docType data
#' @keywords datasets
#' @name sierpinskiTriangle
#'
NULL
# Bundle the three contraction maps above into an iterated function system;
# createIFS() is defined elsewhere in this package.
sierpinskiTriangle <- createIFS(list(sierpinski_triangle_f1, sierpinski_triangle_f2, sierpinski_triangle_f3))
|
3043b41e84265b3faa301ae56391e7081af60988
|
5ff3b78779f3eeb378de6f23b947b1249eb2d658
|
/install.R
|
331ddb969b14e005627f83709a16414eccff791d
|
[] |
no_license
|
gnguy/binder_base
|
70a08439d7eb38c7bdf9373382da25ca8341298b
|
25694a8f89da435d93d3171b930adefd1af4247b
|
refs/heads/master
| 2020-08-07T12:57:57.648354
| 2019-10-10T22:32:42
| 2019-10-10T22:32:42
| 213,460,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
install.R
|
# Install a CRAN package only when it is not already installed.
#
# package_name - character scalar, name of the package.
#
# BUG FIX: installed.packages() returns a matrix; the package names are its
# row names. The original tested membership against the WHOLE matrix (every
# column: versions, licenses, paths), which could yield spurious matches.
install_packages <- function(package_name) {
  if (!package_name %in% rownames(installed.packages())) {
    install.packages(package_name)
  }
}

# Packages required by the project.
package_list <- c("tidyverse", "readr", "dplyr", "shiny", "shinythemes", "plotly", "data.table",
                  "xgboost", "lme4", "Hmisc", "htmlwidgets", "openxlsx", "DT",
                  "rmarkdown", "leaflet", "lubridate", "sf", "bit64", "IRkernel", "arrow")
# invisible() suppresses the printed list of NULLs from lapply() when the
# script is run with Rscript.
invisible(lapply(package_list, install_packages))
|
59eaf571193346b2d916ac4d3db9ffef43fb66f3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MixGHD/examples/MGHFA.Rd.R
|
537042453b5bedbde207e4338fbcdcbf7a04fb19
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
MGHFA.Rd.R
|
# Example usage of MixGHD::MGHFA (mixture of generalized hyperbolic factor
# analyzers) for semi-supervised classification on the sonar data.
library(MixGHD)
### Name: MGHFA
### Title: Mixture of generalized hyperbolic factor analyzers (MGHFA).
### Aliases: MGHFA
### Keywords: Clustering Classification Generalized hyperbolic distribution
### ** Examples
## Classification
#70% belong to the training set
data(sonar)
# Column 61 holds the class labels; columns 1:60 are the predictors.
label=sonar[,61]
set.seed(4)
# Draw random row indices and mark those observations as unlabelled (0),
# so they form the test set for the semi-supervised fit.
a=round(runif(62)*207+1)
label[a]=0
##model estimation: G = 2 components, q = 2 latent factors
model=MGHFA(data=sonar[,1:60], G=2, max.iter=25 ,q=2,label=label)
#result: cross-tabulate predicted (model@map) vs. true classes
table(model@map,sonar[,61])
summary(model)
|
2e29f4b500e7286dc497b618bce9038753789b6b
|
c8674dc53aa778b3d8c0759f117b8872196d3009
|
/man/DisplayFirstStageTestResult.Rd
|
abe43239f70f0a35c9ff062a409bb88999139c41
|
[] |
no_license
|
andrewhaoyu/TOP
|
d8acae9cd8668d70f424997cc6c91b21bf5b5dce
|
0de8cd088754079b9b9a11ee785fc6a34f3bab29
|
refs/heads/master
| 2022-10-04T21:39:09.104998
| 2022-08-25T21:19:01
| 2022-08-25T21:19:01
| 148,667,732
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 496
|
rd
|
DisplayFirstStageTestResult.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/display_95CI_p.R
\name{DisplayFirstStageTestResult}
\alias{DisplayFirstStageTestResult}
\title{Display the first stage parameters}
\usage{
DisplayFirstStageTestResult(logodds, sigma, places = 5)
}
\arguments{
\item{logodds}{the log odds ratio vectors}
\item{sigma}{the covariance matrix for the log odds ratio vectors}
\item{places}{the numerical places to keep}
}
\description{
Display the first stage parameters
}
|
68c5c6ca932dae04d941663133d588bd7d564565
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9105_0/rinput.R
|
ee75b748990249545685abf10665e2a532b496ac
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree out
# under a new file name.
library(ape)
testtree <- read.tree("9105_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9105_0_unrooted.txt")
|
53029be823a3c14da8572ecde4e51e9aaa789f30
|
fd6188d76cf7906f8b875a83a9422760f562feff
|
/Plot4.R
|
2e22248ca2fa632db6d9b36593e6d2ec362c97c7
|
[] |
no_license
|
s6juncheng/ExData_Plotting1
|
cd352f4338385acc4eccf65922167f9a2a6611a8
|
814e28205858246a21debb759cdd866c78bc37f8
|
refs/heads/master
| 2021-01-22T17:14:57.266191
| 2014-05-11T21:49:04
| 2014-05-11T21:49:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
Plot4.R
|
# Household power consumption: 2x2 panel figure saved to Plot4.png.
#
# BUG FIXES vs. the original:
# * Unresolved git merge-conflict markers (<<<<<<< / ======= / >>>>>>>)
#   made the script unparseable; resolved (both sides were identical).
# * png() was opened AFTER all plots were drawn to an on-screen windows()
#   device, so the saved file contained nothing; the device is now opened
#   before plotting and the Windows-only windows() call is dropped.
# * Sub_metering columns were passed to plot()/lines() via as.character(),
#   which is not numeric; convert character -> numeric so lines can be drawn.
hhc <- read.delim("~//household_power_consumption.txt", header = T, sep = ';')
# subset dates only 2007-02-01 and 2007-02-02
hhc_sub <- hhc[which(hhc$Date %in% c('1/2/2007','2/2/2007')),]
# Convert Date + Time into POSIXct timestamps for the x axis.
x <- as.POSIXct(strptime(paste(hhc_sub$Date, hhc_sub$Time), '%d/%m/%Y %H:%M:%S'))
# Open the output device before drawing anything.
png('Plot4.png', width = 480, height = 480)
par(mfrow = c(2,2))
# plot1: global active power
plot(x, hhc_sub$Global_active_power, type = "l",
     ylab = 'Global Active Power (kilowatts)', xlab = '')
# plot2: voltage
plot(x, hhc_sub$Voltage, type = 'l', ylab = 'Voltage', xlab = 'datetime')
# plot3: three sub-metering series plus legend; values may have been read
# as non-numeric text, so coerce via character -> numeric
plot(x, as.numeric(as.character(hhc_sub$Sub_metering_1)), type = "l",
     ylab = 'Energy sub metering', xlab = '')
lines(x, as.numeric(as.character(hhc_sub$Sub_metering_2)), col = 'red')
lines(x, as.numeric(as.character(hhc_sub$Sub_metering_3)), col = 'blue')
legend("topright",
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       lty = c(1,1,1),
       lwd = c(2.5,2.5,2.5),
       col = c('black','red', 'blue'),
       bty = 'n' #remove box
)
# plot4: global reactive power
plot(x, hhc_sub$Global_reactive_power, type = 'l',
     ylab = 'Global_reactive_power', xlab = 'datetime')
# Close the device to flush the file.
dev.off()
|
a31f9ed8d1ad1a0e9b0afb2770842b65259a3f11
|
d8d26acba4663c2ac46a21b12564502c57ecc64b
|
/man/get.gist.forks.Rd
|
6c0ea5589fe44e53208a5bb23d724ef777cd7c6b
|
[
"MIT"
] |
permissive
|
cscheid/rgithub
|
c8bb02276999f202ec3b8053d7eccf96881d5d0c
|
e98afc3bbf2971a59536fc3ac5ee1ec7c54ead8b
|
refs/heads/master
| 2020-12-29T02:32:38.156169
| 2018-06-06T17:32:57
| 2018-06-06T17:32:57
| 8,487,945
| 57
| 26
| null | 2017-05-14T22:10:10
| 2013-02-28T20:57:49
|
R
|
UTF-8
|
R
| false
| true
| 378
|
rd
|
get.gist.forks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gists.R
\name{get.gist.forks}
\alias{get.gist.forks}
\title{list the forks of a gist}
\usage{
get.gist.forks(id, ctx = get.github.context())
}
\arguments{
\item{id}{the gist id}
\item{ctx}{the github context object}
}
\value{
the list of fork information
}
\description{
list the forks of a gist
}
|
b95dbf26d04fb6841002b207e658bbea68aab7b2
|
37b9f0b3f0728963c34468a11c584bb6a109a508
|
/SurvivorshipPlotsFromDam.R
|
7209d00a99631e897921324aa18175619a39a092
|
[] |
no_license
|
PletcherLab/DAMSurvival
|
3a0a25429d7f7a785e18872326c35e9804ae883b
|
d2fa24054fd62bfeff40939e250889a114a5e1c6
|
refs/heads/master
| 2022-12-24T18:26:12.821259
| 2022-12-08T22:42:01
| 2022-12-08T22:42:01
| 149,170,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,323
|
r
|
SurvivorshipPlotsFromDam.R
|
#Strategy to create a program to find and plot lifespan of flies in activity monitors:
#Steps
#read in DAM txt files. Prompt to assign treatment group to each monitor file, or add header with all columns labeled.
#merge data into single frame for similar treatments using time and date columns to align data
#Activity data is in column 11-33
#find start of experiment based on when DAM output file shows status change from 51 to 1. Record time in same row.
#delete all prior data containing 51 readings in column 4 except the 1 flanking the start of the experiment
#
#determine time last alive by searching through the data from the bottom to find the first nonzero entry
#determine lifespan by subtracting the time of death from the start time
#populate a new vector create new vectors of time lived for each treatment
#stack survival data in format for SurvComp analysis. Add second column of equal length with 1 as value (to represent deaths)
#
#
#
#Read DAM file into R and add header with column names
DAM13 <- read.table("Monitor13.txt", header = FALSE)
colnames(DAM13)<-c("Num", "Date", "Time", "Status", "Blank1", "Blank2", "Blank3", "Blank4", "Blank5",
                   "Light", "Channel1", "Channel2", "Channel3", "Channel4", "Channel5", "Channel6", "Channel7", "Channel8",
                   "Channel9", "Channel10", "Channel11", "Channel12", "Channel13", "Channel14", "Channel15",
                   "Channel16", "Channel17", "Channel18", "Channel19", "Channel20", "Channel21", "Channel22",
                   "Channel23", "Channel24", "Channel25", "Channel26", "Channel27", "Channel28", "Channel29",
                   "Channel30", "Channel31", "Channel32")
#Merge date and time columns into a single POSIXct timestamp.
# NOTE(review): "%b" month abbreviations are locale-dependent, which is the
# likely cause of the parsing trouble the author mentioned (NAs in
# non-English locales) — confirm the session locale matches the data.
DAM13$CalDateTime <- as.POSIXct (paste(DAM13$Date, DAM13$Time), format = "%d-%b-%y %H:%M:%S")
#Keep only rows recorded while the monitor status is 1 (experiment running);
#rows with status 51 (pre-experiment) are dropped.
DAM13<-subset(DAM13,DAM13$Status==1)
# Return the timestamp of the last positive activity count for one channel,
# or NA when the channel never records any activity.
#
# data  - numeric vector of activity counts
# times - vector of timestamps aligned element-wise with `data`
GetDeathTime <- function(data, times) {
  active <- which(data > 0)
  if (length(active) == 0) {
    return(NA)
  }
  # Last index with positive activity = estimated time of death.
  times[active[length(active)]]
}
# Time of the last positive activity reading for each of the 32 channels
# (columns 11:42); NA means the channel never recorded activity.
death.times <- lapply(DAM13[, 11:42], GetDeathTime, DAM13$CalDateTime)
# Hours survived from the start of recording to the last activity.
# BUG FIX: the original initialised tmp to -1 and then tested is.na(tmp[i]),
# so the NA branch was unreachable and NA death times reached difftime();
# the check now inspects the death time itself.
hours.at.death <- vapply(death.times, function(dt) {
  if (is.na(dt)) {
    NA_real_
  } else {
    as.numeric(difftime(dt, DAM13$CalDateTime[1], units = "hours"))
  }
}, numeric(1))
# Channel column names without typing "Channel1".."Channel32" repeatedly.
AllChannels <- paste0("Channel", 1:32)
# BUG FIX: DAM13(AllChannels) called the data frame as a function (an
# error); plain column indexing selects the activity channels.
DAM13[, AllChannels]
# NOTE(review): length("CalDateTime") is 1, so this shows row 1 of the
# timestamp column — possibly nrow(DAM13) (the last row) was intended.
DAM13[length("CalDateTime"), "CalDateTime"]
# Candidate monitor files for a future batch import.
filenames <- list.files(path="C:/DAMIsolationStarvationMale",
                        pattern="xyz+.*txt")
#Survival plotting code
library(survival)
Survdata <- read.csv (file = "SurvivalDataR.csv", header = TRUE, sep = ",")
# All observations are coded as events (status 1), i.e. no censoring.
surv.object<-Surv(Survdata$Age,rep(1,length(Survdata$Age)))
SurvCurve <- survfit(surv.object~Survdata$Trt)
SurvComp <- survdiff(surv.object~Survdata$Trt)
plot(SurvCurve, col=c(1,2))
print(SurvComp)
# BUG FIX: survdiff objects have no plot() method, so plot(SurvComp)
# errored; the log-rank test is already reported by print(SurvComp) above.
|
5e337e50e7d8d1dd391898a319aa76cfc6fbe3e0
|
26c8f804713c611eb5c931d09a2a71f526ead746
|
/sl_pairs_script.r
|
4b3b6b06d656402b14f7088b681dbea9eb2e033b
|
[] |
no_license
|
djinnome/crop
|
f57c2daec28bb040af87164706756a9f01f07a45
|
c3f409c5efc4d518b560b160e4afdb78d7824aec
|
refs/heads/master
| 2021-01-11T06:36:15.885581
| 2017-02-06T21:19:23
| 2017-02-06T21:19:23
| 81,134,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 945
|
r
|
sl_pairs_script.r
|
##jmd
##6.5.12
##sl_pairs_script.r
# Predict synthetic-lethal gene pairs via simulated double knockouts.
# FBA helpers and objects (multiGeneKO, s.al, gpr, fva.ub, genes) come from
# the sourced farm_header.r.
source('/msc/neurospora/FBA/farm/farm_header.r')
# Growth fraction at or below which a single knockout counts as essential.
non.essential.thresh <- 0.05
#non-essential genes
g.mat <- read.csv('singleKO_growth.csv', row.names=1)
# Treat missing growth measurements as zero growth.
g.mat$growth[is.na(g.mat$growth)] <- 0
ne.genes <- setdiff(genes, g.mat[g.mat$growth<=non.essential.thresh, 'genes'])
# Manually exclude three additional genes from the candidate set.
ne.genes <- setdiff(ne.genes, genes[c('ace-7', 'ace-8', 'thi-4')])
#construct matrix of gene pairs: all unordered pairs of non-essential genes
sl.gene.mat <- t(combn(x=ne.genes, m=2))
colnames(sl.gene.mat) <- c('g1', 'g2')
# Row names of the form "geneA,geneB" identify each pair.
rownames(sl.gene.mat) <- apply(sl.gene.mat, 1, FUN=function(x){ paste(x[1], ',', x[2], sep='') })
# Simulated growth for every double knockout.
sl.growth <- multiGeneKO(a=s.al, ncu=sl.gene.mat, gpr=gpr, fba.ub=fva.ub, quiet=TRUE)
# Pairs with (numerically) zero growth are the predicted synthetic lethals;
# wrapping in length() also reports how many there are.
length(sl.v <- names(sl.growth)[!is.na(sl.growth) & sl.growth<=10**-7])
# Translate systematic gene IDs into readable names for the pair labels.
names(sl.v) <- sapply(strsplit(x=sl.v, split=','), FUN=function(x){ paste(names(genes)[match(x, genes)], collapse=',') })
sl.v <- sl.v[order(names(sl.v))]
write.csv(sl.v, 'pred_synth_lethals.csv')
|
1bf20a218d81b4cfad45f5adb3acb0979449cf53
|
ae6803fae339a39e090694330342617b85c5a923
|
/test_runs.R
|
7751c36724d5df21ca968a95e5c0b2e053e7486a
|
[] |
no_license
|
blinatzju/Test
|
1157ce1a2c8aad3616a16534d9203e5c0d6bdfa0
|
576267be46d0a1092a06c2f339b610b3a44717e7
|
refs/heads/main
| 2023-06-25T01:43:42.231459
| 2021-07-17T15:08:14
| 2021-07-17T15:08:14
| 386,965,213
| 0
| 0
| null | 2021-07-17T14:59:20
| 2021-07-17T14:51:35
| null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
test_runs.R
|
# | (C) 2008-2021 Potsdam Institute for Climate Impact Research (PIK)
# | authors, and contributors see CITATION.cff file. This file is part
# | of MAgPIE and licensed under AGPL-3.0-or-later. Under Section 7 of
# | AGPL-3.0, you are granted additional permissions described in the
# | MAgPIE License Exception, version 1.0 (see LICENSE file).
# | Contact: magpie@pik-potsdam.de
# ----------------------------------------------------------
# description: test routine to run for new pull requests
# position: 5
# ----------------------------------------------------------
##### Version log (YYYYMMDD - Description - Author(s))
## 20200527 - Default SSP2 Baseline and Policy runs - FH,AM,EMJB,JPD
## Load lucode2 and gms to use setScenario later
library(lucode2)
library(gms)
# Load start_run(cfg) function which is needed to start MAgPIE runs
source("scripts/start_functions.R")
# Source default cfg. This loads the object "cfg" in R environment
source("config/default.cfg")
# choose a meaningful Pull Request (PR) flag
pr_flag <- "PR_MACC"
# Grab user name (used in the run titles below)
user <- Sys.info()[["user"]]
# Each run writes into an output folder named after its title.
cfg$results_folder <- "output/:title:"
## Create a set of runs based on default.cfg
# One run per combination of SSP scenario x MACC version x CO2-price path.
for(ssp in c("SSP2")) { ## Add SSP* here for testing other SSPs. Basic test should be for SSP2
  for(macc in c("PBL_2007","PBL_2019")) {
    for (co2_price_path in c("BAU","POL")) {
      cfg$gms$c57_macc_version <- macc
      if (co2_price_path == "BAU") {
        # Baseline: NPI scenario with matching price / bioenergy-demand paths.
        cfg <- setScenario(cfg,c(ssp,"NPI"))
        cfg$gms$c56_pollutant_prices <- "R2M41-SSP2-NPi" #update to most recent coupled runs asap
        cfg$gms$c60_2ndgen_biodem <- "R2M41-SSP2-NPi" ##update to most recent coupled runs asap
      } else if (co2_price_path == "POL"){
        # Policy: NDC scenario with SSP2-26 price / bioenergy-demand paths.
        cfg <- setScenario(cfg,c(ssp,"NDC"))
        cfg$gms$c56_pollutant_prices <- "SSPDB-SSP2-26-REMIND-MAGPIE" #update to most recent coupled runs asap
        cfg$gms$c60_2ndgen_biodem <- "SSPDB-SSP2-26-REMIND-MAGPIE" ##update to most recent coupled runs asap
      }
      cfg$title <- paste0(pr_flag,"_",user,"_",ssp,"-",co2_price_path,"_",macc) #Create easily distinguishable run title
      cfg$output <- c("rds_report") # Only run rds_report after model run
      start_run(cfg,codeCheck=TRUE) # Start MAgPIE run
      #cat(cfg$title)
    }
  }
}
|
4be03dd5b320e964799ca5d2b4f637b70dc19f7b
|
0c5f062e3bbc9d24b4d27b9fc9076378a6b2e914
|
/man/plotPost.Rd
|
a6bee3465bd6a67fe4574d1cc98697a4fea682b7
|
[
"MIT"
] |
permissive
|
mzheng3/DBDA2E-utilities
|
d7ee545b56cf9b820739af6a3889fca64430b610
|
2b2ee721313e22703e7d65da4216e459330ec2ae
|
refs/heads/master
| 2020-12-19T22:48:53.507407
| 2017-09-24T21:19:16
| 2017-09-24T21:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 878
|
rd
|
plotPost.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBDA2E.R
\name{plotPost}
\alias{plotPost}
\title{Title}
\usage{
plotPost(paramSampleVec, cenTend = c("mode", "median", "mean")[1],
compVal = NULL, ROPE = NULL, credMass = 0.95, HDItextPlace = 0.7,
xlab = NULL, xlim = NULL, yaxt = NULL, ylab = NULL, main = NULL,
cex = NULL, cex.lab = NULL, col = NULL, border = NULL,
showCurve = FALSE, breaks = NULL, ...)
}
\arguments{
\item{paramSampleVec}{paramSampleVec}
\item{cenTend}{cenTend}
\item{compVal}{compVal}
\item{ROPE}{ROPE}
\item{credMass}{credMass}
\item{HDItextPlace}{HDItextPlace}
\item{xlab}{xlab}
\item{xlim}{xlim}
\item{yaxt}{yaxt}
\item{ylab}{ylab}
\item{main}{main}
\item{cex}{cex}
\item{cex.lab}{cex.lab}
\item{col}{col}
\item{border}{border}
\item{showCurve}{showCurve}
\item{breaks}{breaks}
}
\description{
Title
}
|
5f1ba94181a955032c8ec622dd5c34b2f3146ca6
|
92f821481a4430514c981aeb464a63ccd52939c3
|
/Wordcloud.R
|
19b0ae172cde001a3a1e405228fc505e7fa3cb5f
|
[] |
no_license
|
DongyuanZhou/MA615-Brand-Data-Analysis
|
ed28fb8bf0f71c82a9bde228ef66bd69da1de97a
|
4b118d78d7524f3fa867a56f0ba213e916342cbc
|
refs/heads/master
| 2021-09-07T12:20:42.191117
| 2018-02-22T20:48:19
| 2018-02-22T20:48:19
| 114,302,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,345
|
r
|
Wordcloud.R
|
library(plyr)
library(tidytext)
library(tidyverse)
library(twitteR)
library(stringr)
library(wordcloud)
library(reshape2)
library(RgoogleMaps)
library(ggmap)
library(ggplot2)
library(maptools)
library(sp)
library(tm)
library(NLP)
library(devtools)
library(streamR)
library(RCurl)
library(dplyr)
library(ROAuth)
library(graphTweets)
library(igraph)
library(readr)
library(leaflet)
library(rgdal)
library(SnowballC)
######################
##### Word cloud #####
######################
## Tweet texts previously pulled from Twitter for each brand.
sb.df <- read.csv("sb_text.csv", header = TRUE, sep = ",",stringsAsFactors = FALSE)
dd.df <- read.csv("dd_text.csv", header = TRUE, sep = ",",stringsAsFactors = FALSE)
## Stop words: tidytext's standard list plus brand-specific noise tokens.
data(stop_words)
my_stop_word <- data.frame(word=character(9))
my_stop_word$word <- c("star","bucks","starbucks","dunkin","donuts","dunkindonuts","https","rt","ed")
## word cloud for starbucks
## single word: tokenize tweets, drop stop words, keep alphabetic tokens
sb.word <- sb.df %>%
  group_by(id) %>%
  unnest_tokens(word, text) %>%
  anti_join(stop_words) %>%
  anti_join(my_stop_word) %>%
  filter(str_detect(word, "^[a-z']+$")) %>%
  ungroup()
sb.word.freq <- sb.word %>% count(word, sort = TRUE)
sb.word.freq %>% with(wordcloud(word, n, max.words = 50, colors = brewer.pal(n = 8, "Dark2"), random.order = FALSE, rot.per = 0.35))
## bigrams
sb.bigrams <- sb.df %>%
  group_by(id) %>%
  unnest_tokens(bigram, text, token = "ngrams", n = 2) %>%
  ungroup()
sb.bigrams.freq <- sb.bigrams %>%
  count(bigram, sort = TRUE)
sb.bigrams.seperated <- sb.bigrams.freq %>%
  separate(bigram, c("word1", "word2"), sep = " ")
## BUG FIX: the final str_detect filter checked word1 twice, so bigrams with
## a non-alphabetic SECOND word slipped through; it now validates word2.
sb.bigrams.filtered <- sb.bigrams.seperated %>%
  filter(!word1 %in% stop_words$word) %>%
  filter(!word2 %in% stop_words$word) %>%
  filter(!word1 %in% my_stop_word$word) %>%
  filter(!word2 %in% my_stop_word$word) %>%
  filter(str_detect(word1, "^[a-z']+$")) %>%
  filter(str_detect(word2, "^[a-z']+$"))
sb.bigrams.united <- sb.bigrams.filtered %>%
  unite(bigram, word1, word2, sep = " ")
sb.bigrams.united %>% with(wordcloud(bigram, n, max.words = 50, colors = brewer.pal(8, "Dark2"), random.order = FALSE, rot.per = 0.35))
## word cloud for dunkin donuts
## single word: tokenize tweets, drop stop words, keep alphabetic tokens
dd.word <- dd.df %>%
  group_by(id) %>%
  unnest_tokens(word, text) %>%
  anti_join(stop_words) %>%
  anti_join(my_stop_word) %>%
  filter(str_detect(word, "^[a-z']+$")) %>%
  ungroup()
dd.word.freq <- dd.word %>% count(word, sort = TRUE)
dd.word.freq %>% with(wordcloud(word, n, max.words = 50, colors = brewer.pal(n = 8, "Dark2"), random.order = FALSE, rot.per = 0.35))
## bigrams
dd.bigrams <- dd.df %>%
  group_by(id) %>%
  unnest_tokens(bigram, text, token = "ngrams", n = 2) %>%
  ungroup()
dd.bigrams.freq <- dd.bigrams %>%
  count(bigram, sort = TRUE)
dd.bigrams.seperated <- dd.bigrams.freq %>%
  separate(bigram, c("word1", "word2"), sep = " ")
## BUG FIX: the final str_detect filter checked word1 twice, so bigrams with
## a non-alphabetic SECOND word slipped through; it now validates word2.
dd.bigrams.filtered <- dd.bigrams.seperated %>%
  filter(!word1 %in% stop_words$word) %>%
  filter(!word2 %in% stop_words$word) %>%
  filter(!word1 %in% my_stop_word$word) %>%
  filter(!word2 %in% my_stop_word$word) %>%
  filter(str_detect(word1, "^[a-z']+$")) %>%
  filter(str_detect(word2, "^[a-z']+$"))
dd.bigrams.united <- dd.bigrams.filtered %>%
  unite(bigram, word1, word2, sep = " ")
dd.bigrams.united %>% with(wordcloud(bigram, n, max.words = 50, colors = brewer.pal(8, "Dark2"), random.order = FALSE, rot.per = 0.35))
|
4b862de1a959c93a67f5e83b19b432a050ef2f99
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dlnm/examples/ps.Rd.R
|
18d987277c14e5fd05feaa1ae240d14589ed16cf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
ps.Rd.R
|
# Extracted example script for dlnm::ps; the upstream help page currently
# ships no runnable example ("to be added soon").
library(dlnm)
### Name: ps
### Title: Generate a Basis Matrix for P-Splines
### Aliases: ps
### Keywords: smooth
### ** Examples
# to be added soon
|
b1855113077a9352eefd4297df85dfce6b313a20
|
abee2469068f2bbb363992fee86079042ccfbd71
|
/processing/prot_csv/Trial_1/plot.R
|
2acb9d4bac463bc518e044b3e9ef6dd92869e124
|
[] |
no_license
|
naoki709mm/thesis
|
5fe5bfa8ee02a9bfc604ad7130f6c4220e4de29b
|
8bd9cd03173f5935f5a266d99f85a64228bd40a6
|
refs/heads/master
| 2020-04-06T22:13:16.132169
| 2020-02-04T09:24:00
| 2020-02-04T09:24:00
| 157,828,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
plot.R
|
# Plot the pressure trace from one DAM CSV export in four time windows.
setwd("./")
# NOTE(review): `pattern` is a regular expression, so "*.csv" does not mean
# "files ending in .csv"; "\\.csv$" would — confirm intent.
csv_list <- list.files(pattern = "*.csv")
# Column names for the sensor export: time, distance, pressure.
data_name <- c("time","dis","pre")
# Only the 13th file in the directory listing is inspected here.
data <- read.csv(csv_list[13])
names(data) <- data_name
# Width of one panel's window: 30000 samples split over 4 panels.
a=30000/4
par(mfcol=c(4,1))
# Pressure vs. time, one consecutive quarter of samples 8000..38000 per panel.
for(i in 1:4){
  plot(data$time,data$pre,xlim=c(8000+(i-1)*a,8000+i*a),ylim=c(400,1024),type="l")
}
|
1cf5a93913a654efc9f4fbf6df09833219c88685
|
57fb6ec71a43ff3181219c26e1f8886f961c5f71
|
/man/plotratetable.Rd
|
7bc60092bb58c5dd860965be5263401004daaf19
|
[] |
no_license
|
cran/cohorttools
|
bfcac04f739e8af036f5d8dd6a7bdacda6a8d554
|
0029bdd514ac9de75ee44d36c1333fe4f15ec1b4
|
refs/heads/master
| 2022-12-05T12:56:49.832995
| 2022-11-23T11:00:04
| 2022-11-23T11:00:04
| 218,080,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 732
|
rd
|
plotratetable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cohorttools.R
\name{plotratetable}
\alias{plotratetable}
\title{Function makes plot(s) from ratetable}
\usage{
plotratetable(rt, RR = FALSE)
}
\arguments{
\item{rt}{Rate table produced by function mkratetable}
\item{RR}{Boolean, if TRUE rate ratios plotted}
}
\value{
ggplot object, or list if multiple variables in rate table
}
\description{
Function makes plot(s) from ratetable
}
\examples{
library(ggplot2)
library(survival)
tmp.lt1<-mkratetable(Surv(time,status)~ ph.ecog,data=lung,add.RR = FALSE)
plotratetable(tmp.lt1)
tmp.lt2<-mkratetable(Surv(time,status)~ sex+ph.ecog+cut(age,4),data=lung,add.RR=TRUE,lowest.N=1)
plotratetable(tmp.lt2,TRUE)
}
|
d2c13d723956e4a3a5a29c61be67dafd54c0c991
|
d202001d19455376b5651fd487c1dab09c786c29
|
/plot4.R
|
d7254bc6748c80de54b7093e1963a51fa5961e68
|
[] |
no_license
|
xinpingluo/ExData_Plotting1
|
0b88ada91e1e7805dcedda1ab4649162dbbc4056
|
ad4d36190989efe184da17f1b817ae11ce9d5acc
|
refs/heads/master
| 2021-01-10T21:23:15.343275
| 2014-11-06T15:58:24
| 2014-11-06T15:58:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,310
|
r
|
plot4.R
|
# load data and headers
# NOTE(review): hard-coded setwd() makes the script machine-specific; kept
# from the original so the relative file paths below still resolve.
setwd("C:/Users/DE-77691/Documents/1_Work/Tasks/Coursera/Lec4/W1/")
# BUG FIX: the original read the data into `household_power_consumption`
# but every later reference used `data`, which resolved to base R's data()
# function and errored ("object of type 'closure' is not subsettable").
# Read straight into `data` instead.
# skip = 66637 jumps to the rows for 2007-02-01/02; nrows = 2880 = 2 days
# of minute readings; the real header row is read separately below.
data <- read.table('household_power_consumption.txt', header=T, sep=';', skip=66637, nrows=2880)
header <- read.table('household_power_consumption.txt', header=T, sep=';', nrows=1)
colnames(data) <- colnames(header)
#create timestamp from the Date and Time columns
data$Timestamp <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png(filename="plot4.png")
# set 2x2 plot
par(mfrow=c(2,2))
#create line plot for top left: global active power
plot(data$Timestamp, data$Global_active_power, xlab="", ylab="Global Active Power", type='l')
#create line plot for top right: voltage
plot(data$Timestamp, data$Voltage, xlab="datetime", ylab="Voltage", type='l')
#create plot for bottom left with 3 sub-metering lines and a legend
plot(data$Timestamp, data$Sub_metering_1, xlab="", ylab="Energy sub metering", type='l')
lines(data$Timestamp, data$Sub_metering_2, col='red')
lines(data$Timestamp, data$Sub_metering_3, col='blue')
legend("topright", col=c('black', 'red', 'blue'), legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lty=1)
#create plot for bottom right: global reactive power
plot(data$Timestamp, data$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type='l')
# Close the device to flush the PNG to disk.
dev.off()
|
b8a0bd96bd69b6ef6ff7b2f1bf28d1a45062ab59
|
36ffd73da2aa9f5e8f2cc0c327ae0ec273612137
|
/man/esri2sf.Rd
|
b7f74faaab4cd683cd968eee6652fdacdf32ea20
|
[] |
no_license
|
Memo1986/esri2sf
|
ba2d50cad08f91e0eb53450669d7ca6b92da4c40
|
d57a71a0729e63926ca9bf032c7ec9dc9f0f41d5
|
refs/heads/master
| 2023-08-02T20:17:35.225796
| 2021-09-27T02:55:36
| 2021-09-27T02:55:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,901
|
rd
|
esri2sf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esri2sf.R
\name{esri2sf}
\alias{esri2sf}
\alias{esri2df}
\alias{esrimeta}
\title{Import data from ESRI's ArcGIS Server}
\usage{
esri2sf(
url,
outFields = c("*"),
where = "1=1",
bbox = NULL,
token = "",
geomType = NULL,
crs = 4326,
progress = FALSE,
...
)
esri2df(
url,
outFields = c("*"),
where = "1=1",
token = "",
progress = FALSE,
...
)
esrimeta(url, token = "", fields = FALSE)
}
\arguments{
\item{url}{character string for service url, e.g. \url{https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Census_USA/MapServer/}.}
\item{outFields}{vector of fields you want to include. default is '*' for all fields".}
\item{where}{string for where condition. Default is \code{1=1} for all rows.}
\item{bbox}{bbox class object from \code{\link[sf:st_bbox]{sf::st_bbox()}}.}
\item{token}{string for authentication token (if needed).}
\item{geomType}{string specifying the layer geometry ('esriGeometryPolygon' or 'esriGeometryPoint' or 'esriGeometryPolyline' - if \code{NULL}, will try to be inferred from the server)}
\item{crs}{coordinate reference system (see \code{\link[sf:sf]{sf::st_sf()}}). Should either be NULL or a CRS that can be handled by GDAL through sf::st_sf(). Default is 4326. NULL returns the feature in the same CRS that the layer is hosted as in the Feature/Map Server.}
\item{progress}{Show progress bar with \code{\link[pbapply:pbapply]{pbapply::pblapply()}} if TRUE. Default FALSE.}
\item{...}{additional named parameters to pass to the query. ex) "resultRecordCount = 3"}
\item{fields}{\code{esrimeta} returns dataframe with fields if TRUE. Default FALSE.}
}
\value{
sf dataframe (\code{esri2sf}) or tibble dataframe (\code{esri2df}) or list or dataframe (\code{esrimeta}).
}
\description{
These functions are the interface to the user.
}
\section{Functions}{
\itemize{
\item \code{esri2sf}: Retrieve spatial object
\item \code{esri2df}: Retrieve table object (no spatial data).
\item \code{esrimeta}: Retrieve layer metadata
}}
\note{
When accessing services with multiple layers, the layer number must be
specified at the end of the service url (e.g.,
\url{https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Census_USA/MapServer/3}).
#' The list of layers and their respective id numbers can be found by viewing
the service's url in a web browser and viewing the "Layers" heading (e.g.,
\url{https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Census_USA/MapServer/#mapLayerList}).
}
\examples{
baseURL <- "https://sampleserver1.arcgisonline.com/ArcGIS/rest/"
url <- paste0(baseURL, "services/Demographics/ESRI_Census_USA/MapServer/3")
outFields <- c("POP2007", "POP2000")
where <- "STATE_NAME = 'Michigan'"
df <- esri2sf(url, outFields = outFields, where = where)
plot(df)
}
|
d9cd20e9550b1021ddd3f90645caab74f32ddf93
|
7df184a0ef265bb144ebf912f9348788f866fe4c
|
/man/draw.ipca.Rd
|
0b22a90243b84249d5f9d088926b735ae2c16264
|
[] |
no_license
|
DATAUNIRIO/BETS
|
43b60119c1cfc676c807696fa98dc5d943f44aa8
|
9fe70387d699d8db6878b5dd99dd13ecc2f94752
|
refs/heads/master
| 2021-01-21T20:23:54.349996
| 2017-05-23T01:57:25
| 2017-05-23T01:57:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 655
|
rd
|
draw.ipca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw.ipca.R
\name{draw.ipca}
\alias{draw.ipca}
\title{Create a chart of the National Consumer Price Index time series}
\usage{
draw.ipca()
}
\arguments{
\item{start}{A \code{character}. The stating period of the series.}
\item{ylim}{A \code{numeric vector}. Y axis limits.}
\item{xlim}{A \code{numeric vector}. x axis limits.}
}
\value{
An image file is saved in the 'graphs' folder, under the BETS installation directory.
}
\description{
Creates a plot of series 13522 (NCPI), along with series 4466 (NCPI core)
}
\author{
Talitha Speranza \email{talitha.speranza@fgv.br}
}
|
f7e7f6f576b439f9c46593a06bb60d9e48d508c6
|
37800c21f243e1a19e856ea8b7f14ee2744573fc
|
/fevd.R
|
7f6c90f5b7cd24437f76f76984f3d09eb155e8d9
|
[] |
no_license
|
KlayLee/Wavelet-DY
|
a174f2c79ef759953459315e595022ee77b63df4
|
dfbf62c601668f352fa1951e50d0baf7b6c46271
|
refs/heads/main
| 2023-06-02T16:28:35.227350
| 2021-06-28T07:38:35
| 2021-06-28T07:38:35
| 380,890,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,591
|
r
|
fevd.R
|
# Dispatch the impulse-response computation on the class of the fitted model.
#
# est     - fitted model: vars::VAR ("varest"), vars::vec2var ("vec2var"),
#           or a BigVAR fit ("BigVAR.results")
# n.ahead - number of horizons for the impulse responses
# ...     - absorbs extra arguments passed by callers (e.g. fevd() below
#           passes boot/ortho, which only apply to the vars backend)
#
# BUG FIX: comparing class(est) with ==/%in% breaks when class() returns a
# vector of length > 1 (common for S4/inherited classes): in R >= 4.2 an
# `if` condition of length > 1 is an error. inherits() is the robust test.
irf <- function(est, n.ahead, ...) {
  if (inherits(est, "varest") || inherits(est, "vec2var")) {
    # Model from the vars package: delegate to its irf() implementation
    # (unorthogonalized, no bootstrap, matching the FEVD computation below).
    return(vars::irf(est, n.ahead = n.ahead, boot = F, ortho = F))
  } else if (inherits(est, "BigVAR.results")) {
    # BigVAR model: use the local implementation irf.bigvar().
    return(irf.bigvar(est, n.ahead = n.ahead))
  } else {
    stop("Unsupported class of estimate")
  }
}
#' @import methods
# S4 accessor: expose the residual matrix stored in the @resids slot of a
# BigVAR.results fit, so the generic residuals() used by fevd()/fftFEVD()
# works for BigVAR models as well as vars models.
methods::setMethod("residuals", signature(object = "BigVAR.results"), function(object) {
  object@resids
})
# Unorthogonalized impulse responses (Wold / MA coefficients) for a
# BigVAR.results fit. Output mirrors vars::irf()$irf: a list with one
# (n.ahead + 1) x k matrix per variable, rows being horizons 0..n.ahead.
irf.bigvar <- function(est, n.ahead) {
  coefs <- est@betaPred
  horizon <- n.ahead
  n_lags <- est@lagmax
  n_vars <- nrow(coefs)
  # Drop the intercept column; the rest are the stacked lag matrices.
  coefs <- coefs[, -1]
  # One k x k coefficient matrix per lag.
  lag_mats <- lapply(seq_len(n_lags), function(l) coefs[, seq_len(n_vars) + (l - 1) * n_vars])
  zero_mat <- matrix(0, nrow = n_vars, ncol = n_vars)
  # Seed the MA recursion: zero responses before time 0, identity at time 0,
  # then placeholders for the horizon steps.
  # NOTE(review): as in the original, 1:(n_lags - 1) misbehaves when
  # lagmax == 1 (it yields c(1, 0), inserting two leading zero matrices) —
  # confirm the model always has lagmax > 1.
  psi <- c(lapply(1:(n_lags - 1), function(i) zero_mat),
           list(diag(n_vars)),
           lapply(1:horizon, function(i) zero_mat))
  # MA recursion: Psi_h = sum over lags l of B_l' Psi_{h-l}.
  for (h in 1:horizon) {
    for (l in 1:n_lags) {
      psi[[n_lags + h]] <- t(lag_mats[[l]]) %*% psi[[n_lags + h - l]] + psi[[n_lags + h]]
    }
  }
  # Keep horizons 0..n.ahead only.
  psi <- psi[n_lags:length(psi)]
  # Transpose into per-variable matrices of responses across horizons.
  list(irf = lapply(seq_len(n_vars), function(v) t(sapply(psi, function(m) m[v, ]))))
}
}
#' Compute a forecast error vector decomposition in recursive identification scheme
#'
#' This function computes the standard forecast error vector decomposition given the
#' estimate of the VAR.
#'
#' @param est the VAR estimate from the vars package
#' @param n.ahead how many periods ahead should be taken into account
#' @param no.corr boolean if the off-diagonal elements should be set to 0.
#' @return a matrix that corresponds to contribution of ith variable to jth variance of forecast
#' @export
#' @author Tomas Krehlik \email{tomas.krehlik@@gmail.com}
#' @import vars
#' @import urca
#' @import stats
fevd <- function(est, n.ahead = 100, no.corr = F) {
  # Unorthogonalized (Wold) impulse responses up to the forecast horizon.
  wold <- irf(est, n.ahead = n.ahead, boot = F, ortho = F)
  # Reshape into one k x k coefficient matrix per horizon step 0..n.ahead.
  wold <- lapply(1:(n.ahead + 1), function(h) sapply(wold$irf, function(m) m[h, ]))
  # Residual covariance estimate.
  resid_mat <- residuals(est)
  Sigma <- t(resid_mat) %*% resid_mat / nrow(resid_mat)
  # Optionally zero out the off-diagonal covariances so only the model
  # coefficients drive the decomposition (mainly useful for Lasso fits).
  if (no.corr) {
    Sigma <- diag(diag(Sigma))
  }
  # Denominator: total forecast-error variance of each variable.
  total_var <- diag(Reduce("+", lapply(wold, function(m) m %*% Sigma %*% t(m))))
  # Numerator: squared orthogonalized responses accumulated over horizons.
  contrib <- Reduce("+", lapply(wold, function(m) (chol(Sigma) %*% t(m))^2))
  # Ratio per variable: share of variable i in variable j's forecast variance.
  t(sapply(1:ncol(contrib), function(j) contrib[, j] / total_var[j]))
}
#' Compute a FFT transform of forecast error vector decomposition in recursive identification scheme
#'
#' This function computes the decomposition of standard forecast error vector decomposition given the
#' estimate of the VAR. The decomposition is done according to the Stiassny (1996)
#'
#' @param est the VAR estimate from the vars package
#' @param n.ahead how many periods ahead should be taken into account
#' @param no.corr boolean if the off-diagonal elements should be set to 0.
#' @param range defines the frequency partitions to which the spillover should be decomposed
#' @return a list of matrices that corresponds to contribution of ith variable to jth variance of forecast
#'
#' @export
#' @author Tomas Krehlik \email{tomas.krehlik@@gmail.com}
#' @import vars
#' @import urca
#' @import stats
fftFEVD <- function(est, n.ahead = 100, no.corr = FALSE, range) {
    # The decomposition approximates the unconditional IRF; warn when the
    # horizon is too short to resolve the requested frequency bands.
    if (n.ahead < 100) {
        warning("The frequency decomposition works with unconditional IRF. You have opted for
            IRF with horizon lower than 100 periods. This might cause trouble, some frequencies
            might not be estimable depending on the bounds settings.")
    }
    # Unorthogonalised impulse responses, i.e. the Wold (MA) coefficients,
    # hence the name Phi.
    Phi <- irf(est, n.ahead = n.ahead, boot = FALSE, ortho = FALSE)
    # Fourier transform of each variable's impulse responses.
    fftir <- lapply(Phi$irf, function(i) apply(i, 2, fft))
    # Reshape into a list with one matrix per frequency ordinate.
    fftir <- lapply(1:(n.ahead+1), function(j) sapply(fftir, function(i) i[j,]))
    # Sample covariance matrix of the VAR residuals.
    Sigma <- t(residuals(est))%*%residuals(est) / nrow(residuals(est))
    # Optionally zero out the off-diagonal covariances so that only the
    # coefficient effects remain; primarily useful for Lasso estimates.
    if (no.corr) {
        Sigma <- diag(diag(Sigma))
    }
    # Standardising variance: only the frequencies listed in `range` enter.
    # This matters in the co-integration setting where one wants to
    # standardise by the variance from some frequency up to 2*pi only.
    denom <- diag(
        Re(
            Reduce('+', lapply(fftir, function(i)
                    i %*% Sigma %*% t(Conj(i)) / (n.ahead + 1)
                )[range])
            )
        )
    # Numerator of the ratio at every individual frequency.
    enum <- lapply(fftir, function(i)
            ( abs( i %*% t(chol(Sigma)) ) )^2 / (n.ahead+1)
        )
    # One decomposition table per frequency, standardised row-wise by denom.
    tab <- lapply(enum, function(i) t(sapply(1:nrow(i), function(j) i[j,]/(denom[j]))))
    return(tab)
}
#' Compute a forecast error vector decomposition in generalised VAR scheme.
#'
#' This function computes the standard forecast error vector decomposition given the
#' estimate of the VAR.
#' There are common complaints and requests whether the computation is ok and why
#' it does not follow the original Pesaran Shin (1998) article. So let me clear two things
#' out. First, the \eqn{\sigma} in the equation on page 20 refers to elements of \eqn{\Sigma}, not standard
#' deviation. Second, the indexing is wrong, it should be \eqn{\sigma_jj} not \eqn{\sigma_ii}. Look, for example,
#' to Diebold and Yilmaz (2012) or ECB WP by Dees, Holly, Pesaran, and Smith (2007)
#' for the correct version.
#'
#' @param est the VAR estimate from the vars package
#' @param n.ahead how many periods ahead should be taken into account
#' @param no.corr boolean if the off-diagonal elements should be set to 0.
#' @return a matrix that corresponds to contribution of ith variable to jth variance of forecast
#'
#' @export
#' @author Tomas Krehlik \email{tomas.krehlik@@gmail.com}
#' @import vars
#' @import urca
#' @import stats
genFEVD <- function(est, n.ahead = 100, no.corr = FALSE) {
    # Unorthogonalised impulse responses, i.e. the Wold (MA) coefficients,
    # hence the name Phi.
    Phi <- irf(est, n.ahead = n.ahead, boot = FALSE, ortho = FALSE)
    # Reshape into a list with one coefficient matrix per horizon.
    Phi <- lapply(1:(n.ahead + 1), function(j) sapply(Phi$irf, function(i) i[j,]))
    # Sample covariance matrix of the VAR residuals.
    Sigma <- t(residuals(est))%*%residuals(est) / nrow(residuals(est))
    # Optionally zero out the off-diagonal covariances.
    if (no.corr) {
        Sigma <- diag(diag(Sigma))
    }
    # Total forecast error variance of each variable. Computed before any
    # nullification so the ratios stay well-behaved.
    denom <- diag(
        Reduce('+', lapply(Phi, function(i)
                i %*% Sigma %*% t(i)
            )
        )
    )
    # Numerator of the generalised FEVD ratio, see e.g. Diebold-Yilmaz 2012.
    enum <- Reduce('+', lapply(Phi, function(i) (i%*%Sigma)^2))
    # Elements of the FEVD table.
    tab <- sapply(1:nrow(enum), function(j)
            enum[j,] / ( denom[j] * diag(Sigma) )
        )
    # Generalised shocks are not orthogonal, so rows do not sum to one;
    # standardise each row explicitly.
    tab <- t(apply(tab, 2, function(i) i / sum(i) ))
    return(tab)
}
#' Compute a DWT transform of forecast error vector decomposition in generalised VAR scheme.
#'
#' This function computes the decomposition of standard forecast error vector decomposition given the
#' estimate of the VAR. The decomposition is done according to the Stiassny (1996)
#'
#' @param est the VAR estimate from the vars package
#' @param n.ahead how many periods ahead should be taken into account
#' @param no.corr boolean if the off-diagonal elements should be set to 0.
#' @param range defines the frequency partitions to which the spillover should be decomposed
#' @return a list of matrices that corresponds to contribution of ith variable to jth variance of forecast
#'
#' @export
#' @author Tomas Krehlik \email{tomas.krehlik@@gmail.com}
#' @import vars
#' @import urca
#' @import stats
dwtGenFEVD <- function(est, n.ahead = 100, no.corr = FALSE, range) {
    # The decomposition approximates the unconditional IRF; warn when the
    # horizon is too short to resolve the requested frequency bands.
    if (n.ahead < 100) {
        warning("The frequency decomposition works with unconditional IRF. You have opted for
            IRF with horizon lower than 100 periods. This might cause trouble, some frequencies
            might not be estimable depending on the bounds settings.")
    }
    # Unorthogonalised impulse responses, i.e. the Wold (MA) coefficients,
    # hence the name Phi.
    Phi <- irf(est, n.ahead = n.ahead, boot = FALSE, ortho = FALSE)
    # Discrete wavelet transform of the impulse responses.
    # NOTE(review): dwt() is not defined in this file; the code assumes its
    # result is indexable as i[j,] below. Depending on which wavelet package
    # supplies dwt(), the return value may be a list of coefficient vectors
    # instead of a matrix -- verify against the intended dwt().
    dwtir <- lapply(Phi$irf, function(i) apply(i, 2, dwt))
    # Reshape into a list with one matrix per ordinate.
    dwtir <- lapply(1:(n.ahead+1), function(j) sapply(dwtir, function(i) i[j,]))
    # Sample covariance matrix of the VAR residuals.
    Sigma <- t(residuals(est))%*%residuals(est) / nrow(residuals(est))
    # Optionally zero out the off-diagonal covariances.
    if (no.corr) {
        Sigma <- diag(diag(Sigma))
    }
    # Standardising variance: only the partitions listed in `range` enter.
    # Computed before any nullification so the ratios stay well-behaved.
    # This matters in the co-integration setting where one wants to
    # standardise by the variance from some frequency up to 2*pi only.
    denom <- diag(
        Re(
            Reduce('+', lapply(dwtir, function(i)
                    i %*% Sigma %*% t( Conj(i) ) / (n.ahead + 1)
                )[range]
            )
        )
    )
    # Numerator of the generalised ratio at every ordinate.
    enum <- lapply(dwtir, function(i)
            ( abs( i %*% Sigma ) )^2 / (n.ahead+1)
        )
    # One decomposition table per ordinate.
    tab <- lapply(enum, function(i)
            sapply(1:nrow(i), function(j)
                    i[j, ] / ( denom[j] * diag(Sigma) )
                )
        )
    # Totals over the requested range, used for row-wise standardisation.
    tot <- apply(Reduce('+', tab[range]), 2, sum)
    # Standardise so each row sums to one over the range.
    tab <- lapply(tab, function(i) t(i)/tot)
    return(tab)
}
|
a556887b108e6f9f5d28e4e5213cee862955e904
|
6dde5e79e31f29db901c81e4286fea4fa6adbc48
|
/demo/growthreg.R
|
0e7cd5d7f3de0b5939fcb43a68e0ed7588f5773f
|
[] |
no_license
|
cran/fda
|
21b10e67f4edd97731a37848d103ccc0ef015f5a
|
68dfa29e2575fb45f84eb34497bb0e2bb795540f
|
refs/heads/master
| 2023-06-08T07:08:07.321404
| 2023-05-23T22:32:07
| 2023-05-23T22:32:07
| 17,696,014
| 23
| 19
| null | 2022-03-13T17:58:28
| 2014-03-13T04:40:29
|
R
|
UTF-8
|
R
| false
| false
| 4,383
|
r
|
growthreg.R
|
# ---------------------------------------------------------------------
# Register the velocity curves for the girls
# (fda demo: continuous registration of growth-velocity curves)
# ---------------------------------------------------------------------
# load the data
load("growthfd")
hgtffdPar <- growthfd$hgtffdPar
hgtffd <- hgtffdPar$fd
age <- c( seq(1, 2, 0.25), seq(3, 8, 1), seq(8.5, 18, 0.5))
nage <- length(age)
ncasef <- 54
# set up the basis for function Wfd
rng <- c(1,18)
nbasisw <- 15
norder <- 5
basisw <- create.bspline.basis(rng, nbasisw, norder)
# set up the mean velocity curve as the preliminary target for
# registration
hgtfmeanfd <- mean(hgtffd)
y0fd <- deriv(hgtfmeanfd, 1)
# curves to be registered
yfd <- deriv(hgtffd, 1)
# set up functional parameter object for function Wfd
coef0 <- matrix(0,nbasisw,ncasef)
Wfd0 <- fd(coef0, basisw)
lambda <- 10
WfdPar <- fdPar(Wfd0, 2, lambda)
# register the data. It might be a good idea to disable
# buffered output in the Misc menu for the R Console in order
# to track progress of this fairly slow process.
reglist <- register.fd(y0fd, yfd, WfdPar)
yregfd <- reglist$regfd # registered curves
Wfd <- reglist$Wfd # functions defining warping functions
# evaluate the registered curves and warping functions
agefine <- seq(1, 18, len=101)
ymat <- eval.fd(agefine, yfd)
y0vec <- eval.fd(agefine, y0fd)
yregmat <- eval.fd(agefine, yregfd)
warpmat <- eval.monfd(agefine, Wfd)
# rescale the warping functions so they map [1, 18] onto [1, 18]
warpmat <- 1 + 17*warpmat/(matrix(1,101,1)%*%warpmat[101,])
# plot the results for each girl:
# blue: unregistered curve
# red: target curve
# green: registered curve
par(mfrow=c(1,2),pty="s",ask=TRUE)
for (i in 1:ncasef) {
  plot (agefine, ymat[,i], type="l", ylim=c(0,20), col=4,
        xlab="Year", ylab="Velocity", main=paste("Case",i))
  lines(agefine, y0vec, lty=2, col=2)
  lines(agefine, yregmat[,i], col=3)
  plot (agefine, warpmat[,i], type="l",
        xlab="Clock year", ylab="Biological Year")
  abline(0,1,lty=2)
}
# Comments: we see that not all curves are properly registered.
# Curves 7, 11, 13 and 25, to mention a few, are so far from
# the target that the registration is unsuccessful. This
# argues for a preliminary landmark registration of the
# velocity curves prior to the continuous registration
# process. However, we will see some improvement below.
# compute the new mean curve as a target
y0fd2 <- mean(yregfd)
# plot the unregistered mean and the registered mean
par(mfrow=c(1,1),pty="s",ask=FALSE)
plot(y0fd2, col=4, xlab="Year", ylab="Mean Velocity")
lines(y0fd, col=3)
legend(10,15, c("Registered", "Unregistered"), lty=c(1,1), col=c(4,3))
# Comment: The new mean has a sharper peak at the pubertal
# growth spurt, which is what we wanted to achieve.
# define the registered curves and the new curves to be registered
yfd2 <- yregfd
# register the curves again, this time to a better target
reglist2 <- register.fd(y0fd2, yfd2, WfdPar)
yregfd2 <- reglist2$regfd # registered curves
Wfd2 <- reglist2$Wfd # functions defining warping functions
y0vec2 <- eval.fd(agefine, y0fd2)
yregmat2 <- eval.fd(agefine, yregfd2)
warpmat2 <- eval.monfd(agefine, Wfd2)
warpmat2 <- 1 + 17*warpmat2/(matrix(1,101,1)%*%warpmat2[101,])
# plot the results for each girl:
# blue: unregistered curve
# red: target curve
# green: registered curve
par(mfrow=c(1,2),pty="s",ask=TRUE)
for (i in 1:ncasef) {
  # plot velocity curves
  plot (agefine, ymat[,i], type="l", ylim=c(0,20), col=4,
        xlab="Year", ylab="Velocity", main=paste("Case",i))
  lines(agefine, y0vec2, lty=2, col=2)
  lines(agefine, yregmat[,i], col=3, lty=3)
  lines(agefine, yregmat2[,i], col=3)
  # plot warping functions
  plot (agefine, warpmat2[,i], type="l",
        xlab="Clock year", ylab="Biological Year")
  abline(0,1,lty=2)
}
# compute the new mean curve as a target
y0fd3 <- mean(yregfd2)
# plot the unregistered mean and the registered mean
par(mfrow=c(1,1),pty="s",ask=FALSE)
plot(y0fd3, col=4, xlab="Year", ylab="Mean Velocity")
lines(y0fd2, col=3)
lines(y0fd, col=3, lty=3)
legend(10,15, c("Registered twice", "Registered once", "Unregistered"),
       lty=c(1,1,3), col=c(4,3,3))
# Comment: The second round of registered made hardly any
# difference for either the individual curves or the mean curve.
|
9e442cc1cf9a6e14b55bd7143a89a873ea6f6a00
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSocrata/examples/write.socrata.Rd.R
|
043d52a207fa6f0d438b54562da097c6e4e4f6c3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
r
|
write.socrata.Rd.R
|
library(RSocrata)
### Name: write.socrata
### Title: Write to a Socrata dataset (full replace or upsert)
### Aliases: write.socrata
### ** Examples
# Store user email and password.
# The fallbacks look like the public demo credentials used in the RSocrata
# examples (soda.demo.socrata.com) -- presumably not real secrets, but
# confirm before reusing this pattern elsewhere.
socrataEmail <- Sys.getenv("SOCRATA_EMAIL", "mark.silverberg+soda.demo@socrata.com")
socrataPassword <- Sys.getenv("SOCRATA_PASSWORD", "7vFDsGFDUG")
datasetToAddToUrl <- "https://soda.demo.socrata.com/resource/xh6g-yugi.json" # dataset
# Generate some data: one random (x, y) point in [-1000, 1000]^2
x <- sample(-1000:1000, 1)
y <- sample(-1000:1000, 1)
df_in <- data.frame(x,y)
# Upload to Socrata using UPSERT semantics (add/update rows, keep the rest)
write.socrata(df_in,datasetToAddToUrl,"UPSERT",socrataEmail,socrataPassword)
|
543f787153c31b4122a373ec1fd53d06461a21bb
|
89809ddebb058648cef55bc925b2c2da92d0ae7f
|
/R/validate_and_errors_methods.R
|
dfd237f34f12b81f9b4dc53b319895003cba917f
|
[
"MIT"
] |
permissive
|
calejero/RswissknifeDB
|
62dc797a9521c4d40c7433cfa899e61a56126c42
|
63902f2ed9cf2459efa7d5e60d126964cfe90244
|
refs/heads/master
| 2020-12-04T12:57:02.417850
| 2020-06-04T20:12:12
| 2020-06-04T20:12:12
| 231,773,424
| 1
| 0
|
MIT
| 2020-06-04T16:30:26
| 2020-01-04T14:05:24
|
R
|
UTF-8
|
R
| false
| false
| 1,100
|
r
|
validate_and_errors_methods.R
|
# Report an invalid database operation to the user.
#
# Emits a framed, human-readable block of four messages describing the
# rejected operation and the connection it was attempted on. Called purely
# for its side effect; returns NULL invisibly (the value of message()).
#
# Args:
#   type:       the operation that was rejected (character).
#   connection: name of the connection it was attempted on (character).
ManagementDBOperationsErrors <- function(type, connection) {
  # paste() around constant strings was redundant; output is unchanged.
  message("====================================================")
  message(paste("Wrong operation:", type))
  message(paste("Connection name:", connection))
  message("====================================================")
}
# Decide whether a query is safe to run on a read-only (slave) connection.
#
# Returns FALSE when the lower-cased query contains any write/DDL keyword,
# TRUE otherwise. Matching is plain substring matching, so e.g. a column
# named "last_update" also triggers a rejection -- this mirrors the original
# behaviour.
#
# Args:
#   query: SQL statement to check (character scalar).
#
# Uses base grepl() with an alternation pattern instead of the previous
# stringr::str_detect() call, removing the third-party dependency.
ValidateSlaveQuery <- function(query) {
  write.keywords <- c("create", "truncate", "drop", "insert", "optimize",
                      "update", "delete")
  pattern <- paste(write.keywords, collapse = "|")
  !any(grepl(pattern, tolower(query)))
}
# Check that the columns of data.df have the same R classes as the matching
# columns of the target database table; returns TRUE on full agreement.
#
# NOTE(review): SelectByParamsDB is a project helper defined elsewhere;
# "@param1" is presumably substituted with db.table -- confirm against its
# definition.
ValidateInsertColumns <- function(data.df, db.table, db.name) {
  # Fetch the target table so we can inspect its column classes.
  query.df <- SelectByParamsDB("SELECT * FROM @param1", db.table, db.name)
  # Keep only the table columns that also exist in the data to be inserted.
  query.df <- query.df[, colnames(query.df) %in% colnames(data.df), ]
  # Pair the class of each table column with the class of each data column.
  # NOTE(review): this pairs by position over ALL of data.df's columns; if
  # data.df has extra columns or a different column order than the table,
  # the comparison is wrong or errors out -- verify what callers pass in.
  result.df = data.frame(query = unlist(lapply(query.df, class)),
                         data = unlist(lapply(data.df, class)),
                         stringsAsFactors = FALSE)
  # flag is 1 when the classes agree, 0 otherwise.
  result.df$flag <- ifelse(result.df$query == result.df$data, 1, 0)
  # TRUE only if every compared column pair has a matching class.
  if (min(result.df$flag) == 0) {
    return(FALSE)
  }
  else{
    return(TRUE)
  }
}
|
0780cfb9b74c39d4ab0977f36c3428c972a79886
|
307b0f73161701e48e24192aea10713c4c76db13
|
/R/transform-lancet.R
|
ec4624f75a5a8051c9a7d800ce9121baff0f707f
|
[] |
no_license
|
spgarbet/tangram
|
aef70355a5aa28cc39015bb270a7a5fd9ab4333c
|
bd3fc4b47018ba47982f2cfbe25b0b93d1023d4f
|
refs/heads/master
| 2023-02-21T03:07:43.695509
| 2023-02-09T17:47:22
| 2023-02-09T17:47:22
| 65,498,245
| 58
| 3
| null | 2020-03-24T15:28:05
| 2016-08-11T20:07:01
|
R
|
UTF-8
|
R
| false
| false
| 1,663
|
r
|
transform-lancet.R
|
# tangram a general purpose table toolkit for R
# Copyright (C) 2017-2018 Shawn Garbett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#' Style Bundle for Lancet style
#'
#' List of lists, should contain a "Type" entry with a function to determine type of vector passed in.
#' Next entries are keyed off returned types from function, and represent the type of a row.
#' The returned list should contain the same list of types, and represents the type of a column. Thus it now returns
#' a function to process the intersection of those two types.
#'
#' @include cell-lancet.R
#' @keywords data
#' @export
#'
lancet <- list(
  Type = hmisc_data_type,              # classifies a vector as Numerical/Categorical
  Numerical = list(                    # row type: Numerical, keyed by column type
    Numerical = summarize_spearman,    # num x num  -> Spearman correlation
    Categorical = summarize_kruskal_horz  # num x cat -> Kruskal-Wallis (horizontal)
  ),
  Categorical = list(                  # row type: Categorical, keyed by column type
    Numerical = summarize_kruskal_vert,   # cat x num -> Kruskal-Wallis (vertical)
    Categorical = summarize_chisq         # cat x cat -> chi-squared test
  ),
  Cell = lancet_cell,                  # cell renderer for the Lancet look
  Footnote = "N is the number of non-missing value. ^1^Kruskal-Wallis. ^2^Pearson. ^3^Wilcoxon."
)
|
f840b0c422d8373e2d8f2a5422318a6e0bf651f7
|
f8adc1feb8c2c0893090fa8cd80cfcd6ad984c24
|
/man/wtc.sig.Rd
|
6b3b4bb1cc2504a23271943f0363a2173c447cb3
|
[] |
no_license
|
wafels/biwavelet
|
5eb0770613d90bf26d0161002c1d9ad3ae076f23
|
c551b36852d2c14e216f15b5a427996afcd1444c
|
refs/heads/master
| 2021-01-18T16:52:01.055064
| 2014-09-10T07:47:37
| 2014-09-10T07:47:37
| 24,968,435
| 0
| 0
| null | 2016-09-09T15:17:46
| 2014-10-09T01:27:27
| null |
UTF-8
|
R
| false
| false
| 2,603
|
rd
|
wtc.sig.Rd
|
\name{wtc.sig}
\alias{wtc.sig}
\title{
Determine significance of wavelet coherence
}
\description{
Determine significance of wavelet coherence
}
\usage{
wtc.sig (nrands = 300, lag1, dt, ntimesteps, pad = TRUE, dj = 1/12, s0, J1,
max.scale=NULL, mother = c("morlet", "paul", "dog"), sig.level = 0.95,
quiet = FALSE)
}
\arguments{
\item{nrands}{
number of Monte Carlo randomizations. Default is 300.
}
\item{lag1}{
vector containing the AR(1) coefficient of each time series.
}
\item{dt}{
length of a time step.
}
\item{ntimesteps}{
number of time steps in time series.
}
\item{pad}{
pad the values with zeros to increase the speed of the transform. Default is \code{TRUE}.
}
\item{dj}{
spacing between successive scales. Default is 1/12.
}
\item{s0}{
smallest scale of the wavelet. Default is \code{2*dt}
}
\item{J1}{
number of scales - 1.
}
\item{max.scale}{ maximum scale }
\item{mother}{
type of mother wavelet function to use. Can be set to \code{morlet}, \code{dog},
or \code{paul}. Default is \code{morlet}. Significance testing is only available
for \code{morlet} wavelet.
}
\item{sig.level}{ significance level to compute. Default is \code{0.95} }
\item{quiet}{ Do not display progress bar. Default is \code{FALSE} }
}
\value{
Returns significance matrix containing the \code{sig.level} percentile of wavelet
coherence at each time step and scale.
}
\references{
Cazelles, B., M. Chavez, D. Berteaux, F. Menard, J. O. Vik, S. Jenouvrier, and
N. C. Stenseth. 2008. Wavelet analysis of ecological time series.
\emph{Oecologia} 156:287-304.
Grinsted, A., J. C. Moore, and S. Jevrejeva. 2004. Application of the cross
wavelet transform and wavelet coherence to geophysical time series.
\emph{Nonlinear Processes in Geophysics} 11:561-566.
Torrence, C., and G. P. Compo. 1998. A Practical Guide to Wavelet Analysis.
\emph{Bulletin of the American Meteorological Society} 79:61-78.
Torrence, C., and P. J. Webster. 1998. The annual cycle of persistence in the
El Nino/Southern Oscillation.
\emph{Quarterly Journal of the Royal Meteorological Society} 124:1985-2004.
}
\note{
The Monte Carlo randomizations can be extremely slow for large datasets.
For instance, 1000 randomizations of a dataset consisting of 1000 samples will
take ~30 minutes on a 2.66 GHz dual-core Xeon processor.
}
\author{
Tarik C. Gouhier (tarik.gouhier@gmail.com)
Code based on WTC MATLAB package written by Aslak Grinsted.
}
\examples{
## Not run: wtcsig=wtc.sig(nrands, lag1 = c(d1.ar1, d2.ar1), dt,
## pad, dj, J1, s0, mother = morlet)
}
|
0bb07121bd8d7c818a607909104fb84a3ec96a48
|
0f81dbed2957a99be8c0907ef956534343a2c5f7
|
/Step17_HiC_integration_TAD.r
|
19e94c5f219563dcc41699c7db3db473ec294e8a
|
[] |
no_license
|
NoonanLab/Geller_et_al_Enhancer_Screen
|
ffd10a9581dae946c03810e1b3625552ff3df90d
|
2e00efe9243cad10662fc787cab1af74ccdbaf28
|
refs/heads/master
| 2020-09-01T21:49:07.754006
| 2019-12-02T17:47:53
| 2019-12-02T17:47:53
| 219,067,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,576
|
r
|
Step17_HiC_integration_TAD.r
|
library(GenomicInteractions)
library(rtracklayer)
library(GenomicFeatures)
library(ggbio)
#import topolically-associating domains (TAD) derived from 50-kilobase contact frequency matrix and idenified from chromatin interactions generated in human neural precursor cells (NPC)
hNSC_50kb_tad <- import("/Users/evangeller/Desktop/enhancer_analysis/hic_analysis/NPC_50kb_TAD.bed")
#import biological effects generated by genetic disruption of genes and conserved regions within cortical enhancers active in human neural stem cells
S0X_exp <- makeGRangesFromDataFrame(read.table("/Users/evangeller/Desktop/enhancer_analysis/mageck/S0X_mageck_annotate_classified_PCA.txt", header= TRUE), keep.extra.columns= TRUE)
S0X_phenotype <- S0X_exp[(S0X_exp$t12.phenotype == "negative") | (S0X_exp$t12.phenotype == "positive"),]
#import gencode annotation of genes
gencode <- import.gff("/Users/evangeller/Desktop/enhancer_analysis/bed/gencode.v19.annotation.gtf")
gencode_gene <- gencode[gencode$type == "gene"]
gencode_gene$id <- gencode_gene$gene_name
gencode_gene_phenotype <- gencode_gene[gencode_gene$gene_name %in% S0X_phenotype$name,]
# Widen each genomic range in a strand-aware way: `upstream` bases are added
# on the 5' side and `downstream` bases on the 3' side of every range.
# Ranges on the "-" strand have the roles of start/end swapped; "*" ranges
# are treated like "+" ranges.
#
# Args:
#   x:          a GRanges-like object (uses strand()/start()/end() generics).
#   upstream:   bases to add on the 5' side (default 10 kb).
#   downstream: bases to add on the 3' side (default 10 kb).
# Returns the expanded object.
expandRange <- function(x, upstream = 10000, downstream = 10000) {
  strand_is_minus <- strand(x) == "-"
  on_plus <- which(!strand_is_minus)
  on_minus <- which(strand_is_minus)
  start(x)[on_plus] <- start(x)[on_plus] - upstream
  start(x)[on_minus] <- start(x)[on_minus] - downstream
  end(x)[on_plus] <- end(x)[on_plus] + downstream
  end(x)[on_minus] <- end(x)[on_minus] + upstream
  x
}
# Extend gene annotations by 10 kb on each side to approximate promoters.
gencode_gene_phenotype_promoter_gene <- expandRange(gencode_gene_phenotype)
# Import the enhancer-level summary of proliferation-altering regions.
enhancer_set <- makeGRangesFromDataFrame(read.table( "/Users/evangeller/Desktop/enhancer_analysis/mageck/S0X_enhancer_annotate_obs.txt", header= TRUE), keep.extra.columns= TRUE)
enhancer_set$id <- as.character(granges(enhancer_set))
# Keep H3K27ac ("ac") enhancers with at least one observed phenotype.
ac_set <- enhancer_set[enhancer_set$class == 'ac',]
ac_set_phenotype <- ac_set[ac_set$n.phenotypes.obs > 0,]
# Extend the H3K27ac enhancers by 10 kb on each side.
start(ac_set_phenotype) <- start(ac_set_phenotype) - 10000
end(ac_set_phenotype) <- end(ac_set_phenotype) + 10000
# Select enhancers overlapping a TAD identified in human neural precursor cells.
S0X_enhancer_assign <- ac_set_phenotype[ac_set_phenotype %over% hNSC_50kb_tad,]
# Associate proliferation-altering enhancers with proliferation-altering
# genes based on co-localization within TAD boundaries.
results <- GRanges()
for(i in seq_along(S0X_enhancer_assign)){
	# TADs touching this enhancer, and phenotype genes inside those TADs.
	local_domain <- hNSC_50kb_tad[hNSC_50kb_tad %over% S0X_enhancer_assign[i,]]
	local_gene_phenotypes <- gencode_gene_phenotype_promoter_gene[gencode_gene_phenotype_promoter_gene %over% local_domain]
	# One copy of the enhancer per co-localized gene.
	enhancer_gene_pair <- rep(S0X_enhancer_assign[i,], length(local_gene_phenotypes))
	enhancer_gene_pair$gene_name <- local_gene_phenotypes$id
	results <- c(results, enhancer_gene_pair)
}
# Look up the PC1 proliferation score of the gene paired with an enhancer.
# NOTE(review): reads the global S0X_phenotype defined above; assumes gene
# names are unique in S0X_phenotype -- duplicates would make sapply() below
# return a ragged result. Verify against the input table.
enhancer_gene_PC1 <- function(enhancer) {
	gene_id <- enhancer$gene_name
	gene_phenotype <- S0X_phenotype[S0X_phenotype$name == gene_id,]
	return(gene_phenotype$PC1)
}
# Attach each paired gene's PC1 score to the enhancer-gene interaction table.
mcols(results)$gene.PC1 <- t(sapply(results, enhancer_gene_PC1))
output <- as.data.frame(results, row.names = NULL)
write.table(output, "/Users/evangeller/Desktop/enhancer_analysis/hic_analysis/NPC_TAD_enhancer_gene_interaction.txt", row.names=F, sep=" ", quote= F)
|
433baea802b6d201ef0481e3907d01b77ce03d04
|
f253279e925166c89fca4de7aaddf1fb1a23758e
|
/Docker_intro/toy-env/src/source-me.R
|
d8d26255a8b8bb35b388b09d8122f67a233ad479
|
[] |
no_license
|
azh3/PublicWiki
|
9cf4d258512ff6f721b32de349ae26e57d380965
|
6e17b589d1e49cd2283be300bed56e018fd1648c
|
refs/heads/master
| 2022-12-22T01:58:41.551414
| 2020-09-26T09:45:27
| 2020-09-26T09:45:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
source-me.R
|
# Load the per-country primary forest loss data.
f <- "prim_forests_loss_area.csv"
# Fix: argument is `stringsAsFactors` (the misspelled `stringsAsFactor`
# only worked through partial argument matching).
d <- read.csv(f, stringsAsFactors = FALSE)
# Annual loss rate (%) over the 18-year window 2000-2018.
d$annual.rate <- ((d$arealoss / d$areaforest) / 18) * 100
d <- d[order(d$annual.rate), ]
# Plot one bar per country, sorted by annual deforestation rate.
png("defor.png", units = "in", width = 5, height = 6, res = 150)
# Fix: `beside = true` crashed with "object 'true' not found"; R booleans
# are TRUE/FALSE.
barplot(height = d$annual.rate,
        names.arg = d$fips,
        beside = TRUE, las = 2,
        main = "Tropical primary forest loss (2000-2018)",
        ylab = "Annual deforestation (%)",
        border = "black", axes = TRUE)
dev.off()
# Write the html page embedding the plot (fix: "/n" was meant to be "\n").
fileConn <- file("/home/app/index.html")
writeLines('<h1>My deforestation analysis app</h1>\n<img src="src/defor.png" alt="">', fileConn)
close(fileConn)
|
a5282c5a2fff6120c94e88b8675909a92e2f8cf8
|
abbc2dbebacaeb8a0b2ad9f1a57df6507eeccb6c
|
/man/plot.pickapoint.Rd
|
2c47da65be2bcfc7b175eb231771635688b2bffe
|
[] |
no_license
|
cran/probemod
|
d0e55d49b60a05773793edd6acf1d1dac705c39a
|
6a4a8822cebbd7607c2ecdf0f07f79f3fb5cd579
|
refs/heads/master
| 2020-09-24T14:05:48.233543
| 2015-04-22T00:00:00
| 2015-04-22T00:00:00
| 31,644,598
| 2
| 2
| null | 2015-04-23T18:15:08
| 2015-03-04T07:44:55
|
R
|
UTF-8
|
R
| false
| false
| 1,283
|
rd
|
plot.pickapoint.Rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/plot.pickapoint.R
\name{plot.pickapoint}
\alias{plot.pickapoint}
\title{Plot Function For Pick-a-Point}
\usage{
\method{plot}{pickapoint}(x, xlab = "", ylab = "", xlim = 0, ylim = 0,
axlwd = 10, cesize = 1.2, cilwd = 5, \dots)
}
\arguments{
\item{x}{An object of class \code{"pickapoint"}.}
\item{xlab}{A title for the x axis (character).}
\item{ylab}{A title for the y axis (character).}
\item{xlim}{Coordinates range for x axis (numeric vector). Determined by the range of the given data by default.}
\item{ylim}{Coordinates range for y axis (numeric vector). Determined by the range of the given data by default.}
\item{axlwd}{Axis line width (numeric vector). \code{axlwd=10} by default.}
\item{cesize}{Size of the conditional effect marker (numeric vector). \code{cesize=1.2} by default.}
\item{cilwd}{Conditional interval line width (numeric vector). \code{cilwd=5} by default.}
\item{\dots}{Additional arguments (not supported yet).}
}
\value{
none
}
\description{
Plot function for objects of class \code{"pickapoint"}.
}
\examples{
\dontrun{
myModel <- lm('DV ~ IV + MOD', data=someData)
papresults <- pickapoint(myModel, dv='DV', iv='IV', mod='MOD')
plot(papresults)
}
}
|
1486baaaa08d44a14b86f201d7156aad64ddd300
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/prioritizrdata/tests/test_salt_data.R
|
60ea9d67668a4675cb5ef6c986b78d2b7ecbaeb6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
test_salt_data.R
|
# Sanity checks for the bundled salt datasets: the planning-unit layer and
# the feature stack must load with the expected raster classes.
context("salt data")
test_that("salt_pu", {
  expect_is(salt_pu, "RasterLayer")
})
test_that("salt_features", {
  expect_is(salt_features, "RasterStack")
})
|
a7f62987472452e08863e81866a3dc00f7b2840b
|
31ea8595b1b023988c18875d71ce2a5202c5f3ea
|
/getdata/PA/run_analysis.R
|
0d02be0db3aae90a63343ed0b276ff3ddd338f98
|
[] |
no_license
|
datawrecker/datasciencecoursera
|
3fef8322c062442e2a8222e36bdf187462c295b3
|
ce1d0940fec6c0f4123d48b51a30598c24bbf074
|
refs/heads/master
| 2020-04-05T15:20:08.066152
| 2015-03-21T15:10:58
| 2015-03-21T15:10:58
| 31,636,947
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,433
|
r
|
run_analysis.R
|
# assume the file "getdata_projectfiles_UCI HAR Dataset.zip"
# already unzipped in the current directory
# get the indices and names for all features
features <- read.table("UCI HAR Dataset/features.txt", col.names=c("Seq", "Name"), stringsAsFactors=FALSE)
# get the indices of mean for each measurement
meanIdx <- grep("mean\\(\\)", features$Name)
# get the indices of standard deviation for each measurement
stdIdx <- grep("std\\(\\)", features$Name)
# get the indices for all the measurements of interest
featureIdx <- sort(c(meanIdx, stdIdx))
# clean up the names of measurements of interest
# (replace "-" with "_" and strip parentheses)
featureNames <- features$Name[featureIdx]
featureNames <- gsub("-", "_", featureNames)
featureNames <- gsub("\\(", "", featureNames)
featureNames <- gsub("\\)", "", featureNames)
# load the "activity labels" data
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names=c("ActivityId", "ActivityLabel"))
# extract features of interest from training datasets
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")[featureIdx]
names(X_train) <- featureNames
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
names(y_train) <- "ActivityId"
acts_train <- merge(y_train, activity_labels, sort=FALSE)
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
names(subject_train) <- "Subject"
# form the training dataset with features of interest
trainSet <- cbind(acts_train, subject_train, X_train)
# extract features of interest from test datasets
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")[featureIdx]
names(X_test) <- featureNames
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
names(y_test) <- "ActivityId"
acts_test <- merge(y_test, activity_labels, sort=FALSE)
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
names(subject_test) <- "Subject"
# form the test dataset with features of interest
testSet <- cbind(acts_test, subject_test, X_test)
# form the whole dataset with features of interest
allSet <- rbind(trainSet, testSet)
# aggregate data for required tidy dataset: mean of every feature
# per (activity, subject) pair
tidySet <- aggregate(allSet[, 4:length(names(allSet))], by=list(allSet$ActivityLabel, allSet$Subject), mean)
# make descriptive names for features of tidy dataset
tidyNames <- paste(featureNames, "Average", sep="_")
tidyNames <- c("Activity", "Subject", tidyNames)
names(tidySet) <- tidyNames
# persist the tidy dataset
# (fix: argument is `row.names`; `row.name` relied on partial matching)
write.table(tidySet, "har_tidy.txt", sep=",", row.names=FALSE)
|
a0d1d3542f5c6398c7595d3baf2fc5da35d2b0c1
|
c4650d8acabab510903632cedd757cf065b69c08
|
/R/data_timevar.R
|
9b2a1887741082c33cccb3381624420d21d6dfd3
|
[] |
no_license
|
cran/jlctree
|
8fadac87addb6c76b3bec810b39afa2b9410f4bd
|
989833ffb197efadf1cd6244f08fadb9a113e1f0
|
refs/heads/master
| 2021-06-11T18:01:31.911604
| 2021-04-15T10:10:02
| 2021-04-15T10:10:02
| 163,344,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
data_timevar.R
|
#' A simulated dataset with time-varying longitudinal outcome and covariates.
#'
#' A simulated dataset with time-varying longitudinal outcome,
#' time-to-event, and time-varying covariates.
#' The dataset is already converted into left-truncated right-censored (LTRC) format,
#' so that the Cox model with time-varying longitudinal outcome as a covariate can be fit.
#' See, for example, Fu and Simonoff (2017).
#'
#' @format A data frame with 866 rows and 11 variables.
#' The variables are as follows:
#' \describe{
#' \item{ID}{subject identifier (1 - 500)}
#' \item{X1}{continuous covariate between 0 and 1; time-varying}
#' \item{X2}{continuous covariate between 0 and 1; time-varying}
#' \item{X3}{binary covariate; time-varying}
#' \item{X4}{continuous covariate between 0 and 1; time-varying}
#' \item{X5}{categorical covariate taking values from {1, 2, 3, 4, 5}; time-varying}
#' \item{time_L}{left-truncated time}
#' \item{time_Y}{right-censored time}
#' \item{delta}{censoring indicator, 1 if censored and 0 otherwise}
#' \item{y}{longitudinal outcome; time-varying}
#' \item{g}{true latent class identifier {1, 2, 3, 4}, which is determined by
#' the outcomes of \eqn{1\{X1 > 0.5\}} and \eqn{1\{X2 > 0.5\}}, with some noise}
#' }
#'
#' @examples
#' # The data for the first five subjects (ID = 1 - 5):
#' #
#' # ID X1 X2 X3 X4 X5 time_L time_Y delta y g
#' # 1 0.27 0.53 0 0.0 4 0.09251632 1.536030 0 -0.2191137 1
#' # 1 0.49 0.71 1 0.0 5 1.53603028 4.366769 1 0.6429496 2
#' # 2 0.37 0.68 1 0.4 4 0.44674406 1.203560 0 0.5473454 2
#' # 2 0.65 0.67 0 0.2 5 1.20355968 1.330767 1 1.5515773 4
#' # 3 0.57 0.38 0 0.2 4 0.82944637 1.267248 0 1.1410397 3
#' # 3 0.79 0.19 1 0.4 4 1.26724819 5.749602 1 1.0888787 3
#' # 4 0.91 0.95 0 0.9 1 0.81237396 1.807741 1 2.2105303 4
#' # 5 0.20 0.12 1 0.3 5 0.80510669 1.029981 0 -0.1167814 1
#' # 5 0.02 0.31 0 0.4 5 1.02998145 6.404183 1 -0.1747389 1
#' @docType data
#' @keywords data
#' @name data_timevar
#' @usage data(data_timevar)
#' @references Fu, W. and Simonoff, J. S. (2017). Survival trees for left-truncated and right-censored data, with application to time-varying covariate data. Biostatistics, 18(2), 352-369.
NULL
|
955bf5f22942a5cb991650af2623e147077435c9
|
62cfdb440c9f81b63514c9e545add414dc4d5f63
|
/R/qat_data_varcontent_ncdf.R
|
a7c67c5517cc5e4f03df973959d9087ab26c1c23
|
[] |
no_license
|
cran/qat
|
7155052a40947f6e45ba216e8fd64a9da2926be4
|
92975a7e642997eac7b514210423eba2e099680c
|
refs/heads/master
| 2020-04-15T16:53:45.041112
| 2016-07-24T01:26:59
| 2016-07-24T01:26:59
| 17,698,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
qat_data_varcontent_ncdf.R
|
qat_data_varcontent_ncdf <- function(obj, numofvar) {
  ## Return the full data content of one variable from an open ncdf object.
  ##
  ## Args:
  ##   obj:      an opened ncdf file object (as returned by the ncdf4 package);
  ##             its $var component is the list of variable descriptors.
  ##   numofvar: index of the variable to read within obj$var.
  ##
  ## Returns: the values of the selected variable, as read by ncvar_get().
  selected_var <- obj$var[[numofvar]]
  ncvar_get(obj, selected_var)
}
|
72d0e9919a9518f1ba215c6dd715ffe76c39fc16
|
76b12a776e78e262dd471ab19de1190ab62fa283
|
/man/rmult.bcl.Rd
|
7cb5090757e07590b578b2846657cdec8aadd02e
|
[] |
no_license
|
AnestisTouloumis/SimCorMultRes
|
2535dc58bfb198c2643ac37bb34cef85f9f7b4d3
|
79026562789e403431246635296460f2bd630199
|
refs/heads/master
| 2023-07-23T12:58:52.856192
| 2023-07-10T13:03:15
| 2023-07-10T13:03:15
| 94,232,274
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,175
|
rd
|
rmult.bcl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rmult.bcl.R
\name{rmult.bcl}
\alias{rmult.bcl}
\title{Simulating Correlated Nominal Responses Conditional on a Marginal
Baseline-Category Logit Model Specification}
\usage{
rmult.bcl(clsize = clsize, ncategories = ncategories, betas = betas,
xformula = formula(xdata), xdata = parent.frame(),
cor.matrix = cor.matrix, rlatent = NULL)
}
\arguments{
\item{clsize}{integer indicating the common cluster size.}
\item{ncategories}{integer indicating the number of nominal response
categories.}
\item{betas}{numerical vector or matrix containing the value of the marginal
regression parameter vector.}
\item{xformula}{formula expression as in other marginal regression models
but without including a response variable.}
\item{xdata}{optional data frame containing the variables provided in
\code{xformula}.}
\item{cor.matrix}{matrix indicating the correlation matrix of the
multivariate normal distribution when the NORTA method is employed
(\code{rlatent = NULL}).}
\item{rlatent}{matrix with \code{(clsize * ncategories)} columns containing
realizations of the latent random vectors when the NORTA method is not
preferred. See details for more info.}
}
\value{
Returns a list that has components: \item{Ysim}{the simulated
nominal responses. Element (\eqn{i},\eqn{t}) represents the realization of
\eqn{Y_{it}}.} \item{simdata}{a data frame that includes the simulated
response variables (y), the covariates specified by \code{xformula},
subjects' identities (id) and the corresponding measurement occasions
(time).} \item{rlatent}{the latent random variables denoted by
\eqn{e^{NO}_{it}} in \cite{Touloumis (2016)}.}
}
\description{
Simulates correlated nominal responses assuming a baseline-category logit
model for the marginal probabilities.
}
\details{
The formulae are easier to read from either the Vignette or the Reference
Manual (both available
\href{https://CRAN.R-project.org/package=SimCorMultRes}{here}).
The assumed marginal baseline category logit model is \deqn{log
\frac{Pr(Y_{it}=j |x_{it})}{Pr(Y_{it}=J |x_{it})}=(\beta_{tj0}-\beta_{tJ0})
+ (\beta^{'}_{tj}-\beta^{'}_{tJ}) x_{it}=\beta^{*}_{tj0}+ \beta^{*'}_{tj}
x_{it}} For subject \eqn{i}, \eqn{Y_{it}} is the \eqn{t}-th nominal response
and \eqn{x_{it}} is the associated covariates vector. Also \eqn{\beta_{tj0}}
is the \eqn{j}-th category-specific intercept at the \eqn{t}-th measurement
occasion and \eqn{\beta_{tj}} is the \eqn{j}-th category-specific regression
parameter vector at the \eqn{t}-th measurement occasion.
The nominal response \eqn{Y_{it}} is obtained by extending the principle of
maximum random utility (\cite{McFadden, 1974}) as suggested in
\cite{Touloumis (2016)}.
\code{betas} should be provided as a numeric vector only when
\eqn{\beta_{tj0}=\beta_{j0}} and \eqn{\beta_{tj}=\beta_j} for all \eqn{t}.
Otherwise, \code{betas} must be provided as a numeric matrix with
\code{clsize} rows such that the \eqn{t}-th row contains the value of
(\eqn{\beta_{t10},\beta_{t1},\beta_{t20},\beta_{t2},...,\beta_{tJ0},
\beta_{tJ}}). In either case, \code{betas} should reflect the order of the
terms implied by \code{xformula}.
The appropriate use of \code{xformula} is \code{xformula = ~ covariates},
where \code{covariates} indicate the linear predictor as in other marginal
regression models.
The optional argument \code{xdata} should be provided in ``long'' format.
The NORTA method is the default option for simulating the latent random
vectors denoted by \eqn{e^{NO}_{itj}} in \cite{Touloumis (2016)}. In this
case, the algorithm forces \code{cor.matrix} to respect the assumption of
choice independence. To import simulated values for the latent random
vectors without utilizing the NORTA method, the user can employ the
\code{rlatent} argument. In this case, row \eqn{i} corresponds to subject
\eqn{i} and columns
\eqn{(t-1)*\code{ncategories}+1,...,t*\code{ncategories}} should contain the
realization of \eqn{e^{NO}_{it1},...,e^{NO}_{itJ}}, respectively, for
\eqn{t=1,\ldots,\code{clsize}}.
}
\examples{
## See Example 3.1 in the Vignette.
betas <- c(1, 3, 2, 1.25, 3.25, 1.75, 0.75, 2.75, 2.25, 0, 0, 0)
sample_size <- 500
categories_no <- 4
cluster_size <- 3
set.seed(1)
x1 <- rep(rnorm(sample_size), each = cluster_size)
x2 <- rnorm(sample_size * cluster_size)
xdata <- data.frame(x1, x2)
equicorrelation_matrix <- toeplitz(c(1, rep(0.95, cluster_size - 1)))
identity_matrix <- diag(categories_no)
latent_correlation_matrix <- kronecker(equicorrelation_matrix,
identity_matrix)
simulated_nominal_dataset <- rmult.bcl(clsize = cluster_size,
ncategories = categories_no, betas = betas, xformula = ~ x1 + x2,
xdata = xdata, cor.matrix = latent_correlation_matrix)
suppressPackageStartupMessages(library("multgee"))
nominal_gee_model <- nomLORgee(y ~ x1 + x2,
data = simulated_nominal_dataset$simdata, id = id, repeated = time,
LORstr = "time.exch")
round(coef(nominal_gee_model), 2)
}
\references{
Cario, M. C. and Nelson, B. L. (1997) \emph{Modeling and
generating random vectors with arbitrary marginal distributions and
correlation matrix}. Technical Report, Department of Industrial Engineering
and Management Sciences, Northwestern University, Evanston, Illinois.
Li, S. T. and Hammond, J. L. (1975) Generation of pseudorandom numbers with
specified univariate distributions and correlation coefficients. \emph{IEEE
Transactions on Systems, Man and Cybernetics} \bold{5}, 557--561.
McFadden, D. (1974) \emph{Conditional logit analysis of qualitative choice
behavior}. New York: Academic Press, 105--142.
Touloumis, A. (2016) Simulating Correlated Binary and Multinomial Responses
under Marginal Model Specification: The SimCorMultRes Package. \emph{The R
Journal} \bold{8}, 79--91.
Touloumis, A., Agresti, A. and Kateri, M. (2013) GEE for multinomial
responses using a local odds ratios parameterization. \emph{Biometrics}
\bold{69}, 633--640.
}
\seealso{
\code{\link{rbin}} for simulating correlated binary responses,
\code{\link{rmult.clm}}, \code{\link{rmult.crm}} and
\code{\link{rmult.acl}} for simulating correlated ordinal responses.
}
\author{
Anestis Touloumis
}
|
611143cc517a9caee5ba2c1ba1b849ea305d0570
|
8ab5c170e4e7158a5bd473730f23ee23b5eacab3
|
/programs/shiny/m_preview.R
|
d973ab69f20283d7a8f235e83b3624c826421b64
|
[] |
no_license
|
stbnjenkins/rinpharma_workshop_2021
|
0efca283b0c6be8c1f1ef2ff62134f4295b4e213
|
35b6dd3b7fb19b25e41bbf2200c7c0f87523dc80
|
refs/heads/main
| 2023-09-04T23:16:04.225919
| 2021-10-25T14:48:26
| 2021-10-25T14:48:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
m_preview.R
|
# Shiny module UI for the data-preview pane.
# id: the module id. NS(id) builds the namespacing helper; `ns` is
# constructed but not yet used -- presumably kept for when real inputs
# are added (TODO confirm). The function currently returns the
# placeholder string "Data Preview" as the pane's content.
m_preview_ui <- function(id) {
ns <- NS(id)
"Data Preview"
}
# Shiny module server for the data-preview pane.
# id: the module id matching m_preview_ui.
# list_reactive_datasets: unused so far -- presumably a list of reactive
# datasets to be previewed; verify intended use when the body is filled in.
# The moduleServer body is an empty stub.
m_preview_srv <- function(id, list_reactive_datasets) {
moduleServer(
id,
function(input, output, session) {
}
)
}
|
197e7191ec4569fd4199a0d3b829698f76ba0ac5
|
35fa3b8b482e51784120e2451bdab584faae07a9
|
/ex_3.R
|
a33383391d2d77cd53fc7a4218212a2f9682f41c
|
[] |
no_license
|
Adnei/ds_repo
|
c0d6cac85566bf78e79bc5d320bd7158e63598e2
|
25a0f13e8651e5d2fe2fe8b7db6e00d09b3dfc0f
|
refs/heads/main
| 2023-08-24T19:46:11.548465
| 2021-10-20T10:06:47
| 2021-10-20T10:06:47
| 412,187,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
ex_3.R
|
# Repeated-resampling experiment: estimate false/true positive rates of a
# price-band rule for predicting harvest season.
# NOTE(review): library() is preferred over require() for hard dependencies;
# require() only warns (returns FALSE) when a package is missing.
require('ggplot2')
require('dplyr')
require('caret')
require('lubridate')
# Number of resampling repetitions.
N_EXPERIMENTS <- 30
# NOTE(review): head=T relies on partial argument matching (header=) and on
# the reassignable T/F aliases; header=TRUE / stringsAsFactors=FALSE is safer.
dataset <- read.table("soja_milho_modificado.csv", sep="|", dec=",", head=T, stringsAsFactors=F)
# Pre-allocate one row of rates per experiment:
# tfp = false-positive rate, tvp = true-positive rate.
conf_mtx_stats <- data.frame(
tfp = c(rep(NA,N_EXPERIMENTS)),
tvp = c(rep(NA, N_EXPERIMENTS))
)
for (exp_idx in c(1:N_EXPERIMENTS)){
# Draw a 25% random sample and order it by observation id.
training_sample <- dataset %>%
slice_sample(prop=0.25) %>%
arrange(obs)
#Removing the training sample from the original dataset
#testing.df <- dataset %>% filter(! obs %in% training_sample$obs)
# Parse day/month/year date strings into Date objects.
training_sample$data <- dmy(training_sample$data)
#Identifying harvest time
# "actual": months January through May count as harvest time.
actual <- month(training_sample$data) >= 1 & month(training_sample$data) <= 5
# "predicted": year-specific soy price bands used as a harvest-time proxy.
predicted <- (training_sample$ano == 2014 & training_sample$soja >= 67) | (training_sample$ano == 2015 & training_sample$soja >= 58 & training_sample$soja <= 62)
conf_mtx <- confusionMatrix(data=factor(predicted), reference=factor(actual))
# | VN | FN |
# | FP | VP |
# NOTE(review): the bare `conf_mtx` below is a no-op inside a for loop
# (no auto-printing at top level of a loop); use print(conf_mtx) if
# console output was intended.
conf_mtx
# Row 2 of the table is predicted==TRUE; columns are the reference values,
# so [2,1] = false positives and [2,2] = true positives.
fp <- conf_mtx$table[2,1]
vp <- conf_mtx$table[2,2]
conf_mtx_stats$tfp[exp_idx] <- fp / length(actual)
conf_mtx_stats$tvp[exp_idx] <- vp / length(actual)
}
# Order the per-experiment rates by true-positive rate.
sort_stats <- conf_mtx_stats %>% arrange(tvp)
|
97780294fe4bbdce22542ff1d7e3ebd3b498d598
|
8833844a3423e7defb638c8231183fe025a35e21
|
/Plot3.R
|
9ef12854fc66cdfda4e23f1f4bfa71976a615523
|
[] |
no_license
|
vanni1/ExData_Plotting1
|
31b7dba0a625829de0accb510473043ec4130c3b
|
ed5b59f4db2e6d3136c3ea1c277bef0ea403d537
|
refs/heads/master
| 2021-05-14T04:34:10.619058
| 2018-01-11T10:43:52
| 2018-01-11T10:43:52
| 116,647,568
| 0
| 0
| null | 2018-01-08T08:03:23
| 2018-01-08T08:03:23
| null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
Plot3.R
|
## download, unzip and read household_power_consumption file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url=url,destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
# NOTE(review): `file` shadows base::file() for the rest of the session,
# and `=` is used for assignment; prefer a different name and `<-`.
file = "household_power_consumption.txt"
# "?" marks missing readings in this dataset, so map it to NA on read.
HPC<-read.delim(file, header = TRUE, sep = ";", na.strings = "?")
str(HPC)
## retain only data for 2 dates : 1-2 feb 2007, convert factors to a date-time variable
HPC <- HPC[grep("^(1/2/2007|2/2/2007)$",HPC$Date),]
# Combine the Date and Time columns into a single datetime column
# (strptime returns POSIXlt).
HPC$datetime <- strptime(paste(as.character(HPC$Date),as.character(HPC$Time),sep=" "),format="%d/%m/%Y %H:%M:%S")
# Plot 3
# Empty frame first (type="n"), then overlay the three sub-metering series.
with(HPC,plot(datetime,Sub_metering_1+Sub_metering_2+Sub_metering_3,type="n",ylab = "Energy sub metering", ylim = c(0,38)))
with(HPC,lines(datetime,Sub_metering_3,col="blue"))
with(HPC,lines(datetime,Sub_metering_2,col="red"))
with(HPC,lines(datetime,Sub_metering_1,col="black"))
# NOTE(review): the positional `95` is taken as legend()'s y coordinate and
# is ignored because x is the keyword position "topright"; it can be dropped.
legend("topright", 95, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1))
# Copy the on-screen plot to a PNG file, then close the PNG device.
dev.copy(png,"Plot3.png")
dev.off()
|
c05d45676ca67edef5633b0366b38a35147fe1ea
|
e02fae6e544a11fc147d2b7110d05ac55c87ba1f
|
/cachematrix.R
|
ea8d9dfbb1d86ed5f141397d65eef11ed2e896f2
|
[] |
no_license
|
gatsalvatge/ProgrammingAssignment2
|
0990c7cebc98523068ce32720b1dd681f2f9201f
|
bd186b0eebee05bf1b57508bcc6adddcf854b48e
|
refs/heads/master
| 2020-12-11T07:56:27.873085
| 2015-06-17T18:22:32
| 2015-06-17T18:22:32
| 37,519,374
| 0
| 0
| null | 2015-06-16T08:59:24
| 2015-06-16T08:59:24
| null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## This function creates a special "matrix" object that can cache its inverse
## Create a special "matrix" object that can cache its inverse.
##
## Args:
##   x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
##
## Returns: a list of four accessor functions:
##   set(y)     -- replace the stored matrix and invalidate the cache
##   get()      -- return the stored matrix
##   setinv(i)  -- store a computed inverse in the cache
##   getinv()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL means "not computed yet".
  m <- NULL
  # BUG FIX: the original declared `set <- function()` (no parameter) while
  # its body assigned `y`, so every call to set() failed with an
  # "unused argument" / "object 'y' not found" error. `y` must be a parameter.
  set <- function(y) {
    x <<- y
    m <<- NULL   # new matrix => previously cached inverse is stale
  }
  get <- function() x
  setinv <- function(solve) m <<- solve
  getinv <- function() m
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function computes the inverse of the special "matrix" returned by
## `makeCacheMatrix` above. If the inverse has already been calculated
## (and the matrix has not changed), then `cacheSolve` should retrieve the
## inverse from the cache
## Compute (or retrieve) the inverse of the special "matrix" object built by
## makeCacheMatrix. On a cache hit the stored inverse is returned directly;
## on a miss, solve() computes it and the result is cached for next time.
##
## Args:
##   x:   a makeCacheMatrix-style object exposing get/getinv/setinv.
##   ...: extra arguments forwarded to solve().
##
## Returns: the matrix inverse of x's stored matrix.
cacheSolve <- function(x = matrix(), ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
f57739f7f832e5d519fc0bca04aa51dbd51299fd
|
86347e19447a2ee4a2b65cb3e1d68e5ac26d82c1
|
/man/VariableEnv.Rd
|
4e035c0d9b48bc2873cb275b0f45143ecdbf6324
|
[
"MIT"
] |
permissive
|
brshipley/megaSDM
|
a9e8cd4de0affaba17fdcda5c3489bc2ab7312dd
|
5d285705e295d168ba26435bc453b5242b0ac2dd
|
refs/heads/master
| 2023-06-22T14:46:43.898117
| 2023-06-09T13:36:19
| 2023-06-09T13:36:19
| 206,993,880
| 18
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,625
|
rd
|
VariableEnv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VariableEnv.R
\name{VariableEnv}
\alias{VariableEnv}
\title{Use species-specific sets of environmental data for SDMs}
\usage{
VariableEnv(occlist, bglist, env_vars, occ_output, bg_output)
}
\arguments{
\item{occlist}{a list of .csv files, each containing the occurrence
points of a given taxon. The files should be named the same as the taxon
of interest (e.g.,: ".../Canis_lupus.csv").}
\item{bglist}{a list of .csv files corresponding to the background points
provided for each taxon. This list of files should be order in the same
way as the files given by \code{occlist}.}
\item{env_vars}{a vector containing the names of the environmental variables
(must exactly match column names) to keep for each species as a character
strings separated by commas. For example, \code{c("Bio1,Bio3", "Bio1,Bio12")}.
Like above, this vector should be in the same order as the lists of
species/background points.}
\item{occ_output}{the directory where output occurrence files will be placed.}
\item{bg_output}{the directory where output background files will be placed.}
}
\value{
Writes .csv files of species occurrences to the sub-directory given by the \code{occ_output} argument
and .csv files of background points to the sub-directory given by the \code{bg_output} argument.
}
\description{
Using different sets of environmental variables to model species
distributions can help to make more informative species distribution
models. This function allows for the modelling of each species based
upon a unique subset of the environmental variables.
}
|
33ddea1605060ff90cc69122815f6b16cf25a109
|
b9e54258e540f0a0447045729bb4eecb0e490426
|
/Bölüm 05 - Vektörlerde İstatistiksel İşlemler/5.2 - Serbestlik Derecesi.R
|
817ba369278b0bd6a833f4e63e261c52e86c4e82
|
[] |
no_license
|
sudedanisman/RUdemy
|
b36b67b9e875206a5424f33cc784fd13506f8d8d
|
28a9814706873f5d2e5985e4ba795354144d52c4
|
refs/heads/master
| 2023-01-30T01:54:26.321218
| 2020-12-14T11:36:00
| 2020-12-14T11:36:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43
|
r
|
5.2 - Serbestlik Derecesi.R
|
# Degrees of freedom (serbestlik derecesi) example: sample size minus one.
x <- c(1,2,3,4,5)
# NOTE(review): the name `sd` shadows the built-in stats::sd() for the rest
# of this session; a name like `dof` would be safer.
sd <- length(x) -1
sd
|
1d948a5f46fb59509534ca7d44acba591b7bee97
|
0ba08455f27022f23296c06f57a6da24fdd1168d
|
/Homeworks/homework1.R
|
c1b168eb8aa8559ad51446759b0fa33c5eea8f38
|
[] |
no_license
|
lbraun/applied_mathematics
|
ba44c1f1f15387c9c5b80be99639d4cac2b0f8d8
|
a077b676e34c90d2f3e5858efc15d1d5e57aef79
|
refs/heads/master
| 2021-05-11T18:13:24.544412
| 2018-02-05T08:39:27
| 2018-02-05T08:39:27
| 117,818,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,867
|
r
|
homework1.R
|
#
# Load the US air-pollution dataset; the file is an R expression dump, so
# source() returns a list whose $value component holds the data frame.
usair.dat<-source(paste(getwd(), "/Data/chap3usair.dat", sep = ""))$value
panel.hist <- function(x, ...) {
  ## Diagonal panel function for pairs(): draws a cyan histogram of x,
  ## rescaled so the tallest bar fills the panel.
  ## Extra arguments in ... are forwarded to rect().
  usr <- par("usr")
  on.exit(par(usr))                 # restore the user coordinates on exit
  par(usr = c(usr[1:2], 0, 1.5))    # keep the x-range, expand y for the bars
  counts <- hist(x, plot = FALSE)
  bar_heights <- counts$counts / max(counts$counts)
  edges <- counts$breaks
  n_edges <- length(edges)
  rect(edges[-n_edges], 0, edges[-1], bar_heights, col = "cyan", ...)
}
# Scatterplot matrix: off-diagonal panels show least-squares (solid) and
# lowess (dashed) fits over the points; diagonals use the panel.hist histogram.
pairs(usair.dat,panel=function(x,y) {abline(lsfit(x,y)$coef,lwd=2)
lines(lowess(x,y),lty=2,lwd=2)
points(x,y)}, diag.panel=panel.hist)
####
# NOTE(review): attach() is discouraged; with(usair.dat, ...) or explicit
# usair.dat$... access avoids name-masking surprises.
attach(usair.dat)
plot(Manuf, SO2)
abline(lsfit(Manuf, SO2), lwd=2)
# NOTE(review): lines() draws the lowess curve as a side effect and returns
# NULL, so wrapping it in abline() is a mistake -- lines(...) alone suffices.
abline(lines(lowess(Manuf, SO2), lty=2, lwd=2))
####
# NOTE(review): chiplot()/bvbox()/chisplot() come from the helper file
# functions.txt, which is only source()d a few lines BELOW -- this call
# fails unless the script is run out of order; move source() up.
chiplot(Manuf, SO2, vlabs=c("SO2", "Manufacturing"))
# chiplot(Manuf, Pop, vlabs=c("Population", "Manufacturing"))
####
# Get functions
source(paste(getwd(), "/functions.txt", sep=""))
# Bivariate boxplot of manufacturing vs SO2.
bvbox(cbind(Manuf, SO2), xlab="Manufacturing", ylab="SO2")
# bvbox(cbind(Manuf, Pop), xlab="Manufacturing", ylab="SO2")
####
coplot(Manuf~SO2|Days)
#### Euclidian distances
# Original data
dis<-dist(usair.dat)
dis.matrix<-dist2full(dis) # Function dist2full is defined at the bottom of this file
round(dis.matrix,digits=2)
# Normalized data
usair.dat <- data.matrix(usair.dat)
# NOTE(review): sd() on a matrix errors in modern R (it only accepts vectors);
# use apply(usair.dat, 2, sd) -- the broken alternatives are explored near the
# end of this script.
std<-sd(usair.dat)
usair.dat.std<-sweep(usair.dat,2,std,FUN="/")
dis<-dist(usair.dat.std)
dis.matrix<-dist2full(dis)
round(dis.matrix,digits=2)
# Mahalobis
# Squared Mahalanobis distance of each city from the vector of column means.
Sx <- cov(usair.dat)
mdis <- mahalanobis(usair.dat, colMeans(usair.dat), Sx)
#### Q-Q Plot
# Normal Q-Q plot (with reference line) for each of the seven variables,
# each copied from the screen device to a PNG via dev.copy()/dev.off().
# NOTE(review): most output file names below do not match the variable being
# plotted (e.g. 3.2.SO2_Manuf.png holds the Neg.Temp plot, and 3.3 onward all
# reuse "SO2_Neg.Temp") -- they look like copy-paste leftovers; confirm the
# intended names before relying on the saved files.
qqnorm(usair.dat[,1],ylab="Ordered observations of SO2")
qqline(usair.dat[,1])
dev.copy(png,'3.1.SO2_Neg.Temp.png')
dev.off()
qqnorm(usair.dat[,2],ylab="Ordered observations of Neg.Temp")
qqline(usair.dat[,2])
dev.copy(png,'3.2.SO2_Manuf.png')
dev.off()
qqnorm(usair.dat[,3],ylab="Ordered observations of Manuf")
qqline(usair.dat[,3])
dev.copy(png,'3.3.SO2_Neg.Temp.png')
dev.off()
qqnorm(usair.dat[,4],ylab="Ordered observations of Pop")
qqline(usair.dat[,4])
dev.copy(png,'3.4.SO2_Neg.Temp.png')
dev.off()
qqnorm(usair.dat[,5],ylab="Ordered observations of Wind")
qqline(usair.dat[,5])
dev.copy(png,'3.5.SO2_Neg.Temp.png')
dev.off()
qqnorm(usair.dat[,6],ylab="Ordered observations of Precip")
qqline(usair.dat[,6])
dev.copy(png,'3.6.SO2_Neg.Temp.png')
dev.off()
qqnorm(usair.dat[,7],ylab="Ordered observations of Days")
qqline(usair.dat[,7])
dev.copy(png,'3.7.SO2_Neg.Temp.png')
dev.off()
#### 2.6 Chiplot
# Chi-plot (dependence diagnostic; from the sourced functions.txt) for every
# pairwise combination of the seven variables; each figure is saved as
# 4.<k>.<var1>_<var2>.png via dev.copy()/dev.off().
chiplot(SO2, Neg.Temp, vlabs=c("SO2", "Neg.Temp"))
dev.copy(png,'4.1.SO2_Neg.Temp.png')
dev.off()
chiplot(SO2, Manuf, vlabs=c("SO2", "Manuf"))
dev.copy(png,'4.2.SO2_Manuf.png')
dev.off()
chiplot(SO2, Pop, vlabs=c("SO2", "Pop"))
dev.copy(png,'4.3.SO2_Pop.png')
dev.off()
chiplot(SO2, Wind, vlabs=c("SO2", "Wind"))
dev.copy(png,'4.4.SO2_Wind.png')
dev.off()
chiplot(SO2, Precip, vlabs=c("SO2", "Precip"))
dev.copy(png,'4.5.SO2_Precip.png')
dev.off()
chiplot(SO2, Days, vlabs=c("SO2", "Days"))
dev.copy(png,'4.6.SO2_Days.png')
dev.off()
chiplot(Neg.Temp, Manuf, vlabs=c("Neg.Temp", "Manuf"))
dev.copy(png,'4.7.Neg.Temp_Manuf.png')
dev.off()
chiplot(Neg.Temp, Pop, vlabs=c("Neg.Temp", "Pop"))
dev.copy(png,'4.8.Neg.Temp_Pop.png')
dev.off()
chiplot(Neg.Temp, Wind, vlabs=c("Neg.Temp", "Wind"))
dev.copy(png,'4.9.Neg.Temp_Wind.png')
dev.off()
chiplot(Neg.Temp, Precip, vlabs=c("Neg.Temp", "Precip"))
dev.copy(png,'4.10.Neg.Temp_Precip.png')
dev.off()
chiplot(Neg.Temp, Days, vlabs=c("Neg.Temp", "Days"))
dev.copy(png,'4.11.Neg.Temp_Days.png')
dev.off()
chiplot(Manuf, Pop, vlabs=c("Manuf", "Pop"))
dev.copy(png,'4.12.Manuf_Pop.png')
dev.off()
chiplot(Manuf, Wind, vlabs=c("Manuf", "Wind"))
dev.copy(png,'4.13.Manuf_Wind.png')
dev.off()
chiplot(Manuf, Precip, vlabs=c("Manuf", "Precip"))
dev.copy(png,'4.14.Manuf_Precip.png')
dev.off()
chiplot(Manuf, Days, vlabs=c("Manuf", "Days"))
dev.copy(png,'4.15.Manuf_Days.png')
dev.off()
chiplot(Pop, Wind, vlabs=c("Pop", "Wind"))
dev.copy(png,'4.16.Pop_Wind.png')
dev.off()
chiplot(Pop, Precip, vlabs=c("Pop", "Precip"))
dev.copy(png,'4.17.Pop_Precip.png')
dev.off()
chiplot(Pop, Days, vlabs=c("Pop", "Days"))
dev.copy(png,'4.18.Pop_Days.png')
dev.off()
chiplot(Wind, Precip, vlabs=c("Wind", "Precip"))
dev.copy(png,'4.19.Wind_Precip.png')
dev.off()
chiplot(Wind, Days, vlabs=c("Wind", "Days"))
dev.copy(png,'4.20.Wind_Days.png')
dev.off()
chiplot(Precip, Days, vlabs=c("Precip", "Days"))
dev.copy(png,'4.21.Precip_Days.png')
dev.off()
#
#
#
#
# Covariance and correlation matrices of the (possibly already normalized) data.
var(usair.dat)
cor(usair.dat)
#
#
dis<-dist(usair.dat)
dis.matrix<-dist2full(dis) # Function dist2full is defined at the bottom of this file
round(dis.matrix,digits=2)
#
usair.dat <- data.matrix(usair.dat)
# NOTE(review): exploratory scratch work -- sd() on a matrix/data frame fails;
# the per-column version two lines below is the working alternative
# (apply(usair.dat, 2, sd) would be more idiomatic).
std<-sd(usair.dat)
# Error: (list) object cannot be coerced to type 'double'
# Solutions:
std<-sd(data.matrix(usair.dat)) # Doesn't work
std=c(sd(usair.dat[,1]), sd(usair.dat[,2]), sd(usair.dat[,3]), sd(usair.dat[,4]), sd(usair.dat[,5]), sd(usair.dat[,6]), sd(usair.dat[,7]))
#
#
# sweep usage: sweep(x, MARGIN, STATS, FUN = "-", check.margin = TRUE, …)
# Divide columns of data matrix by the appropriate standard deviation to normalize the data
usair.dat.std<-sweep(usair.dat,2,std,FUN="/")
dis<-dist(usair.dat.std)
dis.matrix<-dist2full(dis)
round(dis.matrix,digits=2)
#
#load MASS library
library(MASS)
#set seed for random number generation to get the same plots
set.seed(1203)
# Draw 200 bivariate normal observations with correlation 0.5.
X<-mvrnorm(200,mu=c(0,0),Sigma=matrix(c(1,0.5,0.5,1.0),ncol=2))
#
#
# Marginal normal Q-Q plots for the two simulated coordinates, side by side.
par(mfrow=c(1,2))
qqnorm(X[,1],ylab="Ordered observations")
qqline(X[,1])
qqnorm(X[,2],ylab="Ordered observations")
qqline(X[,2])
#
#
# chisplot() (chi-squared plot of multivariate normality) comes from functions.txt.
par(mfrow=c(1,1))
chisplot(X)
#
# Same diagnostics after a log(abs(.)) transformation of the data.
par(mfrow=c(1,2))
qqnorm(log(abs(X[,1])),ylab="Ordered observations")
qqline(log(abs(X[,1])))
qqnorm(log(abs(X[,2])),ylab="Ordered observations")
qqline(log(abs(X[,2])))
#
par(mfrow=c(1,1))
chisplot(log(abs(X)))
#
dist2full <- function(dis) {
  ## Convert a lower-triangular "dist" object into the equivalent full
  ## (symmetric, zero-diagonal) distance matrix.
  ##
  ## Args:
  ##   dis: an object of class "dist" (its "Size" attribute gives the
  ##        number of observations).
  ##
  ## Returns: an n x n numeric matrix of pairwise distances.
  size <- attr(dis, "Size")
  lower <- matrix(0, nrow = size, ncol = size)
  lower[lower.tri(lower)] <- dis
  ## Mirror the lower triangle into the upper one; the diagonal stays zero.
  lower + t(lower)
}
|
be0b5a32d749b3bc1e688103cb10e2a68abe705a
|
51207c22706f87c8656b86ea5184b7cb956f5ae0
|
/man/marg_dist.Rd
|
b3f35c72b4c5f53d063e37cc7f01f691b2244c5f
|
[] |
no_license
|
1Edtrujillo1/udeploy
|
c23e5bffa4c829cd0c6ba045525f310fe343f9fc
|
ffcd0e7f941fde780b606b739b3c4f896cc78cb2
|
refs/heads/main
| 2023-06-10T01:06:58.236145
| 2021-07-07T23:17:08
| 2021-07-07T23:17:08
| 329,095,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 957
|
rd
|
marg_dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{marg_dist}
\alias{marg_dist}
\title{marg_dist}
\usage{
marg_dist(df, num_int_var)
}
\arguments{
\item{df}{dataset to obtain the descriptive statistical analysis}
\item{num_int_var}{\emph{ONE} NUMERICAL VARIABLE from the dataset}
}
\value{
List where each element is the marginal distribution of \code{num_int_var} from each combination levels of the factor variables.
}
\description{
Create the marginal distribution of each combination of levels from the factor variables.
}
\details{
This function allows you to create the marginal distribution of a numeric variable for each combination of levels from the factor variables.
}
\note{
\itemize{
\item If there is no factor variable(s) then it is going to bring the whole distribution of \code{num_int_var}
\item This function is applied to one numeric variable \code{num_int_var}
}
}
\author{
Eduardo Trujillo
}
|
32f5b70045ea626b2684ddca1b9ef72414d4cecc
|
8a95bbec438d0abd7398bf2634b401fa76203a44
|
/data/makeData.R
|
e93c1aaf11d2febfdc1c4713c6c71ab7b702f8ef
|
[] |
no_license
|
UrbanInstitute/dc-equity-indicators
|
faaff3b4f00105b879992e7ba063f853bb673a03
|
21836256004e1156cb5112f104f9e6d55a161691
|
refs/heads/master
| 2020-03-28T21:39:38.547575
| 2019-08-20T14:54:53
| 2019-08-20T14:54:53
| 149,172,893
| 5
| 0
| null | 2019-07-24T16:11:03
| 2018-09-17T18:52:43
|
JavaScript
|
UTF-8
|
R
| false
| false
| 5,274
|
r
|
makeData.R
|
# Build the tidy datasets behind the DC equity indicators feature.
library(tidyverse)
library(readxl)
# Mapping from raw data indicator names to display names.
name_mapping <- read_excel("Indicator_name_mapping.xlsx", sheet = "mapping")
# Per-indicator display text (bar labels, sentence fragments).
labels <- read_excel("source/DC equity text spreadsheet.xlsx")
# The workbook holds one sheet each for city, ward and neighborhood-cluster data.
data_sheets <- excel_sheets("source/Updated data for equity feature_May2019.xlsx")
city_dat <- read_excel("source/Updated data for equity feature_May2019.xlsx", sheet = data_sheets[1])
ward_dat <- read_excel("source/Updated data for equity feature_May2019.xlsx", sheet = data_sheets[2])
cluster_dat <- read_excel("source/Updated data for equity feature_May2019.xlsx", sheet = data_sheets[3])
# Insert "and" before the final item of a comma-separated cluster name:
#   "A"       -> "A"
#   "A, B"    -> "A and B"
#   "A, B, C" -> "A, B, and C"   (Oxford-comma style)
addAnd <- function(cluster_name) {
  parts <- strsplit(cluster_name, ",", fixed = TRUE)[[1]]
  if (length(parts) == 1) {
    return(cluster_name)
  }
  if (length(parts) == 2) {
    # parts[2] keeps its leading space from the split, so "and" needs no
    # trailing separator of its own.
    return(paste0(parts[1], " and", parts[2]))
  }
  # Three or more items: splice " and" in right after the last comma.
  last_comma <- max(gregexpr(",", cluster_name, fixed = TRUE)[[1]])
  paste0(
    substr(cluster_name, 1, last_comma),
    " and",
    substr(cluster_name, last_comma + 1, nchar(cluster_name))
  )
}
# clean cluster names by getting rid of "Cluster X" and the parentheses from each name
# also filter out clusters 42, 45 and 46 which have small sample sizes
cluster_dat_clean <- cluster_dat %>%
filter(!(geo %in% c("Cluster 42 (Observatory Circle)", "Cluster 45 (National Mall, Potomac River)", "Cluster 46 (Arboretum, Anacostia River)"))) %>%
filter(equityvariable != "-") %>%
mutate(geo2 = str_extract(geo, "\\(.*\\)")) %>%
mutate(geo3 = str_sub(geo2, 2, -2)) %>%
mutate(numerator = as.numeric(numerator)) %>%
mutate(denom = as.numeric(denom)) %>%
mutate(equityvariable = as.numeric(equityvariable)) %>%
select(indicator, year, "geo" = geo3, numerator, denom, equityvariable)
# Combine all three geographies, attach display names/labels, and build the
# per-geography summary sentences used in the feature.
dat <- bind_rows(city_dat, ward_dat, cluster_dat_clean) %>%
select(-year) %>%
mutate(geo = replace(geo, geo=="Washington, D.C.", "DC")) %>%
filter(indicator != "Total Population") %>%
left_join(name_mapping, by = c("indicator" = "data_name")) %>%
left_join(labels, by=c("text_name" = "Full name of indicator")) %>%
mutate(summary_sentence = str_c(`Summary sentence-pt 1`, map_chr(geo, addAnd), `Summary sentence-pt 2`, sep=" ")) %>%
mutate(reverse_gap_sentence = str_c(`Opposite gap sentence`, map_chr(geo, addAnd), "than in", sep = " ")) %>%
# mutate(value = case_when(
#   indicator %in% c("Small business lending per employee", "Age-adjusted premature mortality rate", "Violent Crime Rate per 1000 people") ~ round(equityvariable, digits = 0),
#   indicator == "unemployment" ~ round(equityvariable, digits = 3),
#   TRUE ~ round(equityvariable, digits = 2)
# )) %>%
# NOTE(review): `Abberviated name of indicator` reproduces a typo in the
# spreadsheet's column header -- it must stay misspelled to match the source.
select(indicator_full_name = text_name, indicator = `Abberviated name of indicator`,
year = `Year`, geo, numerator, denom, value = equityvariable,
blue_bar_label = `Blue bar label`, diff_bar_label = `Yellow/pink bar label`,
grey_bar_label = `Gray bar label`, summary_sentence, reverse_gap_sentence)
# add a row with [fake] data to initialize the bar chart with
# NOTE(review): numerator/denom are given "" here although the cluster data
# coerced them to numeric above -- assumes bind_rows left them as character
# for the city/ward sheets; confirm add_row does not error on a numeric column.
dat <- add_row(dat, indicator_full_name = "Initial",
indicator = "Initial",
year = "",
geo = "Initial",
numerator = "",
denom = "",
value = 0,
blue_bar_label = "",
diff_bar_label = "",
grey_bar_label = "",
summary_sentence = NA,
reverse_gap_sentence = NA)
write_csv(dat, "equity_data.csv")
# NOTE: manually edit this CSV to remove the extra space in front of the period for
# the small business lending summary sentence
##### Racial demo data ##############
# Same workbook layout as above: one sheet per geography level.
race_data_sheets <- excel_sheets("source/Equity feature_2019update_racial composition.xlsx")
race_city_dat <- read_excel("source/Equity feature_2019update_racial composition.xlsx", sheet = race_data_sheets[1])
race_ward_dat <- read_excel("source/Equity feature_2019update_racial composition.xlsx", sheet = race_data_sheets[2])
race_cluster_dat <- read_excel("source/Equity feature_2019update_racial composition.xlsx", sheet = race_data_sheets[3])
# Drop the small-sample clusters and strip "Cluster X (...)" down to the
# neighborhood names, as in the main pipeline above.
race_cluster_dat_clean <- race_cluster_dat %>%
filter(!(geo %in% c("Cluster 42 (Observatory Circle)", "Cluster 45 (National Mall, Potomac River)", "Cluster 46 (Arboretum, Anacostia River)"))) %>%
mutate(geo2 = str_extract(geo, "\\(.*\\)")) %>%
mutate(geo3 = str_sub(geo2, 2, -2)) %>%
select(indicator, year, "geo" = geo3, numerator, denom, equityvariable)
# Combine geographies and recode the indicator strings into display race labels.
# (Indicators outside the five listed cases become NA via case_when.)
race_dat <- bind_rows(race_city_dat, race_ward_dat, race_cluster_dat_clean) %>%
mutate(geo = replace(geo, geo=="Washington, D.C.", "DC")) %>%
mutate(race = case_when(
indicator == "percent white" ~ "White",
indicator == "percent black" ~ "Black",
indicator == "percent latino" ~ "Latino",
indicator == "percent Asian and Pacific Islander" ~ "Asian and Pacific Islander",
indicator == "percent other or multiple race" ~ "Other or multiple race"
)) %>%
select(race, geo, n = numerator, total_population = denom, pct_population = equityvariable)
write_csv(race_dat, "racial_demo_data.csv")
|
df3c64c6e3074d227f78c5fc02a3dec9a2410c04
|
2a84df5caeda321781df754d3d9172d1e4c3180e
|
/Bin/kidney_DF_compare.R
|
09097ae8b804168d9b6078e97387d46bdeecf4db
|
[] |
no_license
|
bzrry/bisulfite_cfDNA
|
2bc7b4be757be5f230cccbceb9a67f22651dd30f
|
55c56c33ff998120e5017012d10cf6f7595f037c
|
refs/heads/master
| 2022-01-11T10:26:40.525303
| 2019-04-15T16:19:45
| 2019-04-15T16:19:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,882
|
r
|
kidney_DF_compare.R
|
#!/usr/bin/env RScript
# Title: Kidney vs Donor fraction
# Authors: Alexandre Pellan Cheng
# Brief description: Creates figure XYZ
# Initialize -------------------------------------------------------------------------------------------------
# NOTE(review): rm(list=ls()) in a script wipes the caller's workspace and is
# discouraged; running the script with Rscript makes it unnecessary.
rm(list=ls())
# Load libraries ---------------------------------------------------------------------------------------------
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
library(data.table)
# Paths ------------------------------------------------------------------------------------------------------
tissue_origin.path <- "../V2/tissues_of_origin/"
lists<- "../lists/"
# Load tissues of origin -------------------------------------------------------------------------------------
mp.df<-fread(paste0(tissue_origin.path, "mp.txt"))
# Load donor fractions from non bisulfite treated sequencing ------------------------------------------------
donor_fractions_std<-fread(paste0(lists, "donor_fractions_std.list"))
colnames(donor_fractions_std)<-c("sample", "std_df")
#Format for plotting
mp.df<-mp.df[!grepl("MET-1-10|MET-1-18", mp.df$sample),] # Remove samples that also received a bone marrow transplant
mp.df<-merge(mp.df[, c("sample", "kidsum", "methDF", "group", "rel_error")], donor_fractions_std, by="sample", all.x=TRUE)
# Donor fractions come in as percentages; rescale both to [0, 1] proportions.
mp.df$methDF<-mp.df$methDF/100
mp.df$std_df<-mp.df$std_df/100
mp.df$group<-factor(mp.df$group, levels=c("BKVN", "BKV+/N-", "ETP", "HLY", "UTI"))
# Absolute disagreement between the two donor-fraction estimates.
mp.df$df_error<-abs(mp.df$methDF-mp.df$std_df)
# Samples with no methylation-based donor fraction get an "N.S." label
# (this coerces the methDF column of the subset to character, which is fine
# for labeling only).
non.sexmm<-mp.df[which(is.na(mp.df$methDF)),c("kidsum", "group", "methDF", "rel_error")]
non.sexmm$methDF[which(is.na(non.sexmm$methDF))]<-"N.S."
# Spearman correlation between kidney fraction and donor fraction.
# NOTE(review): `corelation` (sic) and `non.sexmm` are computed but not used
# below -- presumably referenced elsewhere or reported manually; confirm.
corelation<-cor.test(mp.df[!is.na(mp.df$methDF),]$kidsum, mp.df[!is.na(mp.df$methDF), ]$methDF, method="spearman")
# Plot data -------------------------------------------------------------------------------------------------
# When save_eps is TRUE the figure is written to a small publication-sized PDF
# (despite the flag's name); otherwise it renders to the default device.
save_eps=TRUE
if(save_eps){pdf(file="../Figures/kidney_total_errorbars.pdf",
width=1.825, height=1.825, paper="special", bg="white",
fonts="Helvetica", colormodel = "cmyk", pointsize=6, useDingbats = FALSE)}
# Scatter of donor fraction vs kidney fraction with the y = x identity line;
# the error-bar layers are kept but disabled.
ggplot(data=mp.df)+geom_abline(intercept=0)+
#geom_errorbar(aes(x=kidsum, ymin=(methDF-df_error/2), ymax=(methDF+df_error/2), width=0.03), size=0.2)+
#geom_errorbarh(aes(xmin=(kidsum-rel_error/2), xmax=(kidsum+rel_error/2), y=methDF, height=0.03), size=0.2)+
geom_point(aes(y=methDF, x=kidsum), color="black", size=1.0, stroke=0.2)+
xlab("Kidney fraction")+
ylab("Donor fraction")+
theme_bw()+coord_cartesian(ylim=c(0,1), xlim=c(0,1))+
theme(plot.background=element_blank())+
theme(legend.position="none")+
theme(axis.title.x=element_text(family="Helvetica", size=8), axis.title.y=element_text(family="Helvetica", size=8),
axis.text=element_text(family="Helvetica", size=6),
plot.title=element_text(family="Helvetica", size=6))
if(save_eps)(dev.off())
|
2f815b0c33c1dc7c34f0970bed09894c6dbdda00
|
f4bb70c1ec36124534419514056c44684aee065f
|
/cachematrix.R
|
efd0faa1c77a0096a52e6775f6672594c2413a95
|
[] |
no_license
|
NiladriMohanty/ProgrammingAssignment2
|
3d31f4cea4c08fe2c1cdd21fa36c146e67c1d43f
|
c0ec36842146672a08c568e81aec6ceddc20595f
|
refs/heads/master
| 2021-01-18T05:49:49.973657
| 2014-07-27T22:47:54
| 2014-07-27T22:47:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,777
|
r
|
cachematrix.R
|
## It computes the inverse of a square matrix
## But it uses caching method
## That means if elements of a matrix are not changing,
## then it is unwise to compute the inverse repeatedly (e.g. in a loop)
## Because it is waste of resources and time
## By taking advantage of the lexical scoping of the R programming language,
## the value of the inverse of a matrix is cached
## This function used to create a special matrix object that can cache its inverse
## This function contains a list of functions calling to set the value of the matrix,
## to get the value of the matrix, to set the value of the matrix inverse and
## to get the value of the matrix inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of accessor functions (set/get/setinverse/getinverse)
## closing over the matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function calculates the inverse of a matrix created by the above function
## First, it checks matrix inverse has already been calculated
## If so, it retrieves the inverse from the cache and skips the computation
## If matrix inverse is not calculated,
## it calculates the inverse of the matrix by solve(X) function
## and sets the value of the inverse in the cache via the setinverse function.
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix. Uses the cached inverse when one is available
## (emitting a message, like the original); otherwise computes it with
## solve(), stores it in the cache, and returns it.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("getting inverse of data")
  }
  inverse
}
|
90db2cd349d435d918e120fc5a4905dca3a43594
|
f538a1c8d59dc761693faddb27db3e1dbe845e1f
|
/Checkdis.R
|
f868d332f61a6f4369aa750f208218f1a9711909
|
[] |
no_license
|
boazgiron/TCL
|
c1327685b69ee1670f66d38fac61e6616cb170ff
|
317d8e5d0cf1af25b824d1a338a0faec0343872d
|
refs/heads/master
| 2020-06-25T10:09:07.497386
| 2017-08-23T14:49:43
| 2017-08-23T14:49:43
| 96,972,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,532
|
r
|
Checkdis.R
|
library("e1071")
# Flatten a kde2d-style object `h` (fields x, y and matrix z) into a
# matrix with one row per (i, j) grid cell: (i, j, x[i], y[j], z[i, j]),
# with j varying fastest, exactly as the original nested loops produced.
#
# Fix: the original grew the result with rbind() inside a double loop
# (O(n^2) copying) and indexed out of bounds when h$x was empty (via
# 1:length). This version builds the index vectors up front and fills
# the matrix in one shot; empty input yields NULL, matching the
# original's `zr = NULL` starting value.
createdfz <- function(h) {
  ni <- length(h$x)
  nj <- length(h$y)
  if (ni == 0L || nj == 0L) {
    return(NULL)
  }
  idx_i <- rep(seq_len(ni), each = nj)   # i varies slowest, as in the outer loop
  idx_j <- rep(seq_len(nj), times = ni)  # j varies fastest, as in the inner loop
  # matrix() fills column-major, so each concatenated segment becomes a column
  matrix(
    c(idx_i, idx_j, h$x[idx_i], h$y[idx_j], h$z[cbind(idx_i, idx_j)]),
    ncol = 5
  )
}
# Sum of `x` ignoring missing values.
# Fix: use TRUE instead of the reassignable shorthand T.
fsum <- function(x) {
  sum(x, na.rm = TRUE)
}
# TRUE when `x` contains no infinite values (NA and NaN are not infinite).
is.notinfinite.any <- function(x) {
  !any(is.infinite(x))
}
# Split events into two groups along the two channels named in `clnames`
# (x = clnames[1], y = clnames[2]) by: (1) iteratively thresholding a 2D
# kernel density estimate until at least two sizeable labelled components
# appear, (2) keeping the largest component as group 1, and (3) fitting an
# SVM so every row of the full dataset can be classified.
# NOTE(review): reads `filedatals`, `PLOTS` and `GetLabel` from the
# enclosing (global) environment -- confirm they exist before calling.
# Returns (invisibly, via the final assignment) the SVM predictions for
# every row of `filedatals`.
grouping <-function(clnames,plotpathg,lastdev){
# subsample at most 1e4 rows for density estimation and SVM training
s = sample(nrow(filedatals),min(1e4,nrow(filedatals)))
i = 0
cont = TRUE
h2 = NULL
n = 25
# Lower the effective density threshold each iteration (via the 2*(n-i)
# scaling) until at least two components with > 10 cells are found, or
# n iterations have run.
while( cont & (i < n ) ) {
#plot(filedatals[s,clnames],pch =19,cex= 0.3)
hlast = h2
h2 <- kde2d(filedatals[s,clnames[1]], filedatals[s,clnames[2]], n = 30,lims = c(0, max(filedatals[s,clnames[1]]) ,c(1e2,quantile(filedatals[s,clnames[2]],0.99))))
# rescale density to [0, 2*(n-i)] then binarize at 1
h2$z = 2*(n - i) *(h2$z- min(h2$z))/diff(range(h2$z))
h2$z[h2$z < 1] = 0
h2$z[h2$z > 1] = 1
# label connected components of the binary mask (GetLabel defined elsewhere)
h2$z = GetLabel(h2$z)
# NOTE(review): result of this call is unused
unique(as.vector(h2$z))
# flatten the labelled grid into (indx, indy, x, y, z=label) rows
dfh = as.data.frame(createdfz(h2))
colnames(dfh) <- c("indx","indy","x","y","z")
ag = aggregate(dfh$x,list(dfh$z),mean)
agl = aggregate(dfh$x,list(dfh$z),length)
# drop background (label 0) and components with <= 10 grid cells
agl = agl[!(agl$Group.1 == 0),]
agl = agl[order(agl$x,decreasing = T),]
agl = agl[agl$x > 10,]
cont = length(agl$x) < 2
i= i + 1
}
# The most frequent non-background label becomes the "main" group.
#getMaximumGroup
ta = table(as.vector(h2$z))
ta = ta[!names(ta) =="0"]
wh = which(max(ta) == ta)
maxgroupnumber = as.numeric(names(ta)[wh])
if(PLOTS) {
if(is.null(hlast)){
hlast = h2
}
png(filename= paste0(plotpathg,"LastChekcdis","_CD45_", clnames[2],"_vs_",clnames[1],".png"))
image(hlast,breaks = 1:(n+1),col = rainbow(n) )
dev.off()
png(filename= paste0(plotpathg,"Chekcdis","_CD45_", clnames[2],"_vs_",clnames[1],".png"))
image(h2,breaks = 1:(n+1),col = rainbow(n) )
points(ag[2:3,"x"],ag[2:3,"y"])
dev.off()
}
# Train an SVM: class 1 = main component, class 2 = everything else.
dfh = dfh[dfh$z != 0,]
dfh$z = factor(ifelse( dfh$z == maxgroupnumber,1,2))
dfh = dfh[,c("x","y","z")]
svm_model <- svm(z ~ ., data=dfh)
summary(svm_model)
df = data.frame(x = filedatals[s,clnames[1]],y= filedatals[s,clnames[2]])
pre = predict(svm_model,df)
if(PLOTS){
png(filename= paste0(plotpathg,"CD45_", clnames[2],"_vs_",clnames[1],"_SVM.png"))
with(df,plot(x,y, col = unlist(pre),pch = 19,cex = 0.3))
if(lastdev){
dev.off()
}
}
# Predict on the full dataset; this assignment is the function's
# (invisible) return value.
df = data.frame(x = filedatals[,clnames[1]],y= filedatals[,clnames[2]])
pre = predict(svm_model,df)
}
# Process one "..._events.csv" file: filter low-width events, log-transform
# the channel peaks, and run the two-stage grouping (sumCh vs FCS, then
# Area1 vs FCS), writing QC plots under a per-cartridge folder in `datadir`.
#
# Fixes: (1) `DEV.OFF()` was undefined -- R is case-sensitive; `dev.off()`
# was intended, so the Width histogram device was never closed.
# (2) The plot directory was checked/created via the global `plotpath`
# while the function computes `plotpathin`; use `plotpathin` consistently.
#
# NOTE(review): relies on globals defined elsewhere in this session
# (PLOTS, ty, getCartrigeNameAdnDevice, grouping). Because `grouping()`
# is defined at top level, it resolves `filedatals` in the *global*
# environment, not this function's local copy -- confirm `filedatals`
# is also assigned globally before relying on these results.
checkdis <- function(wrkingFilepath, datadir) {
  filedata <- read.csv(wrkingFilepath, header = TRUE)
  runInformation <- getCartrigeNameAdnDevice(wrkingFilepath)
  CarName <- paste0("C", runInformation$Cartnum)
  Cartnum <- runInformation$Cartnum
  plotpathin <- paste0(datadir, "/", Cartnum, "/")
  if (PLOTS) {
    if (!dir.exists(plotpathin)) {
      dir.create(plotpathin)
    }
  }
  # Remove events with low pulse width (threshold 4, marked on the plot)
  widthb <- filedata$Width > 4
  if (PLOTS) {
    png(filename = paste0(plotpathin, "Width", ".png"))
    hist(filedata$Width, 500, xlim = c(0, 30), xlab = "Width",
         main = as.character(CarName))
    abline(v = 4, col = "red", lwd = 2)
    dev.off()  # was DEV.OFF(): undefined, left the png device open
  }
  filedata <- filedata[widthb, ]
  # Log10-transform the first nine channels listed in the global `ty`
  filedatals <- data.frame(apply(filedata[, ty[1:9]], 2, log10))
  filedatals$FCS <- filedata$Peak9
  # Drop rows with infinite (log of zero/negative) or missing values
  filedatals <- filedatals[unlist(apply(filedatals, 1, is.notinfinite.any)), ]
  filedatals <- filedatals[complete.cases(filedatals), ]
  filedatals$sumCh <- apply(filedatals[, 1:8], 1, fsum)
  # Stage 1: split on total channel signal vs forward scatter, then keep
  # only events above the mean of the low group.
  clnames <- c("sumCh", "FCS")
  m1 <- grouping(clnames, plotpathin, FALSE)
  me <- aggregate(filedatals$sumCh, list(m1), mean)
  AverageLowGroup <- min(me$x)
  abline(v = AverageLowGroup, col = 3, lwd = 2, lty = 2)
  dev.off()
  filedatals <- filedatals[filedatals$sumCh > AverageLowGroup, ]
  # Stage 2: regroup the retained events on Area1 vs FCS
  clnames <- c("Area1", "FCS")
  m2 <- grouping(clnames, plotpathin, TRUE)
}
# Driver: ask the user for a folder of "..._events.csv" files, create a
# timestamped output directory next to it, and run checkdis() on the input.
# NOTE(review): choose.dir() and the "\\" path separator are Windows-only.
dirpath = choose.dir(caption = "Select floder wit the list of ..._events.csv files")
# timestamp safe for use in a directory name (":"/"-" replaced by "_")
timeStemp = gsub("-","_",gsub(":","_",Sys.time()))
fileList = dir(dirpath,full.names = T)
dataDirname = paste0(dirpath,"\\data", timeStemp )
dir.create(dataDirname)
# keep only files, not subdirectories
fileList = fileList[!(file.info(fileList))$isdir]
# NOTE(review): fileList[1:1] deliberately processes only the first file;
# widen the range to run the whole folder.
for(fll in fileList[1:1]){
#re= checkdis(fll,dataDirname)
checkdis(fll,dataDirname)
#br = rbind(br,re[,1])
#runfile = c(runfile,basename(fll))
}
|
132e7cc39cbeca96e6c60239fcf7e7fef8f71727
|
b0c09959df30b73d953fa98b8bb6c10810fa080d
|
/man/on_any_overlap.Rd
|
98fc13ff63630afe11ca3166038ed749ad0dda17
|
[] |
no_license
|
k-hench/fftidy
|
f325ed1aaefb9d0af395ef21acef387849f6a1f1
|
a8c2cd364f1597de8612188bbe73cccd7d539d37
|
refs/heads/master
| 2023-03-15T11:02:11.998683
| 2021-03-05T16:37:54
| 2021-03-05T16:37:54
| 300,317,485
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 284
|
rd
|
on_any_overlap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/encounter_assignment.R
\name{on_any_overlap}
\alias{on_any_overlap}
\title{Determine if timepoint x is on any overlap}
\usage{
on_any_overlap(x)
}
\description{
Determine if timepoint x is on any overlap
}
|
94acb162c0ad54cc182743c108f59bdf2b39360c
|
926f533d414383458700d2305f5a76518357d490
|
/ExploratoryDataAnalysis/plot1.r
|
da4e74f08a117286a486092ca9bb79021ce53262
|
[] |
no_license
|
tjaensch/datasciencecoursera
|
aae3a57b9704cd730274bd878f4a10817172f090
|
a4bc40627f68e26d099db29773bf19989bd33308
|
refs/heads/master
| 2021-01-25T06:18:08.304031
| 2017-06-28T18:18:12
| 2017-06-28T18:18:12
| 93,550,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
plot1.r
|
# Plot total US PM2.5 emissions per year (1999-2008) as a line chart.
# Expects the NEI RDS files in the working directory; writes plot1.png.
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but not used in this particular plot
SCC <- readRDS("Source_Classification_Code.rds")
# total emissions summed over all sources, per year
graph <- aggregate(NEI[c("Emissions")], list(year = NEI$year), sum)
png('plot1.png', width=480, height=480)
plot(graph$year, graph$Emissions, type = "l",
main = "Total PM2.5 Emission in the US 1999-2008",
xlab = "Year", ylab = "Emissions")
dev.off()
|
456ac03967e4c130a73f50fa72bf9a398c832224
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mrgsolve/examples/simargs.Rd.R
|
808b4ae8a73836be66e19636735bc8e5bd7f12db
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
simargs.Rd.R
|
library(mrgsolve)
### Name: simargs
### Title: Access or clear arguments for calls to mrgsim
### Aliases: simargs
### ** Examples
mod <- mrgsolve:::house()
mod %>% Req(CP,RESP) %>% carry_out(evid,WT,FLAG) %>% simargs
|
f0e395a024107bc0086ecbb761436e905bd9e468
|
a2f73bc570cd7fbda4598513da6749ed0acfaee3
|
/man/pangenome_matrix.Rd
|
d14b91ca95ccaefbdb0e734f3304285ff8423001
|
[] |
no_license
|
SWittouck/tidygenomes
|
2249cdb44f1187adff92cd1072721c1d2b4ca032
|
d4c0b0a7888ae012034eadbc3852fa334ab18e0a
|
refs/heads/master
| 2023-02-08T11:46:26.419095
| 2023-01-27T15:59:51
| 2023-01-27T15:59:51
| 126,355,208
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 546
|
rd
|
pangenome_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getters.R
\name{pangenome_matrix}
\alias{pangenome_matrix}
\title{Return a pangenome matrix}
\usage{
pangenome_matrix(tg)
}
\arguments{
\item{tg}{A tidygenomes object}
}
\value{
A matrix with the pangenome
}
\description{
This function returns the orthogroup content of the genomes in the form of a
matrix where the rows represent genomes, the columns represent orthogroups
and the cells contain the copy number or presence status of the orthogroups
in the genomes.
}
|
5e290ea4579dd1cd0694e152c1ad54f03fedbfdb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/embryogrowth/examples/resultNest_newp.Rd.R
|
94568c654963bf9bf52a444d818e83012a6f6608
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
resultNest_newp.Rd.R
|
library(embryogrowth)
### Name: resultNest_newp
### Title: Fit using the nest database with anchored parameters
### Aliases: resultNest_newp
### Keywords: datasets
### ** Examples
## Not run:
##D library(embryogrowth)
##D data(nest)
##D formated <- FormatNests(nest)
##D newp <- GenerateAnchor(nests=formated, number.anchors=7)
##D pfixed <- c(rK=2.093313)
##D resultNest_newp <- searchR(parameters=newp, fixed.parameters=pfixed,
##D temperatures=formated, derivate=dydt.Gompertz, M0=1.7,
##D test=c(Mean=39.33, SD=1.92))
##D data(resultNest_newp)
##D plotR(resultNest_newp)
## End(Not run)
|
f820ff23b34346ee0f8e309c6f02d973d431259c
|
7b0b23a0ac16a7a89c082d0d64cd209846f1ccc7
|
/test/run-testthat-lidarlicious.R
|
c3bdb404a1db2495ea1a3ab765d0f80bfae08d26
|
[] |
no_license
|
dyerlab/LiDARlicious
|
de7bb1ae126cd0ebb6a1b250268c8453acb22c36
|
19974e85a28d69b4d8b631ccaf9063bdc71361da
|
refs/heads/master
| 2021-01-20T20:08:34.127177
| 2016-07-27T18:54:15
| 2016-07-27T18:54:15
| 64,320,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63
|
r
|
run-testthat-lidarlicious.R
|
# Run the lidarlicious package's testthat suite.
# Fix: use library() instead of require() -- require() returns FALSE on a
# missing package instead of erroring, which would let test_package() fail
# later with a confusing message.
library(testthat)
library(raster)
test_package("lidarlicious")
|
bdf05411ddc4b3b12c84acb536e1c1a222dde794
|
e31ffbcde0cec170ab56cdbfdf643c3b76862f40
|
/Scripts/functions practice.R
|
4b489f1702de76c8981ac08d6f5500ad87666d07
|
[] |
no_license
|
tanka-mesh/Practice
|
d95f1828de861f8dd07c69253eadc141431298b4
|
29aaaa735878bd34cb822e79131752e94a233bc3
|
refs/heads/master
| 2022-12-10T07:38:57.471858
| 2020-09-14T05:10:44
| 2020-09-14T05:10:44
| 281,646,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
functions practice.R
|
# Add two values (vectorized, exactly like `+`).
add2 <- function(x, y) {
  y + x
}
# Keep only the elements of `x` strictly greater than 10
# (NA comparisons propagate as NA elements, as with plain logical subsetting).
above10 <- function(x) {
  mask <- x > 10
  x[mask]
}
# Keep only the elements of `x` strictly greater than `n` (default 10).
aboven <- function(x, n = 10) {
  keep <- x > n
  x[keep]
}
# Column means of a data frame or matrix `y`, ignoring NAs.
# Prints the vector of means and (like print()) returns it invisibly,
# matching the original behavior.
columnmean <- function(y) {
  means <- vapply(
    seq_len(ncol(y)),
    function(j) mean(y[, j], na.rm = TRUE),
    numeric(1)
  )
  print(means)
}
##SCOPING EXAMPLE
## Function factory: returns a function computing x^n.
## Fix: force(n) evaluates the promise for `n` at creation time. Without
## it, lazy evaluation means the returned function reads the *current*
## value of the caller's variable on first use, so
##   f <- make.power(i); i <- 3; f(2)
## would compute 2^3 instead of using the i captured at creation.
make.power <- function(n) {
  force(n)
  pow <- function(x) {
    x^n
  }
  pow
}
##comment to test commits
|
8030046b9cfdaacb5fa03946f10e5ffef2f2ae0c
|
25060ed2d7da42b136f564f692799d45c77e8869
|
/R/find.lineages.R
|
8385dfe820f05664121bd9dab9dd36b1eed2429a
|
[
"MIT"
] |
permissive
|
mjanuario/paleobuddy
|
4cd85935d9646437b28c679a32aef4540061670c
|
d38ab786b8802d34b7ce3de3b2172d8a1536b7c5
|
refs/heads/master
| 2022-12-16T15:32:23.641024
| 2020-09-18T22:30:45
| 2020-09-18T22:30:45
| 298,705,462
| 0
| 0
| null | 2020-09-26T00:18:52
| 2020-09-26T00:18:52
| null |
UTF-8
|
R
| false
| false
| 8,625
|
r
|
find.lineages.R
|
#' Separate a paleobuddy simulation into monophyletic clades
#'
#' Separates a \code{sim} object into \code{sim} objects each with a mother
#' species and its descendants. Returns by default the list of \code{sim} objects
#' descended from each species with an \code{NA} parent in the original input.
#' Allows for the user to input a vector of species to be the mother of each
#' resulting member of the returning list instead. Returns for each clade a vector
#' with the original identity of member species as well.
#'
#' @inheritParams make.phylo
#'
#' @param S A vector of species in \code{sim}. If not supplied, \code{S} will be
#' the starting species in the simulation (i.e. those for which the parent is
#' \code{NA}).
#'
#' @author Bruno Petrucci and Matheus Januario.
#'
#' @return A \code{list} object with (named) \code{sim} objects corresponding to
#' the clades descended from species in \code{S}. For each clade, an extra vector
#' \code{sim$LIN} is included so the user can identify the order of species in the
#' return with the order of species in the original simulation.
#'
#' @examples
#'
#' # we will start with examples where S are the starting species
#'
#' ###
#' # first, let us try a simulation with 3 clades,
#' sim <- bd.sim(n0 = 3, lambda = 0.1, mu = 0.1, tMax = 10, nFinal = c(20, Inf))
#'
#' # using the functions
#' clades <- find.lineages(sim)
#'
#' # set up for plotting side by syde
#' par(mfrow = c(1,length(clades)))
#'
#' # for each clade
#' for (i in 1:length(clades)) {
#' # change NA to 0 on the clade's TE
#' clades[[i]]$TE[clades[[i]]$EXTANT] = 0
#'
#' # if there is only one lineage in the clade
#' if (length(clades[[i]]$TE) < 2) {
#' # placeholder plot
#' plot(NA, xlim = c(-1, 1), ylim = c(-1, 1))
#' text("simulation with \n just one lineage", x = 0, y = 0.5, cex = 2)
#' }
#' # if it is a proper phylogeny
#' else {
#' if (requireNamespace("ape", quietly = TRUE)) {
#' plot <- ape::plot.phylo(
#' make.phylo(clades[[i]]),
#' main = "red: extinction events \n blue: speciation events");
#' ape::axisPhylo()
#' }
#'
#' # checking speciation times:
#' for (j in 2:length(clades[[i]]$TS)) {
#' # the subtraction is just to adjust the wt with the plot scale
#' lines(x = c(
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TS[j],
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TS[j]),
#' y = c(plot$y.lim[1], plot$y.lim[2]), lwd = 2, col = "blue")
#' }
#'
#' # checking extinction times:
#' for (j in 1:length(sim$TE)) {
#' # the subtraction is just to adjust the wt with the plot scale
#' lines(x = c(
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TE[j],
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TE[j]),
#' y = c(plot$y.lim[1], plot$y.lim[2]), lwd = 2, col = "red")
#' }
#' }
#' }
#'
#' ###
#' # it works with any number of clades, of course
#' sim <- bd.sim(n0 = 5, lambda = 0.1, mu = 0.08, tMax = 10, nFinal = c(20, Inf))
#'
#' # using the functions
#' clades <- find.lineages(sim)
#'
#' # set up for plotting side by syde
#' par(mfrow = c(1,length(clades)))
#'
#' # for each clade
#' for (i in 1:length(clades)) {
#' # change NA to 0 on the clade's TE
#' clades[[i]]$TE[clades[[i]]$EXTANT] = 0
#'
#' # if there is only one lineage in the clade
#' if (length(clades[[i]]$TE) < 2) {
#' # placeholder plot
#' plot(NA, xlim = c(-1, 1), ylim = c(-1, 1))
#' text("simulation with \n just one lineage", x = 0, y = 0.5, cex = 2)
#' }
#' # if it is a proper phylogeny
#' else {
#' if (requireNamespace("ape", quietly = TRUE)) {
#' plot <- ape::plot.phylo(
#' make.phylo(clades[[i]]),
#' main = "red: extinction events \n blue: speciation events");
#' ape::axisPhylo()
#' }
#'
#' # checking speciation times:
#' for (j in 2:length(clades[[i]]$TS)) {
#' # the subtraction is just to adjust the wt with the plot scale
#' lines(x = c(
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TS[j],
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TS[j]),
#' y = c(plot$y.lim[1], plot$y.lim[2]), lwd = 2, col = "blue")
#' }
#'
#' # checking extinction times:
#' for (j in 1:length(sim$TE)) {
#' # the subtraction is just to adjust the wt with the plot scale
#' lines(x = c(
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TE[j],
#' sort(clades[[i]]$TS, decreasing = TRUE)[2] - clades[[i]]$TE[j]),
#' y = c(plot$y.lim[1], plot$y.lim[2]), lwd = 2, col = "red")
#' }
#' }
#' }
#'
#' ###
#' # including one clade
#' sim <- bd.sim(n0 = 1, lambda = 0.1, mu = 0.08, tMax = 10, nFinal = c(5, Inf))
#'
#' par(mfrow = c(1, 2))
#'
#' # plotting sim and find.lineages(sim) - should be equal
#' if (requireNamespace("ape", quietly = TRUE)) {
#' ape::plot.phylo(make.phylo(sim), main="original")
#' ape::axisPhylo()
#' ape::plot.phylo(make.phylo(find.lineages(sim)[[1]]),
#' main="after find.lineages()")
#' ape::axisPhylo()
#' }
#'
#' ###
#' # now let us check that when S does not contain a starting species, we still
#' # get correct subsets of the simulation
#' sim <- bd.sim(1, 0.1, 0.05, 40, nFinal = c(5, Inf))
#'
#' # making sure we have a couple of clades to explore
#' while ((length(which(sim$PAR == 1)) < 3) | (length(which(sim$PAR == 2)) < 3) |
#' (length(which(sim$PAR == 3)) < 3)) {
#' sim <- bd.sim(1, 0.2, 0.1, 10)
#' }
#'
#' if (requireNamespace("ape", quietly = TRUE)) {
#' # first we plot the clade started by 1
#' ape::plot.phylo(make.phylo(sim), main="original")
#'
#' # this should look the same
#' ape::plot.phylo(make.phylo(find.lineages(sim)[[1]]),
#' main="after find.lineages()")
#'
#' # and these should be part of the previous phylogenies
#' ape::plot.phylo(make.phylo(find.lineages(sim, c(2, 3))$clade_2),
#' main = "Clade_2")
#' ape::plot.phylo(make.phylo(find.lineages(sim, c(2, 3))$clade_3),
#' main = "Clade_3")
#' }
#'
#' @name find.lineages
#' @rdname find.lineages
#' @export
find.lineages <- function(sim, S = NULL) {
  # Default seeds: the simulation's starting species, which by the BD
  # functions' convention are those with parent NA.
  if (is.null(S)) {
    S <- which(is.na(sim$PAR))
  }
  # One clade (via the find.lineage helper) per seed species, named
  # "clade_<seed>".
  clades <- list()
  for (seed in S) {
    clades[[paste0("clade_", seed)]] <- find.lineage(sim, seed)
  }
  clades
}
###
# helper function for find.lineages
# does the exact same, but for one species
find.lineage <- function(sim, s) {
  # Guard: the species index must exist in the simulation.
  if (s > length(sim$TE)) {
    stop("This species is not on the simulation")
  }
  # Breadth-first collection of s and all of its descendants: start from
  # s, repeatedly append every species whose parent is already collected.
  members <- c(s)
  offspring <- which(sim$PAR == s)
  while (length(offspring) > 0) {
    members <- c(members, offspring)
    offspring <- which(sim$PAR %in% offspring)
  }
  # Subset the simulation vectors to the clade, in collection order.
  TE <- sim$TE[members]
  TS <- sim$TS[members]
  PAR <- sim$PAR[members]
  EXTANT <- sim$EXTANT[members]
  # Renumber parent identifiers so they index into the clade-local
  # ordering (the order of `members`), preserving the TE/TS ordering.
  if (length(PAR) > 1) {
    # the clade founder is unparented in the new numbering
    PAR[1] <- NA
    # the founder (parent of the second member) becomes species 1
    PAR[PAR == PAR[2]] <- 1
    # every other parent id maps to its position within `members`
    for (p in unique(PAR[PAR != 1 & !is.na(PAR)])) {
      PAR[PAR == p] <- which(members == p)
    }
  }
  # LIN records each clade member's identity in the original simulation.
  list(TE = TE, TS = TS, PAR = PAR, EXTANT = EXTANT, LIN = members)
}
|
55940234596992f1b8a6c16ca037b2e24777ec41
|
6d269c8d8331bdc42eb57f68414ddec97380647d
|
/man/remap.Rd
|
6fe3e0a16bbd91a1b0847382577d8b4c633a512f
|
[] |
no_license
|
cran/mappings
|
95547d35647e86b6dfba65a94886088ae9eb542c
|
5471ab70d1a382b8ade8f210b26d3bd41cbb9d92
|
refs/heads/master
| 2023-06-04T18:21:05.915129
| 2021-06-23T05:30:05
| 2021-06-23T05:30:05
| 379,600,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 525
|
rd
|
remap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapping.R
\name{remap}
\alias{remap}
\title{Re-map a variable}
\usage{
remap(x, ...)
}
\arguments{
\item{x}{The values to apply the \code{\link{mapping}} to.}
\item{...}{Passed to \code{\link[=mapping]{mapping()}}.}
}
\value{
The values returned by calling the \code{\link{mapping}} function.
}
\description{
Apply a mapping to a vector directly. The mapping is temporary and not saved.
}
\examples{
x <- c("A", "B", "A")
remap(x, c(A=0, B=1))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.