blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3fbb03e860c1fc8415300bd43be482c0facfe5a4 | f6c05a987a86dab871ba163006ddab4a3551d3ed | /ALCOVE/v0.2.3/server_transcriptomics.R | c0a5c3c110201606d02967a201fb085d26f33fdb | [] | no_license | acshetty/ALCOVE | 8b8107d7cc64f42f31552c1ba719347647a3911b | 9e863a6f858f1fe4edb64212ea97fdc7ad8be01a | refs/heads/master | 2021-05-21T20:25:31.502407 | 2020-05-07T15:11:28 | 2020-05-07T15:11:28 | 252,788,442 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,070 | r | server_transcriptomics.R | userinp <- reactive({input$user})
# Project selector: offer the projects assigned to the currently selected user,
# with "-" as the no-selection sentinel.
# FIX: the original computed an unused `memo` via
# `project_assign$Pipeline.ID[project_assign$Pipeline.ID == projects]`,
# which recycles a multi-element `projects` through `==`; dropped as dead code.
output$projectControl <- renderUI({
  projects <- project_assign$Project.ID[project_assign$User.ID == userinp()]
  selectInput("project", label = h3("Choose Project"),
              choices = c(projects, "-"),
              selected = "-")
})
# Currently selected project (from the dropdown rendered above).
project.select <- reactive({input$project})
# Pipeline selector: pipelines assigned to the current user *and* project.
output$data <- renderUI({
memo <- project_assign$Pipeline.ID[project_assign$User.ID == userinp() & project_assign$Project.ID == project.select()]
selectInput("pipeline", label = h3("Choose Pipeline"), choices = c(memo,"-"), selected = "-")
})
# Currently selected pipeline.
data.select <- reactive({input$pipeline})
# Sidebar-menu variant of the user selector ("-" is the no-selection sentinel).
output$user_select <- renderMenu({
selectInput("user_input", label = h5("Select User"),
choices = c(unique(project_assign$User.ID),"-"),
selected = "-")
})
# Currently selected user from the sidebar menu.
user_inp_select <- reactive({input$user_input})
# Sidebar project selector: projects assigned to the sidebar-selected user.
# FIX: like output$projectControl above, this computed an unused `memo` via a
# recycled `Project.ID == projects` comparison; dropped as dead code.
output$project_select <- renderMenu({
  projects <- project_assign$Project.ID[project_assign$User.ID == user_inp_select()]
  selectInput("project_input", label = h5("Select Project"),
              choices = c(projects, "-"),
              selected = "-")
})
# Currently selected project from the sidebar menu.
project_input_select <- reactive({input$project_input})
# Dataset selector: pipelines matching the sidebar user + project selection.
output$data_select <- renderMenu({
memo <- project_assign$Pipeline.ID[project_assign$User.ID == user_inp_select() & project_assign$Project.ID == project_input_select()]
selectInput("data_select", label = h5("Choose Dataset"), choices = c(memo,"-"), selected = "-")
})
# Project selector for the FastQC view; mixedsort (presumably gtools::mixedsort
# -- confirm it is attached elsewhere) gives natural ordering of dir names.
output$project_option_select <- renderUI({selectInput("select", label = h3("Select Project"),
choices = mixedsort(fastqc_dir_names))
})
# Root of the output repository for the chosen user/project/pipeline.
# NOTE(review): this mixes data.select() (main-panel pipeline) with the
# sidebar user/project selectors -- confirm that combination is intended.
base_path = reactive({normalizePath(file.path(paste0(project_assign$Repository.Root[project_assign$User.ID == user_inp_select() & project_assign$Project.ID == project_input_select() & project_assign$Pipeline.ID == data.select()]), paste0("output_repository")))})
|
0661fe47aee3616268b50146a839d24e9fca76e3 | 3bd2beaf0a507430ba6d581a855ffcd8df13cf43 | /Imputing_Missing_Data.R | 229d157863f8e2fe5fb1c9e47b06f778decdf72f | [] | no_license | MeenakshiMadan/Yelp-Data-Analysis | dabebb1c5432c7ac30c00374c97838ab19020b32 | 9f53007c0d63d6e4e2c49cd737badcdea38cd154 | refs/heads/master | 2020-05-01T21:00:04.616224 | 2019-06-10T19:40:21 | 2019-06-10T19:40:21 | 177,665,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,980 | r | Imputing_Missing_Data.R | ####IMporting the packages
library(mice)
library(VIM)
library(lattice)
library(ggplot2)
library(dplyr)
library(caret)
library("randomForest")
library(lubridate)
library(LiblineaR)
library(pROC)
#### Read the raw Yelp extract ######
Data_missing <- read.csv("Untitled Folder/combined_yelp_new.csv", header = TRUE, sep = ",")
View(Data_missing)
### Keep only restaurants that are still open
Data_missing <- Data_missing[Data_missing$is_open == "1", ]
colnames(Data_missing)
#### Percentage of missing values per column.
# FIX: the original hard-coded 51 columns and 58,567 rows; derive both from
# the data itself so the script survives schema or row-count changes.
percentage_null <- colMeans(is.na(Data_missing)) * 100
Column_Names <- colnames(Data_missing)
combined_names <- data.frame(Column_Names, percentage_null)
combined_names
#### Drop columns with >= 50% missing data: mice cannot do justice imputing
#### columns that sparse.
# FIX: the original compared against the string "50", which coerces the
# numeric percentages to character and compares lexicographically
# (e.g. "100" >= "50" is FALSE); compare numerically, and use logical
# subsetting so "nothing to drop" does not empty the data frame.
Data_yelp <- Data_missing[, percentage_null < 50, drop = FALSE]
colnames(Data_yelp)
data_yelp <- as.data.frame(Data_yelp)
##### Drop categorical columns and columns that will not help prediction.
# any_of() tolerates names already removed by the missingness filter above
# (the original bare -c(...) would error on a missing column) and the
# duplicated Categories/Open24Hours entries are deduplicated.
data_yelp <- select(data_yelp, -any_of(c(
  "RestaurantsCounterService", "Open24Hours", "AgesAllowed", "Smoking",
  "BYOB", "Corkage", "BestNights", "RestaurantsAttire", "Alcohol", "WiFi",
  "NoiseLevel", "Categories", "Name", "Business_id", "Latitude", "longitude",
  "is_open", "state", "city", "postal_code", "BYOBCorkage"
)))
colnames(data_yelp)
##### Split the data set into blocks so mice() runs on manageable chunks:
##### data_part = columns with no missing values; parts 1-3 get imputed.
data_part<-data_yelp[1:2]
data_part1<-data_yelp[3:10]
data_part2<-data_yelp[11:18]
data_part3<-data_yelp[19:23]
###### imputing missing data
###converting the columns with no missing values to data frame
data_part<-as.data.frame(data_part)
##### Part 1: predictive mean matching, 3 imputations x 5 iterations;
##### keep the first completed data set; the sum() call is a sanity check
##### that no NAs remain.
tempData1 <- mice(data_part1,m=3,maxit=5,meth='pmm',seed=100)
summary(tempData1)
complete_data1<-complete(tempData1,1)
summary(complete_data1)
sum(is.na(complete_data1[,1:8]))
complete_data1<-as.data.frame(complete_data1)
##### Part 2: same settings as part 1.
tempData2 <- mice(data_part2,m=3,maxit=5,meth='pmm',seed=100)
summary(tempData2)
complete_data2<-complete(tempData2,1)
summary(complete_data2)
sum(is.na(complete_data2[,1:8]))
complete_data2<-as.data.frame(complete_data2)
##### Part 3: same settings.  NOTE(review): data_part3 holds 5 columns
##### (19:23) but the NA check below only covers the first 4 -- presumably
##### an oversight; verify.
tempData3 <- mice(data_part3,m=3,maxit=5,meth='pmm',seed=100)
summary(tempData3)
complete_data3<-complete(tempData3,1)
summary(complete_data3)
sum(is.na(complete_data3[,1:4]))
complete_data3<-as.data.frame(complete_data3)
####combining all the parts from imputations and converting to data frame
data_no_missing_values<-cbind(data_part,complete_data1,complete_data2,complete_data3)
data_no_missing_values<-as.data.frame(data_no_missing_values)
####checking the null values -- should be zero after imputation
sum(is.na(data_no_missing_values[,1:23]))
##### Derived variables and train/validation split --------------------------
new_df <- as.data.frame(data_no_missing_values)
# Binary target: a restaurant with >= 3 stars is "Good", otherwise "Bad".
new_df$Resturant_Type[new_df$stars >= 3] <- "Good"
new_df$Resturant_Type[new_df$stars < 3] <- "Bad"
# Random forest needs factors: coerce every character column, then the 0/1
# attribute columns listed below (plus the target).
new_df[sapply(new_df, is.character)] <- lapply(new_df[sapply(new_df, is.character)], as.factor)
factor_cols <- c("GoodForKids", "GoodForDessert", "GoodForLatenight",
                 "GoodForDinner", "GoodForBrunch", "GoodForBreakfast",
                 "GarageParking", "StreetParking", "Validated", "LotParking",
                 "ValetParking", "Caters", "RestaurantsTableService",
                 "OutdoorSeating", "BikeParking", "HasTV",
                 "RestaurantsGoodForGroups", "RestaurantsDelivery",
                 "BusinessAcceptsCreditCards", "Resturant_Type")
for (col in factor_cols) {
  new_df[[col]] <- as.factor(new_df[[col]])
}
# Drop the raw star rating (it fully determines the target).
new_df <- select(new_df, -c(stars))
## Alternative column drops kept for reference:
## new_df <- select(new_df, -c(Validated))
## new_df <- select(new_df, -c(BusinessAcceptsCreditCards))
## new_df <- select(new_df, -c(OutdoorSeating))
# Random 80/20 train/validation split.
set.seed(100)
train <- sample(nrow(new_df), 0.8 * nrow(new_df), replace = FALSE)
TrainingSet <- new_df[train, ]
ValidationSet <- new_df[-train, ]
summary(TrainingSet)
summary(ValidationSet)
########### Random forest, model 1 ------------------------------------------
# FIX: `prOximity` is not an argument of randomForest() -- R argument matching
# is case-sensitive, so the misspelled name was silently swallowed by `...`
# and no proximity matrix was computed; spell it `proximity`.
model_rf <- randomForest(Resturant_Type ~ ., data = TrainingSet,
                         importance = TRUE, ntree = 100, proximity = TRUE,
                         na.action = na.roughfix)
# Print the fitted model summary (OOB error, confusion matrix).
model_rf
# Variable importance: plot and raw scores.
varImpPlot(model_rf, main = 'Variable ImportancePlot :Model1', pch = 16)
imp_1 <- importance(model_rf)
# Hold-out performance on the validation set.
library('e1071')
predict_rf_model1 <- predict(model_rf, ValidationSet, type = "class")
confusion_matrix1 <- confusionMatrix(predict_rf_model1, ValidationSet$Resturant_Type)
confusion_matrix1
varImp(model_rf)
#### Model 2: drop review_count and retrain ---------------------------------
# FIX: the original ran `select(new_df1, ...)` before new_df1 existed (object
# not found), and then split `new_df` -- which still contains review_count --
# instead of new_df1.  Both are corrected so model 2 really is trained
# without review_count.
new_df1 <- select(new_df, -c(review_count))
# Random 80/20 train/validation split.
set.seed(100)
train1 <- sample(nrow(new_df1), 0.8 * nrow(new_df1), replace = FALSE)
TrainingSet1 <- new_df1[train1, ]
ValidationSet1 <- new_df1[-train1, ]
summary(TrainingSet1)
summary(ValidationSet1)
########### Random forest, model 2 ------------------------------------------
# FIX: `prOximity` -> `proximity` (case-sensitive argument matching; the
# misspelled name was silently swallowed by `...`).
model_rf2 <- randomForest(Resturant_Type ~ ., data = TrainingSet1,
                          importance = TRUE, ntree = 300, proximity = TRUE,
                          na.action = na.roughfix)
model_rf2
# Variable importance: plot and raw scores.
varImpPlot(model_rf2, main = 'Variable ImportancePlot :Model2', pch = 16)
imp_2 <- importance(model_rf2)
# Hold-out performance on the validation set.
library('e1071')
predict_rf_model2 <- predict(model_rf2, ValidationSet1, type = "class")
confusion_matrix2 <- confusionMatrix(predict_rf_model2, ValidationSet1$Resturant_Type)
confusion_matrix2
varImp(model_rf2)
#### KNN --------------------------------------------------------------------
knn_df <- as.data.frame(new_df)
colnames(knn_df)
# Ensure the attribute columns and the target are factors.
knn_factor_cols <- c("GoodForKids", "GoodForDessert", "GoodForLatenight",
                     "GoodForDinner", "GoodForBrunch", "GoodForBreakfast",
                     "GarageParking", "StreetParking", "LotParking",
                     "ValetParking", "Caters", "RestaurantsTableService",
                     "OutdoorSeating", "BikeParking", "HasTV",
                     "RestaurantsGoodForGroups", "RestaurantsDelivery",
                     "BusinessAcceptsCreditCards", "Resturant_Type")
for (nm in knn_factor_cols) {
  knn_df[[nm]] <- as.factor(knn_df[[nm]])
}
#### Drop the column not wanted as a predictor.
knn_df <- select(knn_df, -c(Validated))
# Stratified 80/20 split; column 22 is the target (Resturant_Type).
intrain <- createDataPartition(y = knn_df$Resturant_Type, p = 0.80, list = FALSE)
trainx <- knn_df[intrain, ]
testx <- knn_df[-intrain, ]
training.x <- trainx[, -22]
training.y <- as.factor(trainx[, 22])
testing.x <- testx[, -22]
testing.y <- as.factor(testx[, 22])
########## KNN model: repeated 5-fold CV, tuning k over 20 values ###########
set.seed(130)
ctrl <- trainControl(method = "repeatedcv", number = 5, repeats = 3)
knnFit <- train(x = training.x, y = training.y,
                method = "knn",
                preProc = c("center", "scale"),
                tuneLength = 20,
                trControl = ctrl)
knnFit
####### KNN performance on the hold-out set #############
knn.prediction <- predict(knnFit, testing.x)
knn.results <- data.frame(obs = testing.y, pred = knn.prediction)
knn.summary <- defaultSummary(knn.results)
knn.summary
confusionMatrix(knn.prediction, testing.y, positive = "Good")
knn.varImp <- varImp(knnFit)
plot(knn.varImp)
|
b019f2565cc24b4cf2f78a763285ff664657c250 | 7a7a325a0f01d92a41e7c35278e7f938452fed84 | /R/dataLBI.R | 3a124511bdb9769d2c3f6a9fa74c710c7a44ca2b | [] | no_license | IMPRESSPROJECT/Rfishpop | cca94973e345a7841634b5ab4a7ba5fdebf42e24 | c4055e61b7126d9ab0b4264855f39584405a8a16 | refs/heads/master | 2022-08-30T10:35:13.002516 | 2022-08-26T16:51:16 | 2022-08-26T16:51:16 | 252,695,701 | 0 | 5 | null | 2020-11-05T10:49:10 | 2020-04-03T10:04:49 | R | UTF-8 | R | false | false | 4,205 | r | dataLBI.R | #' @title Data for Length Based Indicators (LBI)
#'
#' @description The function provides required information for computing Length Based Indicators: Length distributions of catches and the corresponding average weight per length.
#'
#' @param Pop.Mod A list containing the components returned by Population.Modeling function (main function).
#' @param CV The coefficient of variation associated to the log-normal distribution used in Distribution.length function (see Details of such function).
#' @param RF.value The number of values generated for each age (given a year and an iteration) from the log-normal distribution used in Distribution.length function (see Details of such function). By default RF.value=1000.
#' @details The function reports the length distributions of catches for each year and iteration in our Pop.Mod object. Furthermore, the corresponding average weight per length (for each year and iteration) is also provided. The catches length distribution is computed using the Distribution.length function of this package.
#'
#'
#' @return A list containing the following components.\itemize{
#' \item{length:} the length distributions of catches for each year and iteration.
#' \item{weight:} the average weight per length for each year and iteration.
#'}
#' @author
#' \itemize{
#' \item{Marta Cousido-Rocha}
#' \item{Santiago Cerviño López}
#' }
#' @examples
#'
#'# The first step is to simulate the population.
#' ctrPop<-list(years=seq(1980,2020,1),niter=1,N0=15000,ages=0:15,minFage=2,
#' maxFage=5,tc=0.5,seed=NULL)
#' number_ages<-length(ctrPop$ages);number_years<-length(ctrPop$years)
#' Mvec=c(1,0.6,0.5,0.4,0.35,0.35,0.3,rep(0.3,9))
#' M<-matrix(rep(Mvec,number_years),ncol = number_years)
#' colnames(M)<-ctrPop$years
#' rownames(M)<-ctrPop$ages
#' ctrBio<-list(M=M,CV_M=0, L_inf=20, t0=0, k=0.3, CV_L=0, CV_LC=0, a=6*10^(-6), b=3,
#' a50_Mat=4, ad_Mat=-0.2,CV_Mat=0)
#'# Logistic selectivity
#' ctrSEL<-list(type="Logistic", par=list(a50_Sel=2.3, ad_Sel=-0.2),CV_SEL=0)
#' f=matrix(rep(0.5,number_years),ncol=number_years,nrow=1,byrow=TRUE)
#' ctrFish<-list(f=f,ctrSEL=ctrSEL)
#' a_BH=15000; b_BH=50; CV_REC_BH=0
#' SR<-list(type="BH",par=c(a_BH,b_BH,CV_REC_BH))
#' Pop.Mod<-Population.Modeling(ctrPop=ctrPop,ctrBio=ctrBio,ctrFish=ctrFish,SR=SR)
#'
#' # Then the function is used to obtain the length distributions and average
#' # weight per length.
#' # UNCOMMENT THE FOLLOWING LINES
#' #resul=Data.to.LBI(Pop.Mod,CV=0.2)
#' #freq=resul$length[[1]]
#' #wal=resul$weight[[1]]
#'
#' # Furthermore, than the data provided by Data.to.LBI
#' # function the LBI method also needs some life history parameters.
#' # For example:
#'
#' L_inf=Pop.Mod$Info$ctrBio$L_inf # von Bertalanffy asymptotic length (Linf)
#' k=Pop.Mod$Info$ctrBio$k
#' t0=Pop.Mod$Info$ctrBio$t0
#' x50=Pop.Mod$Info$ctrBio$a50_Mat
#' L50=Length_VB(L_inf, k, x50, t0) # Length at 50% maturity (L50)
#' # Since M is a constant through the year but age dependent,
#' # the natural mortality divided by von Bertalanffy k coefficient
#' # is computed as follows, using the mean of the age vector.
#' M.vec=Pop.Mod$Matrices$M[,1,1]
#' MK <-mean(M.vec)/k
#'
#' # Finally, running the following line of code after load the
#' # required code LBI are computed.
#' #LBI=lb_ind(data=freq,binwidth=3,linf=L_inf,lmat=L50,mk_ratio=MK,weight=wal)
#' @export
Data.to.LBI <- function(Pop.Mod, CV, RF.value = 1000) {
  # Length distributions of catches and the corresponding average weight per
  # length, one entry per iteration (see roxygen header above).
  data.list <- list()
  weight.list <- list()
  # Length-weight relationship parameters: W = a * L^b.
  a <- Pop.Mod$Info$ctrBio$a
  b <- Pop.Mod$Info$ctrBio$b
  niter <- dim(Pop.Mod$Matrices$N)[3]
  number_years <- dim(Pop.Mod$Matrices$N)[2]
  # FIX: the original computed the RF.value-specific distribution and then
  # unconditionally overwrote it with the default call, so a user-supplied
  # RF.value was silently ignored; use if/else instead.
  if (RF.value != 1000) {
    L.D <- Distribution.length(Pop.Mod, CV = CV, Type = "LengthC", RF.value = RF.value)
  } else {
    L.D <- Distribution.length(Pop.Mod, CV = CV, Type = "LengthC")
  }
  # Year labels are loop-invariant; hoist them out of the iteration loop.
  years <- as.numeric(colnames(Pop.Mod$Matrices$N))
  for (i in seq_len(niter)) {
    # Bin mid-points (row names + 0.5) followed by counts for iteration i.
    freq <- as.data.frame(cbind(as.numeric(rownames(L.D)) + 0.5, L.D[, , i]))
    colnames(freq) <- c("MeanLength", years)
    # Average weight per length is the same for every year column.
    wal <- freq
    wal[, 2:(number_years + 1)] <- rep(a * freq[, 1]^b, number_years)
    data.list[[i]] <- freq
    weight.list[[i]] <- wal
  }
  return(list(length = data.list, weight = weight.list))
}
|
a779c56041c63adefa9726959ce6a9be3d5167dc | 5551a151f01991ed9f7acd42e6c0b1df3a4f7820 | /server.R | cf66ad2ff1627ff1fac87d48fa20994547530176 | [] | no_license | mbayekebe/ddpshiny | c5c1db86d08654a08499d2b710601fb194ec8029 | ec882f50e822d29f20a58b7380c516440f60c7ee | refs/heads/master | 2020-05-30T18:00:45.982929 | 2014-09-12T00:46:48 | 2014-09-12T00:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 963 | r | server.R | ## server.R
## server.R -- Shiny backend: logistic-regression Titanic survival predictor.
library(shiny)
library(caret)
# Training data: Titanic passengers with Class/Age/Sex and a Survived flag.
titanicds <- read.csv("data/titanicdatc.txt", sep=",")
titanicds$Class <- as.factor(titanicds$Class)
titanicds$Age <- as.factor(titanicds$Age)
titanicds$Sex <- as.factor(titanicds$Sex)
# 70/30 stratified split on the outcome.
# NOTE(review): set.seed() is called *after* createDataPartition(), so the
# partition itself is not reproducible -- confirm whether that is intended.
inTrain <- createDataPartition(y=titanicds$Survived, p=0.7, list=FALSE)
training <- titanicds[inTrain,]
testing <- titanicds[-inTrain,]
set.seed(125)
# Logistic regression on the three categorical predictors.
modFit <- glm(Survived ~ Class+Age+Sex,data=training,family=binomial(logit))
shinyServer(function(input, output) {
  # Render a one-line survival verdict for the traveler described by the
  # class/age/sex inputs, using the logistic model fitted above.
  output$Outcome <- renderText({
    # FIX: removed three no-op switch() calls whose identity-mapped results
    # were computed and immediately discarded (dead code).
    freshdata <- data.frame(Class = input$class, Age = input$age, Sex = input$sex)
    freshPred <- predict(modFit, freshdata, type = "response")
    # Predicted survival probability above 0.5 counts as "survived".
    Outcome <- if (freshPred > 0.5) "survived" else "did not survive"
    paste("Traveler", Outcome)
  })
}) |
9d3cce0ba8973a6935c7a72d8655ad912cbc25dd | 412cdc99366e34a54c9441df5995a58bf3e59a36 | /code/scraper.R | c1fed63e8704bd6832b3d711e207c79ba4295e16 | [
"MIT"
] | permissive | Hutchins-Center/diversity-economics | 8c03a0a4236bd3f4ad1e42e1c74fdc50431d6c12 | b4e86e8f017fc626e15b6e5d0eaf493363b347b5 | refs/heads/master | 2023-06-11T02:04:53.986804 | 2021-07-02T15:54:16 | 2021-07-02T15:54:16 | 382,394,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,363 | r | scraper.R | library(RSelenium)
# One-time setup (run these in a shell, not in R):
#docker pull selenium/standalone-chrome
#docker run -d -v ~/Documents/Projects/diversity-economics:/home/seluser/Downloads -p 4445:4444 selenium/standalone-chrome
# Chrome capabilities: suppress download popups/prompts and set the download
# folder inside the container.
# NOTE(review): "home/seluser/Downloads" has no leading "/" -- the docker
# mount above uses /home/seluser/Downloads; confirm the path is intended.
eCaps <- list(
chromeOptions =
list(
prefs = list("profile.default_content_settings.popups" = 0L,
"download.prompt_for_download" = FALSE,
"download.default_directory" = "home/seluser/Downloads")
)
)
# Connect to the dockerised Selenium server published on host port 4445.
remDr <- RSelenium::remoteDriver(remoteServerAddr = 'localhost',
port= 4445L,
extraCapabilities = eCaps,
browserName = 'chrome')
remDr$open(silent = TRUE)
# navigate to the FedScope (OPM) Cognos PowerPlay report of interest
remDr$navigate("https://www.fedscope.opm.gov/ibmcognos/bi/v1/disp?b_action=powerPlayService&m_encoding=UTF-8&BZ=1AAABnhOp%7EWx42oVOy26DMBD8GS9pD43Wy%7EvAAWyjILWQBu4VIU4UFTAiXPL3FXBI20tmtNJodkYaqyy2ZVUcVCaj22RGnckXILoqEh73Hc%7EhInFs3w9UwGPuJwqlH9rkAdGrNXdVfBC7fVztIqC0Mf2k_wkoPZv2pEdwE3CwrzsNttzs6_a7vujbl_qG1tw73U8bcCVQOqyfv%7EFHCgjfgLDUw6S7ox6BcOZctmQptqLIcyWqrMjz_ENFT4tW8hmdERlHRM6RMYbMRUbIZjIWX3Tf3IEQ6ASEcdsChu_mqaer6f%7EZDCgAshFIc6AjULga%7EGGwBUD2HP8FvnBR65jl1gkrfgA1H2oj")
# confirm you got there
remDr$getTitle()
remDr$screenshot(display = TRUE)
webElement <- remDr$findElement("class name", "dimbar")
# FIX: the line below is a raw HTML <frame> tag pasted from the page source;
# it is not valid R and stops the whole script from parsing.  It is preserved
# here as a comment for reference (its src URL is the one navigated to next).
# <frame name="ActionPane" src="/ibmcognos/bi/v1/disp?b_action=ppfetchfile.absolute&pps_tracking=5000;dd2qM8yqw8l4q4jj9jslMqj4wsjjMq8l4MGM2Gvd;MzNEMzdFQjMtMjY5QS00QjBFLUJBQTEtNDA4RkY0MjczMUZF;11412;conversationComplete&m_encoding=UTF-8&FN2=MzA4MURGMDYwOTJBODY0ODg2RjcwRDAxMDcwNkEwODFEMTMwODFDRTAyMDEwMDMwODFDODA2MDAzMDFEMDYwOTYwODY0ODAxNjUwMzA0MDEwMjA0MTA1MDMyNUQzMDZBMjYzMTM0NDQzNjQ3NTQ3MjNENDIzQjgwODFBNEFGQTU3NDYzMjU0Q0Y3QUJBMzI1NzU0NTFFMzJDM0E0NTk1RkQ5MzQ0RTVCM0FFNDU4NDM5QkQxNzJFQTYyRkU5RDU0NzlGRkJGN0NGOEE0MEFDMkY0ODM2RDJCQzg2NzAxQUQxQ0RBREEzRjAzNkI0REE2QTJBRjJCNkUyNTA0N0I2NzFDMzk5RThCNzM5QzM1NjlCQjQzMzZCOENBMzcwOUJDQTFEREQ2M0M1Q0YyMzJENzlGMDM2QTY1ODQ2QTAyOUNCNEJEM0IzNDY5MUQyRkQ4NkYyMzg0RDI5MDQ4RUNDRTZCRTRCQzlBM0IzOTNGNzkzRDAxMDEyMDIzQzg2NzEwMTkyRjEwRTFBRDA5Qjg5RDZENDAxQzQ1QjI2RjIzODBFOTIzM0UyNUQwQ0ZCOUVCRjFFMUUwN0E0ODU0Njg1RkU5MjVzcGxpdHRlcjAyMDcwMDAwMDA1MzQ4NDEyRDMyMzUzNkNBQkY0ODM4RUIyNDA1RUM3Q0UyMzZCMUI0MjcxQzFBNUJCMTQ3REI3N0Q4QzE0MjhENjY1QTgzNEUzMkEwRDk=" marginwidth="0" marginheight="0" scrolling="auto" onresize="actPaneResize(event);">
# Fetch the export directly via the ActionPane frame's src URL captured above.
# NOTE(review): the URL is relative -- presumably it resolves against the
# fedscope.opm.gov session opened earlier; confirm.
remDr$navigate("/ibmcognos/bi/v1/disp?b_action=ppfetchfile.absolute&pps_tracking=5000;dd2qM8yqw8l4q4jj9jslMqj4wsjjMq8l4MGM2Gvd;MzNEMzdFQjMtMjY5QS00QjBFLUJBQTEtNDA4RkY0MjczMUZF;11412;conversationComplete&m_encoding=UTF-8&FN2=MzA4MURGMDYwOTJBODY0ODg2RjcwRDAxMDcwNkEwODFEMTMwODFDRTAyMDEwMDMwODFDODA2MDAzMDFEMDYwOTYwODY0ODAxNjUwMzA0MDEwMjA0MTA1MDMyNUQzMDZBMjYzMTM0NDQzNjQ3NTQ3MjNENDIzQjgwODFBNEFGQTU3NDYzMjU0Q0Y3QUJBMzI1NzU0NTFFMzJDM0E0NTk1RkQ5MzQ0RTVCM0FFNDU4NDM5QkQxNzJFQTYyRkU5RDU0NzlGRkJGN0NGOEE0MEFDMkY0ODM2RDJCQzg2NzAxQUQxQ0RBREEzRjAzNkI0REE2QTJBRjJCNkUyNTA0N0I2NzFDMzk5RThCNzM5QzM1NjlCQjQzMzZCOENBMzcwOUJDQTFEREQ2M0M1Q0YyMzJENzlGMDM2QTY1ODQ2QTAyOUNCNEJEM0IzNDY5MUQyRkQ4NkYyMzg0RDI5MDQ4RUNDRTZCRTRCQzlBM0IzOTNGNzkzRDAxMDEyMDIzQzg2NzEwMTkyRjEwRTFBRDA5Qjg5RDZENDAxQzQ1QjI2RjIzODBFOTIzM0UyNUQwQ0ZCOUVCRjFFMUUwN0E0ODU0Njg1RkU5MjVzcGxpdHRlcjAyMDcwMDAwMDA1MzQ4NDEyRDMyMzUzNkNBQkY0ODM4RUIyNDA1RUM3Q0UyMzZCMUI0MjcxQzFBNUJCMTQ3REI3N0Q4QzE0MjhENjY1QTgzNEUzMkEwRDk=")
remDr$getTitle()
|
1a292e981badeca6df08104a22431c3c4d5093fc | 89aa5317c8a5e7add803e055e2d527ae7189e653 | /man/logspace.Rd | d990c532bfe92f944831e58b1dbb1f8c72d8f667 | [] | no_license | cran/rdetools | af341a0a3a277d1cdba4013920b44f20735f2d73 | a13755e0f70b341c4f8841f80d980e2986e5b75e | refs/heads/master | 2020-06-14T07:18:27.634094 | 2008-09-03T00:00:00 | 2008-09-03T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | rd | logspace.Rd | \name{logspace}
\alias{logspace}
\title{Logarithmically spaced sequence generation}
\description{
Function generates a logarithmically spaced sequence of \code{n} values between decades
\eqn{10^l} and \eqn{10^u}.
}
\usage{
logspace(l, u, n)
}
\arguments{
\item{l}{\eqn{10^l} will be the lower value to start from}
\item{u}{\eqn{10^u} will be the upper value to end with}
\item{n}{number of values to generate}
}
\value{
Logarithmically spaced sequence of length \code{n} between \eqn{10^l} and \eqn{10^u}.
}
\author{Jan Saputra Mueller}
\seealso{\code{\link{seq}}, \code{\link{selectmodel}}}
\examples{
## generate 100 logarithmically spaced values between 10^(-3) and 10^3
logspace(-3, 3, 100)
}
\keyword{models}
|
f675f170e4c3dfbdde0399ec1f167025089b5d57 | 07b57751b3d92333845b178a273c195c26769907 | /SecondRun.R | 96530a416d1362b864f13cc7e9eaa42b41f6ce79 | [] | no_license | lshep/BuildReportDatabase | 20741893accfbcc4691d2ba335f9cc9f591b8cda | b25e8470cc860bf43b6f2d51ee3e7ac82bc96e55 | refs/heads/master | 2023-01-11T17:11:29.960339 | 2020-11-13T17:11:23 | 2020-11-13T17:11:23 | 290,224,968 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,687 | r | SecondRun.R | ######################################################################################
#
#On malbec2 in ~biocbuild/public_html/BBS/3.11/bioc:
#
# - meat-index.dcf: same list of packages as in the manifest EXCEPT
#that packages that have a DESCRIPTION file that is too broken for the
#builds are separated and put in skipped-index.dcf
#
# - skipped-index.dcf: list of packages from manifest that have a
#DESCRIPTION file that is too broken for the builds.
#
# - gitlog: folder containing 1 little dcf file per package in the
#manifest e.g. gitlog/git-log-BiocGenerics.dcf:
#
#
# The OS, Platform, and R version used on each node can be extracted from
#~biocbuild/public_html/BBS/3.11/bioc/nodes/<node_name>/NodeInfo/R-sessionInfo.txt
#
# FIX: the cd/scp commands below are shell commands, not R.  They previously
# sat inside `if(F){ ... }`, but R must still *parse* the guarded block even
# though it never runs, and lines such as `bioc/*.dcf` are not valid R syntax.
# They are kept here as comments instead.
#
# cd /home/shepherd/Projects/BuildReportDatabase/TempCopyOfFiles
# scp biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/*.dcf 3.12/bioc/
# scp -r biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/gitlog 3.12/bioc/
# scp -r biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/nodes/malbec1/NodeInfo 3.12/bioc/nodes/malbec1/
# scp -r biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/nodes/tokay1/NodeInfo 3.12/bioc/nodes/tokay1/
# scp -r biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/nodes/merida1/NodeInfo 3.12/bioc/nodes/merida1/
# scp -r biocbuild@malbec1.bioconductor.org:/home/biocbuild/public_html/BBS/3.12/bioc/nodes/nebbiolo1/NodeInfo 3.12/bioc/nodes/nebbiolo1/
# scp biocbuild@malbec2.bioconductor.org:/home/biocbuild/public_html/BBS/3.11/bioc/*.dcf 3.11/bioc/
# scp -r biocbuild@malbec2.bioconductor.org:/home/biocbuild/public_html/BBS/3.11/bioc/gitlog 3.11/bioc/
# scp -r biocbuild@malbec2.bioconductor.org:/home/biocbuild/public_html/BBS/3.11/bioc/nodes/malbec2/NodeInfo 3.11/bioc/nodes/malbec2/
# scp -r biocbuild@malbec2.bioconductor.org:/home/biocbuild/public_html/BBS/3.11/bioc/nodes/tokay2/NodeInfo 3.11/bioc/nodes/tokay2/
# scp -r biocbuild@malbec2.bioconductor.org:/home/biocbuild/public_html/BBS/3.11/bioc/nodes/machv2/NodeInfo 3.11/bioc/nodes/machv2/
######################################################################################
# Dependencies.
# FIX: plyr must attach *before* dplyr -- attaching plyr last masks dplyr
# verbs (mutate/summarise/arrange/...), a documented conflict.  The duplicate
# library(dplyr) call was also dropped.
library(tidyr)
library(jsonlite)
library(yaml)
library(httr)
library(RMariaDB)
library(DBI)
library(plyr)
library(dplyr)
# Bioconductor release and devel version numbers from the canonical config.
config = read_yaml("https://master.bioconductor.org/config.yaml")
versions <- c(config$release_version, config$devel_version)
# See the following for mariadb setup
# https://github.com/r-dbi/RMariaDB
# Credentials come from the "my-db" group of the local MySQL option file.
con <- dbConnect(RMariaDB::MariaDB(), group = "my-db")
## ver = versions[1]
# Main ingest loop: for each Bioconductor version, fetch the build report's
# STATUS_DB.txt, attach git metadata, reconcile builders, and append
# per-package status rows to the database.
for(ver in versions){
message("working on release: ", ver)
# eventually loop over repos but for now concentrate on one
repos <- c("bioc", "data-experiment", "workflows") #,"data-annotation")
# for(repo in repos) {
repo="bioc"
message("working on repo: ", repo)
#repo = repos[1]
# Per-package/per-stage status dump published with each build report.
file <- paste0("https://master.bioconductor.org/checkResults/",ver,"/",
repo, "-LATEST/STATUS_DB.txt")
###############################################
# Check report date
# If found, the build report didn't generate
# skip to not duplicate entries
###############################################
# The file's Last-Modified timestamp serves as the report's identity.
date_report <- cache_info(HEAD(file))[["modified"]]
# See if failed to build report based on date
qry <- paste0("SELECT * FROM reports WHERE date='",date_report,"';")
res <- dbSendQuery(con, qry)
tbl_reports<- dbFetch(res)
dbClearResult(res)
if(nrow(tbl_reports) != 0){
message("Duplicate Report. Build Report Failed to post")
next
}else{
# Register the new report, then re-read it so tbl_reports carries its
# auto-assigned report_id.
repo_type <- ifelse(repo == "bioc","software", repo)
dbAppendTable(con, "reports", data.frame(date=date_report, repo_type=repo_type))
qry <- paste0("SELECT * FROM reports WHERE date='",date_report,"';")
res <- dbSendQuery(con, qry)
tbl_reports<- dbFetch(res)
dbClearResult(res)
}
###############################################
# Read Status file
###############################################
# Keys look like "pkg#builder#stage:"; split on "#" and strip the colon.
tbl <- read.table(file, comment.char="")
names(tbl) = c("builder", "status")
status <- tbl %>% separate(builder, c("package", "builder", "stage"), "#")
status$stage <- gsub(status$stage, pattern=":", replacement="")
status$status[which(is.na(status$status))] = "NA"
# git problems as defined by skipped, ERROR, TIMEOUT
#idx <- which(tolower(status[,"status"]) %in% tolower(c("skipped","ERROR",
# "TIMEOUT")))
#status <- status[idx,]
# Attach the last git commit id/date from the locally synced gitlog dcf files.
# NOTE(review): the path hard-codes "/bioc/"; fine while repo is fixed to
# "bioc", but it must be parameterised if the repo loop is re-enabled.
gitcommitid <- rep("", dim(status)[1])
gitcommitdate <- rep("", dim(status)[1])
for(i in seq_len(dim(status)[1])){
pkg <- status[i, "package"]
dcf <-
read.dcf(paste0("/home/shepherd/Projects/BuildReportDatabase/TempCopyOfFiles/", ver, "/bioc/gitlog/git-log-", pkg,".dcf"))
gitcommitid[i] <- dcf[,"Last Commit"]
gitcommitdate[i] <- dcf[,"Last Changed Date"]
}
## Is this faster?
## pkgs <- unique(status[,"package"])
## for(pkg in pkgs){
##
## dcf <-
## read.dcf(paste0("/home/shepherd/Projects/BuildReportDatabase/TempCopyOfFiles/", ver, "/bioc/gitlog/git-log-", pkg,".dcf"))
## gitcommitid[which(status[,"package"]==pkg)] <- dcf[,"Last Commit"]
## gitcommitdate[which(status[,"package"]==pkg)] <- dcf[,"Last Changed Date"]
##
## }
status <- cbind(status, git_commit_id=gitcommitid, git_commit_date=gitcommitdate)
###############################################
# Check today's builders
# If needed add to builders database table
# Else retrieve builder_id
###############################################
# Builder metadata: the first three lines of each node's R-sessionInfo.txt
# give the R version, Platform and OS strings parsed below.
ActiveBuilders <- system2("ls", args= paste0("/home/shepherd/Projects/BuildReportDatabase/TempCopyOfFiles/", ver, "/bioc/nodes"), stdout=TRUE)
df <- matrix("", nrow=length(ActiveBuilders), ncol=4)
rownames(df) <- ActiveBuilders
colnames(df) <- c("r_version", "platform", "os", "bioc_version")
for(i in ActiveBuilders){
text <-
readLines(paste0("/home/shepherd/Projects/BuildReportDatabase/TempCopyOfFiles/", ver, "/bioc/nodes/",i,"/NodeInfo/R-sessionInfo.txt"),
n=3)
df[i,] <- c(trimws(gsub(pattern="Platform:|Running under:", replacement="", text)), ver)
}
res <- dbSendQuery(con, "SELECT * FROM builders")
builders <- dbFetch(res)
dbClearResult(res)
# verify there is an entry in the database and get builder_id for df
builder_id <- rep(NA_integer_, nrow(df))
found <- match_df(builders, as.data.frame(df))
builder_id[match(unname(unlist(found["builder"])), rownames(df))] = found$builder_id
# Update builders table if needed
if (nrow(df) != nrow(found)){
if(nrow(found) == 0){
not_fnd <- cbind(as.data.frame(df), builder=rownames(df))
}else{
not_fnd <- df[-(match(found$builder, rownames(df))),,drop=FALSE]
not_fnd <- cbind(not_fnd, builder=rownames(not_fnd))
not_fnd <- as.data.frame(not_fnd) %>% select(colnames(builders)[-1])
}
dbAppendTable(con, "builders", not_fnd)
# Re-read so the freshly inserted builders get their ids resolved.
res <- dbSendQuery(con, "SELECT * FROM builders")
builders <- dbFetch(res)
dbClearResult(res)
builder_id <- rep(NA_integer_, nrow(df))
found <- match_df(builders, as.data.frame(df))
builder_id[match(unname(unlist(found["builder"])), rownames(df))] = found$builder_id
} else {
message("All builders found")
}
df <- cbind(builder_id, df)
###############################################
# Update status table for
# builder_id and report_id
# Add to database
###############################################
status$builder_id= unname(df[match(status$builder, rownames(df)), "builder_id"])
status$report_id = unname(tbl_reports$report_id)
status <- select(status, c("builder_id", "report_id", "package",
"git_commit_id", "git_commit_date", "stage", "status"))
dbAppendTable(con, "status", status)
# need to check skipped file for errors to add to status
#} #End loop over repositories
} # End loop over versions
#Disconnect from the database
dbDisconnect(con)
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
###############################################
## biocBuildReport package
## Example interactive queries against the build-report database.
library(RMariaDB)
library(dplyr)
library(stringr)
# Connection parameters (host/user/password) are read from the "my-db"
# group of the MariaDB option file (e.g. ~/.my.cnf).
con <- dbConnect(RMariaDB::MariaDB(), group = "my-db")
## bu = tbl(con, "builders")
## rt = tbl(con, "reports")
## st = tbl(con, "status")
## Disconnect from the database
## dbDisconnect(con)
##########################################################
##
## Is my package failing on today's build report?
##
###########################################################
today <- format(Sys.time(), "%Y-%m-%d")
# Find the report_id(s) whose date field matches today's date string.
todays_report_id <- tbl(con, "reports") %>%
  filter(str_detect(date, today)) %>%
  select("report_id") %>%
  collect(n = Inf) %>%
  pull("report_id")
if (length(todays_report_id) == 0) {
  message("Build Report For Today is not posted yet")
}
# Manual fallback: flip to TRUE to query yesterday's report instead.
# (FALSE instead of F: T/F are ordinary, reassignable bindings.)
if (FALSE) {
  yesterday <- format(Sys.time() - (24 * 60 * 60), "%Y-%m-%d")
  todays_report_id <- tbl(con, "reports") %>%
    filter(str_detect(date, yesterday)) %>%
    select("report_id") %>%
    collect(n = Inf) %>%
    pull("report_id")
}
#pkg = "BiocFileCache"
pkg <- "a4"
# TRUE if `pkg` hit ERROR or TIMEOUT on any builder in today's report.
any((tbl(con, "status") %>%
  filter(report_id %in% todays_report_id) %>%
  filter(package == pkg) %>%
  collect() %>%
  pull("status") %>%
  unique()) %in%
  c("ERROR", "TIMEOUT"))
## In release or devel?
failing <- inner_join((tbl(con, "status") %>%
  filter(report_id %in% todays_report_id) %>%
  filter(package == pkg) %>%
  filter(status %in% c("ERROR", "TIMEOUT"))),
  tbl(con, "builders"))
failing %>% collect() %>% pull("bioc_version") %>% unique()
## On what builders?
failing %>% collect() %>% pull("builder") %>% unique()
##########################################################
##
## How long has my package been failing?
##
###########################################################
pkg <- "a4"
pkg_status <- tbl(con, "status") %>%
  filter(package == pkg) %>%
  select("builder_id", "report_id", "status")
# Join the failing rows to report dates and builder metadata.
failure_history <- inner_join(inner_join((pkg_status %>%
  filter(status %in% c("ERROR", "TIMEOUT"))),
  tbl(con, "reports")), tbl(con, "builders")) %>%
  select("status", "date", "builder", "bioc_version")
##########################################################
##
## What commit version is the builder using?
##
###########################################################
today <- format(Sys.time(), "%Y-%m-%d")
todays_report_id <- tbl(con, "reports") %>%
  filter(str_detect(date, today)) %>%
  select("report_id") %>%
  collect(n = Inf) %>%
  pull("report_id")
if (length(todays_report_id) == 0) {
  message("Build Report For Today is not posted yet")
}
# Manual fallback: flip to TRUE to query yesterday's report instead.
if (FALSE) {
  yesterday <- format(Sys.time() - (24 * 60 * 60), "%Y-%m-%d")
  todays_report_id <- tbl(con, "reports") %>%
    filter(str_detect(date, yesterday)) %>%
    select("report_id") %>%
    collect(n = Inf) %>%
    pull("report_id")
}
#pkg = "AnnotationHub"
pkg <- "a4"
## tbl(con, "status") %>% filter(report_id %in% todays_report_id) %>% filter(package == pkg) %>% select("git_commit_id", "git_commit_date") %>% distinct()
## potentially different
# Which git commit did each builder/version actually build?
commit_info <- inner_join((tbl(con, "status") %>%
  filter(report_id %in% todays_report_id) %>%
  filter(package == pkg)),
  tbl(con, "builders"))
commit_info %>% select("builder", "bioc_version", "git_commit_id", "git_commit_date") %>% distinct()
if (FALSE) {
## think it would be useful to ask historical questions, like 'how many successes has my package had, across all platforms, in the last x days'. Also it might be useful to ask more complicated questions, like 'for each day, did my package build successfully? did any of it's dependencies fail to build? where there changes (git commit ids?) in any of my dependencies' and also 'when I changed my package, did any of my reverse dependencies break?' Most of these I'm thinking return a tibble with columns for date, my package status, dependency/reverse dependency status, etc
}
## Disconnect from database
dbDisconnect(con)
|
4a59537eb20c1ec0aade73f46484dbefb0ac9685 | 3710bdb5c5598d3db3c79e343d826543a1329f83 | /plot2.R | 6bd2e09deb502ce767b8e969bd8dc283a838a512 | [] | no_license | pfurrow/ExData_Plotting1 | 35b61b6e298fc1c51505ee4da72aa0429b7e9e9e | 38264f388777adb8e5ac59908d4e0a80013f356c | refs/heads/master | 2021-01-21T08:33:05.819749 | 2015-07-10T23:05:10 | 2015-07-10T23:05:10 | 38,636,343 | 0 | 0 | null | 2015-07-06T17:59:50 | 2015-07-06T17:59:48 | null | UTF-8 | R | false | false | 941 | r | plot2.R | ##
## Load the full household power consumption data set, keep only the two
## target days (1 Feb 2007 and 2 Feb 2007), and draw the Global Active
## Power time series to the required PNG device.
##
# Columns: Date, Time (character), then seven numeric measurement columns.
col_types <- c("character", "character", rep("numeric", 7))
pwr <- read.table("data/household_power_consumption.txt", na.strings = "?",
                  stringsAsFactors = FALSE, sep = ";", header = TRUE,
                  colClasses = col_types, comment.char = "")
# Restrict to the two days of interest (dates are stored as d/m/Y strings).
target_days <- pwr$Date == "1/2/2007" | pwr$Date == "2/2/2007"
pwr <- pwr[target_days, ]
# Combine the separate Date and Time columns into a single POSIXlt stamp.
pwr$DateTime <- strptime(paste(pwr$Date, pwr$Time), "%d/%m/%Y %H:%M:%S", tz = "GMT")
##
## Write the line plot to the PNG graphics device file as required
##
png(filename = "../datasciencecoursera/ExData_Plotting1/plot2.png")
plot(pwr$DateTime, pwr$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
566536cbacadcbe7d4d492d9f4977c5d540965fd | 403845497e0b4e4db08c647da083e732bd80c2ec | /man/camt.fdr.Rd | df16c1b36f11ad1ca67642689f2b9038b3ea8247 | [] | no_license | jchen1981/CAMT | d3023abd0f1104ae6ec894359c7e31fe35615465 | 02e70d664aedeed1737fd555e179e6161f937cd9 | refs/heads/master | 2021-07-02T05:38:51.122378 | 2021-01-01T04:22:56 | 2021-01-01T04:22:56 | 203,678,153 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 4,581 | rd | camt.fdr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/camt.cor.func.R
\name{camt.fdr}
\alias{camt.fdr}
\title{Perform covariate-adaptive false discovery rate control}
\usage{
camt.fdr(
pvals,
pi0.var = NULL,
f1.var = NULL,
data = NULL,
pvals.cutoff = 1e-15,
pi0.low = 0.1,
alg.type = c("EM", "OS"),
EM.paras = list(iterlim = 50, k.init = NULL, tol = 1e-05, pi0.init = NULL, nlm.iter =
5),
control.method = c("hybrid", "knockoff+"),
burnin.no = 500,
trace = FALSE,
return.model.matrix = TRUE,
...
)
}
\arguments{
\item{pvals}{a numeric vector of p-values.}
\item{pi0.var}{a formula, a vector, a data frame, or a matrix of covariate values for the prior null probability.}
\item{f1.var}{a formula, a vector, a data frame, or a matrix of covariate values for the alternative distribution.}
\item{data}{a data frame containing the covariates, only used when pi0.var, f1.var are classes of 'formula'.}
\item{pvals.cutoff}{a numeric value to replace p-values below that value, which is used to increase the stability of the algorithm.}
\item{pi0.low}{the allowed minimal pi0 value, which could guard against the dominance of the prior.}
\item{alg.type}{a character string indicating the algorithm used. 'OS' - direct one-step optimization using a Newton-type algorithm,
'EM' - using EM algorithm. 'OS' is fast but could be inaccurate under some scenarios. Default 'EM'.}
\item{EM.paras}{a list of control arguments for the EM algorithm
\itemize{
\item{iterlim}{an integer value indicating the maximum number of iterations.}
\item{tol}{a numeric value giving the tolerance in the relative change in the log likelihood below which the algorithm is considered to be converged.}
\item{pi0.init, k.init}{two scalars giving the initial guess of the average pi0 and k parameter.}
\item{nlm.iter}{an integer indicating the allowed maximum iteration in running \code{'nlm'}, used to speed up computation.}
}}
\item{control.method}{a character string indicating the FDR control variant. 'knockoff+': the knockoff+ procedure of Barber-Candes (BC),
conservative at sparse signal/small FDR levels. 'hybrid': an empirical method to correct the conservativeness of 'knockoff+'. The method is
based on taking the maximum over the BC-type (knockoff) and BH-type FDR estimates for a certain number (as specified by \code{'burnin.no'}) of
the most promising hypotheses at the start of the algorithm. The rest use knockoff-based FDR estimator. Default is 'hybrid'.}
\item{burnin.no}{an integer value indicating the number of the most promising hypotheses that will apply the 'hybrid' procedure above.}
\item{trace}{a logical value indicating whether to print the process.}
\item{return.model.matrix}{a logical value indicating whether to return the model matrix. Consider setting to FALSE if it's huge.}
\item{...}{additional arguments passed on to the \code{'nlm'} optimization.}
}
\value{
A list with the elements
\item{call}{the call made.}
\item{pi0}{a vector of the estimated null probabilities.}
\item{k}{a vector of the estimated shape parameters for the alternative distribution.}
\item{EM.paras}{actually used parameters in EM algorithm.}
\item{EM.iter}{the number of iteration actually used.}
\item{loglik}{log likelihood.}
\item{pi0.coef, k.coef}{a vector of the coefficients for pi0 and k.}
\item{pi0.var, f1.var}{the actual model matrix used if its return is requested.}
\item{fdr}{a numeric vector of the adjusted p-values.}
\item{pvals}{a numeric vector of the original p-values used.}
\item{ts}{a numeric vector of the thresholds (t) below which the corresponding hypothesis will be rejected.}
}
\description{
The function implements a scalable, flexible, robust and powerful FDR control method for large-scale multiple testing exploiting the auxiliary covariates.
It allows both the prior null probability and the alternative distribution to depend on covariates.
}
\examples{
data <- simulate.data(feature.no = 10000, covariate.strength = 'Moderate', covariate.model = 'pi0',
sig.density = 'Medium', sig.strength = 'L3', cor.struct = 'None')
camt.fdr.obj <- camt.fdr(pvals = data$pvals, pi0.var = data$pi0.var, f1.var = data$f1.var,
alg.type = 'EM', control.method = 'knockoff+')
plot.camt.fdr(camt.fdr.obj, covariate = as.vector(rank(data$pi0.var)), covariate.name = 'Covariate rank',
log = TRUE, file.name = 'CovariateModerate.pdf')
}
\references{
Xianyang Zhang, Jun Chen. Covariate Adaptive False Discovery Rate Control with Applications to Omics-Wide Multiple Testing. JASA. To appear.
}
\author{
Jun Chen
}
\keyword{FDR}
|
0dc90aad830677794ef313ff4a571dc5ab5a7172 | 320ddefc84d992475db8b83befde46e2b780314f | /R/see.move.r | 32ccd6979e1debb6f59c0b5345da3ae37729f8fd | [] | no_license | cran/asbio | bca146e402058cd67ff5fc42423cb0c0544f942b | 3cb01b7cb1a8dec60a5f809f91bc460a6566954d | refs/heads/master | 2023-08-31T23:43:08.304864 | 2023-08-20T02:22:36 | 2023-08-20T04:30:48 | 17,694,492 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,392 | r | see.move.r | see.move<-function(){
# Interactive tcltk demo: draggable data points with a live least-squares
# fit. All state (canvas items, fitted line, statistics labels) lives in
# this local() environment and is updated via <<- from the event callbacks.
local({
# Use themed ttk widgets when the Tcl/Tk version supports them (>= 8.5).
have_ttk <- as.character(tcl("info", "tclversion")) >= "8.5"
if(have_ttk) {
tkbutton <- ttkbutton
tkframe <- ttkframe
tklabel <- ttklabel
}
# Suspend Tk screen updates while the window is being assembled.
tclServiceMode(FALSE)
top <- tktoplevel()
tktitle(top) <- "Demonstration of Least Squares Regression -- Move points"
buttons <- tkframe(top)
tkpack(buttons, side="bottom", fill="x", pady="2m")
dismiss <- tkbutton(buttons, text="Exit",
command=function()tkdestroy(top))
tkpack(dismiss, side="right", expand=TRUE)
canvas <- tkcanvas(top, relief="raised", width=550, height=350)
tkpack(canvas, side="top", fill="x")
plotFont <- "Helvetica 9"
plotFont2 <- "Helvetica 11"
# Plot area background and axes. Canvas coordinates: the plotting region
# runs from (100, 250) at the origin to (400, 50) at the top right; note
# canvas y increases downwards.
tkcreate(canvas, "polygon", 100, 50, 100, 250, 400, 250, 400, 50, width=0, fill = "white")
tkcreate(canvas, "line", 100, 250, 400, 250, width=2)
tkcreate(canvas, "line", 100, 250, 100, 50, width=2)
tkcreate(canvas, "text", 275, 20, text="Moving points in simple linear regression",
font=plotFont2)
tkcreate(canvas, "text", 250, 290, text="x",
font=c("Helvetica", "11","italic"))
tkcreate(canvas, "text", 30, 150, text="y",
font=c("Helvetica", "11","italic"))
# White panel at the right that will hold the statistics readout.
tkcreate(canvas, "polygon", 430, 60, 430, 180, 530, 180, 530, 60, width=2, fill = "white")
# X tickmarks & labels; from x = 100 to 400
for (i in 0:10) {
x <- 100 + i * 30
tkcreate(canvas, "line", x, 250, x, 245, width=2)
tkcreate(canvas, "text", x, 254,
text=10*i, anchor="n", font=plotFont)
}
# Y tickmarks & labels; from y = 50 to 250
for (i in 0:5) {
y <- 250 - i * 40
tkcreate(canvas, "line", 100, y, 105, y, width=2)
tkcreate(canvas, "text", 96, y,
text=formatC(50*i,format="f",digits=1),
anchor="e", font=plotFont)
}
# The (original) data
points <- matrix(c(12, 56,
20, 94,
33, 98,
32, 120,
61, 180,
75, 160,
98, 223), ncol=2, byrow=TRUE)
## `self-drawing' point object: one oval canvas item per data row, all
## tagged "point" so the mouse bindings below apply to every point.
## Data -> canvas mapping: canvas_x = 100 + 3*x, canvas_y = 250 - 4/5*y.
point.items <- apply(points, 1, function(row) {
x <- 100 + 3 * row[1]
y <- 250 - 4/5 * row[2]
item <- tkcreate(canvas, "oval", x - 5, y - 5, x + 5, y + 5,
width=1, outline="black",
fill="SkyBlue2")
tkaddtag(canvas, "point", "withtag", item)
item
})
# Mouse-press handler: mark the clicked point "selected" and remember the
# press position so drags can be applied as deltas.
plotDown <- function(x, y) {
## Arguments:
## x, y - The coordinates of the mouse press.
x <- as.numeric(x)
y <- as.numeric(y)
tkdtag(canvas, "selected")
tkaddtag(canvas, "selected", "withtag", "current")
tkitemraise(canvas,"current")
lastX <<- x
lastY <<- y
}
# Drag handler: move the selected point by the mouse displacement.
plotMove <- function(x, y) {
## Arguments:
## x, y - The coordinates of the mouse.
x <- as.numeric(x)
y <- as.numeric(y)
tkmove(canvas, "selected", x - lastX, y - lastY)
lastX <<- x
lastY <<- y
}
# Return the current bounding-box coordinates of every point item.
step1 <- function(){lapply(point.items,
function(item)
as.double(tkcoords(canvas,item)))}
# Fit y ~ x in *canvas* coordinates and draw the regression line; returns
# the canvas item id so it can be deleted on redraw.
plotLine <- function(){
coords <-step1()
x <- sapply(coords, function(z) (z[1]+z[3])/2)
y <- sapply(coords, function(z) (z[2]+z[4])/2)
lm.out <- lm(y~x)
x0 <- range(x)
y0 <- predict(lm.out, data.frame(x=x0))
tkcreate(canvas, "line", x0[1], y0[1], x0[2], y0[2], width=3)
}
line <- plotLine()
# Compute the slope in *data* units (inverting the canvas mapping above)
# and render it in the statistics panel.
slope.func<-function(){
coords <-step1()
x <- sapply(coords, function(z) (z[1]+z[3])/2)
x <- (x-100)/3
y <- sapply(coords, function(z) (z[2]+z[4])/2)
y <- -1*(y-250)*5/4
lm.out<-lm(y~x)
slope<-lm.out$coefficients[2]
tkcreate(canvas, "text", 500, 80, text=formatC(slope,format="f",digits=2),
font=plotFont)
}
slope<-slope.func()
pre.func0 <- function(){
tkcreate(canvas, "text", 460, 80, text= "Slope = ",
font=plotFont)}
pre0 <-pre.func0()
# Intercept in data units, rendered in the statistics panel.
yint.func<-function(){
coords <-step1()
x <- sapply(coords, function(z) (z[1]+z[3])/2)
x <- (x-100)/3
y <- sapply(coords, function(z) (z[2]+z[4])/2)
y <- -1*(y-250)*5/4
lm.out<-lm(y~x)
yint<-lm.out$coefficients[1]
tkcreate(canvas, "text", 505, 105, text=formatC(yint,format="f",digits=2),
font=plotFont)
}
yint<-yint.func()
pre.func01 <- function(){
tkcreate(canvas, "text", 460, 105, text= "Y int. = ",
font=plotFont)}
pre01 <-pre.func01()
# Correlation, computed on raw canvas coordinates; negated for display
# because the canvas y-axis points downwards.
r.func <-function(){
coords <-step1()
x <- sapply(coords, function(z) (z[1]+z[3])/2)
y <- sapply(coords, function(z) (z[2]+z[4])/2)
r<-round(cor(y,x),2)
tkcreate(canvas, "text", 485, 140, text=formatC(-1*r,format="f",digits=2),
font=plotFont)
}
r <-r.func()
pre.func1 <- function(){
tkcreate(canvas, "text", 460, 140, text= "r = ",
font=plotFont)}
pre1 <-pre.func1()
# r^2 (sign-insensitive, so no negation needed).
r2.func <-function(){
coords <-step1()
x <- sapply(coords, function(z) (z[1]+z[3])/2)
y <- sapply(coords, function(z) (z[2]+z[4])/2)
r<-round(cor(y,x),2)
tkcreate(canvas, "text", 485, 165, text=formatC(r*r,format="f",digits=2),
font=plotFont)
}
r2 <-r2.func()
pre.func2 <- function(){
tkcreate(canvas, "text", 460, 165, text= 'r\u00b2 = ',
font=plotFont)}
pre2 <-pre.func2()
lastX <- 0
lastY <- 0
# Hover feedback: highlight the point under the cursor.
tkitembind(canvas, "point", "<Any-Enter>",
function() tkitemconfigure(canvas, "current",
fill="red"))
tkitembind(canvas, "point", "<Any-Leave>",
function() tkitemconfigure(canvas, "current",
fill="SkyBlue2"))
tkitembind(canvas, "point", "<1>", plotDown)
# On mouse release: delete the old line and statistics labels, then refit
# and redraw everything from the points' new positions.
tkitembind(canvas, "point", "<ButtonRelease-1>",
function(x){
tkdtag(canvas, "selected")
tkdelete(canvas, "withtag", line)
tkdelete(canvas, "withtag", slope)
tkdelete(canvas, "withtag", yint)
tkdelete(canvas, "withtag", r)
tkdelete(canvas, "withtag", r2)
tkdelete(canvas, "withtag", pre0)
tkdelete(canvas, "withtag", pre01)
tkdelete(canvas, "withtag", pre1)
tkdelete(canvas, "withtag", pre2)
line <<- plotLine()
slope <<- slope.func()
yint <<- yint.func()
pre0 <<- pre.func0()
pre01 <<- pre.func01()
r <<- r.func()
r2 <<- r2.func()
pre1 <<- pre.func1()
pre2 <<- pre.func2()
})
tkbind(canvas, "<B1-Motion>", plotMove)
# Re-enable Tk screen updates now that the UI is fully built.
tclServiceMode(TRUE)
})
}
|
a9ba31f40362cfe6617e2fb66d4e1b6de77ab7b6 | 8ceef923a736f0161077cb8aeeb3a8ba595aff3c | /scripts/biplot outliers (ALL).R | 5b64c71dad8f0161dad3a4fb83596d8f73752add | [] | no_license | cont-limno/lagos_limno_qaqc | 65501b0d23b4102cdf04841f492faf67e7ba5765 | 390ee4700c0533ff8095d184f6be29a5e2fb93e9 | refs/heads/master | 2022-12-01T05:24:57.802137 | 2020-08-05T19:13:47 | 2020-08-05T19:13:47 | 250,358,374 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,154 | r | biplot outliers (ALL).R | require(gplots)
library(DMwR)
library(FNN)
library(robustbase)
library(scales)
library(gridExtra)
library(mvoutlier)
# library(MVN)
library(rstatix)
outlier.dist = function(dat,k=4,method = "brute",iqr=15,alpha=0.0001) {
#function to estimate euclidian distances between all points in a pairwise dataset (X)
#distance to the k nearest neighbor is estimated and robust boxplot is used to identify
#distances that are considered outliers at specified iqr value
# dist = knn.dist(data = dat,k = k,algorithm = method)
# scores = dist[,k]
# box = adjbox(scores,range=iqr,plot = FALSE)
# if (box$fence[2]==0) out.box = box$out>max(scores) else out.box = box$out>box$fence[2]
# box = box$out[out.box]
# out <- dat %>% mutate(outlier1 = (scores %in% box))
#normal Mahalab
threshold <- stats::qchisq(1-alpha, dim(dat)[2])
.data <- dat %>%
as.matrix()
distance <- stats::mahalanobis(
.data,
center = colMeans(.data),
cov = cov(.data)
)
out <- dat %>% mutate(outlier1 = distance > threshold)
dat[,1] = log(dat[,1]+(1-min(dat[,1]))) #log transform data
dat[,2] = log(dat[,2]+(1-min(dat[,2])))
threshold <- stats::qchisq(1-alpha, dim(dat)[2])
.data <- dat %>%
as.matrix()
distance <- stats::mahalanobis(
.data,
center = colMeans(.data),
cov = cov(.data)
)
out <- out %>% mutate(outlier2 = distance > threshold)
out <- out %>% mutate(outlier3 = (outlier1 +outlier2))
return(out)
}
#setup the flags dataframe
out.flags = data.frame(lagoslakeid = NA,eventidb = NA,programid = NA,variable=NA,value=NA,flag=NA)
out.flags = out.flags[-1,]
combinations = combn(x = c(1:length(variable.cols)),m = 2,simplify = TRUE)
combinations = data.frame(x=combinations[1,],y=combinations[2,])
first.cols = c(lakeid.col,event.col,programid.col)
options(device="quartz")
pdf(file=paste("output/BiPlot.pdf",sep=""),width=8.5,height=10,onefile=TRUE,pointsize=10)
par(mfrow=c(5,3), cex=1,ps=10,mar=c(2.2,1,0,0) + 0.1,oma=c(0,0,2,0),mgp=c(1.2,.5,0),pty="s")
i=1
plot = list()
for (s in 1:nrow(combinations)) {
x.col = variable.cols[combinations[s,1]]
y.col = variable.cols[combinations[s,2]]
X = Data[,c(first.cols,x.col,y.col)] %>% drop_na() #%>% # lines 41 and 42 to be deleted after I have incorporated range checks
# gather(key = "parameter",value= "value", -lagoslakeid,-eventida,-programid_lagos_us) %>%
# filter(value > 0) %>% spread(value = value, key = parameter) %>% drop_na()
if(nrow(X) >= 100) { #only run this analysis if there are 100 pairs of data
# raw.data <- X
# X[,4] = log(X[,4]+(1-min(X[,4]))) #log transform data
# X[,5] = log(X[,5]+(1-min(X[,5])))
plot.data = outlier.dist(dat = X[,4:5]) #identify the outliers
#create plots
plot[[i]] <- ggplot(data = plot.data ,aes_string(x=names(plot.data)[1],y=names(plot.data)[2])) + geom_bin2d(bins=100) +
geom_point(data = plot.data %>% filter(outlier3==2),
aes_string(x=names(plot.data)[1],y=names(plot.data)[2]),color="red",size=2) +
#geom_point(data = plot.data %>% filter(outlier1==TRUE),
#aes_string(x=names(plot.data)[1],y=names(plot.data)[2]),colour=alpha("purple",0.5),size=2) +
lims(x=range(plot.data[,1]),y=range(plot.data[,2]))
#extract points to flag as outliers
# new.data = cbind(raw.data,plot.data[,3])
# new.data = new.data[which(new.data[,6]==TRUE),]
# if(nrow(new.data)>0) {
# new.data$flag = "BIPLOT"
# new.data$variable = names(new.data)[4]
# temp.flag = new.data[,c(1,2,3,8,4,7)]
# names(temp.flag)[5] = "value"
# out.flags = rbind(out.flags,temp.flag)
# new.data$variable = names(new.data)[5]
# temp.flag = new.data[,c(1,2,3,8,5,7)]
# names(temp.flag)[5] = "value"
# out.flags = rbind(out.flags,temp.flag)
# }
if (i %% 6 == 0) { ## print 8 plots on a page
print (do.call(grid.arrange, c(plot,ncol=2)))
plot = list() # reset plot
i = 0 # reset index
}
i = i + 1
}
}
if (length(plot) != 0) {
print (do.call(grid.arrange, plot))
}
dev.off()
graphics.off()
#
#
#
#
#
# if(length(na.omit(X[,4]))>0) {
# lower = min(X[,4],na.rm=TRUE)
# x.adj = abs(lower-1)
# if(lower < 1) {
# xadj=TRUE
# X[,4] = X[,4]+x.adj
# }
# Y = Data[,y.col]
# if(length(na.omit(Y))>0) {
# lower = min(Y,na.rm=TRUE)
# y.adj = abs(lower-1)
# if(lower < 1) {
# yadj =TRUE
# Y = Y+y.adj
# }
# new.data = data.frame(X,Y)
# names(new.data)[4:5] = c(variable.names[combinations[s,1]],variable.names[combinations[s,2]])
# new.data = new.data[complete.cases(new.data),]
# if(nrow(new.data)==0) {
# plot[[i]] <- ggplot(data = new.data, aes_string(x=names(new.data)[4],y=names(new.data)[5])) + annotate("text",label = "No Data",x=0.1,y=0.5)
# } else {
# raw.data = new.data
# new.data[,c(4:5)] = log(new.data[,c(4:5)])
# if(nrow(new.data)>20){
# plot.data = outlier.dist(new.data[,c(4:5)],k=10,iqr=25) #10 in 871
# plot[[i]] <- ggplot(data = plot.data %>% filter(Outlier==FALSE),aes_string(x=names(plot.data)[1],y=names(plot.data)[2])) + geom_bin2d(bins=100) +
# geom_point(data = plot.data %>% filter(Outlier==TRUE),aes_string(x=names(plot.data)[1],y=names(plot.data)[2]),colour=alpha("red",0.5),size=2) +
# lims(x=range(plot.data[,1]),y=range(plot.data[,2]))
# new.data = cbind(raw.data,plot.data[,3])
# new.data = new.data[which(new.data[,6]==TRUE),]
# if(nrow(new.data)>0) {
# new.data$flag = "BIPLOT"
# new.data$variable = names(new.data)[4]
# temp.flag = new.data[,c(1,2,3,8,4,7)]
# names(temp.flag)[5] = "value"
# out.flags = rbind(out.flags,temp.flag)
# new.data$variable = names(new.data)[5]
# temp.flag = new.data[,c(1,2,3,8,5,7)]
# names(temp.flag)[5] = "value"
# out.flags = rbind(out.flags,temp.flag)
# }
# } else { plot[[i]] <- ggplot(data = new.data, aes_string(x=names(new.data)[4],y=names(new.data)[5])) + geom_point() }
# }
# } else { plot[[i]] <- ggplot(data = new.data, aes_string(x=names(new.data)[4],y=names(new.data)[5])) + annotate("text",label = "No Data",x=0.1,y=0.5)
# }
# }
# if (i %% 6 == 0) { ## print 8 plots on a page
# print (do.call(grid.arrange, c(plot,ncol=2)))
# plot = list() # reset plot
# i = 0 # reset index
# }
# i = i + 1
# }
# if (length(plot) != 0) {
# print (do.call(grid.arrange, plot))
# }
# dev.off()
# graphics.off()
#
#
# out.flags = out.flags[!duplicated(out.flags[,c(2,4)]),]
#
# outlier.flags = rbind(outlier.flags,out.flags)
#
# out.vars = unique(out.flags$variable)
#
# for(i in 1:length(out.vars)){
# which.col = which(names(Data)==out.vars[i])
# temp.data = out.flags[which(out.flags$variable==out.vars[i]),]
# Data[match(temp.data$eventidb,Data$eventidb),which.col]
# Data[match(temp.data$eventidb,Data$eventidb),which.col] = NA
# }
|
3d0bdc510ce7bab879bd5ab7a98d3c55a4db0aa4 | 3661e6f63531ee8b791f03c2323ecabed1c354a9 | /R/lu.R | 2bf652e5fac82e3b28de68b0a31a8edee92a2548 | [] | no_license | jdh4/parallel_lu_decomp | e97f3500027681b708c2aba4d736bf67897c8421 | a92f959173517330d151b967f929257f7ac49b5c | refs/heads/master | 2023-08-05T23:33:21.236703 | 2021-09-25T21:53:45 | 2021-09-25T21:53:45 | 281,433,091 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 153 | r | lu.R | # install.packages("microbenchmark")
library(microbenchmark)
library(Matrix)
N <- 10000
microbenchmark(lu(matrix(rnorm(N*N), N, N)), times=5, unit="s")
|
8041eb0468ea778c123c267c5c0aa53dab74512a | 35d57ab8dcc9b441eac90b6c20215043538bee4d | /01_scripts/01_louse_txome_analysis.R | 62813354b58b44b4bd5b7c0d00f54089bba857fe | [] | no_license | bensutherland/2017_ms_immunostim | 337dfbc2280d975faa5d19a3b317693389a10033 | 55f2aa06932f1226a1adfc768c2e19a8e0a923f6 | refs/heads/master | 2021-01-19T11:34:19.683178 | 2017-02-27T14:57:16 | 2017-02-27T14:57:16 | 45,756,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,330 | r | 01_louse_txome_analysis.R | # Transcriptome analysis for the Ssal/Lsal immunostim project
# rm(list=ls())
## Install packages
# source("http://www.bioconductor.org/biocLite.R")
# biocLite()
# biocLite("limma")
library(limma)
#### 1. Lice transcriptome analysis ####
# NOTE(review): absolute, user-specific working directory; kept to preserve
# the original behaviour, but ideally scripts avoid setwd().
setwd("~/Documents/koop/immunostim/2017_ms_immunostim")
#### 1.a. Input data and quality control ####
# Load targets file (i.e. interpretation file): one row per array, with the
# Cy3/Cy5 image file names used by read.maimages() below.
targets <- readTargets("00_archive/Targets_2color_immunostim.csv", row.names="Name", sep = ",")
# Load probe annotation file
annot <- read.table("00_archive/koop_Lsal38K_2010-02-25b-short.txt",
                    header=TRUE, sep = "\t") #note sep = "\t" is important when empty cells
annot.df <- as.data.frame(annot)
dim(annot.df) #38132 rows; 5 columns - note: this should match the length of MA.flt1 given above
str(annot.df)
# Load expression data (block and channel sep.)
RG <- read.maimages(path = "02_raw_data/",
                    files = targets[,c("FileNameCy3","FileNameCy5")],
                    source="imagene",
                    columns = list(f="Signal Median", b="Background Median"))
dim(RG$R) # number of probes and number samples
targets # see details on the samples
# View density plot of unprocessed data. Use TRUE/FALSE rather than the
# reassignable shorthands T/F.
plotDensities(RG, log=FALSE) # note that there is a bump at the saturation point
plotDensities(RG, log=TRUE) # this makes it easier to view the distribution, will produce NAs
# Set probe control type
spottypes <- readSpotTypes(file = "00_archive/SpotTypes.txt")
RG$genes$Status <- controlStatus(types = spottypes, genes = RG)
# Quality Control plots ##
plotMA3by2(RG, path = "03_analysis/") #MA plots per array saved to working directory
imageplot3by2(RG, path = "03_analysis/") #imageplots per array saved to working directory
# Boxplots of non-normalized foreground/background intensities per channel:
# R/G = red/green foreground, Rb/Gb = corresponding backgrounds.
par(mfrow=c(2,2), mar= c(3,3,0.5,1) + 0.2, mgp = c(2,0.75,0))
units <- c("R", "G", "Rb", "Gb")
# Iterate over the channel names directly instead of an index loop over 1:4.
for (channel in units) {
  boxplot(log2(RG[[channel]]),
          xaxt = "n",
          ylab = "log2(fluor. units)", xlab = "samples", main = channel)
}
print(dimnames(RG[["R"]])[[2]]) # this is the order of the plot
#### 1.b. Quality filtering and normalization ####
par(mfrow=c(3,1), mar= c(3,3,0.5,1) + 0.2, mgp = c(2,0.75,0))
## Flag low-expressed probes per channel: signal >= 500 in at least 3 arrays
## (lowest sample size is 4 for ld, per the original note).
RG$genes$isexpr.R <- rowSums(RG$R >= 500) >= 3 #here, value >= 500 in at least 3 arrays
table(RG$genes$isexpr.R)
RG$genes$isexpr.G <- rowSums(RG$G >= 500) >= 3 ##isexpr > 500 in at least 3 arrays
table(RG$genes$isexpr.G)
## Identify saturated probes (scanner maximum 65535 in all 11 arrays)
RG$genes$satspot.R <- rowSums(RG$R == 65535) >= 11 #saturated in all arrays
table(RG$genes$satspot.R)
RG$genes$satspot.G <- rowSums(RG$G == 65535) >= 11
table(RG$genes$satspot.G)
## background correction
MA.bm <- backgroundCorrect(RG, method="minimum")
plotDensities(MA.bm)
## within array normalization (loess)
MA.bmW <- normalizeWithinArrays(MA.bm, method="loess", weights=NULL)
plotDensities(MA.bmW)
## between array normalization using Cy3 channel (Gquantile)
MA.bmWG <- normalizeBetweenArrays(MA.bmW, method="Gquantile")
plotDensities(MA.bmWG)
## only retain custom probes, no control spots
MA.flt1 <- MA.bmWG[MA.bmWG$genes$Status == "Unknown",]
dim(MA.flt1) #38132 rows; 11 columns
## Attach annotation to the MA list
MA.flt1$genes$spotID <- annot.df[,1]
MA.flt1$genes$description <- annot.df[,2]
MA.flt1$genes$GeneID <- annot.df[,4]
## Remove low expressed and saturated probes. The flag columns are logical,
## so use them directly instead of comparing against the string "TRUE".
MA.flt <- MA.flt1[MA.flt1$genes$isexpr.R & MA.flt1$genes$isexpr.G
                  & !MA.flt1$genes$satspot.R & !MA.flt1$genes$satspot.G,]
dim(MA.flt)
plotDensities(MA.flt) ##density plot of expression values
#### 1.c. Data extraction ####
## Save out quality filtered MAlist (annotation columns + log-ratio matrix)
write.csv(cbind(MA.flt$genes,MA.flt$M), "04_output/all_expr_data.csv", row.names=FALSE)
## Select probe expression specifically for correlation with qPCR GOIs
probes.of.interest <- c("C042R126","C088R114","C263R087")
# NOTE(review): `MA.flt$genes[12]` selects column 12 of the genes frame by
# position -- presumably the spotID/probe-ID column added above, but the
# exact column count of the imagene genes frame is not visible here; confirm
# and consider using MA.flt$genes$spotID by name instead.
probes.for.cor <- as.data.frame(cbind(
MA.flt$M[MA.flt$genes[12] == probes.of.interest[1]]
, MA.flt$M[MA.flt$genes[12] == probes.of.interest[2]]
, MA.flt$M[MA.flt$genes[12] == probes.of.interest[3]]))
# Result: one row per array (sample), one column per probe of interest.
rownames(probes.for.cor) <- colnames(MA.flt$M)
colnames(probes.for.cor) <- probes.of.interest
probes.for.cor
str(probes.for.cor)
## Save out probes.for.cor for the downstream qPCR correlation analysis
saveRDS(probes.for.cor, file = "04_output/probes_for_cor.rds")
## Save out background list for GO enrichment testing
write.csv(MA.flt$genes, file = "04_output/background_immunostim.csv")
#### 1.d. Differential expression analysis ####
### Differential Expression Analysis (limma lmFit/eBayes workflow) ##
design <- modelMatrix(targets, ref = "ref")
attributes(design)
design
fit <- lmFit(object = MA.flt, design = design)
cont.matrix <- makeContrasts(ld-con, levels=design) ##makes the ld vs con contrast
fit2 <- contrasts.fit(fit, cont.matrix) ###computes contrasts from the linear-model fit
fit3 <- eBayes(fit2) ##given related parameter estimates and SEs, computes moderated t-statistics, etc
# NOTE(review): adjust="none" with p.value=0.05 filters on *unadjusted*
# p-values -- presumably intentional here, but confirm against the
# manuscript's multiple-testing strategy.
output <- topTable(fit3, number=45000, adjust="none", p.value=0.05) # number=45000 makes sure all will be output
write.table(output, file="04_output/transcriptome_DE_results.txt", sep="\t")
output.df <- data.frame(output)
681965a367f58469c0f567753b967be7183a7c93 | 8f7ef7ce474d886bcb3fce482133b771469500d3 | /Pothole Data Cleaning.R | c64bc45ddd9588cfeea19a16bd8c499e3f35dcfb | [] | no_license | jkgenser/Potholes | 251e1a023e69af401cb970163d48678903dcdab1 | 263e57e5d66462fe7d0ac2577f02736f98ba4b32 | refs/heads/master | 2021-01-23T13:22:52.453368 | 2015-09-02T23:09:34 | 2015-09-02T23:09:34 | 41,259,381 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,453 | r | Pothole Data Cleaning.R | setwd("H:/USER/JGenser/Pot Holes/Program")
library(plyr)
library(ggplot2)
library(ggmap)
library(sp)
library(lubridate)
library(leaflet)
# Load the raw pothole case exports (Boston tab-delimited, Cambridge CSV).
bos <- read.csv("Boston_Pothole_Cases.txt", header=TRUE, sep="\t")
cam <- read.csv("Cambridge_Pot_Holes.csv", header=TRUE, sep=",")
cam2 <- cam # placeholder copy; overwritten with a column subset further below
# Convert the Cambridge location information into latitudes and longitudes.
# The Address field embeds "(lat, lon)" after a newline; strip the text
# around it, then cut fixed-position substrings.
# NOTE(review): the substr() positions assume a fixed coordinate width --
# verify against the raw Address strings.
cam$TEMP <- gsub(".*\n|).*","",cam$Address)
cam$LATITUDE <- as.numeric(substr(cam$TEMP,2,11))
cam$LONGITUDE <- as.numeric(substr(gsub(".*,","",cam$TEMP),2,12))
# Remove records with missing location information from the Cambridge data
# (use !is.na() rather than comparing against the reassignable constant F).
cam <- subset(cam, !is.na(LATITUDE))
# Convert dates to an R-usable format; origin "1899-12-30" corresponds to
# Excel's date serial numbers.
bos$dt_close <- as.Date(bos$date_closed, origin = "1899-12-30")
cam$dt_close <- as.Date(cam$date_closed, origin = "1899-12-30")
bos$dt_open <- as.Date(bos$date_open, origin = "1899-12-30")
cam$dt_open <- as.Date(cam$date_open, origin = "1899-12-30")
#create placeholder fields so both data frames share an identical column set
cam$Source <- ""
bos$Status <- ""
keep = c("Status","Source" ,"LATITUDE", "LONGITUDE", "dt_close", "dt_open" )
cam2 = cam[keep]
bos2 = bos[keep]
#lag_counts = count(cam2[, c("LATITUDE", "LONGITUDE")])
#lag_counts = table(bos2[, c("LATITUDE", "LONGITUDE")])
# Count repeat-fill coordinates across both cities (plyr::count) and keep
# locations filled more than 10 times.
df_combined = rbind(cam2,bos2)
lag_counts = count(df_combined[, c("LATITUDE", "LONGITUDE")])
many_fills = lag_counts[ lag_counts$freq > 10,]
# Keep only cases closed in January-March 2015 (lubridate month()/year()).
cam2 = cam2[month(cam2$dt_close)<4 & year(cam2$dt_close) ==2015,]
bos2 = bos2[month(bos2$dt_close)<4 & year(bos2$dt_close) ==2015,]
# cam2 = subset(cam2, (year(dt_close) == 2015 & month(dt_close<4)))
# bos2 = subset(bos2, (year(dt_close) == 2015 & month(dt_close<4)))
##combine cambridge and boston datasets
df <- rbind(cam2, bos2)
##calculate the time to closure in days
df$lag = as.numeric(difftime(df$dt_close, df$dt_open, units="days"))
# Clamp nonsensical negative lags (closed before opened) at -1.
df$lag[which(df$lag < -1 )] <- -1
# Export the case-level map data and the repeat-fill locations.
write.table(df, "mappingData.csv", sep="," , col.names=NA)
write.table(many_fills, "manyFills.csv", sep=",", col.names=NA)
############################################
###TEST CODE##################################
###################
cam$month = month(cam$dt_close)
map <- qmap('02143', zoom=12, maptype = 'terrain',source ='stamen')
cit <- subset(df, grepl("Citizen", Source))
cit_pts <- geom_point(data = cit, x=cit$LONGITUDE, y = cit$LATITUDE, color = 'orange', alpha = 0.05)
bos_pts <- geom_point(data = bos, x=bos$LONGITUDE, y = bos$LATITUDE, color = 'black', alpha = 0.05)
cam_pts <- geom_point(data = cam, x=cam$LONGITUDE, y = cam$LATITUDE, color = "blue", alpha = 0.05)
map + bos_pts + cam_pts
cam2$lag = as.numeric(difftime(cam2$dt_close, cam2$dt_open, units="days"))
bos2$lag = as.numeric(difftime(bos2$dt_close, bos2$dt_open, units="days"))
p = ggplot(cam2, aes(x = lag)) +
geom_histogram(aes(y=..density..),
color = "black", fill = "white") +
geom_density(alpha = .2, fill = "#FF6666")
bplot = ggplot(bos2, aes(x = lag)) +
geom_histogram(aes(y=..density..),
color = "black", fill = "white") +
geom_density(alpha = .2, fill = "#FF6666")
# pot.pts <- SpatialPointsDataFrame(coords,bos[10], proj4string = CRS("+init=epsg:4326"))
# plot(pot.pts, pch = ".", col="light blue")
# cam_coord <- cbind(Longitude = cam$LONGTIDUE, Latitude = cam$LATITUDE)
# bos_coords <- cbind(Longitude = as.numeric(as.character(bos$LONGITUDE)), Latitude = as.numeric(as.character(bos$LATITUDE)))
|
4a6cf000d0aa5c8c69d20c13d1a7ffe0a8d7f1b0 | 29585dff702209dd446c0ab52ceea046c58e384e | /ArgumentCheck/tests/testthat/test-castingErrorsAndWarnings.R | 6460ab0abee981b495e88ab2f0d6b2919b092b0c | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 752 | r | test-castingErrorsAndWarnings.R | context("finishArgCheck Errors and Warnings")
# Unit tests for finishArgCheck(): it should surface any messages accumulated
# on an ArgCheck object as a single numbered error and/or warning.
test_that("Cast Appropriate Error",
{
  # A queued error message must be raised as a numbered error.
  Check <- newArgCheck()
  addError("New Error",
           Check)
  expect_error(finishArgCheck(Check),
               "1: New Error")
})

test_that("Cast Appropriate Warning",
{
  # A queued warning message must be raised as a numbered warning.
  Check <- newArgCheck()
  addWarning("New Warning",
             Check)
  expect_warning(finishArgCheck(Check),
                 "1: New Warning")
})

test_that("Cast Simultaneous Error and Warning",
{
  # With both queued, the error and the warning must be raised together.
  Check <- newArgCheck()
  addError("New Error",
           Check)
  addWarning("New Warning",
             Check)
  expect_warning(expect_error(finishArgCheck(Check),
                              "1: New Error"),
                 "1: New Warning")
})
03a17d89f663b6cc3ccdece43fb9133ff892716b | 5f6fca5dcf1331f0f0ecba1af68d062612c9c7d3 | /Projekt_2/IFSbmp/man/plot_IFS.Rd | 90aa0ef3e8f4fdc52ee347fb568ec3b57961d694 | [] | no_license | ultramargarine/ProgramowanieWizualizacja2017 | 421666d332c3ff3ffea40b6d12be10537ef41654 | fcfd5b393717ec6ca96919656b44d3c4bcbd7d93 | refs/heads/master | 2021-09-15T09:01:58.860448 | 2018-03-08T22:00:50 | 2018-03-08T22:00:50 | 105,997,439 | 0 | 0 | null | 2018-03-08T21:58:51 | 2017-10-06T11:32:12 | R | UTF-8 | R | false | true | 1,365 | rd | plot_IFS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_IFS.R
\name{plot.IFS}
\alias{plot.IFS}
\title{Draws a plot for an object of class IFS with adjustable parameters of depth and starting point for IFS iterations.}
\usage{
\method{plot}{IFS}(x = NULL, start = c(0, 0), depth = 1, ...)
}
\arguments{
\item{x}{A list of class IFS.}
\item{start}{Coordinates of the starting point for the IFS method.}
\item{depth}{Parameter indicating number of iterations to be executed.}
\item{...}{Additional parameters of the generic plot function.}
}
\value{
Plotted fractal image.
}
\description{
Draws a plot for an object of class IFS with adjustable parameters of depth and starting point for IFS iterations.
}
\examples{
lin <- function(a,b,c,d,e,f){function(x,y) {c(a*x+b*y+c,d*x+e*y+f)}}
plot(createIFS(lin(.5,-.5,0,.5,.5,0),lin(-.5,-.5,1,.5,-.5,0)),depth=16,type="p",cex=.1,col="orange")
f <- lin(1/2,0,0,0,1/2,0); g <- lin(1/2,0,1/2,0,1/2,0); h <- lin(1/2,0,1/4,0,1/2,sqrt(3)/4)
plot(createIFS(f,g,h),depth=8,cex=.1)
lin3 <- function(a,b) {lin(1/3,0,a/3,0,1/3,b/3)}
plot(createIFS(lin3(0,0),lin3(0,1),lin3(0,2),lin3(1,0),lin3(1,2),lin3(2,0),lin3(2,1),lin3(2,2)))
plot(createIFS(lin(0,0,0,0,0.16,0),lin(0.85,0.04,0,-0.04,0.85,1.6),lin(0.2,-0.26,0,0.23,0.22,1.6),
lin(-0.15,0.28,0,0.26,0.24,0.44)),depth=9,type="p",cex=.5,col="green")
}
|
9d52b01e79210d818fcbefce08911b30b03db0d0 | 651639c305da650240f8c9af46dd5943b0597360 | /plot1.R | bd1b37ad795f06f099d4a47b933ca6b5a2f1876b | [] | no_license | HolyZero/Coursera-Exploratory-Data-Analysis-Project1 | 9f1ab23088174b8ff1dac4aa931502ca5ae3a068 | 04dbfc6929b0e56a76d9c3ce177bf62d903cc889 | refs/heads/master | 2020-12-11T04:05:46.594961 | 2015-11-06T18:20:11 | 2015-11-06T18:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 497 | r | plot1.R | # Read data into table
# Read the power-consumption extract: semicolon-separated, '?' marks missing
# values, first two columns (Date, Time) are character, the remaining 7 numeric.
data <- read.table("data.txt", sep=";", header=TRUE, na.strings="?",
                   colClasses=c("character","character",rep("numeric",7)))
# Keep only the two days of interest (1-2 February 2007; dates are d/m/yyyy strings).
subdata <- subset(data,Date=="1/2/2007"|Date=="2/2/2007")
head(subdata)
# Draw the Global Active Power histogram straight to a 480x480 PNG device.
png("plot1.png", width = 480, height = 480, units = "px")
hist(subdata$Global_active_power,col="red",
     main="Global Active Power",xlab="Global Active Power (killowatts)",
     breaks=12, ylim=c(0,1200))
dev.off()
e123d5028e4c669973296a0b9eedfdbb6e70c3dd | d08bca25b7895233b559b78e9e06cbe3e1c010ff | /man/get_files_dir.Rd | 6761162b762bbf96b7435bae5d4895a6980ee137 | [] | no_license | kun-ecology/ecoloop | 87e2980db62bb305fc9c24a5ace5aca9d7f4e466 | c90906c849b5f78654a7974b0308556e3aaf01b9 | refs/heads/master | 2023-07-29T08:50:35.823804 | 2023-07-14T06:51:47 | 2023-07-14T06:51:47 | 242,403,772 | 1 | 2 | null | null | null | null | UTF-8 | R | false | true | 555 | rd | get_files_dir.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_files_dir.R
\name{get_files_dir}
\alias{get_files_dir}
\title{A function that returns a list of file names with specified patterns}
\usage{
get_files_dir(directory = NULL, pattern = NULL)
}
\arguments{
\item{directory}{a string, absolute file path}
\item{pattern}{specified patterns e.g., "txt", "xlsx", and etc.}
}
\value{
a list, file path as the element while file name as the element name
}
\description{
A function that returns a list of file names with specified patterns
}
|
76cbf10421c88b30d9765c4e9941ad6e233070b6 | cf91d775282ed873820d80c34b6b84cdb5e4b1b2 | /src/test.R | 2df83ffab9086e3f457ed62f96cfa931217ca2b5 | [] | no_license | Cohey0727/rlang-noob | 89f99666dd28db9fbd68c5feb7b4d84e7b61dbe6 | 9ac4b867a974f8d61a590b259a3d694b686e8912 | refs/heads/master | 2023-03-07T20:04:10.159984 | 2021-02-13T04:53:55 | 2021-02-13T04:53:55 | 338,386,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | test.R | # 変数に値を代入する際は<-を用いる
var <- 'TEST'
var
# Reassigning a value of a different type is also possible
var <- 100
var
var + 100
# Japanese (non-ASCII) characters can also be used as variable names
日本語 <- 100
日本語
# as.numeric lets a string be treated as a number
var <- '5'
as.numeric(var)
# If the string cannot be converted, the result is a failure (NA with a warning)
var <- 'foo'
as.numeric(var)
# Code formatting shortcut in RStudio: ctrl+A, then ctrl+shift+A
# Use install.packages to install a package
install.packages("tidyverse")
install.packages("purrr")
install.packages("lubridate")
# Use library to attach an installed package
library(lubridate)
|
8682941f9c8712fcabece3acda017e5286ba0b29 | 2f104163e540bacbe611408c3ca1ea585b969d53 | /experiments/Table4/archive/1vs2TestCoarseTestRMPI_rmpi.R | f7a608f63c16c92a7f845336f1806efb7d2f42fc | [] | no_license | hkasahar/normalregMix | 529475b1388df312a06d0a686d16d6c3b424ba5f | 60ad78c5107f7f9d84e88ceaf8e6f75aa80467d4 | refs/heads/master | 2020-04-15T13:38:32.599270 | 2018-05-21T08:14:46 | 2018-05-21T08:14:46 | 58,227,243 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,179 | r | 1vs2TestCoarseTestRMPI_rmpi.R | #install.packages("normalregMix_1.0.tar.gz", repos = NULL, type="source")
library(snow)
library(doParallel)
library(Rmpi)
library(normalregMix)
## Run the modified EM test for the number of mixture components on one
## simulated sample, dispatching on the dimension of X.
##   sample   - numeric vector: first n entries are y, the rest is the
##              row-major n x q regressor data (absent when q <= 0)
##   q        - number of regressors; q <= 0 means a plain normal mixture test
##   an       - penalty term a_n for the EM test
##   m        - number of components under the null
##   z        - optional extra covariates, passed through
##   parallel - parallelisation flag forwarded to normalregMix
PerformEMtest <- function (sample, q, an, m = 1, z = NULL, parallel) {
  library(doParallel)    # workers might need these packages attached
  library(normalregMix)  # workers might need these packages attached
  testMode(TRUE)         # fixed test mode, for replication
  n <- as.integer(length(sample)/(q+1))
  y <- sample[1:n]       # first n elements are the y data
  if (q <= 0)
    return (normalmixMEMtest(y, m = m, z = z, an = an, crit.method = "asy",
                             parallel = parallel))
  # the remaining part consists of n chunks of q-length x data (row-major)
  x <- matrix(sample[(n+1):length(sample)], nrow = n, byrow = TRUE)
  return (regmixMEMtest(y, x, m = m, z = z, an = an, crit.method = "asy",
                        parallel = parallel))
}
## Returns the frequency with which the null H0: m=1 is rejected across the
## replicated samples in `data` (one sample per column) at level `crit`,
## for penalty value `an`. When `rmpi` is TRUE the replications are farmed
## out to MPI slaves with load balancing; otherwise they run locally.
PerformEMtests <- function (an, data, crit = 0.05, q = 1, m = 1,
                            parallel, rmpi) {
  if (rmpi)
  {
    # mpi.applyLB needs a list: one element per sample column (y x_1' x_2' ... x_n')'
    ldata <- lapply(seq_len(ncol(data)), function(i) data[,i])
    print(system.time(out <- mpi.applyLB(ldata, PerformEMtest, q = q, an = an, m = m, z = NULL,
                                         parallel = parallel)))
  }
  else
    print(system.time(out <- apply(data, 2, PerformEMtest, q = q, an = an, m = m, z = NULL,
                                   parallel = parallel)))
  # pvals rows index the EM test statistic K; rows 2 and 3 correspond to K=2 and K=3
  pvals <- sapply(out, "[[", "pvals")
  print(list(an, K2 = mean(pvals[2,] < crit), K3 = mean(pvals[3,] < crit)))
  return (list(K2 = mean(pvals[2,] < crit), K3 = mean(pvals[3,] < crit)))
}
# Sweeps the grid of penalty values a_n for the 1-vs-2 component EM test.
# Prints the rejection-frequency table per a_n and returns:
#   optimal.value - the a_n whose K=2 rejection rate is closest to `crit`
#   optimal.perf  - the rejection frequency achieved at that a_n
FindOptimal1vs2an <- function (phidatapair, anset, m = 1,
                               parallel = 0, rmpi = TRUE) {
  phi <- phidatapair$phi
  data <- phidatapair$data
  crit <- phi$crit
  q <- length(phi$betaset)
  # loop over each a_n, running the EM test on every replicated sample.
  # BUG FIX: the original body was the placeholder `function (an) (an*0.1)`,
  # which produced plain numerics, so the sapply(.., "[[", "K2"/"K3") calls
  # below could never have worked. Run the actual tests instead.
  output <- lapply(anset, PerformEMtests, data = data, crit = crit, q = q,
                   m = m, parallel = parallel, rmpi = rmpi)
  freqsK2 <- sapply(output, "[[", "K2")
  freqsK3 <- sapply(output, "[[", "K3")
  # assemble and show the rejection-frequency table
  table <- data.frame(anset, freqsK2, freqsK3)
  colnames(table) <- c("an", "K=2", "K=3")
  # a_n whose K=2 rejection frequency is closest to the nominal level `crit`
  # (may return several values when there are ties)
  optimal <- anset[which(abs(freqsK2-crit)==min(abs(freqsK2-crit)))]
  optimalresult <- freqsK2[which(abs(freqsK2-crit)==min(abs(freqsK2-crit)))]
  print(table)
  return (list(optimal.value = optimal, optimal.perf = optimalresult))
}
## Generate one sample column using the parameters in `phi`.
## The column has the form (y, x_1', x_2', ..., x_n')': the first n entries
## are y, followed by the row-major q-per-observation regressor data.
## x_ij ~ N(0, 1) and y_i ~ N(x_i' beta, 1).
GenerateSample <- function(phi) {
  n <- phi$n
  betaset <- phi$betaset
  q <- 0
  if (!is.null(betaset))
    q <- length(betaset[[1]])  # NOTE(review): assumes betaset[[1]] holds the coefficient vector — confirm upstream format
  if (q <= 0)
    stop("in this experiment, dim(X) must be > 0")  # FIX: was print(), which did not halt execution
  x.sample <- matrix(rnorm(n*q), nrow = n)  # each row is one observation
  # FIX: dropped a dead `y.sample <- rnorm(n)` that was immediately overwritten,
  # and use the inner product sum(betaset * x.obs) as the mean — the original
  # `mean = (betaset*x.obs)` passed a vector, of which rnorm(1, ...) only used
  # the first element (result is identical when q == 1).
  y.sample <- apply(x.sample, 1, function(x.obs)
    rnorm(1, mean = sum(betaset * x.obs), sd = 1))
  sample <- c(y.sample, c(t(x.sample)))
  return (sample)
}
## Pair a parameter set `phi` with a matrix of simulated data: one sample
## (of length n*(q+1)) per column, `replication` columns in total.
GeneratePhiDataPair <- function(phi, replication) {
  phi <- as.list(phi)  # make it a plain list
  draws <- lapply(seq_len(replication), function(idx) GenerateSample(phi = phi))
  sample.matrix <- do.call(cbind, draws)
  return (list(phi = phi, data = sample.matrix))
}
## Build one (phi, data) pair per row of `phiset`.
GeneratePhiDataPairs <- function(phiset, replication = 14) {
  # returns a list of (phi, data) pairs, one element per design row
  apply(phiset, 1, function(phi.row) GeneratePhiDataPair(phi.row, replication = replication))
}
## Rmpi setup: spawn the slave processes, give each its own RNG stream,
## and ship the worker function to every slave.
print("collecting workers..")
mpi.spawn.Rslaves()
mpi.setup.rngstream()
# BUG FIX: the broadcast referred to `performEMtest` (lower-case p), but the
# function defined above is `PerformEMtest`; R is case-sensitive, so the
# original call failed with "object 'performEMtest' not found".
mpi.bcast.Robj2slave(PerformEMtest, all=TRUE)
print("workers loaded.")
## ====== BEGIN EXPERIMENT ======
## Initialization & data generation
# Model specification (per row of the table): dim(X) = 1
dimx <- 1
anlb <- 0.2    # lower bound of the a_n grid
anub <- 1      # upper bound of the a_n grid
ancount <- 10  # number of interior grid points
SEED <- 111111
# init.
set.seed(SEED)
# take the `ancount` interior points of the [anlb, anub] grid
# (note: `1:ancount+1` parses as `(1:ancount) + 1`, i.e. indices 2..ancount+1)
anset <- seq(anlb,anub,length.out = ancount+2)[1:ancount+1]
betaset <- rep(0.5, dimx)
# generate data for every (n, crit) design cell
phiset <- expand.grid(n=c(20,30), crit = c(0.01,0.05))
# BUG FIX: downstream code (GenerateSample, FindOptimal1vs2an) reads
# phi$betaset, so this list column must be named `betaset` — the original
# name `betasets` left the field NULL for every consumer.
phiset$betaset <- lapply(seq_len(nrow(phiset)), function(j) betaset)
pairs <- GeneratePhiDataPairs(phiset)
## 2. Create one row of the results table per design cell.
cols <- list()
for (i in seq_along(pairs)) {
  phi <- pairs[[i]]$phi
  data <- pairs[[i]]$data
  n <- phi$n
  crit <- phi$crit
  result <- FindOptimal1vs2an(pairs[[i]], anset = anset, m = 1)
  cols[[i]] <- list(crit, n, result$optimal.value, result$optimal.perf)
  df <- data.frame(matrix(unlist(cols), ncol = length(cols[[1]]), byrow=TRUE))
  colnames(df) <- c("crit", "n", "optimal.value", "optimal.perf")
  print(df) # print the partial table after every cell so progress is visible
}
print(df)
## ====== END EXPERIMENT ======
# Rmpi termination
mpi.close.Rslaves()
|
e24de79c938de5ac2751bef34e903ea88831c8d6 | 54749c2d3abef807cc747930217f66e537576200 | /cachematrix.R | 8eada034fe0a9fa0eebbeaadbb24cc40a1e479a8 | [] | no_license | baardo/ProgrammingAssignment2 | 3379ce1788f70cd6d231573ee1a66718d4b72c86 | 8fd145b54d098f6f2c5a5a6e0e2ce350583138d5 | refs/heads/master | 2021-08-07T20:05:53.460714 | 2017-11-08T22:47:44 | 2017-11-08T22:47:44 | 110,023,119 | 0 | 0 | null | 2017-11-08T19:50:16 | 2017-11-08T19:50:15 | null | UTF-8 | R | false | false | 1,119 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## These two functions cache the inverse of a matrix
## rather than re-evaluating it over and over.
## makeCacheMatrix creates a list to set and get the value of the matrix
## and then to set and get the value of the inverse
## Build a caching wrapper around matrix `x`: a list of four closures that
## get/set the stored matrix and get/set its cached inverse. Re-assigning
## the matrix via set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replace the stored matrix and drop the now-stale inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve checks if the inverse of the matrix is not already cached.
## If not cached, it computes the inverse and sets the value in cache.
## It then returns the inverse of the matrix, either from cache or computed.
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## The inverse is computed (via solve) only on the first call; later calls
## return the cached copy and emit a message.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  result <- x$getinverse()
  if (is.null(result)) {
    # nothing cached yet: invert the stored matrix and remember the answer
    result <- solve(x$get())
    x$setinverse(result)
  } else {
    message("Getting cached data.")
  }
  result
}
|
cab4ba591003fbd0ca99b93e8641110aec623349 | 4ca1e2ec00d140b5565f05121496fb17c76e8dfe | /Algas/01-prepare.R | c20d4b8881fdb5fd69c30640ba16e85ea8c13129 | [] | no_license | abraham314/mineria | fe3336c6202f041b55454d4832d3497360fc0146 | 3f8c3732fabf9904618146b8b5a04cf9877adad8 | refs/heads/master | 2021-05-06T08:46:12.738434 | 2017-12-19T22:04:29 | 2017-12-19T22:04:29 | 114,060,106 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 249 | r | 01-prepare.R | source('toolset.R')
# Project helper scripts and metadata definitions.
source('utils.R')
source('metadata.R')
# Tidyverse packages used throughout the analysis.
library(tidyr)
library(purrr)
library(ggplot2)
library(dplyr)
#new<-german_data%>%separate(personal_status_and_sex,c("sex", "personal_status"), sep=':')
#saveRDS(new,file="german-tidy")
1ee18ae4392e48b8c60a20ebcf16c0e25992974e | 5e28bdc8e8aa84d5a919a92be655cfb381e179f0 | /R/tech_block.R | a91e5f13f63f6d7f23184bb43fdd254676eb7715 | [] | no_license | grkhr/ryandexdirect | bde9248bca476ab852dfc39dbbf2cb2c84883f75 | 1d7adf3ad657ac16aa83f4d1520ffcd816d2cd2f | refs/heads/master | 2020-09-22T11:46:38.738297 | 2019-12-10T12:10:23 | 2019-12-10T12:10:23 | 160,968,163 | 0 | 0 | null | null | null | null | WINDOWS-1251 | R | false | false | 989 | r | tech_block.R | # тех функция для авторизации в других функциях
tech_auth <- function(login = NULL, token = NULL, AgencyAccount = NULL, TokenPath = NULL) {
  # Internal helper used by the other API functions to resolve an OAuth access token.
  # If a token was supplied, skip authorisation and use it directly.
  if (! is.null(token) ) {
    # Determine how the token was passed: an auth result list or a plain string.
    if(class(token) == "list") {
      Token <- token$access_token
    } else {
      Token <- token
    }
  # If no token was supplied, it has to be obtained.
  } else {
    # Decide which account to authorise as: the agency account if given, else the login.
    load_login <- ifelse(is.null(AgencyAccount) || is.na(AgencyAccount), login, AgencyAccount)
    # Load (or fetch) the token via yadirAuth.
    Token <- yadirAuth(Login = load_login, TokenPath = TokenPath, NewUser = FALSE)$access_token
  }
  # Return the resolved access token string.
  return(Token)
}
af1d923fbfa6d506295008cb840a9a9f7cde334a | 2814fb8233b94513f400c626dfc31b36adcd4bf4 | /tests/testthat/helper_objects.R | c68fac6379f8195d583a232efdcea060a46bfff3 | [] | no_license | skummerf/GenomicWidgets | 384a055600f06ba60ef387b3a4bc693f1dd9d10d | 51b831a9ea240ac74d047d2e0574f7de137cde9f | refs/heads/master | 2018-07-18T07:44:54.077271 | 2018-06-01T20:22:46 | 2018-06-01T20:22:46 | 111,747,630 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 876 | r | helper_objects.R | library("GenomicRanges")
library("SummarizedExperiment")

# Locate the genomationData example files and read the sample sheet.
genomation_dir <- system.file("extdata", package = "genomationData")
samp.file <- file.path(genomation_dir,'SamplesInfo.txt')
samp.info <- read.table(samp.file, header=TRUE, sep='\t',
                        stringsAsFactors = FALSE)
# Expand the file names in the sample sheet to absolute paths.
samp.info$fileName <- file.path(genomation_dir, samp.info$fileName)

# Load the ENCODE H1-hESC CTCF broadPeak calls shipped with genomationData.
ctcf.peaks <- genomation::readBroadPeak(
  system.file("extdata",
              "wgEncodeBroadHistoneH1hescCtcfStdPk.broadPeak.gz",
              package = "genomationData"))
# Keep chr21 peaks only, order by decreasing signal, and centre each peak on
# a fixed 501 bp window.
ctcf.peaks <- ctcf.peaks[GenomicRanges::seqnames(ctcf.peaks) == "chr21"]
ctcf.peaks <- ctcf.peaks[order(-ctcf.peaks$signalValue)]
ctcf.peaks <- GenomicRanges::resize(ctcf.peaks, width = 501, fix = "center")

#tss <- GenomicRanges::promoters(SummarizedExperiment::rowRanges(rpkm_chr21),
#                                up = 1, down = 1)
c3b05ec6927f7ab22cb4cddc6c72ab46bab7f8d7 | 7a98661410bbf9c2e0a1097c819a76cfb2c9f9c0 | /Source/Liquor License Pre_processing.R | 50c3a8fd6c2027e37c258b15136390da1edeffcd | [] | no_license | JaseZiv/Liquor-Licensing-Victoria | bf4757d5b38f16446ea6b25c59cb18a0aafa3538 | d1f1610bd5e50dbc64b5c05057404f8b24328bd2 | refs/heads/master | 2020-04-01T00:11:05.603253 | 2019-01-23T01:14:54 | 2019-01-23T01:14:54 | 152,685,783 | 0 | 0 | null | 2019-01-23T01:14:55 | 2018-10-12T02:55:52 | R | UTF-8 | R | false | false | 7,676 | r | Liquor License Pre_processing.R | # load libraries
library(tidyverse)
library(DataExplorer)
library(rvest)

#####################################################
#---------- Read in Liquor Licensing Data ----------#
#####################################################
# Source: https://www.data.vic.gov.au/data/dataset/victorian-liquor-licences-by-location

# read in the liquor licence workbook with no header row (headers live in row 3)
raw_liquor <- readxl::read_xlsx("Data/Raw/current_victorian_licences_by_location_august_2018.xlsx", col_names = F)

# inspect the raw data
head(raw_liquor)

# columns X__13 and X__14 are formatting breaks only - remove them
raw_liquor <- raw_liquor %>%
  select(-X__13, -X__14)

# the variable names are in the third row - promote them to column names
names(raw_liquor) <- raw_liquor[3,]

# now remove the first three lines of useless data
raw_liquor <- raw_liquor[-c(1:3),]

# look at the data again
head(raw_liquor)

# headers contain spaces and slashes - extract them, strip spaces and replace "/" with "_"
clean_up_header <- names(raw_liquor)
clean_up_header <- str_replace_all(string = clean_up_header, pattern = " ", replacement = "")
clean_up_header <- str_replace_all(string = clean_up_header, pattern = "/", replacement = "_")
# replace the variable names with the cleaned up version
colnames(raw_liquor) <- clean_up_header

# now have a look again
head(raw_liquor)

# how many NAs are in each variable?
print(colSums(is.na(raw_liquor)))

####################################
#---------- Analyse data ----------#
####################################
glimpse(raw_liquor)

unique_liquor_data <- sapply(raw_liquor, n_distinct)

# what type of licences are there?
raw_liquor %>%
  count(Category) %>%
  arrange(desc(n))

# see Notes_on_Data_Selection.md re the following exclusions
raw_liquor <- raw_liquor %>%
  filter(Category != "Pre-retail Licence",
         Category != "Wine and Beer Producer's Licence")

print(colSums(is.na(raw_liquor)))

##################################################################################################################################
# The same venue can hold multiple licences, so there are duplicates at the
# address level. Remove them here, otherwise suburb-level licence counts
# computed later would double count venues.
##################################################################################################################################
liquor_data_deduped <- raw_liquor %>%
  arrange(Licensee, TradingAs, Address, Postcode) %>%
  distinct(Licensee, Address, Postcode, .keep_all = TRUE) %>%
  mutate(Longitude = as.numeric(Longitude),
         Latitude = as.numeric(Latitude))

print(colSums(is.na(liquor_data_deduped)))

#---------------------------------
# Get geocoordinate data
#---------------------------------
################################
# Use the googleway package to
# get missing geocoordinate data
################################
# first, get a subset of all observations missing lat/lon and build a full
# address string for geocoding
missing_latlon <- liquor_data_deduped %>%
  filter(is.na(Longitude)) %>%
  mutate(full_address = paste(Address, Suburb, Postcode, sep = ", "))

#---------------------------
# googleway steps:
#---------------------------
source("Source/AddGeocoordinates.R")
#---------------------------

# also need to clean up council names to be able to join on to crime data
liquor_data_deduped$Council <- str_remove(liquor_data_deduped$Council, " CITY COUNCIL")
liquor_data_deduped$Council <- str_remove(liquor_data_deduped$Council, " SHIRE COUNCIL")
liquor_data_deduped$Council <- str_remove(liquor_data_deduped$Council, " RURAL")
liquor_data_deduped$Council <- str_remove(liquor_data_deduped$Council, " BOROUGH COUNCIL")

# remove variables not needed for analysis
liquor_data_deduped <- liquor_data_deduped %>%
  select(-starts_with("Postal"), -full_address) %>%
  select(-TradingHours, -After11pm, -MaximumCapacity, -Gaming)

# save file for later analysis
saveRDS(liquor_data_deduped, "Data/Analysis/liquor_data_deduped.rds")

#######################################
#---------- Population Data ----------#
#######################################
# read in the ABS LGA population summary statistics (2016)
raw_population <- readxl::read_xls("Data/Raw/32350ds0011_lga_summary_statistics_2016.xls", sheet = "Table 1")

# because the data comes in formatted excel, remove all rows and columns not needed
raw_population <- raw_population[-c(1:7), c(2,4,7,9,13)]

# rename column headings
colnames(raw_population) <- c("STName", "LGAName", "Population", "SexRatio", "MedianAge")

# select only Victoria
raw_population <- raw_population %>%
  filter(STName == "Victoria")

# strip the parenthesised suffix from the LGA names, keeping the bare LGA
raw_population$LGAName <- sub('\\(.*', '', raw_population$LGAName) %>% str_squish() #this removes the whitespace after the LGA
raw_population$LGAName <- toupper(raw_population$LGAName)

head(raw_population)

# where are the NAs?
print(colSums(is.na(raw_population)))

# remove the NAs in the LGA; state name no longer needed after filtering
raw_population <- raw_population %>%
  filter(!is.na(LGAName)) %>%
  select(-STName)

# coerce Population, SexRatio and MedianAge to numeric
raw_population[c(2:4)] <- sapply(raw_population[c(2:4)], as.numeric)

##########################################
#---------- LGA Square KM Data ----------#
##########################################
# define the url
url <- "https://en.wikipedia.org/wiki/Local_government_areas_of_Victoria"

# set url as a html object
webpage <- read_html(url)

# read all tables from the page
scraped_LGA_data <- html_table(webpage, fill = TRUE)

# separate out first table as it has an additional column
tab1 <- scraped_LGA_data[[1]]
tab1 <- tab1[,c(1,5)]
names(tab1) <- c("LocalGovernmentArea", "Area_KMSqr")
tab1 <- tab1[-1,]

# loop through the remaining 8 tables, extracting only the columns required
LGA_DF <- data.frame()

for(i in 2:9) {
  df <- scraped_LGA_data[[i]]
  df <- df[,c(1,5)]
  names(df) <- c("LocalGovernmentArea", "Area_KMSqr")
  df <- df[-1,]
  LGA_DF <- rbind(LGA_DF, df)
}

# bind the first table and the rest of the scraped tables together
LGA_DF <- rbind(tab1, LGA_DF)

# convert LGA to uppercase
LGA_DF$LocalGovernmentArea <- toupper(LGA_DF$LocalGovernmentArea)

# Remove prefixes/suffixes from the LGA name so it matches the population data
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, " CITY COUNCIL")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, " SHIRE COUNCIL")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, "SHIRE OF ")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, "CITY OF ")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, " SHIRE")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, "RURAL")
LGA_DF$LocalGovernmentArea <- str_remove(LGA_DF$LocalGovernmentArea, "BOROUGH OF ")

# remove any unnecessary whitespace
LGA_DF$LocalGovernmentArea <- str_squish(LGA_DF$LocalGovernmentArea)

# need to add a "-" to Colac Otway so the join to the population data works
LGA_DF$LocalGovernmentArea[LGA_DF$LocalGovernmentArea == "COLAC OTWAY"] <- "COLAC-OTWAY"

# remove thousands separators from the area column and convert to numeric
LGA_DF$Area_KMSqr <- str_remove(LGA_DF$Area_KMSqr, ",") %>% as.numeric()

# Join population and area dataframes together
LGA_data <- raw_population %>%
  left_join(LGA_DF, by = c("LGAName" = "LocalGovernmentArea")) %>%
  filter(LGAName != "UNINCORPORATED VIC")

# save LGA data to disk
saveRDS(LGA_data, "Data/Analysis/LGA_Data.rds")
|
be25f22e470ff837f1599966f7df24347bf86966 | 39388bfc9829fc209532b5b58b3a15ea67b07a91 | /Digit Recognizer/R/Digit Recognizer/3. Naive Bayes.R | c677288e51719495ff85963eb8dbf36a48381a83 | [] | no_license | crismbris/MUSS-KDD | b55d266df0bc6a0aba531c8e4f7e3efc4288d5fa | 48eb8ff0d86125dc566f0d1ae3dc15dbfad99506 | refs/heads/master | 2020-04-25T19:01:35.369438 | 2019-02-27T23:22:09 | 2019-02-27T23:22:09 | 173,005,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 465 | r | 3. Naive Bayes.R | rm(list = ls())
# loading libraries: e1071 for naiveBayes, data.table for fast CSV IO
library(e1071)
library(data.table)

# loading the Kaggle digit-recognizer datasets
train <- fread("train-kaggle.csv")
test <- fread("test.csv")

print("started modelling...")
# the label must be a factor so naiveBayes treats this as classification
train$label <- factor(train$label)
# FIX: renamed the model object from `rf_model` (misleading copy-paste from a
# random-forest script) to `nb_model` — this is a naive Bayes classifier.
nb_model <- naiveBayes(label ~ ., data = train)
print("naive bayes trained.")

pred <- predict(nb_model, newdata = test)
# FIX: derive ImageId from the test set instead of the hard-coded 28000 so the
# script also works with differently sized test files (identical output for
# the standard 28000-row Kaggle test set).
pred_df <- data.frame(ImageId = seq_len(nrow(test)), Label = pred)
write.csv(pred_df, file = "naiveBayesResult.csv", row.names = FALSE)
|
d1a81fddd4a6311c4c6652999593379344df3c1d | d4bced16b6a3a756cc60925c4c89e754f89f22e7 | /residus.R | cdcbbcd527bb1d035ce04e67d8e12febdf75c3a4 | [] | no_license | Dilhan-Ozturk/Spatial-modelling-NOx-in-Brussels | 50bed3c405969c33629c1f75d65a5cda5a6d1605 | 297c91999bc91f5989d992a07a10474b460a862b | refs/heads/master | 2022-09-14T21:42:20.416806 | 2020-05-30T19:26:06 | 2020-05-30T19:26:06 | 268,145,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,956 | r | residus.R | # Besoin de data_traitment et modeleNOxPond
#**********************************************************************************************
# Build a long-format table of observed vs. predicted NO2 / NO / NOx for the
# five Brussels stations, together with their residuals, and save it to CSV.
# Requires the per-station prediction data frames (Haren, Molenbeek, Ixelles,
# Uccle, Woluwe) and the observation tables (NO2_BE, NO_BE) already in scope.
#**********************************************************************************************
# NOx expressed from NO2 and NO concentrations; factors 1.912 and 1.247 are
# presumably the ppb -> ug/m3 conversion factors for NO2 and NO -- confirm.
nox <- function(no2, no) {
  1.912 * (no2 / 1.912 + no / 1.247)
}
# Each station contributes the same number of rows; 244560 is the total row
# count over the five stations.
n_per_station <- 244560 / 5
stations <- c("Haren", "Molenbeek", "Ixelles", "Uccle", "Woluwe")
# The script reuses Ixelles' day/hour columns for every station, as the
# original did.
totalJours <- rep(Ixelles$Jour, 5)
totalHeures <- rep(Ixelles$Heure, 5)
totalStation <- rep(stations, each = n_per_station)
# Observed concentrations, stacked station by station (same order as above).
totalNO2obs <- c(NO2_BE$X41N043, NO2_BE$X41R001, NO2_BE$X41R002, NO2_BE$X41R012, NO2_BE$X41WOL1)
totalNOobs <- c(NO_BE$X41N043, NO_BE$X41R001, NO_BE$X41R002, NO_BE$X41R012, NO_BE$X41WOL1)
# Model predictions, stacked in the same station order.
totalNO2pred <- c(Haren$NO2, Molenbeek$NO2, Ixelles$NO2, Uccle$NO2, Woluwe$NO2)
totalNOpred <- c(Haren$NO, Molenbeek$NO, Ixelles$NO, Uccle$NO, Woluwe$NO)
# nox() is elementwise, so it can be applied to the stacked vectors directly;
# this matches the per-station computation of the original script.
totalNOxobs <- nox(totalNO2obs, totalNOobs)
totalNOxpred <- nox(totalNO2pred, totalNOpred)
# Assemble everything; residual = observed - predicted. Column names are kept
# identical to the original output (first column is "totalStation").
residus <- data.frame(
  totalStation = totalStation,
  Jour = totalJours,
  Heure = totalHeures,
  NO2obs = totalNO2obs,
  NO2pred = totalNO2pred,
  NOobs = totalNOobs,
  NOpred = totalNOpred,
  NOxobs = totalNOxobs,
  NOxpred = totalNOxpred,
  resNO2 = totalNO2obs - totalNO2pred,
  resNO = totalNOobs - totalNOpred,
  resNOx = totalNOxobs - totalNOxpred
)
# Write straight to the output directory instead of mutating the working
# directory with setwd().
write.csv(residus,
          file.path("C:/Users/demo/OneDrive - UCL/PROJET NOx/generate", "residus.csv"),
          row.names = FALSE, quote = FALSE)
|
d64fc32125b0cae0e7fd5b047588b2b36b52a9f4 | ee4247f18094e489032120b3ad29ed4512dec048 | /jhds02_rprogramming/ps1/complete.R | 903ee8b2cbc6b90a1166c998b11f6ff5cfda9f29 | [] | no_license | Treasaigh/jhds | 182c184278b6187b70fdc114c814b0b200e2d568 | 58542e7262d9ea8ac5894f6cf90bf3ad5e96ee2f | refs/heads/master | 2021-01-22T12:48:49.976240 | 2015-02-26T17:38:12 | 2015-02-26T17:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 650 | r | complete.R | complete <- function(directory, id = 1:332) {
    # Base course directory; the monitor CSV files live in <dir_base>/<directory>.
    # NOTE(review): path is hard-coded to one machine -- consider passing it in.
    dir_base <- "~/Dropbox/Education/JHDS 2 - R Programming/ps1"
    setwd(paste(dir_base, directory, sep='/'))
    # Accumulate all requested monitor files into a single data frame.
    pm_pollution <- data.frame()
    for (i in id) {
        # NOTE(review): list.files() is re-read on every iteration and rbind()
        # grows the data frame in a loop -- both could be hoisted/preallocated.
        list_files <- list.files()
        dfa <- read.table(file=list_files[i], sep=',', header=TRUE)
        pm_pollution <- rbind(pm_pollution, dfa)
    }
    # Restore the working directory changed above.
    setwd(dir_base)
    # Keep only fully-observed rows, then count complete observations per monitor ID.
    pm_pollution <- pm_pollution[complete.cases(pm_pollution),]
    pm_agg <- as.data.frame(table(pm_pollution$ID), row.names=NULL)
    names(pm_agg) <- c('id','nobs')
    # NOTE(review): order(id) sorts by the function *argument* `id`, not by
    # pm_agg$id; this only behaves as expected when `id` is already sorted and
    # every requested monitor appears in the table -- confirm intended behaviour.
    pm_agg <- data.frame(pm_agg[order(id),], row.names=c(1:(length(pm_agg$id))))
    # Return the per-monitor counts of complete cases (columns: id, nobs).
    pm_agg
} |
ab88a3a4638def2f11dd724bcff0a9694fe3f917 | 53f499108a5c46f69e06a2a20e4671b1d203f960 | /R/get.R | 647b8027665b20664347ed5a872377be9f0d80c4 | [
"MIT"
] | permissive | xiechengyong123/plinkbinr | b89a235b9ce04872bc78ac587fa456be4b0bc957 | fd2a23a7b47fa3f01476af9515e2e49bf366ea76 | refs/heads/master | 2023-03-18T18:10:16.448135 | 2019-10-14T21:13:22 | 2019-10-14T21:13:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | get.R | #' Detect plink exe for operating system
#'
#' Returns error if not found
#'
#' @export
#' @return path to plink binary
get_plink_exe <- function() {
  # Resolve the plink binary bundled with the plinkbinr package for the
  # current operating system; Windows builds carry an .exe suffix.
  os <- Sys.info()[["sysname"]]
  rel_path <- if (identical(os, "Windows")) {
    paste0("bin/plink_", os, ".exe")
  } else {
    paste0("bin/plink_", os)
  }
  plink_bin <- system.file(rel_path, package = "plinkbinr")
  # system.file() returns "" when the file is absent, which never exists.
  if (!file.exists(plink_bin)) {
    stop("No plink2 executable available for OS '", os, "'. Please provide your own plink2 executable file using the plink_bin argument.")
  }
  plink_bin
}
|
6afaaad8a041a116c2f31566ee42b1b8c59fb88f | 8470f399e6a92bb2d3125b9938f63e842de1e06c | /simulator file.R | 680aab5fd7d53f792fabcf19ef861bdb07045861 | [] | no_license | kroehlm/Permutation_Mediation_Test | ff2c71221bcca5b8e34e24ac22903c2c6eee0595 | 1e86154de084758a41a571667b366ca52bd2b98d | refs/heads/master | 2020-05-20T16:19:53.268569 | 2019-05-08T19:25:58 | 2019-05-08T19:25:58 | 185,664,178 | 0 | 0 | null | 2019-09-06T14:45:59 | 2019-05-08T19:02:33 | R | UTF-8 | R | false | false | 2,997 | r | simulator file.R | ################################################################################
### file: simulator file.R
### authors: Miranda Kroehl, miranda.kroehl@ucdenver.edu
### date: April 2018
###
### purpose: run simulations with bootstraps
###
### change log:
### 04/12/18 - file created. adapted from simulatorPerms files for dissertation work
################################################################################
# NOTE(review): rm(list = ls()) wipes the session of whoever sources this
# script; generally discouraged in shared scripts.
rm(list = ls())
# Load required packages
library(parallel)
library(mediation)
library(boot)
library(dplyr)
library(snow)
# Set your working directory
# NOTE(review): setwd("") is a placeholder and will error as-is -- fill in the
# project directory before running.
setwd("")
# As long as all the files are in the above directory, this should call up all the functions
source("helper_functions.R")
source("mediationTestPermMethods.R")
###################################################################################
# Simulator settings (one scenario per row of sim.seeds below):
# size: N (sample size)
# n.perms: number of permutations (10k recommended)
# n.boots: number of bootstrap redraws (5k recommended)
# sims.per.seed: number of simulations to run
# cor.x1.x2: specified correlation between x1 and x2
# cor.x1.m: specified correlation between x1 and m
# cor.x2.m: specified correlation between x2 and m
# cor.x1.y: specified correlation between x1 and y
# cor.x2.y: specified correlation between x2 and y
# cor.m.y: specified correlation between m and y
###################################################################################
size <- 30
n.perms <- 10000
n.boots <- 5000
sims.per.seed <- 1000
cor.x1.x2 <- 0.6
cor.x1.m <- 0
cor.x2.m <- 0
cor.x1.y <- 0.6
cor.x2.y <- 0.6
cor.m.y <- 0.6
# Set up the initial seed values for a simulation run:
# NOTE(review): expand.grid() is called with unnamed arguments, so sim.seeds
# gets columns Var1..Var9. The formals() replacement inside the worker below
# therefore renames the function's parameters to Var1..Var9; the function body
# presumably still finds size, n.perms, ... through exported globals -- confirm
# this is the intended mechanism.
sim.seeds <- expand.grid(size, n.perms, n.boots, cor.x1.x2, cor.x1.m, cor.x2.m,
                         cor.x1.y, cor.x2.y, cor.m.y)
sim.seeds <- data.frame(sim.seeds)
# Reserve one core for the master process / operating system.
options(cl.cores = detectCores() - 1)
# set up a cluster, falling back to 2 workers if cl.cores were unset
this.cluster <- makeCluster(getOption("cl.cores", 2))
# NOTE(review): workers load lasso2 and boot while the master loads mediation,
# boot, dplyr and snow -- confirm lasso2 is really what the permutation
# functions need on the workers.
clusterCall(cl = this.cluster, fun = function(){library(lasso2); library(boot)})
# Ship every object in the global environment (settings + sourced functions)
# to the workers.
clusterExport(cl = this.cluster, list = objects())
# One parallel task per scenario row; each task re-runs the permutation test
# sims.per.seed times and returns the list of per-replicate results.
SIMULATION.RESULTS <-
  parLapply(cl = this.cluster,
            1:nrow(sim.seeds),
            function(idx) {
              args <- as.list(sim.seeds[idx, ])
              # args$conf.levels = conf.levels
              # Overwrite the simulator's formal arguments with this
              # scenario's values so it can be called with no arguments.
              formals(mediationTestPermMethods) <- args
              rtn <-
                replicate(sims.per.seed, mediationTestPermMethods(), simplify = FALSE)
              return(rtn)
            })
stopCluster(this.cluster)
### Save the simulation results - Peter set it up to save as an image, which can then
# be loaded (below) and summarized using the helper functions
save.image(file = paste("sim_n_", size, "_corX1X2_", cor.x1.x2, "_corX1M_", cor.x1.m,
                        "_corX2M_", cor.x2.m, "_corX1Y_", cor.x1.y, "_corX2Y_", cor.x2.y,
                        "_corMY_", cor.m.y, ".RData", sep = ""))
|
d3f8dcb06521020c818ecea9901cee92c71654c8 | 41bbdd673bef8e9f1fdc724556591f4607b220e6 | /pascal/man/cot.Rd | 877616a3c115005feb445b8c6c0e2283bb567733 | [] | no_license | pascal-niklaus/pascal | bd42a5f00fd06d9b9f8b4e6227419943a817b409 | 88723458c71609a8971925d363671910a4fa421c | refs/heads/master | 2023-06-27T03:37:37.156306 | 2023-06-14T13:35:16 | 2023-06-14T13:35:16 | 27,164,003 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 500 | rd | cot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cot.R
\name{cot}
\alias{cot}
\alias{cotpi}
\title{Cotangent}
\usage{
cot(z)
cotpi(z)
}
\arguments{
\item{z}{argument in radians, or multiple of pi}
}
\value{
cotangent of z
}
\description{
\code{cot} calculates the cotangent. It is preferable to computing
1/tan(x), as is done in \code{pracma}, because it does not fail for
\code{pi/2}.
}
\examples{
cot(0.25*pi)
}
\author{
Pascal Niklaus \email{pascal.niklaus@ieu.uzh.ch}
}
|
f34e385d64ddb785c99240015fe4a0fd34d668f7 | b33735d157848984bc57288b41ce2bded79e2710 | /R/fit.CInLPN.R | d638062ea85f6732e149e5f4918fe7902c737e2d | [] | no_license | bachirtadde/CInLPN | 9a00a0cabf5af34ba8403b17d81763db6067b990 | bb51492aa116635c4bf70afd29de7bdd58de15a5 | refs/heads/master | 2023-06-30T15:21:04.684650 | 2023-06-20T14:31:49 | 2023-06-20T14:31:49 | 163,929,733 | 2 | 1 | null | 2021-03-28T14:58:25 | 2019-01-03T06:00:38 | R | UTF-8 | R | false | false | 6,579 | r | fit.CInLPN.R | #' Marginal and subject-specific predictions on the training data for a CInLPN object
#'
#' @param object CInLPN object
#' @param newdata dataset
#' @param MCnr an integer that gives the number of Monte Carlo iterations
#' @param TimeDiscretization a boolean indicating if the inital time have to be discretized. When setting to FALSE, It allows to avoid discretization when running univarite model during parameter initialization.
#' @param \dots optional parameters
#'
#' @return list of marginal and subject-specific predictions
#' @export
fit.CInLPN <- function(object, newdata, TimeDiscretization=TRUE, MCnr = 10, ...){
  # Validate the arguments before touching them: missing() must be tested on
  # the formal arguments themselves (the old code tested a local copy, which
  # could never trigger).
  if(missing(object)) stop("The argument model should be specified")
  if(missing(newdata)) stop("The argument newdata should be specified")
  model <- object
  # inherits() is robust when the object carries more than one class.
  if(!inherits(model, "CInLPN")) stop("argument model must be a CInLPN object")
  # The fitted model stores the original call; the sub-model formulas are
  # recovered from it.
  x <- model$call
  ### identification of all components and sub-models of the model
  ## components of the structural model
  fixed_X0 <- as.formula(x$structural.model$fixed.LP0)
  fixed_DeltaX <- as.formula(x$structural.model$fixed.DeltaLP)
  randoms_DeltaX <- as.formula(x$structural.model$random.DeltaLP)
  mod_trans <- as.formula(x$structural.model$trans.matrix)
  DeltaT <- model$DeltaT
  # components of the measurement model
  # NOTE(review): link/knots are coerced with as.formula() but link is indexed
  # like a vector below (link[k]) -- confirm this coercion is intended.
  link <- as.formula(x$measurement.model$link.functions$links)
  knots <- as.formula(x$measurement.model$link.functions$knots)
  ## names of the subject identifier and time variable
  subject <- x$subject
  Time <- x$Time
  ### checking newdata format
  newdata_cols <- colnames(newdata)
  if(!(subject %in% newdata_cols)) stop("Subject should be in the data")
  if(!(Time %in% newdata_cols)) stop("time should be in the data")
  if(!TimeDiscretization){ # If discretization is external, time must already be a multiple of DeltaT
    if(!all(round((newdata[,Time]/DeltaT)-round(newdata[,Time]/DeltaT),8)==0.0))stop(paste("Discretized Time must be multiple of", DeltaT, sep = " "))
  }
  if(dim(unique(newdata))[1] != dim(newdata)[1]) stop("Some rows are the same in the dataset, perhaps because of a too large discretization step")
  ### outcomes and latent processes ####
  # The fixed-effect LHS looks like "y1 + y2 | y3": one "+"-separated group of
  # outcomes per latent process, groups separated by "|".
  outcome <- as.character(attr(terms(fixed_DeltaX),"variables"))[2]
  outcomes_by_LP <- strsplit(outcome,"[|]")[[1]]
  nD <- length(outcomes_by_LP) # nD: number of latent processes
  outcomes <- NULL
  mapping.to.LP <- NULL # latent-process index for each outcome
  for(n in 1:nD){
    outcomes_n <- strsplit(outcomes_by_LP[n],"[+]")[[1]]
    outcomes_n <- as.character(sapply(outcomes_n, FUN = function(x) gsub("[[:space:]]","",x), simplify = FALSE))
    outcomes_n <- unique(outcomes_n)
    outcomes <- c(outcomes, outcomes_n)
    mapping.to.LP <- c(mapping.to.LP, rep(n, length(outcomes_n)))
  }
  K <- length(outcomes) # total number of observed outcomes
  ### pre-processing of fixed effects: one sub-formula string per process
  fixed_X0.models <- strsplit(gsub("[[:space:]]","",as.character(fixed_X0)),"~")[[2]]
  fixed_X0.models <- as.vector(strsplit(fixed_X0.models,"[|]")[[1]])
  fixed_DeltaX.model <- strsplit(gsub("[[:space:]]","",as.character(fixed_DeltaX)),"~")[[3]]
  fixed_DeltaX.models <- strsplit(fixed_DeltaX.model,"[|]")[[1]]
  ### pre-processing of random effects
  randoms_X0.models <- rep("1",nD) # random intercepts only at baseline
  randoms_DeltaX.model <- strsplit(gsub("[[:space:]]","",as.character(randoms_DeltaX)),"~")[[2]]
  randoms_DeltaX.models <- strsplit(randoms_DeltaX.model,"[|]")[[1]]
  #### pre-processing of the transition-matrix model
  mod_trans.model <- strsplit(gsub("[[:space:]]","",as.character(mod_trans)),"~")[[2]]
  #### every model predictor must be present in newdata
  predictors <- model$predictors
  if(!all(predictors %in% newdata_cols)) stop("All explicative variables must be in the dataset")
  ################### discretization of the data with the model's step ##########################
  if(TimeDiscretization){
    data <- TimeDiscretization(rdata=newdata, subject = subject, outcomes = outcomes, predictors = predictors,
                               Time = Time, Delta = DeltaT)
  }else{
    data <- newdata
  }
  ################### create formatted data ##########################
  data_F <- DataFormat(data=data, subject = subject, fixed_X0.models = fixed_X0.models,
                       randoms_X0.models = randoms_X0.models, fixed_DeltaX.models = fixed_DeltaX.models,
                       randoms_DeltaX.models = randoms_DeltaX.models, mod_trans.model = mod_trans.model,
                       outcomes = outcomes, nD = nD, link=link, knots = knots,
                       Time = Time, DeltaT=DeltaT)
  ### call the C++ routine to compute fitted values of the outcomes from newdata
  # if_link flags outcomes using a non-linear link function
  if_link <- rep(0,K)
  for(k in seq_len(K)){
    if(link[k] != "linear") if_link[k] <- 1
  }
  Marginal_Predict <- data_F$id_and_Time
  SubjectSpecific_Predict <- data_F$id_and_Time
  col <- colnames(Marginal_Predict)
  if(requireNamespace("splines2", quietly = TRUE)){
    Predict <- fit(K = K, nD = nD, mapping = mapping.to.LP, paras = model$coefficients,
                   m_is= data_F$m_i, Mod_MatrixY = data_F$Mod.MatrixY, df= data_F$df,
                   x = data_F$x, z = data_F$z, q = data_F$q, nb_paraD = data_F$nb_paraD, x0 = data_F$x0, z0 = data_F$z0,
                   q0 = data_F$q0, if_link = if_link, tau = data_F$tau,
                   tau_is=data_F$tau_is, modA_mat = data_F$modA_mat, DeltaT=DeltaT,
                   MCnr = MCnr, data_F$minY, data_F$maxY, data_F$knots, data_F$degree, epsPred = 1.e-9)
  }else{
    # The check above is on splines2, so that is the package to request
    # (the previous message asked for MASS by mistake).
    stop("Need package splines2 to work, Please install it.")
  }
  # Predict carries 7 columns per outcome. Judging from the column names built
  # below: [kk] marginal prediction, [kk+1] transformed observation,
  # [kk+2..kk+3] marginal transformed prediction/residual, [kk+4]
  # subject-specific prediction, [kk+5..kk+6] subject-specific transformed
  # prediction/residual.
  kk <- 1
  for(k in seq_len(K)){
    Marginal_Predict <- cbind(Marginal_Predict, data_F$Y[,k], Predict[,kk],
                              (data_F$Y[,k]-Predict[,kk]), Predict[,(kk+1):(kk+3)])
    SubjectSpecific_Predict <- cbind(SubjectSpecific_Predict, data_F$Y[,k], Predict[,(kk+4)],
                                     (data_F$Y[,k]-Predict[,(kk+4)]), Predict[,c((kk+1),(kk+5),(kk+6))])
    col <- c(col, outcomes[k], paste(outcomes[k], "Pred", sep="."), paste(outcomes[k], "Res", sep="."),
             paste(outcomes[k], "tr", sep="-"), paste(outcomes[k], "tr.Pred", sep="-"),
             paste(outcomes[k], "tr.Res", sep="-"))
    kk <- kk+7
  }
  colnames(Marginal_Predict) <- col
  colnames(SubjectSpecific_Predict) <- col
  ### return the fitted values
  return(list(Marginal_Predict = Marginal_Predict, SubjectSpecific_Predict = SubjectSpecific_Predict))
}
a8d497df754265b449370a9cd1afcbf789a0c668 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.internet.of.things/man/iot_list_thing_groups.Rd | 25e343c705ba7a03d4b2fcc56b8c38eb4b435bcf | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,265 | rd | iot_list_thing_groups.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_list_thing_groups}
\alias{iot_list_thing_groups}
\title{List the thing groups in your account}
\usage{
iot_list_thing_groups(nextToken, maxResults, parentGroup,
namePrefixFilter, recursive)
}
\arguments{
\item{nextToken}{To retrieve the next set of results, the \code{nextToken} value from a
previous response; otherwise \strong{null} to receive the first set of
results.}
\item{maxResults}{The maximum number of results to return at one time.}
\item{parentGroup}{A filter that limits the results to those with the specified parent
group.}
\item{namePrefixFilter}{A filter that limits the results to those with the specified name
prefix.}
\item{recursive}{If true, return child groups as well.}
}
\value{
A list with the following syntax:\preformatted{list(
thingGroups = list(
list(
groupName = "string",
groupArn = "string"
)
),
nextToken = "string"
)
}
}
\description{
List the thing groups in your account.
}
\section{Request syntax}{
\preformatted{svc$list_thing_groups(
nextToken = "string",
maxResults = 123,
parentGroup = "string",
namePrefixFilter = "string",
recursive = TRUE|FALSE
)
}
}
\keyword{internal}
|
eee6a351733007ca1471c85d7e4b0e6f212d4a19 | 3f4d597965befa4655be7d810f485758284fc06b | /R/adjustPipelineFun.R | 21793717e1f23c35726de8e43c8f0f21b2245280 | [] | no_license | jianhong/cellCounter | b14ecc7ad4b20f7953209b8fe10258949e5944be | 82ed0744a9c95ba7dcf27b8783f7c7adc0313b3a | refs/heads/master | 2022-06-18T13:01:54.511625 | 2022-06-09T15:13:33 | 2022-06-09T15:13:33 | 121,533,564 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,670 | r | adjustPipelineFun.R | #' GaussianBlur
#' @description apply Gaussian blur to a Image
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param GaussianBlur_sigma sigma for \link[EBImage:gblur]{gblur}.
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' GaussianBlur(img)
GaussianBlur <- function(img, GaussianBlur_sigma=5, ...){
  # Accept either an Image or an Image2; Image2 inputs are converted first.
  stopifnot(inherits(img, c("Image", "Image2")))
  if (is(img, "Image2")) img <- toImage(img)
  # Delegate the smoothing itself to EBImage's Gaussian filter.
  gblur(img, sigma = GaussianBlur_sigma)
}
#' rescaleImage
#' @description linearly rescale the intensity values of a grayscale Image
#' into the range [0, 1], frame by frame.
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' rescaleImage(channel(img, "blue"))
rescaleImage <- function(img, ...){
  # Coerce Image2 inputs to plain Image objects before processing.
  stopifnot(inherits(img, c("Image", "Image2")))
  if (is(img, "Image2")) {
    img <- toImage(img)
  }
  # Only grayscale images are supported here.
  if (colorMode(img) != 0) {
    stop("colorMode of img must be Grayscale")
  }
  # NOTE(review): rescale() is not an EBImage function -- presumably
  # scales::rescale; confirm it is imported elsewhere in the package.
  n_frames <- numberOfFrames(img, type = "render")
  if (n_frames == 1) {
    imageData(img) <- rescale(imageData(img), to = c(0, 1))
  } else {
    # Multi-frame images are rescaled one rendered frame at a time.
    for (frame in seq_len(n_frames)) {
      imageData(img)[, , frame] <- rescale(imageData(img)[, , frame], to = c(0, 1))
    }
  }
  img
}
#' increaseContrast
#' @description increase the contrast of an Image by multiplying its pixel
#' data by a numeric factor.
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param increaseContrast_times a numeric multiplier applied to the image data.
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' increaseContrast(img)
increaseContrast <- function(img, increaseContrast_times=2, ...){
  # Validate the input and normalise Image2 to Image.
  stopifnot(inherits(img, c("Image", "Image2")))
  if (is(img, "Image2")) {
    img <- toImage(img)
  }
  stopifnot(is.numeric(increaseContrast_times))
  # Scaling the pixel intensities stretches the contrast.
  increaseContrast_times * img
}
#' removeBackground
#' @description suppress the background of an Image by setting to zero every
#' pixel whose intensity falls below a background threshold. If no threshold
#' is given, the median intensity of the non-zero pixels is used.
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param removeBackground_background background cut-off value; pixels below
#' this value are set to 0. Defaults to the median of the non-zero pixels.
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' removeBackground(img)
removeBackground <- function(img, removeBackground_background, ...){
  stopifnot(inherits(img, c("Image", "Image2")))
  if(is(img, "Image2")){
    img <- toImage(img)
  }
  data <- imageData(img)
  if(missing(removeBackground_background)){
    # Estimate the background as the median of the non-zero pixel values;
    # zeros are masked out so they do not drag the median down.
    data[data==0] <- NA
    removeBackground_background <- median(data, na.rm = TRUE)
  }
  # Restore the masked zeros before thresholding.
  data[is.na(data)] <- 0
  # Guard against an undefined threshold (e.g. an all-zero image yields an NA
  # median), which would otherwise produce NA subscripts in the assignment.
  if(!is.na(removeBackground_background[1])){
    data[data<removeBackground_background[1]] <- 0
  }
  imageData(img) <- data
  img
}
#' ScurveAdjust
#' @description apply an S-curve (logistic) adjustment to an Image using the
#' formula: data = L / (1 + exp(-k * (data - x0)))
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param ScurveAdjust_k slope
#' @param ScurveAdjust_L max value
#' @param ScurveAdjust_x0 mean value
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' ScurveAdjust(img)
ScurveAdjust <- function(img, ScurveAdjust_k=10, ScurveAdjust_L=1, ScurveAdjust_x0=0.5, ...){
  stopifnot(inherits(img, c("Image", "Image2")))
  if (is(img, "Image2")) {
    img <- toImage(img)
  }
  # Logistic remapping of pixel intensities:
  #   v' = L / (1 + exp(-k * (v - x0)))
  pixels <- imageData(img)
  imageData(img) <- ScurveAdjust_L / (1 + exp(-ScurveAdjust_k * (pixels - ScurveAdjust_x0)))
  img
}
#' changeCellSize
#' @description shrink (erode) or grow (dilate) the apparent cell size in an
#' Image by applying a morphological operation with a configurable brush.
#' @param img an object of \link[EBImage:Image-class]{Image}.
#' @param changeCellSize_direction "erode" or "dilate". erode makes the cell size smaller
#' and dilate makes the cell size bigger.
#' @param changeCellSize_targetChannel the target channel(s) for a color image.
#' @param changeCellSize_size,changeCellSize_shape Brush size and shape, see \link[EBImage:makeBrush]{makeBrush}
#' @param \dots not used.
#' @import EBImage
#' @export
#' @author Jianhong Ou
#' @examples
#' library(EBImage)
#' img <- readImage(system.file("extdata", "low.jpg", package="cellCounter"))
#' changeCellSize(img)
changeCellSize <- function(img, changeCellSize_direction=c("erode", "dilate"),
                           changeCellSize_targetChannel=c("red", "green", 'blue'),
                           changeCellSize_size=3, changeCellSize_shape="disc", ...){
  stopifnot(inherits(img, c("Image", "Image2")))
  if (is(img, "Image2")) {
    img <- toImage(img)
  }
  changeCellSize_direction <- match.arg(changeCellSize_direction)
  # Structuring element shared by both morphological operations.
  brush <- makeBrush(size = changeCellSize_size, shape = changeCellSize_shape)
  # Apply the selected morphological operation to one image/channel.
  apply_morph <- function(im) {
    if (changeCellSize_direction == "erode") {
      erode(im, brush)
    } else {
      dilate(im, brush)
    }
  }
  # Grayscale images are transformed directly.
  if (colorMode(img) == 0) {
    return(apply_morph(img))
  }
  changeCellSize_targetChannel <- match.arg(changeCellSize_targetChannel, several.ok = TRUE)
  # Split a color image into its channels, transform only the requested ones,
  # then recombine into an RGB image.
  channels <- lapply(c("red", "green", "blue"), channel, x = img)
  names(channels) <- c("red", "green", "blue")
  for (ch in changeCellSize_targetChannel) {
    channels[[ch]] <- apply_morph(channels[[ch]])
  }
  rgbImage(channels[["red"]], channels[["green"]], channels[["blue"]])
}
|
dec1f1eb3c4c405f9ef1de633a47b0b04a3d6abe | fadffda2b80e86944daa588709156dd0d8af6f32 | /KnockoutNetsSynth/man/randomModelWithCycles.Rd | 4a615a986f5025b141cd910f4e371dd5dd4ad720 | [
"MIT"
] | permissive | sverchkov/vaske-fgnem | f623a3d80100f54760a6eb5b0066509fca5f3df9 | e47aac5d5ea1ee362c9358500a3b361f410453bc | refs/heads/master | 2021-01-21T06:38:15.461926 | 2017-02-28T01:08:33 | 2017-02-28T01:08:33 | 83,258,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,208 | rd | randomModelWithCycles.Rd | \name{randomModelWithCycles}
\alias{randomModelWithCycles}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ ~~function to do ... ~~ }
\description{
~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
randomModelWithCycles(n, edgeGenerator = function() {
1
}, gNames = paste("s", LETTERS[1:n], sep = ""), nBackLinks = function(n) {
1
})
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{n}{ Number of nodes in the generated network. }
  \item{edgeGenerator}{ A zero-argument function returning the value assigned to each generated edge; defaults to a function that always returns 1. }
  \item{gNames}{ Character vector of node names; defaults to \code{"sA"}, \code{"sB"}, \ldots }
  \item{nBackLinks}{ Either the number of backward links to add, or a function of \code{n} returning that number; defaults to a function returning 1. }
}
\details{
~~ If necessary, more details than the description above ~~
}
\value{
  An adjacency matrix, as produced by \code{randomTree}, augmented with
  additional lower-triangular entries representing the backward links.
}
\references{ ~put references to the literature/web site here ~ }
\author{ ~~who you are~~ }
\note{ ~~further notes~~
~Make other sections like Warning with \section{Warning }{....} ~
}
\seealso{ ~~objects to See Also as \code{\link{help}}, ~~~ }
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function(n, edgeGenerator=function() {1},
gNames=paste('s', LETTERS[1:n], sep=''),
nBackLinks=function(n) {1}) {
if (is.function(nBackLinks)) nBackLinks <- nBackLinks(n)
tree <- randomTree(n, edgeGenerator=edgeGenerator, gNames=gNames)
triscalar2pair <- function(n, position) {
if (position > n*(n-1)/2) {
stop("triangular position is out of bounds")
}
i <- 1
while(position >= n) {
i <- i + 1
n <- n - 1
position <- position - n
}
return(c(i, position + i))
}
positions <- sample(n*(n-1)/2, nBackLinks)
for (position in positions) {
p <- triscalar2pair(n, position)
tree[p[2],p[1]] <- edgeGenerator()
}
return(tree)
}
}
\keyword{ ~kwd1 }% at least one, from doc/KEYWORDS
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
335e03fd0fe448f0b500ea2db87a22196dcd2444 | 5fbb558dfc5865e6fcb1e575f41d2a5478649fc3 | /ui.R | e3b9d3203883a4af94597508ca5c6be200e846a6 | [] | no_license | mingzhuye/data-products--shiny | dfdff3e3b5048d7673e6e067eafca4146790a05c | b66acf58f614f0c534cfbf16d77e8dbb21bd1ef6 | refs/heads/master | 2021-01-01T06:27:44.149646 | 2015-06-18T19:38:26 | 2015-06-18T19:38:26 | 37,680,273 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,903 | r | ui.R | library(shiny)
# Define the UI for the dataset-viewer application: a sidebar collects the
# caption text, the dataset choice and the number of rows to show; the main
# panel renders the caption, a summary and the head of the data. The input
# and output IDs (caption, dataset, slider1, summary, view) are the contract
# with server.R and must not change.
shinyUI(fluidPage(
  # Application title ("Infomation" typo fixed).
  titlePanel("Choose General Information about the Dataset You Want"),
  sidebarLayout(
    sidebarPanel(
      # Free-text caption shown as the heading of the main panel.
      textInput("caption", "Dataset Caption:", "Data Summary"),
      helpText(" Hint: just name what you want and then a caption on the right panel will show up."),
      # One of four built-in datasets.
      selectInput("dataset", "Choose a dataset:",
                  choices = c("airquality", "iris", "mtcars", "swiss")),
      helpText(" Hint: we've already learned about these four datasets many times from this
               Data Scientist Specialization.
               Just choose one of the datasets and then review it."),
      # How many observations the table on the right displays.
      sliderInput("slider1", label = h4("Number of observations to view:"), min = 0,
                  max = 30, value = 10),
      helpText(" Hint: choose number of observations. While the data view will show only the specified",
               "number of observations, the summary will still be based",
               "on the full dataset."),
      # Inputs are only sent to the server when this button is pressed.
      submitButton("Update View")
    ),
    mainPanel(
      h1(textOutput("caption", container = span)),
      h4("Data Summary"),
      verbatimTextOutput("summary"),
      h4("Observations"),
      tableOutput("view")
    )
  )
))
9f06ad86de0ef7f6047a15bb7a78f097af9ce652 | f2ca5431d921b1189a6ebaacd88aef3a9a1a1820 | /R/TaskClust_usarrest.R | 1b3b59364379a160ad1308c0c21b2da6635e2a45 | [] | no_license | mlr-org/mlr3cluster | 44747d2b4fae9170b5ea20704cccfdad777f198f | 161aee5e75aa299bea29617020339768a8d9a75c | refs/heads/main | 2023-06-22T09:58:51.455583 | 2023-06-15T22:32:15 | 2023-06-15T22:32:15 | 157,852,274 | 15 | 7 | null | 2023-03-10T01:08:56 | 2018-11-16T10:32:38 | R | UTF-8 | R | false | false | 811 | r | TaskClust_usarrest.R | #' @title US Arrests Cluster Task
#'
#' @name mlr_tasks_usarrests
#' @include aaa.R
#' @format [R6::R6Class] inheriting from [TaskClust].
#'
#' @section Construction:
#' ```
#' mlr_tasks$get("usarrests")
#' tsk("usarrests")
#' ```
#'
#' @description
#' A cluster task for the [datasets::USArrests] data set.
#' Rownames are stored as variable `"states"` with column role `"name"`.
#'
NULL
# Construct the "usarrests" clustering task from datasets::USArrests,
# keeping the state names as a row-name column rather than a feature.
load_task_usarrests = function(id = "usarrests") {
  arrests = load_dataset("USArrests", "datasets", keep_rownames = TRUE)
  backend = as_data_backend(arrests, keep_rownames = "state")
  task = TaskClust$new(id, backend, label = "US Arrests")
  # Point both the task manual and the backend hash at the same help topic.
  task$man = "mlr3cluster::mlr_tasks_usarrests"
  backend$hash = task$man
  # "state" identifies rows, so drop it from the feature set.
  task$col_roles$name = "state"
  task$col_roles$feature = setdiff(task$col_roles$feature, "state")
  task
}
tasks[["usarrests"]] = load_task_usarrests
|
d1d69d146e0c0c936e38ee0ebce123002f461c33 | 999bae91d5c15cd8f5371468a89a977c2cb36cbc | /man/calcSurProb.Rd | caff039cbdbd593f6967090523a30903832aa509 | [] | no_license | ataudt/CINsim | 1311be6c4a5e28206366a07074031b540841c097 | 4f21df879b275a7e33ce45236fd408c7635bd185 | refs/heads/master | 2021-01-22T01:10:45.500354 | 2017-09-03T17:41:55 | 2017-09-03T17:41:55 | 102,203,380 | 0 | 0 | null | 2017-09-02T14:49:19 | 2017-09-02T14:49:19 | null | UTF-8 | R | false | true | 649 | rd | calcSurProb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcSurProb.R
\name{calcSurProb}
\alias{calcSurProb}
\title{Calculate probability of survival in a fitness-dependent manner}
\usage{
calcSurProb(karyotype, numberOfChromosomes = NULL, probDf = NULL)
}
\arguments{
\item{karyotype}{A karyotype provided as a vector.}
\item{numberOfChromosomes}{The number of chromosomes in the karyotype.}
\item{probDf}{A fitness probability matrix.}
}
\value{
A probability of survival.
}
\description{
This function will calculate a probability of survival based on karyotypic fitness.
}
\author{
Bjorn Bakker
}
|
e389306281bd342df0d3df037e2a2cb5f3c4b03c | cb8a9bb08b766b1c6543d9f676180dfec3f21273 | /man/SparreAndersen-class.Rd | f2df68d1288edf6aa397cdc5279f4a41a0606312 | [] | no_license | irudnyts/ruin | d15b89b05956be90a4b0e84346e56f6f64d9d544 | 664117bc3d367365a4e405fe8e25b09982e3330d | refs/heads/master | 2020-03-21T22:29:35.612424 | 2018-07-30T12:18:08 | 2018-07-30T12:18:08 | 139,129,922 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,935 | rd | SparreAndersen-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClass.R
\docType{class}
\name{SparreAndersen-class}
\alias{SparreAndersen-class}
\title{A formal S4 class SparreAndersen}
\description{
A formal S4 class representation of the classical Sparre Andersen model.
}
\details{
The model is defined as follows:
\deqn{X(t) = u + ct - \sum_{i=1}^{N(t)} Y_i,}
where \eqn{u} is the initial capital (\code{initial_capital}), \eqn{c} is the
premium rate (\code{premium_rate}), \eqn{N(t)} is the renewal process defined
by distribution of interarrival times (\code{claim_interarrival_generator}
and \code{claim_interarrival_parameters}), \eqn{Y_i} are iid claim sizes
(\code{claim_size_generator} and \code{claim_size_parameters}).
Objects of class can be created only by using the constructor
\code{\link{SparreAndersen}}.
}
\section{Slots}{
\describe{
\item{\code{initial_capital}}{a length one numeric non-negative vector specifying an
initial capital.}
\item{\code{premium_rate}}{a length one numeric non-negative vector specifying a
premium rate.}
\item{\code{claim_interarrival_generator}}{a function indicating the random
generator of claims' interarrival times.}
\item{\code{claim_interarrival_parameters}}{a named list containing parameters for
the random generator of claims' interarrival times.}
\item{\code{claim_size_generator}}{a function indicating the random generator of
claims' sizes.}
\item{\code{claim_size_parameters}}{a named list containing parameters for the
random generator of claims' sizes.}
}}
\references{
\itemize{
\item Andersen, E. Sparre. \emph{On the collective theory of risk in case of
contagion between claims}. Transactions of the XVth International Congress
of Actuaries, 2(6), 1957.
\item Thorin O. \emph{Some Comments on the Sparre Andersen Model in the Risk
Theory}. ASTIN Bulletin: The Journal of the IAA, 8(1):104-125, 1974.
}
}
\seealso{
\code{\link{SparreAndersen}}
}
|
e8017638db2877a8dafdabc96a8248413e62aa11 | 1d6758016165234fac88a80b4bd4d5e8c534d883 | /R/nonnegativeleastsquares.R | 2f23faf9d52636f3db48f4a18040c9e24dee5587 | [] | no_license | cran/gettingtothebottom | dfdfc294f2c466f0df5df5d960815d4d2f1a38db | 17bb8f6c359a7ee8baace40f1c2bf277744dd0e3 | refs/heads/master | 2016-09-06T05:32:22.471104 | 2014-12-04T00:00:00 | 2014-12-04T00:00:00 | 17,696,368 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,630 | r | nonnegativeleastsquares.R | #' Generate random nonnegative mixture components
#'
#' \code{generate_nnm} Function to random nonnegative mixture components
#'
#' @param n Number of samples
#' @param p Number of components
#' @param seed Random seed
#'
#' @export
#'
#' @examples
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
generate_nnm <- function(n, p, seed = 12345) {
  # Reproducible random design: n sample points, p Gaussian components.
  set.seed(seed)
  # Evaluation grid shared by all components.
  t <- seq(-10, 10, length.out = n)
  # Component centers: evenly spaced, then jittered with one rnorm(p) draw.
  # (Removed the original dead assignment `mu <- double(p)`, which was
  # immediately overwritten; the RNG call sequence is unchanged.)
  mu <- seq(-7, 7, length.out = p) + rnorm(p)
  # Component widths drawn from a Gamma distribution.
  sigma <- rgamma(p, shape = 0.75)
  # One column of X per component: its density evaluated on the grid.
  X <- matrix(NA_real_, n, p)
  for (i in seq_len(p)) {
    X[, i] <- dnorm(t, mean = mu[i], sd = sigma[i])
  }
  # Returns the design matrix, the grid, and the true centers/widths.
  list(X = X, t = t, mu = mu, sd = sigma)
}
#' Nonnegative Least Squares via MM
#'
#' \code{nnls_mm} Iteratively computes the solution to the nonnegative least squares problem via a majorization-minimization algorithm.
#'
#' @param y Nonnegative response
#' @param X Nonnegative design matrix
#' @param b Nonnegative initial regression vector
#' @param max_iter Maximum number of iterations
#' @param tol Relative tolerance for convergence
#'
#' @export
#'
#' @examples
#' set.seed(12345)
#' n <- 100
#' p <- 3
#' X <- matrix(rexp(n*p,rate=1),n,p)
#' b <- matrix(runif(p),p,1)
#' y <- X %*% b + matrix(abs(rnorm(n)),n,1)
#'
#' ## Setup mixture example
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#' set.seed(124)
#' X <- nnm$X
#' b <- double(p)
#' nComponents <- 3
#' k <- sample(1:p,nComponents,replace=FALSE)
#' b[k] <- matrix(runif(nComponents),ncol=1)
#' y <- X%*%b + 0.25*matrix(abs(rnorm(n)),n,1)
#'
#' # Obtain solution to mixture problem
#' nnm_sol <- nnls_mm(y,X,runif(p))
#'
# Nonnegative least squares via a majorization-minimization (MM) scheme.
# y: response vector, X: design matrix, b: starting coefficients.
# NOTE(review): the multiplicative update below divides by X %*% b_last, so
# it assumes y, X and b are strictly positive entrywise — the original
# author's "add checks for nonnegativity" TODO is still open; confirm inputs.
nnls_mm <- function(y,X,b,max_iter=1e2,tol=1e-4) {
  # add checks for nonnegativity
  # W: X with each column rescaled to sum to one.
  W <- apply(X,2,FUN=function(x) {return(x/sum(x))})
  b_last <- b
  for (iter in 1:max_iter) {
    # Multiplicative MM update: elementwise rescale of the previous iterate.
    b <- (t(W) %*% (y / (X%*%b_last))) * b_last
    # Relative convergence test in the Frobenius norm.
    if (norm(as.matrix(b - b_last),'f') < tol*(1 + norm(as.matrix(b_last), 'f')))
      break
    b_last <- b
  }
  # Returns the final coefficients and the number of iterations performed.
  return(list(b=b,iter=iter))
}
#' MM Algorithm - Plot NNM
#'
#' \code{plot_nnm} Function for plotting nnm
#'
#' @param nnm NNM object from generate_nnm function
#'
#' @export
#'
#' @examples
#' # Generate nonnegative matrix
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
#' # Plot nonnegative matrix
#' plot_nnm(nnm)
#'
#' @author Jocelyn T. Chi
#'
# Plot every mixture component of nnm$X as a separate coloured line,
# with the row index on the x axis.
plot_nnm <- function(nnm){
  # Dummy bindings so R CMD check does not flag the NSE column names.
  x = values = ind = NULL
  comp_mat <- nnm$X
  # stack() reshapes the matrix to long form: one (values, ind) row per cell.
  long <- stack(as.data.frame(comp_mat))
  long$x <- rep(seq_len(nrow(comp_mat)), ncol(comp_mat))
  plt <- qplot(x, values, data = long, group = ind, colour = ind, geom = "line")
  plt +
    theme_bw(base_size = 14) +
    xlab("Frequency") +
    ylab("Intensity") +
    theme(legend.position = "none")
}
#' MM Algorithm - Plot NNM Objective
#'
#' \code{plot_nnm_obj} Function for plotting the NNM Objective Function
#'
#' @param y Nonnegative response
#' @param X Nonnegative design matrix
#' @param b Nonnegative initial regression vector
#' @param max_iter (Optional) Maximum number of iterations
#'
#' @export
#'
#' @examples
#' set.seed(12345)
#' n <- 100
#' p <- 3
#' X <- matrix(rexp(n*p,rate=1),n,p)
#' b <- matrix(runif(p),p,1)
#' y <- X %*% b + matrix(abs(rnorm(n)),n,1)
#'
#' plot_nnm_obj(y,X,b)
#'
#' @author Jocelyn T. Chi
#'
# Run the MM solver one step at a time, record the least-squares loss after
# each iterate, and plot the loss trajectory.
plot_nnm_obj <- function(y, X, b, max_iter = 100){
  bhat <- b
  loss <- double(max_iter)
  for (i in 1:max_iter) {
    # One MM step, warm-started from the previous iterate.
    bhat <- nnls_mm(y, X, bhat, max_iter = 1)$b
    residual <- as.matrix(y - X %*% bhat)
    loss[i] <- 0.5 * norm(residual, 'f')^2
  }
  dat <- data.frame(x = 1:max_iter, loss = loss)
  ggplot(dat, aes(x = x, y = loss)) +
    geom_line() +
    theme_bw(base_size = 14) +
    xlab("Iterates") +
    ylab("Value of the loss function")
}
#' MM Algorithm - Plotting the Spectroscopic Signal
#'
#' \code{plot_spect} Function for plotting the spectroscopic signal
#'
#' @param n Number of samples
#' @param nnm NNM object from generate_nnm function
#' @param y Nonnegative response
#' @param X Nonnegative design matrix
#' @param b Nonnegative initial regression vector
#'
#' @export
#'
#' @examples
#' # Setup mixture example
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
#' set.seed(12345)
#' X <- nnm$X
#' b <- double(p)
#' nComponents <- 3
#' k <- sample(1:p,nComponents,replace=FALSE)
#' b[k] <- matrix(runif(nComponents),ncol=1)
#' y <- X%*%b + 0.25*matrix(abs(rnorm(n)),n,1)
#'
#' plot_spect(n,y,X,b,nnm)
#'
#' @author Jocelyn T. Chi
#'
# Plot the observed signal y against the frequency grid nnm$t.
# NOTE(review): n, X and b are unused in the body; they are kept in the
# signature for compatibility with existing calls plot_spect(n, y, X, b, nnm).
plot_spect <- function(n, y, X, b, nnm){
  dat <- data.frame(t = nnm$t, y = y)
  ggplot(dat, aes(x = t, y = y)) +
    geom_line() +
    theme_bw(base_size = 14) +
    xlab("Frequency") +
    ylab("Intensity")
}
#' MM Algorithm - Plotting the Reconstruction
#'
#' \code{plot_nnm_reconstruction} Function for plotting the nnm_sol reconstruction
#'
#' @param nnm NNM object from generate_nnm function
#' @param X Nonnegative design matrix
#' @param nnm_sol Solution object from nnls_mm function
#'
#' @export
#'
#' @examples
#' # Setup mixture example
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
#' set.seed(12345)
#' X <- nnm$X
#' b <- double(p)
#' nComponents <- 3
#' k <- sample(1:p,nComponents,replace=FALSE)
#' b[k] <- matrix(runif(nComponents),ncol=1)
#' y <- X%*%b + 0.25*matrix(abs(rnorm(n)),n,1)
#'
#' # Obtain solution to mixture problem
#' nnm_sol <- nnls_mm(y,X,runif(p))
#'
#' # Plot the reconstruction
#' plot_nnm_reconstruction(nnm,X,nnm_sol)
#'
# Plot the fitted signal X %*% b_hat over the frequency grid nnm$t.
plot_nnm_reconstruction <- function(nnm, X, nnm_sol){
  x = NULL  # silence R CMD check note about the NSE column name
  fitted <- as.vector(X %*% nnm_sol$b)
  dat <- data.frame(t = nnm$t, Xb = fitted)
  ggplot(dat, aes(x = t, y = Xb)) +
    geom_line() +
    theme_bw(base_size = 14) +
    xlab("Frequency") +
    ylab("Reconstructed Intensity")
}
#' MM Algorithm - Plotting the True Signal
#'
#' \code{plot_nnm_truth} Function for plotting the true mixture signal
#'
#' @param nnm NNM object from generate_nnm function
#' @param X Nonnegative design matrix
#' @param b Nonnegative initial regression vector
#'
#' @export
#'
#' @examples
#' # Setup mixture example
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
#' set.seed(12345)
#' X <- nnm$X
#' b <- double(p)
#' nComponents <- 3
#' k <- sample(1:p,nComponents,replace=FALSE)
#' b[k] <- matrix(runif(nComponents),ncol=1)
#' y <- X%*%b + 0.25*matrix(abs(rnorm(n)),n,1)
#'
#' # Plot the truth
#' plot_nnm_truth(X,b,nnm)
#'
# Plot the noiseless true signal X %*% b over the frequency grid nnm$t.
plot_nnm_truth <- function(X, b, nnm){
  truth <- as.vector(X %*% b)
  dat <- data.frame(t = nnm$t, Xb = truth)
  ggplot(dat, aes(x = t, y = Xb)) +
    geom_line() +
    theme_bw(base_size = 14) +
    xlab("Frequency") +
    ylab("True Intensity")
}
#' MM Algorithm - Plotting the NNMLS regression coefficients
#'
#' \code{plot_nnm_coef} Function for plotting the NNMLS regression coefficients
#'
#' @param nnm_sol Solution object from nnls_mm function
#'
#' @export
#'
#' @examples
#' # Setup mixture example
#' n <- 1e3
#' p <- 10
#' nnm <- generate_nnm(n,p)
#'
#' set.seed(12345)
#' X <- nnm$X
#' b <- double(p)
#' nComponents <- 3
#' k <- sample(1:p,nComponents,replace=FALSE)
#' b[k] <- matrix(runif(nComponents),ncol=1)
#' y <- X%*%b + 0.25*matrix(abs(rnorm(n)),n,1)
#'
#' # Obtain solution to mixture problem
#' nnm_sol <- nnls_mm(y,X,runif(p))
#'
#' # Plot the regression coefficients
#' plot_nnm_coef(nnm_sol)
#'
# Scatter plot of the estimated NNLS coefficients b_k against their index k.
plot_nnm_coef <- function(nnm_sol){
  coefs <- data.frame(nnm_sol$b)
  dat <- data.frame(x = seq_len(nrow(coefs)), b = coefs[[1]])
  ggplot(dat, aes(x = x, y = b)) +
    geom_point() +
    theme_bw(base_size = 14) +
    xlab("k") +
    ylab(expression(paste(b[k])))
}
e0b1ecc3eb2e83277d6f61f4e762d5276435a8ac | 085c1f0d348b6be6eef1917e74cfde2247853d84 | /man/random_coefmats.Rd | ef857543b13cbb6849ed24261093bc63ffc155fa | [] | no_license | saviviro/gmvarkit | 657691acd9577c5aacefe01778bb53d8746613c2 | ad17dd159d0dfa816dcdf9f3ff8be8d633e3b0ef | refs/heads/master | 2023-07-11T19:48:43.851308 | 2023-06-26T08:18:28 | 2023-06-26T08:18:28 | 193,906,969 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,109 | rd | random_coefmats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateParams.R
\name{random_coefmats}
\alias{random_coefmats}
\title{Create random VAR-model \eqn{(dxd)} coefficient matrices \eqn{A}.}
\usage{
random_coefmats(d, how_many, scale)
}
\arguments{
\item{d}{the number of time series in the system.}
\item{how_many}{how many \eqn{(dxd)} coefficient matrices \eqn{A} should be drawn?}
\item{scale}{non-diagonal elements will be drawn from mean zero normal distribution
with \code{sd=0.3/scale} and diagonal elements from one with \code{sd=0.6/scale}.
Larger scale will hence more likely result in stationary coefficient matrices, but
will explore a smaller area of the parameter space. Can be for example
\code{1 + log(2*mean(c((p-0.2)^(1.25), d)))}.}
}
\value{
Returns \eqn{((how_many*d^2)x1)} vector containing vectorized coefficient
matrices \eqn{(vec(A_{1}),...,vec(A_{how_many}))}. Note that if \code{how_many==p},
then the returned vector equals \strong{\eqn{\phi_{m}}}.
}
\description{
\code{random_coefmats} generates random VAR model coefficient matrices.
}
\keyword{internal}
|
28326587e629d843aa9972595e98ae9d20a66438 | d75d0e96f51291627ef8dff4f51631c9da397625 | /Metrics.R | c730dc8c1866997b48cb0d9184c56873f3f63f02 | [] | no_license | 13KumariJyoti/Regression-Metrics | 31365ff43cdf9912b3b2b2f826620a6eeca8f79a | 95442a958d2de672dbff6e03b9802eddaba82869 | refs/heads/master | 2021-01-16T16:18:18.351982 | 2020-02-26T07:17:27 | 2020-02-26T07:17:27 | 243,181,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,140 | r | Metrics.R | #' mean calculation
#'
#' @param x
#' A numeric vector whose mean is to be calculated
#' @return
#' numeric value;mean calculated
#' @export
#'
#' @examples
#' x <- c(5,6,7,8,9,10)
#' mean <- calculatemean(x)
calculatemean <- function(x){
  # Arithmetic mean of a numeric vector; NAs are coerced to zero (with a
  # warning), matching the documented behaviour of this package.
  if(!is.vector(x)){
    stop("x should be a vector")
  }
  if(any(is.na(x))){
    warning("Data contains NA, coercing NA to Zero")
    x[is.na(x)] <- 0
  }
  if(length(x) < 1){
    stop("x contains no element")
  }
  # Fix: is.numeric() accepts both integer and double vectors; the original
  # class(x) != "numeric" test wrongly rejected integer input such as 1:5.
  if(!is.numeric(x)){
    stop("x should be a numeric vector")
  }
  # NAs were zeroed above, so the plain sum is safe here.
  sum(x) / length(x)
}
#' For Calculating Median
#'
#' @param x
#' A numeric vector whose median is to be calculated
#' @return
#' numeric value;mean calculated
#' @export
#'
#' @examples
#' x <- c(5,6,7,8,9,10)
#' median <- calculatemean(x)
calculatemedian <- function(x){
  # Median of a numeric vector; NAs are coerced to zero (with a warning).
  if(!is.vector(x)){
    stop("x should be a vector")
  }
  if(any(is.na(x))){
    warning("x contains NA, coercing NA to Zero")
    x[is.na(x)] <- 0
  }
  if(length(x) < 1){
    stop("x contains no element")
  }
  # Fix: is.numeric() accepts integer vectors too, unlike the original
  # class(x) != "numeric" comparison.
  if(!is.numeric(x)){
    stop("x should be a numeric vector")
  }
  x <- sort(x)
  n <- length(x)
  if(n %% 2 != 0){
    # Odd count: the middle value is the median.
    median <- x[(n + 1) / 2]
  }else{
    # Even count: average the two central values.
    median <- calculatemean(x[c(n / 2, n / 2 + 1)])
  }
  median
}
#' For calculating Variance
#'
#' @param x
#' A numeric vector whose median is to be calculated
#' @return
#' numeric value;variance calculated
#' @export
#'
#' @examples
#' x <- c(5,6,7,8,9,10)
#' var <- calculatevariance(x)
calculatevariance <- function(x){
  # Sample variance (denominator n - 1); NAs are coerced to zero first.
  if(!is.vector(x)){
    stop("x should be a vector")
  }
  if(any(is.na(x))){
    warning("x contains NA, coercing NA to Zero")
    x[is.na(x)] <- 0
  }
  if(length(x) < 1){
    stop("x contains no element")
  }
  # Fix: accept integer vectors as well (is.numeric vs class == "numeric").
  if(!is.numeric(x)){
    stop("x should be a numeric vector")
  }
  # Vectorized sum of squared deviations; replaces the original
  # element-by-element accumulation loop with identical arithmetic.
  m <- calculatemean(x)
  sum((x - m)^2) / (length(x) - 1)
}
#' For Calculating Standard Deviation
#'
#' @param x
#' A numeric vector whose standard deviation needs to be calculated
#' @return
#' numeric value;variance calculated
#' @export
#'
#' @examples
#' x <- c(5,6,7,8,9,10)
#' sd <- calculatesd(x)
calculatesd <- function(x){
  # Sample standard deviation: square root of the sample variance.
  # calculatevariance() repeats these validations, but they are kept here so
  # error messages point at this function's argument.
  if(!is.vector(x)){
    stop("x should be a vector")
  }
  if(any(is.na(x))){
    warning("x contains NA, coercing NA to Zero")
    x[is.na(x)] <- 0
  }
  if(length(x) < 1){
    stop("x contains no element")
  }
  # Fix: accept integer vectors as well (is.numeric vs class == "numeric").
  if(!is.numeric(x)){
    stop("x should be a numeric vector")
  }
  sqrt(calculatevariance(x))
}
#' For Calculating Correlation
#'
#' @param x
#' A numeric vector
#' @param y
#' A numeric vector
#' @return
#' correlation number
#' @export
#'
#' @examples
#' data(mtcars)
#' x <- mtcars$mpg
#' y <- mtcars$cyl
#' corr <- correlation(x,y)
correlation <- function(x,y){
  # Pearson correlation between two equal-length numeric vectors.
  if(!is.vector(x) || !is.vector(y)){
    stop("Input data should be a vector")
  }
  if(any(is.na(x)) || any(is.na(y))){
    # Fix: typo "Inut" corrected, and NAs are now zeroed in BOTH inputs;
    # the original only replaced NAs in x, so NAs in y propagated into
    # the result.
    warning("Input data contains NA, coercing NA to Zero")
    x[is.na(x)] <- 0
    y[is.na(y)] <- 0
  }
  if(length(x) < 1 || length(y) < 1){
    stop("Input Data contains no element")
  }
  if(!is.numeric(x) || !is.numeric(y)){
    stop("Input Data should be a numeric vector")
  }
  if(length(x) != length(y)){
    # Fix: the original silently returned numeric(0) on mismatched lengths.
    stop("x and y should have the same length")
  }
  xmean <- calculatemean(x)
  ymean <- calculatemean(y)
  # Numerator: sum of cross-deviations (vectorized; same arithmetic as the
  # original accumulation loop).
  numerator <- sum((x - xmean) * (y - ymean))
  # Denominator: product of the root sums of squared deviations, recovered
  # from the sample variances as in the original.
  xss <- calculatevariance(x) * (length(x) - 1)
  yss <- calculatevariance(y) * (length(y) - 1)
  numerator / (xss * yss)^0.5
}
#' For Getting Correlation table
#' It gives a correlation table
#' @param Data
#' A data frame for which the correlation needs to be calculated
#' @return
#' A data frame which gives the correlation for two variables
#' @export
#'
#' @examples
#' data(mtcars)
#' coratab <- Correlationtable(mtcars)
Correlationtable <- function(Data){
  # Pairwise correlation matrix (as a data frame) for all columns of Data.
  if(any(is.na(Data))){
    stop("Data is having NA")
  }
  # inherits() also accepts data.frame subclasses (e.g. tibbles), where the
  # original class(Data) != "data.frame" comparison produced a vector.
  if(!inherits(Data, "data.frame")){
    stop("Data is having a data type other than data frame")
  }
  # Fix: the original used any(apply(Data, 2, is.numeric)), which coerces
  # the data frame to a matrix (losing per-column types) and passes as soon
  # as the coerced matrix is numeric. Every column must be numeric, checked
  # column-by-column on the data frame itself.
  if(!all(vapply(Data, is.numeric, logical(1)))){
    stop(" Data should have only numeric vectors")
  }
  vars <- colnames(Data)
  k <- ncol(Data)
  cortable <- data.frame(matrix(data = NA, nrow = k, ncol = k, byrow = TRUE,
                                dimnames = list(vars, vars)))
  for(i in seq_len(k)){
    for(j in seq_len(k)){
      cortable[i, j] <- correlation(Data[, i], Data[, j])
    }
  }
  cortable
}
#' For Calculating MAPE(Mean Absolute Percentage Error)
#'
#' @param Actual
#' A vector of Dependent variable.
#' @param Predicted
#' A vector of fitted values obtained from using any technique
#'
#' @return
#' It returns a numeric value using mape calculation
#' @export
#'
#' @examples
#' data(mtcars)
#' pred <- lm(mpg~cyl,data = mtcars)
#' predicted <- predict(pred)
#' mape <- MAPE(mtcars$mpg,predicted)
MAPE <- function(Actual,Predicted){
  # Mean Absolute Percentage Error, returned as a percentage.
  if(!is.vector(Actual) || !is.vector(Predicted)){
    stop("Input data should be a vector")
  }
  if(any(is.na(Actual)) || any(is.na(Predicted))){
    # Fix: typo "Inut" in the original warning text.
    warning("Input data contains NA")
  }
  if(length(Actual) < 1 || length(Predicted) < 1){
    stop("Input Data contains no element")
  }
  # Fix: accept integer vectors as well (is.numeric vs class == "numeric").
  if(!is.numeric(Actual) || !is.numeric(Predicted)){
    stop("Input Data should be a numeric vector")
  }
  # Denominator kept as length(Predicted) to match the original definition.
  (sum(abs((Actual - Predicted) / Actual)) / length(Predicted)) * 100
}
#' For Calculating (MSE/RMSE) Mean Squared Error
#'
#' @param Actual
#' A vector of Dependent variable.
#' @param Predicted
#' A vector of fitted values obtained from using any technique
#' @param Root
#' If TRUE it will return a vector containing both MSE and RMSE
#' @return
#' It will return mean square error and root mean squared error.
#' @export
#'
#' @examples
#' data(mtcars)
#' pred <- lm(mpg~cyl,data = mtcars)
#' predicted <- predict(pred)
#' MSE <- MSE(mtcars$mpg,predicted)
MSE <- function(Actual,Predicted,Root = NULL){
  # Mean Squared Error; when Root is TRUE, a named vector c(mse, rmse)
  # is returned instead of the bare MSE.
  if(!is.vector(Actual) || !is.vector(Predicted)){
    stop("Input data should be a vector")
  }
  if(any(is.na(Actual)) || any(is.na(Predicted))){
    # Fix: typo "Inut" in the original warning text.
    warning("Input data contains NA")
  }
  if(length(Actual) < 1 || length(Predicted) < 1){
    stop("Input Data contains no element")
  }
  # Fix: accept integer vectors as well (is.numeric vs class == "numeric").
  if(!is.numeric(Actual) || !is.numeric(Predicted)){
    stop("Input Data should be a numeric vector")
  }
  mse <- sum((Actual - Predicted)^2) / length(Predicted)
  # Fix: the original `if(Root == TRUE)` errored ("argument is of length
  # zero") whenever Root was left at its NULL default, making the plain
  # MSE(a, p) call unusable. isTRUE() handles NULL/FALSE/TRUE safely.
  if(isTRUE(Root)){
    mse <- c(mse = mse, rmse = sqrt(mse))
  }
  mse
}
#' For Calculating RSquared
#'
#' @param Actual
#' A vector of Dependent variable.
#' @param Predicted
#' A vector of fitted values obtained from using any technique
#' @return
#' It will calculate RSquared value
#' @export
#'
#' @examples
#' data(mtcars)
#' pred <- lm(mpg~cyl,data = mtcars)
#' predicted <- predict(pred)
#' R2 <- Rsquared(mtcars$mpg,predicted)
Rsquared <- function(Actual,Predicted){
  # Coefficient of determination: R^2 = 1 - SSres / SStot.
  if(!is.vector(Actual) || !is.vector(Predicted)){
    stop("Input data should be a vector")
  }
  if(any(is.na(Actual)) || any(is.na(Predicted))){
    # Fix: typo "Inut" in the original warning text.
    warning("Input data contains NA")
  }
  if(length(Actual) < 1 || length(Predicted) < 1){
    stop("Input Data contains no element")
  }
  # Fix: accept integer vectors as well (is.numeric vs class == "numeric").
  if(!is.numeric(Actual) || !is.numeric(Predicted)){
    stop("Input Data should be a numeric vector")
  }
  SSres <- sum((Actual - Predicted)^2)
  SStot <- sum((Actual - mean(Actual))^2)
  1 - (SSres / SStot)
}
#' For Calulating Adjusted RSqaured
#'
#' @param n
#' Number of observations
#' @param k
#' Number of Independent Variables
#' @param R2
#' Calculated Value of Rsquared
#' @return
#' It will return the Adjusted R Squared value
#' @export
#'
#' @examples
#' data(mtcars)
#' pred <- lm(mpg~cyl,data = mtcars)
#' predicted <- predict(pred)
#' R2 <- Rsquared(mtcars$mpg,predicted)
#' AR2 <- AdjRSquared(n = 32,k = 1,R2 = 0.721068)
AdjRSquared <- function(n,k,R2){
  # Adjusted R-squared for n observations, k predictors and R-squared R2.
  # Fix: the original mixed is.vector(n) with is.numeric(k)/is.numeric(R2)
  # and issued the misleading message "should be a vector"; all three
  # scalar inputs are now validated the same way.
  if(!is.numeric(n) || !is.numeric(k) || !is.numeric(R2)){
    stop("Input Data should be a numeric vector")
  }
  # Length check precedes the NA check so is.na() always receives a value.
  if(length(n) < 1 || length(k) < 1 || length(R2) < 1){
    stop("Input Data contains no element")
  }
  if(is.na(n) || is.na(k) || is.na(R2)){
    # Fix: typo "Inut" in the original warning text.
    warning("Input data contains NA")
  }
  1 - (((1 - R2) * (n - 1)) / (n - k - 1))
}
|
7167396633f7a3d412afb9632020bea34d7e27e8 | 63aab023a31f099e302038a988fbc439b3392c09 | /R/runcode.R | 9c3d6a854a65dc88d724c516052d07ffcbcfe35c | [] | no_license | TEDS-DataScience/AirbnbProject | 054d8f8bc520709b78f218b29c5ff2bdc217aeed | ae46c6331d8dc0490fe3d46675a07b7be7f4d993 | refs/heads/master | 2022-01-19T04:21:52.305116 | 2016-02-20T11:02:24 | 2016-02-20T11:02:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,527 | r | runcode.R | ######################################################################
# generate the report, slides, and if needed start the web application
reportfilename = paste(report_file, "Rmd", sep=".")
docreportfilename = paste("doc", reportfilename, sep="/")
htmloutput = paste(report_file, "html", sep = ".")
dochtmloutput = paste("doc", htmloutput, sep="/")
#local_directory <- getwd()
unlink( "doc/TMPdirReport", recursive = TRUE )
dir.create( "doc/TMPdirReport" )
setwd( "doc/TMPdirReport" )
file.copy( paste("../..",docreportfilename, sep="/"),reportfilename, overwrite = T )
#knit2html( reportfilename, quiet = TRUE )
setwd("../")
render(reportfilename)
#render(paste("../doc/TMPdirReport",reportfilename, sep="/"))
#file.copy( paste("doc/TMPdirReport/",htmloutput), paste("doc",dochtmloutput, sep="/"), overwrite = T )
#setwd( "../../" )
unlink( "TMPdirReport", recursive = TRUE )
# reportfilename = paste(slides_file, "Rmd", sep=".")
# docreportfilename = paste("doc", reportfilename, sep="/")
# htmloutput = paste(slides_file, "html", sep = ".")
# dochtmloutput = paste("doc", htmloutput, sep="/")
#
# unlink( "TMPdirSlides", recursive = TRUE )
# dir.create( "TMPdirSlides" )
# setwd( "TMPdirSlides" )
# file.copy( paste(local_directory,docreportfilename, sep="/"),reportfilename, overwrite = T )
# slidify( reportfilename )
# file.copy( htmloutput, paste(local_directory,dochtmloutput, sep="/"), overwrite = T )
# setwd( "../" )
# unlink( "TMPdirSlides", recursive = TRUE )
# setwd( "../" )
|
90b413908adcb3ad8b4d37fdd494c91358aa89d5 | c0840d2d6f8adbb0ccff63fe4428f5a52e6b63b0 | /cachematrix.R | 16969394f9feedaf433ef3808532db3c90507d86 | [] | no_license | carmeloatucm/ProgrammingAssignment2 | fae224605e6e40e5c7d1296eaa598f6db1d5567d | df6ed86cb030112653b079563dd783e76fa4e2cf | refs/heads/master | 2020-05-29T11:48:34.030618 | 2014-11-22T22:59:39 | 2014-11-22T22:59:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,602 | r | cachematrix.R | ### This function stores the inverse and the value of the matrix for future use
makeCacheMatrix <- function(x =matrix()){
### initially the data starts as a NULL object, it will be tested and cached later on
inverse <- NULL
data <-NULL
set <- function(y){
x <<- y
inverse <<-NULL
}
get <- function( ) x
setinverse <- function(solve) inverse <<- solve
### takes the solve funtion to get the inverse matrix
getinverse <- function() inverse
### yields the functions that gets the inverse matrix
set2 <- function(z){
x <<- z
data <<-NULL
}
setcached <- function(identity) data <<- identity
getcached <- function() data
list(set=set,
get=get,setinverse=setinverse,
getinverse=getinverse, set2=set2, setcached=setcached, getcached=getcached)
### The outcome is a list of functions that are used to get a
### cache to be tested later on
}
cacheSolve<- function(x,...){
inverse <-x$getinverse()
### gets the inverse matrix from the cached matrix if any defined
if(identical(x$getcached() ,x$get())==TRUE){
message("getting cached data")
return(inverse)
}
### checks whether the input matrix and the cached matrix are identical
### and whether the inverse has been already calculated
### if positive, the inverse matrix is returned
### if negative the function proceeds to get the inverse from input matrix
data <- identity(x$get())
inverse <- solve(data)
x$setinverse(inverse)
x$setcached(data)
### and gets the input cached matrix and its inverse matrix to the cache
inverse
}
|
3b62d77acabff6739115b0c4ff53b1c96350074e | 625c6620f117f50ab79f5fd3296e9576a0910187 | /man/guthion.Rd | 9bb278cf9541a42268763a7f692e6b8ac5f5caec | [] | no_license | DoseResponse/drcData | 378c850587d3332caa076192e480b4efb6904ba9 | 09f9da308aeea62322b0a7b67946435a87c36589 | refs/heads/master | 2023-02-24T02:20:00.374757 | 2021-01-28T12:04:31 | 2021-01-28T12:04:31 | 108,513,898 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 485 | rd | guthion.Rd | \name{guthion}
\alias{guthion}
\docType{data}
\title{guthion}
\description{guthion data.}
\usage{data(guthion)}
\format{
A data frame with 6 observations on the following 6 variables.
\describe{
    \item{\code{trt}}{a categorical vector}
\item{\code{dose}}{a numeric vector}
\item{\code{alive}}{a numeric vector}
\item{\code{moribund}}{a numeric vector}
\item{\code{dead}}{a numeric vector}
\item{\code{total}}{a numeric vector}
}
}
\keyword{datasets}
|
dad7e1eaeb4e9c307fd18faa5f335c2ce6f949ba | 0ae69401a429092c5a35afe32878e49791e2d782 | /trinker-lexicon-4c5e22b/R/profanity_banned.R | 0837995e7a14fd61c2f9005be029eedb224899f3 | [] | no_license | pratyushaj/abusive-language-online | 8e9156d6296726f726f51bead5b429af7257176c | 4fc4afb1d524c8125e34f12b4abb09f81dacd50d | refs/heads/master | 2020-05-09T20:37:29.914920 | 2019-06-10T19:06:30 | 2019-06-10T19:06:30 | 181,413,619 | 3 | 0 | null | 2019-06-05T17:13:22 | 2019-04-15T04:45:06 | Jupyter Notebook | UTF-8 | R | false | false | 729 | r | profanity_banned.R | #' bannedwordlist.com's List of Profane Words
#'
#' A dataset containing a character vector of profane words from bannedwordlist.com.
#'
#' @section Disclaimer: From the original author: "These lists are free to download. You may use them for
#' any purpose you wish and may copy, modify and distribute them freely. The
#' swear words lists are provided "as-is" without any warranty or guarantee
#' whatsoever. Don't blame me when the users of your forum, blog or community
#' find more creative ways of offending people."
#' @docType data
#' @keywords datasets
#' @name profanity_banned
#' @usage data(profanity_banned)
#' @format A character vector with 77 elements
#' @references \url{http://www.bannedwordlist.com}
NULL
|
cf833eb1b7f10b5a2fe63ba6ad7eabdfc5433167 | e5f1d57bb83370be465afa5325ac6e2783a20484 | /Helpers/Function Helper/Cleansing.R | 45abdf9856eb99471be3f4a75e81dbe38ca66f6c | [] | no_license | mfdhan/Indonesia-Public-Election-Twitter-Sentiment-Analysis | a08027edf67557544792fface4a01879801e9796 | 9a0924ec373d3ccd58903e583cc172b38a98e745 | refs/heads/master | 2022-12-03T09:50:32.063081 | 2020-08-23T05:05:05 | 2020-08-23T05:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,089 | r | Cleansing.R | library(tm)
library(xml2)
library(stringr)
library(dplyr)
library(katadasaR)
# Strip every "http"-prefixed run of non-space characters (URLs) from x.
removeURL <- function(x) {
  gsub(pattern = "http[^[:space:]]*", replacement = "", x = x)
}
# Drop Twitter-style @mentions: an "@" followed by word characters.
removeMention <- function(x) {
  gsub(pattern = "@\\w+", replacement = "", x = x)
}
# Delete carriage-return and newline characters from x.
removeCarriage <- function(x) {
  gsub(pattern = "[\r\n]", replacement = "", x = x)
}
# Delete every non-ASCII byte (emoji/emoticons and other multibyte glyphs).
removeEmoticon <- function(x) {
  gsub(pattern = "[^\x01-\x7F]", replacement = "", x = x)
}
# Remove invoice codes of the form inv/<digits>/<roman>/<roman>/<digits>,
# case-insensitively (roman parts limited to the letters x, v, i).
removeInvoice <- function(x) {
  gsub(pattern = "inv/[0-9]+/+[xvi]+/[xvi]+/[0-9]+", replacement = "",
       x = x, ignore.case = TRUE)
}
# Decode HTML entities in `str` by parsing it as an HTML fragment and
# extracting the text content; the dummy <x> wrapper lets read_html accept
# a bare string. Requires the xml2 package.
unescapeHTML <- function(str) {
  xml2::xml_text(xml2::read_html(paste0("<x>", str, "</x>")))
}
# tm corpus transformer: replace every match of `pattern` with a single
# space, so removed tokens do not fuse their neighbouring words together.
toSpace <- content_transformer(function(x, pattern){
  gsub(pattern, " ", x)
})
# Spell Normalization Function
# tm transformer that replaces each whitespace-separated token with its
# formal form from the slang dictionary `dict` (columns: slang, formal);
# tokens without a dictionary entry pass through unchanged.
spell.correction = content_transformer(function(x, dict){
  words = sapply(unlist(str_split(x, "\\s+")), function(w){
    # Fix: the original indexed a global `spell.lex` instead of the `dict`
    # argument, so it only worked when a data frame named spell.lex
    # happened to exist in the calling environment.
    formal <- dict[match(w, dict$slang), "formal"]
    if(is.na(formal)){
      w
    } else{
      formal
    }
  })
  x = paste(words, collapse = " ")
})
# Stemming Words
# Apply katadasaR::katadasar (Indonesian stemmer) to each whitespace-split
# token of x and rejoin with single spaces. NOTE(review): any run of
# whitespace collapses to one space as a side effect of the split/paste.
stemming = function(x){
  paste(sapply(unlist(str_split(x,'\\s+')),katadasar),collapse = " ")
}
e24ef466ecdf0d6658dd768d5ded6e8cc477a816 | 20511f2fa1c5a284c5d5cb7a88b20fc320fd8252 | /simdata/exp_normal.R | f341c4da5b54512e6315171418072285aeef2abf | [] | no_license | fredbec/drnn | 029708f87ab6b1991b20ef0e4b65515504e558d7 | 9582f18516cfd33a5583686e8fd726ac0ad5d890 | refs/heads/main | 2023-08-02T03:54:11.457759 | 2021-09-17T08:36:04 | 2021-09-17T08:36:04 | 358,614,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,146 | r | exp_normal.R | exp_normal <- function(n, k, xrange = c(-1, 1), betas = NULL, stand = TRUE){
  # Simulate n observations with k Uniform(xrange) predictors and a normal
  # response whose mean and sd are nonlinear in the predictors:
  #   mu = X b1 + exp(X b2),  sigma = exp(X) b3,  with b1,b2,b3 ~ Uniform(0,1).
  # Returns a shuffled data.frame with columns x1..xk, y and a 0/1 `testid`
  # column marking a random 25% test split. If stand=TRUE, y is z-standardised.
  # NOTE(review): the `betas` argument is accepted but never used -- confirm
  # whether caller-supplied coefficients were meant to override beta1..beta3.
  #generate coefficients from uniform
  beta1 <- runif(k, 0, 1)
  beta2 <- runif(k, 0, 1)
  beta3 <- runif(k, 0, 1)
  # n replicated k times: one runif(n, ...) draw per predictor column.
  runifcl <- rep(n, times = k)
  # n x k predictor matrix, each column ~ Uniform(xrange[1], xrange[2]).
  x <- sapply(runifcl, runif, xrange[1], xrange[2])
  #generate parameters
  mu <- x %*% beta1 + exp(x %*% beta2)
  sigma <- exp(x) %*% beta3
  #generate y with the specified parameters
  y <- rnorm(n, mean = mu, sd = sigma)
  data <- data.frame(x, y = y)
  names(data) <- c(sapply(1:k, function(k1) paste0("x", k1)), "y")
  if (stand){
    #unnecessary since predictors are already in same range
    #for (i in 1:k){
    #  data[,i] <- (data[,i] - min(data[,i])) / (max(data[,i]) - min(data[,i]))
    #}
    data$y <- (data$y - mean(data$y)) / (sd(data$y))
  }
  #shuffle data just in case
  inds <- sample(nrow(data))
  data <- data[inds,]
  rownames(data) <- NULL
  #training/test split: mark a random 25% of rows as test (testid == 1).
  inds <- sample(nrow(data), 0.25*nrow(data))
  data$testid <- 0
  data$testid[inds] <- 1
  return(data)
}
#expdat <- exp_sim(10000)
#plot(expdat$x, expdat$y)
#write.csv(expdat, paste0(path, "/exp2.csv"))
|
7af7ee670a60f40d46ede22868ad7aca56c03b35 | 2b19e8f985f6c6b0eb3cff16e5a90d0f75be5ea5 | /CovidClinicalTrial_analysis2.R | 892999040dfbe488bb3b087fbc601493610c86eb | [] | no_license | arthurberg/COVID_Clinical_Trials | 1d601a773dc756c5887ceb8716f16dc180627368 | 7d57cabd13cac11d42aa5bd2305a89091819cfae | refs/heads/master | 2023-02-22T06:12:20.787004 | 2021-02-01T00:13:09 | 2021-02-01T00:13:09 | 334,785,078 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 28,509 | r | CovidClinicalTrial_analysis2.R | rm(list=ls())
# Working directory holding the raw AACT-style "studies" pipe-delimited
# exports. NOTE(review): a machine-specific setwd() makes this script
# non-portable; consider relative paths or here::here().
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/Nour Hawila/COVID clinical trials/Second resubmission/software/clean")
#setwd("~/Library/Mobile Documents/com~apple~CloudDocs/COVID clinical trials/Second resubmission/software")
# December 2019 and December 2020 snapshots of the studies table, and the
# corresponding .RData cache file names.
fn=c("studies_20191201.txt","studies_20201201.txt")
fn2=paste(unlist(strsplit(fn,".txt")),".RData",sep="")
# For each snapshot: read the pipe-delimited export, key rows by trial id,
# keep only the columns used downstream, and cache the subset as .RData.
for(i in 1:length(fn)){
d1 <- read.table(file = fn[i],
                 header = TRUE,
                 sep = "|",
                 na.strings = "",
                 comment.char = "",
                 quote = "\"",
                 stringsAsFactors=FALSE,
                 fill = FALSE)
# nct_id is the unique ClinicalTrials.gov identifier.
rownames(d1)=d1$nct_id
############
# Subset to submission/completion dates, enrollment, phase, status, sponsor.
d2=d1[,c("nct_id","source","study_first_submitted_date","study_first_submitted_qc_date","completion_date","study_type","completion_date_type","enrollment","enrollment_type","phase","overall_status","start_date","start_date_type","primary_completion_date_type","primary_completion_date")]
save(d2,file=fn2[i])
}
# Build "<snapshot>_subset.RData" caches: restrict each snapshot to trials
# whose sponsor appears (with a country) in the curated source4.csv lookup,
# then attach sponsor sector/country/cancer flags and UN region/subregion.
fn4=paste(unlist(strsplit(fn2,".RData")),"_subset.RData",sep="")
# NOTE(review): header=T uses the reassignable shorthand; prefer header=TRUE.
d.source=read.csv("source4.csv",header=T,na.strings=c("","NA"))
# Manual correction of one sponsor name (curly apostrophe) in the lookup.
d.source[128,"Source"]="Boston Children’s Hospital"
for(i in 1:length(fn2)){
load(fn2[i])
d1=d2
# Only keep lookup rows that have a country assigned; key them by sponsor.
d.source2=d.source[!is.na(d.source$Country),]
rownames(d.source2)=d.source2$Source
# Restrict trials to sponsors present in the curated lookup.
d2=d1[is.element(d1$source,d.source2$Source),]
# Row-name indexing: sector/country/cancer looked up by sponsor name.
d2$sector=d.source2[d2$source,c("Class")]
d2$country=d.source2[d2$source,c("Country")]
d2$cancer=d.source2[d2$source,c("Cancer")]
# Country -> region/subregion mapping, keyed by country name.
d.region=read.csv("continents2.csv")
rownames(d.region)=d.region$name
countries=unique(d2$country)
#countries[!is.element(countries,d.region$name)]
d2$region=d.region[d2$country,"region"]
d2$subregion=d.region[d2$country,"sub.region"]
save(d2,file=fn4[i])
}
#############################################
# Helper functions (mytabfun/mytabfun2/mydatfun2/mygraphfun*) live here.
source("functions4.R")
# Load the two cached snapshot subsets into d2.2019 / d2.2020 (each load()
# brings in an object named d2, which is immediately renamed).
load("studies_20191201_subset.RData")
d2.2019=d2
load("studies_20201201_subset.RData")
d2.2020=d2
# COVID-19 trial ids (ClinicalTrials.gov export), used to flag/remove
# COVID trials from the 2020 snapshot.
d.covid=read.csv("20201204030641_covid-19.tsv",sep="\t",stringsAsFactors=FALSE)
#dat=mytabfun2(2020,d2.2020,Measure="Submitted",Country="United States")
#sum(is.element(dat$nct_id,d.covid$nct_id))
#mean(is.element(dat$nct_id,d.covid$nct_id))
library(patchwork)
# Disable scientific notation in printed output/axis labels.
options(scipen=999)
library(scales)
#####################################
################# Table #################
####################################
#### Table with COVID trials removed
# remove COVID trials from d2.2020
d2.2020.sub=d2.2020[!is.element(d2.2020$nct_id,d.covid$nct_id),]
## 2019
overall.2019.submitted=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Submitted")$sector,levels=c("Academic","Pharma"))))
us.2019.submitted=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Submitted",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2019.submitted=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Submitted",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2019.submitted=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Submitted",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2019.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Completed")$sector,levels=c("Academic","Pharma"))))
us.2019.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2019.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2019.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2019.primary.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Primary.Completed")$sector,levels=c("Academic","Pharma"))))
us.2019.primary.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Primary.Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2019.primary.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Primary.Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2019.primary.completed=addmargins(table(factor(mytabfun(2019,d2.2019,Measure="Primary.Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
## 2020
overall.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Submitted")$sector,levels=c("Academic","Pharma"))))
us.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Submitted",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Submitted",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Submitted",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Completed")$sector,levels=c("Academic","Pharma"))))
us.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Primary.Completed")$sector,levels=c("Academic","Pharma"))))
us.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Primary.Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Primary.Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020.sub,Measure="Primary.Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
tab=cbind(rbind(overall.2019.submitted,us.2019.submitted,europe.2019.submitted,asia.2019.submitted,overall.2019.completed,us.2019.completed,europe.2019.completed,asia.2019.completed
,overall.2019.primary.completed,us.2019.primary.completed,europe.2019.primary.completed,asia.2019.primary.completed),
rbind(overall.2020.submitted,us.2020.submitted,europe.2020.submitted,asia.2020.submitted,overall.2020.completed,us.2020.completed,europe.2020.completed,asia.2020.completed
,overall.2020.primary.completed,us.2020.primary.completed,europe.2020.primary.completed,asia.2020.primary.completed
))
tab2=cbind(
round((tab[,4]-tab[,1])/tab[,1]*100,1),
round((tab[,5]-tab[,2])/tab[,2]*100,1),
round((tab[,6]-tab[,3])/tab[,3]*100,1))
mytable=cbind(tab,tab2)
mytable2=mytable
mytable2[,4]=paste(mytable[,4]," (",mytable[,7],"%)",sep="")
mytable2[,5]=paste(mytable[,5]," (",mytable[,8],"%)",sep="")
mytable2[,6]=paste(mytable[,6]," (",mytable[,9],"%)",sep="")
write.csv(mytable2,file="new-table1-covid-removed.csv")
################## Table without COVID trials removed
## 2020
overall.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Submitted")$sector,levels=c("Academic","Pharma"))))
us.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Submitted",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Submitted",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.submitted=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Submitted",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Completed")$sector,levels=c("Academic","Pharma"))))
us.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
overall.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Primary.Completed")$sector,levels=c("Academic","Pharma"))))
us.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Primary.Completed",Country="United States")$sector,levels=c("Academic","Pharma"))))
europe.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Primary.Completed",Region="Europe")$sector,levels=c("Academic","Pharma"))))
asia.2020.primary.completed=addmargins(table(factor(mytabfun(2020,d2.2020,Measure="Primary.Completed",Region="Asia")$sector,levels=c("Academic","Pharma"))))
tab=cbind(rbind(overall.2019.submitted,us.2019.submitted,europe.2019.submitted,asia.2019.submitted,overall.2019.completed,us.2019.completed,europe.2019.completed,asia.2019.completed
,overall.2019.primary.completed,us.2019.primary.completed,europe.2019.primary.completed,asia.2019.primary.completed),
rbind(overall.2020.submitted,us.2020.submitted,europe.2020.submitted,asia.2020.submitted,overall.2020.completed,us.2020.completed,europe.2020.completed,asia.2020.completed
,overall.2020.primary.completed,us.2020.primary.completed,europe.2020.primary.completed,asia.2020.primary.completed
))
tab2=cbind(
round((tab[,4]-tab[,1])/tab[,1]*100,1),
round((tab[,5]-tab[,2])/tab[,2]*100,1),
round((tab[,6]-tab[,3])/tab[,3]*100,1))
mytable=cbind(tab,tab2)
mytable2=mytable
mytable2[,4]=paste(mytable[,4]," (",mytable[,7],"%)",sep="")
mytable2[,5]=paste(mytable[,5]," (",mytable[,8],"%)",sep="")
mytable2[,6]=paste(mytable[,6]," (",mytable[,9],"%)",sep="")
write.csv(mytable2,file="new-table1-covid-not-removed.csv")
##########################################################
##########################################################
###################################
##########################################
##########################################
# Figure gr1 inputs: monthly counts of US interventional trial submissions
# from the 2020 snapshot, all sectors combined (Sector=NULL).
Measure="Submitted"
Type="Interventional"
Country="United States"
#Sector="Academic"
Sector=NULL
dat=mydatfun2(Data=d2.2020,Measure=Measure,Type=Type,Country=Country,Sector=Sector,Year=2020)
gg_color_hue <- function(n) {
  # Reproduce ggplot2's default discrete palette: n evenly spaced hues on
  # the HCL color wheel at fixed luminance 65 and chroma 100.
  hue_grid <- seq(15, 375, length = n + 1)
  palette <- hcl(h = hue_grid, l = 65, c = 100)
  palette[1:n]
}
# Two colors (reversed so post-COVID months get the first hue).
mycols=gg_color_hue(2)[2:1]
# Month axis tick positions, Mar 2017 through Oct 2020.
mymonths=seq(as.Date("2017-03-01"), as.Date("2020-10-01"), "months")
library(lubridate)
#as.Date(c("2017-03-01","2017-04-01","2018-03-01","2019-03-01","2020-03-01","2020-11-01"))
#c("Mar 2017","Apr 2017","Mar 2018","Mar 2019","Mar 2020","Nov 2020")
# Figure gr1: log monthly submitted-trial counts (points colored by
# pre/post-COVID) with the seasonally adjusted series overlaid, and a
# vertical gold line marking March 2020.
pdf(file="gr1.pdf",width=8.5,height=4)
par(mar=c(4,3,1,1))
plot(dat$date.month,dat$value2,type="n",xlab="",ylab="",xaxt="n")
points(dat$date.month,dat$value2,pch=16,col=mycols[dat$postcovid+1],cex=1.3)
points(dat$date.month,dat$value2.seasonal,pch=3,col="magenta",cex=1,type="b")
#mtext("Month",side=1,line=2)
mtext("log(Num Submitted Trials)",side=2,line=2)
abline(v=as.Date("2020-03-01"),col="goldenrod",lwd=6,lty=1)
# NOTE(review): label=T is the reassignable shorthand; prefer label=TRUE.
axis(1,at=mymonths,labels=as.character(month(mymonths,label=T)),las=3)
axis(1,at=as.Date(c("2017-06-01","2018-06-01","2019-06-01","2020-06-01")),labels=c("2017","2018","2019","2020"),line=2,tick=FALSE)
dev.off()
########################################
######################################
Measure="Submitted"
Type="Interventional"
#Country="United States"
Country=NULL
Region="Asia"
Region=NULL
#Sector="Academic"
Sector=NULL
seasonal.correction=TRUE
dat=mydatfun2(Data=d2.2020,Measure=Measure,Type=Type,Country=Country,Sector=Sector,Year=2020,seasonal.correction=seasonal.correction,Region=Region)
dat.2019=mydatfun2(Data=d2.2019,Measure=Measure,Type=Type,Country=Country,Sector=Sector,Year=2019,seasonal.correction=seasonal.correction,Region=Region)
dat.diff=dat
dat.diff$value3=dat.diff$value3-dat.2019$value3
pdf("gr2.pdf",width=9,height=3)
ylim=NULL
#ylim=c(-.3,.3)
gr1=mygraphfun(Data=dat,Year=2020,ylim=ylim) + ggtitle("2020 Data")
gr2=mygraphfun(Data=dat.2019,Year=2019,ylim=ylim)+ ggtitle("2019 Data")
gr3=mygraphfun(Data=dat.diff,Year=2020,ylim=ylim)+ ggtitle("Difference")
gr1+gr2+gr3 + theme(plot.margin = margin(0, 0, 0, 0))
dev.off()
######################################################
################## gr3new ####################################
######################################################
myMeasure=c("Submitted","Completed","Primary.Completed")
#myCountry=c("United States","France","China","All")
myRegion=c("Europe","Asia","All")
Type="Interventional"
seasonal.correction=TRUE
info=array(NA,dim=c(length(myMeasure)*4,3))
counter=1
mylist=list(NA)
for(var.measure in myMeasure){
# for(var.country in myCountry){
for(var.region in myRegion){
#var.country2=var.country
#if(var.country=="All"){var.country=NULL}
var.region2=var.region
if(var.region=="All"){var.region=NULL}
#dat=mydatfun2(Data=d2.2020,Measure=var.measure,Type=Type,Country=var.country,Year=2020,seasonal.correction=seasonal.correction)
#dat.2019=mydatfun2(Data=d2.2019,Measure=var.measure,Type=Type,Country=var.country,Year=2019,seasonal.correction=seasonal.correction)
dat=mydatfun2(Data=d2.2020,Measure=var.measure,Type=Type,Region=var.region,Year=2020,seasonal.correction=seasonal.correction)
dat.2019=mydatfun2(Data=d2.2019,Measure=var.measure,Type=Type,Region=var.region,Year=2019,seasonal.correction=seasonal.correction)
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
dat2=dat
dat2$value3=dat2$value3-dat2$value3.2019
#fit=lm(value3~date.num2 + postcovid:date.num2 ,data=dat2)
#pval=signif(summary(fit)$coef[3,4],3)
fit1=lm(value3~date.num2*postcovid ,data=dat2)
fit2=lm(value3~date.num2,data=dat2)
#pval=format.pval(anova(fit1,fit2)$Pr[2],eps=.0001)
pval=scales::pvalue(anova(fit1,fit2)$Pr[2],accuracy=.0001,decimal.mark=".",add_p=T)
mylist[[counter]]=mygraphfun2(Data=dat2,Year=2020) + #ggtitle(paste("p-value=",pval,sep=""))
ggtitle(pval)
#info[counter,]=c(counter,var.measure,var.country2)
info[counter,]=c(counter,var.measure,var.region2)
counter=counter+1
print(counter)
flush.console()
}}
dat=mydatfun2(Data=d2.2020,Measure="Submitted",Type=Type,Country="United States",Year=2020,seasonal.correction=seasonal.correction)
dat.2019=mydatfun2(Data=d2.2019,Measure="Submitted",Type=Type,Country="United States",Year=2019,seasonal.correction=seasonal.correction)
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
dat2=dat
dat2$value3=dat2$value3-dat2$value3.2019
#fit=lm(value3~date.num2 + postcovid:date.num2 ,data=dat2)
#pval=signif(summary(fit)$coef[3,4],3)
fit1=lm(value3~date.num2*postcovid ,data=dat2)
fit2=lm(value3~date.num2,data=dat2)
#pval=format.pval(anova(fit1,fit2)$Pr[2],eps=.0001)
pval=scales::pvalue(anova(fit1,fit2)$Pr[2],accuracy=.0001,decimal.mark=".",add_p=T)
mylist[[length(myMeasure)*3+1]]=mygraphfun2(Data=dat2,Year=2020) + ggtitle(pval)
dat=mydatfun2(Data=d2.2020,Measure="Completed",Type=Type,Country="United States",Year=2020,seasonal.correction=seasonal.correction)
dat.2019=mydatfun2(Data=d2.2019,Measure="Completed",Type=Type,Country="United States",Year=2019,seasonal.correction=seasonal.correction)
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
dat2=dat
dat2$value3=dat2$value3-dat2$value3.2019
#fit=lm(value3~date.num2 + postcovid:date.num2 ,data=dat2)
#pval=signif(summary(fit)$coef[3,4],3)
fit1=lm(value3~date.num2*postcovid ,data=dat2)
fit2=lm(value3~date.num2,data=dat2)
#pval=format.pval(anova(fit1,fit2)$Pr[2],eps=.0001)
pval=scales::pvalue(anova(fit1,fit2)$Pr[2],accuracy=.0001,decimal.mark=".",add_p=T)
mylist[[length(myMeasure)*3+2]]=mygraphfun2(Data=dat2,Year=2020) + ggtitle(pval)
##########
dat=mydatfun2(Data=d2.2020,Measure="Primary.Completed",Type=Type,Country="United States",Year=2020,seasonal.correction=seasonal.correction)
dat.2019=mydatfun2(Data=d2.2019,Measure="Primary.Completed",Type=Type,Country="United States",Year=2019,seasonal.correction=seasonal.correction)
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
dat2=dat
dat2$value3=dat2$value3-dat2$value3.2019
#fit=lm(value3~date.num2 + postcovid:date.num2 ,data=dat2)
#pval=signif(summary(fit)$coef[3,4],3)
fit1=lm(value3~date.num2*postcovid ,data=dat2)
fit2=lm(value3~date.num2,data=dat2)
#pval=format.pval(anova(fit1,fit2)$Pr[2],eps=.0001)
pval=scales::pvalue(anova(fit1,fit2)$Pr[2],accuracy=.0001,decimal.mark=".",add_p=T)
mylist[[length(myMeasure)*3+3]]=mygraphfun2(Data=dat2,Year=2020) + ggtitle(pval)
###########
pdf("gr3new.pdf",width=12,height=9)
((mylist[[length(myMeasure)*3+1]] | mylist[[1]] | mylist[[2]] | mylist[[3]])/(mylist[[length(myMeasure)*3+2]] | mylist[[4]] | mylist[[5]] | mylist[[6]])/(mylist[[length(myMeasure)*3+3]] | mylist[[7]] | mylist[[8]] | mylist[[9]]))
dev.off()
#######################
#########gr4new###########
#######################
myMeasure=c("Submitted","Completed","Primary.Completed")
mySector=c("Academic","Pharma")
#myRegion=c("Americas","Europe","Asia","All")
Type="Interventional"
seasonal.correction=TRUE
info=array(NA,dim=c(length(myMeasure)*length(mySector),3))
counter=1
mylist=list(NA)
for(var.measure in myMeasure){
for(var.sector in mySector){
dat=mydatfun2(Year=2020,Data=d2.2020,Measure=var.measure,Type=Type,Sector=var.sector,seasonal.correction=seasonal.correction)
dat.2019=mydatfun2(Year=2019,Data=d2.2019,Measure=var.measure,Type=Type,Sector=var.sector,seasonal.correction=seasonal.correction)
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
dat2=dat
dat2$value3=dat2$value3-dat2$value3.2019
#fit=lm(value3~date.num2 + postcovid:date.num2 ,data=dat2)
#pval=signif(summary(fit)$coef[3,4],3)
fit1=lm(value3~date.num2*postcovid ,data=dat2)
fit2=lm(value3~date.num2,data=dat2)
#pval=format.pval(anova(fit1,fit2)$Pr[2],eps=.0001)
pval=scales::pvalue(anova(fit1,fit2)$Pr[2],accuracy=.0001,decimal.mark=".",add_p=T)
mylist[[counter]]=mygraphfun2(Data=dat2,Year=2020) + ggtitle(pval)
info[counter,]=c(counter,var.measure,var.sector)
counter=counter+1
print(counter)
flush.console()
}}
pdf("gr4new.pdf",width=12,height=9)
((mylist[[1]] | mylist[[3]] | mylist[[5]])/(mylist[[2]] | mylist[[4]] | mylist[[6]]))
dev.off()
########### World Map #################
Year=2019
Data=d2.2019
start.date1=as.Date(paste(Year,"04-01",sep="-"))
end.date1=as.Date(paste(Year,"11-01",sep="-"))
dat.submitted=data.frame(date=as.Date(Data$study_first_submitted_date),type=Data$study_type,country=Data$country) %>% filter((date>=start.date1 & date<end.date1) & type=="Interventional")
dat.completed=data.frame(date=as.Date(Data$completion_date),country=Data$country,completion_date_type=Data$completion_date_type,type=Data$study_type) %>% filter((date>=start.date1 & date<end.date1) & completion_date_type=="Actual" & type=="Interventional")
mycountries.submitted=names(which(sort(table(dat.submitted$country),decreasing=T)>=5))
mycountries.completed=names(which((sort(table(dat.completed$country),decreasing=T))>=5))
res.submitted=array(NA,dim=c(length(mycountries.submitted),2))
for(i in 1:length(mycountries.submitted)){
dat=mytabfun(2020,d2.2020,Measure="Submitted",Country=mycountries.submitted[i])
res.submitted[i,1]=ifelse(is.null(dim(dat)),0,dim(dat)[1])
dat=mytabfun(2019,d2.2019,Measure="Submitted",Country=mycountries.submitted[i])
res.submitted[i,2]=ifelse(is.null(dim(dat)),0,dim(dat)[1])
print(i)
flush.console()
}
rownames(res.submitted)=mycountries.submitted
res.completed=array(NA,dim=c(length(mycountries.completed),2))
for(i in 1:length(mycountries.completed)){
dat=mytabfun(2020,d2.2020,Measure="Completed",Country=mycountries.completed[i])
res.completed[i,1]=ifelse(is.null(dim(dat)),0,dim(dat)[1])
dat=mytabfun(2019,d2.2019,Measure="Completed",Country=mycountries.completed[i])
res.completed[i,2]=ifelse(is.null(dim(dat)),0,dim(dat)[1])
print(i)
flush.console()
}
rownames(res.completed)=mycountries.completed
#save(res.submitted,res.completed,file="world_map_data.RData")
#load("world_map_data.RData")
library(maps) #map_data
library(dplyr)
library(scales)
library(viridis)
world_map <- map_data("world")
#grep("Cyprus",world_map$subregion,value=T)
#grep("Cyprus",world_map$region,value=T)
world_map$region[world_map$region=="UK"]="United Kingdom"
world_map$region[world_map$region=="USA"]="United States"
world_map$region[world_map$region=="Guinea-Bissau"]="Guinea Bissau"
unique(d2.2020[!is.element(d2.2020$country,world_map$region),"country"])
unique(d2.2019[!is.element(d2.2019$country,world_map$region),"country"])
df.submitted=data.frame(res.submitted)
colnames(df.submitted)=c("Submitted.2020","Submitted.2019")
df.submitted$sub.tot=rescale(log(df.submitted$Submitted.2020+df.submitted$Submitted.2019),to=c(0,1))
df.submitted$sub.pd=(df.submitted$Submitted.2020-df.submitted$Submitted.2019)/(df.submitted$Submitted.2019)*100
df.submitted$sub.pd.cut=cut(df.submitted$sub.pd,breaks=c(-100,-30,-15,0,15,30,100),right=F)
min(df.submitted$sub.pd)
max(df.submitted$sub.pd)
df.submitted$region=rownames(res.submitted)
df.submitted.map=right_join(df.submitted,world_map, by = "region")
rownames(df.submitted)[!is.element(rownames(df.submitted),unique(world_map$region))]
df.completed=data.frame(res.completed)
colnames(df.completed)=c("Completed.2020","Completed.2019")
df.completed$com.tot=rescale(log(df.completed$Completed.2020+df.completed$Completed.2019),to=c(0,1))
df.completed$com.pd=(df.completed$Completed.2020-df.completed$Completed.2019)/(df.completed$Completed.2019)*100
df.completed$com.pd.cut=cut(df.completed$com.pd,breaks=c(-100,-30,-15,0,15,30,100),right=F)
#df.completed$com.pd.cut=cut(df.completed$com.pd,breaks=c(-100,-50,-40,-30,-20,-10,0,10,20,100),right=F)
table(df.completed$com.pd.cut)
min(df.completed$com.pd)
max(df.completed$com.pd)
df.completed$region=rownames(res.completed)
df.completed.map=right_join(df.completed,world_map, by = "region")
rownames(df.completed)[!is.element(rownames(df.completed),unique(world_map$region))]
dev.new(width=7, height=4, unit="in")
theme_set(theme_void())
ggplot(df.submitted.map, aes(long, lat, group = group,alpha=sub.tot))+
geom_polygon(aes(fill = sub.pd.cut), color = "gray",size = 0.05) + scale_fill_brewer(palette = "RdBu",name = "% Change",na.value="gray97",labels=c("< -30%","-30% to -15%","-15% to 0%","0% to 15%","15% to 30%","> 30%","NA")) + guides(alpha = F) +scale_alpha(range=c(1,1))+ theme(text=element_text(size=13,family="Comic Sans MS"),plot.margin = unit(c(0, 0, 0, -1), "cm"),plot.title = element_text(hjust = 0.5,face = "bold")) + ggtitle("Submitted Interventional Trials")
ggsave("fig5.png",type="cairo",dpi=600)
dev.new(width=7, height=4, unit="in")
#mycol=rgb(0,1,0,.1)
mycol="white"
theme_set(theme_void())
ggplot(df.completed.map, aes(long, lat, group = group,alpha=com.tot))+
geom_polygon(aes(fill = com.pd.cut), color = "gray",size = 0.05) + scale_fill_brewer(palette = "RdBu",name = "% Change",na.value="gray97",labels=c("< -30%","-30% to -15%","-15% to 0%","0% to 15%","15% to 30%","> 30%","NA")) + guides(alpha = F) +scale_alpha(range=c(1,1))+ theme(text=element_text(size=13,family="Comic Sans MS"),plot.margin = unit(c(0, 0, 0, -1), "cm"),plot.title = element_text(hjust = 0.5,face = "bold")) + ggtitle("Completed Interventional Trials")
ggsave("fig6.png",type="cairo",dpi=600)
ord1=order(df.submitted$sub.pd)
df1=df.submitted[ord1,]
ord2=order(df.completed$com.pd)
df2=df.completed[ord2,]
################################
####### MODELING ###############
################################
dat=mytabfun2(2020,d2.2020,Measure="Submitted",Country="United States")
sum(is.element(dat$nct_id,d.covid$nct_id))
mean(is.element(dat$nct_id,d.covid$nct_id))
###################################
Measure="Submitted"
Type="Interventional"
Sector="Academic"
Region="Americas"
Data=d2
Year=2020
Subregion=NULL
Country=NULL
mydat=mydatfun2(Data=d2,Measure=Measure,Type=Type,Sector=Sector,Region=Region,Year=Year)
count=1
myMeasure=c("Submitted","Completed","Primary.Completed")
Type=c("Interventional")
mySector=c("Academic", "Pharma")
myRegion=c("Americas", "Europe", "Asia")
Country=NULL
for(var.measure in myMeasure){
#for(var.type in myType){
for(var.sector in mySector){
for(var.region in myRegion){
if(var.region!="Americas"){Country=NULL}
if(var.region=="Americas"){
Country="United States"
var.region=NULL}
dat=mydatfun2(Data=d2.2020,Measure=var.measure,Type=Type,Region=var.region,Sector=var.sector,Year=2020,Country=Country)
dat.2019=mydatfun2(Data=d2.2019,Measure=var.measure,Type=Type,Region=var.region,Sector=var.sector,Year=2019,Country=Country)
if(is.null(var.region)){var.region="United States"}
if(!is.null(dim(dat)) & !is.null(dim(dat.2019))){
dat$measure=var.measure
dat$sector=var.sector
dat$region=var.region
dat$group=count
dat$value.2019=dat.2019$value
dat$value3.2019=dat.2019$value3
}
if(is.null(dim(dat)) | is.null(dim(dat.2019))){
dat=array(NA,dim=c(43,dim(mydat)[2]))
colnames(dat)=colnames(mydat)
dat=as.data.frame(dat)
dat$measure=var.measure
dat$type=var.type
dat$sector=var.sector
dat$region=var.region
dat$group=count
dat$value.2019=NA
dat$value3.2019=NA
}
if(count==1){mega=dat}
if(count>1){mega=bind_rows(mega,dat)}
count=count+1
print(count-1)
flush.console()
}}}
mega2=mega[!is.na(mega$value3),]
mega2$region=relevel(factor(mega2$region),ref="United States")
mega2$sector=relevel(factor(mega2$sector),ref="Pharma")
mega_region=mega2
#save(mega_region,file="mega_region_new.RData")
#load("mega_region_new.RData")
library(gtsummary)
library(gt)
dat = mega_region %>% filter(measure=="Submitted")
fit.submitted=lm(value3~value3.2019+scale(date.num2)+postcovid*sector+region,data=dat) %>% tbl_regression() %>% bold_labels() %>% bold_p()
dat = mega_region %>% filter(measure=="Completed")
fit.completed=lm(value3~value3.2019+scale(date.num2)+postcovid*sector+region,data=dat) %>% tbl_regression() %>% bold_labels() %>% bold_p()
dat = mega_region %>% filter(measure=="Primary.Completed")
fit.primary.completed=lm(value3~value3.2019+scale(date.num2)+postcovid*sector+region,data=dat) %>% tbl_regression() %>% bold_labels() %>% bold_p()
tbl=tbl_merge(tbls=list(fit.submitted,fit.completed,fit.primary.completed),tab_spanner=c("**Submitted**","**Completed**","**Primary.Completed**"))
tbl %>% as_gt() %>% gtsave(filename="newmodel.rtf")
####################
########################################
########################################
# Compare how far in the future the *anticipated* primary-completion dates of
# interventional trials lie, as seen from each snapshot's reference month
# (Nov 2019 vs Nov 2020). The same extraction is run twice, then the two
# log(days-ahead) distributions are compared with a Wilcoxon rank-sum test.
# NOTE(review): the 2019 and 2020 stanzas are copy-pasted -- a shared helper
# would remove the duplication.
Data=d2.2019
Measure="Primary.Completed"
Type="Interventional"
Sector=NULL
Region=NULL
Subregion=NULL
Country=NULL
mydate=as.Date("2019-11-01")
# Keep only anticipated (not actual) primary-completion dates after mydate.
dt1=data.frame(date=as.Date(Data$primary_completion_date),value=1,type=Data$study_type,sector=Data$sector,region=Data$region,subregion=Data$subregion,country=Data$country,completion_date_type=Data$primary_completion_date_type) %>% filter(completion_date_type=="Anticipated") %>% arrange(date) %>% filter((date>mydate ))
dt2=dt1
# Optional filters; all NULL here, so no rows are dropped.
if(!is.null(Country)){dt2=(dt2 %>% filter(country %in% Country))}
if(!is.null(Region)){dt2=(dt2 %>% filter(region==Region))}
if(!is.null(Subregion)){dt2=(dt2 %>% filter(subregion==Subregion))}
if(!is.null(Type)){dt2=(dt2 %>% filter(type==Type))}
if(!is.null(Sector)){dt2=(dt2 %>% filter(sector==Sector))}
# log(days between anticipated completion and the 2019 reference date).
res.2019=log(as.numeric((dt2$date-as.Date("2019-11-01"))))
######### 2020
Data=d2.2020
Measure="Primary.Completed"
Type="Interventional"
Sector=NULL
Region=NULL
Subregion=NULL
Country=NULL
mydate=as.Date("2020-11-01")
dt1=data.frame(date=as.Date(Data$primary_completion_date),value=1,type=Data$study_type,sector=Data$sector,region=Data$region,subregion=Data$subregion,country=Data$country,completion_date_type=Data$primary_completion_date_type) %>% filter(completion_date_type=="Anticipated") %>% arrange(date) %>% filter((date>mydate ))
dt2=dt1
if(!is.null(Country)){dt2=(dt2 %>% filter(country %in% Country))}
if(!is.null(Region)){dt2=(dt2 %>% filter(region==Region))}
if(!is.null(Subregion)){dt2=(dt2 %>% filter(subregion==Subregion))}
if(!is.null(Type)){dt2=(dt2 %>% filter(type==Type))}
if(!is.null(Sector)){dt2=(dt2 %>% filter(sector==Sector))}
res.2020=log(as.numeric((dt2$date-as.Date("2020-11-01"))))
# NOTE(review): `days` actually holds log(days) -- the name is misleading.
df=data.frame(days=c(res.2019,res.2020),year=c(rep(2019,length(res.2019)),rep(2020,length(res.2020))))
boxplot(df$days~df$year)
wilcox.test(df$days~df$year)
|
098d51a4950b2264d61a9c2c10f2e161830f5032 | 073c50a70500e880aa7be129ee5cd7fa5ed4e350 | /assignment/plot3.R | c32f1d732b693c08e60b23a3bf2bd3bd6c2ff8ad | [] | no_license | mgraef/ExData_Plotting1 | e37d204ef7b428041807b4fe5e236ac9d767765f | d29c432b1ac5455a3cb736df88d0fa043bd60e6f | refs/heads/master | 2021-01-22T15:04:36.086600 | 2014-05-10T16:44:29 | 2014-05-10T16:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,049 | r | plot3.R | ## Read Data from csv file
## plot3.R -- "Energy sub metering" line chart for 2007-02-01/02.
## Writes plot3.png to the working directory.

# Load the full data set; fields are semicolon-delimited.
data_file <- "household_power_consumption.txt"
power <- read.csv(data_file, sep = ";")

## Select data from 2007-02-01 to 2007-02-02
# Parse the Date column once, then keep only the two target days.
obs_date <- as.Date(power$Date, format = "%d/%m/%Y")
feb_days <- power[obs_date >= as.Date("2007-02-01") & obs_date <= as.Date("2007-02-02"), ]

## Store values of Sub Metering in 3 separate vectors
# The columns may have been read as factors (the file marks NA as "?"),
# so go through character before the numeric conversion.
to_num <- function(v) as.numeric(as.character(v))
sub1 <- to_num(feb_days$Sub_metering_1)
sub2 <- to_num(feb_days$Sub_metering_2)
sub3 <- to_num(feb_days$Sub_metering_3)

## Store the combined date-time stamps for the x axis
stamp <- as.POSIXct(paste(feb_days$Date, feb_days$Time), format = "%d/%m/%Y %H:%M:%S")

## Create plot3.png: empty frame, legend, then the three series on top.
png(file = "plot3.png")
plot(stamp, sub1, type = "n", ylab = "Energy sub metering", xlab = "")
legend("topright", col = c("black", "red", "blue"), lty = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
points(stamp, sub1, type = "l", col = "black")
points(stamp, sub2, type = "l", col = "red")
points(stamp, sub3, type = "l", col = "blue")
dev.off()
|
8821cf3f71b2eb76d67acbc8a8a78b5ceb9245ef | aa9563fb769372b219e83b53430e0afc65fe02c0 | /aggregation.R | bd4279351bde2403b035f4e5714d9d6fc73b807d | [] | no_license | peterkabai/dataScience | e357d35b01c610a38488109bb2ef4f197b15286c | 99652c4c05ca8f26df8760c67b5882d13935a324 | refs/heads/master | 2020-04-09T09:54:41.677461 | 2019-05-09T04:33:22 | 2019-05-09T04:33:22 | 152,819,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,916 | r | aggregation.R | # read the census data as a data frame
# Exploratory aggregation exercises on a census (UCI adult-style) data set.
# NOTE(review): the data is fetched over the network on every run; cache it
# locally if the script is re-run often.
dat = read.csv("https://raw.githubusercontent.com/peterkabai/dataScience/master/data/census.csv")
# how many people are in each category of education?
edu_tbl = table(dat$education)
edu_tbl
# plot the table with using function 'plot'
plot(edu_tbl)
# plot the table with 'barplot'
barplot(edu_tbl)
# apply sort to edu_tbl, then plot
barplot(sort(edu_tbl))
# convert the table into a data frame, where one column
# shows education and the other column shows a count
frame = as.data.frame(edu_tbl)
# rename the columns of the data frame to "education" and "count"
names(frame) = c("education","count")
frame
# sort the data frame by decreasing count values
frame[order(-frame$count),]
# using 'table', find the count of each combination of sex and workclass
# (note: `edu_tbl` is reused here and now holds a sex x workclass table)
edu_tbl = table(dat$sex, dat$workclass)
edu_tbl
# convert your result to a dataframe, and then rename columns
frame = as.data.frame(edu_tbl)
names(frame) = c("Sex","Workclass","Frequency")
frame
# for each native country, what is the average education num?
age = aggregate(education_num ~ native_country, data=dat, mean)
age
# sort the resulting data frame by decreasing mean education num
age[order(-age$education_num),]
# for each occupation, compute the median age
# (fixed: the exercise asks for the median -- and the later box-plot
# section confirms the median is intended -- but `mean` was passed)
age = aggregate(age ~ occupation, data=dat, median)
age

# order the output by increasing age
inc = age[order(age$age),]
inc

# plot the result as a bar plot of median age per occupation
barplot(inc$age, names.arg = inc$occupation)
# write a function is_adult that returns TRUE just when the input
# number is >= 21
# The comparison itself already yields the required logical value, so no
# if/else branching is needed; as a bonus this version is vectorized and
# works element-wise on whole vectors (e.g. is_adult(dat$age)).
is_adult = function(number) {
  number >= 21
}
is_adult(21) # should be true
is_adult(20) # should be false
# apply this function to every value of the 'age' column using 'sapply'
sapply(dat$age, is_adult)

# create a data frame by getting only capital_gain and capital_loss
# columns from dat
dat_new = dat[,c("capital_gain","capital_loss")]
dat_new

# create a vector consisting of the means of each column
# (fixed: the result was assigned to `means2` but the next line printed
# `means`, which was never defined)
means = apply(dat_new, 2, mean)
means

# create a vector of boolean values indicating, for each row,
# whether at least one of the two values was 0
at_least_one_zero = c(dat_new[,"capital_gain"] == 0 | dat_new[,"capital_loss"] == 0)
at_least_one_zero

# using this vector, check whether all rows have at least one 0
all(at_least_one_zero)

# find median age by occupation, not using aggregation, but by
# creating a box plot (the box midline is the median)
boxplot(age ~ occupation, data=dat)

# to make the occupation values easier to read, make a horizontal
# box plot
boxplot(age ~ occupation, data=dat, horizontal=TRUE)

# using one aggregate function call, get both the average
# education_num for each native country, as well as the number of people
# associated with that native country
agg = aggregate(
  education_num ~ native_country,
  data=dat,
  function(x) c(mean=mean(x), count=round(length(x)))
)
agg
|
0d623adf6b18428a0e25c6bb0d98be431e9d8fd9 | 04b26d022d1993b53412f72dde3af795388fa58e | /AllpseudoBulk_Venn.R | 15fdf6a94e8fdbe6f87860dc76d4e967fbb6c6a9 | [] | no_license | rajansanjana09/BMCHetero2021 | 7628f7a3e5e349a922efee9c2943db4aaae5a28c | c63b43e1a7db147cea046a2b233f647985fbce9e | refs/heads/main | 2023-06-19T06:10:49.448327 | 2021-07-10T02:33:16 | 2021-07-10T02:33:16 | 383,254,792 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,384 | r | AllpseudoBulk_Venn.R | # Psudobulk to determine gene differnetially regulated on tissue colonization and
# Venn diagram to visualize shared genes
# # Merge into a single Seurat object
# OS <- merge(OB, y = c(os17.cx.raw, os17.tib.raw, os17.lung.raw,
# t143b.cx.raw, t143b.tib.raw, t143b.lung.raw,
# OS2.cx.raw, OS2.tib.raw, OS2.lung.raw,
# OS7.cx.raw, OS7.tib.raw, OS7.lung.raw),
# add.cell.ids = c("OB", "OS17_Culture", "OS17_Tibia", "OS17_Lung",
# "t143b_Culture", "t143b_Tibia", "t143b_Lung",
# "NCHOS2_Flank", "NCHOS2_Tibia", "NCHOS2_Lung",
# "NCHOS7_Flank", "NCHOS7_Tibia", "NCHOS7_Lung"),
# project = "Heterogeneity")
#OS17
os17 <- subset(OS, cells = WhichCells(OS, expression = cond == "OS17"))
t143B <- subset(OS, cells = WhichCells(OS, expression = cond == "t143b"))
NCHOS2 <- subset(OS, cells = WhichCells(OS, expression = cond == "NCHOS2"))
NCHOS7 <- subset(OS, cells = WhichCells(OS, expression = cond == "NCHOS7"))
#extract markers
data <- os17
DimPlot(data, group.by = "src")
#rename Idents to src type before DGE analysis
Idents(data, cells = WhichCells(data, expression = src == "OS17_Culture")) <- "Culture"
Idents(data, cells = WhichCells(data, expression = src == "OS17_Lung")) <- "Lung"
Idents(data, cells = WhichCells(data, expression = src == "OS17_Tibia")) <- "Tibia"
data[["tissue"]] <- Idents(data)
os17 <- data
DimPlot(os17)
data <- list(
os17 = os17,
t143B = t143B,
NCHOS2 = NCHOS2,
NCHOS7 = NCHOS7
)
#Lung marker - UP
Lung.up <- list(NULL)
for (i in 1:length(data)) {
tmp <- data[[i]]
x <- FindMarkers(tmp, ident.1 = "Lung", ident.2 = "Culture", only.pos = TRUE, min.pct = 0.1)
Lung.up[[i]] <- rownames(x)
}
#Tibia markers - UP
Tibia.up <- list(NULL)
for (i in 1:length(data)) {
tmp <- data[[i]]
x <- FindMarkers(tmp, ident.1 = "Tibia", ident.2 = "Culture", only.pos = TRUE, min.pct = 0.1)
Tibia.up[[i]] <- rownames(x)
}
#Lung markers - DOWN
Lung.down <- list(NULL)
for (i in 1:length(data)) {
tmp <- data[[i]]
x <- FindMarkers(tmp, ident.1 = "Lung", ident.2 = "Culture", only.pos = FALSE, min.pct = 0.1)
x <- x[x$avg_log2FC<0,]
Lung.down[[i]] <- rownames(x)
}
#Tibia markers - DOWN
Tibia.down <- list(NULL)
for (i in 1:length(data)) {
tmp <- data[[i]]
x <- FindMarkers(tmp, ident.1 = "Tibia", ident.2 = "Culture", only.pos = FALSE, min.pct = 0.1)
x <- x[x$avg_log2FC<0,]
Tibia.down[[i]] <- rownames(x)
}
############################Example Venn Diagram
# if (!require(devtools)) install.packages("devtools")
# devtools::install_github("yanlinlin82/ggvenn")
library(ggvenn)
if (!require(devtools)) install.packages("devtools")
# devtools::install_github("gaospecial/ggVennDiagram")
# library("ggVennDiagram")
genes <- Lung.down
x <- list(
A = genes[[1]],
B = genes[[2]],
C = genes[[3]],
D = genes[[4]]
)
pdf("Lung.down.pdf", width = 7, height = 7)
plot <- ggvenn(
x,
fill_color = c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF"),
stroke_size = 0.9, set_name_size = 10, show_percentage = TRUE
)
plot
dev.off()
# ggVennDiagram(x[1:4], label_alpha = 0.7, label = "count",
# category.names = c("OS17",
# "143B",
# "NCHOS2",
# "NCHOS7")) +
# scale_fill_gradient(low = "#F4FAFE", high = "#4981BF")+
# theme(legend.title = element_text(color = "black"),
# legend.position = "right")
###Extract genes that are shared in these datasets
genes <- Lung.down
A = genes[[1]]
B = genes[[2]]
C = genes[[3]]
D = genes[[4]]
# # Preparing clusterProfiler to perform hypergeometric test on msigdb signatures
# m_t2g.c2 <- msigdbr(species = "Homo sapiens", category = "C2") %>%
# dplyr::select(gs_name, human_gene_symbol)
# m_t2g.c6 <- msigdbr(species = "Homo sapiens", category = "C6") %>%
# dplyr::select(gs_name, human_gene_symbol)
# m_t2g.h <- msigdbr(species = "Homo sapiens", category = "H") %>%
# dplyr::select(gs_name, human_gene_symbol)
# m_t2n.h <- msigdbr(species = "Homo sapiens", category = "H") %>%
# dplyr::select(gs_id, gs_name)
# m_t2g=rbind(m_t2g.c2,m_t2g.c6)
#
# # msigdb signature to use
# msig.gene.set = m_t2g.h
# msig.name = m_t2n.h
# Genes shared by all four models plus every three-way combination; genes
# in the four-way overlap therefore appear several times in this vector.
# NOTE(review): this assignment shadows base::intersect for the rest of the
# session; a different name (e.g. shared_genes) would be safer.
intersect <- c((intersect(intersect(intersect(A,B),C),D)),
               (intersect(intersect(A,B),C)),
               (intersect(intersect(A,B),D)),
               (intersect(intersect(A,D),C)),
               (intersect(intersect(B,D),C)))
# Over-representation test of the shared genes against the msigdb term sets.
# NOTE(review): msig.gene.set / msig.name are only defined in the
# commented-out block above -- they must already exist in the session for
# this line to run.
tmp <- enricher(intersect, TERM2GENE=msig.gene.set, TERM2NAME = msig.name)
em=tmp@result[,c("ID", "p.adjust")] #pvalue/p.adjust
rownames(em) <- NULL
# Keep the 10 top terms and switch to a -log10 scale for plotting.
em <- em[1:10,]
em[,2] <- -log10(em[,2])
# barplot(em)
# pathways <- list(NULL)
# NOTE(review): `pathways` is never initialised in live code (the
# initialiser above is commented out), so this fails in a fresh session.
pathways[[4]] <- em
write.xlsx(pathways, file ="R:/RESRoberts/Bioinformatics/Analysis/Sanjana/2020/pathways.xlsx", col.names = TRUE, row.names = TRUE, append = FALSE)
save.image(file ="R:/RESRoberts/Bioinformatics/Analysis/Sanjana/2020/AllpseudoBulk_Venn.RData")
####################Intersection of deferentially regulated genes between Bone colonization and Lung colonization
# total <- list(
# Tibia.up = Tibia.up,
# Tibia.down = Tibia.down,
# Lung.up = Lung.up,
# Lung.down = Lung.down
# )
# all.genes <- list(NULL)
# for (i in 1:4) {
# genes <- total[[i]]
# A = genes[[1]]
# B = genes[[2]]
# C = genes[[3]]
# D = genes[[4]]
# intersect <- intersect(intersect(intersect(A,B),C),D)
# all.genes[[i]] <- intersect
# }
#
# x <- list(
# A = all.genes[[1]],
# B = all.genes[[2]],
# C = all.genes[[3]],
# D = all.genes[[4]]
# )
#
# pdf("TibiaUpdown_Lungupdown.pdf", width = 5, height = 5)
# plot <- ggvenn(
# x,
# fill_color = c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF"),
# stroke_size = 0.9, set_name_size = 4, show_percentage = TRUE
# )
# plot
# dev.off()
################ Two sided Barplot
# Diverging horizontal barplot faceted by tissue; the input file must
# provide Order, p.adjust, Tissue, Label_y and Pathway columns. Bars are
# coloured by the sign of p.adjust and labelled with the pathway name.
# NOTE(review): the input lives in a user Downloads folder -- move it into
# the project for reproducibility.
SanjanaPlots <- read.delim("C:/Users/rssxr002/Downloads/SanjanaPlots.txt")
ggplot(SanjanaPlots, aes(x = -1 * Order,
                         y = p.adjust,
                         fill = p.adjust > 0)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  facet_wrap(~ Tissue) +
  scale_fill_brewer(type = "qual", palette = "Set1") +
  geom_text(aes(y = Label_y * 3, label = Pathway)) +
  theme_bw() +
  ylab("") +
  ylim(-5, 5)
|
d657311ad43429d33587278750e29b3458544f2c | 736894dd80e1bc287d78b56c48e1479033c0ce44 | /man/subsetColnames.Rd | e4f380b9f6518d0a65f0a137f491947fbd9d95f7 | [
"MIT"
] | permissive | campbio/ExperimentSubset | cf87a0887e844789aa992743e88ead51997d443a | e11ce736489e22089692937d63f9d9e6c8a91aba | refs/heads/master | 2023-08-04T17:47:04.848732 | 2021-09-05T14:30:17 | 2021-09-05T14:30:17 | 282,447,756 | 11 | 0 | MIT | 2021-09-05T10:56:27 | 2020-07-25T13:18:54 | R | UTF-8 | R | false | true | 2,689 | rd | subsetColnames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allGenerics.R,
% R/SubsetRangedSummarizedExperiment.R, R/SubsetSingleCellExperiment.R,
% R/SubsetSummarizedExperiment.R, R/SubsetSpatialExperiment.R,
% R/SubsetTreeSummarizedExperiment.R
\name{subsetColnames}
\alias{subsetColnames}
\alias{subsetColnames<-}
\alias{subsetColnames,SubsetRangedSummarizedExperiment,character-method}
\alias{subsetColnames<-,SubsetRangedSummarizedExperiment,character-method}
\alias{subsetColnames,SubsetSingleCellExperiment,character-method}
\alias{subsetColnames<-,SubsetSingleCellExperiment,character-method}
\alias{subsetColnames,SubsetSummarizedExperiment,character-method}
\alias{subsetColnames<-,SubsetSummarizedExperiment,character-method}
\alias{subsetColnames,SubsetSpatialExperiment,character-method}
\alias{subsetColnames<-,SubsetSpatialExperiment,character-method}
\alias{subsetColnames,SubsetTreeSummarizedExperiment,character-method}
\alias{subsetColnames<-,SubsetTreeSummarizedExperiment,character-method}
\title{subsetColnames}
\usage{
subsetColnames(x, subsetName)
subsetColnames(x, subsetName) <- value
\S4method{subsetColnames}{SubsetRangedSummarizedExperiment,character}(x, subsetName)
\S4method{subsetColnames}{SubsetRangedSummarizedExperiment,character}(x, subsetName) <- value
\S4method{subsetColnames}{SubsetSingleCellExperiment,character}(x, subsetName)
\S4method{subsetColnames}{SubsetSingleCellExperiment,character}(x, subsetName) <- value
\S4method{subsetColnames}{SubsetSummarizedExperiment,character}(x, subsetName)
\S4method{subsetColnames}{SubsetSummarizedExperiment,character}(x, subsetName) <- value
\S4method{subsetColnames}{SubsetSpatialExperiment,character}(x, subsetName)
\S4method{subsetColnames}{SubsetSpatialExperiment,character}(x, subsetName) <- value
\S4method{subsetColnames}{SubsetTreeSummarizedExperiment,character}(x, subsetName)
\S4method{subsetColnames}{SubsetTreeSummarizedExperiment,character}(x, subsetName) <- value
}
\arguments{
\item{x}{Input \code{ExperimentSubset} object.}
\item{subsetName}{Name of the subset to set \code{colnames} to.}
\item{value}{Specify the colname values to replace.}
}
\value{
A \code{vector} of \code{colnames}.
Input object with colnames set to a subset.
}
\description{
Get \code{colnames} from a subset in the \code{ExperimentSubset} object.
Set \code{colnames} to a subset in the \code{ExperimentSubset} object.
}
\examples{
data(sce_chcl, package = "scds")
es <- ExperimentSubset(sce_chcl)
es <- createSubset(es,
"subset1",
rows = c(10,11,50,56,98,99,102,105,109, 200),
cols = c(20,21,40,45,90,99,100,123,166,299),
parentAssay = "counts")
subsetColnames(es, "subset1")
}
|
271a2816e0aac60341eec439feb74516eb9dc809 | 1963ea1341f60c40055035cb7609fb1e140bfef2 | /flowtype_flowcap_pipeline-master/05_data_trim-feat.R | 01ae051e35a98cd17704542df59e2bba95d1b64f | [] | no_license | aya49/flowGraph_experiments | 86359a457c0049790b892b91d7713ff7e65b27b3 | 2ef4e0b53f425a090b2ee6c1010d91e675a893de | refs/heads/master | 2023-04-27T04:46:53.195705 | 2023-04-15T22:31:32 | 2023-04-15T22:31:32 | 178,804,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,643 | r | 05_data_trim-feat.R | # FlowType; Trim feature matrices using all pvalues
# aya43@sfu.ca 20180405
# For every (non-TRIM) feature matrix, zero out the entries whose matching
# p-value in each *TRIM p-value matrix is 0, then save the result as a new
# sparse matrix alongside the original.
## root directory
root = "~/projects/flowCAP-II"
result_dir = "result"; suppressWarnings(dir.create (result_dir))
setwd(root)
## input directories
meta_dir = paste0(result_dir,"/meta")
meta_cell_dir = paste(meta_dir, "/cell", sep="")
feat_dir = paste(result_dir, "/feat", sep=""); dir.create(feat_dir, showWarnings=F)
# feat_file_cell_count_dir = paste(feat_dir, "/file-cell-count", sep="")
## output directories
## libraries
# NOTE(review): libr(), fileNames(), TimeOutput() and (presumably) the
# Matrix package come from the sourced helper file -- confirm it is
# available before running.
source("~/projects/IMPC/code/_funcAlice.R")
libr("stringr")
libr("entropy")
libr("foreach")
libr("doMC")
## cores
no_cores = 15#detectCores() - 1
registerDoMC(no_cores)
## options
options(stringsAsFactors=FALSE)
options(device="cairo")
options(na.rm=T)
#matrices to not trim
notrimmatrix = "Max|pval"
writecsv = T
start = Sys.time()
start1 = Sys.time()
#get list of children for each non-leaf node & save
cat("\ncreating child matrix")
# m = get(load(paste0(feat_file_cell_count_dir,".Rdata")))
meta_cell = get(load(paste0(meta_cell_dir,".Rdata")))
# Collect the p-value TRIM matrices; each label is everything after the
# second "-" in the file name.
# NOTE(review): pattern=".Rdata" is a regex where "." matches any
# character; "\\.Rdata$" would be stricter.
pvalTRIM_paths = list.files(feat_dir, full.names=T, pattern=".Rdata")
pvalTRIM_paths = pvalTRIM_paths[grepl("TRIM",pvalTRIM_paths) & grepl("pval",pvalTRIM_paths)]
pvalTRIM_labels = gsub(".Rdata","",fileNames(pvalTRIM_paths))
pvalTRIM_labels = sapply(str_split(pvalTRIM_labels,"[-]"),
                         function(x) paste(x[3:length(x)],collapse="-") )
pvalTRIMs <- lapply(pvalTRIM_paths, function(x) get(load(x)))
names(pvalTRIMs) = pvalTRIM_labels
# Feature matrices to trim: everything except already-trimmed/full ones and
# the kinds listed in `notrimmatrix`.
feat_paths = list.files(feat_dir, full.names=T, pattern=".Rdata")
feat_paths = feat_paths[!grepl("TRIM|FULL",feat_paths)]
feat_paths = feat_paths[!grepl(notrimmatrix,feat_paths)]
# Trim each feature matrix against each p-value matrix, parallel over
# feature files.
a = foreach (feat_path = feat_paths) %dopar% {
  feat_m0 = get(load(feat_path))
  # Feature columns are matched to p-value columns by the last
  # "_"-separated token of their names.
  feat_mcol0 = sapply(str_split(colnames(feat_m0),"_"), function(x) x[length(x)])
  for (pvalTRIM_name in names(pvalTRIMs)) {
    feat_p = pvalTRIMs[[pvalTRIM_name]]
    # Restrict to the rows/column-groups covered by this p-value matrix.
    feat_mcol_ind = feat_mcol0%in%colnames(feat_p)
    feat_mrow_ind = rownames(feat_m0)%in%rownames(feat_p)
    feat_m = feat_m0[feat_mrow_ind, feat_mcol_ind]
    feat_mcol = sapply(str_split(colnames(feat_m),"_"), function(x) x[length(x)])
    # Positions where the p-value matrix is exactly 0: the corresponding
    # feature entries are zeroed, one p-value column at a time.
    pis0 = which(as.matrix(feat_p)==0,arr.ind=T)
    for (pis0c in unique(pis0[,2])) {
      rowind = rownames(feat_m) %in% rownames(feat_p)[ pis0[pis0[,2]%in%pis0c,1] ]
      colind = feat_mcol %in% colnames(feat_p)[pis0c]
      feat_m[rowind, colind] = 0
    }
    # Store sparsely; the output file name carries the p-value label.
    feat_m = Matrix(feat_m, sparse=T)
    save(feat_m,file=gsub(".Rdata",paste0(".",pvalTRIM_name,".Rdata"),feat_path))
  }
}
TimeOutput(start)
|
566c254d54c314c27fdd75d53543c0e9f1a2c909 | 6e7152b53e80ca4e03a7b4144bf2526a3c1dcc52 | /hex_plots.R | 9e046e0ff0a6ca33522a088068984b5cd887e485 | [] | no_license | cbur24/El-Nino-Analysis | 66184b9fdacd80c41bb9a98d98877a9a3a48fb01 | 28e934dd487be92278d174712844ea1ea7940cb7 | refs/heads/master | 2021-04-15T09:50:24.241544 | 2019-01-14T22:10:59 | 2019-01-14T22:10:59 | 126,665,747 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 954 | r | hex_plots.R | #hex plots?
# Hex-binned station-vs-model temperature scatter, one panel per id, with
# an lm fit (dashed) and the 1:1 line as a bias reference.
ggplot(data=tempScatter_all, aes(x=TAVG_station, y=TAVG_model, colour=id)) +
  facet_wrap(~id, scale="free", ncol=3)+
  geom_hex(bins = 15, show.legend = T)+
  stat_smooth(method = "lm", linetype="longdash", col="black", size=1)+
  geom_abline(intercept = 0,slope=1, col="black", size=1)+
  theme_bw()+
  theme(aspect.ratio = 1)+
  xlab("Station Temp. (°C)")+
  ylab("Model Temp. (°C)")+
  theme(legend.position="none")+
  theme(strip.background =element_rect(fill="peachpuff2"))
#
# geom_text(aes(label=paste("R2=", round(r.squared,digits=2), sep = "")),col="grey30",
# x=-Inf, y=Inf, hjust=-0.2, vjust=1.2, size=3.5)+
# geom_text(aes(label=paste("MAE=", round(mae,digits=2), sep = "")),col="grey30",
# x=-Inf, y=Inf, hjust=-0.2, vjust=2.4, size=3.5)+
# geom_text(aes(label=paste("RMSE=", round(rmse,digits=2), sep = "")),col="grey30",
# x=-Inf, y=Inf, hjust=-0.2, vjust=3.6, size=3.5)
|
3b0464747726d6d9a1dfa341159bef9c0ed5aea4 | 9423c2ae3708e7e7ac78f72b4344b2dcd16119e2 | /smooth_per_factors.R | 5ee0a26b618a75664553aceee2a5d7835566404a | [
"MIT"
] | permissive | TaiSakuma/mianRs | 57d5d3e41438a35d560e7c1f0e954ab453710ac1 | 9b133cad30f5a0cadec510e96b552b0be7ba48f8 | refs/heads/master | 2021-01-18T23:18:32.366644 | 2018-05-19T08:09:16 | 2018-05-19T08:09:16 | 28,632,406 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,703 | r | smooth_per_factors.R | # Copyright (C) 2010 Tai Sakuma <sakuma@bnl.gov>
# Fit a weighted smoothing spline of y on x within every combination of the
# remaining ("factor") columns of `data`, evaluate each fit at `newx`, and
# return the predictions as one data frame.
#
# Args:
#   data     - data frame holding the x/y columns, the weight column, and
#              any number of grouping columns.
#   newx     - x values at which each per-group spline is evaluated.
#   xy.names - length-2 character vector: names of the x and y columns.
#   w.name   - name of the weight column passed to smooth.spline().
#   ...      - forwarded to smooth.spline() (e.g. df, spar).
# Returns: data frame with one row per (group combination, newx value),
#   with the group columns plus predicted values renamed to xy.names.
#   Groups with fewer than 4 distinct x values contribute no rows.
smooth.per.factors <- function(data, newx, xy.names, w.name, ...)
{
  # Orchestrates the whole computation.
  # NOTE(review): `degrees.of.freedom` is a free variable defined nowhere in
  # this function; the call at the bottom only works because R evaluates
  # arguments lazily and `main` never touches this parameter. Consider
  # deleting the parameter.
  main <- function(data, degrees.of.freedom)
  {
    # Grouping columns = everything that is not x, y, or the weight.
    factor.names <- names(data)[!names(data) %in% c(xy.names, w.name)]
    if(length(factor.names) == 0)
    {
      # No grouping columns: add a constant dummy factor so the by()-based
      # machinery below still produces exactly one group.
      data$ffffff <- 'f'
      factor.names <- c('ffffff')
    }
    b <- do.smooting.by.factors(data, factor.names, xy.names, w.name, ...)
    f <- smooting.by.to.data.frame(b, data, newx, factor.names, xy.names)
    # NOTE(review): `h` is computed and zero-filled but never used or
    # returned -- looks like dead code from an earlier version; confirm
    # before removing.
    h <- merge(data[factor.names], f)
    h[is.na(h[xy.names[2]]), xy.names[2]] <- 0
    # Drop the dummy factor column again (no-op when real factors exist).
    f$ffffff <- NULL
    f
  }
  # Split data by every combination of the factor columns and fit one
  # spline per group; returns the by() object of fits (or NULLs).
  do.smooting.by.factors <- function(data, factor.names, xy.names, w.name, ...)
  {
    factor.list <- lapply(seq_along(factor.names), function(i) data[ ,factor.names[i]])
    names(factor.list) <- factor.names
    b <- by(data[c(xy.names, w.name)], factor.list, function(x) call.smooth.spline(x[ ,xy.names[1]], x[ ,xy.names[2]], x[ ,w.name], ...))
  }
  # Guarded wrapper: smooth.spline() requires at least four distinct x
  # values, so return NULL for groups that are too small.
  call.smooth.spline <- function(x, y, w, ...)
  {
    if(length(unique(x)) < 4) return(NULL) # smooth.spline() needs at least four points.
    s <- smooth.spline(x, y, w, ...)
    s
  }
  # Turn the by() object of fits into a flat data frame of predictions at
  # `newx`, one block of rows per factor-level combination.
  smooting.by.to.data.frame <- function(by, data, newx, factor.names, xy.names)
  {
    # Expand one fitted spline into length(newx) prediction rows, carrying
    # the group's factor values alongside; NULL fits contribute no rows.
    add.rows.smooth.spline <- function(blank.row, spline, newx)
    {
      if(is.null(spline)) { return(data.frame()) }
      if(length(factor.names) != 1)
        r <- blank.row[rep(1, length(newx)),]
      else
      {
        # Single-factor case: blank.row is a bare value, not a data frame,
        # so rebuild a one-column data frame by repetition.
        r <- data.frame(rep(blank.row, length(newx)))
        names(r) <- factor.names
      }
      pre <- predict(spline, newx)
      r <- cbind(r, data.frame(x = pre$x, y = pre$y))
      r
    }
    if(length(factor.names) == 0) return(NULL)
    # All levels of each grouping column, matching by()'s enumeration order.
    factors <- lapply(factor.names, function(x) if(is.factor(data[[x]])) levels(data[[x]]) else levels(factor(data[[x]]) ))
    blank.data.frame <- expand.grid(factors)
    names(blank.data.frame) <- factor.names
    f <- by.to.data.frame(by, blank.data.frame, newx, add.rows.smooth.spline)
    # Rename the generic x/y prediction columns to the caller's names; only
    # drop 'x'/'y' when the caller did not literally ask for those names.
    f[xy.names[1]] = f$x
    f[xy.names[2]] = f$y
    if(!('x' %in% xy.names)) f$x <- NULL
    if(!('y' %in% xy.names)) f$y <- NULL
    f
  }
  # Walk the by() results in the same order as the expanded factor grid and
  # concatenate the rows produced by `add.rows` for each group.
  by.to.data.frame <- function (by, blank.data.frame, newx, add.rows)
  {
    dff <- data.frame()
    for(i in 1:nrow(blank.data.frame))
    {
      dff <- rbind(dff, add.rows(blank.data.frame[i,], by[[i]], newx))
    }
    dff
  }
  main(data, degrees.of.freedom)
}
|
f8df0ad84afc018f6032ba084e89b9c56c2cd0e1 | f01fedea632515c6e523939689081e18f8b45699 | /annotate_peaks_ChIPpeakAnno/annotate_peaks.R | eeb7e7c52a870b1a4a3fc49f642bb4b61b4d6d7c | [] | no_license | WeiSong-bio/Bioinformatics--------------- | 9c6e7a05e9234b03b9471421e8901c7ff65f7dfe | a7ab7b3536bb8cc7fbe902d854694c769a93170c | refs/heads/master | 2020-06-16T01:01:24.602945 | 2018-07-25T22:22:41 | 2018-07-25T22:22:41 | 195,438,451 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,567 | r | annotate_peaks.R | #!/usr/bin/env Rscript
## USAGE: annotate_peaks.R /path/to/peaks.bed /path/to/output/annotated_peaks.tsv
## DESCRIPTION: This script will run annotate peaks with ChIPpeakAnno, using hg19
# get script args
# NOTE(review): the two positional args are never validated; a missing
# second argument surfaces later as an NA output path.
args <- commandArgs(TRUE)
message("\nScript args are:\n")
print(args)
input_peaks_file <- args[1]
output_annotated_peaks_file <- args[2]
# check number of lines > 0
# Empty BED input: create an empty output file and exit the whole script.
if(length(readLines(input_peaks_file)) < 1){
    message(sprintf("ERROR: No lines present in input BED file:\n%s\n", input_peaks_file))
    message(sprintf("No regions present, making empty file: %s", output_annotated_peaks_file))
    file.create(output_annotated_peaks_file)
    quit()
}
message("\nLoading packages...\n")
# source("https://bioconductor.org/biocLite.R")
# biocLite("ChIPpeakAnno")
library("ChIPpeakAnno")
library("biomaRt")
# read in the BED file
message("\nReading in the BED file...\n")
peaks_granges <- toGRanges(input_peaks_file, format="BED", header=FALSE)
# for hg19
# get biomart reference genome information
# check for a saved copy first..
# The biomaRt queries are slow, so results are cached in the current
# working directory and reused on subsequent runs.
biomart_data_file <- file.path(getwd(), "biomart_data.RData")
if(file.exists(biomart_data_file)){
    message(sprintf("Found biomaRt data file:\n%s\nLoading data from file...", biomart_data_file))
    load(biomart_data_file)
} else {
    message("Saved biomaRt data file not found!")
    message("Retreiving reference information for hg19 from biomaRt, this might take a few minutes...")
    martEns <- useMart(host="grch37.ensembl.org", biomart="ENSEMBL_MART_ENSEMBL", dataset="hsapiens_gene_ensembl", verbose=F)
    martEnsTSS <- getAnnotation(mart=martEns, featureType="TSS")
    martEnsDF <- getBM(attributes=c("ensembl_gene_id", "external_gene_name", "gene_biotype"), mart=martEns)
    message(sprintf("Saving biomaRt data to file:\n%s\n", biomart_data_file))
    save(martEns, martEnsTSS, martEnsDF, file = biomart_data_file)
}
# get the annotations
# Annotate each peak with the nearest TSS, distance measured from the peak
# middle; multiple = TRUE can produce more than one annotation per peak.
message("\nGetting annotations...\n")
peaks_granges <- annotatePeakInBatch(peaks_granges, AnnotationData = martEnsTSS, PeakLocForDistance = "middle", FeatureLocForDistance = "TSS", output = "shortestDistance", multiple = TRUE)
# merge the annotations with the peaks
# all.x = TRUE keeps every peak, even when its feature id has no biomaRt entry.
message("\nMerging annotations...\n")
peaks_granges_df <- merge(as.data.frame(peaks_granges) , martEnsDF , by.x=c("feature"), by.y=c("ensembl_gene_id") , all.x=TRUE)
# save the output
message("\nSaving the output...\n")
write.table(peaks_granges_df, row.names = FALSE, sep = '\t', quote = FALSE,
    file = output_annotated_peaks_file)
message("Session Information:\n")
sessionInfo()
|
cba45aac74c50d45d939bb431edab4e572371c0d | 2589272d2a5a669352a8cefd9f86e9167a75fbfe | /plot2.R | c45c2fe84ec89f98f06f2fa07d93cc83de0c9f07 | [] | no_license | vjcbsn/ExData_Plotting1 | a03a3d7857fc8d39c55195946fd2523f7c92a821 | cd44f7076cd13c15f1a567888c3dcaa48af86e9e | refs/heads/master | 2023-04-13T16:25:13.689364 | 2015-10-09T20:15:58 | 2015-10-09T20:15:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,005 | r | plot2.R | library(data.table)
# reading only lines for 1 and 2 Feb 2007
# `skip = "1/2/2007"` makes fread start at the first line containing that
# literal string; 2880 rows = 2 days x 1440 one-minute readings.
# NOTE(review): fread has no "POSIXlt" column class -- the Date column comes
# back and is converted explicitly below; confirm any class warning is
# harmless.
hpc <- fread("household_power_consumption.txt", na.strings = "?", skip = "1/2/2007", nrows = 2880,
             colClasses = c("POSIXlt", "character", rep("numeric", 7)))
# setting proper variable names
# (the header row was skipped along with the preamble, so names are re-set)
names(hpc) <- c("Date", "Time", "GlobalActivePower",
                "GlobalReactivePower", "Voltage", "GlobalIntensity",
                "SubMetering1", "SubMetering2", "SubMetering3")
# adding Timeline variable
hpc$Date <- as.Date(as.character(hpc$Date), format = "%d/%m/%Y")
hpc$Time <- as.ITime(as.character(hpc$Time), format = "%H:%M:%S")
hpc <- within(hpc, { Timeline = format(as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") })
# opening png graphic device
png("plot2.png")
# setting plot background color
par(bg = "transparent")
# drawing plot
# Timeline (character) is parsed back to POSIXlt for the x axis.
with(hpc, plot(strptime(Timeline, format = "%d/%m/%Y %H:%M:%S"),
               GlobalActivePower, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
945ada78d4c0012b45a3285243f7a545817de17a | 52e168effa6b7c2d7a4efb831fdc786cdffa1a73 | /get_substances.R | c4e0a3bd30cd99ccaf1de50417e9254303dc8c88 | [] | no_license | gezever/emissie_imjv | 33a8dca845c9fbe755b98c36e16b375cdf9d0726 | 637f81e305209db59fb12a13bcddd85643d2194b | refs/heads/master | 2021-05-03T06:38:13.959858 | 2018-02-07T10:30:04 | 2018-02-07T10:30:04 | 120,600,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,912 | r | get_substances.R | library(SPARQL)
# Query the Flemish environmental (IMJV/OMV) SPARQL endpoint for emission
# observations and write them to CSV.
# Step 1 - Set up preliminaries and define query
# Define the endpoint
#endpoint <- "http://lodomv-on-2.vm.cumuli.be:8100/imjv/sparql"
endpoint <- "http://rdfstoreomv-on-1.vm.cumuli.be:3030/rdfstoreomv/archive/query"
# create query statement
# The query joins emission observations (amount > 0) with each emission
# point's coordinates and, via a federated SERVICE call, the address label
# (Dutch) of the exploitation. Do not edit the string casually: it is sent
# verbatim to the endpoint.
query <- "
PREFIX milieu: <https://id.milieuinfo.be/def#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX geo: <http://www.w3.org/2003/01/geo/wgs84_pos#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX http: <http://www.w3.org/2011/http#>
PREFIX sdmx: <http://purl.org/linked-data/sdmx/2009/attribute#>
PREFIX locn: <http://www.w3.org/ns/locn#>
PREFIX dbo: <http://dbpedia.org/ontology/>
SELECT ?jaar ?stof ?hoeveelheid ?eenheid ?latitude ?longitude ?refgebied ?TX_MUNTY_DESCR_NL
WHERE {GRAPH <urn:x-arq:UnionGraph>{
?subject a <http://purl.org/linked-data/cube#Observation> ;
milieu:referentiegebied ?refgebied ;
milieu:hoeveelheid ?hoeveelheid ;
sdmx:unitMeasure ?unit ;
milieu:tijdsperiode ?jaar ;
milieu:substantie ?substantie .
FILTER ( ?hoeveelheid > 0 )
?substantie skos:prefLabel ?s.
?refgebied a milieu:Emissiepunt ;
milieu:exploitatie ?xtie ;
geo:lat ?latitude ;
geo:long ?longitude .
?unit skos:altLabel ?eenheid .
BIND (STR(?s) AS ?stof)
}
SERVICE <http://lodcbbomv-on-1.vm.cumuli.be:8080/lodomv/repositories/cbb> {
?xtie locn:address ?adres .
?adres <http://www.w3.org/ns/locn#postName> ?label.
FILTER (lang(?label) = 'nl')
BIND (STR(?label) AS ?TX_MUNTY_DESCR_NL)
}
}
"
# Step 2 - Use SPARQL package to submit query and save results to a data frame
qd <- SPARQL(endpoint,query)
df <- qd$results
#print(df, quote = TRUE, row.names = FALSE)
# NOTE(review): rbind() on a single data frame is a no-op, presumably a
# leftover from an earlier multi-query version.
full_table<-rbind(df)
write.csv(full_table, file = "data/full_table2.csv")
|
ebfe4822ee1edad69d3b4136fdf5618a30d5d299 | c2e589d75eae2b603abc6b126b1206780e87cf70 | /landscape_young_cohorts.R | d8d9e00d8ac562392730618ab171809146549202 | [] | no_license | MARIASUAM/harvest_x_climate_LANDIS | 13b3a8cfcdc68d3d59a72cda2a31b192a741fe1f | a7810e42c2ded4863432bb7db3befb825e6701e8 | refs/heads/master | 2023-04-18T02:09:23.889062 | 2022-09-14T14:15:40 | 2022-09-14T14:15:40 | 412,398,572 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,157 | r | landscape_young_cohorts.R | # Age distribution
# Plot the distribution of young (1-15 yr) cohorts per species and per
# species group across harvest x climate scenarios, read from LANDIS-II
# Age_95Histogram output files.
library(dplyr)
library(ggplot2)
# Scenario folders: 4 harvest regimes x 3 climate scenarios.
mgmt.scenarios <- c("211129_nomanag_current",
                    "211129_conserv_current",
                    "211129_proactive_current",
                    "211129_proactiveplus_current",
                    "211129_nomanag_rcp45",
                    "211129_conserv_rcp45",
                    "211129_proactive_rcp45",
                    "211129_proactiveplus_rcp45",
                    "211129_nomanag_rcp85",
                    "211129_conserv_rcp85",
                    "211129_proactive_rcp85",
                    "211129_proactiveplus_rcp85") # Folder names with each scenario
di <- "/Volumes/GoogleDrive/My Drive/proj_LANDIS/experiments/" # Path to simulations folder
outputs_folder <- "211129_outputs/" # Subfolder for outputs
# Stack all per-scenario histograms; folder names encode the harvest
# scenario (2nd "_"-separated token) and climate scenario (3rd token).
age_dist <- data.frame()
for (i in seq_along(mgmt.scenarios)) {
  temp <- read.table(paste(di, mgmt.scenarios[i], "_rep1/output/AgeDist/Age_95Histogram.csv", sep =""), header = TRUE, sep = ",") %>%
    mutate(Harv_scenario = strsplit(as.character(mgmt.scenarios[i]), split = "_")[[1]][2],
           Clim_scenario = strsplit(as.character(mgmt.scenarios[i]), split = "_")[[1]][3])
  age_dist <- rbind(age_dist, temp)
}
# Fetch data
# Keep only the youngest age bin (column X.1_15.4.) per species/scenario.
age_dist_young <- age_dist %>%
  select(NrOfCohortsAtAge, X.1_15.4., Harv_scenario, Clim_scenario)
# Per-species dot plot, faceted climate (rows) x harvest (columns);
# junipers, height classes, totals and P. nigra are excluded.
age_dist_young %>%
  filter(NrOfCohortsAtAge != "jcommunis",
         NrOfCohortsAtAge != "joxycedrus",
         NrOfCohortsAtAge != "short",
         NrOfCohortsAtAge != "medium",
         NrOfCohortsAtAge != "tall",
         NrOfCohortsAtAge != "Total",
         NrOfCohortsAtAge != "popnigra") %>%
  ggplot(aes(x = NrOfCohortsAtAge, y = X.1_15.4.)) +
  geom_point(aes(color = NrOfCohortsAtAge)) +
  facet_grid(Clim_scenario ~ Harv_scenario) + # , scales = "free_y"
  theme_classic() +
  theme(legend.position = "bottom",
        axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 14),
        legend.title = element_blank())
# Aggregate species into oak vs pine groups and sum young-cohort counts.
groups <- age_dist_young %>%
  filter(NrOfCohortsAtAge != "jcommunis",
         NrOfCohortsAtAge != "joxycedrus",
         NrOfCohortsAtAge != "short",
         NrOfCohortsAtAge != "medium",
         NrOfCohortsAtAge != "tall",
         NrOfCohortsAtAge != "Total",
         NrOfCohortsAtAge != "popnigra")%>%
  mutate(Group = ifelse(NrOfCohortsAtAge == "qilex", "oak",
                        ifelse(NrOfCohortsAtAge == "qfaginea", "oak",
                               ifelse(NrOfCohortsAtAge == "qpyrenaica", "oak",
                                      "pine")))) %>%
  select(Group, X.1_15.4., Harv_scenario, Clim_scenario) %>%
  group_by(Group, Harv_scenario, Clim_scenario) %>%
  summarise(tot = sum(X.1_15.4.))
# Group-level dot plot with the same faceting.
ggplot(groups, aes(x = Group, y = tot)) +
  geom_point(aes(color = Group)) +
  facet_grid(Clim_scenario ~ Harv_scenario) + # , scales = "free_y"
  theme_classic() +
  theme(legend.position = "bottom",
        axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 14),
        legend.title = element_blank())
|
ee52d6fc5a76f8337589e37417c835cfac5d86f2 | 9591f5820092cf51ce5fb1a42dfe30eb5ab441b9 | /Nowicka2017/quick_analysis/0_correlation_cytokines_frequencies.R | 909654dabed3c53a5a01023818fac0037da41daf | [] | no_license | yhoang/drfz | 443d58837141ffd96a3b0037b07a8d6f67d56350 | 38925398d30737051a3df4b9903bdc8774c18081 | refs/heads/master | 2021-06-30T23:12:36.186438 | 2021-02-09T11:10:32 | 2021-02-09T11:10:32 | 218,074,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,531 | r | 0_correlation_cytokines_frequencies.R |
# Timestamp the run in the console log.
Sys.time()
# Load packages
library(gdata)
library(ggplot2)
library(reshape2)
library(limma) # for strsplit2
library(gtools) # for logit
library(plyr) # for rbind.fill
library(tools)
library(GGally)
library(ComplexHeatmap)
##############################################################################
# Test arguments
##############################################################################
# Hard-coded inputs for interactive runs; in batch use they are overridden
# by the commented-out commandArgs() block below.
outdir='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/correlation'
path_metadata=c('../carsten_cytof/PD1_project/CK_metadata/metadata_23_03.xlsx','../carsten_cytof/PD1_project/CK_metadata/metadata_29_03.xlsx')
path_frequencies=c('../carsten_cytof/PD1_project/CK_2016-06-23_03/050_frequencies/23_03_pca1_merging5_frequencies.xls','../carsten_cytof/PD1_project/CK_2016-06-29_03/050_frequencies/29_03_pca1_merging2_frequencies.xls')
## CD4
prefix='02_IFN_myeloid_CD4_'
path_bimatrix_frequencies='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02v2_CD4/090_cytokine_bimatrix/23CD4allall_29CD4allall_02CD4v2_frequencies.xls'
## CD8
# prefix='02_IFN_myeloid_CD8_'
# path_bimatrix_frequencies='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02v2_CD8/090_cytokine_bimatrix/23CD8allall_29CD8allall_02CD8v2_frequencies.xls'
##############################################################################
# Read in the arguments
##############################################################################
# rm(list = ls())
#
# args <- (commandArgs(trailingOnly = TRUE))
# for (i in 1:length(args)) {
# eval(parse(text = args[[i]]))
# }
#
# cat(paste0(args, collapse = "\n"), fill = TRUE)
##############################################################################
# Create the output directory on first use.
if(!file.exists(outdir))
  dir.create(outdir, recursive = TRUE)
# ------------------------------------------------------------
# Load metadata
# ------------------------------------------------------------
# Read one Excel metadata sheet per panel and stack them; rbind.fill pads
# columns present in only one sheet. Samples are keyed by 'shortname'.
md <- lapply(1:length(path_metadata), function(i){
  md <- read.xls(path_metadata[i], stringsAsFactors = FALSE)
  md
})
md <- plyr::rbind.fill(md)
rownames(md) <- md$shortname
### Factor arrangment
md$response <- factor(md$response, levels = c("NR", "R", "HD"))
md$response <- factor(md$response)
md$day <- factor(md$day, levels = c("base", "tx"))
md$day <- factor(md$day)
md$patient_id <- factor(md$patient_id)
md$data <- factor(md$data)
# Combined data-set x day factor; drives point shapes in the plots below.
md$data_day <- interaction(md$data, md$day, lex.order = TRUE, drop = TRUE)
### Colors
# Build named color vectors keyed by condition, sample, and response.
colors <- unique(md[, c("condition", "color")])
colors$condition <- factor(colors$condition)
## replace _ with \n
levels(colors$condition) <- gsub("_", "\n", levels(colors$condition ))
color_groups <- colors$color
names(color_groups) <- colors$condition
color_groupsb <- adjustcolor(color_groups, alpha = 0.3)
names(color_groupsb) <- colors$condition
color_samples <- md$color
names(color_samples) <- md$shortname
colors <- unique(md[, c("response", "color")])
color_response <- colors$color
names(color_response) <- colors$response
# ------------------------------------------------------------
# Load cluster frequencies
# ------------------------------------------------------------
# Read the per-panel cluster frequency tables and merge them on
# (cluster, label); both panels must describe the same clusters.
prop <- lapply(1:length(path_frequencies), function(i){
  # i = 1
  prop <- read.table(path_frequencies[i], header = TRUE, sep = "\t", as.is = TRUE)
  print(prop[, c("cluster", "label")])
  return(prop)
})
prop_out <- Reduce(function(...) merge(..., by = c("cluster", "label"), all=TRUE, sort = FALSE), prop)
prop_out[, c("cluster", "label")]
## drop the 'drop' cluster
prop_out <- prop_out[prop_out$label != "drop", , drop = FALSE]
# NAs after the outer merge mean the panels disagree on clusters.
if(!all(complete.cases(prop_out))){
  stop("There are some clusters that are not common in the merged data sets or have different cluster number!")
}
## keep only those samples that are also in the metadata file
prop_out <- prop_out[, c("cluster", "label", md$shortname)]
prop_out <- prop_out[order(prop_out$cluster), , drop = FALSE]
### Create labels
labels <- data.frame(cluster = prop_out$cluster, label = prop_out$label)
labels <- labels[order(labels$cluster, decreasing = FALSE), ]
labels$label <- factor(labels$label, levels = unique(labels$label))
labels
# Transpose to samples-in-rows; sanitize labels ('+' -> 'pos', '-' -> '_')
# so they are valid column names usable in aes_string() below.
prop_tmp <- t(prop_out[, md$shortname])
colnames(prop_tmp) <- prop_out$label
prop <- data.frame(sample = md$shortname, prop_tmp, check.names = FALSE)
colnames(prop) <- gsub("+", "pos", colnames(prop), fixed = TRUE)
colnames(prop) <- gsub("-", "_", colnames(prop), fixed = TRUE)
# ------------------------------------------------------------
# Load the bimatrix frequencies
# ------------------------------------------------------------
# Cytokine-positivity ("bimatrix") frequencies; same layout and the same
# label sanitization as the cluster frequencies above.
biprop <- lapply(1:length(path_bimatrix_frequencies), function(i){
  # i = 1
  biprop <- read.table(path_bimatrix_frequencies[i], header = TRUE, sep = "\t", as.is = TRUE)
})
biprop_out <- Reduce(function(...) merge(..., by = c("cluster", "label"), all=TRUE, sort = FALSE), biprop)
biprop_tmp <- t(biprop_out[, md$shortname])
colnames(biprop_tmp) <- biprop_out$label
biprop <- data.frame(sample = md$shortname, biprop_tmp, check.names = FALSE)
colnames(biprop) <- gsub("+", "pos", colnames(biprop), fixed = TRUE)
colnames(biprop) <- gsub("-", "_", colnames(biprop), fixed = TRUE)
# -----------------------------------------------------------------------------
# Prepare the ggadf and gglabels objects
# -----------------------------------------------------------------------------
# Variables to correlate (columns of the merged per-sample data frame).
gglabels <- c("CD14pos_monos", "IFN_gpos")
### Prepare the data for plotting with ggplot
# One row per sample: bimatrix + cluster frequencies plus plotting factors.
ggadf <- merge(biprop, prop, by = "sample", sort = FALSE)
rownames(ggadf) <- ggadf$sample
ggadf$group <- factor(md[rownames(ggadf), "condition"])
## replace _ with \n
levels(ggadf$group) <- gsub("_", "\n", levels(ggadf$group))
ggadf$data_day <- md[rownames(ggadf), "data_day"]
ggadf$data <- md[rownames(ggadf), "data"]
head(ggadf)
# -----------------------------------------------------------------------------
# Correlation analysis
# -----------------------------------------------------------------------------
### Pairs plot with GGally
# ggp <- ggpairs(ggadf[, gglabels]) +
# theme_bw()
#
# pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_ggally.pdf")), 10, 10)
# print(ggp)
# dev.off()
# Point shapes keyed by the data-set x day combination.
shape_data_day <- c(19, 1, 17, 2)
names(shape_data_day) <- levels(ggadf$data_day)
shape_data_day
### Individual paired plots
# One scatter plot (with smoother) per pair of gglabels variables: once over
# all samples, then once per data_day level. Each plot goes to its own PDF.
for(i in 1:(length(gglabels) - 1)){
  for(j in (i+1):length(gglabels)){
    # i = 1; j = 2
    ggp <- ggplot(ggadf) +
      geom_point(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]), shape = "data_day", color = "group"), size = 3, alpha = 0.8) +
      geom_smooth(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]))) +
      theme_bw() +
      theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 16),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
        axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"),
        legend.title = element_blank()) +
      scale_shape_manual(values = shape_data_day) +
      scale_color_manual(values = color_groups)
    pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_pairs_", gglabels[i], "_", gglabels[j] ,"_dataALL.pdf")), width = 7, height = 5)
    print(ggp)
    dev.off()
    ### Plots per data day
    for(dd in levels(ggadf$data_day)){
      ggp <- ggplot(ggadf[ggadf$data_day == dd, , drop = FALSE]) +
        geom_point(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]), shape = "data_day", color = "group"), size = 3, alpha = 0.8) +
        geom_smooth(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]))) +
        theme_bw() +
        theme(axis.text = element_text(size = 14),
          axis.title = element_text(size = 16),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
          axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"),
          legend.title = element_blank()) +
        scale_shape_manual(values = shape_data_day) +
        scale_color_manual(values = color_groups)
      pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_pairs_", gglabels[i], "_", gglabels[j], "_", gsub(".", "", dd, fixed = TRUE) ,".pdf")), width = 7, height = 5)
      print(ggp)
      dev.off()
    }
  }
}
### Heatmap with correlation
# Pairwise correlation matrices (Spearman), written to tab-separated .xls
# files; a ComplexHeatmap of the lower triangle is drawn only when more
# than two variables are being correlated.
corr_methods <- c("spearman")
for(m in 1:length(corr_methods)){
  # m = 1
  mat <- cor(ggadf[, gglabels], method = corr_methods[m], use = "complete.obs")
  out <- data.frame(cluster = rownames(mat), mat, stringsAsFactors = FALSE)
  write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_corr_", corr_methods[m], "_dataALL.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
  # Blank the diagonal and upper triangle so only each pair's lower-triangle
  # cell is displayed.
  mat[upper.tri(mat)] <- NA
  diag(mat) <- NA
  if(length(gglabels) > 2){
    ### Using ComplexHeatmap
    legend_breaks = seq(from = -round(1), to = round(1), by = 0.5)
    ht1 <- Heatmap(mat, name = "Correlation", col = colorRampPalette(c("#dc143c", "#f5f5f5", "#4682b4"), space = "Lab")(15), na_col = "white", cluster_columns = FALSE, cluster_rows = FALSE, row_names_side = "left", heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "continuous", legend_height = unit(40, "mm")), cell_fun = function(j, i, x, y, w, h, col){
      if(j < i)
        grid.text(round(mat[i, j], 2), x, y)
    })
    pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_heat_", corr_methods[m] ,"_asis", "_dataALL.pdf")))
    draw(ht1)
    dev.off()
  }
  ### Analysis per data day
  for(dd in levels(ggadf$data_day)){
    mat <- cor(ggadf[ggadf$data_day == dd, gglabels, drop = FALSE], method = corr_methods[m], use = "complete.obs")
    out <- data.frame(cluster = rownames(mat), mat, stringsAsFactors = FALSE)
    write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_corr_", corr_methods[m], "_", gsub(".", "", dd, fixed = TRUE) ,".xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
  }
}
### Test the correlation coefficients
# For each variable pair: cor.test() p-value (stored in the lower triangle
# of corr_pvs), a titled scatter plot (title = correlation estimate), then
# BH-adjusted p-values written out; repeated per data_day level.
corr_methods <- c("spearman")
for(m in 1:length(corr_methods)){
  # m = 1
  corr_pvs <- matrix(NA, length(gglabels), length(gglabels))
  colnames(corr_pvs) <- gglabels
  rownames(corr_pvs) <- gglabels
  for(i in 1:(length(gglabels) - 1)){
    for(j in (i+1):length(gglabels)){
      # i = 1; j = 2
      print(paste0(gglabels[i], " vs ", gglabels[j]))
      out <- cor.test(x = ggadf[, gglabels[i]], y = ggadf[, gglabels[j]], alternative = "two.sided", method = corr_methods[m])
      corr_pvs[j, i] <- out$p.value
      ggp_title <- paste0(round(out$estimate, 2))
      ### Plot
      ggp <- ggplot(ggadf) +
        geom_point(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]), shape = "data_day", color = "group"), size = 3, alpha = 0.8) +
        geom_smooth(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]))) +
        ggtitle(ggp_title) +
        theme_bw() +
        theme(title = element_text(size = 16),
          axis.text = element_text(size = 14),
          axis.title = element_text(size = 16),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
          axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"),
          legend.title = element_blank()) +
        scale_shape_manual(values = shape_data_day) +
        scale_color_manual(values = color_groups)
      pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_pairs_", corr_methods[m], "_", gglabels[i], "_", gglabels[j] ,"_dataALL.pdf")), width = 7, height = 5)
      print(ggp)
      dev.off()
    }
  }
  # Benjamini-Hochberg adjustment across all tested pairs (NAs pass through).
  corr_apvs <- matrix(p.adjust(corr_pvs, method = "BH"), nrow = length(gglabels), byrow = FALSE)
  colnames(corr_apvs) <- gglabels
  rownames(corr_apvs) <- gglabels
  out <- data.frame(cluster = rownames(corr_apvs), corr_apvs, stringsAsFactors = FALSE)
  write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_apvs_", corr_methods[m], "_dataALL.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
  out <- data.frame(cluster = rownames(corr_pvs), corr_pvs, stringsAsFactors = FALSE)
  write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_pvs_", corr_methods[m], "_dataALL.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
  if(length(gglabels) > 2){
    ### Using ComplexHeatmap
    legend_breaks = c(0, 0.05, 0.1, 1)
    ht1 <- Heatmap(corr_apvs, name = "Correlation", col = colorRampPalette(c("grey50", "grey70", "grey90", "grey90"))(4), na_col = "white", cluster_columns = FALSE, cluster_rows = FALSE, row_names_side = "left", heatmap_legend_param = list(at = legend_breaks, labels = legend_breaks, color_bar = "discrete", legend_height = unit(40, "mm")), cell_fun = function(j, i, x, y, w, h, col){
      if(j < i)
        grid.text(sprintf("%.02e", corr_apvs[i, j]), x, y)
    })
    pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_apvs_", corr_methods[m] ,"_asis", "_dataALL.pdf")))
    draw(ht1)
    dev.off()
  }
  ### Analysis per data day
  for(dd in levels(ggadf$data_day)){
    corr_pvs <- matrix(NA, length(gglabels), length(gglabels))
    colnames(corr_pvs) <- gglabels
    rownames(corr_pvs) <- gglabels
    for(i in 1:(length(gglabels) - 1)){
      for(j in (i+1):length(gglabels)){
        # i = 1; j = 2
        print(paste0(gglabels[i], " vs ", gglabels[j]))
        out <- cor.test(x = ggadf[ggadf$data_day == dd, gglabels[i]], y = ggadf[ggadf$data_day == dd, gglabels[j]], alternative = "two.sided", method = corr_methods[m])
        corr_pvs[j, i] <- out$p.value
        ggp_title <- paste0(round(out$estimate, 2))
        ### Plot
        ggp <- ggplot(ggadf[ggadf$data_day == dd, , drop = FALSE]) +
          geom_point(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]), shape = "data_day", color = "group"), size = 3, alpha = 0.8) +
          geom_smooth(aes_string(x = as.character(gglabels[i]), y = as.character(gglabels[j]))) +
          ggtitle(ggp_title) +
          theme_bw() +
          theme(title = element_text(size = 16),
            axis.text = element_text(size = 14),
            axis.title = element_text(size = 16),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            panel.border = element_blank(),
            axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
            axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"),
            legend.title = element_blank()) +
          scale_shape_manual(values = shape_data_day) +
          scale_color_manual(values = color_groups)
        pdf(file.path(outdir, paste0(prefix, "frequencies_plot_corr_pairs_", corr_methods[m], "_", gglabels[i], "_", gglabels[j], "_", gsub(".", "", dd, fixed = TRUE) ,".pdf")), width = 7, height = 5)
        print(ggp)
        dev.off()
      }
    }
    corr_apvs <- matrix(p.adjust(corr_pvs, method = "BH"), nrow = length(gglabels), byrow = FALSE)
    colnames(corr_apvs) <- gglabels
    rownames(corr_apvs) <- gglabels
    out <- data.frame(cluster = rownames(corr_apvs), corr_apvs, stringsAsFactors = FALSE)
    write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_apvs_", corr_methods[m], "_", gsub(".", "", dd, fixed = TRUE), ".xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
    out <- data.frame(cluster = rownames(corr_pvs), corr_pvs, stringsAsFactors = FALSE)
    write.table(out, file.path(outdir, paste0(prefix, "frequencies_plot_corr_pvs_", corr_methods[m], "_", gsub(".", "", dd, fixed = TRUE), ".xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
  }
}
sessionInfo()
|
138b05d7279b2f5152a1b57dc7c418f173b63ca5 | d2f880cbca787d6ebf0d6535e9e427a12997afcd | /install_inzight.R | c6e24392644df52cf751123dc1a7d400ee8f52b5 | [] | no_license | iNZightVIT/mac-installer | 9276be24505984fd7777cba6fb423854717ebdb8 | db1f094b907422e5a4582c6a5744cde1bab68f5f | refs/heads/master | 2020-04-18T12:12:33.071073 | 2019-05-31T08:30:56 | 2019-05-31T08:30:56 | 167,527,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 734 | r | install_inzight.R | local({
if (Sys.info()[['sysname']] != 'Darwin') return()
.Platform$pkgType = 'mac.binary.el-capitan'
unlockBinding('.Platform', baseenv())
assign('.Platform', .Platform, 'package:base')
lockBinding('.Platform', baseenv())
options(
pkgType = 'both', install.packages.compile.from.source = 'always',
repos = c(
'https://r.docker.stat.auckland.ac.nz',
'https://cran.rstudio.com'
)
)
})
# Create a dedicated library folder and install the iNZightVIT package
# stack into it (repos and pkgType are configured by the local() block above).
dir.create('iNZightVIT/.library', recursive = TRUE)
utils::install.packages(
  c(
    'iNZight',
    'iNZightModules',
    'iNZightTS',
    'iNZightMR',
    'iNZightPlots',
    'iNZightRegression',
    'iNZightTools',
    'vit'
  ),
  lib = 'iNZightVIT/.library'
)
|
7d3e1ba38bc455a4cf60fa5d10edd773b90dfef6 | 9262d48a13eb12331e10b8d8c8689e8de160e63c | /R/edas-datapreprocessing-daytype.R | 99590122bb54895931ffdc73df4fb135a965dbcd | [] | no_license | jinlee86/bems_analysisr_package | 9a69a7cf8642773113a276140eab067537a6f658 | a4a5b10a0198b648e6f8ede5927b4d7c7d3fc702 | refs/heads/master | 2021-01-21T19:27:52.151776 | 2017-06-29T05:44:07 | 2017-06-29T05:44:07 | 92,139,188 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,443 | r | edas-datapreprocessing-daytype.R | #' EDAS Data Preprocess - Working/Non-Working/Special Days
#' 데이터를 주중/주말/특별한 날로 구분
#'
#' @param dataset 데이터 프레임 (data.frame) 형식의 input 데이터 셋
#' @param datetimeCol POSIXCT 타입의 날짜 시간 값을 나타내는 컬럼 명
#' @param workingdays 주중을 정의 하는 요일들(1~7) numeric list, 기본값으로 월요일부터 금요일 (1~5)
#' @param specialdays Optional 특별한 날을 정의 하는 character list, 날짜 포맷 yyyy-mm-dd
#' @return
#' indextable - 데이터 프레임 (type, value), 주중/주말/특별한 날로 데이터가 어떤 값으로 구분되어있는지를 보여줌
#' workingdays - list 형식의 업무요일 값(들)
#' nonworkingdays - list 형식의 비업무요일 값(들)
#' specialdays - list 형식의 특별한 날짜(들)
#' eval - input 데이터 셋(데이터 프레임)에 요일(dayOfweek) 값과 주중/주말/특별한 날을(datetype) 나타내는 값을
#' 추가한 결과
#' @family EDAS
#' @examples
#' #generate a sample dataset
#' dataset <- data.frame("Datetime"=seq(ISOdate(1900,1,1), ISOdate(1901,1,1), length.out=20),
#' "x"=seq(1,10,length.out=20))
#'
#' edas.datapreprocessing.daytype(dataset, "Datetime")
#'
#' specialdays <- c("2016-01-02", "2016-01-03")
#' edas.datapreprocessing.daytype(dataset, "Datetime", specialdays = specialdays )
#' @export
edas.datapreprocessing.daytype <- function(dataset,
datetimeCol,
workingdays=c(1,2,3,4,5),
specialdays=NULL)
{
dataset$dayOfweek <- format(dataset[,datetimeCol], format="%u")
dataset$datetype <- sapply(dataset$dayOfweek, function(x) ifelse(x %in% workingdays, 1, 2))
if (!is.null(specialdays) && is(specialdays, "Date"))
dataset[format(dataset[,datetimeCol], format="%Y-%m-%d") %in% as.character(specialdays),]$datetype <- 3
daytypeIndexTable <- data.frame(type=c("workingday", "non-workingday", "specialday"),
value=c(1,2,3))
preprocess_report <- list("indextable"=daytypeIndexTable,
"workingdays"=workingdays,
"nonworkingdays"=setdiff(1:7,workingdays),
"specialdays"=specialdays,
"eval"=dataset)
return(preprocess_report)
}
|
4c8b17ab0e911989baeb46bfc1c8b34617ddfdd0 | b08b7e3160ae9947b6046123acad8f59152375c3 | /Programming Language Detection/Experiment-2/Dataset/Train/R/sha-1.r | 03a9958011fb8aecf4bc870a51a6a08b19f00636 | [] | no_license | dlaststark/machine-learning-projects | efb0a28c664419275e87eb612c89054164fe1eb0 | eaa0c96d4d1c15934d63035b837636a6d11736e3 | refs/heads/master | 2022-12-06T08:36:09.867677 | 2022-11-20T13:17:25 | 2022-11-20T13:17:25 | 246,379,103 | 9 | 5 | null | null | null | null | UTF-8 | R | false | false | 100 | r | sha-1.r | library(digest)
# Compute and print the SHA-1 digest of the sample string, newline-terminated.
message_text <- "Rosetta Code"
sha1_hex <- digest(message_text, algo = "sha1", serialize = FALSE)
cat(sha1_hex, "\n")
|
dc51dbe20f8185bc3698b53cd469c40f636000a4 | 402733edf0ee3946671f7c8272b9fbd51f21420e | /src/01basic_stats.R | 070aa89968f04b715d95f086ab2e1361a1bb1fae | [] | no_license | amphibian-exeff/vanmeter_bodyburdens_envpoll2014 | a55cb6cf5fb2ce793c9a23f5033bd0d3705b5273 | ddfeaf59f7408f718010e75d6c053acd768ccdb5 | refs/heads/master | 2023-07-12T12:48:26.664855 | 2021-08-09T03:59:00 | 2021-08-09T03:59:00 | 41,526,957 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,567 | r | 01basic_stats.R | library(ggplot2)
library(plyr)
summary(allfrog)
colnames(allfrog)
# indices
#frogs
unique.frogs <- unique(allfrog$Species)
for(frog in unique.frogs){print(frog)}
index.green <- which(allfrog$Species=="Green treefrog")
print(paste("# green =",length(index.green)))
index.barking <- which(allfrog$Species=="Barking treefrog")
print(paste("# barking =",length(index.barking)))
index.mole <- which(allfrog$Species=="Mole salamander")
print(paste("# mole salamanders =",length(index.mole)))
index.leopard <- which(allfrog$Species=="Leopard frog")
print(paste("# leopard =",length(index.leopard)))
index.fowlers <- which(allfrog$Species=="Fowlers toad")
print(paste("# fowlers =",length(index.fowlers)))
index.gray <- which(allfrog$Species=="Gray treefrog")
print(paste("# gray =",length(index.gray)))
index.cricket <- which(allfrog$Species=="Cricket frog")
print(paste("# cricket =",length(index.cricket)))
index.narrowmouth <- which(allfrog$Species=="Narrowmouth toad")
print(paste("# narrowmouth =",length(index.narrowmouth)))
print(paste("frog records =",length(allfrog$Species)))
count.frogs = length(index.green) + length(index.barking)+ length(index.mole)+ length(index.leopard) +
length(index.fowlers)+ length(index.gray)+ length(index.cricket)+ length(index.narrowmouth)
print(paste("frog species records =",count.frogs))
#chemicals
unique.chemicals <- unique(allfrog$Chemical)
for(chemical in unique.chemicals){print(chemical)}
index.atrazine <- which(allfrog$Chemical=="Atrazine")
print(paste("# atrazine =",length(index.atrazine)))
index.fipronil <- which(allfrog$Chemical=="Fipronil")
print(paste("# fipronil =",length(index.fipronil)))
index.pendimethalin <- which(allfrog$Chemical=="Pendimethalin")
print(paste("# pendimethalin =",length(index.pendimethalin)))
index.triadimefon <- which(allfrog$Chemical=="Triadimefon")
print(paste("# triadimefon =",length(index.triadimefon)))
index.imidacloprid <- which(allfrog$Chemical=="Imidacloprid")
print(paste("# imidacloprid =",length(index.imidacloprid)))
Nchemicals = length(index.atrazine)+length(index.fipronil)+length(index.pendimethalin)+length(index.triadimefon)+length(index.imidacloprid)
print(paste("# chemicals =",Nchemicals))
#metabolites
index.sulfone <- which(allfrog$Chemical=="Fipronil-Sulfone")
print(paste("# sulfone =",length(index.sulfone)))
index.triadimenol <- which(allfrog$Chemical=="Triadimenol")
print(paste("# triadimenol =",length(index.triadimenol)))
index.deisopropyl <- which(allfrog$Chemical=="Deisopropyl Atrazine")
print(paste("# deisopropyl =",length(index.deisopropyl)))
index.desethyl <- which(allfrog$Chemical=="Desethyl Atrazine")
print(paste("# desethyl =",length(index.desethyl)))
Nmetabolites=length(index.sulfone)+length(index.triadimenol)+length(index.deisopropyl)+length(index.desethyl)
print(paste("# metabolites =",Nmetabolites))
#totals
index.totalatrazine <- which(allfrog$Chemical=="Total Atrazine")
print(paste("# total atrazine =",length(index.totalatrazine)))
index.totaltriadimefon <- which(allfrog$Chemical=="Total Triadimefon")
print(paste("# total triadimefon=",length(index.totaltriadimefon)))
index.totalfipronil <- which(allfrog$Chemical=="Total Fipronil")
print(paste("# total fipronil=",length(index.totalfipronil)))
Ntotals = length(index.totalatrazine)+length(index.totaltriadimefon)+length(index.totalfipronil)
print(paste("# totals =",Ntotals))
Ntotaltotal = Nchemicals + Nmetabolites+Ntotals
print(paste("# total chemical entries =",Ntotaltotal))
print(paste("frog species records =",count.frogs))
#instruments
unique.instruments <- unique(allfrog$Instrument)
for(instrument in unique.instruments){print(instrument)}
index.gcms <- which(allfrog$Instrument=="GCMS")
index.lcms <- which(allfrog$Instrument=="LCMS")
#applications
unique.applications <- unique(allfrog$Application)
for(application in unique.applications){print(application)}
index.soil <- which(allfrog$Application=="Soil")
index.overspray <- which(allfrog$Application=="Overspray")
#construct some factor fields as labels
attach(allfrog)
allfrog$ChemLabel <- paste("Log",allfrog$logKow,allfrog$Chemical,allfrog$Application,allfrog$Instrument)
allfrog$ChemLabel <- as.factor(allfrog$ChemLabel)
unique(paste(Chemical,Application,Instrument))
##############################
#basic histograms and test for normality
# Split the records by application route (soil vs overspray).
allsoil <- allfrog[index.soil,]
dim(allsoil)
#allsoil.lcms <- allsoil[which(allsoil$Instrument=="LCMS"),]
#allsoil.gcms <- allsoil[which(allsoil$Instrument=="GCMS"),]
#View(allsoil)
#View(allsoil.lcms)
#View(allsoil.gcms)
alloverspray <- allfrog[index.overspray,]
dim(alloverspray)
#View(alloverspray)
unique(alloverspray$Species)
# Soil-application records restricted to the species that also received the
# overspray treatment.
# BUG FIX: the original computed which(allfrog$Species==unique(alloverspray$Species))
# and used those allfrog row positions to subset allsoil, whose rows do not
# line up with allfrog; '==' against a possibly multi-element unique() also
# recycles silently. Match on allsoil itself with %in% instead.
index.allsoil.overspray <- which(allsoil$Species %in% unique(alloverspray$Species))
allsoil.overspray <- allsoil[index.allsoil.overspray,]
dim(allsoil.overspray)
#View(alloverspray)
#alloverspray.lcms <- alloverspray[which(alloverspray$Instrument=="LCMS"),]
#alloverspray.gcms <- alloverspray[which(alloverspray$Instrument=="GCMS"),]
#View(alloverspray)
## lump triademefons and fipronils and atrazines (tba)
## barkers and greens
#ignore frogs as a factor for distribution fitting
##LCMS
# Side-by-side tissue-concentration histograms (soil vs overspray) per
# chemical, annotated with the Welch t-test p-value; written as a
# 2x2-panel PDF. `frog_out` is the output directory defined elsewhere.
pdf(paste(frog_out,"hist_app_overspray.pdf",sep=""),width=11,height=8)
par(mfrow=c(2,2))
for(chemical in unique.chemicals){
  chem.soil <- allsoil.overspray$TissueConc[allsoil.overspray$Chemical==chemical]
  chem.overspray <- alloverspray$TissueConc[alloverspray$Chemical==chemical]
  this.instrument <- unique(allsoil.overspray$Instrument[allsoil.overspray$Chemical==chemical])
  #report out sample size
  print(paste(chemical,this.instrument, "soil samples = ", length(chem.soil)," overspray samples = ", length(chem.overspray)))
  if(length(chem.soil)>0 && length(chem.overspray)>0){
    # Shared x-range so the two panels are directly comparable.
    histmin <- min(c(chem.soil,chem.overspray),na.rm=TRUE)
    histmax <- max(c(chem.soil,chem.overspray),na.rm=TRUE)
    t.p <- round(t.test(chem.soil,chem.overspray)$p.value,digits=5)
    hist(chem.soil,main=paste(this.instrument,chemical,"p=",t.p),xlab="Soil Application: Tissue Concentration",col="blue",xlim=c(histmin,histmax))
    hist(chem.overspray,main=paste(this.instrument,chemical,"p=",t.p),xlab="Overspray Application: Tissue Concentration",col="red",xlim=c(histmin,histmax))
  }
}
dev.off()
#lcms boxplots for barkers and greens that compare soil versus overspray
# for imidacloprid, total atrazine, total triadimefon, total fipronil, and pendimethalin
index.goodchems = c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
spray.boxplot <- allfrog[index.goodchems,]
spray.boxplot <- spray.boxplot[spray.boxplot$Instrument=="LCMS",]
View(spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",])
spray.boxplot <- spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",]
dim(spray.boxplot)
View(spray.boxplot)
colnames(spray.boxplot)
# Relabel the "Total *" entries with the parent-compound names for plotting.
# NOTE(review): column 3 is assumed to be 'Chemical' - a name-based
# assignment would be safer; confirm before reuse.
spray.boxplot[which(spray.boxplot$Chemical=="Total Atrazine"),3] = "Atrazine"
spray.boxplot[which(spray.boxplot$Chemical=="Total Triadimefon"),3] = "Triadimefon"
spray.boxplot[which(spray.boxplot$Chemical=="Total Fipronil"),3] = "Fipronil"
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # Arrange several ggplot objects on one device page using the grid package.
  # Plots are collected from ... and/or `plotlist`; `cols` gives the number of
  # layout columns unless an explicit `layout` index matrix is supplied
  # (cells hold plot numbers; a repeated number makes a plot span cells).
  # `file` is kept in the signature for backward compatibility but is unused.
  require(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)

  # If layout is NULL, build a cols-wide grid filled column by column.
  # seq_len() replaces seq(1, cols * ceiling(n/cols)) and 1:numPlots, which
  # produced the bogus c(1, 0) sequence and crashed when no plots were given.
  if (is.null(layout)) {
    nRows <- ceiling(numPlots / cols)
    layout <- matrix(seq_len(cols * nRows), ncol = cols, nrow = nRows)
  }

  if (numPlots == 1) {
    print(plots[[1]])
  } else if (numPlots > 1) {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in seq_len(numPlots)) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
dim(spray.boxplot)
View(spray.boxplot)
# NOTE(review): rows 11-15 of column 11 are blanked by position - confirm
# which observations/column this targets before rerunning on new data.
spray.boxplot[11:15,11] <- NA
# BCF boxplots, soil vs overspray per pesticide (ordered by logKow); one
# panel per species, stacked on a single page with multiplot(). Asterisks
# annotate significance levels; "A"/"B" are panel labels.
# NOTE(review): spray.boxplot=="Barking treefrog" compares the whole data
# frame; combined with na.omit() this effectively selects that species'
# rows, but filtering on the Species column would be clearer.
pdf(paste(frog_out,"boxplot_soil_spray_bcf.pdf",sep=""),width=8.5,height=11)
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, BCF, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog BCF")+annotate("text", x=5, y=3.3, label="A")+
  annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="*")+
  annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  annotate("text", x=5, y=-0.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), BCF, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog BCF")+annotate("text", x=4, y=1.2, label="B")+
  annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="**")+
  annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# Same layout, but plotting raw tissue concentration instead of BCF.
pdf(paste(frog_out,"boxplot_soil_spray_tissueconc.pdf",sep=""),width=8.5,height=11)
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, TissueConc, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog Tissue Concentration (ppm)")+annotate("text", x=5, y=17, label="A")+
  annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="*")+
  annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  annotate("text", x=5, y=-1.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), TissueConc, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog Tissue Concentration (ppm)")+annotate("text", x=4, y=21, label="B")+
  annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="**")+
  annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# Mean BCF barcharts with +/- 1 SD error bars, soil vs overspray dodged,
# one panel per species on a single page.
pdf(paste(frog_out,"barchart_soil_spray.pdf",sep=""),width=8.5,height=11)
#create a data frame with averages and standard deviations
bt <- spray.boxplot[spray.boxplot=="Barking treefrog",]
bcf.avg<-ddply(bt, c("Chemical", "Application"), function(df)
  return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
#create the barplot component
dodge <- position_dodge(width=0.9)
avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
                data=bcf.avg, xlab="",ylab="Barking treefrog BCF",geom="bar", position="dodge")
#add error bars
p1 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd),position="dodge")+
  annotate("text", x=5, y=3.3, label="A")+theme_bw()+ labs(fill="Application")
gt <- spray.boxplot[spray.boxplot=="Green treefrog",]
bcf.avg<-ddply(gt, c("Chemical", "Application"), function(df)
  return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
# NOTE(review): positional blanking of row 5, columns 3-4 (bcf.sd,
# bcf.logKow) - confirm the targeted group before rerunning on new data.
bcf.avg[5,3]=NA
bcf.avg[5,4]=NA
#create the barplot component
dodge <- position_dodge(width=0.9)
avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
                data=bcf.avg, xlab="Pesticide",ylab="Green treefrog BCF",geom="bar", position="dodge")
#add error bars
p2 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd), position="dodge")+
  annotate("text", x=5, y=1.2, label="B")+theme_bw()+ labs(fill="Application")
multiplot(p1, p2)
dev.off()
# ##GCMS
# pdf(paste(frog_out,"hist_app_gcms.pdf",sep=""),width=11,height=8)
# par(mfrow=c(2,2))
# for(chemical in unique.chemicals){
# chem.soil <- allsoil.gcms$TissueConc[allsoil$Chemical==chemical]
# chem.overspray <- alloverspray.gcms$TissueConc[alloverspray$Chemical==chemical]
# #report out sample size
# print(paste(chemical, "gcms ","soil samples = ", length(chem.soil)," overspray samples = ", length(chem.overspray)))
# if(length(chem.soil)>0 && length(chem.overspray)>0){
# histmin <- min(c(chem.soil,chem.overspray),na.rm=TRUE)
# histmax <- max(c(chem.soil,chem.overspray),na.rm=TRUE)
# t.p <- round(t.test(chem.soil,chem.overspray)$p.value,digits=5)
# hist(chem.soil,main=paste("GCMS:",chemical,"p=",t.p),xlab="Soil Application: Tissue Concentration",col="blue",xlim=c(histmin,histmax))
# hist(chem.overspray,main=paste("GCMS:",chemical,"p=",t.p),xlab="Overspray Application: Tissue Concentration",col="red",xlim=c(histmin,histmax))
# }
# }
# dev.off()
#################
#plot means (gplots::plotmeans -- group means with 95% CI bars)
# NOTE(review): "Imidacloprod" is a typo for "Imidacloprid", and its dose is
# missing from the first label ("()") -- left as-is here since these are
# runtime strings; fix in a code change.
AppLabels <- c("Imidacloprod ()","Fipronil (1.43 mg)","Triadimefon (3.57 mg)","Pendimethalin (25.14 mg)","Atrazine (29.34 mg)")
pdf(paste(frog_out,"soil_means_w_CIs_AllChemicals.pdf",sep=""),width=11,height=8)
par(mfrow=c(1,1))
# soil concentrations, grouped by chemical x application x instrument
plotmeans(log(allfrog$SoilConc)~allfrog$Chemical+allfrog$Application+allfrog$Instrument,xlab="Application Rate (mg/cm^2)", xaxt="n",
ylab="ln(SoilConc)", main="Soil Concentrations for All Aquaria: Mean Plot with 95% CI",
barwidth=2,col="dark green")
axis(side=1,at=c(1,2,3,4),labels=AppLabels[1:4])
plotmeans(log(allfrog$SoilConc)~allfrog$ChemLabel,xlab="Chemical", ylab="ln(SoilConc)",
main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
dev.off()
# NOTE(review): the two plotmeans calls below come after dev.off() and before
# the next pdf() -- they draw to the default device (or error if none is
# open), so they never reach a PDF. Confirm whether they should be inside one
# of the pdf sections.
plotmeans(log(permeability)~ChemLabel,xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
plotmeans(permeability~ChemLabel,xlab="Chemical", ylab="Dermal Soil-Frog Transfer Coefficient", main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
## plotmeans for 5 main chemicals- bcf
pdf(paste(frog_out,"soil_bcf_chemicals_lcms.pdf",sep=""),width=11,height=8)
# index.somechemicals <- c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
# 1,5,2,4,3
index.somechemicals <- c(index.imidacloprid,index.pendimethalin,index.totalatrazine,index.totalfipronil,index.totaltriadimefon)
# Restrict to soil-application rows measured by LCMS.
index.soil5chem <- intersect(index.somechemicals,index.soil)
index.soil5chem.lcms <- intersect(index.soil5chem,index.lcms)
#View(allfrog[index.soil5chemlcms,])
KowLabels <- c("Imidacloprod \n Log Kow = 0.57","Atrazine \n Log Kow = 2.5","Triadimefon \n Log Kow = 3.11",
"Fipronil \n Log Kow = 4","Pendimethalin \n Log Kow = 5.18")
plotmeans(allfrog[index.soil5chem.lcms,]$BCF~allfrog[index.soil5chem.lcms,]$ChemLabel,xlab="Chemical", xaxt="n",
ylab="Soil-Frog BCF", main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
axis(side=1,at=c(1,2,3,4,5),labels=KowLabels[1:5])
dev.off()
## plotmeans for 5 main chemicals- ln(soil-frog dermal transfer coefficient)
pdf(paste(frog_out,"soil_dermaltransfer_chemicals_lcms.pdf",sep=""),width=11,height=8)
index.somechemicals <- c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
index.soil5chem <- intersect(index.somechemicals,index.soil)
index.soil5chem.lcms <- intersect(index.soil5chem,index.lcms)
#View(allfrog[index.soil5chemlcms,])
KowLabels <- c("Imidacloprod \n Log Kow = 0.57","Atrazine \n Log Kow = 2.5","Triadimefon \n Log Kow = 3.11",
"Fipronil \n Log Kow = 4","Pendimethalin \n Log Kow = 5.18")
plotmeans(log(allfrog[index.soil5chem.lcms,]$AppFactor)~allfrog[index.soil5chem.lcms,]$ChemLabel,xlab="Chemical",xaxt="n",
ylab="ln(Soil-Frog Transfer Coefficient)", main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
axis(side=1,at=c(1,2,3,4,5),labels=KowLabels[1:5])
dev.off()
## plotmeans for 8 species- ln(soil-frog dermal transfer coefficient)
pdf(paste(frog_out,"soil_dermaltransfer_species_lcms.pdf",sep=""),width=14,height=8)
index.somechemicals <- c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
index.soil5chem <- intersect(index.somechemicals,index.soil)
index.soil5chem.lcms <- intersect(index.soil5chem,index.lcms)
#View(allfrog[index.soil5chemlcms,])
KowLabels <- c("Imidacloprod \n Log Kow = 0.57","Atrazine \n Log Kow = 2.5","Triadimefon \n Log Kow = 3.11",
"Fipronil \n Log Kow = 4","Pendimethalin \n Log Kow = 5.18")
plotmeans(log(allfrog[index.soil5chem.lcms,]$AppFactor)~allfrog[index.soil5chem.lcms,]$Species,xlab="Species",
ylab="ln(Soil-Frog Transfer Coefficient)", main="All Species: Mean Plot with 95% CI",barwidth=2,col="dark green")
#axis(side=1,at=c(1,2,3,4,5),labels=KowLabels[1:5])
dev.off()
# Mean +/- 95% CI plots pooled over all chemicals, grouped by species.
pdf(paste(frog_root,"means_w_CIs_AllSpecies.pdf",sep=""),width=11,height=8)
par(mfrow=c(1,1))
plotmeans(log(SoilConc)~Species,xlab="Species", ylab="ln(SoilConc)", main="All Chemicals: Mean Plot with 95% CI",barwidth=2,col="dark green")
plotmeans(log(permeability)~Species,xlab="Species", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="All Chemicals: Mean Plot with 95% CI",barwidth=2,col="dark green")
plotmeans(permeability~Species,xlab="Species", ylab="Dermal Soil-Frog Transfer Coefficient", main="All Chemicals: Mean Plot with 95% CI",barwidth=2,col="dark green")
dev.off()
# One panel per species (2 per page); shared ylim so pages are comparable.
pdf(paste(frog_root,"means_w_CIs_IndividualSpecies.pdf",sep=""),width=8,height=10)
par(mfrow=c(2,1))
plotmeans(log(permeability[index.barking])~ChemLabel[index.barking],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Barking treefrog: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.fowlers])~ChemLabel[index.fowlers],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Fowlers toad: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.gray])~ChemLabel[index.gray],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Gray treefrog: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.green])~ChemLabel[index.green],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Green treefrog: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.leopard])~ChemLabel[index.leopard],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Leopard frog: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.mole])~ChemLabel[index.mole],xlab="Chemical", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Mole salamander: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
dev.off()
# One panel per chemical, grouped by species.
pdf(paste(frog_root,"means_w_CIs_IndividualChemicals.pdf",sep=""),width=8,height=10)
par(mfrow=c(2,1))
plotmeans(log(permeability[index.atrazine])~Species[index.atrazine],xlab="Species", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Atrazine: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.fipronil])~Species[index.fipronil],xlab="Species", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Fipronil: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.pendimethalin])~Species[index.pendimethalin],xlab="Species", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Pendimethalin: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
plotmeans(log(permeability[index.triadimefon])~Species[index.triadimefon],xlab="Species", ylab="ln(Dermal Soil-Frog Transfer Coefficient)", main="Triadimefon: Mean Plot with 95% CI",ylim=c(-6.7,0),barwidth=2,col="dark green")
dev.off()
# Observed permeability vs logKow, with fitted values from three models
# overlaid at small x offsets so the point clouds do not overplot.
pdf(paste(frog_root,"checking_predictions.pdf",sep=""),width=11,height=8)
par(mfrow=c(1,1))
plot(logKow,permeability,col="dark red",xlim=c(2.3,5.7))
points(logKow+0.1,predict(lm.nospecies))
points(logKow+0.2,predict(ancova.species.1))
points(logKow+0.3,predict(ancova.species.2))
# Conditioning plots of permeability against covariate pairs.
coplot(permeability~Species|molmass_gmol+Solat20C_mgL)
coplot(permeability~Species|Chemical+molmass_gmol)
coplot(permeability~Chemical|Species+molmass_gmol)
dev.off()
# 2x2 boxplot pages: each response (permeability, tissue conc, soil conc,
# uptake proportion) against the four physico-chemical covariates.
pdf(paste(frog_root,"allspecies_boxplots.pdf",sep=""),width=11,height=8)
par(mfrow=c(2,2))
boxplot(permeability~logKow,data=mlrfrog, xlab="Log Kow", ylab="Dermal Soil-Frog Transfer Coefficient",col="firebrick2")
boxplot(permeability~Solat20C_mgL,data=mlrfrog, xlab="Solubility", ylab="Dermal Soil-Frog Transfer Coefficient",col="firebrick2")
boxplot(permeability~molmass_gmol,data=mlrfrog, xlab="Molecular Mass", ylab="Dermal Soil-Frog Transfer Coefficient",col="firebrick2")
boxplot(permeability~Koc_gmL,data=mlrfrog, xlab="Koc", ylab="Dermal Soil-Frog Transfer Coefficient",col="firebrick2")
par(mfrow=c(2,2))
boxplot(Tissue~logKow,data=mlrfrog, xlab="Log Kow", ylab="Tissue Concentration",col="firebrick2")
boxplot(Tissue~Solat20C_mgL,data=mlrfrog, xlab="Solubility", ylab="Tissue Concentration",col="firebrick2")
boxplot(Tissue~molmass_gmol,data=mlrfrog, xlab="Molecular Mass", ylab="Tissue Concentration",col="firebrick2")
boxplot(Tissue~Koc_gmL,data=mlrfrog, xlab="Koc", ylab="Tissue Concentration",col="firebrick2")
par(mfrow=c(2,2))
boxplot(SoilConc~logKow,data=mlrfrog, xlab="Log Kow", ylab="Soil Concentration",col="firebrick2")
boxplot(SoilConc~Solat20C_mgL,data=mlrfrog, xlab="Solubility", ylab="Soil Concentration",col="firebrick2")
boxplot(SoilConc~molmass_gmol,data=mlrfrog, xlab="Molecular Mass", ylab="Soil Concentration",col="firebrick2")
boxplot(SoilConc~Koc_gmL,data=mlrfrog, xlab="Koc", ylab="Soil Concentration",col="firebrick2")
par(mfrow=c(2,2))
boxplot(AppFactor~logKow,data=mlrfrog, xlab="Log Kow", ylab="Uptake Proportion",col="firebrick2")
boxplot(AppFactor~Solat20C_mgL,data=mlrfrog, xlab="Solubility", ylab="Uptake Proportion",col="firebrick2")
boxplot(AppFactor~molmass_gmol,data=mlrfrog, xlab="Molecular Mass", ylab="Uptake Proportion",col="firebrick2")
boxplot(AppFactor~Koc_gmL,data=mlrfrog, xlab="Koc", ylab="Uptake Proportion",col="firebrick2")
dev.off() |
cf17cf10361200e8b2b659ace9cbca807265a8a9 | aaac559889d1968ee128d67460bcf4a4272e39fb | /figure/Plot 4.R | 49c102db86a6def860b0df9a4bbb78ed37d79e92 | [] | no_license | Omar-Ma/ExData_Plotting1 | 7a6f9cd928afe2f42ac50f6d0e9edc5e680b99a7 | 4bfad1eb25ea314250548c63f399a7424c03ef17 | refs/heads/master | 2021-01-09T07:02:54.416243 | 2014-10-12T23:04:21 | 2014-10-12T23:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,086 | r | Plot 4.R | da<-read.table("household_power_consumption.txt",sep=";",header=T)
# Plot 4 -- 2x2 panel of household power readings for 2007-02-01/02.
# Expects `da` (read above from household_power_consumption.txt) with Date and
# Time stored as character in d/m/Y and H:M:S formats.
da$Date1 <- as.Date(da$Date, "%d/%m/%Y")
# Keep only the two target days.
da1 <- subset(da, Date1 == "2007-02-01" | Date1 == "2007-02-02")
# Build a POSIX datetime for the x-axis of every panel.
da1$DateAndTime <- paste(da1$Date, da1$Time)
da1$DateAndTime <- strptime(da1$DateAndTime, "%d/%m/%Y %H:%M:%OS")
par(mfrow = c(2, 2))
with(da1, {
  # Top-left: global active power.
  plot(DateAndTime, Global_active_power, type = "l", xlab = "",
       ylab = "Global Active Power (kilowatts)")
  # Top-right: voltage.
  plot(DateAndTime, Voltage, type = "l", xlab = "daytime", ylab = "Voltage")
  # Bottom-left: the three sub-metering series overlaid on an empty frame.
  plot(DateAndTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n")
  points(DateAndTime, Sub_metering_1, col = "black", type = "l")
  points(DateAndTime, Sub_metering_2, col = "red", type = "l")
  points(DateAndTime, Sub_metering_3, col = "blue", type = "l")
  # BUG FIX: the third legend entry previously read "Sub_metering_1"; it must
  # label the blue Sub_metering_3 series.
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # Bottom-right: global reactive power.
  plot(DateAndTime, Global_reactive_power, type = "l", xlab = "daytime",
       ylab = "Global_reactive_power")
})
# Copy the screen device to plot4.png (default 480x480); dev.off() follows.
dev.copy(png, file = "plot4.png")
dev.off() |
8da4b1db89fe126fec733c64406aab104c8f41da | e9666e75d8b6b24e9982848c24f2b3fa4bf32c1d | /NeuralNet/oneA.R | d320f05b784fd56551befcbfac30d98092ab7f61 | [] | no_license | mesfind/CODATA-RDA | 8b084e4bea330e0ae86d2aa28ae81a36671efc67 | b0e0c03b5e5b68b90bfe7a9b53d266ed3d7074f8 | refs/heads/master | 2020-03-26T13:33:02.554916 | 2018-08-24T09:10:35 | 2018-08-24T09:10:35 | 144,773,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,313 | r | oneA.R | ALPHA=0.05 # learning rate parameter
nodes=c(5,8,5,1) # 5 inputs, 2 hidden layers, with 8 and 5 nodes , 1 output
nlayers=length(nodes) -1 # 3 sets of weights
net=list() # set up empty list
# net[[ j ]] holds weight matrix feeding nodes of layer j+1 from nodes in layer j
# make weights and fill with random numbers
# NOTE(review): runif draws from [0,1), so all initial weights are
# non-negative -- unusual for neural-net initialization; confirm intended.
for(j in 1:nlayers) net[[ j ]] <- matrix(runif(nodes[ j ]*nodes[ j +1 ]),nodes[j+1],nodes[j])
# Forward pass through the global `net`: apply each layer's weight matrix to
# the current activations and squash with the logistic function. Returns the
# final-layer output (a one-column matrix).
netsays <- function(x) {
  activation <- x
  for (layer_idx in seq_len(nlayers)) {
    weighted <- net[[layer_idx]] %*% activation
    activation <- 1 / (1 + exp(-weighted))
  }
  activation
}
# Recursive back-propagation of the error signal.
#
# Updates (via `<<-`) the global weight net[[layer]][n1, n2] by gradient
# descent with step size ALPHA, after first recursing into the previous
# layer with `factor` scaled by this weight and the logistic derivative
# r * (1 - r) of the sending node's output. `r` is the global list of
# per-layer node outputs filled in by netlearns; the recursion happens
# before the weight update so the pre-update weight is used, matching the
# original evaluation order.
#
#   layer  - index of the weight set being updated (1 = input side)
#   n1     - receiving node index (in layer `layer` + 1)
#   n2     - sending node index (in layer `layer`)
#   factor - accumulated error derivative from the layers above
backprop <- function(layer, n1, n2, factor) {
  if (layer > 1) {
    # BUG FIX: this line previously read r[[iayer]] (typo for `layer`),
    # which aborted with "object 'iayer' not found" whenever the recursion
    # descended past the last layer -- i.e. for any net with >1 weight set.
    for (n in 1:nodes[layer - 1]) {
      backprop(layer - 1, n2, n,
               factor * net[[layer]][n1, n2] * r[[layer]][n2] * (1 - r[[layer]][n2]))
    }
  }
  net[[layer]][n1, n2] <<- net[[layer]][n1, n2] - ALPHA * factor * r[[layer]][n2]
}
# One training step: forward-propagate input `x`, caching every layer's
# activations in the global list `r`, then back-propagate the squared-error
# gradient of the output against `truth`, updating the global `net` in place.
# NOTE(review): backprop is seeded with n1 = 1 for every sending node n,
# which assumes a single output node -- confirm if the output layer grows.
netlearns <- function(x,truth) { # like netsays but changes weights
r <<- list() # to contain the outputs of all nodes in all layers
r[[1]] <<- x # the input layer
for(layer in 1:nlayers) r[[layer+1]] <<- as.vector(1/(1+exp(-net[[layer]] %*% r[[layer]])))
u <- r[[nlayers+1]] # final answer, for convenience
# (u - truth) * u * (1 - u) is the logistic output delta for squared error.
for(n in 1:nodes[nlayers]) backprop(nlayers,1,n,(u-truth)*u*(1-u))
}
# Load the training file. NOTE(review): naming the data frame `sample` masks
# base::sample as a variable; the function call below still resolves because
# R looks for a function in call position, but a different name would be safer.
sample <- read.table("data/Sample3", header=FALSE)
# Random sampling: 80/20 train/test split.
# NOTE(review): samplesize may be non-integer (0.8 * n); confirm sample()
# truncation is the intended rounding.
samplesize = 0.80 * nrow(sample)
set.seed(1234)
index = sample( seq_len ( nrow ( sample ) ), size = samplesize )
## Scale data for neural network (min-max to [0, 1] per column).
# NOTE(review): min/max are computed on the FULL data set before the split
# (test information leaks into the scaling), and these assignments mask the
# base max/min functions. Column 1 (the label) is scaled along with the
# features -- confirm intended.
max = apply(sample , 2 , max)
min = apply(sample,2 , min)
scaled = as.data.frame(scale( sample, center = min, scale = max - min))
# creating training and test set
trainNN = scaled[index , ]
testNN = scaled[-index , ]
Nsample <- dim(trainNN)[1]
print(head(trainNN))
# Echo every training row (label, then features) -- verbose for large files.
for (i in 1:Nsample) {print(trainNN[i,1]); print(trainNN[i,-1])}
# ROC-style curve: score all training rows at once (features transposed so
# columns are samples), then walk the rows in score order.
plot(c(0,1),c(0,1))
v <- netsays(t(trainNN[,-1]))
p <- trainNN[order(v),1]
nc <- sum(trainNN[,1]==0)
nd <- Nsample-nc
nnc <- nc
nnd <- nd
for (i in 1:length(p)) {if(p[i]==1) {nd <- nd-1} else {nc <- nc-1}
points(nc/nnc,nd/nnd,pch='.') }
# Collect per-class network outputs for histogramming.
vc <- rep(0,nnc)
vd <- rep(0,nnd)
nc <- 0
nd <- 0
for (i in 1:Nsample){
itype <- trainNN[i,1]
isay <- netsays(as.numeric(trainNN[i,-1]))
if(itype==0) {nc <- nc+1;vc[nc] <- isay} else {nd<- nd+1;vd[nd] <- isay} }
hc <- hist(vc,breaks=seq(0,1,.05))
hd <- hist(vd,breaks=seq(0,1,.05)) |
7728ee4ce614af634cef2f52ebfdf1b1898a74d4 | c7c92c242d568ceb6a064bb53f85dbb3326ee0ac | /gettingAndCleaningData/run_analysis.R | bbf217c1e0577f4250a10908adf5170ccae3256f | [] | no_license | gregord64/datasciencecoursera | d2969c865630282abe9d3afe21cadbbcb6bb61d8 | e9279f9b7bea26969b4ce6a0125d758d7faae92f | refs/heads/master | 2021-01-25T07:07:44.713078 | 2015-05-17T22:55:23 | 2015-05-17T22:55:23 | 28,272,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,585 | r | run_analysis.R | ###########################################
## ##
## Set the working directory ##
## ##
###########################################
# run_analysis: build a tidy summary of the UCI HAR (smartphone activity)
# data set -- merge train/test, keep mean()/std() features, label activities,
# and write per-subject, per-activity averages.

# NOTE(review): hard-coded absolute working directory, kept for compatibility
# with the original workflow; this makes the script machine-specific.
setwd("c:/R_Programming/gettingAndCleaningData")

# Required libraries.
library(downloader)
library(reshape2)

# Location of the extracted UCI HAR Dataset relative to the working directory.
datasetPath <- file.path("../gettingAndCleaningData" , "UCI HAR Dataset")

# Enumerate (and echo) the files shipped with the dataset as a sanity check
# that the archive was extracted where expected.
datasetFiles <- list.files(datasetPath, recursive = TRUE)
datasetFiles

# --- Step 1: read the test and training partitions. ------------------------
# Each partition provides subject IDs, activity codes (y) and the 561 feature
# measurements (x).
# NOTE(review): the official archive names the feature files "X_test.txt" /
# "X_train.txt" with a capital X; the lowercase names below work on Windows
# only -- confirm before running on a case-sensitive file system.
subject_test <- read.table(file.path(datasetPath, "test", "subject_test.txt"), header = FALSE)
y_test <- read.table(file.path(datasetPath, "test", "y_test.txt"), header = FALSE)
x_test <- read.table(file.path(datasetPath, "test", "x_test.txt"), header = FALSE)
subject_train <- read.table(file.path(datasetPath, "train", "subject_train.txt"), header = FALSE)
y_train <- read.table(file.path(datasetPath, "train", "y_train.txt"), header = FALSE)
x_train <- read.table(file.path(datasetPath, "train", "x_train.txt"), header = FALSE)

# --- Step 2: attach descriptive variable names. -----------------------------
# Feature names come from features.txt (see features_info.txt for meanings).
names(subject_train) <- "subjectID"
names(subject_test) <- "subjectID"
featureNames <- read.table(file.path(datasetPath, "features.txt"), header = FALSE)
names(x_train) <- featureNames$V2
names(x_test) <- featureNames$V2
names(y_train) <- "activity"
names(y_test) <- "activity"

# --- Step 3: merge everything into one data set. ----------------------------
train <- cbind(subject_train, y_train, x_train)
test <- cbind(subject_test, y_test, x_test)
combined <- rbind(train, test)

# --- Step 4: keep only the mean() and std() measurements. -------------------
meanstdcols <- grepl("mean\\(\\)", names(combined)) |
  grepl("std\\(\\)", names(combined))
meanstdcols[1:2] <- TRUE  # always retain subjectID and activity
combined <- combined[, meanstdcols]

# --- Step 5: replace activity codes 1..6 with descriptive labels. -----------
combined$activity <- factor(combined$activity,
  labels = c("Walking", "Walking Upstairs", "Walking Downstairs",
             "Sitting", "Standing", "Laying"))

# --- Step 6: tidy data set -- mean of every variable per subject+activity. --
melted <- melt(combined, id = c("subjectID", "activity"))
tidy <- dcast(melted, subjectID + activity ~ variable, mean)
# row.names = FALSE as required by the assignment.
write.table(tidy, "courseProjectDataset.txt", row.names = FALSE)
## The file could also be written as a CSV if desired (intentionally disabled):
## write.csv(tidy, "courseProjectDataset.csv", row.names=FALSE)
|
98c53ce133dbcfba92a2a9998072c60572847ddd | 0ef6541cf6b05f241cdc2471280d31fdd7d73a06 | /Simulations/Drugs_Functions.R | 2e630e9d6412ac43c9b7e63d02e42c1e8bca5fb8 | [] | no_license | anthonycrane/MixedMarkov | eb3c73ab507d6449281da03fa2a5485d0a5bb316 | d15b4874dbfe9d6f1f41d1eb528954acec104c42 | refs/heads/master | 2020-12-28T23:05:08.921670 | 2014-12-12T19:48:43 | 2014-12-12T19:48:43 | 27,934,150 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,029 | r | Drugs_Functions.R | # File: Drugs_Functions.R
# Purpose: Defines convenience functions for use in the mHMM project.
AnySuccess.Z <- function(data, subject, min_days = 14) {
  # Did this subject achieve a non-using streak (on the Z variable)?
  #
  # Scans the subject's daily Z values for a run of at least `min_days`
  # consecutive 0's (no observed use).
  #
  # Args:
  #   data     - data frame with columns Subject and Z (0/1 daily use flag)
  #   subject  - subject id to look up in data$Subject
  #   min_days - streak length counting as success (new optional argument;
  #              the default 14 preserves the original hard-coded criterion)
  #
  # Returns: 1 if a qualifying streak exists, 0 otherwise (also 0 when the
  # subject has no rows in `data`).
  runs <- rle(data$Z[data$Subject == subject])
  zero_run_lengths <- runs$lengths[which(runs$values == 0)]
  as.integer(any(zero_run_lengths >= min_days))
}
AnySuccess.C <- function(data, subject, min_days = 14) {
  # Did this subject achieve a non-using streak (on the C variable)?
  #
  # Scans the subject's daily C values for a run of at least `min_days`
  # consecutive 0's.
  #
  # Args:
  #   data     - data frame with columns Subject and C (0/1 daily flag)
  #   subject  - subject id to look up in data$Subject
  #   min_days - streak length counting as success (new optional argument;
  #              the default 14 preserves the original hard-coded criterion)
  #
  # Returns: 1 if a qualifying streak exists, 0 otherwise (also 0 when the
  # subject has no rows in `data`).
  runs <- rle(data$C[data$Subject == subject])
  zero_run_lengths <- runs$lengths[which(runs$values == 0)]
  as.integer(any(zero_run_lengths >= min_days))
}
AnySuccess.C.fit <- function(data, subject, min_days = 14) {
  # Did this subject achieve a non-using streak (on the fitted C.fit variable)?
  #
  # Scans the subject's daily C.fit values for a run of at least `min_days`
  # consecutive 0's.
  #
  # Args:
  #   data     - data frame with columns Subject and C.fit (0/1 daily flag)
  #   subject  - subject id to look up in data$Subject
  #   min_days - streak length counting as success (new optional argument;
  #              the default 14 preserves the original hard-coded criterion)
  #
  # Returns: 1 if a qualifying streak exists, 0 otherwise (also 0 when the
  # subject has no rows in `data`).
  runs <- rle(data$C.fit[data$Subject == subject])
  zero_run_lengths <- runs$lengths[which(runs$values == 0)]
  as.integer(any(zero_run_lengths >= min_days))
}
invlogit <- function (x) {
  # Inverse-logit (logistic) transform: maps log-odds to probabilities.
  #
  # BUG FIX: the original exp(x) / (1 + exp(x)) overflows to Inf / Inf = NaN
  # for large x (roughly x > 709). stats::plogis computes the same quantity
  # in a numerically stable, vectorized way.
  #
  # Input:  x - numeric vector of log-odds
  # Output: numeric vector of probabilities in [0, 1]
  return(stats::plogis(x))
}
# Simulate one day for one subject: draw the hidden state at time t from a
# logistic model of treatment and the previous state, then draw the observed
# use indicator conditional on that state.
#
#   X.it     - treatment group indicator at time t
#   C.itm1   - hidden state at time t - 1
#   Beta     - treatment effect on the log-odds of state 1 (log OR)
#   beta.0   - intercept: log-odds of state 1 for control, previous state 0
#   beta.1   - log OR of state 1 given state 1 at the previous time point
#   b.0      - subject-level random intercept
#   lambda.0 - log-odds of observed drug use while in state 0
#   lambda.1 - log OR of observed use in state 1 compared to state 0
#
# Returns: list of length 2 -- hidden state C.it, then observed use Z.it.
visits.gen <- function(X.it, C.itm1, Beta, beta.0, beta.1, b.0, lambda.0, lambda.1) {
  # Hidden-state transition probability. The two rbinom() draws below are
  # kept in this exact order so RNG streams match the original code.
  state.prob <- invlogit(beta.0 + beta.1 * C.itm1 + Beta * X.it + b.0)
  C.it <- rbinom(n = 1, size = 1, prob = state.prob)
  # Emission: probability of observed drug use given the drawn state.
  use.prob <- invlogit(lambda.0 + lambda.1 * C.it)
  Z.it <- rbinom(n = 1, size = 1, prob = use.prob)
  list(C.it, Z.it)
}
Data.gen <- function(X.i, nday, Beta, beta.0, beta.1, b.0, lambda.0, lambda.1) {
  # Simulate the hidden-state and response sequence for one subject.
  #
  #   X.i      - nday x 3 matrix: Subject, Day, Treatment group
  #   nday     - number of observation days
  #   Beta     - treatment effect on the log-odds of state 1 (log OR)
  #   beta.0   - intercept: log-odds of state 1, control group, previous
  #              state 0
  #   beta.1   - log OR of state 1 given state 1 at the previous time point
  #   b.0      - subject-level random intercept
  #   lambda.0 - log-odds of observed drug use while in state 0
  #   lambda.1 - log OR of observed use in state 1 compared to state 0
  #
  # Returns: nday x 5 matrix -- Subject, Day, Treatment, C (hidden state),
  # Z (observed response).
  #
  # Improvements over the original: the duplicated row == 1 / else branches
  # are collapsed (only C.itm1 differed), and seq_len() makes the loop a
  # no-op for nday = 0 instead of indexing out of bounds. RNG draw order is
  # unchanged.
  D.i <- cbind(X.i, C = rep(NA, nday), Z = rep(NA, nday))
  for (row in seq_len(nrow(D.i))) {
    X.it <- D.i[row, 3]
    # All subjects are assumed to be in state 1 prior to study entry;
    # afterwards carry forward the previously simulated state.
    C.itm1 <- if (row == 1) 1 else D.i[row - 1, 4]
    V.gen <- visits.gen(X.it = X.it, C.itm1 = C.itm1, Beta = Beta,
                        beta.0 = beta.0, beta.1 = beta.1, b.0 = b.0,
                        lambda.0 = lambda.0, lambda.1 = lambda.1)
    D.i[row, 4] <- V.gen[[1]]
    D.i[row, 5] <- V.gen[[2]]
  }
  return(D.i)
}
# Expected cell counts for a two-way contingency table under independence:
# E[i, j] = rowTotal_i * colTotal_j / grandTotal.
expected <- function(x){
  row.totals <- rowSums(x)
  col.totals <- colSums(x)
  (row.totals %*% t(col.totals)) / sum(x)
}
35af788e519c897ef5b1cd114e95066caace51ef | e55da889645bfb77e2c8a5970e5cce365be678c6 | /Store_Distance_Pairs.R | 02b6be822cd51804c8237f9a750484e7ba745d7d | [] | no_license | jumorris2017/R-code-flatfiles | 60f71f65b9409935feae107152f22e0b371e023d | a652d49b3a96416369f590d86189192d2ca57ce2 | refs/heads/master | 2018-09-08T14:31:09.353232 | 2018-08-15T00:01:27 | 2018-08-15T00:01:27 | 110,997,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,380 | r | Store_Distance_Pairs.R | ##Calculating Store Distance to other Stores
##Findings pairs of close stores
##For Lisa/Megan request 1/9/18
#load libraries
library(data.table)
library(geosphere)
library(stringr)
library(splitstackshape)
#load data: one row per store with coordinates
dd <- fread("O:/CoOp/CoOp194_PROReportng&OM/Megan/Distances/ComplexLatLong.csv")
setcolorder(dd,c("STORE_NUM","LONGITUDE","LATITUDE"))
#remove stores 101 duplicates
dd <- dd[!duplicated(dd),]
#create distance matrix (geosphere::distm, meters between every store pair)
dm <- distm(dd[,2:3])
dm <- as.data.table(dm)
#add stores column
stores <- dd[,STORE_NUM]
#cbind
ddm <- cbind(stores,dm)
#colnames
cnames <- c("stores",paste0("st",dd[,STORE_NUM]))
#setnames
setnames(ddm,cnames)
#write distance matrix
#write.csv(ddm,file="O:/CoOp/CoOp194_PROReportng&OM/Megan/Distances/distancematrix.csv")
#ddm <- fread("O:/CoOp/CoOp194_PROReportng&OM/Megan/Distances/distancematrix.csv")
# NOTE(review): deleting "V1" only makes sense after re-reading the CSV above
# (fread adds V1 for the row index); on a fresh run there is no V1 column and
# data.table will warn/error -- confirm which path this script is run on.
ddm[, "V1" := NULL]; ddm[, "stores" := NULL]
ddm <- ddm[, lapply(.SD, function(x) round(x,0))]
stores <- colnames(ddm)
#delete anything larger than 100,000 (only keep pairs within 100 km)
ddm <- ddm[, lapply(.SD, function(x) ifelse(x>=100000,NA,x))]
#add padding of 0's for correct sorting
# (width 5 is safe because distances >= 100000 were dropped above, so all
# remaining values have at most 5 digits and lexicographic sort == numeric)
ddm <- ddm[, lapply(.SD, function(x) str_pad(x,5,pad="0"))]
#paste store number to distance, giving "ddddd-storenum" cells
# NOTE(review): 9393 is the store count hard-coded throughout; deriving it
# from nrow(dd)/ncol(ddm) would keep the script working as the fleet changes.
temp <- as.data.table(matrix(nrow=9393,ncol=9393))
for (i in 1:9393) {
col <- colnames(ddm)[i]
vec <- ddm[[col]]
temp[, i] <- paste(vec,stores,sep="-")
}
###"ddm" is a matrix of the distances###
#create a ranking the distances, and deleting them if they're over rank 26
# (26 = the store itself plus its 25 nearest neighbors)
sortmat <- as.data.table(matrix(nrow=9393,ncol=9393))
for (i in 1:9393){
col <- colnames(temp)[i]
vec <- temp[[col]]
vec <- sort(vec)
vec[27:length(vec)] <- NA
sortmat[,i] <- vec
}
##keep rows 1-26
sortmat2 <- sortmat[1:26,]
#transpose data.table so each row is a store and columns are its 26 nearest
sortmat2 <- t(sortmat2)
sortmat2 <- as.data.table(sortmat2)
#create column names
colnames2 <- as.vector(c("store_num",paste0("st",str_pad(c(1:25),2,pad="0"))))
#setnames
setnames(sortmat2,colnames2)
#split distance from store numbers (cSplit on "-"; column 26 = distance part)
sortmat3 <- as.data.table(matrix(nrow=9393,ncol=26))
for (i in seq_along(colnames(sortmat2))) {
sortmat3[,i] <- cSplit(sortmat2, i, "-")[,26]
}
colnames3 <- as.vector(c("store_dist",paste0("st",str_pad(c(1:25),2,pad="0"),"dist")))
#setnames
setnames(sortmat3,colnames3)
#delete store distance to self
sortmat3[, store_dist := NULL]
# Same split again, keeping column 27 = the store-number part of each cell.
sortmat4 <- as.data.table(matrix(nrow=9393,ncol=26))
for (i in seq_along(colnames(sortmat2))) {
sortmat4[,i] <- cSplit(sortmat2, i, "-")[,27]
}
colnames4 <- as.vector(c("store_num",paste0("st",str_pad(c(1:25),2,pad="0"),"num")))
#setnames
setnames(sortmat4,colnames4)
#cbind stores to distances
final <- cbind(sortmat4,sortmat3)
#order columns: store_num first, then the 50 st##num/st##dist columns
# interleaved by their sorted names
colordervec <- c("store_num",final[, sort(names(final)[2:51])])
setcolorder(final,colordervec)
#write final product
write.csv(final,file="O:/CoOp/CoOp194_PROReportng&OM/Megan/Distances/listof25closeststores.csv")
# #create a matrix with the distance-store cells ranked from closest to furthest away, keeping ony top 26
# sortmat <- as.data.table(matrix(nrow=9393,ncol=9393))
# for (i in 6000:9393){
# col <- colnames(temp)[i]
# vec <- temp[[col]]
# vec <- sort(vec)
# vec[27:length(vec)] <- NA
# sortmat[,i] <- vec
# }
# #get rid of all rankings greater than 26 (25 + 1 for self)
# rankmat <- rankmat[, lapply(.SD, function(x) ifelse(x>26,NA,x))]
# ##sort columns separately
# temp <- apply(temp,1,sort)
#
# #give top25 storename column headers
# top25 <- as.data.table(top25)
# #cbind
# top25 <- cbind(stores,top25)
# #setnames
# setnames(top25,cnames)
#
###"top25" is a ranking of the 25 closest stores###
# #pull in the store number for top 25 stores
# top25stores <- top25[, lapply(.SD, function(x) ifelse(x>1&x<=26,top25[,stores],NA)), .SDcols=colnames(top25)[2:ncol(top25)]]
# ###"top25stores" is a matrix of the store numbers of the 25 closest stores###
#
# # ##create a for loop that deletes all the distances for stores ranking >26
# # top25distances <- as.data.table(matrix(nrow=9393,ncol=9393))
# # #loop through cells. if within top 26 ranking, return the distance
# # #need the j+1 because the other data.tables have the "stores" column
# # for (i in 1:nrow(top25distances)) {
# # for (j in 1:ncol(top25distances)) {
# # if (!is.na(top25[i,j+1])) {
# # top25distances[i,j] <- ddm[i,j+1]
# # } else top25distances[i,j] <- NA
# # }
# # }
#
# ###"top25distances" is a matrix of the distances of the 25 closest stores###
#
# ##cbind the matrices together (without store number columns)
# top25nostorenum <- top25[, stores := NULL]; ddmnostorenum <- ddm[, stores := NULL]
# top25bind <- cbind(top25nostorenum,top25stores,ddmnostorenum)
# top25bind <- top25bind[, lapply(.SD, function(x) as.character(x))]
#
# ##paste together the distance and store number in one cell
# finalddm <- as.data.table(matrix(nrow=9393,ncol=9393))
# finalddm <- finalddm[, lapply(.SD, function(x) as.character(x))]
# for (i in 1:9393) {
# j <- i+9393
# k <- i+9393+9393
# finalddm[, i] <- paste(top25bind[,i,with=FALSE],top25bind[,j,with=FALSE],top25bind[,k,with=FALSE],sep="-")
# }
#
# ##sort columns separately
# test <- apply(finalddm,1,sort)
#
# ##keep only rows 2-26
# test2 <- test[2:26,]
#
# ##parse out the distance from the store number
#
# ##transpose the matrix so it's long format (cols=store number, distance 1, store 1, distance n, store n)
#
#
#
#
#
#
|
6dc1d30fc30bf853544b6bf5e5df5be55ff7f6c4 | eb6eb8dad4374c3a686229d0d0bdc1de7a8c3616 | /bostoncrimes.R | 045b253d37a02fa9b4a6c19482ba572e96980ee7 | [] | no_license | sonaljain2212/Boston_Crimes | 169b9f09234956c6052a1f201d2262ef48bbb58e | 4d6306f787ca5bf4149e0b2bf4370919fbf8f550 | refs/heads/master | 2021-02-09T04:28:07.504482 | 2020-03-01T23:35:19 | 2020-03-01T23:35:19 | 244,239,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,537 | r | bostoncrimes.R | #top 10 offense code group
# Exploratory plots of Boston crime data.
# NOTE(review): `crimetest` is not defined in this file -- it is presumably
# the crime data set loaded by an earlier step; confirm against the caller.

# Top 10 offense code groups by number of incidents.
crime <- crimetest %>%
  group_by(OFFENSE_CODE_GROUP) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
crime <- crime[1:10, ]
# (crime is already trimmed to 10 rows; the original re-subset it here.)
ggplot(crime) +
  geom_col(aes(x = OFFENSE_CODE_GROUP, y = count, fill = OFFENSE_CODE_GROUP)) +
  coord_flip() +
  labs(title = "Count of offense code group")

# Crime frequency by hour of day on a polar (clock-face) layout.
crimetest %>%
  ggplot() +
  geom_bar(aes(x = HOUR, fill = factor(HOUR))) +
  coord_polar() +
  labs(title = "Crime frequency by hour")

# Motor vehicle accident responses per year.
# FIX: the original passed `na.rm = TRUE` to summarise(), which is not an
# argument of summarise() -- it silently created a constant logical column.
crimetest %>%
  filter(OFFENSE_CODE_GROUP == "Motor Vehicle Accident Response") %>%
  group_by(YEAR) %>%
  summarise(count = n()) %>%
  ggplot() +
  geom_col(aes(x = YEAR, y = count, fill = factor(YEAR))) +
  labs(title = "Count of motor vehicle accidents each year")

# Top 10 districts with the most recorded crimes ("External" excluded).
crimedistrict <- crimetest %>%
  filter(DISTRICT != "External") %>%
  group_by(DISTRICT) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
crimedistrict <- crimedistrict[1:10, ]
crimedistrict %>%
  ggplot() +
  geom_col(aes(x = DISTRICT, y = count, fill = DISTRICT)) +
  labs(title = "top 10 district with most crimes")

# Map the crimes with ggmap / Google Maps.
# NOTE(review): installing packages from inside an analysis script mutates the
# user's library; consider moving these two lines to a setup script.
if (!requireNamespace("devtools")) install.packages("devtools")
devtools::install_github("dkahle/ggmap")
library("ggmap")
# SECURITY FIX: the original committed a live Google API key in source. That
# key should be revoked; supply the key via an environment variable instead.
ggmap::register_google(key = Sys.getenv("GOOGLE_MAPS_API_KEY"))
devtools::session_info()
datacrime <- crimetest %>% filter(DISTRICT != "External")
p <- qmap("Boston , BOS", zoom = 12) +
  geom_point(data = datacrime, mapping = aes(x = Long, y = Lat, color = DISTRICT)) +
  coord_map()
print(p)
7ea7ff59a257c76b68cdc908e9a35bf6c26769d3 | 800697f6e2ca0d4873742660bb0cee90b96c9d84 | /R/zzz.R | f7192a3bd5ec3504e27f21ab336175306967e41f | [] | no_license | aaronmberger-nwfsc/pacifichakemse-1 | 7b16c4971a8abaa8ff56216db21c603547fdf95a | bc617ac9036d67fda0c9560d8705773619f3a549 | refs/heads/master | 2023-08-29T00:10:24.431777 | 2021-11-02T09:06:33 | 2021-11-02T09:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,192 | r | zzz.R | globalVariables(c(
  # Column names and non-standard-evaluation symbols referenced inside
  # dplyr/ggplot2/data.table expressions throughout the package. Registering
  # them silences R CMD check's "no visible binding for global variable"
  # notes; it has no runtime effect.
  # NOTE(review): a few entries are duplicated (e.g. "year", "ylimits", ".x",
  # "indicator", "om_output") -- harmless to globalVariables(), but the list
  # could be deduplicated when next touched.
  "ylimits",
  "year",
  "value",
  "model",
  "med",
  "run",
  "p5",
  "p95",
  "med.can",
  "p5.can",
  "p95.can",
  "med.us",
  "med.US",
  "p5.us",
  "p95.us",
  "p5.US",
  "p95.US",
  "HCR",
  "season",
  "med.tot",
  "p5.tot",
  "p95.tot",
  "scenario",
  "indicator",
  "AAV.plot",
  "AAV.plotquant",
  "AssessTac",
  "Catch",
  "Catch.plot",
  "Catch.plotquant",
  "F0.can",
  "F0.us",
  "Quota",
  "Realized",
  "SE.SSB",
  "SSB",
  "SSB.US",
  "SSB.can",
  "SSB.mid.can",
  "SSB.mid.us",
  "SSB.plot",
  "SSB.plotquant",
  "SSBtot",
  "TAC",
  "V",
  "vtac.can",
  "V.TAC",
  "V.TAC.fa",
  "V.TAC.sp",
  "V.TAC.su",
  "V.ca.plot",
  "V.us.plot",
  "amc",
  "amc.US",
  "amc.can",
  "amc.us",
  "ams",
  "ams.US",
  "ams.can",
  "ams.us",
  "area.1",
  "area.2",
  "assessment",
  "avg",
  "avg.fa",
  "avg.sp",
  "avg.su",
  "ca",
  "dev.off",
  "df",
  "exploitation",
  "indicator",
  "lower",
  "ls.JMC",
  "ls.save",
  "margin",
  "nruns",
  "p25",
  "p75",
  "par",
  "pcnt",
  "png",
  "prop",
  "quota.plotquant",
  "rep.values",
  "seeds",
  "space",
  "ssb",
  "tac",
  "tac_historical",
  "tac_realized",
  "upper",
  "us",
  "vtac.can.seas",
  "vtac.us",
  "vtac.us.seas",
  "year",
  "year.future",
  "years",
  "ylimits",
  "yr",
  "can.prop",
  "us.prop",
  "Yr",
  "Fleet",
  "v_tac",
  "v_tac_sp",
  "v_tac_su",
  "v_tac_fa",
  "avg_sp",
  "avg_su",
  "avg_fa",
  ".",
  "age",
  "Can",
  "US",
  "0.5",
  "country",
  "x",
  "Catch_space",
  "pope.mul",
  "R.save",
  "SSB.weight",
  "sel",
  "row_sum",
  "val",
  ".x",
  "Virgin",
  "Initial",
  "SSB_SPRtgt",
  "Fstd_SPRtgt",
  "TotYield_SPRtgt",
  "TotYield_Btgt",
  "TotYield_MSY",
  "Fishery",
  "Obs",
  "Iter",
  "rsum",
  "Exp",
  "Calc_Q",
  "N",
  "Pearson",
  "Sex",
  "Seas",
  "Bio_Pattern",
  "BirthSeas",
  "index",
  "obs",
  "se_log",
  "err",
  "fleet",
  "flag",
  "type",
  "Value",
  "Label",
  "total",
  "catch",
  "d2$Catchobs",
  "d2$survey",
  "d2$survey_err",
  "Year",
  "Total",
  "f_new",
  "<<-",
  "1",
  "2",
  "3",
  "4",
  "5",
  "6",
  "7",
  "8",
  "9",
  "10",
  "11",
  "12",
  "seas1",
  "seas2",
  "seas3",
  "seas4",
  "ss_model",
  "tot",
  "calc_tac_est_vs_real",
  "df_lists_OM",
  "hcr",
  "month",
  "om_output",
  "quota",
  "CAN_JV",
  "CAN_Shoreside",
  "CAN_forgn",
  "Nsamp_adj",
  "US_JV",
  "US_foreign",
  "USresearch",
  "Used",
  "atSea_US_CP",
  "atSea_US_MS",
  "US_shore",
  "variable",
  "om",
  "am",
  "Country",
  "movement",
  ".x",
  ".y",
  "catch_quota",
  "qlow",
  "qhigh",
  "aav",
  "Indicator",
  "can",
  "sim_year",
  "grp",
  "om_output",
  "iter"
))
2f3c1d856f49a38f1c8f46cca574f159cd62ca85 | be1e93819f4d3c850e33bc516041224bfbe2efd4 | /RforEveryone_Chapter10.R | aae87bbfbca8df4cd8f8dd8552bf8cec18e34593 | [] | no_license | CaptainNano77/R-for-Everyone-My-follow-Along-Notes-and-Code | 98e7fce4a14f7201e9e82c2aced7913274ec53ba | d5c285e93b6a82ef4a28778c9b2c15c383d2bf2f | refs/heads/master | 2020-08-12T12:44:36.374255 | 2019-10-18T02:21:54 | 2019-10-18T02:21:54 | 214,769,754 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,492 | r | RforEveryone_Chapter10.R | #########################################################################################################
# CHAPTER 10
# LOOPS, THE UN-R WAY TO ITERATE
########################################################################################################
# Although most languages rely heavily on loops, R generally uses vectorization. However, loops
# are still useful in R
#########################################################################################################
# for loops: 10.1
########################################################################################################
#
# (For a computed upper bound n, seq_len(n) is safer than 1:n, which
# mis-fires as c(1, 0) when n == 0.)
for (i in 1:10){
print(i)
}
# notice this could have just been done with the vectorization of the print function
print(1:10)
fruit <- c('apple', 'banana', 'pomegranate')
# Preallocate the result vector before the loop -- growing a vector with
# c() inside a loop is the classic R performance trap.
fruitlength <- rep(NA, length(fruit))
fruitlength
names(fruitlength) <- fruit
fruit
# Iterating over the fruit names and indexing by name fills the vector.
for (a in fruit){
fruitlength[a] <- nchar(a)
}
fruitlength
# Again, R's built in vectorization could have made this a lot easier...
# (in non-interactive code, vapply()/map_* are the usual loop replacements)
fruitlength2 <- nchar(fruit)
names(fruitlength2) <- fruit
fruitlength2
# Also can get identical results with this...
identical(fruitlength, fruitlength2)
#########################################################################################################
# while loops : 10.2
########################################################################################################
#
# while loops are even less frequent in R. However, they are a way to run the code inside the
# braces repeatedly as long as the tested condition proves true.
x <- 1
while (x <= 5){
print(x)
x <- x + 1
}
#########################################################################################################
# Controlling Loops: 10.3
########################################################################################################
#
# Sometimes we need to skip to the next iteration of the loop and completely break out of it.
# This is accomplished with 'next' and 'break'
for (i in 1:10)
{
if (i == 3)
{
next
}
print (i)
}
# notice that '3' was not printed
for (i in 1:10)
{
if (i == 4)
{
break
}
print (i)
}
# NOTE: IT IS BEST TO NOT USE LOOPS IN R. IT IS VERY IMPORTANT TO NOT USED NESTED LOOPS. (LOOPS IN
# LOOPS) AS THESE ARE VERY SLOW IN R.
#
#finish
|
11d70953f2e046f38971dad815712d50d5d8a4c9 | 3d8e78b8ca205deb70ac902e0f707105a8970a4e | /plot1.R | 45ef16e14d501b3ab933040b51cc371d774fb480 | [] | no_license | rgentzler/ExData_Plotting1 | 4fb344716b5a061239acbb2e2a30ca64000549cf | 9562ff4d78b46a94d0d9fa80f453774d56bda61d | refs/heads/master | 2020-12-25T05:25:54.112517 | 2015-08-03T22:49:28 | 2015-08-03T22:49:28 | 40,139,630 | 0 | 0 | null | 2015-08-03T17:53:57 | 2015-08-03T17:53:57 | null | UTF-8 | R | false | false | 507 | r | plot1.R | data <- read.table("household_power_consumption.txt",
sep = ";", header = TRUE)
##Subset to dates of interest and convert date
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data <- data[ which(data$Date == "2007-02-01" | data$Date == "2007-02-02"),]
gap <- transform(data,
Global_active_power = as.numeric(as.character(Global_active_power)))
hist(gap$Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power")
|
6f6fdebba68af100cb52c1e72725db501f5c6eca | abd4340c7a54de5b298a0699279e15a49e25172a | /analysis/ipo_cor_maps.R | 14cc78285a7c5cd1ac3f8d5648183bdf134b8b6a | [] | no_license | cszang/regional-trends-surface-warming | 83a6a6ee49cfff6f22ab43fa15fc61a461ffa776 | 1160c50b453dade5ba4d45ee9935ef833df3d1a2 | refs/heads/master | 2020-04-04T19:40:42.400540 | 2018-11-05T12:49:50 | 2018-11-05T12:49:50 | 156,216,255 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,696 | r | ipo_cor_maps.R | ## draw maps with mei/sst-correlations
request("ipo_gistemp_fdr")
request("ipo_noaa_fdr")
request("ipo_hadcrut_fdr")
## Flatten an ipo_* correlation object into a plain data frame for mapping.
##
## `x` is an ipo_X tibble with a nested `cipo` list-column holding per-cell
## correlations against the IPO index. Returns a data.frame with columns
## x/y (lon/lat), cor, p, plus:
##   sig  -- TRUE when p < 0.05
##   pos  -- TRUE when the correlation is positive
##   mode -- plot category: 1 = significant positive, 2 = positive,
##           3 = significant negative, 4 = negative (NA propagates)
ipo_ref <- function(x) {
  x %>%
    unnest(cipo) %>%
    select(x = lon, y = lat, cor, p) %>%
    # comparisons already yield TRUE/FALSE/NA; ifelse(cond, TRUE, FALSE)
    # in the original was redundant
    mutate(sig = p < 0.05,
           pos = cor > 0) %>%
    # encode the four sign/significance combinations as 1..4; equivalent to
    # the original triple-nested ifelse, including NA behaviour
    mutate(mode = ifelse(pos, 1, 3) + ifelse(sig, 0, 1)) %>%
    as.data.frame()
}
# Rasterise each FDR-corrected correlation table and build one categorical
# tmap panel per SST product. The "mode" raster layer holds the 1..4
# sign/significance categories produced by ipo_ref(); unmapped cells are
# drawn light grey.
# NOTE(review): reraster(), hpalette and map_layer are defined elsewhere in
# the project (not visible in this file).
ipo_gistemp_R <- ipo_gistemp_fdr %>%
  ipo_ref %>%
  reraster
ipo_map_gistemp <- tm_shape(ipo_gistemp_R) +
  tm_raster("mode", palette = hpalette,
            legend.show = FALSE, auto.palette.mapping = FALSE,
            colorNA = "#e3e3e3") +
  map_layer
ipo_noaa_R <- ipo_noaa_fdr %>%
  ## longitudes range from 0 to 360 and need to be fixed prior to
  ## remapping
  mutate(lon = ifelse(lon < 180, lon, lon - 360)) %>%
  ipo_ref %>%
  reraster
ipo_map_noaa <- tm_shape(ipo_noaa_R) +
  tm_raster("mode", palette = hpalette,
            legend.show = FALSE, auto.palette.mapping = FALSE,
            colorNA = "#e3e3e3") +
  map_layer
ipo_hadcrut_R <- ipo_hadcrut_fdr %>%
  ipo_ref %>%
  reraster
ipo_map_hadcrut <- tm_shape(ipo_hadcrut_R) +
  tm_raster("mode", palette = hpalette,
            legend.show = FALSE, auto.palette.mapping = FALSE,
            colorNA = "#e3e3e3") +
  map_layer
# Assemble the three panels side by side into the output PDF figure.
pdf("figures/ipo_correlation_maps_fdr.pdf", width = 8, height = 4)
tmap_arrange(ipo_map_gistemp, ipo_map_hadcrut, ipo_map_noaa,
             ncol = 3)
dev.off()
|
8c8f48f371a60c14f5e4a043802a5b460dc78e6f | 8ea6600eebbb9abcde5d420189c7624fa26621a9 | /inst/usage.R | 315e81a8b8e8d8826ccc41152ed500b46c24b3e1 | [] | no_license | eprifti/momr | 7fafd2c5ecb6af3b67b368239ea9c4c62815e939 | cad46ffdbcc3fcab9096e3c510f9b5802ae99815 | refs/heads/master | 2022-10-02T20:59:42.738037 | 2022-09-21T08:50:02 | 2022-09-21T08:50:02 | 39,720,920 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,689 | r | usage.R |
# load the package
library(momr)
library(profvis)
#' all the data in the package
# data(package="momr")
#' load the raw and frequency test dataset
data("hs_3.3_metahit_sample_dat_raw")
data("hs_3.3_metahit_sample_dat_freq")
#' NORMALIZATION
#' This should be performed with the whole dataset (complete catalogue).
#' But here is an exemple with the subset of the data for illustration purposes
data(hs_3.3_metahit_genesize)
norm.data <- normFreqRPKM(dat=hs_3.3_metahit_sample_dat_raw, cat=hs_3.3_metahit_genesize)
#' CLUSTERING OF SAMPLES
#pdf(file="visual_output.pdf") # open the pdf_visu
hc.data <- hierClust(data=hs_3.3_metahit_sample_dat_freq[,1:5], side="col")
clust.order <- hc.data$mat.hclust$order
#' order samples followin the hierarchical clustering
ordered.samples <- colnames(hs_3.3_metahit_sample_dat_freq[,1:5])[clust.order]
#' how close are the two first samples (spearman, rho)
hc.data$mat.rho[ordered.samples[1], ordered.samples[2]]
# select the samples closely related together
close.samples <- filt.hierClust(hc.data$mat.rho, hclust.method = "ward", plot = TRUE, filt = 0.37)
#' CLUSTER GENES ON THE MGS CATALOG
#' load the curated mgs data for the hs_3.3_metahit catalog
data("mgs_hs_3.3_metahit_sup500")
#' project a list of genes onto the mgs
genebag <- rownames(hs_3.3_metahit_sample_dat_freq)
mgs <- projectOntoMGS(genebag=genebag, list.mgs=mgs_hs_3.3_metahit_sup500)
#' extract the profile of a list of genes from the whole dataset
mgs.dat <- extractProfiles(mgs, hs_3.3_metahit_sample_dat_freq, silent=FALSE)
#' plot the barcodes
par(mfrow=c(5,1), mar=c(1,0,0,0))
for(i in 1:5){
plotBarcode(mgs.dat[[i]])
}
#' compute the filtered vectors
mgs.mean.vect <- computeFilteredVectors(profile=mgs.dat, type="mean")
#' TEST RELATIONS
#' for the first 1000 genes
res.test <- testRelations(data=hs_3.3_metahit_sample_dat_freq[1:500,],
trait=c(rep(1,150),rep(2,142)),type="wilcoxon")
head(res.test)
print(paste("There are",sum(res.test$p<0.05, na.rm=TRUE),"significant genes and",
sum(res.test$q<0.05, na.rm=TRUE), "after adjustment for multiple testing"))
res.test.mgs <- testRelations(data=mgs.mean.vect, trait=c(rep(1,150),rep(2,142)),type="wilcoxon")
#' DOWNSIZING & UPSIZING
#' downsize the matrix
data.downsized <- downsizeMatrix(data=hs_3.3_metahit_sample_dat_raw[,1:5],level=600,repetitions=1)
colSums(data.downsized, na.rm=TRUE)
#' downsize the genecount
data.genenb <- downsizeGC(data=hs_3.3_metahit_sample_dat_raw[,1:5], level=600, repetitions=3)
par(mfrow=c(1,1), mar=c(4,4,4,4))
plot(density(colMeans(data.genenb, na.rm=TRUE)), main="density of downsized gene richness")
#dev.off()
#' End of test file
|
7437b086d21cdbf0545a498cf92924e6d384c6c6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/raster/examples/extremeValues.Rd.R | a2565058f560663d893574133cb9b6fcd9403350 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 575 | r | extremeValues.Rd.R | library(raster)
### Name: extremeValues
### Title: Minimum and maximum values
### Aliases: minValue maxValue minValue,RasterLayer-method
### minValue,RasterStack-method minValue,RasterBrick-method
### maxValue,RasterLayer-method maxValue,RasterStack-method
### maxValue,RasterBrick-method
### Keywords: spatial
### ** Examples
r <- raster()
r <- setValues(r, 1:ncell(r))
minValue(r)
maxValue(r)
r <- setValues(r, round(100 * runif(ncell(r)) + 0.5))
minValue(r)
maxValue(r)
r <- raster(system.file("external/test.grd", package="raster"))
minValue(r)
maxValue(r)
|
3cf3364c1c6211cdf7ec0dad26686794edb779ff | 29585dff702209dd446c0ab52ceea046c58e384e | /logcondiscr/R/GradientL.r | 109b1a5cd9fc5121d5adb0d3ccce849685327588 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 463 | r | GradientL.r | GradientL <- function(W, psi, dX){
# Computes gradient of the log-likelihood L at psi
m <- length(psi)
tmp <- rep(NA, m)
tmp[1] <- J10(psi[1], psi[2], dX[1])
tmp[m] <- J10(psi[length(psi)], psi[length(psi) - 1], dX[length(dX)])
if (m > 2){
J <- J10(psi[2:(m - 1)], psi[3:m], dX[2:length(dX)]) + J10(psi[2:(m - 1)], psi[1:(m - 2)], dX[1:(length(dX) - 1)]) - exp(psi[2:(m - 1)])
tmp[2:(m - 1)] <- J
}
gradL <- W - tmp
return(gradL)
}
|
f299304d5ccc5d05408e8d895b1c3bd78941d065 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612746748-test.R | 8dd29cd3cc2950b363409b1d95bdae82ef7c9344 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,315 | r | 1612746748-test.R | testlist <- list(x = c(Inf, 7.01244887724574e+217, 0, NaN, NaN, NaN, NaN, NaN, NaN, -9.25783436608935e+303, NaN, NaN, -5.3870366847157e-108, -2.17982767809619e-106, -5.46303037866812e-108, 1.80107573659384e-226, 0), y = c(1.29849269277858e+219, 1.80107573886382e-226, 1.87978485692451e-226, 2.24032175463232e-178, 9.1158662013441e-316, 6.17672607646052e+218, 1.29849269277858e+219, 1.91323950329152e-43, 4.79822274932403e-299, NaN, 2.0080414659448e+131, 4.57671146819021e-246, 2.48154073217111e+217, 5.92121705581053e-310, 0, 2.07226151461452e-315, 0, -4.940462474121e+77, 1.82416071901419e-226, 1.80107573659442e-226, 1.80107573659442e-226, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -2.28992634745165e+226, 6.94495352473156e-223, 1.85734270574246e-308, 4.79822068439058e-299, NaN, NaN, 1.8010760888434e-226, 1.80107573659442e-226, 8.39911597930119e-323, 2.11624586939991e-225, NaN, 1.29842036478571e+219, 1.83914994793787e-226, 2.15857280668041e-320, 0, 1.79486475156292e-226, 1.29849269277858e+219, -4.94057290206713e+77, 4.71570466126102e+130, 1.80107573659293e-226, -7.06898650316662e-111, 9.38997237502125e-311, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
c9f8ccb3e9d616072212d2367cd650a9f674a97b | a800dff7c2068108a2502e034d0625b247a87b46 | /inst/scenarios/test-test-function.R | 94b0e8681ed950bc4531cec3dcff31d376feb22c | [] | no_license | selcukfidan47/testwhat | 9c4786e4654d404c54affd510a4735819a843b5c | a7c03fd6848881915fe6657d5c5c45db90392ce0 | refs/heads/master | 2021-01-16T22:36:14.340497 | 2016-02-15T11:00:07 | 2016-02-15T11:00:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,063 | r | test-test-function.R | scen <- list(list(type = "NormalExercise", student = "\n summary(c(1,2,3,4))", solution = "\n summary(c(1,2,3,4),c(1,2,3,4))\n dim(c(1,2,3))",
pass = list(test_basic_check_pass_1 = list(long = "basic function check - pass 1", sct = "test_function(\"summary\")")),
fail = list(test_basic_check_fail_1 = list(long = "basic function check - fail 1", sct = "test_function(\"dim\")"),
test_not_called_msg_fail_1 = list(long = "check if not_called_msg feedback is correct - fail 1", sct = "test_function(\"dim\", not_called_msg = \"This is the not called message\")",
message = "This is the not called message"))), list(type = "NormalExercise", student = "\n summary(c(1,2,3,4))\n dim(c(1,2,3))\n rep(1, 4)\n dnorm(1, 10, 5)\n mean(c(1,2,3), na.rm = FALSE)",
solution = "\n summary(c(1,2,3,4))\n dim(c(1,2,3,4))\n rep(1, 20)\n dnorm(1,10)\n mean(c(1,2,3), na.rm = TRUE)",
pass = list(test_arguments_pass_1 = list(long = "checks on the function arguments - pass 1", sct = "test_function(\"summary\", \"object\")"),
test_arguments_pass_2 = list(long = "checks on the function arguments - pass 2", sct = "test_function(\"rep\", \"x\")"),
test_arguments_pass_3 = list(long = "checks on the function arguments - pass 3", sct = "test_function(\"dnorm\", c(\"x\", \"mean\"))")),
fail = list(test_arguments_fail_1 = list(long = "checks on the function arguments - fail 1", sct = "test_function(\"dim\", \"x\")"),
test_arguments_fail_2 = list(long = "checks on the function arguments - fail 2", sct = "test_function(\"dnorm\", c(\"x\", \"mean\"), allow_extra = FALSE)"),
test_arguments_fail_3 = list(long = "checks on the function arguments - fail 3", sct = "test_function(\"mean\", c(\"x\", \"na.rm\"))"),
test_incorrect_msg_fail_1 = list(long = "check if incorrect_msg feedback is correct - fail 1", sct = "test_function(\"dim\", \"x\", incorrect_msg = \"This is the incorrect message\")",
message = "This is the incorrect message"))), list(type = "NormalExercise", student = "\n df.equiv <- data.frame(a = c(1, 2, 3), b = c(4, 5, 6))\n var(df.equiv)\n df.not_equiv <- data.frame(a = c(1, 2, 3), b = c(4, 5, 6))\n lm(df.not_equiv)",
solution = "\n df.equiv <- data.frame(c = c(1, 2, 3), d = c(4, 5, 6))\n var(df.equiv)\n df.not_equiv <- data.frame(c = c(7, 8, 9), d = c(4, 5, 6))\n lm(df.not_equiv)",
pass = list(test_equal_pass_1 = list(long = "difference between equal and equivalent for arguments - pass 1",
sct = "test_function(\"var\", \"x\")")), fail = list(test_equal_fail_1 = list(long = "difference between equal and equivalent for arguments - fail 1",
sct = "test_function(\"lm\", \"formula\")"), test_equal_fail_2 = list(long = "difference between equal and equivalent for arguments - fail 2",
sct = "test_function(\"var\", \"x\", eq_condition = \"equal\")"), test_equal_fail_3 = list(long = "difference between equal and equivalent for arguments - fail 3",
sct = "test_function(\"lm\", \"formula\", eq_condition = \"equal\")"))), list(type = "NormalExercise",
student = "\n df.equiv <- data.frame(a = c(1, 2, 3), b = c(4, 5, 6))\n var(df.equiv)\n df.not_equiv <- data.frame(a = c(1, 2, 3), b = c(4, 5, 6))\n lm(df.not_equiv)",
solution = "\n df.equiv <- data.frame(c = c(1, 2, 3), d = c(4, 5, 6))\n var(df.equiv)\n df.not_equiv <- data.frame(c = c(7, 8, 9), d = c(4, 5, 6))\n lm(df.not_equiv)",
pass = list(test_equal_pass_1 = list(long = "difference between equal and equivalent for arguments - pass 1",
sct = "test_function(\"var\", \"x\")"), test_equal_pass_2 = list(long = "difference between equal and equivalent for arguments - pass 2",
sct = "test_function(\"var\", eval = FALSE)"), test_equal_pass_3 = list(long = "difference between equal and equivalent for arguments - pass 3",
sct = "test_function(\"lm\", eval = FALSE)"), test_equal_pass_4 = list(long = "difference between equal and equivalent for arguments - pass 4",
sct = "test_function(\"var\", eval = FALSE, eq_condition = \"equal\")"), test_equal_pass_5 = list(long = "difference between equal and equivalent for arguments - pass 5",
sct = "test_function(\"lm\", eval = FALSE, eq_condition = \"equal\")")), fail = list(test_equal_fail_1 = list(long = "difference between equal and equivalent for arguments - fail 1",
sct = "test_function(\"lm\", \"formula\")"), test_equal_fail_2 = list(long = "difference between equal and equivalent for arguments - fail 2",
sct = "test_function(\"var\", \"x\", eq_condition = \"equal\")"), test_equal_fail_3 = list(long = "difference between equal and equivalent for arguments - fail 3",
sct = "test_function(\"lm\", \"formula\", eq_condition = \"equal\")"))), list(type = "NormalExercise",
student = "\n var.iden <- 3\n var(var.iden)\n var.equal <- 4\n mean(var.equal)", solution = "\n var.iden <- 3 + 4.4e-8\n var(var.iden)\n var.equal <- 4\n mean(var.equal)",
pass = list(test_identical_pass_1 = list(long = "difference between identical and equal for arguments - pass 1",
sct = "test_function(\"var\", \"x\", eq_condition = \"equal\")"), test_identical_pass_2 = list(long = "difference between identical and equal for arguments - pass 2",
sct = "test_function(\"mean\", \"formula\", eq_condition = \"equal\")"), test_identical_pass_3 = list(long = "difference between identical and equal for arguments - pass 3",
sct = "test_function(\"mean\", \"formula\", eq_condition = \"identical\")"), test_identical_pass_4 = list(long = "difference between identical and equal for arguments - pass 4",
sct = "test_function(\"var\", eval = FALSE, eq_condition = \"equal\")"), test_identical_pass_5 = list(long = "difference between identical and equal for arguments - pass 5",
sct = "test_function(\"mean\", eval = FALSE, eq_condition = \"equal\")"), test_identical_pass_6 = list(long = "difference between identical and equal for arguments - pass 6",
sct = "test_function(\"var\", eval = FALSE, eq_condition = \"identical\")"), test_identical_pass_7 = list(long = "difference between identical and equal for arguments - pass 7",
sct = "test_function(\"mean\", eval = FALSE, eq_condition = \"identical\")")), fail = list(test_identical_fail_1 = list(long = "difference between identical and equal for arguments - fail 1",
sct = "test_function(\"var\", \"x\", eq_condition = \"identical\")"))), list(type = "NormalExercise",
student = "\n var.a <- c(302, 305, 309)\n mean(var.a)\n var(var.a)", solution = "\n var.b <- c(302, 305, 309)\n mean(var.b)\n var(var.b)",
pass = list(test_eval_pass_1 = list(long = "checks whether the eval argument works properly - pass 1", sct = "test_function(\"mean\", \"x\")")),
fail = list(test_eval_fail_1 = list(long = "checks whether the eval argument works properly - fail 1", sct = "test_function(\"var\", \"x\", eval = FALSE)"))),
list(type = "NormalExercise", student = "\n mean(1:10, trim = 0.9)\n var(1:5, 6:10)", solution = "\n mean(1:10)\n var(1:5, 6:10)",
pass = list(test_allow_extra_pass_1 = list(long = "checks whether the allow_extra argument works properly - pass 1",
sct = "test_function(\"mean\", \"x\")"), test_allow_extra_pass_2 = list(long = "checks whether the allow_extra argument works properly - pass 2",
sct = "test_function(\"mean\", c(\"x\", \"trim\"), allow_extra = FALSE)"), test_allow_extra_pass_3 = list(long = "checks whether the allow_extra argument works properly - pass 3",
sct = "test_function(\"var\", c(\"x\", \"y\"), allow_extra = FALSE)")), fail = list(test_allow_extra_fail_1 = list(long = "checks whether the allow_extra argument works properly - fail 1",
sct = "test_function(\"mean\", \"x\", allow_extra = FALSE)"))), list(type = "NormalExercise", student = "\n mean(1:10, trim = 0.9, na.rm = FALSE)\n var(1:5, 6:10)",
solution = "\n mean(1:10)\n var(1:5, 11:15)", pass = list(test_ignore_pass_1 = list(long = "checks whether the ignore argument works properly - pass 1",
sct = c("test_function(\"mean\", \"x\", allow_extra = FALSE, ignore = c(\"trim\", ", " \"na.rm\"))")),
test_ignore_pass_2 = list(long = "checks whether the ignore argument works properly - pass 2", sct = "test_function(\"var\", \"x\", allow_extra = FALSE, ignore = \"y\")")),
fail = list(test_ignore_fail_1 = list(long = "checks whether the ignore argument works properly - fail 1",
sct = "test_function(\"mean\", \"x\", allow_extra = FALSE, ignore = \"na.rm\")"), test_ignore_fail_2 = list(long = "checks whether the ignore argument works properly - fail 2",
sct = "test_function(\"mean\", \"x\", allow_extra = FALSE, ignore = \"na.rm\")"))), list(type = "NormalExercise",
student = "\n a <- \"test\"\n mean(1:10, trim = 0.9, na.rm = FALSE)\n mean(1:5, trim = 0.8)\n mean(1:10, trim = 0.9)",
solution = "\n a <- \"test\"\n mean(1:10, trim = 0.9)\n mean(1:9)\n mean(1:10)", pass = list(test_index_old_pass_1 = list(long = "checks whether the index argument works properly - pass 1",
sct = "test_function(\"mean\", \"x\", index = 2)"), test_index_old_pass_2 = list(long = "checks whether the index argument works properly - pass 2",
sct = "test_function(\"mean\", c(\"x\", \"trim\"))")), fail = list(test_index_old_fail_1 = list(long = "checks whether the index argument works properly - fail 1",
sct = "test_function(\"mean\", \"x\", index = 3)"), test_index_old_fail_2 = list(long = "checks whether the index argument works properly - fail 2",
sct = "test_function(\"mean\", c(\"x\", \"trim\"), allow_extra = FALSE, index = 2)"), test_index_old_fail_3 = list(long = "checks whether the index argument works properly - fail 3",
sct = "test_function(\"mean\", \"x\", allow_extra = FALSE, index = 4)"))))
|
bb0c9c440ccf5e1c585ff4c4d796ed2461b5ec85 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/EnergyOnlineCPM/examples/maxEnergyCPMv.Rd.R | 39088f4c5706ade91c33b52e8e1cd0aea6025e0b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,063 | r | maxEnergyCPMv.Rd.R | library(EnergyOnlineCPM)
### Name: maxEnergyCPMv
### Title: Nonparametric Multivariate Control Chart based on Energy Test
### Aliases: maxEnergyCPMv
### Keywords: Phase II Statistical Process Control Change Point Model
### Energy Statistic
### ** Examples
# simulate 300 length time series
simNr=300
# simulate 300 length 5 dimensonal standard Gaussian series
Sigma2 <- matrix(c(1,0,0,0,0, 0,1,0,0,0, 0,0,1,0,0, 0,0,0,1,0, 0,0,0,0,1),5,5)
Mean2=rep(1,5)
sim2=(mvrnorm(n = simNr, Mean2, Sigma2))
# simulate 300 length 5 dimensonal standard Gaussian series
Sigma3 <- matrix(c(1,0,0,0,0, 0,1,0,0,0, 0,0,1,0,0, 0,0,0,1,0, 0,0,0,0,1),5,5)
Mean3=rep(0,5)
sim3=(mvrnorm(n = simNr, Mean3, Sigma3))
# construct a data set of length equal to 35.
# first 20 points are from standard Gaussian.
# second 15 points from a Gaussian with a mean shift with 555.
data1=sim6=rbind(sim2[1:20,],(sim3+555)[1:15,])
# set warm-up number as 20, permutation 200 times, significant level 0.005
wNr=20
permNr=200
alpha=1/200
maxEnergyCPMv(data1,wNr,permNr,alpha)
|
0492fc27620b305d15b4ef4d57d7f87014559e2f | 8ddc05161cfaebe13c6e84a15919a450a6c69e2a | /libraries.R | 24a4dc6c9fa281596e74b6f00cdde7b776d18e03 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | alphaapina/RCoinbaseProAPI | c2c34cafdcb1c885acc6e90c7ce93a368a6f4db5 | 349489c26c12dff5fe4a91b5c7385296495dbdc7 | refs/heads/main | 2023-04-23T01:47:33.005865 | 2021-05-15T11:56:59 | 2021-05-15T11:56:59 | 367,613,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | libraries.R | # Required libraries
library(httr)
library(jsonlite)
library(base64enc)
library(digest)
library(quantmod)
library(websocket)
|
129da6d3ac60db7b3fba5104502f39767d22aee4 | 43448ea3e38ae17a43e2eb6c36aa1544b4b074b4 | /Step_0_init.R | 74b8d3024ba8f7b1498b26c63a473f3cef193074 | [] | no_license | yuliasidi/Binomial_PE_Progs | 18175abc8ccd7135686750bebd205f5751431239 | 9e325b76db5d4d1cee1c877b5ed89ea7d2ab83f8 | refs/heads/master | 2020-03-26T16:49:47.366172 | 2019-02-21T19:13:27 | 2019-02-21T19:13:27 | 145,125,922 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | Step_0_init.R | #!/home/statsadmin/R/bin/Rscript
# Step 0: shared initialisation run before all the simulation programs.
# Loads the packages used downstream, sources the helper functions, and
# defines the global simulation constants.

# TRUE/FALSE spelled out rather than T/F: T and F are ordinary bindings
# that can be reassigned, so relying on them is fragile.
library(tidyr, warn.conflicts = FALSE, quietly = TRUE)
library(dplyr, warn.conflicts = FALSE, quietly = TRUE)
library(ggplot2, warn.conflicts = FALSE, quietly = TRUE)
library(purrr, warn.conflicts = FALSE, quietly = TRUE)
library(reshape2, warn.conflicts = FALSE, quietly = TRUE)
library(mice, warn.conflicts = FALSE, quietly = TRUE)

# Project helper functions (expected in the working directory).
source("PE_Bin_PD_Functions.R")

# One-sided significance level and type-II error rate used throughout.
alpha <- 0.025
beta <- 0.1
# Number of simulated studies.
n.sim <- 10000
# Number of imputations for multiple imputation; assigned with <- for
# consistency with the other constants (the original used `=`).
num.mi <- 5
|
442c221f1555e86db062261aff2fcb0352068c68 | c78b8c9cbc5324f2c3cca5360338819d9e41204f | /simulate.R | 97678f901bf25616c775efbdeb7e5dcb87042baa | [] | no_license | robsonedwards/monte-carlo | 2d6b047dc090141f503ad23f83c4e1275a0e950e | af0cb0eea51195f77a8427c061ad020ea6e09b0a | refs/heads/master | 2020-04-07T13:32:04.047688 | 2018-11-26T11:48:06 | 2018-11-26T11:48:06 | 158,410,926 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,946 | r | simulate.R | #
# Hard dependencies: library() (unlike require()) fails loudly when a
# package is missing, which is what we want here.
library(dslabs)
library(dplyr)
library(ggplot2)
library(magrittr)
# Fix the RNG seed so the sampling/simulation below is reproducible.
set.seed(4113)
################################ Data Wrangling ################################
# Build the two analysis tables from the dslabs 2016 US election data:
#   polls   -- one row per poll with its Clinton-minus-Trump error
#   results -- official state-level vote shares plus a national "U.S." row
data("polls_us_election_2016")
polls_us_election_2016 %>%
  # Ignore data from individual congressional districts of Maine and Nebraska
  filter(!grepl("Maine CD-", state) & !grepl("Nebraska CD-", state) ) %>%
  mutate(clinton_margin = rawpoll_clinton - rawpoll_trump) %>%
  mutate(duration = enddate - startdate) %>%
  # Remove unneeded columns
  select(-adjpoll_clinton, -adjpoll_trump, -adjpoll_johnson, -adjpoll_mcmullin,
         -rawpoll_clinton, -rawpoll_trump, -rawpoll_johnson, -rawpoll_mcmullin,
         -startdate, -population) ->
  polls
# Rebuild the factor to drop the CD levels removed by the filter above.
polls$state <- as.factor(as.character(polls$state))
results_us_election_2016 %>%
  select(-electoral_votes) ->
  results
# Create a result row for U.S. as a whole rather than an individual state
U.S. <- c(clinton = 65844610, trump = 62979636, others = 7804213) #Source: Time
U.S. <- round( U.S. / sum(U.S.) * 100, digits = 1)
# NOTE(review): hard-codes row 52 (the data has 51 rows: 50 states + DC) and,
# because the replacement vector is character, coerces every column to
# character -- hence the explicit type re-conversions below.
results[52,] <- c("U.S.", U.S.["clinton"], U.S.["trump"], U.S.["others"])
# Change datatype of cols in results to agree with cols in polls
results$state <- as.factor(results$state)
results$clinton <- as.numeric(results$clinton)
results$trump <- as.numeric(results$trump)
results$others <- as.numeric(results$others)
# Create error column. Error is the amount the poll margin exceeded the result
# margin in the direction of favouring Clinton. (see explanation in report)
results %<>% mutate(clinton_margin = clinton - trump)
# Attach each poll's official state margin. (A match()-based lookup would
# avoid this row-by-row loop; the result is identical.)
for(i in 1:nrow(polls)){
  polls$result[i] = results$clinton_margin[results$state == polls$state[i]]
}
polls %>%
  mutate(error = clinton_margin - result) %>%
  mutate(error2 = error ^2) %>%
  select(-result, -clinton_margin) -> #remove unneeded columns
  polls
polls$enddate <- as.numeric(polls$enddate) - as.numeric(as.Date("2016-11-08"))
# Now enddate is the number of days before the election, this makes intercept
# terms in the linear models we're about to derive much more interpretable
# (enddate is <= 0: more negative means further from election day).
# Remove 43 outliers that are > 3 sd's from the mean. (top 1% worst polls)
polls %<>% filter(abs(polls$error - mean(polls$error)) < 3 * sd(polls$error) )
########################## Exploratory Data Analysis ###########################
# Quick visual and correlation checks of poll error against poll end date.
polls_sample = polls[sample(1:nrow(polls), size = 150, replace = F),]
plot(polls_sample$enddate, polls_sample$error2, pch = '+')
lm(polls$error2 ~ polls$enddate)
lm(polls$error ~ polls$enddate)
# Restrict to pollsters whose FiveThirtyEight grade contains an "A".
goodpolls <- filter(polls, grepl("A", grade))
plot(goodpolls$enddate, goodpolls$error2, pch = '+')
lm(goodpolls$error2 ~ goodpolls$enddate)
lm(goodpolls$error ~ goodpolls$enddate)
# No evidence of correlation between date and poll error in general
cor.test(polls$error, polls$enddate)
cor.test(abs(polls$error), polls$enddate)
cor.test(polls$error2, polls$enddate)
# Some evidence of correlation for higher-rated polls, for some error measures
cor.test(goodpolls$error, goodpolls$enddate)
cor.test(abs(goodpolls$error), goodpolls$enddate)
cor.test(goodpolls$error2, goodpolls$enddate)
################################## Simulation ##################################
simulate_and_test <- function(data_subset, n, effect_size, test){
# NOTE test must take only x and y and return p value
# simulate
mean_error = mean(data_subset$error)
mean_enddate = mean(data_subset$enddate)
sd_error = sd(data_subset$error)
sim <- data.frame(enddate = sample(as.integer(data_subset$enddate), n * 100, replace = TRUE))
sim$error <- rnorm(n * 100, sd = sd_error, mean = (sim$enddate - mean_enddate) * effect_size + mean_error)
# test
pvals <- 1:100
apply_test <- function(i){
range <- (i * 100 - 99):(i * 100)
return(test(sim$error[range], sim$enddate[range]))
}
pvals <- lapply(pvals, apply_test)
pvals <- unlist(pvals)
result <- sum(pvals < 0.05) # size or power.
return(result / 100)
}
pearson <- function(x, y){
test <- cor.test(x, y, alternative = "less", method = "pearson")
return(unname(test["p.value"]) )
}
spearman <- function(x, y){
test <- cor.test(x, y, alternative = "less", method = "spearman")
return(unname(test["p.value"]))
}
### Change to true to get spammed. Unfortunately I wasn't able to implement a
# more readable version of this in time.
if(FALSE){
for(method in c(pearson, spearman)){
for(effect_size in c(0, -0.001, -0.01, -0.05)){
for(g in levels(polls$grade)){
data <- polls[which(polls$grade == g),]
for(n in c(100, 1000, 4000)){
print(as.character( c("Effect:", effect_size,
". Grade:", g, ". n: ", n, ". Result: ",
simulate_and_test(data, n, effect_size, method))))
}
}
}
}
}
for(method in c(pearson, spearman)){
for(effect_size in c(0, -0.001, -0.01, -0.05)){
print(unlist(c(effect_size, simulate_and_test(data, 1000, effect_size, method))))
}
}
|
67718dcf7bbdc40ded2009695be784656df2081a | 89c34b67e23640c688c7734d1a513ac7b4b9ea79 | /Estatistica_Inferencial_Regressao_Linear_Parte1/Licao12-IEL-Exercicios-background.R | c974419a9f67b0da5cac9725ace26e34adade9e7 | [
"CC-BY-4.0",
"X11",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | oushiro/Introducao_a_Estatistica_para_Linguistas_Exercicios | 87c0aee87840503ef3d462d2a257b01c2b022ea6 | 8ce39947bc5036c68c6eff414df8a9b4ab32a6f0 | refs/heads/master | 2023-04-05T23:26:14.079812 | 2021-05-02T21:57:49 | 2021-05-02T21:57:49 | 344,640,358 | 0 | 1 | NOASSERTION | 2021-04-03T22:26:51 | 2021-03-04T23:47:26 | R | UTF-8 | R | false | false | 191 | r | Licao12-IEL-Exercicios-background.R | # Script to display file to swirl user
display_swirl_file("Licao12-IEL-Exercicios.R", "Introducao a Estatistica para Linguistas Exercicios", "Estatistica Inferencial Regressao Linear Parte1") |
640426c603234f66dc9ecd9fcfa1a070bf0b0a7d | 9213f0339c60c788fe9d9f1edce6bf48d183764f | /ANALYSES/BAYENV/morpho_data/boxplotCD.R | 4e2399f44ab9b2b926502b2971887abb2932ee14 | [] | no_license | HullUni-bioinformatics/Diplotaxodon_twilight_RAD | d47ad9d29db831ee868ef95e59aeab445d921a3d | 7983db82847072fbb56193b522e57078e5b7e49a | refs/heads/master | 2021-01-12T05:52:29.795840 | 2017-07-31T12:00:27 | 2017-07-31T12:00:27 | 77,223,728 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 934 | r | boxplotCD.R | Di_1 <- c(11.0,11.8,11.8,10.5,9.2,10.3,8.6,10.4,10.4,8.3,9.1,8.6,9.7,9.3,8.3,10.2,9.2,9.6,10.9,10.2,9.8,12.7,10.6,9.4,8.9,6.9,12.0,9.4,8.5,10.3,11.1,9.4,9.7,10.1,10.0,8.8,8.9,9.1,8.8,8.7,8.9,12.7,9.4,8.5,10.2,11.6,8.8,9.9,9.9,11.3)
Di_2 <- c(9.0,9.2,9.1,9.1,8.6,8.2,10.2,9.9,8.7,10.1,8.1,10.6,9.2,8.9,8.8,9.5,10.0,10.4,9.1,9.6,10.0,9.5,9.8,9.6,9.7,8.6,8.4,10.5,9.0,8.2,10.0,9.2,8.5,10.2,9.6,9.3,8.4,7.0,10.1,9.3,10.1,9.4,8.9,9.6,8.5,9.3,10.4,8.5,9.4,10.1,9.7)
Di_4 <- c(8.8,8.0,9.6,9.3,10.5,11.0,9.8,6.7,10.2,10.4,8.9,8.7,6.9,8.5,8.0,8.0,7.5,9.1,9.5,9.8,8.3,10.9,9.0,9.8,9.2,11.3,8.3,8.9,8.9,8.0,9.9,9.5,9.1,9.5,10.6,9.1,10.8,8.9,9.2,7.7,8.4,9.8,9.3,9.3,10.0,9.7,8.7,8.9,8.5,10.0,9.9,9.3,9.1,9.6)
Di_5 <- c(8.6,10.7,7.9,9.2,10.3,9.6,9.9,8.9,9.5,9.7,8.6,8.1,11.4,8.7,9.7,8.7,9.6,10.0,9.3,9.0)
svg(filename = 'morpho_data/boxplotCD.svg')
boxplot(Di_1, Di_2, Di_4, Di_5, names = c('Di_1', 'Di_2', 'Di_4', 'Di_5'), main = 'CD')
dev.off()
|
791299179b4dc552ea28260c1b432cf570e98bab | 607b47cdf7b5932b0492406351d8cde0c8b7a162 | /extr_ala_points.R | 5d0c12f51170c21f76088d9a6cadccba0cd694e0 | [] | no_license | MartinVocko/Ensemble-data-manipulation | e7bff2ecf4f3fc1eda72591e003a0969021f4540 | 449649379725abdd1a5edb9c31670ca6ce468a49 | refs/heads/master | 2022-02-22T06:57:15.975728 | 2019-10-10T15:01:05 | 2019-10-10T15:01:05 | 116,383,173 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,808 | r | extr_ala_points.R | setwd("~/Plocha/DATA/Aladin_4ext")
folds=list.files("/home/vokounm/Plocha/Data_CHMU/Aladin16")
#fls = dir(pattern = '*grb')
template = 'ncl_convert2nc XXX'
#konverze do nc
for (j in 1:length(folds)){
setwd(file.path("~/Plocha/Data_CHMU/Aladin16",folds[j])) #projizdi jednotlive slozky
fls = dir(pattern = '*grb')
for (i in 1:length(fls)){
exe = paste('ncl_convert2nc', fls[i]) #konvertuje y .grb do .nc
system(exe,wait=FALSE)
}
}
####################################################################################################
#extrakce hodnot
setwd("~/Plocha/DATA/Aladin_4ext")
temp = list.files(pattern="*.nc")
TAB = data.table()
sez = data.table(dir(pattern="nc"))
#sez = sez[1:10000, ]
sez[, V2:= gsub('SURFPREC\\_TOTAL\\_|\\.nc', '', V1)]
sez[, AHEAD:=sapply(strsplit(V2, '_'), function(x)x[1])]
sez[, TIME:=sapply(strsplit(V2, '_'), function(x)x[2])]
sez[, DTM:=as.POSIXct(TIME, format = '%Y%m%d%H')]
y = raster(temp[i], varname = "g3_lat_0")
x = raster(temp[i], varname = "g3_lon_1")
r = raster('SURFPREC_TOTAL_06_2011040100.nc')
y = raster('SURFPREC_TOTAL_06_2011040100.nc', varname = "g3_lat_0")
x = raster('SURFPREC_TOTAL_06_2011040100.nc', varname = "g3_lon_1")
plat = rasterToPoints(y)
plon = rasterToPoints(x)
lonlat = cbind(plon[, 3], plat[, 3])
ll = SpatialPoints(lonlat)
ex = gIntersects(ll, vp, byid = TRUE) #intersect a extract hodnot pro polygon
exx = apply(ex, 1, which)
dtms = sez[, unique(DTM)]
dt = dtms[1]
TAB = list()
for (j in 1:length(dtms)){
sez1 = sez[DTM==dtms[j]]
s = stack(sez[DTM==dtms[j], V1], varname = 'A_PCP_GDS3_HTGL' )
for (i in 1:nrow(sez1)){
r= s[[i]] #raster(temp[i], varname='A_PCP_GDS3_HTGL')
names(r) = 'Total.precipitation'
# y = raster(temp[i], varname = "g3_lat_0")
# x = raster(temp[i], varname = "g3_lon_1")
#r = raster('SURFPREC_TOTAL_06_2011040100.nc')
#y = raster('SURFPREC_TOTAL_06_2011040100.nc', varname = "g3_lat_0")
#x = raster('SURFPREC_TOTAL_06_2011040100.nc', varname = "g3_lon_1")
# plat = rasterToPoints(y)
# plon = rasterToPoints(x)
# lonlat = cbind(plon[, 3], plat[, 3])
#
# ll = SpatialPoints(lonlat)
v = rasterToPoints(r)
dta = SpatialPointsDataFrame(ll, data.frame(v))
# ex = gIntersects(dta, vp, byid = TRUE) #intersect a extract hodnot pro polygon
# exx = apply(ex, 1, which)
#
tab=data.table(DTM = dtms[j], AHEAD = sez1[i,AHEAD], OBL = vp$Id, PR = sapply(exx, function(x) mean(dta[x, ]$Total.precipitation)))
# TAB[[as.character(dtms[j])]] = tab
TAB[[length(TAB)+1]] = tab
res = do.call(rbind, TAB)
#tab[, EID:= 1]
#TAB = rbind(TAB, tab[, .(OBL, PR,EID, SID = 'cf', TIME, AHEAD)])
}}
#res = do.call(rbind, TAB)
saveRDS(TAB, 'aladin4.RDS')
tab
|
460ee6ed43abe30d2a7618193d64a25a7e28cb4f | 08bf784334a65b0d0aa09fd3e3384fdb31cd8864 | /sup_figures/EDF2.R | dd51b46cd70fca42dc3b86d182e3567ae7e3b7ec | [] | no_license | lfresard/blood_rnaseq_rare_disease_paper | 77386fcf64ba3d82c6adbf9fb266b56c5438668e | fa73fe0944cb6a151dad37eed2d245436d1d510f | refs/heads/master | 2021-06-18T22:54:53.452665 | 2019-09-23T19:04:01 | 2019-09-23T19:04:01 | 147,561,692 | 29 | 9 | null | null | null | null | UTF-8 | R | false | false | 1,355 | r | EDF2.R | # =======================================================================================================
# This is a script for generating EDF2: Correction for batch effects - Expression data.
#
# Note that the final figure was generated by using inkscape by combining figures from this script for visualization purposes
#
# =======================================================================================================
#!/bin/R
# Read in input data
edf2c=read.table("EDF2c_data.txt", sep="\t", header=T)
## Panel A
# PCA plot of first 2 PCs uncorrected data. PC1 and PC2 data available
## Panel B
# PCA plot of first 2 PCs corrected data. PC1 and PC2 data available
## Panel C
edf2c_plot <- ggplot(data=edf2c,
aes(x=Var2, y=as.factor(Var1), fill=value)) +
geom_tile(color="white") +
scale_fill_gradient2(low="blue", high="red", mid="white", midpoint=0, limit=c(-1,1), space="Lab", name="Pearson\nCorrelation") +
theme_minimal() +
coord_fixed() + labs(x="Covariates", y="Surrogate variable") +
theme(legend.position="right",
axis.text.x=element_text(angle=45, hjust=1),
axis.text=element_text(size=11),
axis.title=element_text(size=14)) +
annotate("segment", x=-Inf, xend=Inf, y=-Inf, yend=-Inf, size=1) +
annotate("segment", x=-Inf, xend=-Inf, y=-Inf, yend=Inf, size=1)
|
a57d1726700ce36a4f3a75715376f7bda2d1461a | 92e2c5fc8c8360f17cd861e017ae873d4fe76477 | /Project1/plot3.R | dac64f6830b107651680eb8c2f044c0cb221639f | [] | no_license | DesartB/Exploratory-Data-Analysis | f534e2dd02346ec9fbd1630d6ee79fe4e025153f | 09a3fb74c6767312bed2becb5e467208dae31887 | refs/heads/master | 2021-01-22T06:58:37.328692 | 2015-09-11T15:50:42 | 2015-09-11T15:50:42 | 42,309,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,380 | r | plot3.R | # This script creates plot 3 of the Course Project 1 of the "Exploratoy Data Analysis" course.
# It creates a PNG file of 480x480 pixels called plot3.png and representing Plot 3.
# Author: Bruno Desart
# Date : 10 September 2015
# Reading the file
data <- read.table("exdata_data_household_power_consumption/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrow = 2075260)
# Date and time conversion into Date/Time classes in R
dtchar <- paste(data$Date, data$Time, sep = " ")
dt <- strptime(dtchar, "%d/%m/%Y %H:%M:%S")
# Subset data from the dates 2007-02-01 and 2007-02-02
fromdate <- strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S")
todate <- strptime("2007-02-02 23:59:59", "%Y-%m-%d %H:%M:%S")
ok <- (dt >= fromdate & dt < todate)
data <- cbind(data,Date_R = as.Date(data$Date, "%d/%m/%Y"), Time_R = dt, OK = ok)
datas <- data[data$OK == TRUE,]
datas2 <- na.omit(datas)
# Plot 3 saved into the file name plot3.png
png("plot3.png", width = 480, height = 480, units = "px")
with(datas2, plot(Time_R, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l"))
with(subset(datas2), lines(Time_R, Sub_metering_2, col = "red"))
with(subset(datas2), lines(Time_R, Sub_metering_3, col = "blue"))
legend("topright", lty = "solid", col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off() |
ce17379b406709c6d2afcd39a1fe1615c78f029c | 41b079970f142ed6439a07b896719718b1fb4fff | /man/compute.rina.Rd | 02f3013e06b1fa9d500fb22fd178ba2154f9a6bb | [] | no_license | quantrocket/strategery | 5e015e75d874c6ab16e767861e394350bd825055 | a7b6aee04f3f95b71e44c2c9f3c9a76390c21e52 | refs/heads/master | 2021-01-18T07:36:31.291056 | 2014-06-17T21:54:30 | 2014-06-17T21:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 118 | rd | compute.rina.Rd | \name{compute.rina}
\alias{compute.rina}
\title{Some Title}
\usage{
compute.rina(R)
}
\description{
Some Title
}
|
35a36e87c61b11929e539d2d7717599b09ccea26 | 33fd29142f89a0b31132081990b34c2e52d84601 | /test/simple/loops.R | c0234c464f8076b45fc04e73ed24f22aa5c01347 | [] | no_license | gpfarina/sym-core-R | 7e18e5aecb238710429bd37fd0f1fdd7c2e4321d | 6cd5a445d1a7c28480640f97a03a388111af0090 | refs/heads/master | 2020-05-02T10:41:01.725872 | 2019-03-27T02:32:52 | 2019-03-27T02:32:52 | 177,904,211 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 46 | r | loops.R |
a <- 0
for (i in 1:5) {
a <- a + i
}
a
|
406a19062dd1ec8c36d76c249104ddf3122d14fd | a2aa09bf660ffcd4f89224a25c38aca4d72ff607 | /code/tblCANC_checks.R | 09806e937114b359fb197e6a6e1a2caced05b133 | [] | no_license | ldriggen-iu/qa-checks-r | 52efe0aef3fae92f3721b4e827497a7797c3ed5b | 225a80f868da636643ca297572a2095e3cc70c12 | refs/heads/master | 2020-12-24T16:05:54.936000 | 2017-02-21T13:48:36 | 2017-02-21T13:48:36 | 52,459,025 | 0 | 0 | null | 2016-02-24T16:57:22 | 2016-02-24T16:57:22 | null | UTF-8 | R | false | false | 4,757 | r | tblCANC_checks.R | #############################################################
#
# Program: tblCANC_checks.R
# Project: IeDEA
#
# PIs: Constantin Yiannoutsos, PhD; Stephany Duda, PhD; Beverly Music, MS
# Programmer: Larry Riggen, MS
# Purpose: Read in IeDEAS standard and write
# data queries
#
# INPUT: "tblCANC.csv"
# OUTPUT:
#
# Notes: As long as the working directory in "setwd" is
# correctly pointing to the location of tblCANC.csv,
# then this code should run smoothly, generating
# a listing of data queries.
#
# Created: 26 February 2016
# Revisions: ???? table was drastically revised IeDEA_DES_Proposed_Additions_2016_Nov_14_V14.docx
# Need to check with Bev and Stephany on how to implement "Location coding System: ICD10, ICD9, other systems,
# e.g. NA-ACCORD-short list (suggest using NA-ACCORD-short list: NA-ACCORD_Clinical_DxICD9_Mapping Update Sept 2014.xls)"
# and "Histology coding system: ICD-O-3, other systems, e.g. NA-ACCORD-short list, None (suggest using NA-ACCORD-short list:
# NA-ACCORD_Cancer_Registry_Dx_Mapping Update Sept 2014.xls)" code verification
#
#############################################################
## NAME OF TABLE FOR WRITING QUERIES
tablename <- "tblCANC"
## NAMES EXPECTED FROM HICDEP+/IeDEAS DES
## Modified for Harmonist - Look for a file containing the expected names.
## If the file of expected names doesn't exist, use the defaults for the table
if (file.exists("./input/Specification_of_required_and_optional_columns.tsv")) {
column_specs<-read.table("./input/Specification_of_required_and_optional_columns.tsv",header = TRUE, sep="\t", stringsAsFactors=FALSE)
# get the specs for tblART
expectednames<-unlist(strsplit(gsub('\"','',column_specs[column_specs$tbl==tablename,]$required_columns),','))
acceptablenames<-c(expectednames,unlist(strsplit(gsub('\"','',column_specs[column_specs$tbl==tablename,]$optional_columns),',')))
}
if (!(file.exists("./input/Specification_of_required_and_optional_columns.tsv"))) {
expectednames <- c("patient","canc_d","loc_code","loc_code_sys","hist_code","hist_code_sys")
acceptablenames <- c(expectednames,"canc_d_a")
}
################### QUERY CHECKING BEGINS HERE ###################
## CHECK FOR EXTRA OR MISSING VARIABLES
extravar(acceptablenames,canc)
missvar(expectednames,canc)
## PRIOR TO CONVERTING DATES, CHECK THAT THE TYPE IS APPROPRIATE
if(exists("canc_d",canc)) {(notdate(canc_d,canc,id=patient))}
## CONVERT DATES USING EXPECTED FORMAT (will force NA if format is incorrect)
if(exists("canc_d",canc)){canc$canc_d <- convertdate(canc_d,canc)}
## CHECK FOR MISSING DATES
if(exists("canc_d",canc)) {missingvalue(canc_d,canc)}
## CHECK FOR DATES OCCURRING IN THE WRONG ORDER
if(exists("basic") && exists("birth_d",basic) && exists("canc_d",canc)){
bascanc <- merge(canc,with(basic,data.frame(patient,birth_d)),all.x=TRUE)
bascanc$birth_d <- convertdate(birth_d,bascanc)
outoforder(birth_d,canc_d,bascanc,table2="tblBAS")
}
if(exists("ltfu") && exists("death_d",ltfu) && exists("canc_d",canc)){
ltfucanc <- merge(canc,with(ltfu,data.frame(patient,death_d)),all.x=TRUE)
ltfucanc$death_d <- convertdate(death_d,ltfucanc)
outoforder(canc_d,death_d,ltfucanc,table2="tblLTFU")
}
if(exists("ltfu") && exists("l_alive_d",ltfu) && exists("canc_d",canc)){
ltfucanc <- merge(canc,with(ltfu,data.frame(patient,l_alive_d)),all.x=TRUE)
ltfucanc$l_alive_d <- convertdate(l_alive_d,ltfucanc)
outoforder(canc_d,l_alive_d,ltfucanc,table2="tblLTFU")
}
## CHECK FOR DATES OCCURRING TOO FAR IN THE FUTURE
if(exists("canc_d",canc)){futuredate(canc_d,canc)}
## CHECK FOR Invalid location codes (only for NA-ACCORD-short list at this time)
if(exists("loc_code_sys",canc) && exists ("loc_code",canc) && canc$loc_code_sys=="NA-ACCORD-short list") {
badcodes(loc_code,c(20,39,9,33,8,1,12,62,64,65,51),canc)
}
## CHECK FOR DUPLICATE PATIENT IDs
for(i in unique(canc$loc_code)[!is.na(unique(canc$loc_code))]){
canc_sub <- canc[canc$loc_code %in% i,]
queryduplicates(patient,canc_sub,date=canc_d,subsettext=paste("&loc_code=",i,sep=""))
}
## ???? need some help from Bev and Stephany on how to code histology.
## CHECK FOR UNEXPECTED CODING
#canc_type_codebook <- read.csv("resource/canc_type_codebook.csv",header=TRUE,stringsAsFactors = FALSE,na.strings="")
#badcodes(canc_d_a,c("<",">","D","M","Y","U"),canc)
#badcodes(canc_type,canc_type_codebook$code,canc)
#badcodes(canc_cert,c(1,2,9),canc)
#badcodes(canc_extent,c(1,2,9),canc)
#badcodes(canc_tx,c(1,2,3,4,5,9),canc)
# Verify patient exists in tblBAS
if (exists("basic")) {missrecord(patient,canc,basic)}
################### QUERY CHECKING ENDS HERE ###################
|
bb44bae1766d1480d99cca3589da92fbf72ea292 | 29585dff702209dd446c0ab52ceea046c58e384e | /secr/R/sighting.chat.R | 6fd7aa68548e4d8d2d06da00a0957de98dfce1c1 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 480 | r | sighting.chat.R | ## 2015-10-27
## not published; needs to copy many more arguments from 'object'
sighting.chat <- function (object, nsim = 1000) {
if (!inherits(object, 'secr'))
stop ("requires fitted secr model")
CH <- object$capthist
if (is.null(Tu(CH)) & is.null(Tm(CH)))
stop("no unmarked or nonID sighting data in model")
secr.fit (CH, mask = object$mask, fixed = object$fixed, details=list(nsim=nsim),
trace = FALSE, start = object)
} |
0e5d0eb2c5e518fd64b6d97a9acb49f92dca3417 | 3f4a537acdb5d3880d1564586af0baac6e53fcaa | /man/dtw.Rd | 2476e1b6a4122eecd080ed2b5cc2ba1f251f5b00 | [] | no_license | J-Moravec/simpleDTW | 9e0453ef770f8e7405abd73ad7027c9d92d83521 | 71b90976c5791ec26911072cfa7a2024f0b93f65 | refs/heads/master | 2021-01-20T06:30:09.886780 | 2017-05-01T00:46:00 | 2017-05-01T00:46:00 | 89,886,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 382 | rd | dtw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtw.R
\name{dtw}
\alias{dtw}
\title{Performs dynamic time warping}
\usage{
dtw(time_series_1, time_series_2)
}
\arguments{
\item{time_series_1}{first time series}
\item{time_series_2}{second time series}
}
\value{
gods now what
}
\description{
Performs dynamic time warping for two input time series.
}
|
fbb783214caf4687b84823483284117affa8d571 | 1fcf0b4d765d40b3d527473d5f4769ca8b286118 | /cachematrix.R | 65cd9cc140aeadba110925835477865ff1fda007 | [] | no_license | Bass-2015/ProgrammingAssignment2 | 817349e1dfc61972b494c4e0c97e84ab5ae604bc | 1ac30737a1fc0496e984522af5691a3f563955ed | refs/heads/master | 2020-12-13T09:02:36.021703 | 2015-10-04T16:50:16 | 2015-10-04T16:50:16 | 43,266,326 | 0 | 0 | null | 2015-09-27T21:41:10 | 2015-09-27T21:41:08 | null | UTF-8 | R | false | false | 1,505 | r | cachematrix.R | ## Matrix Inversion is usually a costly computation and there may be some
## benefit to caching the Inverse of a Matrix rather than compute it
# repeatedly. Two functions - makeCacheMatrix and cacheSolve - can be used
# to cache the Inverse of a Matrix.
## This function creates a Matrix object that can cache its Inverse.
## As described in the Coursera R-Programming course (offered by JHU), this
## function does 4 tasks, as follows:
## 1. Set the value of the Matrix
## 2. Get the value of the Matrix
## 3. Set the value of the Inverse of the Matrix
## 4. Get the value of the Inverse of the Matrix
makeCacheMatrix <- function(x = matrix()) {
invers <- NULL
set <- function(y) {
x <<- y
invers <<- NULL
}
get <- function() x
setinverse <- function(inverse) invers <<- inverse
getinverse <- function() invers
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the Inverse of the Matix returned by makeCacheMatrix
## above. If the Inverse has already been calculated (and the Matrix has not)
## changed), the the cacheSolve function would retrieve the Inverse from the
## cache.
cacheSolve <- function(x, ...) {
invers <- x$getinverse()
if(!is.null(invers)) {
message("getting cached data")
return(invers)
}
data <- x$get()
invers <- solve(data)
x$setinverse(invers)
## Return a matrix that is the inverse of 'x'
invers
}
|
cc4381c417474ab32a6eebebe82447a598ed0fb6 | 53851868e25801999033fe8d7c3150b73e7dde65 | /R/aegean/calculatedistancefile.r | 94623efe5bc4391deb1538724a0c86a53d639962 | [] | no_license | xuzhikethinker/PRG | bb7e75d27f9da7611d3c26f10bb083ec69025487 | 25b971f6e65ef13f80d3a56732e4bb6d4502bb55 | refs/heads/master | 2016-09-06T02:27:18.042949 | 2013-03-27T18:17:53 | 2013-03-27T18:17:53 | 9,262,600 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 129 | r | calculatedistancefile.r | calculateDistanceFile<- function(distanceFileName,sitedf){
print("--- calculateDistanceFile routine incomplete",quote=F);
}
|
27c335b4914a1d5980e1e9868274f049c31857a6 | 86d44ee9892df7d76e4d026c52147ac497995d5a | /MouseFunctions.R | 47d80a8a43f8461760b2b099813e0fef0e181fde | [] | no_license | russell-ligon/MouseTracking | 8da964b5e192c2d2f23e754e09e91a7ea4ebfec3 | adc521749e2f976f2b7467064135c7738dc3bada | refs/heads/master | 2021-07-21T02:41:08.608606 | 2020-04-15T17:18:58 | 2020-04-15T17:18:58 | 130,697,836 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 39,352 | r | MouseFunctions.R |
library(gdata)
library(zoo)
library(circular)
#### DOWNSAMPLES DATA to 1 Hz
onesecond.downsampler<-function(datatodownsample=profileplainpee,startingnames=profileplainpee$label){
  # Downsample tracking data to 1 Hz by keeping only the first row recorded
  # within each calendar second.
  #
  # datatodownsample  data.frame with one row per video frame
  # startingnames     file name for each row; a timestamp follows a "2018-"
  #                   or "2019-" tag, e.g. "..._2018-0101_120001_3.csv"
  #
  # Returns the subset of datatodownsample with one row per unique
  # "<day>-<second>" stamp, in original row order.
  #
  # Year tag: split on "2018-" when the first name contains no "2019".
  # (Fixes the original, whose two branches were swapped so neither split
  # ever matched; siblings onesecondgroups() and onesecond.index() already
  # used this orientation.)
  if(length(grep("2019",startingnames[1]))==0){
    frameinfo<-strsplit(startingnames,"2018-")
  } else {
    frameinfo<-strsplit(startingnames,"2019-")
  }
  # strip the ".csv" suffix, then split the "day_second_frame" stem on "_"
  alltimes<-lapply(frameinfo,function(x) strsplit(x[2],".csv"))
  oknow<-lapply(alltimes,function(x) strsplit(x[[1]],"_"))
  oknow2<-unlist(lapply(oknow,function(x) x[[1]][c(2)]))  # second-of-day token
  oknow3<-unlist(lapply(oknow,function(x) x[[1]][c(1)]))  # day token
  fileinformation<-data.frame(matrix(unlist(alltimes),nrow=length(alltimes),byrow=T));colnames(fileinformation)<-"V1"
  fileinformation$day<-oknow3
  fileinformation$tosecond<-oknow2
  fileinformation$check<-paste(oknow3,oknow2,sep="-")
  # TRUE for the first frame seen within each second
  fileinformation$unique.time<-!duplicated(fileinformation$check)
  downsampled<-datatodownsample[which(fileinformation$unique.time),]
  return(downsampled)
}
#downsample just list of names
onesecondgroups<-function(startingnames){
  # Build a "<day>-<second>" grouping key for each tracking-file name.
  # Names embed a timestamp after a "2018-" or "2019-" tag, e.g.
  # "..._2018-0101_120001_3.csv" -> "0101-120001".  Which year tag to
  # split on is decided from the first name only.
  yeartag <- if (length(grep("2019", startingnames[1])) == 0) "2018-" else "2019-"
  # text after the year tag, e.g. "0101_120001_3.csv"
  afteryear <- vapply(strsplit(startingnames, yeartag), function(p) p[2], character(1))
  # strip the ".csv" suffix, leaving the "day_second_frame" stem
  stem <- vapply(strsplit(afteryear, ".csv"), function(p) p[1], character(1))
  stampparts <- strsplit(stem, "_")
  day <- vapply(stampparts, function(p) p[1], character(1))
  tosecond <- vapply(stampparts, function(p) p[2], character(1))
  paste(day, tosecond, sep = "-")
}
#just return downsample indices
onesecond.index<-function(startingnames){
  # Row indices of the first file name seen within each "<day>-<second>"
  # stamp, i.e. the rows to keep when downsampling to 1 Hz.
  # The year tag ("2018-" vs "2019-") is chosen from the first name only.
  yeartag <- if (length(grep("2019", startingnames[1])) == 0) "2018-" else "2019-"
  afteryear <- vapply(strsplit(startingnames, yeartag), function(p) p[2], character(1))
  # drop the ".csv" suffix, then pull day and second from the stem
  stem <- vapply(strsplit(afteryear, ".csv"), function(p) p[1], character(1))
  stampparts <- strsplit(stem, "_")
  key <- paste(vapply(stampparts, function(p) p[1], character(1)),
               vapply(stampparts, function(p) p[2], character(1)),
               sep = "-")
  which(!duplicated(key))
}
# Assign an arena region-of-interest (ROI) label to every pee event.
# dfwp:    tracking data.frame (use keeplongA13 / keeplongA24); rows with a
#          non-NA mTemp are treated as pee events located at (true.x, true.y)
# inforoi: trial/day-specific ROI table; each row is one quadrilateral ROI
#          whose x corners sit in columns 6,8,10,12 and y corners in columns
#          7,9,11,13 -- assumed layout, TODO confirm it holds for every trial
# RETURNS a list:
#   [[1]] dfwp with peeroi (single chosen ROI, as a factor) and peeroiFULL
#         (all matching ROI names, "."-collapsed) filled in on pee rows
#   [[2]] data.frame logging events whose RAL-assigned quad disagrees with
#         the quad prefix of the chosen ROI name (zero rows when none)
peeroi<-function(dfwp,inforoi){
  library(sp)  # for point.in.polygon()
  dfwp$peeroi<-NA
  dfwp$peeroiFULL<-NA
  dfhold<-dfwp   # full frame; annotated pee rows are written back into it
  dfwp2<-dfwp[which(!is.na(dfwp$mTemp)),]           # the pee events only
  replacementrowvalue<-which(!is.na(dfwp$mTemp))    # their row indices in dfhold
  #pulls just the roi info for the correct camera
  # camera inferred from column names: several "a1" columns -> arenas a1/a3,
  # several "a2" columns -> arenas a2/a4.  NOTE(review): if neither pattern
  # matches, inforoi2 is never defined and the loop below errors -- confirm
  # every caller supplies one of the two column layouts
  if(length(grep("a1",colnames(dfwp)))>1){
    inforoi2<-inforoi[which(inforoi$arena=="a1" | inforoi$arena=="a3"),]
  }
  if(length(grep("a2",colnames(dfwp)))>1){
    inforoi2<-inforoi[which(inforoi$arena=="a2" | inforoi$arena=="a4"),]
  }
  if(nrow(dfwp2)==0){
    # no pee events: return the frame untouched (no-op kept from original)
    dfhold<-dfhold
  } else {
    errorflag<-1   # stays 1 until the first quad/ROI mismatch is logged
    for(r in 1:nrow(dfwp2)){
      # test this event's coordinates against every candidate ROI polygon;
      # ne collects one point.in.polygon() code per ROI (1 = strictly inside)
      ne<-0
      for(s in 1:nrow(inforoi2)){
        e<-point.in.polygon(dfwp2$true.x[r],dfwp2$true.y[r],pol.x=c(inforoi2[s,c(6,8,10,12)]),pol.y=c(inforoi2[s,c(7,9,11,13)]))
        #e2<-ifelse(e==1 & dfwp2[r,"quad"]==inforoi[s,"arena"],1,0)
        ne<-c(ne,e)
      }
      ne<-ne[-1] #drop first entry, which was created as 0 before s loop
      roispossible<-inforoi2$ROI_name[ne==1]
      roispossible<-unique(roispossible)
      # record every matching ROI name before precedence rules whittle it down
      fullpossiblerois<-paste(as.character(roispossible),collapse=".")
      if(length(roispossible)>1){#possible for a) waters, which are also on walls, and water is given precedence
        # and b) corners, and corners given precedence
        #print(as.character(roispossible))
        # precedence: water > double-barrier (collapsed to a synthetic
        # "<quad>_central_corner") > corner > barrier-over-wall
        if(length(roispossible[grep("water",roispossible)])>0){
          roispossible<-roispossible[grep("water",roispossible)]
        }
        if(length(roispossible[grep("barrier",roispossible)])==2){
          # inside two barrier ROIs at once: treat as that quad's shared
          # central corner, e.g. "a1_central_corner"
          roinam<-as.character(roispossible[1])
          roispossible<-paste0(strsplit(roinam,"_")[[1]][1],"_central_corner")
        }
        if(length(roispossible[grep("corner",roispossible)])>0){
          roispossible<-roispossible[grep("corner",roispossible)]
        }
        if(((length(roispossible[grep("barrier",roispossible)])>0)+(length(roispossible[grep("wall",roispossible)])))==2){
          # exactly one barrier plus one wall match: barrier wins
          roispossible<-roispossible[grep("barrier",roispossible)]
        }
      }
      #if multiple 'corner' rois possible, use the one that matches the quad identified by me, earlier in the code
      if(length(roispossible[grep("corner",roispossible)])>1){
        roispossible<-roispossible[grep(dfwp2[r,"quad"],roispossible)]
        #if the above trim doesn't work, length will still be >1
        #IN which case we take the lead quad
        if(length(roispossible)>1){
          # NOTE(review): this *removes* the entries matching "_<quad>"
          # rather than keeping them, despite the "take the lead quad"
          # comment above -- confirm the subtraction is intended
          dropindex<-which(roispossible==roispossible[grep(paste0("_",dfwp2[r,"quad"]),roispossible)])
          roispossible<-roispossible[-dropindex]
        }
      }
      if(length(roispossible)==0){
        # event fell inside no ROI: label it as the open middle of the arena
        roispossible<-"generalmid"
      }
      if(length(fullpossiblerois)==0){
        # defensive; paste(collapse=) always returns length 1, so this
        # branch should be unreachable
        fullpossiblerois<-"NOMATCH"
      }
      # write the annotated event back into its original row of dfhold
      keepnewrow<-dfwp2[r,]
      keepnewrow$peeroi<-as.character(roispossible)
      keepnewrow$peeroiFULL<-fullpossiblerois
      replacindex<-replacementrowvalue[r]
      dfhold[replacindex,]<-keepnewrow
      # keepnewframe accumulates the annotated events (not part of the
      # returned list; kept from original)
      if(r==1){
        keepnewframe<-keepnewrow
      } else {
        keepnewframe<-rbind(keepnewframe,keepnewrow)
      }
      #capture roi and quad mismatch instances, but ignore instances where the inferred location is generalmid (impossible to compare)
      if(roispossible!="generalmid"){
        if((dfwp2[r,"quad"]!=strsplit(as.character(roispossible),"_")[[1]][1])){
          # log trial/camera/day plus the conflicting quad and ROI labels
          errorstring<-data.frame(c(inforoi2[1,c("trial","camera","day")],as.character(dfwp2[r,"quad"]),as.character(roispossible),as.character(fullpossiblerois)))
          colnames(errorstring)[c(4:6)]<-c("ralQuad","roiID","fullmatches")
          #print(r)
          if(errorflag==1){
            allerrors<-errorstring
            errorflag<-errorflag+1
          } else {
            allerrors<-rbind(allerrors,errorstring)
          }
        }#close if testing whether the first quad component of the roi name is the same as the RAL identified pee quad
      }#close if testing whether 'generalmid'
    }
    dfhold$peeroi<-factor(dfhold$peeroi)
    #summary(dfhold)
  }
  # no mismatches logged: build a correctly-named, zero-row error frame so
  # the return shape is consistent.  NOTE(review): exists() also searches
  # enclosing/global environments, so a global `allerrors` would suppress
  # this fallback -- confirm none exists at call time
  if(!exists("allerrors")){
    errorstring<-data.frame(c(inforoi[1,c("trial","camera","day")],as.character(dfwp2[1,"quad"]),as.character("moop")))
    colnames(errorstring)[c(4:5)]<-c("ralQuad","roiID")
    allerrors<-errorstring[0,]
  }
  wantedlist<-list(dfhold,allerrors)
  return(wantedlist)
}
######renametherois
# Convert raw ROI labels (e.g. "a4_a2_barrier") into universal labels that
# encode the neighbour relationship instead of arena ids, e.g. "OSbarrier"
# (opposite-sex neighbour) or "SSbarrier" (same-sex neighbour).
renametherois<-function(v.of.rois,thistrial,sex.of.focal){
  # v.of.rois    vector of rois in original format, e.g. "a4_a2_barrier"
  # thistrial    subset of FullMegaInfo for this trial ($quad, $sex columns)
  # sex.of.focal sex ("f" or "m") of the focal mouse in a given quad
  # normalize separators, then split each label into its tokens
  pieces <- strsplit(gsub("-", "_", as.character(v.of.rois)), "_")
  # a three-token label carries the neighbouring quad in the middle slot
  neighbour <- unlist(lapply(pieces, function(p) if (length(p) == 3) p[2] else ""))
  # the final token is the roi type (wall, corner, barrier, ...)
  roitype <- unlist(lapply(pieces, function(p) p[length(p)]))
  # sex of the mouse housed in the neighbouring quad ("" when unknown)
  neighbour.sex <- unlist(lapply(neighbour, function(q) {
    if (q == "" || !any(thistrial$quad == q)) {
      ""
    } else {
      as.character(thistrial[which(thistrial$quad == q), "sex"])
    }
  }))
  focal <- as.character(sex.of.focal)
  # SS = same sex as focal, OS = opposite sex, "" = no neighbour information
  sexcat <- ifelse(neighbour.sex == "", "",
                   ifelse(focal == neighbour.sex, "SS", "OS"))
  # arena codes (a1-a4) are dropped from the label; any other middle token
  # is kept as a modifier
  modifier <- neighbour
  modifier[!is.na(match(neighbour, c("a1", "a2", "a3", "a4")))] <- ""
  paste0(sexcat, modifier, roitype)
}
#Barplotting function, with error bars
barplotwitherrorbars<-function(dataframe,valuecolumn,groupingvariablecolumn,secondarygroupingvariable=NA,plottingcolors,cexName=1.3){
  # Draw a bar plot of group means with +/- 2*SEM error bars and sample
  # sizes printed in blue near the base of each bar.
  #
  # dataframe                 data to summarize; rows = observations
  # valuecolumn               name of the numeric column to plot, e.g. "height"
  # groupingvariablecolumn    name of the primary grouping column, e.g. "sex";
  #                           must be a factor when no secondary grouping is used
  # secondarygroupingvariable optional name of a second grouping column for a
  #                           two-way split (e.g. sex within strain); NA = off
  # plottingcolors            bar colors, in the order the groups are plotted
  # cexName                   character expansion for the bar labels
  #
  # Called for its plotting side effect; the return value is not meaningful.
  # (The original's "if(!exists('cexName')) cexName=2" guard was dead code --
  # the formal argument always exists inside the function -- and was removed.)
  library(dplyr)  # NOTE(review): attached by the original but unused below
  if(!is.na(secondarygroupingvariable)){
    # ---- two-way split: secondary (outer) x primary (inner) ----
    # group values in order of first appearance in the data
    types.primary <- unique(dataframe[, groupingvariablecolumn])
    types.secondary <- unique(dataframe[, secondarygroupingvariable])
    # every primary x secondary combination, primary varying fastest so that
    # row `flag` of possiblecombos lines up with the nested loops below.
    # (The original built this as unique(expand.grid()) over the raw data
    # columns -- O(n^2) rows -- and its labels desynchronized from the bars
    # whenever a combination was absent from the data; this form fixes both.)
    possiblecombos <- expand.grid(types.primary, types.secondary)
    ncombo <- nrow(possiblecombos)
    # preallocate instead of growing lists
    means <- numeric(ncombo)
    standardErrors <- numeric(ncombo)
    samplesizes <- integer(ncombo)
    barnames <- character(ncombo)
    flag <- 1
    for(r in seq_along(types.secondary)){
      subseconddata <- dataframe[which(dataframe[, secondarygroupingvariable] == types.secondary[r]), ]
      for(s in seq_along(types.primary)){
        subprim <- subseconddata[which(subseconddata[, groupingvariablecolumn] == types.primary[s]), ]
        samplesizes[flag] <- nrow(subprim)  # n includes NA observations here
        barnames[flag] <- paste(possiblecombos[flag, 1], possiblecombos[flag, 2], sep = ".")
        calcvalue <- na.omit(subprim[, valuecolumn])
        # standard error of the mean (NA when a cell has < 2 observations,
        # which also makes plotTop NA -- same limitation as the original)
        standardErrors[flag] <- sd(calcvalue) / sqrt(length(calcvalue))
        means[flag] <- mean(calcvalue)
        flag <- flag + 1
      }
    }
    plotTop <- max(means + standardErrors * 2)
    # las = 2 rotates the combined labels; the original passed las twice
    # (las=1 and las=2) in one call -- the later, rotated setting is kept
    barCenters <- barplot(means, names.arg = barnames, col = plottingcolors,
                          las = 2, ylim = c(0, plotTop), ylab = valuecolumn,
                          cex.names = cexName)
    segments(barCenters, means - standardErrors * 2, barCenters, means + standardErrors * 2, lwd = 2)
    text(x = barCenters + .2, y = means - means * .25, label = samplesizes, pos = 3, cex = 0.8, col = "blue")
  } else {
    # ---- default: single grouping variable, one bar per factor level ----
    types <- dataframe[, groupingvariablecolumn]
    nlev <- nlevels(types)
    means <- numeric(nlev)
    standardErrors <- numeric(nlev)
    samplesizes <- integer(nlev)
    barnames <- character(nlev)
    for(g in seq_len(nlev)){
      g2 <- levels(types)[g]
      barnames[g] <- g2
      calcvalue <- na.omit(dataframe[which(dataframe[, groupingvariablecolumn] == g2), valuecolumn])
      # standard error of the mean (NA when a level has < 2 observations)
      standardErrors[g] <- sd(calcvalue) / sqrt(length(calcvalue))
      means[g] <- mean(calcvalue)
      samplesizes[g] <- length(calcvalue)  # n excludes NA observations here
    }
    plotTop <- max(means + standardErrors * 2)
    barCenters <- barplot(means, names.arg = barnames, col = plottingcolors,
                          las = 1, ylim = c(0, plotTop), ylab = valuecolumn,
                          cex.names = cexName)
    segments(barCenters, means - standardErrors * 2, barCenters, means + standardErrors * 2, lwd = 2)
    text(x = barCenters + .2, y = means - means * .25, label = samplesizes, pos = 3, cex = 0.8, col = "blue")
  }
}
# CreateCompositeList -----------------------------------------------------
#Loops through each folder in 'directories'
# pulls files corresponding to tracking informaiton (location.names)
# and ROI information (Roifiles)
# then names the information from these files and amalgamates into a data.frame called CombinedInfo
# then puts that dataframe into the list AllFoldersList at position zz
CreateCompositeList<-function(directories,pix.cm=4.2924,AssociatingDistance.cm=10,
                              xytrackpattern="fixed.csv",roipattern="-region",roundxy=TRUE,
                              frame.height.px=1080){
  #directories should include all folders/subfolder with tracking data
  #pix.cm defines the pixels/cm correction
  #AssociatingDistance.cm Defines cm distance that corresponding to an 'association'
  #xytrackpattern / roipattern: file name patterns for the tracking and ROI csvs
  #roundxy: round pixel coordinates to integers before further processing?
  #frame.height.px: video frame height in pixels used to flip the y axis
  #  (was hard-coded to 1080; parameterized for other resolutions -- default unchanged)
  #Depends on the sibling functions pairwise.Distances() and Associate.Identifier().
  AllFoldersList<-list() #Creates empty list which will get filled iteratively with loop
  flag<-1 #sets flag, which will only increase when looping through folders containing the "correct" kind of data
  for (zz in seq_along(directories)){
    FolderInfo1<-directories[zz]#pull full path
    # Folder label = "<parent>.<folder>" built from the last two path components
    FolderInfo<-strsplit(FolderInfo1,"/")[[1]][length(strsplit(FolderInfo1,"/")[[1]])]
    FolderInfo<-paste(strsplit(FolderInfo1,"/")[[1]][(length(strsplit(FolderInfo1,"/")[[1]])-1)],FolderInfo,sep='.')
    location.names<-list.files(directories[zz],full.names=TRUE,pattern=xytrackpattern)#change pattern to only pull location csvs
    if(length(location.names)>0){ #Only if in the right kind of folder, i.e. containing ...fixed.csv files, run the rest, otherwise, skip
      Roifiles<-list.files(directories[zz],full.names = TRUE,pattern=roipattern)#refine to pull roi csvs
      Roifilenames<-list.files(directories[zz],full.names = FALSE,pattern=roipattern)#refine to pull roi csvs
      individuals<-length(location.names)
      for(Caleb in seq_len(individuals)){
        Location<-read.csv(location.names[Caleb])
        colnames(Location)[2:3]<-paste(colnames(Location)[2:3],".A",Caleb,sep='')
        # Flip y so the origin is bottom-left instead of the image's top-left
        Location[,3]<-(Location[,3]-frame.height.px)*(-1)
        if(Caleb>1){
          CombinedInfo<-merge(CombinedInfo,Location,by="position")
        } else {
          CombinedInfo<-Location
        }
      }
      #CombinedInfo[,c(2:ncol(CombinedInfo))]<-CombinedInfo[,c(2:ncol(CombinedInfo))]/(pix.cm)
      if(roundxy==TRUE){
        CombinedInfo<-round(CombinedInfo)
      }
      if(individuals>1){
        CombinedInfo<-pairwise.Distances(CombinedInfo,individuals)#Custom function located in MouseFunctions.R
        # BUG FIX: was 2*(individuals-1), which equals the number of dyads only
        # when individuals == 4; choose() is correct for any group size.
        ncomparisons<-choose(individuals,2)
        dister<-(individuals*2+2) # index of the first pairwise-distance column
        # NOTE(review): pairwise.Distances() already divides by its own pix.cm
        # default; dividing again here may double-convert -- confirm intent.
        CombinedInfo[,c(dister:(dister+ncomparisons-1))]<-(CombinedInfo[,c(dister:(dister+ncomparisons-1))])/(pix.cm)
        CombinedInfo<-Associate.Identifier(CombinedInfo,AssociatingDistance.cm)#Custom function located in MouseFunctions.R
      }
      # BUG FIX: was `> 1`, which silently skipped folders with exactly one ROI file
      if(length(Roifilenames)>0){
        for(Caitlin in seq_along(Roifiles)){
          ROI<-read.csv(Roifiles[Caitlin])
          roiname<-Roifilenames[Caitlin]
          colnames(ROI)[2]<-roiname
          CombinedInfo<-merge(CombinedInfo,ROI, by="position")
        }
      }
      AllFoldersList[[flag]]<-CombinedInfo #puts CombinedInfo dataframe into AllFoldersList at position flag
      names(AllFoldersList)[[flag]]<-FolderInfo #applies name of folder to list element
      flag<-flag+1
    }
  }
  return(AllFoldersList)
}
# BehaviorCategorizer -----------------------------------------------------
#Big =
# approach.angle = acceptable width of angle for approaching another mouse
# leave.angle = acceptable width of angle for leaving another mouse
# integratesteps = n of frames over which to calculate rolling averages for angles and delta pairwise distances for calculating approaching/leaving
# walk.speed = cm/sec threshold, above which = running
# Classifies per-frame behaviours from the trajectory metrics produced by
# Mouse2Mouse():
#  - per-mouse locomotion state ("stationary"/"walking"/"running") from the
#    rolling sum of step lengths, and
#  - per-directed-pair approach/leave indicators, combining rolling-mean
#    heading angles with the sign of the change in pairwise distance.
# Returns a data.frame of speed ("cm/s"), movement-state, .app. and .lvs.
# columns, one row per input frame.
# Depends on zoo::rollapply (not loaded here -- caller must attach zoo).
# NOTE(review): orderedcomparisons = 2*(n.inds-1)*2 equals the number of
# directed pairs, n.inds*(n.inds-1), only when n.inds == 4 -- confirm before
# using other group sizes.
BehaviorCategorizer<-function(Big,approach.angle=90,leave.angle=90,integratesteps=10,n.inds=4,walk.speed=10,stationary.noise=.5){
  orderedcomparisons<-2*(n.inds-1)*2
  # Ordered-pair columns are the "a1:a2-angle"-style names
  OrderedMicePairs<-colnames(Big)[grep(":",colnames(Big))]
  BigSub<-Big[,c(grep(":",colnames(Big)),grep("Delta",colnames(Big)))]
  # Rolling means of the relative angles and delta-distance columns
  Bog<-rollapply(BigSub,width=integratesteps,mean,na.rm=TRUE,partial=TRUE,align="left")
  MouseDisp<-Big[,grep("step",colnames(Big))]
  # Rolling sum of step lengths over `integratesteps` frames ~ speed
  RunMouse<-rollapply(MouseDisp,width=integratesteps,sum,na.rm=TRUE,partial=TRUE,align="left")
  colnames(RunMouse)<-gsub("step","cm/s",colnames(RunMouse))
  #left alignment means that the locations we identify for behaviors (e.g. approaches)
  # will correspond to the 'start' of the behavior
  movingbehavior<-apply(RunMouse,2,function(x) ifelse(x<stationary.noise,"stationary",
                                                      ifelse(x>walk.speed,"running","walking")))
  colnames(movingbehavior)<-gsub("cm/s","movement",colnames(movingbehavior))
  #Approaching classifier: 1 when the heading is within +/- approach.angle/2
  # of the line towards the other mouse
  top.angle<-approach.angle/2
  bottom.angle<-(approach.angle/2)*-1
  ApproachAngleClassifier<-function(angles){
    angleapproach<-ifelse(angles>bottom.angle & angles<top.angle,1,0)
  }
  #Leaving classifier: 1 when the heading is within leave.angle of pointing
  # directly away (i.e. near +/-180 degrees)
  Ltop.angle<-180-(leave.angle/2)
  Lbottom.angle<-(-180)+(leave.angle/2)
  LeaveAngleClassifier<-function(angles){
    anglealeave<-ifelse(angles>Ltop.angle | angles<Lbottom.angle,1,0)
  }
  Approaching<-apply(Bog[,c(grep(":",colnames(Bog)))],2,ApproachAngleClassifier)
  colnames(Approaching)<-gsub("-angle","pointedtoward",colnames(Approaching))
  Leaving<-apply(Bog[,c(grep(":",colnames(Bog)))],2,LeaveAngleClassifier)
  colnames(Leaving)<-gsub("-angle","pointedaway",colnames(Leaving))
  socialmovements<-cbind(Approaching, Leaving,Bog[,c(grep(":",colnames(Bog)))],Bog[,c(grep("Delta",colnames(Bog)))])
  # Split "a1:a2-angle" into the two mouse ids for column matching below
  oc<-strsplit(OrderedMicePairs,"-")
  oc2<-lapply(oc,function(x) strsplit(x[1],":"))
  oc3<-lapply(oc2,function(x) unlist(x))
  for(rory in 1:orderedcomparisons){
    plusr<-rory+orderedcomparisons  # offset into the Leaving columns of socialmovements
    # Approach = pointed toward the other mouse AND pairwise distance shrinking
    # AND the focal mouse is not stationary
    approachingthismouse<-ifelse(socialmovements[,rory]==1 &
                                   socialmovements[,c(grepl(oc3[[rory]][1],colnames(socialmovements))&
                                                        grepl(oc3[[rory]][2],colnames(socialmovements))&
                                                        grepl("Delta",colnames(socialmovements)))]<0 &
                                   movingbehavior[,grepl(oc3[[rory]][1],colnames(movingbehavior))]!="stationary",1,0)
    approachingthismouse<-as.data.frame(approachingthismouse,ncol=1)
    colnames(approachingthismouse)<- gsub(":",".app.",gsub("-angle","",OrderedMicePairs))[rory]
    if(rory<2){
      approachingbehavior<-approachingthismouse
    } else {
      approachingbehavior<-cbind(approachingbehavior,approachingthismouse)
    }
    # Leave = pointed away AND pairwise distance growing AND not stationary
    leavingthismouse<-ifelse(socialmovements[,plusr]==1 & socialmovements[,c(grepl(oc3[[rory]][1],colnames(socialmovements))&
                                                                               grepl(oc3[[rory]][2],colnames(socialmovements))&
                                                                               grepl("Delta",colnames(socialmovements)))]>0 &
                               movingbehavior[,grepl(oc3[[rory]][1],colnames(movingbehavior))]!="stationary",1,0)
    leavingthismouse<-as.data.frame(leavingthismouse,ncol=1)
    colnames(leavingthismouse)<- gsub(":",".lvs.",gsub("-angle","",OrderedMicePairs))[rory]
    if(rory<2){
      leavingbehavior<-leavingthismouse
    } else {
      leavingbehavior<-cbind(leavingbehavior,leavingthismouse)
    }
  }
  oknow<-cbind(RunMouse,movingbehavior,approachingbehavior,leavingbehavior)
  return(oknow)
}
# Mouse2MouseTrajectories -------------------------------------------------
# Function that takes two columns, per individual, of x/y coordinates
# (e.g. 4 individuals, n.inds=4, should correspond to 8 columns (xyvalues.allpairs))
# Returns pairwise distances, orientations at each time point
# (i.e. whether a mouse is moving towards another) and velocities
# For every mouse and every ordered pair of mice, computes step lengths,
# absolute heading angles, and the signed angle between a mouse's heading
# and the direction to each other mouse; as a side effect it draws rose
# diagrams of those angle distributions (requires the `circular` package
# and an open graphics device).
# Returns a data.frame with one row per movement step (nrow(input) - 1):
# "<id>step" step lengths, "<id>-angles" headings, "<id1>:<id2>-angle"
# relative angles, and "Delta.<pair>" per-step changes in pairwise distance.
Mouse2Mouse<-function(xyvalues.allpairs,pairwisedistances,
                      n.inds=4,shrinksize=.754){
  #xyvalues.allpairs = subset of full dataframe containing original all x,y coordinates for all
  # n.inds = number of individual mice
  # pairwisedistances = data.frame of dyadic distance columns (one per pair)
  # shrinksize = shrink factor forwarded to rose.diag()
  ncomparisons<-2*(n.inds-1)  # NOTE(review): computed but never used below
  flag<-1
  colorlist<-c("gray","blue","red","green")  # one plot colour per mouse; assumes n.inds <= 4
  for(d in 1:n.inds){ #Loops through all individuals, to compare trajectory relative to others location
    a1<-(d-1)*2+1
    b1<-a1+1
    Z1=xyvalues.allpairs[,a1]+1i*xyvalues.allpairs[,b1] #Pulls locations for first individual as complex x+iy
    # RealZ <-zoo(Z1)
    # #Location Vector
    # Z1<-(RealZ)
    # step vectors
    dZ1 <- (diff(Z1))
    # orientation of each step
    distanctrav<-Mod(dZ1)  # step length per frame (same units as the input coordinates)
    Z1.Phi <- Arg(dZ1)
    Compass.D.M1<-((Z1.Phi * 180)/pi)
    Compass.D.M1<-ifelse(Compass.D.M1>0,Compass.D.M1,(360+(Compass.D.M1)))  # map to 0-360
    circle.Compass.D.M1<-circular(Compass.D.M1, type = "angles",
                                  units = "degrees",
                                  template = "geographics",#c("none", "geographics", "clock12", "clock24"),
                                  #modulo = c("asis", "2pi", "pi"),
                                  zero = 0) #rotation = c("counter", "clock"), names)
    nom1<-gsub(".x","",colnames(xyvalues.allpairs)[a1])  # mouse id from its x-column name
    # rose.diag(circle.Compass.D.M1, bins = 16, col = colorlist[d],
    #           prop = 2, shrink=shrinksize,
    #           main = nom1,rotation="clock")
    for(e in 1:n.inds){ #Loops through all others
      if(d!=e){
        a2<-(e-1)*2+1
        b2<-a2+1
        comparisonheader<-paste(gsub(".x","",colnames(xyvalues.allpairs)[a1]),
                                gsub(".x","",colnames(xyvalues.allpairs)[a2]),sep=':')
        Z2=xyvalues.allpairs[,a2]+1i*xyvalues.allpairs[,b2] #Pulls locations for second individual
        #interleave movement and difference vectors
        interleaveddata<-matrix(rbind(t(Z1), t(Z2)), ncol=1, byrow=TRUE)
        # step vectors
        dZ.1.2 <- diff(interleaveddata)#Calculates distance between 1 & 2
        # orientation of each step
        Z1Z2.Phi <- Arg(dZ.1.2)#orientation of vector connecting mouse 1 and mouse 2
        # Every second diff is mouse1 -> mouse2 at the same frame; keep those,
        # then drop the last entry to align with the step vectors (length n-1)
        Z1Z2.Phi <- Z1Z2.Phi[seq(1,nrow(Z1Z2.Phi),2),]
        Z1Z2.Phi <- Z1Z2.Phi[c(1:(length(Z1Z2.Phi)-1))]
        Compass.D.M1toM2<-((Z1Z2.Phi * 180)/pi)#Calculates orientation between M1 and M2, in angles
        Compass.D.M1toM2<-ifelse(Compass.D.M1toM2>0,Compass.D.M1toM2,(360+(Compass.D.M1toM2)))
        # AngularDifference<-atan2(sin(Z1Z2.Phi-Z1.Phi), cos(Z1Z2.Phi-Z1.Phi))
        # DiffCompass.D.M1toM2<-(90 - ((AngularDifference) * 180)/pi)#Calculates orientation between M1 and M2, in angles
        # DiffCompass.D.M1toM2<-ifelse(DiffCompass.D.M1toM2>0,DiffCompass.D.M1toM2,(360+(DiffCompass.D.M1toM2)))#makes angles go 0-360
        #
        #Two-step function to calculate difference between two angles (which might span 0), and for
        # which negative values are returned for one direction of turn, and positive for another
        a = Compass.D.M1toM2 - Compass.D.M1
        a = ifelse(a>180,a-360,
                   ifelse(a<(-180),a+360,a))
        #up, down, right, left,upright,upleft,downright,downleft
        # (defines pairwise angle for plotting quadrant to quadrant angles with relevant orientation)
        anglecomparisons<-c(pi/2,1.5*pi,0,pi,
                            pi/4,.75*pi,pi+.75*pi,pi+pi/4)
        #Depending on which quadrants are being compared, this nested ifelse will assign zervalues corresponding to the
        # different elements in the anglecomparisons vector
        # NOTE(review): the "A1".."A4" labels assume the 4-arena naming used
        # elsewhere in this file -- other ids fall through to the last value.
        zervalue<-ifelse(comparisonheader=="A3:A1" || comparisonheader=="A4:A2",anglecomparisons[1],
                         ifelse(comparisonheader=="A1:A3" || comparisonheader=="A2:A4",anglecomparisons[2],
                                ifelse(comparisonheader=="A1:A2" || comparisonheader=="A3:A4",anglecomparisons[3],
                                       ifelse(comparisonheader=="A4:A3" || comparisonheader=="A2:A1", anglecomparisons[4],
                                              ifelse(comparisonheader=="A3:A2",anglecomparisons[5],
                                                     ifelse(comparisonheader=="A4:A1",anglecomparisons[6],
                                                            ifelse(comparisonheader=="A1:A4",anglecomparisons[7],anglecomparisons[8])))))))
        #Turns angle data into a 'circular' object, for circular plotting
        circle.Compass.M1.M2<-circular(a, type = "angles",
                                       units = "degrees",
                                       template = "none",#c("none", "geographics", "clock12", "clock24"),
                                       #modulo = c("asis", "2pi", "pi"),
                                       zero = zervalue) #rotation = c("counter", "clock"), names)
        rose.diag(circle.Compass.M1.M2, bins = 16, col = colorlist[e], prop = 2, shrink=shrinksize,
                  main =comparisonheader,col.lab=colorlist[d] ,rotation="clock")
        #From Almelda et al. 2010, Indices of movement behaviour: conceptual background, effects of scale and location errors.
        # "The Straightness or linearity index, ST, (BATSCHELET 1981),
        # is simply the net displacement distance (the Euclidian distance
        # between the start and the final point), divided by the total length of the movement."
        #BATSCHELET, E. 1981. Circular Statistics in Biology. London, Academic Press.
        if(flag>1){
          allcomparisonangles<-cbind(allcomparisonangles,a)
          colnames(allcomparisonangles)[ncol(allcomparisonangles)]<-paste(comparisonheader,"angle",sep='-')
        } else {
          allcomparisonangles<-as.data.frame(a,ncol=1)
          colnames(allcomparisonangles)<-paste(comparisonheader,"angle",sep='-')
        }
        #############################
        # TotDistance<-sum(S) #Sum of step lengths
        # AvgSpeed<-mean(V) #Mean of velocities
        # MaxSpeed<-max(V) #Maximum velocity
        # StraightnessIndex<-(StrtStop/TotDistance) #Ranges from 0-1 (with 1 being a straight line)
        # AvgOrientation<-mean(Phi)
        # AvgTurn<-mean(Theta)
        flag<-flag+1
      } else {
        # Same mouse: plot its own heading distribution instead of a comparison
        rose.diag(circle.Compass.D.M1, bins = 16, col = colorlist[d],
                  prop = 2, shrink=shrinksize,
                  main = nom1,rotation="clock")
      }
    }
    # Accumulate per-mouse step lengths ("<id>step") ...
    if(d>1){
      maintraveldis<-cbind(maintraveldis,distanctrav)
      colnames(maintraveldis)[ncol(maintraveldis)]<-paste(nom1,"step",sep='')
    } else {
      maintraveldis<-as.data.frame(distanctrav,ncol=1)
      colnames(maintraveldis)<-paste(nom1,"step",sep='')
    }
    # ... and per-mouse headings ("<id>-angles")
    if(d>1){
      maintravelangles<-cbind(maintravelangles,Compass.D.M1)
      colnames(maintravelangles)[ncol(maintravelangles)]<-paste(nom1,"angles",sep='-')
    } else {
      maintravelangles<-as.data.frame(Compass.D.M1,ncol=1)
      colnames(maintravelangles)<-paste(nom1,"angles",sep='-')
    }
  }
  allcomparisons<-cbind(maintraveldis,maintravelangles,allcomparisonangles)
  #Add on data.frame construction, making new columns corresponding
  # to change (Delta) in pairwise distances (to be used when using movement data for behavioral categorization)
  G<-apply(pairwisedistances,2,function(x) c(0,diff(x)))
  colnames(G)<-paste("Delta.",colnames(pairwisedistances),sep='')
  G<-G[-1,]  # drop the padded first row so rows align with the step-based columns
  allcomparisons<-cbind(allcomparisons,G)
  return(allcomparisons)
}
# pairwise.Distances ------------------------------------------------------
#Function to calculate pairwise distances from CombinedInfo dataframes
# *dataframeCombinedInfo* should be a df (nrows corresponding to frames), with labelled x and y coordinates
#  named "<id>.x"/"<id>.y" (e.g. "a1.x" "a1.y" "a2.x" "a2.y" ...) -- the function greps on the "<id>"
#  prefix recovered from the ".x" columns, so "x.a1"-style names do not match the code's logic
# *inds* should correspond to the number of individuals you are comparing
pairwise.Distances<-function(dataframeCombinedInfo,inds=4,pix.cm=4.2924){
  # Appends one "<id1><id2>dist" column per dyad holding the per-frame
  # Euclidean distance between the two individuals, divided by pix.cm.
  # Individual ids are recovered from the x-coordinate column names via
  # gsub(".x", "") and matched back with grepl(), so columns must follow the
  # "<id>.x"/"<id>.y" naming convention (e.g. "a1.x", "a1.y").
  # NOTE(review): CreateCompositeList divides these distance columns by
  # pix.cm a second time after calling this -- confirm which conversion is
  # intended before relying on absolute cm values.
  if(inds<=1){
    # No pairs to compare: return the input unchanged.
    # (BUG FIX: the original fell off the end here and returned NULL.)
    return(dataframeCombinedInfo)
  }
  #pulls subset of column names, one x column per individual, and strips ".x"
  individual.names<-gsub(".x","",colnames(dataframeCombinedInfo)[seq(2,inds*2,2)])
  for(i in seq_len(inds-1)){
    # grepl() picks both coordinate columns for this id; [,1]/[,2] below rely
    # on the original x/y columns preceding any appended distance columns
    first<-dataframeCombinedInfo[,grepl(individual.names[i],colnames(dataframeCombinedInfo))]
    firstID<-individual.names[i]
    for(j in (i+1):inds){
      second<-dataframeCombinedInfo[,grepl(individual.names[j],colnames(dataframeCombinedInfo))]
      distances<-sqrt((first[,1]-second[,1])^2+(first[,2]-second[,2])^2)
      distances<-distances/pix.cm
      secondID<-individual.names[j]
      comparisonname<-paste(firstID,secondID,"dist",sep='')
      dataframeCombinedInfo<-cbind(dataframeCombinedInfo,distances)
      colnames(dataframeCombinedInfo)[ncol(dataframeCombinedInfo)]<-comparisonname
    }
  }
  return(dataframeCombinedInfo) #returns original dataframe with additional columns corresponding to dyadic distances at each timepoint
}
# Associate.Identifier ----------------------------------------------------
#Using the *dataframeCombinedInfo*, which should contain pairwise distance columns with "dist" in the column names
# cheks for associations based on whether the distances are smaller than 'AssociatingDistance' threshold
Associate.Identifier<-function(dataframeCombinedInfo,AssociatingDistance){
  # For every pairwise-distance column (name containing "dist"), appends a
  # binary "<pair>" column: 1 when the distance is below AssociatingDistance
  # (the two mice count as 'associated' in that frame), else 0.
  # BUG FIX: the original did colnames(df[, grepl("dist", ...)]), which
  # returns NULL when exactly one distance column exists (single-column
  # subsetting drops to a vector); indexing the name vector directly is safe
  # for any number of matches, and seq_along() handles zero matches.
  all.names<-colnames(dataframeCombinedInfo)
  associationspossible<-all.names[grepl("dist",all.names)]
  for(r in seq_along(associationspossible)){
    ass<-associationspossible[r]
    ass.lab<-gsub("dist","",ass)  # "a1a2dist" -> "a1a2"
    xyz<-ifelse(dataframeCombinedInfo[,ass]<AssociatingDistance,1,0)
    dataframeCombinedInfo<-cbind(dataframeCombinedInfo,xyz)
    colnames(dataframeCombinedInfo)[ncol(dataframeCombinedInfo)]<-ass.lab
  }
  #CombinedInfo$totalSocial<-apply(CombinedInfo[, c(16:21)], 1, function(x) toString(na.omit(x)))
  return(dataframeCombinedInfo)
}
####
# Takes HugeMouse dataframe (with colnames id'd below),
# where 'ds' represents dailysecond, where each ds is repeated 10x
# and creates a downsampled version where each set of measures collected over each
# of the 10 frames/second are used to generate metrics at 1 Htz
#
# For categorical movement states (running, walking, stationary), takes the most freq state
# For velocity (cm/s), takes average over 10 frames/sec
# For binary (0/1) variables, if it happens at all, take it!
###################################################################
# c("ds", "frame", "a1.x", "a1.y", "a2.x", "a2.y", "a3.x", "a3.y",
# "a4.x", "a4.y", "a1a2dist", "a1a3dist", "a1a4dist", "a2a3dist",
# "a2a4dist", "a3a4dist", "a1a2", "a1a3", "a1a4", "a2a3", "a2a4",
# "a3a4", "a1step", "a2step", "a3step", "a4step", "a1-angles",
# "a2-angles", "a3-angles", "a4-angles", "a1:a2-angle", "a1:a3-angle",
# "a1:a4-angle", "a2:a1-angle", "a2:a3-angle", "a2:a4-angle", "a3:a1-angle",
# "a3:a2-angle", "a3:a4-angle", "a4:a1-angle", "a4:a2-angle", "a4:a3-angle",
# "Delta.a1a2dist", "Delta.a1a3dist", "Delta.a1a4dist", "Delta.a2a3dist",
# "Delta.a2a4dist", "Delta.a3a4dist", "a1cm/s", "a2cm/s", "a3cm/s",
# "a4cm/s", "a1movement", "a2movement", "a3movement", "a4movement",
# "a1.app.a2", "a1.app.a3", "a1.app.a4", "a2.app.a1", "a2.app.a3",
# "a2.app.a4", "a3.app.a1", "a3.app.a2", "a3.app.a4", "a4.app.a1",
# "a4.app.a2", "a4.app.a3", "a1.lvs.a2", "a1.lvs.a3", "a1.lvs.a4",
# "a2.lvs.a1", "a2.lvs.a3", "a2.lvs.a4", "a3.lvs.a1", "a3.lvs.a2",
# "a3.lvs.a4", "a4.lvs.a1", "a4.lvs.a2", "a4.lvs.a3")
# Downsample HugeMouse (10 frames per 'ds' daily-second) to 1 Hz:
#  - speed columns ("cm/s"): mean over the frames in each second
#  - association (a1a2 etc.), ".app." and "lvs" columns: max, i.e. 1 if the
#    event happened at all within the second
#  - movement-state columns: re-derived from the averaged speeds using the
#    same 10 / 0.5 cm/s thresholds as BehaviorCategorizer's defaults
# Depends on dplyr (%>%, group_by, summarise_at) and the sibling myFun().
# NOTE(review): the column reorder c(1,36:39,2:35) and the 1:4 movement loop
# hard-code a 4-mouse column layout -- confirm before other group sizes.
# NOTE(review): grep(".app.", ...) uses "." as a regex wildcard; it matches
# the intended ".app." columns but would also match e.g. "XappY" names.
HugeMouseDownSampler<-function(HugeMouse){
  numericcolumnstoavg<-colnames(HugeMouse)[c(grep("cm/s",colnames(HugeMouse)))]
  numericcolumnstomax<-colnames(HugeMouse)[c(which(colnames(HugeMouse) %in% c("a1a2","a1a3","a1a4","a2a3","a2a4","a3a4")),
                                             grep(".app.",colnames(HugeMouse)),grep("lvs",colnames(HugeMouse)))]
  movcols<-colnames(HugeMouse)[grep("movement",colnames(HugeMouse))]
  # Per-second mean of each speed column; merged together on 'ds'
  for(nca in 1:length(numericcolumnstoavg)){
    avgspd<-HugeMouse %>%
      group_by(ds) %>%
      summarise_at(numericcolumnstoavg[nca], mean, na.rm = TRUE)
    #############################
    if(nca==1){
      howmove<-avgspd
    } else {
      howmove<-merge(howmove,avgspd,by="ds")
    }
  }
  # Per-second max of each binary event column (1 if it occurred at all)
  for(ncm in 1:length(numericcolumnstomax)){
    maxVv<-HugeMouse %>%
      group_by(ds) %>%
      summarise_at(numericcolumnstomax[ncm], max, na.rm = TRUE)
    #############################
    howmove<-merge(howmove,maxVv,by="ds")
  }
  # Re-classify movement state from the 1 Hz average speeds
  for(mc in 1:4){
    howmove[,movcols[mc]]<-ifelse(howmove[,numericcolumnstoavg[mc]]>10,"running",
                                  ifelse(howmove[,numericcolumnstoavg[mc]]>0.5,"walking","stationary"))
  }
  # Reorder: ds, the four movement columns, then everything else
  howmove<-howmove[,c(1,36:39,2:35)]
  howmove<-myFun(howmove)  # flatten any list-columns left by the pipeline
  howmove[,c(2:5)]<-lapply(howmove[,c(2:5)],function(x){factor(x)})
  return(howmove)
}
#-----------------------------------------------------------------------------------
# Function that takes a vector of movement 'states' ('running','walking','stationary')
# and returns the single value that is most represented
# (or, if running is tied for most represented, takes 'running' as the state), else it looks at walking
# and last, if neither running nor walking is top (or tied for top), then the state is
# 'stationary'
run.walk.station<-function(t){
  # Collapse a vector of movement states ('running'/'walking'/'stationary')
  # to the single most frequent state, breaking ties in favour of 'running',
  # then 'walking', then 'stationary'.
  # BUG FIX: the original looked states up in table() output, which returns a
  # zero-length vector when a state is absent from t, making the subsequent
  # if(max(...) == rn) comparison error on logical(0). sum() counts are
  # length-safe (0 when absent); NAs are ignored as table() did.
  n.run<-sum(t=="running",na.rm=TRUE)
  n.walk<-sum(t=="walking",na.rm=TRUE)
  n.stat<-sum(t=="stationary",na.rm=TRUE)
  top<-max(n.run,n.walk,n.stat)
  #if most prevalent beh state is running, or tied for most prevalent, then it's running
  if(n.run==top){
    bstate<-'running'
  } else if(n.walk==top){
    #if most prevalent beh state is walking, then it's walking
    bstate<-'walking'
  } else {
    bstate<-'stationary'
  }
  return(bstate)
}
#############
myFun <- function(data) {
  # Flatten every list-column of a data.frame: each list element is unlisted
  # row-wise into plain columns, which are bound after the non-list columns.
  is_list_col <- sapply(data, is.list)
  flattened <- t(apply(data[is_list_col], 1, unlist))
  cbind(data[!is_list_col], flattened)
}
########################################################
# add.alpha ---------------------------------------------------------------
## Add an alpha value to a colour
add.alpha <- function(col, alpha=1){
  # Return the input colours re-encoded as hex strings with the given alpha
  # (opacity) channel applied; errors when no colours are supplied.
  if (missing(col)) {
    stop("Please provide a vector of colours.")
  }
  channels <- sapply(col, col2rgb) / 255  # one column per colour, rows = r/g/b
  apply(channels, 2, function(ch) rgb(ch[1], ch[2], ch[3], alpha = alpha))
}
# characteriseTrajectory --------------------------------------------------
# Define a function which calculates some statistics
# of interest for a single trajectory
characteriseTrajectory <- function(trj) {
  # Summarise a single trajectory with trajr's Traj* helpers: speed
  # statistics, straightness measures, and periodicity (location/value of
  # the first minimum of the direction autocorrelation).
  speed_profile <- TrajDerivatives(trj)
  avg_speed <- mean(speed_profile$speed)
  speed_sd <- sd(speed_profile$speed)
  # Straightness: sinuosity on the raw path, Emax on a rediscretized copy
  sinuosity_val <- TrajSinuosity(trj)
  rediscretized <- TrajRediscretize(trj, .001)
  emax_val <- TrajEmax(rediscretized)
  # Periodicity via the direction autocorrelation's first minimum
  autocorr <- TrajDirectionAutocorrelations(rediscretized, 60)
  first_minimum <- TrajDAFindFirstMinimum(autocorr)
  list(
    mean_speed = avg_speed,
    sd_speed = speed_sd,
    sinuosity = sinuosity_val,
    Emax = emax_val,
    min_deltaS = first_minimum[1],
    min_C = first_minimum[2]
  )
}
# chart.Correlation.RUSTY --------------------------------------------------
#Modified from PerformanceAnalytics::chart.Correlation
# Scatterplot matrix with correlations -- modified from
# PerformanceAnalytics::chart.Correlation to accept per-point colours
# (dotcolors), plotting characters (ppp) and a log-axis spec (logger)
# passed through to pairs(). Upper panels show the correlation coefficient
# (text sized by |r|) with significance stars; lower panels are smoothed
# scatterplots; the diagonal shows histograms with a density overlay.
# Depends on PerformanceAnalytics::checkData and (inside `f`) xts::sd.xts;
# neither is loaded here.
chart.Correlation.RUSTY<-function (R, histogram = TRUE, method = c("pearson", "kendall",
                                                                   "spearman"), dotcolors, ppp, logger, ...)
{
  x = checkData(R, method = "matrix")
  # Default to the first method; a user-supplied value is used as-is
  # NOTE(review): passing a vector for method will reach cor() unreduced.
  if (missing(method))
    method = method[1]
  cormeth <- method
  # Upper panel: correlation value plus significance symbol
  panel.cor <- function(x, y, digits = 2, prefix = "",
                        use = "pairwise.complete.obs", method = cormeth,
                        cex.cor, ...) {
    usr <- par("usr")
    on.exit(par(usr))  # restore user coordinates when the panel exits
    par(usr = c(0, 1, 0, 1))
    r <- cor(x, y, use = use, method = method)
    txt <- format(c(r, 0.123456789), digits = digits)[1]
    txt <- paste(prefix, txt, sep = "")
    if (missing(cex.cor))
      cex <- 0.8/strwidth(txt)
    test <- cor.test(as.numeric(x), as.numeric(y), method = method)
    Signif <- symnum(test$p.value, corr = FALSE, na = FALSE,
                     cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1), symbols = c("***",
                                                                             "**", "*", ".", " "))
    text(0.5, 0.5, txt, cex = cex * (abs(r) + 0.3)/1.3)
    text(0.8, 0.8, Signif, cex = cex, col = 2)
  }
  # NOTE(review): `f` is defined but not referenced below (leftover from the
  # upstream function's normal-curve overlay).
  f <- function(t) {
    dnorm(t, mean = mean(x), sd = sd.xts(x))
  }
  dotargs <- list(...)
  dotargs$method <- NULL
  rm(method)
  # Diagonal panel: histogram with density overlay and rug
  hist.panel = function(x, ... = NULL) {
    par(new = TRUE)
    hist(x, col = "light gray", probability = TRUE,
         axes = FALSE, main = "", breaks = "FD")
    lines(density(x, na.rm = TRUE), col = "red", lwd = 1)
    rug(x)
  }
  if(missing(logger))
    logger=""  # empty string = no log axes in pairs()
  if (histogram)
    pairs(x, gap = 0, lower.panel = panel.smooth, upper.panel = panel.cor,
          diag.panel = hist.panel,col=dotcolors,pch=ppp,log=logger)
  else pairs(x, gap = 0, lower.panel = panel.smooth, upper.panel = panel.cor,col=dotcolors,pch=ppp,log=logger)
}
|
3146bf6d45f30424d474690ea86876a3bcf197ec | 11a54850e58eb3c263562c033f8fda299cc8857c | /man/GetFixtures.Rd | 318bd99c4088ba3430ff0d016634bf318cc1e35b | [] | no_license | marcoblume/pinnacle.API | de62f828378b797fed365c0e3cab7494017c9e15 | 2038b7ed7aecae1d3be0cbaeee5f27e9e381e342 | refs/heads/master | 2021-01-16T23:59:12.572918 | 2019-09-10T07:05:41 | 2019-09-10T07:05:41 | 41,935,966 | 47 | 16 | null | 2019-09-10T07:05:42 | 2015-09-04T20:17:10 | R | UTF-8 | R | false | true | 1,731 | rd | GetFixtures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetFixtures.R
\name{GetFixtures}
\alias{GetFixtures}
\title{Get Non-Settled Events for a Given Sport}
\usage{
GetFixtures(sportid, leagueids = NULL, eventids = NULL, since = NULL,
islive = FALSE)
}
\arguments{
\item{sportid}{An integer giving the sport. If this is missing in
interactive mode, a menu of options is presented to the user.}
\item{leagueids}{A vector of league IDs, or \code{NULL}.}
\item{eventids}{A vector of event IDs, or \code{NULL}.}
\item{since}{To receive only listings updated since the last query, set
\code{since} to the value of \code{last} from the previous fixtures
response. Otherwise it will query all listings.}
\item{islive}{When \code{TRUE}, retrieve only live events.}
}
\value{
A data frame with rows containing matching events and columns containing
sport, league, and event information. Not all sports return the same listing
format -- in particular, only baseball listings will have pitcher
information.
}
\description{
Queries the event listing for a given sport, which can be filtered by league
and/or event ID, and narrowed to include only live events.
}
\details{
This function will raise an error if the API does not return HTTP status
\code{OK}. For information on the possible errors, see the API documentation
for \href{https://pinnacleapi.github.io/#operation/Fixtures_V1_Get}{Get Fixtures}.
}
\examples{
\donttest{
SetCredentials("TESTAPI", "APITEST")
AcceptTermsAndConditions(accepted=TRUE)
GetFixtures(sportid = 41, leagueids = 191545)}
}
\seealso{
See \code{\link{GetSettledFixtures}} to retrieve settled events, or
\code{\link{GetSpecialFixtures}} to retrieve special contestants for a sport.
}
|
f4f32fd32f67f7d02013d5062e1c31bbb60305f7 | b1ca7708a9a0783e9e18f3d17cb48c2615130a64 | /cachematrix.R | 8d262974f4f2b10f6b1d978d006f0da9e52a950c | [] | no_license | atwellke1984/ProgrammingAssignment2 | bb4ee9545ac3932f498a8715df4e650d8beb32a9 | df26fe271197d7b8008695d3705b08249c069f8c | refs/heads/master | 2021-01-22T17:57:46.574509 | 2016-08-20T09:20:02 | 2016-08-20T09:20:02 | 65,477,465 | 0 | 0 | null | 2016-08-11T14:48:27 | 2016-08-11T14:48:27 | null | UTF-8 | R | false | false | 1,422 | r | cachematrix.R | ## The makeCacheMatrix and cacheSolve functions below cache and calculate the inverse of a matrix
## The makeCacheMatrix creates a list to:-
## 1. set the value of the matrix (set() function)
## 2. get the value of the matrix (get() function)
## 3. set the value of the inverse of the matrix (setinverse() function)
## 4. get the value of the inverse of the matrix (getinverse() function)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse. Returns a list
  # of accessors: set/get for the matrix itself, setinverse/getinverse for
  # the cached inverse. Assigning a new matrix via set() invalidates the
  # cache so cacheSolve() will recompute.
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix => stale cache
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## The cacheSolve calculates the inverse of the matrix created in the makeCacheMatrix function
## First the cacheSolve function checks if the inverse of the matrix has already been calculated.
## If the inverse has already been calculated, the computation is skipped and it gets the inverse of the matrix from the cache (getinverse()).
## If the inverse has not already been calculated, the inverse of the matrix is calculated and set in the cache (setinverse()).
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held by a makeCacheMatrix object `x`.
  # On a cache hit the stored inverse is returned after an informational
  # message; on a miss it is computed with solve(), stored via setinverse(),
  # and returned. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
8cca3c670736d606e6b52c83e27876479d59e070 | 752adaa6cdcd6c95b3c16d298754db551dc519ea | /man/geom_h0.Rd | 83c98c3db2d5781387cc6fca67d556e6269b235b | [] | no_license | pttry/ggptt | db5e2f3c80f494e524d9ad44314563e6bd7bc7be | b1b56b3c5c4296c344e2cb4cf90eb2c0b3ccd1de | refs/heads/master | 2023-05-12T07:28:11.754206 | 2023-05-01T06:20:23 | 2023-05-01T06:20:23 | 142,289,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 421 | rd | geom_h0.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_lines.R
\name{geom_h0}
\alias{geom_h0}
\title{Horizontal line on 0.}
\usage{
geom_h0(dashed = FALSE)
}
\arguments{
\item{dashed}{A locigal to have a dashed line.}
}
\description{
This geom allows you to annotate the plot with horizontal lines on y = 0.
}
\examples{
p <- ggplot(mtcars, aes(x = wt, y=mpg)) + geom_point()
p + geom_h0()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.