blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
6dcc99d3ed050859830199e327f39340087ca977
c3d5dd693c83876a229fc2014e1a4ff3e9f05c8f
/R/calcPSG-bin.R
37d38e70c634a139d179aaa52ffb4dc6b395f152
[]
no_license
bvenner/OTM33A
2d7ff779e49824085030ff4abb8353814334aebc
d1c2e0e2d224017cdd281335fc3df368b6f4fdec
refs/heads/master
2021-07-23T18:30:47.129786
2018-12-13T23:11:52
2018-12-13T23:11:52
148,807,856
0
1
null
null
null
null
UTF-8
R
false
false
2,496
r
calcPSG-bin.R
#' Calculate plume direction and spread using binned data and natural scale
#'
#' Bins wind direction, averages the analyte concentration within each bin,
#' and fits a Gaussian to the binned means with \code{nls()}. The fitted
#' centre, spread, and peak height are attached to \code{dat} as attributes
#' named \code{"<Analyte>.wd.rot"}, \code{"<Analyte>.wd.sigma"}, and
#' \code{"<Analyte>.wd.peak"}.
#'
#' @param dat Data table, with names obtained from GMAP data output as of
#'   2018-Aug; must contain columns \code{wd3}, \code{sub}, and the analyte
#'   column. Modified by reference (columns \code{wd.filter}, \code{wdbin}).
#' @param Analyte Name of the concentration column to fit (default "CH4").
#' @param binwidth Width of wind direction bins, in degrees.
#' @param wdfilt Wind direction cut-off (degrees): only observations with
#'   abs(wd3) <= wdfilt enter the fit.
#' @param min.n Minimum bin size, as a fraction of the total row count, for
#'   a bin to be used in the fit.
#' @param plot If TRUE, plot the binned means and the fitted Gaussian.
#' @return object of class "lm" with plume fit, has the side effect of adding
#'   column theta to dat
#' @export
#' @examples
#' calcPSG.bin(dat)
calcPSG.bin <- function(dat, Analyte = "CH4", binwidth = 2, wdfilt = 180,
                        min.n = 0, plot = TRUE) {
  ## dat: data.table containing raw data, with header information in
  ##      attribute fields
  ## binwidth: width of wind direction bins (in degrees)
  ## wdfilt: wind direction cut-off (degrees), e.g. 60 indicates only use
  ##         data when the wind is coming from the direction of peak
  ##         concentration
  dat <- tryCatch({
    # Temporarily rename the analyte column so the data.table expressions
    # below can refer to it generically; the finally handler restores it.
    setnames(dat, Analyte, "Analyte")
    bins <- seq(-360, 360, binwidth)
    # Flag observations inside the wind-direction window.
    # (The original ifelse(cond, TRUE, FALSE) was redundant: the
    # comparison already yields a logical.)
    dat[, wd.filter := abs(wd3) <= wdfilt]
    # Label each observation with its wind-direction bin.
    dat[, wdbin := cut(wd3, bins, right = FALSE)]
    # Aggregate the analyte by wd bin; keep bin counts and mean direction.
    ch4.wdbin <- dat[sub == TRUE & wd.filter == TRUE,
                     list(Analyte = mean(Analyte), n = .N, wd3 = mean(wd3)),
                     wdbin][order(wdbin)]
    # Keep bins with more than min.n * (total rows) observations, for
    # consistency with the ORD code.
    ch4.wdbin[, n.filter := n > min.n * dat[, .N]]
    # Starting values for the Gaussian fit: location and height of the peak.
    mu0 <- ch4.wdbin[which.max(Analyte), wd3]
    k0 <- ch4.wdbin[, max(Analyte)]
    # Fit Gaussian curve to wd bins, weighted by bin counts.
    # BUG FIX: the data set was passed as `dat=`, which only worked through
    # partial matching of nls()'s `data` argument; spelled out explicitly.
    wdfit <- nls(Analyte ~ k * exp(-1 / 2 * ((wd3 - mu) / sigma)^2),
                 start = c(mu = mu0, sigma = 10, k = k0),
                 weights = n,
                 data = ch4.wdbin[n.filter == TRUE],
                 algorithm = "port")
    # Record the fitted parameters (mu, sigma, k) as attributes on dat.
    setattr(dat, paste(Analyte, "wd.rot", sep = "."), as.numeric(coef(wdfit)[1]))
    setattr(dat, paste(Analyte, "wd.sigma", sep = "."), as.numeric(coef(wdfit)[2]))
    setattr(dat, paste(Analyte, "wd.peak", sep = "."), as.numeric(coef(wdfit)[3]))
    # Plot Gaussian fit to wind direction.
    if (isTRUE(plot)) {
      # (An unused local `mm` holding the wd3 range was removed.)
      gauss.dat <- data.frame(wd3 = seq(-180, 180, length = 500))
      gauss.dat$Fit <- predict(wdfit, gauss.dat)
      plot.title <- attr(dat, "file.name")
      # Scalar condition: plain if/else instead of ifelse().
      if (is.null(plot.title)) plot.title <- "file.name not set"
      fig1 <- ggplot(ch4.wdbin[n.filter == TRUE],
                     aes(x = wd3, y = Analyte), col = "black") +
        geom_point(alpha = 0.2) +
        geom_line(data = gauss.dat, aes(x = wd3, y = Fit), col = "red") +
        theme_bw(base_size = 16) +
        xlab("Wind Direction") +
        ylab(Analyte) +
        ggtitle(plot.title)
      print(fig1)
    }
    return(dat)
  }, finally = {
    # Always restore the original analyte column name, even on error.
    setnames(dat, "Analyte", Analyte)
  })
}
ddf42a6c7600de4e04c10c09095b13cbed812efa
738661d07b2dbd0e947669d2e7e82ff544a91364
/bioinformatics-final/core_script.r
fb1a8af631dc104b6124b6ab3da68d4462217a37
[]
no_license
Joshuanguyen22/BioinformaticsFinal
8794f5092cb31194a62871442bd9c44c3851176e
c1e7e2218410b82ab855b4786ac92720695c7da2
refs/heads/master
2020-05-18T08:43:51.563009
2019-05-15T02:18:14
2019-05-15T02:18:14
184,303,412
1
0
null
null
null
null
UTF-8
R
false
false
1,482
r
core_script.r
# MNIST digit-recognition example with keras, following:
#   https://keras.rstudio.com/
#   https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/
# One-time setup:
#   devtools::install_github("rstudio/keras")
library(keras)

# Fetch the MNIST digits and unpack the train/test splits.
mnist <- dataset_mnist()
train_images <- mnist$train$x
train_labels <- mnist$train$y
test_images <- mnist$test$x
test_labels <- mnist$test$y

# Flatten each 28x28 image matrix into a 784-length vector.
train_images <- array_reshape(train_images, c(nrow(train_images), 784))
test_images <- array_reshape(test_images, c(nrow(test_images), 784))

# Rescale pixel intensities from [0, 255] to [0, 1].
train_images <- train_images / 255
test_images <- test_images / 255

# One-hot encode the labels, currently integers 0-9; see
# https://www.quora.com/What-is-one-hot-encoding-and-when-is-it-used-in-data-science
train_labels <- to_categorical(train_labels, 10)
test_labels <- to_categorical(test_labels, 10)

# Define a fully-connected network: 784 -> 256 -> 128 -> 10, with dropout.
model <- keras_model_sequential()
model %>%
  layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 10, activation = 'softmax')

# Compile with categorical cross-entropy loss and RMSprop.
model %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)

# Train for 30 epochs, holding out 20% of the training data for validation.
history <- model %>% fit(
  train_images, train_labels,
  epochs = 30,
  batch_size = 128,
  validation_split = 0.2
)

# Evaluate on the held-out test set and emit class predictions.
model %>% evaluate(test_images, test_labels)
model %>% predict_classes(test_images)
bb0ef510ac8c01aae06c74a75038c44f2cff3e6d
e44c19490a242076814bc634b4692a8be25b680f
/R/efast_sampling.R
baee658b725158fcd3a6565d3e8df7a90c865d15
[]
no_license
jc1571/spartan
5fea09f62be7fcac12f8ad6f76b6d2a93445a58a
7f5f94506363dc811aa5790f31563659cd25a6a8
refs/heads/master
2021-01-02T23:09:57.394383
2017-08-09T12:20:02
2017-08-09T12:20:02
99,481,324
0
0
null
2017-08-06T11:12:16
2017-08-06T11:12:16
null
UTF-8
R
false
false
7,247
r
efast_sampling.R
#' Generates parameter sets for variance-based eFAST Sensitivity Analysis
#'
#' This technique analyses simulation results generated through sampling
#' using the eFAST approach (extended Fourier Amplitude Sampling Test).
#' This perturbs the value of all parameters at the same time, with the
#' aim of partitioning the variance in simulation output between input
#' parameters. Values for each parameter are chosen using fourier frequency
#' curves through a parameters potential range of values. A selected
#' number of values are selected from points along the curve. Though all
#' parameters are perturbed simultaneously, the method does focus on
#' one parameter of interest in turn, by giving this a very different
#' sampling frequency to that assigned to the other parameters.
#' Thus for each parameter of interest in turn, a sampling frequency is
#' assigned to each parameter and values chosen at points along the curve. So
#' a set of simulation parameters then exists for each parameter of interest.
#' As this is the case, this method can be computationally expensive,
#' especially if a large number of samples is taken on the parameter search
#' curve, or there are a large number of parameters. On top of this, to ensure
#' adequate sampling each curve is also resampled with a small adjustment to the
#' frequency, creating more parameter sets on which the simulation should be run.
#' This attempts to limit any correlations and limit the effect of repeated
#' parameter value sets being chosen. Samples are output to CSV file, one per
#' parameter/curve pairing
#'
#' @param FILEPATH Directory where the parameter samples should be output to
#' @param NUMCURVES The number of 'resamples' to perform (see eFAST
#' documentation) - recommend using at least 3
#' @param NUMSAMPLES The number of parameter subsets to generate - should be at
#' least 65 for eFAST
#' @param PARAMETERS Array containing the names of the parameters of which
#' parameter samples will be generated. For eFAST, remember to add a
#' parameter named 'Dummy'
#' @param PMIN Array containing the minimum value that should be used for each
#' parameter and the dummy. Sets a lower bound on sampling space
#' @param PMAX Array containing the maximum value that should be used for each
#' parameter and the dummy. Sets an upper bound on sampling space
#'
#' @export
efast_generate_sample <- function(FILEPATH, NUMCURVES, NUMSAMPLES, PARAMETERS,
                                  PMIN, PMAX) {
  if (file.exists(FILEPATH)) {
    NUMPARAMS <- length(PARAMETERS)
    wanted_n <- NUMSAMPLES * NUMPARAMS * NUMCURVES # wanted no. of sample points

    # OUTPUT
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of k frequencies
    # omi : frequency for the group of interest
    # omci[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output

    MI <- 4 # maximum number of fourier coefficients
    # that may be retained in calculating the partial
    # variances without interferences between the
    # assigned frequencies

    # Computation of the frequency for the group
    # of interest omi and the # of sample points NUMSAMPLES (here N=NUMSAMPLES)
    # NOTE: NUMSAMPLES is recomputed here so that it is compatible with the
    # chosen base frequency; the caller's value only sets wanted_n above.
    omi <- floor(((wanted_n / NUMCURVES) - 1) / (2 * MI) / NUMPARAMS)
    NUMSAMPLES <- 2 * MI * omi + 1
    # NOTE(review): this only prints a message and execution continues;
    # presumably it should abort -- confirm intended behaviour upstream.
    if (NUMSAMPLES * NUMCURVES < 65)
      print("Error: sample size must be >= 65 per factor")

    # 4-D result array: sample x parameter-column x parameter-of-interest
    # x curve.
    PARAMETERVALS <- array(0, dim = c(NUMSAMPLES, NUMPARAMS, NUMPARAMS,
                                      NUMCURVES))

    for (PARAMNUM in 1:NUMPARAMS) {
      # Algorithm for selecting the set of frequencies.
      # omci(i), i=1:k-1, contains the set of frequencies
      # to be used by the complementary group.
      omci <- efast_setfreq(NUMPARAMS, omi / 2 / MI, PARAMNUM)
      OM <- array(0, dim = c(1, NUMPARAMS, 1))

      # Loop over the NUMCURVES search curves.
      for (CURVENUM in 1:NUMCURVES) {
        # Setting the vector of frequencies OM
        # for the k parameters
        cj <- 1
        for (j in 1:NUMPARAMS) {
          if (j == PARAMNUM) {
            # For the parameter (factor) of interest
            # RECODE WHEN WORKED OUT OM ARRAY
            OM[PARAMNUM] <- omi
          } else {
            # For the complementary group.
            # RECODE WHEN WORKED OUT ARRAY
            OM[j] <- omci[cj]
            cj <- cj + 1
          }
        }

        # Setting the relation between the scalar
        # variable S and the coordinates
        # {X(1),X(2),...X(k)} of each sample point.
        # FI is a fresh random phase shift per parameter, per curve.
        FI <- array(runif(NUMPARAMS, min = 0, max = 1),
                    dim = c(NUMPARAMS, 1, 1))
        FI <- FI * 2 * pi
        # S_VEC: evenly spaced points on (-pi, pi).
        S_VEC <- pi * (2 * (1:NUMSAMPLES) - NUMSAMPLES - 1) / NUMSAMPLES
        OM_VEC <- OM[1:NUMPARAMS]
        # Broadcast the phase shifts across all sample points.
        FI_MAT <- array(0, dim = c(NUMPARAMS, NUMSAMPLES, 1))
        for (i in 1:NUMSAMPLES) {
          FI_MAT[, i, 1] <- FI
        }

        # FORMULA IN ORIGINAL MATLAB CODE:
        #ANGLE = OM_VEC'*S_VEC+FI_MAT;
        # CONVERSION TO R:
        om_vec_svec <- array(OM_VEC %*% t(S_VEC),
                             dim = c(NUMPARAMS, NUMSAMPLES, 1))
        ANGLE <- om_vec_svec + FI_MAT

        # TRANSPOSE ARRAY (params x samples -> samples x params).
        ANGLET <- array(0, dim = c(NUMSAMPLES, NUMPARAMS, 1))
        for (i in 1:NUMSAMPLES) {
          ANGLET[i, , 1] <- ANGLE[, i, 1]
        }

        # NOW CALCULATE THE PARAMETER VALUES - THESE ARE STORED IN A
        # MULTIDIMENSIONAL ARRAY, AS EACH CURVE HAS SEVEN SETS OF PARAMETER
        # VALUES
        PARAMETERVALS[, , PARAMNUM, CURVENUM] <- 0.5 + asin(sin(ANGLET)) / pi

        # AS THESE VALUES WILL CURRENTLY BE BETWEEN 0 AND 1, TRANSFORM THE
        # DISTRIBUTION TO GIVE TRUE PARAMETER VALUES
        PARAMETERVALS[, , PARAMNUM, CURVENUM] <- efast_parameterdist(
          PARAMETERVALS[, , PARAMNUM, CURVENUM], PMAX, PMIN, NUMSAMPLES,
          length(PARAMETERS))
      }
    }

    # NOW OUTPUT THE RESULTS - SPLIT BY CURVE FILE
    # SO, WILL HAVE ONE FILE FOR EACH PARAMETER OF INTEREST, FOR EACH CURVE
    for (CURVENUM in 1:NUMCURVES) {
      for (PARAMNUM in 1:NUMPARAMS) {
        parameter_file <- paste(FILEPATH, "/Curve", CURVENUM, "_",
                                PARAMETERS[PARAMNUM], ".csv", sep = "")
        output_params <- PARAMETERVALS[, , PARAMNUM, CURVENUM]
        colnames(output_params) <- c(PARAMETERS)
        write.csv(output_params, parameter_file, quote = FALSE,
                  row.names = FALSE)
        print(paste("Parameter Set for ", CURVENUM,
                    " Generated and Output to ", FILEPATH, "/Curve",
                    CURVENUM, "_", PARAMETERS[PARAMNUM], ".csv", sep = ""))
      }
    }
  } else {
    print("The directory specified in FILEPATH does not exist. No parameter samples generated")
  }
}
0b31b5b44122e43404d608dfe39bc31097c888e5
7ef8ab4e1cc09af5fd0b7a025213e6eabe7808f1
/Tesser.R
2214a0c32114dc3f713011ba98590c2a0995dc40
[]
no_license
aegerton/tesseract
5d4cc9edbcac93495ff9a9c796db54b39e9fa05b
9eded0666a09991e1e8f782ac7a547ab3846ecca
refs/heads/master
2021-08-29T17:53:05.556939
2017-12-14T14:14:12
2017-12-14T14:14:12
114,258,654
0
0
null
null
null
null
UTF-8
R
false
false
715
r
Tesser.R
# OCR of Indonesian-language underwriting documents (PNG and PDF) using
# tesseract with the "ind" trained data, rendering PDFs via pdftools.
library("pdftools", lib.loc="~/R/win-library/3.4")
library("tesseract", lib.loc="~/R/win-library/3.4")

# One-time setup: download the Indonesian language data.
## tesseract_download("ind")
## indonesia<- tesseract("indonesia")

# OCR a single pre-rendered PNG page and print the recognised text.
cat(ocr("D:/Users/user/Dropbox/AIH Underwriting/SCOR/Askrindo/PDF/File 4-12-17, 8 06 27 PM.png", engine =tesseract("ind")))

# Render the first two PDF pages to TIFF at 400 dpi, OCR them, then delete
# the temporary images. (BUG FIX: removed the unused local `dest`, which
# was assigned a directory path but never referenced.)
img_file <- pdf_convert("D:/Users/user/Dropbox/AIH Underwriting/SCOR/Askrindo/PDF/NOTAKESEPAHAMANSUMBERSEGARAPRIMADAYA.pdf", format = 'tiff', pages = 1:2, dpi = 400)
text <- ocr(img_file, engine = tesseract("ind"))
unlink(img_file)

# Show the recognised text and save it next to the source documents.
cat(text)
cat(text, file = "D:/Users/user/Dropbox/AIH Underwriting/SCOR/Askrindo/PDF/File 4-12-17, 8 06 27 PM.txt")
5ea7aca4be16863e2c7a5dd8df06bf8ac3fe2a7c
2f67c7aec09cfcd17c1ccb958abd4323095e74b7
/R/pava.sa.R
af9dbd4d6a0f310673eeac57e79822b330b70a1d
[]
no_license
cran/Iso
1adbb236f878019b3594288224f1e4edda19d429
b580a5802d570e357f48ae1bf48a59ad9d583f84
refs/heads/master
2021-08-16T18:01:11.168883
2020-05-26T04:13:34
2020-05-26T04:13:34
17,691,833
0
0
null
null
null
null
UTF-8
R
false
false
1,752
r
pava.sa.R
pava.sa <- function(y, w = NULL, decreasing = FALSE, long.out = FALSE,
                    stepfun = FALSE) {
  #
  # Stand-alone pool-adjacent-violators algorithm: isotonic regression of y
  # (optionally weighted by w) under a simple increasing linear ordering,
  # written entirely in R (no dynamically loaded fortran).
  #
  # The return value depends on the flags:
  #   long.out = FALSE, stepfun = FALSE : the fitted values only
  #   long.out = TRUE,  stepfun = FALSE : list(y, w, tr), where tr holds for
  #       each observation the smallest index in its level set (thereby
  #       keeping track of the level sets), and w is the final weights
  #   long.out = FALSE, stepfun = TRUE  : the step-function representation
  #   long.out = TRUE,  stepfun = TRUE  : the list above, plus h = step fn
  # decreasing = TRUE fits an antitonic (decreasing) regression by
  # reversing the data before and after the fit.
  #
  if (decreasing) y <- rev(y)
  n <- length(y)
  if (is.null(w)) {
    w <- rep(1, n)
  } else if (decreasing) {
    w <- rev(w)
  }
  # blk[k] = how many original observations the k-th pooled block represents.
  blk <- rep(1, n)

  # Sweep left to right, pooling adjacent violators (weighted means), until
  # a complete pass makes no change.
  repeat {
    pooled <- FALSE
    i <- 1
    while (i < n) {
      if (y[i] > y[i + 1]) {
        pooled <- TRUE
        wtot <- w[i] + w[i + 1]
        y[i + 1] <- (w[i] * y[i] + w[i + 1] * y[i + 1]) / wtot
        w[i + 1] <- wtot
        blk[i + 1] <- blk[i] + blk[i + 1]
        y <- y[-i]
        w <- w[-i]
        blk <- blk[-i]
        n <- n - 1
      }
      i <- i + 1
    }
    if (!pooled) break
  }

  # Expand the pooled block values back out to the original length.
  y <- rep(y, blk)
  if (decreasing) y <- rev(y)

  if (long.out || stepfun) {
    if (decreasing) blk <- rev(blk)
    # Smallest original index within each level set, repeated per entry.
    tr <- rep(tapply(seq_along(y), rep(seq_along(blk), blk), min), blk)
  }
  if (long.out) {
    if (decreasing) w <- rev(w)
    w <- rep(w, blk)
    lout <- list(y = y, w = w, tr = tr)
  }
  if (stepfun) {
    # Jump locations are where the level set changes.
    knots <- 1 + which(diff(tr) != 0)
    h <- stepfun(knots, c(y[1], y[knots]))
  }

  # Assemble the requested combination of outputs.
  if (long.out && stepfun) {
    c(lout, list(h = h))
  } else if (stepfun) {
    h
  } else if (long.out) {
    lout
  } else {
    y
  }
}
0b6b73da461c20471001fd3e8880f2e4afa777c1
b3322b8ab4ea8a02c5b4d50aff2ed3ed7736aac1
/Problema3-8.R
7d5c3b5f1c022162abd10f3df32f42b2ce01bfc3
[]
no_license
pgbeermann2017/ExpDesign
78959e26b53ac259936066023218ac24e0793032
2c71bdf84f3e50405dd1dd5fa9b97a2bce5966c0
refs/heads/master
2020-03-31T07:00:38.572973
2018-10-08T15:26:21
2018-10-08T15:26:21
152,004,032
0
0
null
null
null
null
UTF-8
R
false
false
389
r
Problema3-8.R
## Problema 3-8: one-way ANOVA of density (Y) against rodding level
## (Varillado), followed by Tukey HSD pairwise comparisons and diagnostic
## plots. (BUG FIX: removed rm(list = c(ls())) -- a script should never
## wipe the caller's workspace.)

## Three replicate measurements at each of four rodding levels.
N10 <- c(1530, 1530, 1440)
N15 <- c(1610, 1650, 1500)
N20 <- c(1560, 1730, 1530)
N25 <- c(1500, 1490, 1510)

## Reshape wide -> long: one row per observation, echoing each stage.
df <- data.frame(N10, N15, N20, N25)
df
df <- stack(df)
names(df) <- c("Y", "Varillado")
df

## Fit the one-way ANOVA model.
modelo <- aov(Y ~ Varillado, data = df)
summary(modelo)

## Tukey honest significant differences between all level pairs.
tk1 <- TukeyHSD(modelo)
tk1
plot(tk1)

## Diagnostics: distribution by group, and normality of residuals.
boxplot(Y ~ Varillado, data = df)
qqnorm(modelo$residuals)
qqline(modelo$residuals)
c13caf3257cb24269f17fb2a64dd8ab63baaa213
4f5ce42e56302a2cf68e11374f27585377e8135c
/Assignment/run_analysis.R
8958c8310e0cc476ae1a8f32efcb1dbc83fa89fb
[]
no_license
kevintham/Getting-and-Cleaning-Data-Coursera-
4d4656ebb5ec5d5120064b19852c78828222c93f
625356db880c0335ef413a7fc03ed02a1a9cf11c
refs/heads/master
2021-06-24T05:34:03.848411
2017-09-05T10:16:40
2017-09-05T10:16:40
102,448,765
0
0
null
null
null
null
UTF-8
R
false
false
1,896
r
run_analysis.R
## run_analysis.R -- Getting & Cleaning Data course project.
## Downloads the UCI HAR dataset, merges the train/test splits, keeps only
## the mean()/std() features, labels activities, and writes per
## activity/subject feature averages to averaged_data.txt.
## (BUG FIX: removed rm(list = ls(all = TRUE)) -- a script should never
## wipe the caller's workspace.)
library(plyr)

filename <- "getdata_dataset.zip"

## NOTE: setwd to repository path first!
##
## setwd('repository_path_here')

## Download and unzip the dataset.
## BUG FIX: the URL string had a trailing space, which corrupts the
## request made by download.file().
if (!file.exists(filename)) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}

## Read in test sets into data.table format
xtest <- read.table('UCI HAR Dataset/test/X_test.txt')
ytest <- read.table('UCI HAR Dataset/test/y_test.txt')
subject_test <- read.table('UCI HAR Dataset/test/subject_test.txt')

## Read in training sets into data.table format
xtrain <- read.table('UCI HAR Dataset/train/X_train.txt')
ytrain <- read.table('UCI HAR Dataset/train/y_train.txt')
subject_train <- read.table('UCI HAR Dataset/train/subject_train.txt')

## Merge training and test sets, name subject
X <- rbind(xtrain, xtest)
y <- rbind(ytrain, ytest)
subject <- rbind(subject_train, subject_test)
names(subject) <- 'subject'

## Read in feature list and select relevant features (those that end with
## mean() or std()) and then label the features accordingly
features <- as.character(read.table('UCI HAR Dataset/features.txt')[, 2])
extract_vec <- grep('.*-(mean|std)\\(\\)', features)
feature_names <- features[extract_vec]
X_ext <- X[, extract_vec]
names(X_ext) <- feature_names

## replace Y data with respective activity names
target_labels <- as.character(read.table('UCI HAR Dataset/activity_labels.txt')[, 2])
y_lab <- data.frame(target_labels[unlist(y)])
names(y_lab) <- 'activity'

## merge data into one data.frame
data <- cbind(X_ext, subject, y_lab)

## average the data by activity and subject, then write it out.
## BUG FIX: write.table's argument is row.names; the original `row.name`
## only worked through partial argument matching.
averaged_data <- ddply(data, .(activity, subject),
                       function(x) colMeans(x[, 1:66]))
write.table(averaged_data, "averaged_data.txt", row.names = FALSE)
48129ad269bf4f98a11bd96709d6196ce3189797
d1828c58599c9e5e0ff8304a739a45156897f9c6
/ui.R
c119eb97c4f074627f4fb1b8e9cd002128298fe0
[]
no_license
mriveroj/proyecto_FOCUS
dc88d247704eaf0d649866ef3f3cdff979a8762d
ca52ed6d4e5552bf7b3590d55846ec688494ae71
refs/heads/main
2023-04-02T06:50:47.202194
2021-04-07T15:56:59
2021-04-07T15:56:59
350,563,916
0
0
null
2021-03-23T03:10:47
2021-03-23T03:10:47
null
UTF-8
R
false
false
8,014
r
ui.R
# ui.R -- shinydashboard UI for the "Road Accidents in UK" app.
# Three tabs: General (bar chart + map), Space (spatial distributions),
# Model (severity-prediction inputs). Server side renders the outputs
# referenced by the outputId values below.
library(shiny)
library(shinydashboard)
library(tidyverse)
library(leaflet)
library(plotly)
library(readr)
library(rsconnect)
library(repmis)

# Loads the cleaned accident data into an object named `datos`
# (presumably -- the .Rdata contents are defined upstream; confirm).
source_data("https://github.com/brianlli2000/Accident_UK/blob/main/datos_limpios.Rdata?raw=false")
#fit <- readRDS("modelo.rds")

# Convert the categorical columns to factors so their levels() can feed
# the selectInput choices below.
datos$Accident_Severity=as.factor(datos$Accident_Severity)
datos$Day_of_Week=as.factor(datos$Day_of_Week)
datos$Road_Type=as.factor(datos$Road_Type)
datos$Speed_limit=as.factor(datos$Speed_limit)
datos$Pedestrian_Crossing.Human_Control=as.factor(datos$Pedestrian_Crossing.Human_Control )
datos$Pedestrian_Crossing.Physical_Facilities=as.factor(datos$Pedestrian_Crossing.Physical_Facilities)
datos$Light_Conditions=as.factor(datos$Light_Conditions)
datos$Weather_Conditions=as.factor(datos$Weather_Conditions)
datos$Road_Surface_Conditions=as.factor(datos$Road_Surface_Conditions)
datos$Special_Conditions_at_Site=as.factor(datos$Special_Conditions_at_Site)
datos$Carriageway_Hazards=as.factor(datos$Carriageway_Hazards)
datos$Urban_or_Rural_Area=as.factor(datos$Urban_or_Rural_Area)

shinyUI(dashboardPage(skin="purple",
  dashboardHeader(title = "Road Accidents in UK", titleWidth = 300),
  # Left-hand navigation: one menu item per tab.
  dashboardSidebar(
    sidebarMenu(
      menuItem("General", tabName = "General", icon = icon("dashboard")),
      menuItem("Space", tabName = "Space", icon = icon("dashboard")),
      menuItem("Model", tabName = "Model", icon = icon("dashboard"))
    )
  ),
  dashboardBody(
    tabItems(
      # --- Tab 1: overview bar chart, summary infoboxes, leaflet map ---
      tabItem(tabName = "General", h1("General Information"),
        fluidRow(
          box(title = "Bar Diagram by Year and Road Type", status = "info",
              solidHeader = TRUE, collapsible = TRUE,
            selectInput(inputId = "year1", label = "Year",choices = c(2005,2006,2007,2009,2010,2011,2012)),
            selectInput(inputId = "roadtype", label = "Road Type",choices = levels(datos$Road_Type)),
            plotlyOutput(outputId = "barra")
          ),
          infoBox(#color = "teal",
            "Casualties Report",textOutput(outputId = "muertes")
          ),
          infoBox(#color = "teal",
            "Accidents report",textOutput(outputId = "mean")
          ),
          box(title = "Map of United Kingdom", status = "warning",
              solidHeader = TRUE, collapsible = TRUE,
            leafletOutput(outputId = "mapa")
          )
        )
      ),
      # --- Tab 2: spatial density plot and lat/long trends by year ---
      tabItem(tabName = "Space", h2("Spatial Distribution of Accidents"),
        fluidRow(
          box(title = "United Kingdom Road Accident by Year", status = "primary",
              solidHeader = TRUE, collapsible = TRUE,
            selectInput(inputId = "year2", label = "Year",choices = c(2005,2006,2007,2009,2010,2011,2012)),
            plotOutput(outputId = "humo")
          ),
          tabBox(title = "Yearwise trend of accident location",
            tabPanel("Latitude",
              plotlyOutput(outputId = "latitud")
            ),
            tabPanel("Longitude",
              plotlyOutput(outputId = "longitud")
            )
          )
        )
      ),
      # --- Tab 3: inputs m1..m10 feeding the severity prediction output ---
      tabItem(tabName = "Model",
        fluidRow(
          box(title = "Predicion of Accident Severity ", status = "info",
              solidHeader = TRUE, collapsible = TRUE,
            numericInput(inputId = "m1", label = "Number of Vehicles",min=1,value = 1),
            numericInput(inputId = "m2", label = "Number of Casualties",min=1,value = 1),
            selectInput(inputId = "m3", label = "Day of Week",choices = c("1","2","3","4","5","6","7")),
            selectInput(inputId = "m4", label = "Road Type",choices = levels(datos$Road_Type)),
            selectInput(inputId = "m5", label = "Pedestrian Crossing Human Control",choices = levels(datos$Pedestrian_Crossing.Human_Control)),
            selectInput(inputId = "m6", label = "Pedestrian Crossing Physical Facilities",choices = levels(datos$Pedestrian_Crossing.Physical_Facilities)),
            selectInput(inputId = "m7", label = "Light Conditions",choices = levels(datos$Light_Conditions)),
            selectInput(inputId = "m8", label = "Weather Conditions",choices = levels(datos$Weather_Conditions)),
            selectInput(inputId = "m9", label = "Road Surface Conditions",choices = levels(datos$Road_Surface_Conditions)),
            selectInput(inputId = "m10", label = "Carriageway Hazards",choices = levels(datos$Carriageway_Hazards))
          ),
          box(
            textOutput(outputId = "prediction")
          )
        )
      )
    )
  )
))
9d821444e8167ebb036f6932f29ccc9553756a3a
8ac03eb525450c5a88fc8f14370a2385b5b99a41
/man-roxygen/roxlate-gf-sc.R
108ba5039c9b1a078ad4a1f95017bea45f9bdc9c
[ "Apache-2.0" ]
permissive
felipegonzalez/graphframes
f4711a3dbb70a61ac860aec07463b67f2cb1ec05
dd7e5985b1702d0c91ef558b15c890f21221f1ad
refs/heads/master
2023-09-05T05:40:53.838876
2021-10-21T06:32:16
2021-10-21T06:32:16
null
0
0
null
null
null
null
UTF-8
R
false
false
33
r
roxlate-gf-sc.R
#' @param sc A Spark connection.
0569d19485a36ca38567443b0cefd954ab393107
ca5418a01faf094b74f13b7ba3b6bd29113a908e
/practice_r/pbdDMAT_ex2.r
92cd531f380433c8d6cd7411cb86d89c900cf19c
[]
no_license
ikarus702/pbd_KRLS
fae59baf048ac1ca928e602b3656a86a323eb8bf
c26509a52ccb6c26b1d3bab30f5c08b0b89a07d7
refs/heads/master
2020-05-30T08:58:47.472019
2019-07-29T16:37:29
2019-07-29T16:37:29
189,629,987
0
0
null
null
null
null
UTF-8
R
false
false
386
r
pbdDMAT_ex2.r
# pbdDMAT example 2: convert one distributed matrix through each supported
# distribution scheme, printing the object and each rank's local submatrix.
# Run under MPI, e.g.: mpiexec -np 4 Rscript pbdDMAT_ex2.r
library(pbdDMAT, quiet=TRUE)

# Initialise the process grid (collective call across all ranks).
init.grid()

# Distributed matrix holding 1:30 with 10 rows.
dx <- ddmatrix(1:30, nrow=10)

# Block distribution.
x <- as.block(dx)
x
comm.print(submatrix(x))

# Row-block distribution.
x <- as.rowblock(dx)
x
comm.print(submatrix(x))

# Column-block distribution.
x <- as.colblock(dx)
x
comm.print(submatrix(x))

# Row-cyclic distribution.
x <- as.rowcyclic(dx)
x
comm.print(submatrix(x))

# Column-cyclic distribution.
x <- as.colcyclic(dx)
x
comm.print(submatrix(x))

# General block-cyclic distribution.
x <- as.blockcyclic(dx)
x
comm.print(submatrix(x))

# Shut down communicators and exit cleanly (collective call).
finalize()
28b202e132e3551be85905c14d0f950704cd55ce
419029eade19b17407a57d991ff157a2c5142999
/man/sedona_read_shapefile_to_typed_rdd.Rd
25c5392e27e2b5052e2f2b01074bf6ee19157ad7
[ "Apache-2.0" ]
permissive
lorenzwalthert/sparklyr.sedona
b045adf8fd1308c803c5dd9d212c4f0e4ea9cc98
0a1d7602ce1ff10a407b0ebd6383fa326df06ded
refs/heads/main
2023-04-27T12:12:21.117184
2021-05-06T16:54:03
2021-05-06T16:54:03
365,710,939
0
0
Apache-2.0
2021-05-09T09:07:26
2021-05-09T09:07:25
null
UTF-8
R
false
true
1,501
rd
sedona_read_shapefile_to_typed_rdd.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_interface.R \name{sedona_read_shapefile_to_typed_rdd} \alias{sedona_read_shapefile_to_typed_rdd} \title{Create a typed SpatialRDD from a shapefile data source.} \usage{ sedona_read_shapefile_to_typed_rdd( sc, location, type = c("point", "polygon", "linestring"), storage_level = "MEMORY_ONLY" ) sedona_read_shapefile_to_typed_rdd( sc, location, type = c("point", "polygon", "linestring"), storage_level = "MEMORY_ONLY" ) } \arguments{ \item{sc}{A \code{spark_connection}.} \item{location}{Location of the data source.} \item{type}{Type of the SpatialRDD (must be one of "point", "polygon", or "linestring".} \item{storage_level}{Storage level of the RDD (default: MEMORY_ONLY).} } \description{ Create a typed SpatialRDD (namely, a PointRDD, a PolygonRDD, or a LineStringRDD) from a shapefile data source. Create a typed SpatialRDD (namely, a PointRDD, a PolygonRDD, or a LineStringRDD) from a shapefile data source. } \seealso{ Other Sedona data inferface functions: \code{\link{sedona_read_dsv_to_typed_rdd}()}, \code{\link{sedona_read_geojson_to_typed_rdd}()}, \code{\link{sedona_read_geojson}()}, \code{\link{sedona_read_shapefile}()}, \code{\link{sedona_read_wkb}()}, \code{\link{sedona_read_wkt}()}, \code{\link{sedona_save_spatial_rdd}()}, \code{\link{sedona_write_geojson}()}, \code{\link{sedona_write_wkb}()}, \code{\link{sedona_write_wkt}()} } \concept{Sedona data inferface functions}
6f09d3f140c64fc6eecaf8b994e2ff2d732fc5e5
819a5f52b0bb3be25156257c214b0b18f8c2612c
/R/covid_care_map.R
9b47003b89b501b0c9610d250021d57089301af4
[ "MIT" ]
permissive
kotliary/sars2pack
f605a81004535e382ee411a89fcae61af9753ee7
43abdafcc73ccbfb95c23b27cc20d3aadfd1fd9e
refs/heads/master
2022-04-21T15:04:23.176685
2020-04-14T17:28:54
2020-04-14T17:28:54
255,723,912
1
0
NOASSERTION
2020-04-14T20:54:44
2020-04-14T20:54:44
null
UTF-8
R
false
false
1,048
r
covid_care_map.R
#' United States healthcare system capacity by provider
#'
#' @details
#' From the data providers:
#'
#' Mapping existing and forecasted health system capacity gaps (beds,
#' staffing, ventilators, supplies) to care for surging numbers of
#' COVID19 patients (especially ICU-level care) at high spatiotemporal
#' resolution (by facility, daily, all USA to start).
#'
#' @importFrom readr read_csv
#'
#' @source
#' - \url{https://github.com/covidcaremap/covid19-healthsystemcapacity}
#' - \url{https://raw.githubusercontent.com/covidcaremap/covid19-healthsystemcapacity/master/data/published/us_healthcare_capacity-facility-CovidCareMap.csv}
#'
#' @return A tibble of facility-level capacity data, as parsed by
#'   \code{readr::read_csv}.
#'
#' @examples
#' res = us_healthcare_capacity()
#' colnames(res)
#' glimpse(res)
#'
#' @family data-import
#'
#' @export
us_healthcare_capacity <- function() {
    # Download (or reuse a cached copy of) the upstream CSV, then parse it.
    fpath <- s2p_cached_url('https://raw.githubusercontent.com/covidcaremap/covid19-healthsystemcapacity/master/data/published/us_healthcare_capacity-facility-CovidCareMap.csv')
    # BUG FIX: `cols()` was called unqualified, but only read_csv is
    # imported from readr (@importFrom above), so the bare name is not in
    # the package namespace at run time. guess_max=5000 widens the rows
    # used for column-type guessing.
    readr::read_csv(fpath, col_types = readr::cols(), guess_max = 5000)
}
05af2501f9b8c7390b542f1d944195514b135112
8f789aa5b5e5c0054f8d501dbb00f22a70c7a22a
/man/getSequencesFromGenome.Rd
3c8785ca830ba18455b07d86bb22a33ad03c9886
[]
no_license
ddiez/rTRM
3458ea89798a374736f19df59e8be64ddc480e9b
8603c339ee67a94958d2be6f8cd8b7c6bb55f7f2
refs/heads/master
2021-01-10T20:30:21.929922
2015-10-09T03:14:59
2015-10-09T03:14:59
37,570,884
2
2
null
null
null
null
UTF-8
R
false
false
623
rd
getSequencesFromGenome.Rd
\name{getSequencesFromGenome} \alias{getSequencesFromGenome} \title{ Retrieves a set of sequences from a BSgenome object and optionally appends a label to each sequence id. } \description{ This is just a wrapper to getSeq() in package Biostrings that facilitates adding a label to each sequence.} \usage{ getSequencesFromGenome(BED, genome, append.id) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{BED}{ file with peak locations in BED format.} \item{genome}{ a BSgenome object (e.g. Mmusculus) } \item{append.id}{ optional label to append to each sequence id.} } \author{ Diego Diez }
e934598455b0eb7255db672a167e26fc1212b20b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/btergm/examples/gofstatistics.Rd.R
0aa0bfc2936dafd6289769b77f5c69710f7c7917
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
2,147
r
gofstatistics.Rd.R
library(btergm)

### Name: gofstatistics
### Title: Statistics for goodness-of-fit assessment of network models
### Aliases: gofstatistics gof-statistics statistics gofterms gof-terms
###   comemb dsp esp nsp deg ideg odeg b1deg b2deg istar kstar ostar b1star
###   b2star kcycle geodesic triad.directed triad.undirected pr roc rocpr
###   rocprgof edgebetweenness.modularity edgebetweenness.pr
###   edgebetweenness.roc fastgreedy.modularity fastgreedy.pr
###   fastgreedy.roc louvain.modularity louvain.pr louvain.roc
###   maxmod.modularity maxmod.pr maxmod.roc spinglass.modularity
###   spinglass.pr spinglass.roc walktrap.modularity walktrap.pr
###   walktrap.roc
### Keywords: statistics gof

### ** Examples

# To see how these statistics are used, look at the examples section of
# ?"gof-methods". The following example illustrates how custom
# statistics can be created. Suppose one is interested in the density
# of a network. Then a univariate statistic can be created as follows.

# A custom gof statistic is a function of an adjacency matrix that returns
# a (labelled) value; the '...' argument must be present in all statistics.
dens <- function(mat, ...) {        # univariate: one argument
  mat <- as.matrix(mat)             # sparse matrix -> normal matrix
  d <- sna::gden(mat)               # compute the actual statistic
  attributes(d)$label <- "Density"  # add a descriptive label
  return(d)                         # return the statistic
}

# Note that the '...' argument must be present in all statistics.
# Now the statistic can be used in the statistics argument of one of
# the gof methods.

# For illustrative purposes, let us consider an existing statistic, the
# indegree distribution, a multivariate statistic. It also accepts a
# single argument. Note that the sparse matrix is converted to a
# normal matrix object when it is used. First, statnet's summary
# method is used to compute the statistic. Names are attached to the
# resulting vector for the different indegree values. Then the vector
# is returned.
ideg <- function(mat, ...) {
  # Count of nodes at each indegree 0..(n-1), via statnet's summary method.
  d <- summary(mat ~ idegree(0:(nrow(mat) - 1)))
  names(d) <- 0:(length(d) - 1)
  attributes(d)$label <- "Indegree"
  return(d)
}

# See the gofstatistics.R file in the package for more complex examples.
09d9831a748069b12bd4220e52c9afd5f20367aa
184180d341d2928ab7c5a626d94f2a9863726c65
/valgrind_test_dir/integrateStepFunc-test.R
205e09d3e736caed3105b84724445abf892a4ccf
[]
no_license
akhikolla/RcppDeepStateTest
f102ddf03a22b0fc05e02239d53405c8977cbc2b
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
refs/heads/master
2023-03-03T12:19:31.725234
2021-02-12T21:50:12
2021-02-12T21:50:12
254,214,504
2
1
null
null
null
null
UTF-8
R
false
false
285
r
integrateStepFunc-test.R
function (x, bounds, values) { e <- get("data.env", .GlobalEnv) e[["integrateStepFunc"]][[length(e[["integrateStepFunc"]]) + 1]] <- list(x = x, bounds = bounds, values = values) .Call("_starvz_integrateStepFunc", PACKAGE = "starvz", x, bounds, values) }
7685750f69997bebf44d454d83d9e0c22c2b1d4c
c152f18566da3abd3ce757bcd25f02cd14215fdc
/plot4.R
f0e1e825b2042ef0280fa7781b0411740280c02d
[]
no_license
starship9/ExData_Plotting1
c9d0c6c6b0778d88cece4a6db189161d8aedd3ee
6e7ae43b6269ead0a9b45b901da1c01be45587bc
refs/heads/master
2021-09-04T19:24:04.924625
2018-01-21T17:49:39
2018-01-21T17:49:39
114,762,427
0
0
null
2017-12-19T12:26:09
2017-12-19T12:26:09
null
UTF-8
R
false
false
1,526
r
plot4.R
subsetData <- read.csv("subsetData.csv",stringsAsFactors = FALSE) str(subsetData) png(file = "plot4.png",width = 480, height = 480) par(mfrow=c(2,2)) plot(as.numeric(subsetData$Global_active_power),main ="",type="l", ylab="Global Active Power",xlab="",axes=FALSE) axis(1, at = c(0,1500,2900),labels = c("Thu","Fri","Sat")) axis(2,at=c(0,2,4,6)) box(lty = 1, col = 'black') plot(as.numeric(subsetData$Voltage),main="",type="l", ylab="Voltage",xlab="datetime",axes=FALSE) axis(1, at = c(0,1500,2900),labels = c("Thu","Fri","Sat")) axis(2,at=c(234,238,240,242,246),labels=c("234","238","240","242","246")) box(lty = 1, col = 'black') plot(1:nrow(subsetData),as.numeric(subsetData$Sub_metering_1),main="", ylab="Energy sub metering",xlab="",type="l",col="black",axes=FALSE) lines(as.numeric(subsetData$Sub_metering_2),col="red",type="l",axes=FALSE,ylim=c(0,40),xlab="",ylab="") lines(as.numeric(subsetData$Sub_metering_3),col="blue",type="l",axes=FALSE,ylim=c(0,40),xlab="",ylab="") axis(1, at = c(0,1500,2900),labels = c("Thu","Fri","Sat")) axis(2, at = c(0,10,20,30),labels = c("0","10","20","30")) legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty=c(1,1,1),lwd=1) box(lty = 1, col = 'black') plot(as.numeric(subsetData$Global_reactive_power),main ="",type="l", ylab="",xlab="datetime",axes=FALSE) axis(1, at = c(0,1500,2900),labels = c("Thu","Fri","Sat")) axis(2,at=c(0.1,0.2,0.3,0.4,0.5),c("0.1","0.2","0.3","0.4","0.5")) box(lty = 1, col = 'black') dev.off()
7e56975fb7cd55a43a9980c31a8a6e4f9ba4e1aa
e21575714abcf69b51d039c34fc48215ecfbdeca
/tests/testthat/test_replace_path_indicator.R
2413db9521d55680630898f7e7567a6691fc1428
[ "MIT" ]
permissive
jimsforks/pathformatr
2585aa877fb8727ec4736c6e14365f3a63f898dc
efa4ee1ce273e460179ffda34809946c1e99dda2
refs/heads/master
2023-02-12T13:18:39.744357
2021-01-08T12:14:50
2021-01-08T12:14:50
null
0
0
null
null
null
null
UTF-8
R
false
false
1,777
r
test_replace_path_indicator.R
context("Check path replacement 1 - indicator") # This is a short indicator test. To fully test the functionality, the # test_replace_path_local.R needs to be sourced interactively. test_that("Check path reformatting",{ # Check it works for here() expect_equal(split_path("here(\"inst/rstudio/addins.dcf\")"), "here(\"inst\",\"rstudio\",\"addins.dcf\")") # Check it works for file.path() expect_equal(split_path("file.path(\"inst/rstudio/addins.dcf\")"), "file.path(\"inst\",\"rstudio\",\"addins.dcf\")") # Check it works for random user-defined function (e.g. "test") expect_equal( split_path( "test(\"inst/rstudio/addins.dcf\") file.path(\"inst/rstudio/addins.dcf\")", fns = "test" ), "test(\"inst\",\"rstudio\",\"addins.dcf\") file.path(\"inst/rstudio/addins.dcf\")" ) # Check it doesn't replace arbitrary parenthesized file paths expect_equal(split_path("(\"inst/rstudio/addins.dcf\")"), "(\"inst/rstudio/addins.dcf\")") # Check it works with here namespace expect_equal(split_path("here::here(inst/rstudio/addins.dcf)"), "here::here(inst\",\"rstudio\",\"addins.dcf)") # Check trailing slash is replaced properly expect_equal(split_path("here::here(\"inst/rstudio/addins/\")"), "here::here(\"inst\",\"rstudio\",\"addins\")") # Check regex is sufficiently specific expect_equal(split_path("here::here(\"inst/rstudio/addins\") read.csv(\"test/test2.csv\")"), "here::here(\"inst\",\"rstudio\",\"addins\") read.csv(\"test/test2.csv\")") expect_equal(split_path("read.csv(\"test/test2.csv\")here::here(\"inst/rstudio/addins\")"), "read.csv(\"test/test2.csv\")here::here(\"inst\",\"rstudio\",\"addins\")") })
f8c9ac54f0cf5751d026853be8610e843efdedcd
722281e3bddbef275d0e165f58c15077f91741aa
/man/aliscore.Rd
3996442b3284d4b4863b3c6f53be22bf4a045ac1
[]
no_license
heibl/ips
ba824e8d2c185e22ad81c2d85d7e6368b9017f5d
647ba4c9ae1104b649cacc901c527de0207b7775
refs/heads/master
2022-02-08T16:08:03.520858
2022-02-06T14:54:58
2022-02-06T14:54:58
50,670,472
9
3
null
2018-01-18T09:19:30
2016-01-29T15:18:38
R
UTF-8
R
false
true
1,914
rd
aliscore.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/aliscore.R \name{aliscore} \alias{aliscore} \title{Masking of Sequence Alignments with ALISCORE} \usage{ aliscore(x, gaps = "5state", w = 6, r, t, l, s, o, exec) } \arguments{ \item{x}{DNA sequences of class \code{DNAbin}.} \item{gaps}{A vector of mode \code{"character"} indicating how gaps shall be treated: as \code{"5state"} or as \code{"ambiguous"}.} \item{w}{An integer giving the size of the sliding window.} \item{r}{An integer giving the number of random pairwise sequence comparisons; defaults to \code{4 * N}.} \item{t}{\emph{Not yet implemented}.} \item{l}{\emph{Not yet implemented}.} \item{s}{\emph{Not yet implemented}.} \item{o}{A vector of mode \code{"character"} containing outgroup taxon names.} \item{exec}{A character string, giving the path to the Aliscore script.} } \value{ A \code{matrix} of class \code{"DNAbin"}. } \description{ Provides a interface to \bold{Aliscore}, in order to remove problematic regions of a DNA sequence alignment. } \note{ This function was developed with ALISCORE version 2. } \examples{ data(ips.28S) \dontrun{aliscore(ips.28S)} } \references{ Misof, B. and K. Misof. 2009. A Monte Carlo approach successfully identifies randomness in multiple sequence alignments: a more objective means of data exclusion. \emph{Syst. Biol.} \bold{58}: 21--34. Kueck, P., K. Meusemann, J. Dambach, B. Thormann, B.M. von Reumont, J.W. Waegele and B. Misof. 2010. Parametric and non-parametric masking of randomness in sequence alignments can be improved and leads to better resolved trees. \emph{Frontiers in Zoology} \bold{7}: 10. \bold{Aliscore website}: \url{https://www.zfmk.de/en/research/research-centres-and-groups/aliscore} } \seealso{ \code{\link{mafft}} and \code{\link{prank}} for multiple sequence alignment; \code{\link{gblocks}} for another alignment masking algorithm. }
f6ee04a84611196cf77ec045ab352bdb8ea5fc6c
332590d9ac31a65c418ea7254174cd9dfac7d76a
/man/palette.jco.Rd
c34fd8f45cff3e14d2cdd667ebcc3d04f07d3805
[]
no_license
djambar/hdnom
34544a17168661410856f2709f824c148fbe6e8b
38dc945ab50db35d5c8e71715b2a08c9a452be21
refs/heads/master
2021-01-12T22:22:12.840852
2016-07-05T02:58:22
2016-07-05T02:58:22
null
0
0
null
null
null
null
UTF-8
R
false
true
366
rd
palette.jco.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/11.hdnom.color.palette.R \name{palette.jco} \alias{palette.jco} \title{Color Palette for Journal of Clinical Oncology (JCO)} \usage{ palette.jco() } \description{ A 10-color discrete color palette based on the colors used in figures in Journal of Clinical Oncology. } \keyword{internal}
0736c7cd0f414d7d7de208e9efd17faba4690700
e629f64c9651b1349c820f35b45615b3925dc09e
/w1_all-in-one/.Rhistory
1b4a5cb6ac47763cdf6f089b4615ff9ca07a770d
[]
no_license
jeremy-dai/15-Statistical-Computing-with-R
d2fea4f863bf1589c577b076913a549b9a3b870d
82090ff1cf7dfbe6dd4b27f6af19864cc966bfb3
refs/heads/master
2022-04-08T19:18:07.696859
2020-02-20T14:08:42
2020-02-20T14:08:42
206,742,386
0
0
null
null
null
null
UTF-8
R
false
false
182
rhistory
.Rhistory
as.numeric("four") as.numeric(c("3", "2", "...", "1")) T <- 5 Y=20 T*Y save.image("~/GitHub/15 Statistical Computing with R/w1_all-in-one/Untitled.RData") savehistory("Separated.R")
2bdfe4a579c333586760cba9e96de19ac7d1823b
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/dynatopmodel/examples/discretise.Rd.R
059434279c10ebcf0cf001a186119ad923fdd5b5
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,085
r
discretise.Rd.R
library(dynatopmodel) ### Name: discretise ### Title: Discrete a catchment into hydrological response units (HRUs) ### Aliases: discretise ### ** Examples # Landcover and soils are fairly homogenous throughout the Brompton catchment. # Due to the extensive artifical sybsurface drainage discharging directly into # the channel it is hypothesied that the storm response is largely mostly controlled # by proximity to the network. A simple discretisation according to flow distance # from the nearest channel thus appears to capture the dynamics without introducing # unnecessary complexity. ## Not run: ##D require(dynatopmodel) ##D ##D data(brompton) ##D ##D chans <- build_chans(brompton$dem, drn=brompton$drn, chan.width=2) ##D sort by distance from the channel network, but want areas closest the channel to come first ##D layers <- addLayer(brompton$dem, 2000-brompton$flowdists) ##D disc <- discretise(layers, cuts=c(flowdists=10), chans=chans, area.thresh=0.5/100) ##D rm(chans) ##D rm(layers) ##D write.table(disc$groups, sep="\t", row.names=FALSE) ## End(Not run)
2cf7971b7d5e9c8148fd08fb03d392dc3947bc0c
16d1ae505aae32acd3c07973829a8145d3458095
/AZ/ADD.R
7d6ed68eea77d586d988bf8ab6998b6c9f0d5407
[]
no_license
sailyen/ifc_data_manipulation
528c25ed9206101aae2216926f2aa70ccb8d4ffc
4bc04b7476cf586dbd0a692d12ef5da5586e1a64
refs/heads/master
2020-04-03T13:09:33.551882
2018-12-05T18:09:01
2018-12-05T18:09:01
155,275,704
0
0
null
null
null
null
UTF-8
R
false
false
1,277
r
ADD.R
idct <- read.csv("IDCT.csv", header = T) SL <- read.csv("SME_L.csv", header = T) SM <- read.csv("SME_M.csv", header = T) SN <- read.csv("SME_N.csv", header = T) SO <- read.csv("SME_O.csv", header = T) names(idct)[1] <- "Country" names(SL)[1] <- "Country" names(SM)[1] <- "Country" names(SN)[1] <- "Country" names(SO)[1] <- "Country" library(sqldf) iSL <- sqldf("SELECT * FROM idct LEFT JOIN SL ON idct.Country = SL.Country") iSL <- iSL[, c(1,3)] names(iSL)[2] <- "value" iSL$indicator <- "SME_L" iSL$sector <- "SME" iSM <- sqldf("SELECT * FROM idct LEFT JOIN SM ON idct.Country = SM.Country") iSM <- iSM[, c(1,3)] names(iSM)[2] <- "value" iSM$indicator <- "SME_M" iSM$sector <- "SME" iSN <- sqldf("SELECT * FROM idct LEFT JOIN SN ON idct.Country = SN.Country") iSN <- iSN[, c(1,3)] names(iSN)[2] <- "value" iSN$indicator <- "SME_N" iSN$sector <- "SME" iSO <- sqldf("SELECT * FROM idct LEFT JOIN SO ON idct.Country = SO.Country") iSO <- iSO[, c(1,3)] names(iSO)[2] <- "value" iSO$indicator <- "SME_O" iSO$sector <- "SME" idct_SME_add <- rbind(iSL, iSM, iSN, iSO) write.csv(idct_SME_add, "idct_SME_add.csv")
72fe9994a7a939cca06cb19bd76307e1b4de22ca
28fd76376cea79ad0b7244b036589b179d1af756
/countNA.R
f43658f601c7f92475393b35f178f5b50b295d81
[]
no_license
mrzork/ConteoNA
0d9465b70672aaee0acd84de7627e5c617d73950
c153f09e9a1393293a6007f33d755670c46375cd
refs/heads/master
2021-01-22T02:52:59.075861
2018-11-22T21:03:23
2018-11-22T21:03:23
24,518,300
0
0
null
null
null
null
UTF-8
R
false
false
413
r
countNA.R
countNA<-function(data){ esna<-c(1,which(is.na(data))) b<-a<-k<-0 for(i in 1:length(data)){ if((is.na(data)*1)[i]==1 ){ b<-b+1 }else{ if((is.na(data)*1)[i]==0 && b>0){ k<-k+1 a[k]<-b b<-0 } } if(i==length(data) && b>0){ k<-k+1 a[k]<-b b<-0 } } a<-rbind(a) rownames(a)<-"amount of 'NA' consecutive" return(rbind(a)) }
2127875d29de41f9b228173be86a35c7d88196ff
cf82203ab8814e454b8af896e25637c1a547db94
/code.R
2080674d4a3cdd1f87f80ec9d712e0e120b2b8ad
[]
no_license
gldmt-duke/movies_data
7e35bd08e35abf3f3cfe6fe67646ae85178251cb
dcaf573e867ba109d863135bc16b1ab6e1349402
refs/heads/master
2020-05-27T21:24:00.639167
2017-03-02T13:54:19
2017-03-02T13:54:19
83,682,262
0
0
null
null
null
null
UTF-8
R
false
false
3,519
r
code.R
library(dplyr) library(magrittr) library(rvest) setwd("E:/Dropbox/Duke/STA101") load(url("https://stat.duke.edu/~mc301/data/movies.Rdata")) movies %<>% # Remove TV movies. filter(title_type != "TV Movie") %>% # Remove long and short runtimes. filter(runtime < 250) %>% filter(runtime >= 60) # Add clean studios. movies %<>% mutate(studio_lower = tolower(studio)) %>% mutate(studio_main = ifelse(grepl("warner", studio_lower) | grepl("hbo", studio_lower) | grepl("orion", studio_lower) | grepl("line", studio_lower), "warner", ifelse(grepl("fox", studio_lower), "fox", ifelse(grepl("disney", studio_lower) | grepl("buena", studio_lower) | grepl("miramax", studio_lower) | grepl("hollywood", studio_lower) | grepl("touchstone", studio_lower) | grepl("touchstone", studio_lower), "disney", ifelse(grepl("columbia", studio_lower) | grepl("sony", studio_lower), "sony", ifelse(grepl("paramount", studio_lower), "paramount", ifelse(grepl("lions", studio_lower) | grepl("lionsgate", studio_lower), "lionsgate", ifelse(grepl("mgm", studio_lower) | grepl("mgm/ua", studio_lower) | grepl("artists", studio_lower), "mgm", ifelse(grepl("weinstein", studio_lower), "weinstein", ifelse(grepl("ifc", studio_lower), "ifc", ifelse(grepl("magnolia", studio_lower) | grepl("magnolia/magnet", studio_lower) | grepl("magnet", studio_lower), "magnolia", ifelse(grepl("universal", studio_lower) | grepl("focus", studio_lower) | grepl("usa", studio_lower) | grepl("gramercy", studio_lower), "universal", "other")))))))))))) movies %<>% select(-studio_lower) # Extract info from IMDB. span <- list() for(i in 1:nrow(movies)) { print(movies$title[i]) html_page <- read_html(movies$imdb_url[i]) print(html_page %>% html_node("h1") %>% html_text()) movie <- html_page %>% html_node("#titleDetails") %>% html_text() %>% strsplit("\n") span[[i]] <- movie[[1]] } # Extract Gross. 
gross <- rep(NA, length(span)) for(i in 1:length(span)) { current <- span[[i]] text <- NA for(j in 1:length(current)) { if(grepl("Gross", current[j])) { text <- current[j] } } text <- trimws(text) text <- strsplit(text, "Gross:")[[1]][2] text <- trimws(text) text <- gsub("[^0-9]","",text) gross[i] <- as.numeric(text) } # Extract Budget. budget <- rep(NA, length(span)) for(i in 1:length(span)) { current <- span[[i]] text <- NA for(j in 1:length(current)) { if(grepl("Budget", current[j])) { text <- current[j] } } text <- trimws(text) text <- strsplit(text, "Budget:")[[1]][2] text <- trimws(text) text <- gsub("[^0-9]","",text) budget[i] <- as.numeric(text) } # Export data. movies <- cbind(movies, gross, budget) movies %<>% filter(!is.na(gross)) movies %<>% filter(!is.na(budget)) save(movies, file="movies_new.RData")
13cffd7453cee9c29179a7589d2e3923fa6974f9
07cc930397f60f8a9e1d437198244543f64d6ca6
/ctn_R/regression_avec_R/commandes_chapitre2.r
5b49394142c45058c21ac2564b86aaea73fc7e01
[]
no_license
BTA198D/ctnguyen
98e22ee8b1b89a44befc713c37408561ac997dc8
0affe545bced1c489e682277b18dd7bd012c1403
refs/heads/master
2023-03-20T10:11:48.083165
2020-10-30T15:14:41
2020-10-30T15:22:42
null
0
0
null
null
null
null
UTF-8
R
false
false
980
r
commandes_chapitre2.r
############################################################ ### L'ozone ############################################################ ### representation des donnees ozone <- read.table("ozone.txt",header=T,sep=";") library("scatterplot3d") scatterplot3d(ozone[,"T12"],ozone[,"Vx"],ozone[,"O3"],type="h",pch=16,box=FALSE,xlab="T12",ylab="Vx",zlab="O3") ### estimation + affichage regmulti <- lm(O3~T12+Vx,data=ozone) summary(regmulti) ############################################################ ### La hauteur des eucalyptus ############################################################ eucalypt <- read.table("eucalyptus.txt",header=T,sep=";") regmult <- lm(ht~circ+I(sqrt(circ)),data=eucalypt) resume.mult <- summary(regmult) resume.mult ### qualite du modele et des observations plot(ht~circ,data=eucalypt,pch="+",col="grey60") circ <- seq(min(eucalypt[,"circ"]),max(eucalypt[,"circ"]),len=100) grille <- data.frame(circ) lines(grille[,"circ"],predict(regmult,grille))
2c8865b5c139db76f356e0037ed78d1c1e621ff5
ed1b4c0f57ef37e22a6c1cb5130ed8f887e0700b
/inst/render.R
7dbaead03a61ee4826f4712563715823dec41a75
[]
no_license
langcog/wordbank-book
ccc063950c7ea2f13da3ea7856d99b5930d31561
9db9683f17864fd71f0ed740f542bc92be177c30
refs/heads/master
2021-07-19T17:05:37.107602
2021-07-09T17:39:11
2021-07-09T17:39:11
73,015,913
14
4
null
null
null
null
UTF-8
R
false
false
199
r
render.R
rmarkdown::render(output_format = 'bookdown::pdf_book', encoding = 'UTF-8') rmarkdown::render_site(output_format = 'bookdown::html_book', encoding = 'UTF-8')
8cc54ad899c6872c77c39f5ab8a0f899bb25d7f5
1d17402d6b8afd0e889d5501fab725a137de18f8
/R/mismatch.R
9f94f0f7b248dd324fdb5f1a79123636bc097e06
[]
no_license
UBod/kebabs
50d2652faf10c4cb5aaec0b7137f9dfb9629db93
87431a1adf67a680060810cf0af78209f5a69b70
refs/heads/master
2023-05-04T15:30:45.351731
2023-04-21T10:53:53
2023-04-21T10:53:53
133,505,139
0
3
null
null
null
null
UTF-8
R
false
false
12,419
r
mismatch.R
#2345678901234567890123456789012345678901234567890123456789012345678901234567890 #' @rdname mismatchKernel #' @title Mismatch Kernel #' #' @description Create a mismatch kernel object and the kernel matrix #' #' @param k length of the substrings also called kmers; this parameter defines #' the size of the feature space, i.e. the total number of features considered #' in this kernel is |A|^k, with |A| as the size of the alphabet (4 for DNA #' and RNA sequences and 21 for amino acid sequences). Default=3 #' #' @param m number of maximal mismatch per kmer. The allowed value range is #' between 1 and k-1. The processing effort for this kernel is highly dependent #' on the value of m and only small values will allow efficient processing. #' Default=1 #' #' @param r exponent which must be > 0 (see details section in #' \link{spectrumKernel}). Default=1 #' #' @param normalized a kernel matrix or explicit representation generated with #' this kernel will be normalized(details see below). Default=TRUE #' #' @param exact use exact character set for the evaluation (details see below). #' Default=TRUE #' #' @param ignoreLower ignore lower case characters in the sequence. If the #' parameter is not set lower case characters are treated like uppercase. #' Default=TRUE #' #' @param presence if this parameter is set only the presence of a kmers will #' be considered, otherwise the number of occurances of the kmer is used. #' Default=FALSE #' @details #' Creation of kernel object\cr\cr #' The function 'mismatchKernel' creates a kernel object for the mismatch #' kernel. This kernel object can then be used with a set of DNA-, RNA- or #' AA-sequences to generate a kernel matrix or an explicit representation for #' this kernel. For values different from 1 (=default value) parameter #' \code{r} leads to a transfomation of similarities by taking each element of #' the similarity matrix to the power of r. 
If \code{normalized=TRUE}, the #' feature vectors are scaled to the unit sphere before computing the #' similarity value for the kernel matrix. For two samples with the feature #' vectors \code{x} and \code{y} the similarity is computed as: #' \deqn{s=\frac{\vec{x}^T\vec{y}}{\|\vec{x}\|\|\vec{y}\|}}{s=(x^T y)/(|x| |y|)} #' For an explicit representation generated with the feature map of a #' normalized kernel the rows are normalized by dividing them through their #' Euclidean norm. For parameter \code{exact=TRUE} the sequence characters #' are interpreted according to an exact character set. If the flag is not #' set ambigous characters from the IUPAC characterset are also evaluated. #' The annotation specific variant (for details see \link{positionMetadata}) #' and the position dependent variant (for details see #' \link{annotationMetadata}) are not available for this kernel.\cr\cr #' Creation of kernel matrix\cr\cr #' The kernel matrix is created with the function \code{\link{getKernelMatrix}} #' or via a direct call with the kernel object as shown in the examples below. #' @return #' mismatchKernel: upon successful completion, the function returns a kernel #' object of class \code{\linkS4class{MismatchKernel}}. 
#' @seealso \code{\link{kernelParameters}}, \code{\link{getKernelMatrix}}, #' \code{\link{getExRep}}, \code{\link{spectrumKernel}}, #' \code{\link{gappyPairKernel}}, \code{\link{motifKernel}}, #' \code{\linkS4class{MismatchKernel}} #' @examples #' #' ## instead of user provided sequences in XStringSet format #' ## for this example a set of DNA sequences is created #' ## RNA- or AA-sequences can be used as well with the mismatch kernel #' dnaseqs <- DNAStringSet(c("AGACTTAAGGGACCTGGTCACCACGCTCGGTGAGGGGGACGGGGTGT", #' "ATAAAGGTTGCAGACATCATGTCCTTTTTGTCCCTAATTATTTCAGC", #' "CAGGAATCAGCACAGGCAGGGGCACGGCATCCCAAGACATCTGGGCC", #' "GGACATATACCCACCGTTACGTGTCATACAGGATAGTTCCACTGCCC", #' "ATAAAGGTTGCAGACATCATGTCCTTTTTGTCCCTAATTATTTCAGC")) #' names(dnaseqs) <- paste("S", 1:length(dnaseqs), sep="") #' #' ## create the kernel object with one mismatch per kmer #' mm <- mismatchKernel(k=2, m=1, normalized=FALSE) #' ## show details of kernel object #' mm #' #' ## generate the kernel matrix with the kernel object #' km <- mm(dnaseqs) #' dim(km) #' km[1:5, 1:5] #' #' ## alternative way to generate the kernel matrix #' km <- getKernelMatrix(mm, dnaseqs) #' km[1:5,1:5] #' #' \dontrun{ #' ## plot heatmap of the kernel matrix #' heatmap(km, symm=TRUE) #' } #' @author Johannes Palme <kebabs@@bioinf.jku.at> #' @references #' \url{http://www.bioinf.jku.at/software/kebabs/}\cr\cr #' (Leslie, 2002) -- C. Leslie, E. Eskin, J. Weston and W.S. Noble. #' Mismatch String Kernels for SVM Protein Classification. \cr\cr #' J. Palme, S. Hochreiter, and U. Bodenhofer (2015) KeBABS: an R package #' for kernel-based analysis of biological sequences. #' \emph{Bioinformatics}, 31(15):2574-2576, 2015. #' DOI: \doi{10.1093/bioinformatics/btv176}. 
#' @keywords kernel #' @keywords mismatchKernel, mismatch #' @keywords methods #' @export mismatchKernel <- function(k=3, m=1, r=1, normalized=TRUE, exact=TRUE, ignoreLower=TRUE, presence=FALSE) { ## check data independent kernel parameters and create closure if (!is.numeric(k) || any(k < 1)) stop("k must be an integer larger than 0\n") if (!is.numeric(m) || any(m < 1) || any(sapply(k, function(ki) any(m >= ki)))) stop("m must be an integer larger than 0 and smaller than k\n") if (!isSingleNumber(r) || r <= 0) stop("r must be a number larger than 0\n") if (!isTRUEorFALSE(normalized)) stop("normalized must be TRUE or FALSE\n") if (!isTRUEorFALSE(exact)) stop("exact must be TRUE or FALSE\n") if (!isTRUEorFALSE(ignoreLower)) stop("ignoreLower must be TRUE or FALSE\n") if (!isTRUEorFALSE(presence)) stop("presence must be TRUE or FALSE\n") if (length(k) == 1 && length(m) == 1) { rval<- function(x, y = NULL, selx = NULL, sely = NULL, self=NULL) { return(mismatchProcessing(x=x, y=y, selx=selx, sely=sely, k=k, m=m, r=r, normalized=normalized, exact=exact, ignoreLower=ignoreLower, presence=presence, self=self)) } return(new("MismatchKernel", .Data=rval, .userDefKernel=FALSE, k=k, m=m, r=r, normalized=normalized, annSpec=FALSE, distWeight=numeric(0), exact=exact, ignoreLower=ignoreLower, presence=presence)) } else { kmPairs <- as.matrix(expand.grid(m,k)) colnames(kmPairs) <- NULL ## return list of kernel objects kernels <- mapply(mismatchKernel, k=kmPairs[,2], m=kmPairs[,1], MoreArgs=list(r=r, normalized=normalized, exact=exact, ignoreLower=ignoreLower, presence=presence)) return(kernels) } } mismatchProcessing <- function(x, y, selx, sely, k, m, r, normalized, exact, ignoreLower, presence, self=NULL) { if (!is.null(self)) { ## retrieval of kernel parameters return(list(k=self@k, m = self@m, r=self@r, normalized=self@normalized, annSpec=FALSE, exact=self@exact, ignoreLower=self@ignoreLower, presence=self@presence, distWeight=numeric(0), revComplement=self@revComplement)) } 
if (missing(x) || is.null(x)) { stop(paste("x must be a", paste(kebabsInfo@allowedSeqClasses, collapse=", "), "\n")) } if (length(x) < 1) stop("sequence info is missing\n") if (missing(y)) y <- NULL if (missing(selx) || is.null(selx)) selx <- integer(0) if (missing(sely) || is.null(sely)) sely <- integer(0) if (class(x) %in% c("DNAString", "RNAString", "AAString")) { x <- switch(class(x), "DNAString" = DNAStringSet(x), "RNAString" = RNAStringSet(x), "AAString" = AAStringSet(x) ) } if (!(class(x) %in% kebabsInfo@allowedSeqSetClasses)) { stop(paste("x must be a", paste(kebabsInfo@allowedSeqClasses, collapse=", "))) } if (length(selx) > 0) { if (!is.numeric(selx) || length(selx) > length(x)) stop("selx must be a numeric vector with indices into 'x'\n") selx <- as.integer(selx) } else selx <- 1L:length(x) selxC <- selx - 1L symmetric <- TRUE if (length(k) > 1 || length(m) > 1) { stop("multiple values for 'k' or 'm' are only allowed\n", " in model selection\n") } if (!is.null(y)) { symmetric <- FALSE if (class(y) %in% c("DNAString", "RNAString", "AAString")) { y <- switch(class(y), "DNAString" = DNAStringSet(y), "RNAString" = RNAStringSet(y), "AAString" = AAStringSet(y) ) } if (!(class(y) %in% kebabsInfo@allowedSeqSetClasses)) { stop(paste("y must be a", paste(kebabsInfo@allowedSeqClasses, collapse=", "),"\n")) } if (class(x) != class(y)) stop("x and y must be of matching classes\n") if (length(sely) > 0) { if (!is.numeric(sely) || length(sely) > length(y)) stop("sely must be a numeric vector with indices into 'y'\n") sely <- as.integer(sely) } else sely <- 1L:length(y) selyC <- sely - 1L } else { sely <- integer(0) selyC <- sely } bioCharset <- getBioCharset(x, exact) ## limit k to 32 bit feature space if (names(bioCharset[[1]]) %in% c("AAexact", "AAiupac") && k > 7) stop("'k' must be smaller than or equal to 7\n") else if (names(bioCharset[[1]]) %in% c("DNAexact", "RNAexact") && k > 15) stop("for exact charset 'k' must be smaller than or equal to 15\n") else if 
(names(bioCharset[[1]]) %in% c("DNAiupac", "RNAiupac") && k > 7) stop("for iupac charset 'k' must be smaller than or equal to 7\n") if (!is.null(y)) seqLength <- c(width(x)[selx], width(y)[sely]) else seqLength <- width(x)[selx] if (any(seqLength < k)) { stop("mismatch kernel does not accept strings shorter\n", " than k\n") } maxSeqLength <- max(seqLength) isXStringSet <- is(x, "XStringSet") unmapped <- is(x, "DNAStringSet") || is(x, "RNAStringSet") res <- .Call("mismatchKernelMatrixC", x, y, selxC, selyC, as.integer(length(selxC)), as.integer(length(selyC)), as.logical(isXStringSet), as.logical(symmetric), as.integer(bioCharset[[2]]), as.logical(!ignoreLower), as.logical(unmapped), as.integer(maxSeqLength), as.integer(k), as.integer(m), as.logical(normalized), as.logical(presence)) if (length(names(x)) > 0 && length(selx) == nrow(res)) { rownames(res) <- names(x)[selx] if (symmetric) colnames(res) <- rownames(res) } if (!symmetric && length(names(y)) > 0 && length(sely) == ncol(res)) { colnames(res) <- names(y)[sely] } if (r != 1) return(as.KernelMatrix(res^r)) else return(as.KernelMatrix(res)) } #' @rdname mismatchKernel #' @aliases #' getFeatureSpaceDimension,MismatchKernel-method #' #' @param kernel a sequence kernel object #' @param x one or multiple biological sequences in the form of a #' \code{\linkS4class{DNAStringSet}}, \code{\linkS4class{RNAStringSet}}, #' \code{\linkS4class{AAStringSet}} (or as \code{\linkS4class{BioVector}}) #' @return of getDimFeatureSpace: #' dimension of the feature space as numeric value #' @export #' ## feature space is identical to spectrum kernel setMethod("getFeatureSpaceDimension", signature=signature(kernel="MismatchKernel"), kebabs:::getFeatureSpaceDimension.spectrum)
bdc505260395605927b475e133e8baaa796c2df8
67be121f07b78e8337262f97673c94ff2e4471f9
/R Programming/ProgrammingAssignment3/best.R
99276ceeac89d9bdd2c06dffdeba016ff18f9b63
[]
no_license
adamwlev/Coursera
61099a2c9ab7c899aaf3c3d086297072525ccc75
9bd03ea561d0145d1e68e8fdc78759196361a3d3
refs/heads/master
2021-04-22T12:23:32.932089
2016-12-06T23:45:11
2016-12-06T23:45:11
75,781,302
0
0
null
null
null
null
UTF-8
R
false
false
1,011
r
best.R
setwd("~/Documents/R Programming/ProgrammingAssignment3") best <- function(state, outcome) { data <- read.csv("rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = "character") data$State<-as.factor(data$State) data[,11]<-as.numeric(data[,11]) data[,17]<-as.numeric(data[,17]) data[,23]<-as.numeric(data[,23]) ## Check that state and outcome are valid states<-levels(data$State) if (!(state %in% states)){ stop("invalid state") } outcomes<-c("heart attack","heart failure","pneumonia") if (!(outcome %in% outcomes)){ stop("invalid outcome") } if(outcome=="heart attack"){ output<-data[data$State==state,][order(data[data$State==state,11],data[data$State==state,2]),][1,2] } if(outcome=="heart failure"){ output<-data[data$State==state,][order(data[data$State==state,17],data[data$State==state,2]),][1,2] } if(outcome=="pneumonia"){ output<-data[data$State==state,][order(data[data$State==state,23],data[data$State==state,2]),][1,2] } return(output) }
2442de2fdb153326d45865cfda0b3899c524e2af
7af4243925881859c18ed52e787ae46465173f26
/R programming week3.R
f567fe891624267462c91b5d9efd95757da0a0d5
[]
no_license
MoominBamboo/R-Programming-Week-3-Assignment
b7d3fca87649f8e190ed41a4e46c078b5b1a4e61
805403c65d3c29d8eb631e82e6cea5ff2708218e
refs/heads/master
2021-03-31T01:04:08.034941
2018-03-12T18:30:58
2018-03-12T18:30:58
124,933,601
0
0
null
null
null
null
UTF-8
R
false
false
1,824
r
R programming week3.R
makeVector <- function(x = numeric()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setmean <- function(mean) m <<- mean getmean <- function() m list(set = set, get = get, setmean = setmean, getmean = getmean) } cachemean <- function(x, ...) { m <- x$getmean() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- mean(data, ...) x$setmean(m) m } makeCacheMatrix cacheSolve makeCacheMatrix <- function(x = matrix()) { ## define the argument with default mode of "matrix" inv <- NULL ## initialize inv as NULL; will hold value of matrix inverse set <- function(y) { ## define the set function to assign new x <<- y ## value of matrix in parent environment inv <<- NULL ## if there is a new matrix, reset inv to NULL } get <- function() x ## define the get fucntion - returns value of the matrix argument setinverse <- function(inverse) inv <<- inverse ## assigns value of inv in parent environment getinverse <- function() inv ## gets the value of inv where called list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) ## you need this in order to refer ## to the functions with the $ operator } cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinverse(inv) inv }
0a702f7eb2c7c23dca8b4afd40acd73d5b02d2bb
ccace74225fd645944cffa3218db445c29b5b38d
/hw/hw1/car.R
2412337bda3d87ebc262ee4b61bcc9c78e773395
[]
no_license
luiarthur/decision_ams221
31f3587aba928e143b5cfdc187d72db437e96662
9c1b993902a39980eeeefaa828e6f4f63600ae4d
refs/heads/master
2020-04-11T05:31:45.767800
2019-01-24T17:34:03
2019-01-24T17:34:03
161,552,181
0
0
null
null
null
null
UTF-8
R
false
false
500
r
car.R
f = function(n, x) { p_snx = dbinom(x, n, .9) * .2 + dbinom(x, n, .4) * .8 p_nx = dbinom(x, n, .9) * .2 / p_snx a = (-50 * n + 700) * p_nx + (-50 * n + 800) * (1-p_nx) b = (-50 * n + 0) * p_nx + (-50 * n + 1000) * (1-p_nx) cat('n: ', n, ', x: ', x, ', a: ', a, ', b: ', b, ', p_snx: ', p_snx, '\n', sep='') max(a, b) * p_snx } N = 10 m = rep(NA, N) for (n in 1:N) { m[n] = sum(sapply(0:n, function(x) f(n, x))) } plot(m, type='o', ylab='Profit', xlab='number of alarm devices (n)')
8135ff7b6e4a913feef1ef3811a22b5d7d84d6e1
f8d2169fb12986ce3d0eec2ab5d6d208a7766eed
/R/frontend-score.R
a8abadeec7160701b7d810982ce7c5f42e23b9e6
[]
no_license
MariusDanner/bnlearn
934dec56136745517635dcfab96b009924c72bd8
3cb51471943811d94a32ec742d262a5d7b20481d
refs/heads/master
2020-05-29T15:26:17.038590
2019-05-29T14:00:11
2019-05-29T14:00:11
189,220,392
2
0
null
2019-05-29T12:24:38
2019-05-29T12:24:37
null
UTF-8
R
false
false
8,357
r
frontend-score.R
# frontend-score.R: user-facing scoring functions of the bnlearn package.
# These are thin, heavily-validated frontends; the actual scoring is done
# by internal helpers such as per.node.score() (defined elsewhere).

# compute the score of a network.
# Returns the total network score, or (when by.node = TRUE and the score
# is decomposable) the vector of per-node contributions.
score = function(x, data, type = NULL, ..., by.node = FALSE, debug = FALSE) {

  # check x's class.
  check.bn(x)
  # the original data set is needed.
  check.data(data)
  # check the network against the data.
  check.bn.vs.data(x, data)
  # check debug and by.node.
  check.logical(by.node)
  check.logical(debug)
  # no score if the graph is partially directed.
  if (is.pdag(x$arcs, names(x$nodes)))
    stop("the graph is only partially directed.")
  # check the score label.
  type = check.score(type, data)
  # expand and sanitize score-specific arguments.
  extra.args = check.score.args(score = type, network = x,
                 data = data, extra.args = list(...), learning = FALSE)
  # check that the score is decomposable when returning node contributions.
  if (by.node && !is.score.decomposable(type, extra.args))
    stop("the score is not decomposable, node terms are not defined.")
  # compute the node contributions to the network score.
  local = per.node.score(network = x, data = data, score = type,
            targets = names(x$nodes), extra.args = extra.args, debug = debug)

  if (by.node)
    return(local)
  else
    return(sum(local))

}#SCORE

# AIC method for class 'bn', an alias of score(..., type = "aic").
# The concrete score label is chosen from the data type (discrete,
# continuous Gaussian, or conditional Gaussian).
AIC.bn = function(object, data, ..., k = 1) {

  # check which type of data we are dealing with.
  type = data.type(data)

  # parameter sanitization done in the score() function.
  if (type %in% discrete.data.types)
    score(object, data = data, type = "aic", k = k, ...)
  else if (type == "continuous")
    score(object, data = data, type = "aic-g", k = k, ...)
  else if (type == "mixed-cg")
    score(object, data = data, type = "aic-cg", k = k, ...)

}#AIC.BN

# BIC method for class 'bn', an alias of score(..., type = "bic")
BIC.bn = function(object, data, ...) {

  # check which type of data we are dealing with.
  type = data.type(data)

  # parameter sanitization done in the score() function.
  if (type %in% discrete.data.types)
    score(object, data = data, type = "bic", ...)
  else if (type == "continuous")
    score(object, data = data, type = "bic-g", ...)
  else if (type == "mixed-cg")
    score(object, data = data, type = "bic-cg", ...)

}#BIC.BN

# logLik method for class 'bn', an alias of score(..., type = "loglik")
logLik.bn = function(object, data, ...) {

  # check which type of data we are dealing with.
  type = data.type(data)

  # parameter sanitization done in the score() function.
  if (type %in% discrete.data.types)
    score(x = object, data = data, type = "loglik", ...)
  else if (type == "continuous")
    score(x = object, data = data, type = "loglik-g", ...)
  else if (type == "mixed-cg")
    score(x = object, data = data, type = "loglik-cg", ...)

}#LOGLIK.BN

# estimate the alpha-star parameter; validation only, the computation is
# delegated to alpha.star.backend() (defined elsewhere).
alpha.star = function(x, data, debug = FALSE) {

  # check x's class.
  check.bn(x)
  # the original data set is needed.
  check.data(data)
  # check the network against the data.
  check.bn.vs.data(x, data)
  # check debug.
  check.logical(debug)
  # no score if the graph is partially directed.
  if (is.pdag(x$arcs, names(x$nodes)))
    stop("the graph is only partially directed.")

  alpha.star.backend(x = x, data = data, debug = debug)

}#ALPHA.STAR

# infer the direction of a hypothetical arc between two specified nodes,
# using a conditional independence test, a network score, or bootstrap
# resampling as the criterion. Returns the network invisibly.
choose.direction = function(x, arc, data, criterion = NULL, ..., debug = FALSE) {

  # check x's class.
  check.bn(x)
  # check the data are there.
  data.info = check.data(data)
  # check the arc is there.
  check.nodes(nodes = arc, graph = x, min.nodes = 2, max.nodes = 2)
  # check debug.
  check.logical(debug)
  # check criterion.
  if (is.null(criterion)) {

    # if no criterion is specified use either the default one or the
    # one used by the learning algorithm.
    if (x$learning$test == "none")
      criterion = check.test(criterion, data)
    else
      criterion = x$learning$test

  }#THEN
  else if (identical(criterion, "bootstrap")) {

    # nothing to do, move along.

  }#THEN
  else {

    criterion = check.criterion(criterion, data)

  }#ELSE

  # set the test/score counter.
  reset.test.counter()

  if (debug)
    cat("* testing", arc[1], "-", arc[2], "for direction.\n" )

  if (criterion %in% available.tests) {

    # sanitize the alpha threshold.
    alpha = check.alpha(list(...)$alpha, network = x)
    # sanitize B (the number of bootstrap/permutation samples).
    B = check.B(list(...)$B, criterion)
    # warn about unused arguments.
    check.unused.args(list(...), c("alpha", "B"))

    x = choose.direction.test(x, data = data, arc = arc, test = criterion,
          alpha = alpha, B = B, debug = debug,
          complete = data.info$complete.nodes)

  }#THEN
  else if (criterion %in% available.scores) {

    # expand and sanitize score-specific arguments.
    extra.args = check.score.args(score = criterion, network = x,
                   data = data, extra.args = list(...), learning = FALSE)

    x = choose.direction.score(x, data = data, arc = arc, score = criterion,
          extra.args = extra.args, debug = debug)

  }#ELSE
  else if (criterion == "bootstrap") {

    # expand and check bootstrap-specific arguments.
    extra.args = check.bootstrap.args(list(...), network = x, data = data)

    # arc strengths are computed on the CPDAG by default.
    if (!is.null(extra.args$cpdag))
      check.logical(extra.args$cpdag)
    else
      extra.args$cpdag = TRUE

    x = choose.direction.boot(x, data = data, arc = arc,
          extra.args = extra.args, algorithm = extra.args[["algorithm"]],
          algorithm.args = extra.args[["algorithm.args"]],
          cpdag = extra.args[["cpdag"]], debug = debug)

  }#THEN

  invisible(x)

}#CHOOSE.DIRECTION

# compute the Bayes factor of two networks (num over den), on the log
# scale by default.
BF = function(num, den, data, score, ..., log = TRUE) {

  # check the two networks, individually and against each other.
  check.bn(num)
  check.bn(den)
  match.bn(num, den)
  nodes = names(num$nodes)
  # check the data.
  data.info = check.data(data)
  # check the networks against the data.
  check.bn.vs.data(num, data)
  check.bn.vs.data(den, data)
  # check the log argument.
  check.logical(log)
  # no score if at least one of the networks is partially directed.
  if (is.pdag(num$arcs, names(num$nodes)))
    stop("the graph in the numerator on the BF is only partially directed.")
  if (is.pdag(den$arcs, names(den$nodes)))
    stop("the graph in the denominator on the BF is only partially directed.")

  # make sure the score function is suitable for computing a Bayes factor;
  # when missing, pick a default appropriate for the data type.
  if (missing(score)) {

    if (data.info$type %in% discrete.data.types)
      score = "bde"
    else if (data.info$type %in% continuous.data.types)
      score = "bge"
    else if (data.info$type %in% mixed.data.types)
      score = "bic-cg"

  }#THEN
  else {

    score = check.score(score, data,
              allowed = c(available.discrete.bayesian.scores,
                          available.continuous.bayesian.scores,
                          grep("bic", available.scores, value = TRUE)))

  }#ELSE

  # expand and sanitize score-specific arguments.
  extra.args = check.score.args(score = score, network = num,
                 data = data, extra.args = list(...), learning = FALSE)

  # if a graph prior is used, this in not a Bayes factor any longer.
  if (!is.null(extra.args$prior) && extra.args$prior != "uniform")
    warning("using a non-uniform graph prior means this is not a Bayes factor.")

  # if the score is decomposable, compute the Bayes factor using only those
  # local distributions that differ between the two networks; otherwise
  # compute it on the whole network.
  if (is.score.decomposable(score, extra.args)) {

    different = sapply(nodes, function(n) {
      !setequal(num$nodes[[n]]$parents, den$nodes[[n]]$parents)
    })
    different = nodes[different]

  }#THEN
  else {

    different = nodes

  }#ELSE

  logBF.num = per.node.score(num, data = data, score = score,
                targets = different, extra.args = extra.args)
  logBF.den = per.node.score(den, data = data, score = score,
                targets = different, extra.args = extra.args)

  # compute the Bayes factor on the log-scale, and taking the difference between
  # local distributions before summing to minimise numeric problems.
  logBF = sum(logBF.num - logBF.den)

  return(ifelse(log, logBF, exp(logBF)))

}#BF
26875ccfbbf1298a3bf67279b51cddde1ed3d066
b83685030a9ba183d06d44e8865a1165b82bc2b3
/plot2.R
c378f366f6b1bbf643c40519f87ff3bdd735d29e
[]
no_license
vovikdrg/Exploratory-Data-Analysis
217e14d74b013f80c5125ed683369ea03d0ffa65
40cccb5e718c0f8405a8208024d8a4b882e70309
refs/heads/master
2021-01-10T20:51:29.594051
2015-02-08T21:39:36
2015-02-08T21:39:36
30,505,430
0
0
null
null
null
null
UTF-8
R
false
false
551
r
plot2.R
# plot2.R: line chart of Global Active Power over 2007-02-01/02 from the
# UCI household power consumption data set, written to plot2.png.
# Expects household_power_consumption.txt in the working directory.

data <- read.csv("household_power_consumption.txt", header=TRUE, sep=";",
                 stringsAsFactors=FALSE, dec=".")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# the raw column is character (with "?" for missing); coerce to numeric
data$Global_active_power <- as.numeric(data$Global_active_power)

# keep only the two days of interest
dataForPlot <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# combine date and time into a single POSIXct timestamp for the x axis
dataForPlot$Datetime <- as.POSIXct(paste(dataForPlot$Date, dataForPlot$Time))

png("plot2.png", width=480, height=480)
# BUG FIX: the original plotted against the undefined object `datetime`;
# the timestamp column created above is dataForPlot$Datetime.
plot(dataForPlot$Global_active_power ~ dataForPlot$Datetime, type="l",
     ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
d841e82878144d9ed5df9f8b367aa3d01028345e
8f339ff4d4565ceff9faac608f0dd3db70cd0534
/man/orbit_read.Rd
b696918158b292608d7d41ed71ab6b9137d6a031
[]
no_license
markwh/rivertile
a5960e4b92070bc276114f92383b2568358fb0f4
5c8969332b9ee8f187fdd209e059f51fb83327c8
refs/heads/master
2020-04-24T05:47:54.342603
2019-10-22T13:11:08
2019-10-22T13:11:08
171,742,418
0
0
null
null
null
null
UTF-8
R
false
true
470
rd
orbit_read.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-priordb.R
\name{orbit_read}
\alias{orbit_read}
\title{Read orbit locations}
\usage{
orbit_read(ncfile, as_sf = TRUE, maxpoints = 1000)
}
\arguments{
\item{ncfile}{netcdf file containing prior info}

\item{as_sf}{Convert to a simple features (sf) object?}

\item{maxpoints}{Maximum number of points to use for line resolution.}
}
\description{
Returns an sf object with multilinestring geometry.
}
1d8756d76d247d4b9bb1aeb35691d556518f1fa0
186b58bad3b286be4411f5f0784fa97635f8faa9
/run_analysis_long.R
d6ad549cd7a6ea519a75a36351fb301aade2cc6b
[]
no_license
ZedLeb/coursera-data-science-getdata-course-project
07319534fadfe687bce00508ed667e4eaa0ec52e
c7364b801ec30ff376df97619734f0bdde3a8333
refs/heads/master
2021-01-22T20:54:58.833847
2014-12-01T09:44:11
2014-12-01T09:44:11
null
0
0
null
null
null
null
UTF-8
R
false
false
9,650
r
run_analysis_long.R
# run_analysis_long.R: Getting & Cleaning Data course project. Builds a
# tidy data set of per-(Activity, Subject) averages from the UCI HAR raw
# files, then reshapes it into a long/narrow tidy form.
#
# The course does not teach us data.table, but I prefer to use it at all times
# for its lightning speed when working with huge data sets and versatility.
# See benchmarks here:
# https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping
# If you are not familiar with data.table I recommend these reads
# https://github.com/Rdatatable/data.table/wiki/vignettes/datatable-intro.pdf
# https://github.com/Rdatatable/data.table/wiki/vignettes/datatable-faq.pdf
# and the wiki here https://github.com/Rdatatable/data.table/wiki
library("data.table")

##############################################################################
# THE SCRIPT ASSUMES THAT WORKING DIRECTORY IS SET TO THE ROOT OF DATA FILES #
# AND ENDS WITH A SLASH                                                      #
##############################################################################
# Set working directory appropriately!!!
# setwd("D:\\GDrive\\GitHub\\coursera-data-science-getdata-course-project\\rawdata")
##############################################################################

# Check whether working directory contains activity_labels.txt file, which
# means it is the root directory of our data.
if (!file.exists("activity_labels.txt"))
  stop("Could not find activity_labels.txt file.\nPlease, set working directory to the root of the data folder!")

# Load activity labels. We will need them to fullfil the following requirement:
# 3. Uses descriptive activity names to name the activities in the data set
a.labels <- fread("activity_labels.txt")

# Load variable(column) names. We will need them to fullfill the following:
# 2. Extracts only the measurements on the mean and standard deviation for each
#    measurement.
# 4. Appropriately labels the data set with descriptive variable names.
v.names <- fread("features.txt")

# Now we need to decide which columns we must keep. We obviously must keep
# columns with -mean() and -std(). There are also columns with -meanFreq() and
# seven columns with angles of some means. I believe neither -meanFreq() nor
# angles columns should be included in the tidy set since they are not
# "mean and standard deviation for each measurement", they are some additional
# characteristics of these measurements. So I keep only -mean() and -std()
# columns. "perl = T" param is for PCRE syntax, which is generally faster.
v.names <- v.names[grepl("-(mean|std)\\(\\)", V2, perl = T)]

# We are down to 66 columns. Now let's cleanup these names a bit, so that it's
# easier to use them in the code for anybody using our data in the future.
# Get rid of parenthesis.
# This is data.table syntax and it does update the values in place by
# reference! It probably does not work the same way in data.frame.
v.names[, V2:=gsub("()", "", V2, fixed = T)]
# Replace "-" with ".". This follows Google Style guide for variable naming.
v.names[, V2:=gsub("-", ".", V2, fixed = T)]

# Reads and processes data files for one data set ("train" or "test"):
# converts activity IDs to a labeled ordered factor, keeps only the
# mean/std columns selected in v.names, and merges Activity + Subject +
# measurements into a single data.table. Relies on a.labels and v.names
# from the enclosing environment.
ReadFilesAndMerge <- function(data.set = "train") {
  # Reads data files for a data set (train or test), converts activity to
  # labeled factor, filters out unneeded columns and merges everything together.

  # Subject IDs for each observation
  s <- fread(sprintf("%s/subject_%s.txt", data.set, data.set))
  # Set sensible column name. setnames(x, names(x), v.names$V2) is faster
  # than names(x) <- v.names$V2 because the latter copies the table, while
  # the former does its changes by reference.
  setnames(s, names(s), c("Subject"))

  # Activity IDs for each observation
  y <- fread(sprintf("%s/y_%s.txt", data.set, data.set))
  # Replace activity IDs with descriptive activity labels.
  # In our case, when items in a.labels are already correctly ordered from 1
  # to 6 and ids in y are also from 1 to 6, we could disregard a.labels$V1
  # and just do a left join by row numbers in a.labels:
  #   y <- a.labels[y$V1, V2]
  # This takes rows from a.labels according to numbers in y$V1. This would
  # not work if ids were not consecutively numbered from 1 to 6. In case of
  # arbitrary ids the correct data.table way would be:
  #   y <- a.labels[V1 == y$V1, V2]
  # But IMHO both solutions are not ideal, since by replacing numbers with
  # strings we lose their relative ordering. It doesn't matter much here, but
  # if ids in y encoded a meaningful order (e.g. least to most physical
  # strain), bare labels would lose it. So here I convert y to an *ordered*
  # factor vector with descriptive activity labels.
  y <- factor(y$V1, a.labels$V1, a.labels$V2, ordered = T)
  # Convert to data.table just to keep using the same syntax everywhere
  y <- as.data.table(y)
  # Set sensible column name.
  setnames(y, names(y), c("Activity"))

  # fread is a very fast data.table's file reader, but it chokes on this
  # file. Thus I have to use read.table and convert to data.table.
  # If you know how to read this file using fread, please tell me!
  x <- read.table(sprintf("%s/X_%s.txt", data.set, data.set))
  # convert to data.table to keep using the same syntax everywhere
  x <- as.data.table(x)

  # Now let's get rid of unneeded columns.
  # First create a list of x columns, using IDs in v.names$V1
  cols <- names(x)[v.names$V1]
  # Now let's get only those columns that we need.
  # This one is tricky in data.table! Everything is easy when you know column
  # names and can type them in, but when column names have to be provided
  # programmatically data.table gets tricky. "with=FALSE" is important here.
  # See datatable-faq.pdf Q:1.5
  x <- x[, cols, with=F]
  # Finally set column names
  setnames(x, names(x), v.names$V2)

  # Ok, we are ready to merge everything using cbind, convert to data.table
  # and return the resulting data.table
  as.data.table(cbind(y, s, x))
}

# Load files and do the magic.
train <- ReadFilesAndMerge("train")
test <- ReadFilesAndMerge("test")
# We could do that without a function, if we were merging them immediately
# after reading from files, but this way we have a nice data.table for each
# set and can work with each separately if needed.

# Now let's rbind them.
data <- rbind(train, test)

# Create data.table index by Activity, Subject
setkey(data, Activity, Subject)

# Now the real magic begins!
# The following will group our data.table by Activity, Subject and calculate
# an average for each of the other columns within a group.
# .(Activity, Subject) is data.table's shortcut for list(Activity, Subject)
# .SD represents the Subset of Data for all the columns not in the groups.
tidydata <- data[, lapply(.SD, mean), by = .(Activity, Subject)]

# Now let's write it to disk.
write.table(tidydata, file = "tidydata.txt", row.names = F)
# That's it! :-)
# Comments are very welcome!

# Heck, just for the fun of it, a few hours before the deadline I decided to
# write the code, which converts my wide tidydata into long narrow tidy data.
library(reshape2)
# melt columns 3..68 (the measurements) against the Activity/Subject ids
t.long <- melt(tidydata, 1:2, 3:68)

# Now we have narrow long tidydata, but we also need to split "variable"
# column into 3 columns for Signal, Axis and Feature.
# This is a quick last minute dash, so probably there is a better solution.
library(stringr)
# The split pattern is a regular expression, so the dot has to be escaped.
v <- str_split_fixed(t.long$variable, "\\.", 3)
# Add columns to t.long
t.long$Signal <- v[, 1]
t.long$Feature <- v[, 2]
t.long$Axis <- v[, 3]
# drop "variable" column (data.table := NULL removes it by reference)
t.long[, variable := NULL]

# Now, since all our Signal,Axis groups have exactly two measures (mean and
# std), and since the data is considered messy if we store more than one
# distinct value in one column/variable, we should unmelt them to two
# separate Mean and Std columns.
# NOTE(review): this pairing assumes the std rows appear in exactly the same
# order as the mean rows after the melt -- confirm before relying on it.
s <- t.long[Feature=="std", value]
t.long <- t.long[Feature=="mean"]
t.long[, Std:=s]
# rename "value" to "Mean"
setnames(t.long, c("value"), c("Mean"))
# drop Feature column
t.long[, Feature := NULL]

# But in fact that's not the end of the story! They really want us to go
# insane and split the Signal value into many sub values:
# 1) Unit of measurement (t denoting "time" and f denoting "frequency")
t.long[, Unit:=ifelse(substr(Signal, 1, 1) == "t", "time", "freq")]
# 2) Originator - Body or Gravity
t.long[, Originator:=ifelse(substr(Signal, 2, 5) == "Body", "Body", "Gravity")]
# 3) Device - Accelerator or Gyroscope
t.long[, Device:=ifelse(grepl("Gyro", Signal, fixed = T), "Gyro", "Acc")]
# 4) Jerk - TRUE or FALSE for if it's a Jerk signal
t.long[, Jerk:=ifelse(grepl("Jerk", Signal, fixed = T), T, F)]
# 5) Magnitude - TRUE or FALSE for if it's a measure of signal magnitude
t.long[, Magnitude:=ifelse(grepl("Mag", Signal, fixed = T), T, F)]

# Now let's drop the Signal column and reorder the columns
t.long[, Signal := NULL]
setcolorder(t.long, neworder = c("Activity", "Subject", "Unit", "Device",
                                 "Originator", "Jerk", "Magnitude", "Axis",
                                 "Mean", "Std"))

# OK, now let's write it to "tidydata-long.txt"
write.table(t.long, file = "tidydata-long.txt", row.names = F)
# The End
22d2118341a8d9d163b018198060bfd72348b30b
290f67b56cf2997f970cf662cffab1c6dd14f0e0
/landslide.r
2be8e06477afae49b155e8f2ccf30d13772dd76e
[]
no_license
sanchu01/Trend-Fitting
a48af8b5b9dbe4bf6593415cb7eb1c39beabbabf
944daf6f894cc15007a7111e8080d47419eb203c
refs/heads/main
2023-02-04T10:21:11.220973
2020-12-22T18:04:32
2020-12-22T18:04:32
323,698,498
0
0
null
null
null
null
UTF-8
R
false
false
690
r
landslide.r
# Fit a quadratic trend y = a + b*x + c*x^2 to annual landslide counts by
# least squares, with the predictor x centred on the year 2013, then print
# the coefficients and a point prediction at x = 5.
cat(" LANDSLIDE")

year = c(2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019)
y    = c(29, 32, 17, 13, 11, 15, 20, 13, 25, 13)  # counts for 2009..2018

# Centred predictor and its powers, computed vectorised instead of with
# the original element-by-element loop (same values, same names).
x   = year[1:10] - 2013
x2  = x * x
x3  = x2 * x
x4  = x2 * x2
xy  = x * y
x2y = x2 * y

# Normal-equation shortcuts for the quadratic coefficients.
# NOTE(review): these closed forms implicitly assume sum(x) == 0 and
# sum(x^3) == 0, which only holds for a symmetric centring; here x runs
# -4..5, so the fit is only approximate -- confirm the intended centring.
c = (10 * sum(x2y) - (sum(x2) * sum(y))) / (10 * sum(x4) - (sum(x2)^2))
a = (sum(y) - (c * sum(x2))) / 10
b = sum(xy) / sum(x2)

cat("\n The value of a:", a)
cat("\n The value of b:", b)
cat("\n The value of c:", c)
cat("\n The Equation is y=", a, b, "x", c, "x2")
# NOTE(review): with this centring x = 5 corresponds to the year 2018,
# not 2019 as the label says -- verify which year is intended.
cat("\n The Prediction for the year 2019")
cat("\n ", a + b * 5 + c * 5^2)
fd9ca8cd1736e7414b09685ba046ab873fc027c4
2d34708b03cdf802018f17d0ba150df6772b6897
/googleidentitytoolkitv3.auto/man/Relyingparty.Rd
eb1bf8a7f26fa381f607e485b13ff37f048e9a45
[ "MIT" ]
permissive
GVersteeg/autoGoogleAPI
8b3dda19fae2f012e11b3a18a330a4d0da474921
f4850822230ef2f5552c9a5f42e397d9ae027a18
refs/heads/master
2020-09-28T20:20:58.023495
2017-03-05T19:50:39
2017-03-05T19:50:39
null
0
0
null
null
null
null
UTF-8
R
false
true
1,022
rd
Relyingparty.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/identitytoolkit_objects.R \name{Relyingparty} \alias{Relyingparty} \title{Relyingparty Object} \usage{ Relyingparty(captchaResp = NULL, challenge = NULL, email = NULL, idToken = NULL, newEmail = NULL, requestType = NULL, userIp = NULL) } \arguments{ \item{captchaResp}{The recaptcha response from the user} \item{challenge}{The recaptcha challenge presented to the user} \item{email}{The email of the user} \item{idToken}{The user's Gitkit login token for email change} \item{newEmail}{The new email if the code is for email change} \item{requestType}{The request type} \item{userIp}{The IP address of the user} } \value{ Relyingparty object } \description{ Relyingparty Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} Request of getting a code for user confirmation (reset password, change email etc.) } \seealso{ Other Relyingparty functions: \code{\link{relyingparty.getOobConfirmationCode}} }
f04f17fd4d801e09e8e4523b6ddd0357d7d59b24
171084c6adea20e506b70125862549f57b671e01
/R/utils.R
6b37c70fbefb1fe8ed10f7467928057937d18c41
[ "MIT" ]
permissive
hongyuanjia/ggpsychro
ce15e25e5673bfac5897399145df64f0bf348080
961d96d8b2239556ead600a304aab8b92e61a8a6
refs/heads/master
2022-02-01T08:45:10.099317
2021-02-02T14:22:46
2021-02-02T14:22:46
228,736,101
14
2
null
null
null
null
UTF-8
R
false
false
3,283
r
utils.R
# utils.R: internal helpers for ggpsychro (unit handling, geometry math,
# small performance shims).

# TRUE when x is a ggplot2 "waiver" sentinel (waiver()).
is.waive <- function(x) inherits(x, "waiver")

# new_data_frame {{{
# copied from ggplot2/R/performance.R
# Fast data.frame constructor and indexing
# No checking, recycling etc. unless asked for
# Scalars (length 1) are recycled to n rows; anything else must already
# have exactly n elements.
new_data_frame <- function(x = list(), n = NULL) {
    if (length(x) != 0 && is.null(names(x))) {
        stop("Elements must be named")
    }
    lengths <- vapply(x, length, integer(1))
    if (is.null(n)) {
        # infer row count from the longest element (0 if any is empty)
        n <- if (length(x) == 0 || min(lengths) == 0) 0 else max(lengths)
    }
    for (i in seq_along(x)) {
        if (lengths[i] == n) next
        if (lengths[i] != 1) {
            stop("Elements must equal the number of rows or 1")
        }
        x[[i]] <- rep(x[[i]], n)
    }

    class(x) <- "data.frame"

    attr(x, "row.names") <- .set_row_names(n)
    x
}
# }}}

# with_units {{{
# Evaluate expr with the psychrolib unit system set to `units`, resetting
# the package-level unit flag afterwards.
# NOTE(review): psy_op is a package-level environment defined elsewhere;
# this only resets psy_op$UNITS, it does not restore a previous
# psychrolib unit system -- confirm that is intentional.
#' @importFrom psychrolib SetUnitSystem
with_units <- function (units, expr) {
    psychrolib::SetUnitSystem(units)
    on.exit(psy_op$UNITS <- NA_character_, add = TRUE)
    force(expr)
}
# }}}

# encode_units {{{
# Map a unit-system name to an integer code (SI = 1L, IP = 2L).
encode_units <- function (units) {
    switch(units, "SI" = 1L, "IP" = 2L,
        stop("'units' can only be either 'SI' or 'IP'."))
}
# }}}

# decode_units {{{
# Inverse of encode_units(): map the integer code back to "SI"/"IP".
decode_units <- function (code) {
    c("SI", "IP")[code]
}
# }}}

# bid_conv {{{
# bidirectional conversion: dispatch on the *target* unit
# ("F"/"C" for temperature, "Gr"/"G" for humidity ratio).
bid_conv <- function (x, to) {
    switch(to,
        "F" = get_f_from_c(x),
        "C" = get_c_from_f(x),
        "Gr" = get_gr_from_g(x),
        "G" = get_g_from_gr(x)
    )
}
# Celsius -> Fahrenheit
get_f_from_c <- function (x) x * 9. / 5. + 32.
# Fahrenheit -> Celsius
get_c_from_f <- function (x) (x - 32) * 5. / 9.
# grains -> g/kg-style ratio (factor 7 pairing with get_gr_from_g)
get_g_from_gr <- function (x) x / 7.
get_gr_from_g <- function (x) x * 7.
# }}}

# dist_euclid {{{
# Euclidean distance between (x, y) and (xend, yend); vectorized.
dist_euclid <- function (x, y, xend, yend) {
    sqrt((xend - x) ^2 + (yend - y)^2)
}
# }}}

# rep_dataframe {{{
# Stack n copies of a data frame on top of each other.
rep_dataframe <- function (df, n) {
    do.call(rbind, replicate(n, df, simplify = FALSE))
}
# }}}

# The units of humidity ratio is lb_H2O lb_Air-1 [IP] or kg_H2O kg_Air-1
# [SI], but for the psychrometric chart we use gr_H2O lb_Air-1 [IP] or
# g_H2O kg_Air-1 [SI]. Should amplify before plotting or do reversely
# during calculation.
# amplify_hum {{{
# Scale a humidity ratio up to chart units (x1000 SI, x7000 IP).
amplify_hum <- function (hum, units) {
    if (units == "SI") {
        hum * 1000.0
    } else {
        hum * 7000.0
    }
}
# }}}
# narrow_hum {{{
# Inverse of amplify_hum(): scale chart units back to mass ratios.
narrow_hum <- function (hum, units) {
    if (units == "SI") {
        hum / 1000.0
    } else {
        hum / 7000.0
    }
}
# }}}

# The units of enthalpy is J kg-1 [SI], but for the psychrometric chart
# we use kJ kg-1 [SI]. Should amplify before plotting or do reversely
# during calculation. IP values pass through unchanged.
# amplify_enth {{{
amplify_enth <- function (enth, units) {
    if (units == "SI") {
        enth * 1000.0
    } else {
        enth
    }
}
# }}}
# narrow_enth {{{
narrow_enth <- function(enth, units) {
    if (units == "SI") {
        enth / 1000.0
    } else {
        enth
    }
}
# }}}

# slope {{{
# Slope of the segment from (x, y) to (xend, yend); vectorized.
slope <- function (x, y, xend, yend) {
    (yend - y) / (xend - x)
}
# }}}

# adopted from thomasp85/ggraph/R/utils.R
# line_angle {{{
# Angle of the segment in [0, 2*pi) radians (or degrees when
# degrees = TRUE). Vertical segments (NaN slope) map to 2*pi.
line_angle <- function(x, y, xend, yend, degrees = TRUE) {
    angles <- atan(slope(x, y, xend, yend))
    angles[is.nan(angles)] <- 2 * pi
    angles[angles < 0] <- angles[angles < 0] + 2 * pi

    if (degrees) {
        angles * 360 / (2 * pi)
    } else {
        angles
    }
}
# }}}
265795ec8519cba30db7819c50f8998eaede336a
90a905e25ccf99565b10616506395b4008373424
/rankall.R
e821eb6adae9ef42de4e61d486fb6a2f568ab556
[]
no_license
jerry-ban/coursera_rprog
8016af286e2f55ecd5d1dbb657bfe9299fa8ad38
3a8d0db76ab7fadd1771629ebadbb6c3825785f0
refs/heads/master
2021-05-05T19:28:47.914105
2018-01-25T18:40:52
2018-01-25T18:40:52
117,782,524
0
0
null
null
null
null
UTF-8
R
false
false
4,049
r
rankall.R
library(dplyr)
#source("rankhospital.R")

# rankall: for every US state, return the hospital holding rank `num`
# ("best", "worst", or an integer) on the 30-day mortality rate for the
# given outcome. Reads the hospital-compare CSV from
# "Assignment3-data/outcome-of-care-measures.csv".
# NOTE(review): this function is exploratory in style -- it contains many
# bare expressions (e.g. `outcome_new`, `col_names`, `summary(...)`) that
# compute values which are then discarded, plus commented-out scratch
# code. They are kept as-is; only comments were added.
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  ## Check that state and outcome are valid
  ## Return hospital name in that state with the given rank
  ## 30-day death rate
  #rankhospital("MD", "heart attack", "worst")
  #state<- "TX"
  #outcome <- "heart attack"
  #num <- 20
  rdata<-read.csv("Assignment3-data/outcome-of-care-measures.csv", stringsAsFactors = FALSE)

  all_states <- unique(rdata$State)
  all_states <- all_states[order(all_states)]

  # normalize the requested outcome ("heart attack" ->
  # "heartattack") and build the flattened target column name
  outcome_new <- paste(strsplit(tolower(outcome),"[., ]")[[1]],collapse="")
  outcome_new <-paste(c("hospital30daydeathmortalityratesfrom", outcome_new), collapse = "")
  outcome_new

  # flatten the CSV column names the same way for comparison
  col_names <- names(rdata)
  col_names
  col_names_new<- sapply(as.list(col_names), function(x) { paste( strsplit( tolower(x),"[.]")[[1]] , collapse="")} )
  col_names_new
  col_count <- length(col_names)

  # locate the column matching the requested outcome; error out when the
  # outcome string is not recognized
  #col_find_outcome<-sapply(as.list(col_names_new), function(x) grepl(outcome_new, x))
  col_find_outcome<-sapply(as.list(col_names_new), function(x) outcome_new==x)
  if(sum(col_find_outcome)<1){
    stop("invalid outcome")
  }
  sum(col_find_outcome)
  col_find_death<-sapply(as.list(col_names_new), function(x) grepl("death", x))
  sum(col_find_death)

  col_ids <- 1:col_count
  col_data <- data.frame(col_names, col_names_new, col_find_outcome,col_find_death, col_ids)
  #str(col_data)
  #head(col_data)
  find_col<- col_data[which(col_data$col_find_outcome & col_data$col_find_death),]
  find_col
  col_id <-find_col$col_ids[1]
  col_id

  hospital_name_id <-grep("Hospital.Name", colnames(rdata))
  #str(hospital_name_id)
  state_col_id <-grep("State", colnames(rdata))

  sdata <- rdata # rdata[which(rdata$State==state & rdata$State == state), ]
  # the mortality column is read as character ("Not Available" etc.);
  # coerce to numeric, silencing the NA-coercion warnings
  sdata[,col_id] <- suppressWarnings(as.numeric(as.character(sdata[,col_id])))
  summary(sdata)
  # keep only hospital name, mortality rate, and state
  final_data <- sdata[,c(hospital_name_id,col_id,state_col_id)]
  summary(final_data)
  # order by state, then rate, then hospital name (ties broken by name)
  result <- final_data[order(final_data[,3], final_data[,2], final_data[,1]), ]
  #result_cleaned <- result[complete.cases(result),]
  result_cleaned <- result
  result2 <- data.frame()
  # one data frame per state, columns reordered to (state, name, rate)
  candidates<- split(result_cleaned[,c(3,1,2)], result_cleaned$State)
  candidates
  # convert list to dataframe
  # df <- data.frame(matrix(unlist(l), nrow=132, byrow=T),stringsAsFactors=FALSE)

  # For one state's data frame, pick the row at the requested rank.
  # Returns a 1-row frame named (state, hospital, score); hospital is NA
  # when the rank exceeds the number of hospitals in the state.
  getCompleteCases<- function(x, num){
    class(x)
    state_name<-x[1,1]
    state_name
    xd<-x
    #xa <- xd[ xd[,3]== min(xd[,3],na.rm=TRUE),]
    xa<-xd[order(xd[,3], xd[,2], xd[,1]), ]
    xa
    xb<-xa[complete.cases(xa),]
    xb
    # guard against an empty state frame
    if(nrow(xa) <1){
      xa[1,] <- c(state_name,NA,NA)
    }
    colnames(xa)<-c("state", "hospital", "score")
    true_length = nrow(xa)
    # NOTE(review): ranks are taken from xa (which still contains NA
    # scores, sorted last), not from the complete-cases frame xb computed
    # above -- confirm "worst" should be able to land on an NA row.
    if(tolower(as.character(num))=="best") {
      xb<-xa[1,]
    }
    else if(tolower(as.character(num))=="worst") {
      xb<-xa[true_length,]
    }
    else if(num<=true_length) {
      xb <- xa[num,]
    }
    else {
      xb<-data.frame("state"=c(state_name),"hospital" =c(NA), "score" = c(NA) )
    }
    row.names(xb) <- c(state_name)
    xb
  }

  final_candidates<- lapply(candidates, function(x) {
    getCompleteCases(x, num)
  }
  )
  final_candidates
  # stack the per-state 1-row frames into the result table
  result <-data.frame()
  for( x in final_candidates) {
    result <- rbind(result, x[1,])
  }
  final_result <- result
  #lapply( final_candidates, function(x, result) rbind(result, x[[1]]))
  summary(final_result)
  aa<-final_result#[order(c("State","Hospital.Name"),]

  #rdata[,"Hospital.Name"] <- as.character(rdata[,"Hospital.Name"])
  #head(result)
  #tail(result)
  aa

  ## Read outcome data
  ## Check that state and outcome are valid
  ## Return hospital name in that state with lowest 30-day death
  ## rate
}
f315fc71645910d8c0e0484c7e04f99b0abe6bfc
b2ccc621a514e3b8f6201e74aeaf639873b9ac14
/week4/seminar/week4.R
ba573bc6aec2bc42e8abfb195a9d1ef81222c1ff
[]
no_license
danilchenko-andrey/cs-matstat-2014
4e39855544a3700188fcad7567ac99c6429be24b
88ca9556548c1dceb67cf0c0138ff6252caa594c
refs/heads/master
2020-06-03T06:01:11.596377
2014-04-09T17:25:19
2014-04-09T17:25:19
null
0
0
null
null
null
null
UTF-8
R
false
false
2,697
r
week4.R
# week4.R: statistics seminar exercises -- paired vs unpaired t-tests on
# correlated data, one-way ANOVA, and non-parametric tests.
# (Original Russian comments translated to English.)

# Task 1: paired and independed t-test
cor <- 0.6
K <- matrix(c(1, 0.6, 0.6, 1), ncol=2)
# Cholesky factor of the target correlation matrix, used to induce
# correlation between the two columns
A <- chol(K)

# our function that will generate correlated samples
# (k replications of n correlated (X, Y) pairs, shifted by `diff`)
generate.f2 <- function(n, k, A, diff) {
  XY <- sapply(1:k, FUN = function(k, n) {
    X <- cbind(rnorm(n, 0, 1), rnorm(n, 0, 1))
    Y <- A %*% t(X) + diff
    cbind('X'=Y[1,], 'Y'=Y[2,])
  }, n=n)
}

n <- 50
k <- 1000

XY <- generate.f2(n, k, A, diff=c(2,30))
# NOTE(review): each column of XY stacks n X-values followed by n
# Y-values (2n rows total), so the slices below take the two *halves of
# the X block* rather than X vs Y -- confirm whether 1:n and
# (n+1):(2*n) were intended.
X <- XY[1:(n/2),]
Y <- XY[(1+(n/2)):n,]

sapply(1:k, FUN=function(i) {
  result <- t.test(X[,i], Y[,i], paired=T)
  result$p.value
}) -> p.paired

# power of paired test
sum(p.paired > 0.05) / k

sapply(1:k, FUN=function(i) {
  result <- t.test(X[,i], Y[,i], paired=F)
  result$p.value
}) -> p.unpaired

# power of unpaired test
sum(p.unpaired > 0.05)/k

# task 2 - ANOVA
#================
n <- 50

# create three groups, one of which is shifted (mean 1 instead of 0)
sample1 <- rnorm(n, 0, 1)
sample2 <- rnorm(n, 0, 1)
sample3 <- rnorm(n, 1, 1)

# combine the groups into a single data set with a group factor
sample <- c(sample1, sample2, sample3)
group <- rep(c('a','b','c'), rep(n,3))
data <- data.frame(sample, factor=group)

# anova
result <- aov(sample~factor,data)
summary(result)
# here P(>F) << 0.05, meaning the groups do differ

# now create new groups with no differences between them
sample1 <- rnorm(n, 0, 1)
sample2 <- rnorm(n, 0, 1)
sample3 <- rnorm(n, 0, 1)

# combine the groups into a single data set
sample <- c(sample1, sample2, sample3)
group <- rep(c('a','b','c'), rep(n,3))
data <- data.frame(sample, factor=group)

# anova
result <- aov(sample~factor,data)
summary(result)
# here P(>F) > 0.05, meaning the groups show no differences

# task 3 - non-parametric criteries
n1 <- 20
n2 <- 10
k <- 1000

# mixture samples: normal bulk plus a uniform contamination block
sample1 <- append(rnorm(n1, 4, 2), runif(n2, 2, 4))
# NOTE(review): runif(n2, 4, 2) has min > max and produces NaN with a
# warning -- the replication loop below uses runif(n2, 3, 7) instead;
# confirm which bounds are intended here.
sample2 <- append(rnorm(n1, 5, 2), runif(n2, 4, 2))

# overlay the two histograms with mean (dashed) and median (solid) lines
hist(sample2, col=rgb(1,0,0,0.3), probability=T, ylim=c(0,0.3))
hist(sample1, col=rgb(0,1,0,0.3), probability=T, ylim=c(0,0.3), add=T)
abline(v=mean(sample2), col='red', lty=5)
abline(v=mean(sample1), col='green', lty=5)
abline(v=median(sample2), col='red', lwd=2)
abline(v=median(sample1), col='green', lwd=2)

# compare Wilcoxon (Mann-Whitney) vs Student t-test p-values over k
# freshly generated sample pairs
ps <- sapply(1:k, FUN=function(k) {
  sample1 <- append(rnorm(n1, 4, 2), runif(n2, 2, 4))
  sample2 <- append(rnorm(n1, 5, 2), runif(n2, 3, 7))
  p.w <- wilcox.test(sample1, sample2)$p.value
  p.t <- t.test(sample1, sample2)$p.value
  cbind(p.w, p.t)
})

# power of mann-whitney test
sum(ps[1,] < 0.05)/k

# power of student test
sum(ps[2,] < 0.05)/k
323cfff4a6ffce12461d86a643d3634e60dd23dd
ea5db4ea1b53886058be490089820ed419980628
/homework/03-7-13 Donnelly homework ggplot changing existing plot.R
49fa64769f8aa49ed7bb22c679b966835a5faf72
[]
no_license
CatsCauseTypos/r-club
d5d082b432eaeef8b9815b8a2947f17e3daec664
07ba138504abb0cce272679dc9c00416cd99b213
refs/heads/master
2021-06-09T02:48:55.338192
2017-04-20T21:00:10
2017-04-20T21:01:36
9,950,643
0
0
null
null
null
null
UTF-8
R
false
false
2,703
r
03-7-13 Donnelly homework ggplot changing existing plot.R
# Homework: changing an existing ggplot.
# NOTE(review): relies on a `movies` data frame being available in the session
# (presumably ggplot2movies::movies) -- confirm it is loaded before running.

#first, let's get rid of movies without mpaa's and without ratings:
mymov3 <- subset(x=movies, year>1990 & mpaa !="")

# Density of release years by MPAA rating.
ggplot(data = mymov3,
       mapping = aes( x = year, fill = mpaa ) ) +
  geom_density(alpha=0.4) +
  labs(title="oh for heaven's sakes") +
  theme(legend.position = "bottom")
#good.

#remembering what's there:
head(movies)
#Mike C's recommendation to check what's there:
names(movies)

#if I want to watch a funny animated movie with my college friends OR my friends' kids which has been highly rated:
ggplot(data = mymov3,
       mapping = aes( x = year, fill = mpaa ) ) +
  geom_density(alpha=0.4) +
  labs(title="Excellent Animated Comedies Appropriate for Kids or Adults" ) +
  theme(legend.position = "bottom") +
  facet_grid(Comedy~Animation)
#that 1/0 crap is not useful for categories...

#Mike C's method of getting the titles to have meaning:
# Recode 0/1 indicator columns as labelled factors so facet strips are readable.
mymov3$Comedy2 <- factor(mymov3$Comedy, labels = c("Not Funny", "Hilarity Ensues"))
mymov3$Animation2 <- factor(mymov3$Animation, labels = c("Real Humans", "Drawn Together"))
# Round ratings to whole numbers for use as a discrete aesthetic below.
mymov3$rating2 <- round(mymov3$rating)
# ? mymov3$rating2 <- c(1, 3, 4)
# mymov3$rating2 <- as.factor(mymov3$rating2)
# NOTE(review): the two commented lines above look like abandoned experiments;
# the round() version is the one the later plots depend on.

#trying out mike c's labels...
ggplot(data = mymov3,
       mapping = aes( x = year, fill = mpaa, linetype = as.factor(rating2) ) ) +
  geom_density(alpha=0.4) +
  labs(title="Excellent Animated Comedies Appropriate for Kids or Adults" ) +
  theme(legend.position = "bottom") +
  facet_grid(Comedy2~Animation2)
#ok, but what's the error from? and what does it mean? and why are there dotted lines and no more animated comedies?
#i have no idea how to proceed.

#I wanted to make a visual representation of what my best google search criteria would be for a good animated comedy that was age-appropriate for a wide variety of audiences (e.g. me and college frieds vs children)
#from this graph I could deduce what years had the highest densities of highly regarded movies in those areas.

#questions:
#Describe (in your own words) the different purposes of layers in a ggplot.
#Layers are supposed to allow a lot of flexibility in graphing and a streamlined organization. Layers enable customization of everything individually.

#Woe is me, I have too much data and my plots are overwhelming to look at. Give me 7 tips (in your own words) for dealing with my problem.
#smaller points, hollow (vs filled) things, transparency, jitter, colors, use bins, fracturing the graph (those cool hexagons)

#Susan / Sharon to the rescue!
# Jittered scatter of rounded rating vs. MPAA, coloured by year, facetted by
# the recoded comedy/animation factors.
ggplot(data = mymov3, aes( x= as.factor(rating2), y = mpaa, color= as.factor(year))) +
  geom_jitter() +
  facet_grid(Comedy2~Animation2) +
  theme(legend.position = "bottom") +
  scale_colour_hue(h.start= 5)
9b7ba11bf9c19eddb1de62bd12d7a93225883c0c
10a60179df1fb101d3f22da0ad63467e14daac3f
/R/bfs_get_catalog_tables.R
5ef8012723c34d54cffbcf3ebd2b60e45db70cb2
[]
no_license
cran/BFS
e17ac6e24b63e268882156e0c5ec1f274702ae07
4a80dbe521580a153388352eaa7f916d94ebb6ab
refs/heads/master
2023-08-18T00:36:32.016655
2023-08-11T11:33:52
2023-08-11T13:30:27
236,560,495
0
0
null
null
null
null
UTF-8
R
false
false
8,354
r
bfs_get_catalog_tables.R
#' Get the BFS tables catalog
#'
#' Get the list of the tables available in the official \href{https://www.bfs.admin.ch/bfs/en/home/statistiken/kataloge-datenbanken/daten/_jcr_content/par/ws_catalog.rss.xml}{RSS Feed} of the Swiss Federal Statistical Office tables catalog.
#'
#' @param language character The language of a BFS catalog, i.e. "de", "fr", "it" or "en".
#' @param title character String to search in title, subtitle and supertitle
#' @param spatial_division BFS datasets by spatial division, choose between "Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions" or "International"
#' @param prodima numeric Get only specific BFS themes using one or multiple prodima numbers
#' @param inquiry character BFS datasets for an inquiry
#' @param institution character BFS datasets for an institution
#' @param publishing_year_start character BFS datasets for a publishing year start
#' @param publishing_year_end character BFS datasets for a publishing year end
#' @param order_nr character Filter by BFS Number (FSO number)
#' @param skip_limit boolean skip limit, TRUE or FALSE
#'
#' @return A tbl_df (a type of data frame; see tibble or dplyr packages).
#'
#' \describe{
#'   \item{title}{A character column with the title of the BFS dataset}
#'   \item{language}{A character column with the language of the BFS dataset}
#'   \item{publication_date}{The published date of the BFS dataset in the tables catalog}
#'   \item{number_asset}{The BFS asset number}
#'   \item{url_bfs}{A character column with the URL of the related BFS webpage}
#'   \item{url_table}{A character column with the URL of the PX file}
#'   \item{catalog_date}{The released date of the current BFS tables catalog}
#' }
#'
#' @importFrom tidyRSS tidyfeed
#' @importFrom janitor clean_names
#' @importFrom purrr pmap_dfr possibly
#' @importFrom tibble tibble
#'
#' @seealso \code{\link{bfs_get_data}}
#'
#' @examples
#' \donttest{bfs_get_catalog_tables(language = "en", title = "students", prodima = c(900212))}
#'
#' @export
bfs_get_catalog_tables <- function(language = "de", title = NULL, spatial_division = NULL, prodima = NULL, inquiry = NULL, institution = NULL, publishing_year_start = NULL, publishing_year_end = NULL, order_nr = NULL, skip_limit = TRUE) {
  language <- match.arg(arg = language, choices = c("de", "fr", "it", "en"))

  # Coerce an optional scalar argument to the string the RSS query expects:
  # NULL becomes "", and anything longer than one element is an error.
  # (Replaces six copy-pasted checks that used elementwise `&` in a scalar `if`.)
  as_scalar_query <- function(x, name) {
    if (is.null(x)) {
      return("")
    }
    if (length(x) != 1) {
      stop("`", name, "` should be unique", call. = FALSE)
    }
    x
  }

  # Map a spatial-division name onto the BFS "geography" query number.
  geography_names <- c("Switzerland", "Cantons", "Districts", "Communes", "Other spatial divisions", "International")
  geography_numbers <- c(900091, 900092, 900093, 900004, 900008, 900068)
  names(geography_numbers) <- geography_names
  if (is.null(spatial_division)) {
    geography <- ""
  } else {
    spatial_division <- match.arg(arg = spatial_division, choices = geography_names)
    geography <- geography_numbers[names(geography_numbers) == spatial_division]
  }

  institution <- as_scalar_query(institution, "institution")
  inquiry <- as_scalar_query(inquiry, "inquiry")
  publishing_year_start <- as_scalar_query(publishing_year_start, "publishing_year_start")
  publishing_year_end <- as_scalar_query(publishing_year_end, "publishing_year_end")
  title <- as_scalar_query(title, "title")
  order_nr <- as_scalar_query(order_nr, "order_nr")

  # BFS prodima numbers of all statistical themes. We query theme by theme
  # because the RSS feed is limited to 350 entries per request (see issue #5).
  themes_prodima <- c(900001, 900010, 900035, 900051, 900075, 900084, 900092, 900104, 900127, 900140, 900160, 900169, 900191, 900198, 900210, 900212, 900214, 900226, 900239, 900257, 900269, 900276)
  if (is.null(prodima)) {
    prodima <- themes_prodima
  }

  # One query per prodima number; all other parameters are replicated so that
  # purrr::pmap_dfr() can iterate over the lists in parallel.
  length_prodima <- length(prodima)
  rss_queries <- list(
    prodima = prodima,
    language = rep(language, length_prodima),
    skipLimit = rep(skip_limit, length_prodima),
    institution = rep(institution, length_prodima),
    geography = rep(geography, length_prodima),
    inquiry = rep(inquiry, length_prodima),
    publishingyearstart = rep(publishing_year_start, length_prodima),
    publishingyearend = rep(publishing_year_end, length_prodima),
    title = rep(title, length_prodima),
    orderNr = rep(order_nr, length_prodima)
  )

  # Fetch and tidy one RSS query; returns the selected catalog columns.
  get_rss_feed_data <- function(language, skipLimit, prodima, institution, geography, inquiry, publishingyearstart, publishingyearend, title, orderNr) {
    feed <- paste0(
      "https://www.bfs.admin.ch/bfs/", language,
      "/home/statistiken/kataloge-datenbanken/tabellen/_jcr_content/par/ws_catalog.rss.xml?skipLimit=", skipLimit,
      "&prodima=", prodima,
      "&institution=", institution,
      "&geography=", geography,
      "&inquiry=", inquiry,
      "&publishingyearstart=", publishingyearstart,
      "&publishingyearend=", publishingyearend,
      "&title=", title,
      "&orderNr=", orderNr
    )

    df_feed <- tidyRSS::tidyfeed(feed = feed)
    # Clean column names: strip tidyRSS prefixes, then camelCase everything.
    colnames(df_feed) <- gsub("feed_", "", colnames(df_feed))
    colnames(df_feed) <- gsub("item_", "", colnames(df_feed))
    df_feed <- janitor::clean_names(df_feed, "small_camel")

    base_url_bfs <- paste0("https://www.bfs.admin.ch/content/bfs/", language, "/home/statistiken/kataloge-datenbanken/tabellen.assetdetail.")
    base_url_table <- "https://www.bfs.admin.ch/bfsstatic/dam/assets/"

    # The "_2" columns hold the item-level (vs feed-level) metadata when both exist.
    if ("title_2" %in% names(df_feed)) df_feed$title <- df_feed$title_2
    if ("link_2" %in% names(df_feed)) df_feed$url_bfs <- df_feed$link_2
    if ("pubDate_2" %in% names(df_feed)) df_feed$publication_date <- df_feed$pubDate_2
    if ("pubDate" %in% names(df_feed)) df_feed$catalog_date <- df_feed$pubDate

    # Rewrite the asset-detail page URL into the direct table download URL.
    df_feed$url_table <- gsub(base_url_bfs, base_url_table, df_feed$url_bfs)
    df_feed$url_table <- gsub(".html$", "/master", df_feed$url_table)

    # The asset number is the numeric part of the item GUID.
    df_feed$number_asset <- gsub(pattern = "\\D", replacement = "", x = df_feed$guid)
    df_feed$number_asset <- as.numeric(df_feed$number_asset)

    vars <- c("title", "language", "publication_date", "number_asset", "url_bfs", "url_table", "catalog_date")
    df_feed[vars]
  }

  # possibly() turns per-theme failures into empty tibbles instead of aborting
  # the whole catalog fetch.
  df <- purrr::pmap_dfr(rss_queries, purrr::possibly(get_rss_feed_data, otherwise = tibble::tibble()), .progress = TRUE)

  # Drop duplicates (a table can belong to several themes).
  df[!duplicated(df), ]
}
f5df0c521483e8522ce3686e3595628eacfe3ea6
9c2f40ae8269413feb32cffa6d581dfe9f931dd0
/R/matching_types.R
21b0d55586934778674b294027f17ae48dbd681c
[ "MIT" ]
permissive
tpetricek/datadiff
ed5ca6cdfe8129ed947c24a42c62ea265aad86ef
8941269b483da9abcacde804b7f6b6e0a122a57a
refs/heads/master
2020-07-31T19:09:25.118489
2019-09-25T23:55:01
2019-09-25T23:55:01
210,723,004
0
0
MIT
2019-09-25T00:39:49
2019-09-25T00:39:49
null
UTF-8
R
false
false
1,217
r
matching_types.R
#' Identify matching patch types #' #' Returns the vector of elementary patch types found in the given \code{patch} #' which have a parameter with the given name. #' #' @param patch #' A patch. #' @param param_name #' A scalar parameter name. #' @param short #' A logical flag passed to the \code{patch_type} function. #' #' @return The matching patch types as a character vector. #' #' @export #' #' @examples #' patch <- compose_patch(patch_permute(10:1), #' patch_rescale(1L, shift = 0, scale_factor = 2), #' patch_delete(1L)) #' matching_types(patch, param_name = "shift") #' matching_types <- function(patch, param_name, short = TRUE) { stopifnot(length(param_name) == 1) types <- patch_type(patch, short = short, unique = TRUE) decomposed <- decompose_patch(patch) is_matching <- purrr::map_lgl(types, .f = function(type) { is_type_match <- purrr::map_lgl(decomposed, .f = function(p) { identical(patch_type(p, short = short), type) }) all(purrr::map_lgl(decomposed[is_type_match], .f = function(p) { param_name %in% names(get_patch_params(p)) })) }) if (!any(is_matching)) return(character(0)) types[is_matching] }
8b29e5166ca8c9995886070a2dc2fc01cdd4a451
83b618a2d0f060a1142725d347810788c25c8440
/FinalOutput/Code in R format/shiny_app_web/server.R
01aeb7778a6bcdb5ec092a0eb8227a5fee93edad
[]
no_license
senthil-ramasamy/MovieLensDataset-SDL-R-Project
f38475c9d664912ac74978db9d5a734cb645e96d
21239105ab395e99ea56ae9a1275c6dab13b8c33
refs/heads/master
2021-01-19T03:52:02.882710
2016-10-25T21:44:37
2016-10-25T21:44:37
65,266,386
0
0
null
null
null
null
UTF-8
R
false
false
2,763
r
server.R
library(shiny)
library(reshape2)
library(data.table)
library(Matrix)
library(proxy)
library(recommenderlab)

# Load the MovieLens data once at start-up; shared (read-only) across sessions.
# NOTE(review): absolute home-directory paths -- confirm they exist on the
# deployment host.
movies <- read.csv("~/Works/Rworks/Movielens/Recom/movies.csv", stringsAsFactors = FALSE)
ratings <- read.csv("~/Works/Rworks/Movielens/Recom/ratings.csv", stringsAsFactors = FALSE)

# Server: computes top-10 UBCF movie recommendations for the requested user.
shinyServer(function(input, output) {

  # FIX: `input$...` may only be read inside a reactive context. The original
  # code read the inputs directly in the server function body, which aborts
  # the session at start-up ("Operation not allowed without an active reactive
  # context"). All input-dependent work now lives inside renderPrint(), which
  # also re-runs automatically whenever the inputs change.
  output$summary <- renderPrint({
    checkvar <- input$checkboxvalue
    useridno <- input$useidnoinput
    towatch1 <- input$tw1
    towatch2 <- input$tw2
    towatch3 <- input$tw3

    # Work on a per-call copy so the shared `ratings` table is never mutated
    # (the original overwrote the global data, leaking state between sessions).
    user_ratings <- ratings

    if (isTRUE(checkvar)) {
      # Reset the user's existing ratings to the neutral value of 3, then
      # rate the three "to watch" movies with 5 (adding rows where missing).
      user_ratings$rating <- ifelse(user_ratings$userId == useridno, 3, user_ratings$rating)
      da <- subset(user_ratings, subset = userId == useridno)
      for (towatch in c(towatch1, towatch2, towatch3)) {
        if (towatch %in% da$movieId) {
          user_ratings$rating <- ifelse(user_ratings$userId == useridno & user_ratings$movieId == towatch,
                                        5, user_ratings$rating)
        } else {
          user_ratings <- rbind(user_ratings, c(useridno, towatch, 5))
        }
      }
    }

    # Ratings matrix: rows = userId, columns = movieId.
    ratingmat <- dcast(user_ratings, userId ~ movieId, value.var = "rating", na.rm = FALSE)
    ratingmat <- as.matrix(ratingmat[, -1])  # drop the userId column

    # Convert to a recommenderlab sparse matrix and normalize.
    ratingmat <- as(ratingmat, "realRatingMatrix")
    ratingmat_norm <- normalize(ratingmat)

    # User-Based Collaborative Filtering, cosine similarity, 30 nearest neighbours.
    recommender_model <- Recommender(ratingmat_norm, method = "UBCF",
                                     param = list(method = "Cosine", nn = 30))

    # Top 10 recommendations for the selected user.
    recom <- predict(recommender_model, ratingmat[useridno], n = 10)
    recom_list <- as(recom, "list")

    # Map recommended movieIds to titles (column 2 of `movies`).
    recom_result <- matrix(0, 10)
    for (i in seq_len(10)) {
      recom_result[i] <- movies[as.integer(recom_list[[1]][i]), 2]
    }
    recom_result
  })
})
50632cd5f020e04a22d05c671aeff7825a8617e1
16e8405f836303f91ada43c6c500af188495ab1e
/test_data/TTTTTTT.R
4f10b1ce7d4e8bf0a43147af94cc4f8e7b6a068d
[]
no_license
caokai001/kcaoplot
2415da81660ab5a8eace24171a18c53d9235cf55
4c324968b7c28d6fea3aedc9921cb65db19ff57e
refs/heads/master
2020-09-20T08:40:24.566144
2020-01-06T19:58:06
2020-01-06T19:58:06
223,578,393
0
0
null
null
null
null
UTF-8
R
false
false
895
r
TTTTTTT.R
ctcf.motif=Fimo2GRanges("../TESTDATA/mynew.txt","GRanges") # 开放区域合并 dnase.peak<-bed2GRanges("../TESTDATA/E094-DNase.all.peaks.v2.bed") dnase.fdr<-bed2GRanges("../TESTDATA/E094-DNase.fdr0.01.peaks.v2.bed") dnase.mac<-bed2GRanges("../TESTDATA/E094-DNase.macs2.narrowPeak") dnase=c(dnase.peak,dnase.fdr,dnase.mac) dnase=GenomicRanges::reduce(dnase) # 读入snp信息 somatic_gastric<-bed2GRanges("../TESTDATA/41467_2018_3828_MOESM6_ESM.txt",header=TRUE) names(mcols(somatic_gastric))[1:2]<-c("Ref","Alt") somatic_gastric$id=c(1:length(somatic_gastric)) # 过滤FIMO motif.ovl<-filter_fimo(dnase,ctcf.motif) # 拓展到1kb site=filter_fimo_to_1000bp_gr(motif.ovl) # 与突变位置取交集 mut.table<-mut_pos_tb(site,somatic_gastric) # 画1kp 区间突变分布图 #plot_mutation_loci(mut.table) plot_mutation_loci(mut.table,center_expend_each=5,tf_name="NA",mutationScore_outdir="./")
a90257b31a5551a9d0e1d57320175084bfe17637
369bff8173041a70d122004d4b82a46a4197adc7
/app.R
ed3ea23fbf6cbada3c41dc7023ca9a0275b6d582
[]
no_license
MaStadel/PsyCorona-WebApp
8b66a44f11add317eb74526668bf6fb696e63627
1adf1272a03062b06363b53d62107818f5f5b2f4
refs/heads/master
2022-05-20T11:54:19.612914
2020-04-25T14:54:39
2020-04-25T14:54:39
null
0
0
null
null
null
null
UTF-8
R
false
false
60,715
r
app.R
library(shiny) library(dplyr) library(tidyr) library(ggplot2) library(stats) library(shinydashboard) #library(dygraphs) #library(RColorBrewer) library(stringr) #library(DT) library(shinyjs) library(shinyWidgets) library(r2d3) library(radarchart) library(haven) #library(leaflet) library(highcharter) library(rgeos) library(scales) library(grDevices) library(shinyalert) library(shinyBS) # R Studio Clean-Up: #cat("\014") # clear console #rm(list=ls()) # clear workspace #gc() # garbage collector #setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # usually set by project # load data: load("data/shinyDataAggregated.RData") r2d3_script <- " // !preview r2d3 data= data.frame(y = 0.1, ylabel = '1%', fill = '#E69F00', mouseover = 'green', label = 'one', id = 1) function svg_height() {return parseInt(svg.style('height'))} function svg_width() {return parseInt(svg.style('width'))} function col_top() {return svg_height() * 0.05; } function col_left() {return svg_width() * 0.25;} function actual_max() {return d3.max(data, function (d) {return d.y; }); } function col_width() {return (svg_width() / actual_max()) * 0.60; } function col_heigth() {return svg_height() / data.length * 0.95; } var bars = svg.selectAll('rect').data(data); bars.enter().append('rect') .attr('x', 150) .attr('y', function(d, i) { return i * col_heigth() + col_top(); }) .attr('width', function(d) { return d.y * col_width(); }) .attr('height', col_heigth() * 0.9) .attr('fill', function(d) {return d.fill; }) .attr('id', function(d) {return (d.label); }) .on('click', function(){ Shiny.setInputValue('bar_clicked', d3.select(this).attr('id'), {priority: 'event'}); }) .on('mouseover', function(){ d3.select(this).attr('fill', function(d) {return d.mouseover; }); }) .on('mouseout', function(){ d3.select(this).attr('fill', function(d) {return d.fill; }); }); bars.transition() .duration(500) .attr('x', 150) .attr('y', function(d, i) { return i * col_heigth() + col_top(); }) .attr('width', function(d) { return 
d.y * col_width(); }) .attr('height', col_heigth() * 0.9) .attr('fill', function(d) {return d.fill; }) .attr('id', function(d) {return d.label; }); bars.exit().remove(); // Identity labels var txt = svg.selectAll('text').data(data); txt.enter().append('text') .attr('x', width * 0.01) .attr('y', function(d, i) { return i * col_heigth() + (col_heigth() / 2) + col_top(); }) .text(function(d) {return d.label; }) .style('font-family', 'sans-serif'); txt.transition() .duration(1000) .attr('x', width * 0.01) .attr('y', function(d, i) { return i * col_heigth() + (col_heigth() / 2) + col_top(); }) .text(function(d) {return d.label; }); txt.exit().remove(); // Numeric labels var totals = svg.selectAll().data(data); totals.enter().append('text') .attr('x', function(d) { return ((d.y * col_width()) + 150) * 1.01; }) .attr('y', function(d, i) { return i * col_heigth() + (col_heigth() / 2) + col_top(); }) .style('font-family', 'sans-serif') .text(function(d) {return d.ylabel; }); totals.transition() .duration(1000) .attr('x', function(d) { return ((d.y * col_width()) + 150) * 1.01; }) .attr('y', function(d, i) { return i * col_heigth() + (col_heigth() / 2) + col_top(); }) .attr('d', function(d) { return d.x; }) .text(function(d) {return d.ylabel; }); totals.exit().remove(); " r2d3_file <- tempfile() writeLines(r2d3_script, r2d3_file) ui <- dashboardPage( title = "PsyCorona: Data Visualization", dashboardHeader(title=span( icon("fas fa-virus"), "PsyCorona Data Tool") #HTML(paste(icon("virus"), "PsyCorona Data Tool")), # dropdownMenu(type = "notifications", # notificationItem(text = "Data is currenlty not accurate", # icon = icon("warning"), # status = "warning") # ) ), dashboardSidebar( sidebarMenu( menuItem("Our Sample", tabName = "sample", icon = icon("fas fa-users")), menuItem("Psychological Variables", tabName = "Variables", icon = icon("fas fa-pencil-ruler")), menuItem("Development", tabName = "development", icon = icon("fas fa-chart-line"), badgeLabel = "coming soon", 
badgeColor = "orange"), menuItem("Data", tabName = "data", icon = icon("fas fa-share-square")), menuItem("About", tabName = "about", icon = icon("info")), menuItem(HTML(paste0("Take the Suvey Now ", icon("external-link"))), icon=icon("fas fa-file-signature"), href = "https://nyu.qualtrics.com/jfe/form/SV_6svo6J4NF7wE6tD", newtab = T)), shinyjs::useShinyjs(), tags$footer(HTML("<strong>Copyright &copy; 2020 <a href=\"https://psycorona.org/about/\" target=\"_blank\">PsyCorona</a>.</strong> <br>This work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-nd/4.0/\" target=\"_blank\">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>. <br><a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-nd/4.0/\" target=\"_blank\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png\" /></a> <br>Last updated:<br>"), latest.DateTime, id = "sideFooter", align = "left", style = " position:absolute; bottom:0; width:100%; padding: 10px; " ) #HTML("<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-nd/4.0/\"><img alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png\" /></a><br />This work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-nd/4.0/\">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.") #) ), dashboardBody( tags$script(HTML("$('body').addClass('sidebar-mini');")), tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "style.css")), tags$head(tags$meta(name = "viewport", content = "width=1600"),uiOutput("body")), tags$style( type = 'text/css', '.bg-aqua {background-color: #3c8dbe!important; } .bttn-simple.bttn-primary {background-color: #3c8dbe!important; } .btn.radiobtn.btn-primary {float: center!important; display: block; width: 150px} ' ), 
tags$style("@import url(https://use.fontawesome.com/releases/v5.13.0/css/all.css);"), tags$script(src = "https://code.highcharts.com/mapdata/custom/world.js"), tags$script(HTML(" var openTab = function(tabName){ $('a', $('.sidebar')).each(function() { if(this.getAttribute('data-value') == tabName) { this.click() }; }); }; $('.sidebar-toggle').attr('id','menu'); var dimension = [0, 0]; $(document).on('shiny:connected', function(e) { dimension[0] = window.innerWidth; dimension[1] = window.innerHeight; Shiny.onInputChange('dimension', dimension); }); $(window).resize(function(e) { dimension[0] = window.innerWidth; dimension[1] = window.innerHeight; Shiny.onInputChange('dimension', dimension); }); ")), shinyjs::useShinyjs(), tabItems( tabItem(tabName = "sample", useShinyalert(), h3("Our Sample"), bsAlert("dataAlert"), fluidRow( box(width = 12, div(style="display:inline-block;width:100%;text-align:center;", radioGroupButtons( inputId = "var", label = "Participant characteristics:", selected = "languages", status = "primary", #justified = T, #individual = T, choiceNames = c("Survey language", "Gender", "Age", "Education", "Political orientation"), choiceValues = c("languages", "gender", "age", "education", "political") #checkIcon = list(yes = icon("ok", lib = "glyphicon"), no = icon("remove", lib = "glyphicon")) ) ), h3(textOutput("sample.bar.NA"), align = "center"), d3Output("d3.bar"), textOutput("SampleTxt"), align = "center") #) ), fluidRow( box( status = "primary", width = 6, tags$strong("World Map (sample sizes)"), highchartOutput("freqPlot") ), box( shinyjs::useShinyjs(), id = "sample-controls", width = 6, #height = "600px", title = "Controls", solidHeader = T, status = "primary", #"Use these controls to ", #br(), multiInput( inputId = "sample_country_selection", label = "Countries (all countries n > 20):", choices = NULL, choiceNames = lapply(seq_along(ctry.only.red$coded_country), function(i) tagList(tags$img(src = ctry.only.red$flag[i], width = 20, height = 15), 
ctry.only.red$coded_country[i], paste0(" (n=",prettyNum(ctry.only.red$n[i], big.mark=",", scientific=FALSE),")"))), choiceValues = ctry.only.red$coded_country, selected = ctry.only.red$coded_country ), hr(), div(style="display:inline-block;width:100%;text-align: center;", actionBttn( inputId = "sample_country_none", label = "None", style = "simple", color = "primary", size = "sm"), HTML("&nbsp;&nbsp;"), actionBttn( inputId = "sample_country_all", label = "All", style = "simple", color = "primary", size = "sm") ) ) ) ), tabItem(tabName = "Variables", box(width = 12, solidHeader = TRUE, navbarPage(title = "Domain:", id = "dataTabs", tabPanel("Government Reponse", value = 1, highchartOutput("boxGov") ), tabPanel("Community Response", value = 2, div(style="display:inline-block;width:100%;text-align: center;", radioGroupButtons( inputId = "ComVars", #label = "", selected = "comRule", #justified = TRUE, status = "primary", choiceNames = c("Rules", "Punishment", "Organization"), choiceValues = c("comRule", "comPunish", "comOrg") #checkIcon = list(yes = icon("ok", lib = "glyphicon"), no = icon("remove", lib = "glyphicon")) ) ), highchartOutput("boxCom") ), tabPanel("Cognitive Response", value = 3, #Financial strain(?), job insecurity (?)", div(style="display:inline-block;width:100%;text-align: center;", radioGroupButtons( inputId = "CogVars", #label = "Variable:", selected = "covidHope", #justified = TRUE, status = "primary", choiceNames = c("Hope", "Efficacy", "Loneliness", "Paranoia", "Conspiracy"), choiceValues = c("covidHope", "covidEff", "lone", "para", "consp") #checkIcon = list(yes = icon("ok", lib = "glyphicon"), no = icon("remove", lib = "glyphicon")) ) ), highchartOutput("boxCog") ), tabPanel("Behavioral Response", value = 4, #"isolation, beh", div(style="display:inline-block;width:100%;text-align: center;", radioGroupButtons( inputId = "BehVars", #label = "Variable:", selected = "behWash", #justified = TRUE, status = "primary", choiceNames = c("Washing", 
"Avoiding", "Social Contact"), choiceValues = c("behWash", "behAvoid", "iso") ) ), htmlOutput("boxBeh") ), tabPanel("Emotional Response", value = 5, column(12, align="center", tags$b("Individual Emotions "), prettySwitch( inputId = "categorySwitch", label = tags$b("Emotional Categories"), status = "success", fill = TRUE, inline = TRUE, value = TRUE ) ), chartJSRadarOutput('affect', height = "125", width = "400") ), tabPanel("Cross Domain Relationships", value = 6, highchartOutput("cor") ) ) ), conditionalPanel( condition = "input.dataTabs != 6", box(width = 12, solidHeader = T, status = "primary", multiInput( inputId = "psych_country_selection", label = "Countries (all countries n > 20):", choices = NULL, choiceNames = lapply(seq_along(ctry.red$coded_country), function(i) tagList(tags$img(src = ctry.red$flag[i], width = 20, height = 15), ctry.red$coded_country[i], paste0(" (n=",prettyNum(ctry.red$n[i], big.mark=",", scientific=FALSE),")"))), choiceValues = ctry.red$coded_country, selected = "global" ) ) ), conditionalPanel( condition = "input.dataTabs == 6", box(width = 12, solidHeader = T, status = "primary", h4("Select Variables:"), column(width = 6, pickerInput( inputId = "CorX", label = "X Axis:", #width = "80%", choices = list( Government = c("Clear Government Information" = "gov"), Community = c("Rules"="comRule", "Punishment"="comPunish", "Organization"="comOrg"), Cognitive = c("Hope"="covidHope", "Efficacy"="covidEff", "Loneliness"="lone", "Paranoia"="para", "Conspiracy"="consp"), Behavior = c("Washing"="behWash", "Avoiding"="behAvoid", "Isolation Offline"="isoPers", "Isolation Online"="isoOnl"), Emotion = c("Anxious"="affAnx", "Bored"="affBor", "Calm"="affCalm", "Content"="affContent", "Depressed"="affDepr", "Energetic"="affEnerg", "Excited"="affExc", "Nervous"="affNerv", "Exhausted"="affExh", "Inspired"="affInsp", "Relaxed"="affRel"), EmtionCat = c("High Arousal Positive"="affHighPos", "High Arousal Negative"="affHighNeg", "Low Arousal 
Positive"="affLowPos", "Low Arousal Negative"="affLowNeg") ) ) ), column(width = 6, pickerInput( inputId = "CorY", label = "Y Axis:", #width = "80%", choices = list( Government = c("Clear Government Information" = "gov"), Community = c("Rules"="comRule", "Punishment"="comPunish", "Organization"="comOrg"), Cognitive = c("Hope"="covidHope", "Efficacy"="covidEff", "Loneliness"="lone", "Paranoia"="para", "Conspiracy"="consp"), Behavior = c("Washing"="behWash", "Avoiding"="behAvoid", "Isolation Offline"="isoPers", "Isolation Online"="isoOnl"), Emotion = c("Anxious"="affAnx", "Bored"="affBor", "Calm"="affCalm", "Content"="affContent", "Depressed"="affDepr", "Energetic"="affEnerg", "Excited"="affExc", "Nervous"="affNerv", "Exhausted"="affExh", "Inspired"="affInsp", "Relaxed"="affRel"), EmtionCat = c("High Arousal Positive"="affHighPos", "High Arousal Negative"="affHighNeg", "Low Arousal Positive"="affLowPos", "Low Arousal Negative"="affLowNeg")), selected = "comPunish" ) ), hr(), h4("Select Region:"), multiInput( inputId = "cor_country_selection", label = "Countries (all countries n > 20):", choices = NULL, choiceNames = lapply(seq_along(ctry.only.red$coded_country), function(i) tagList(tags$img(src = ctry.only.red$flag[i], width = 20, height = 15), ctry.only.red$coded_country[i], paste0(" (n=",prettyNum(ctry.only.red$n[i], big.mark=",", scientific=FALSE),")"))), choiceValues = ctry.only.red$coded_country, selected = ctry.only.red$coded_country ), div(style="display:inline-block;width:100%;text-align: center;", actionBttn( inputId = "cor_country_none", label = "None", style = "simple", color = "primary", size = "sm"), HTML("&nbsp;&nbsp;"), actionBttn( inputId = "cor_country_all", label = "All", style = "simple", color = "primary", size = "sm") ) ) ) ), tabItem(tabName = "development", h2("Development over Time"), fluidRow( box(#title = "Explore The Data", width = 12, heigth = "500px", solidHeader = TRUE, tags$br(), tags$br(), h4(HTML(paste("<center>","As our efforts grow 
over time we will share data describing developments over time.", "</center>"))), h4(HTML(paste("<center>","As soon as we have multiple data waves you can explore this data here.", "</center>"))), tags$br() ) ) ), tabItem(tabName = "data", fluidRow( box(title = "Data Protection", width = 12, solidHeader = TRUE, HTML("To protect the privacy and confidentiality of our participants, this application only uses aggregate, anonymized data (i.e., no individual person is identifiable). <br><b>Anonymization:</b><br> <ol> <li>We use <b>data aggregation</b> as the main method of data anonymization. This means that we never show data of a single person or small groups of people, instead we combine data of multiple people to show country-level summary statistics. As an example, you can see the average level of hope in a country and how much variation there was in the responses but you cannot see the rating of any individual respondent. <br>Importantly, the application only has access to the country-level summaries, which means that data cannot be linked or combined to single out individuals (e.g., you cannot see the level of hope for U.S. women, aged 25-34, who speak Farsi and have a doctorate degree). <br><i>Note:</i> For aggregate data to effectively be anonymous we need to combine the responses of enough people, which is why we never display data of countries with less than 20 respondents.</li> <li>When we show summaries of categorical data (e.g., percentage of people identifying as female), we additionally apply <b>data perturbations</b> for small groups. This means that the counts and percentages for groups that comprise less than 20 people (less than 50 for political orientation) have been slightly changed (e.g., a random number between -2 and 2 or between -5 and 5 has been added; this process is also sometimes referred to as 'artificial noise addition'). <br>Please note that this also means that the numbers are not always 100% accurate. 
However, with this method, we can represent the full diversity of our sample while still protecting the identities of people in small or vulnerable groups.</li> </ol> These are the main ways in which we protect the personal data of our participants and make sure that no individual is identifiable within the application. If you have any questions or concerns, please feel free to contact us at <a href=\"mailto:psycorona@rug.nl?Subject=Data%20web%20application\" target=\"_top\">psycorona@rug.nl</a>. <br><b>Access:</b> <br> Access to person-level data will never be made available without thorough vetting by our editorial board (see data sharing below). And even then, we only share anonymized or pseudonymized data with active academic collaborators."), tags$br(), tags$br() ), box(title = "Data Collaboration", width = 12, solidHeader = TRUE, "One major aim of the PsyCorona initiative is to combine psychological reactions with local, regional, and national data on the Covid-19 spread and governmental reactions towards it. In our efforts we collaborate with ", tags$a(href="https://dataversuscorona.com/", target="_blank", "Data Scientists Against Corona"), "and are working on bringing together the ", tags$a(href="https://github.com/cjvanlissa/COVID19_metadata", target="_blank", "Covid-19 Metadata"), "repository with anonymized regional data from the PsyCorona initiative.", tags$br(), tags$br() ), box(title = "Data Sharing", width = 12, solidHeader = TRUE, "The aim of the PsyCorona initiative is to build a collaborative research network. If you are interested in becoming part of the PsyCorona initiative, you contact us via our website: ", tags$a(href="https://psycorona.org/", target="_blank", "www.psycorona.org."), tags$br(), tags$br(), "Part of data sharing is also open availability of code. 
The code to this web applet is available at", tags$a(href="https://github.com/JannisCodes/PsyCorona-WebApp", target="_blank", "our git repository."), tags$br(), tags$br() ) ) ), tabItem(tabName = "about", h3("Welcome to the PsyCorona Data Tool"), br(), fluidRow( box(#title = "Explore The Data", width = 8, heigth = "500px", solidHeader = TRUE, h4("The Initiative:"), "Psychology and culture could affect the spread of the virus; human psychology could also change in response to the pandemic. We aim to mobilize behavioral and data scientists to identify targets for rapid intervention to slow the spread of the pandemic and minimize its social damage. All the scientists on the team are operating on a largely volunteer basis, relying on existing infrastructure and their own resources. This is a global project based at New York University-Abu Dhabi and the University of Groningen (The Netherlands). We have evolved into a distributed team across the world, with autonomous work groups in numerous countries, each of whom understands the PsyCorona mission goals and needs. We aim to ensure global involvement, so we are translating the survey into more languages on a daily basis. Currently more than 100 international social scientists are working together to collect immediate and longitudinal information on the key social science factors that might predict the spread of COVID-19. 
The project, is documented in detail on our", tags$a(href="https://www.psycorona.org", target="_blank", "PsyCorona website."), " We pair social- and data scientists to connect data across multiple layers—individual survey reports of", prettyNum(sum(world.n$n), big.mark=" ", scientific=FALSE), "participants from more than",length(world.n$coded_country)-1,"countries, satellite data documenting social distancing, and World Health Organization data on county level spread of the disease.", br(), "You can find the PsyCorona Initiative on: ", tags$a(href="https://www.facebook.com/PsyCorona-100726584925027", target="_blank", icon("facebook")), HTML("&nbsp"), tags$a(href="https://github.com/JannisCodes/PsyCorona-WebApp", target="_blank", icon("github")), br(), br(), h4("What You Can Do Here:"), "This applet has ", tags$b("four main interactive sections"), " that enable visitors to directly interact with the PsyCorona data: ", tags$ul( tags$li("The ", a("Data", onclick = "openTab('data')", href="#"), "tab provides information on how we deal with our participants' data and how you can get involved in data analysis. Here we also share information on our open-source code and connections to meta data repositories we are aiming to connect to the psychological responses we measure during the PsyCorona initiative.")), "The remaining three tabs offer tools to visualize the psychological data we collect in this project.", tags$ul( tags$li("The ", a("Our Sample", onclick = "openTab('sample')", href="#"), " tab offers an insight into the diversity of our participants. We share compound information on some demographic variables, as well as the number of participants we have reached in each country. 
Please note that to protect the privacy and anonymity of our participants data visualizations are only available for selections of more than 20 people."), tags$li("The ", a("Psychological Variables", onclick = "openTab('Variables')", href="#"), " tab offers an interactive interface to explore the psychological variables we collect in the initiative's baseline survey. This survey is open to anyone interested at", tags$a(href="https://nyu.qualtrics.com/jfe/form/SV_6svo6J4NF7wE6tD", target="_blank", "tiny.cc/corona_survey"), "and currently includes over", prettyNum(sum(world.n$n), big.mark=" ", scientific=FALSE), "participants. You can explore psychological reactions to the coronavirus at five different levels: (1) Governmental Response, (2) Community Response, (3) Cognitive Response, (4) Behavioral Response, as well as (5) Emotional Reponse. Additionally, we offer a tool to explore the mean level relationship between different variables for different countries. Please note that to protect the privacy and anonymity of our participants we only provide country-level visualizations once we have data for more than 20 people from any particular country."), tags$li("The ", a("Development", onclick = "openTab('development')", href="#"), " tab gives you the possibility to interactively explore how different areas are evolving over time. 
This section is currently under construction, but will be available as soon as we finish data collection of developmental data that can be modeled over time.") ) ), box(width = 4, HTML("<a class=\"twitter-timeline\" data-height=\"600\" href=\"https://twitter.com/FortuneMagazine/lists/coronavirus-updates?ref_src=twsrc%5Etfw\">A Twitter List by FortuneMagazine</a> <script async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"></script>") ) ), fluidRow( valueBox(prettyNum(sum(world.n$n), big.mark=" ", scientific=FALSE), "Participants", icon = icon("user-edit"), width = 4), valueBox(sum(str_count(names(ctry.scales), c("language")))-1, "Languages", icon = icon("language"), width = 4), valueBox("100+", "Researchers", icon = icon("user-graduate"), width = 4)#, #valueBox(404, "Something", icon = icon("project-diagram"), width = 3) ) ) ) ) ) server <- function(input, output, session) { # observeEvent(input$dimension[1], { # if (input$dimension[1] <= 767) { # shinyalert(title = "Small Screen Detected", # text = "Some elements of this application are not optimized for smaller screens. # If you have problems viewing some of the graphical displays please try to use a desktop or tablet device with a larger screens. # Some graphs might be visible if you use your device in landscape mode.", # type = "info", # animation = TRUE, # confirmButtonCol = "#3b738f" # ) # } else { # } # }) # shinyalert(title = "Mobile Version", # text = "This application is currently in development. # To protect the privacy and confidentiality of our participants this beta version relies on simulated data.", # type = "info", # animation = TRUE, # confirmButtonCol = "#3b738f" # ) createAlert(session = session, anchorId = "dataAlert", #alertId="a1", title = paste(icon("warning"),"Data Notification"), content="To protect the privacy of everyone who took our survey, this application only uses aggregate, anonymized data (i.e., no individual person is identifiable). 
For further information see our <a href='#' onclick=\"openTab('data')\">data description section</a>. Bear in mind that we display data collected over the past weeks. This means the data might not be representative of how countries are doing right now.", style = "warning") output$sample.bar.NA <- renderText({ #input <- list(var = "language", sample_country_selection = c("France", "Germany")) test <- ctry.scales %>% filter(coded_country %in% input$sample_country_selection) %>% select(starts_with(input$var)) %>% t() %>% as.data.frame() colnames(test) <- input$sample_country_selection test <- test %>% mutate(n = rowSums(., na.rm=TRUE), label = str_replace(rownames(.), ".*_", "")) %>% filter(n>0, label != "<NA>") ifelse(sum(test$n)<20, "Not enough data to display summary","") }) output$SampleTxt <- renderText({ #input <- list(var = "language", sample_country_selection = c("France", "Germany")) explanation <- list(languages = "I have high hopes that the situation regarding coronavirus will improve. [Mean and 95%CI]", gender = "I think that this country is able to fight the Coronavirus. 
[Mean and 95%CI]", age = "Mean Loneliness Scores [Mean and 95%CI]", education = "Mean State Paranoia Scores [Mean and 95%CI]", political = "Mean Conspiracy Scores [Mean and 95%CI]") explanation[[input$var]] }) output$d3.bar <- renderD3({ #input <- list(var = "language", sample_country_selection = c("France", "Germany")) #input <- list(var = "gender", sample_country_selection = c("Poland", "Romania", "Albania")) dem <- ctry.scales %>% filter(coded_country %in% input$sample_country_selection) %>% select(starts_with(input$var)) %>% t() %>% as.data.frame() colnames(dem) <- input$sample_country_selection dem %>% mutate(n = rowSums(., na.rm=TRUE), label = str_replace(rownames(.), ".*_", "")) %>% arrange(desc(n)) %>% filter(n > 0, label != "<NA>") %>% mutate(y = n, ylabel = scales::percent(n/sum(n), accuracy = 0.01), #prettyNum(n/sum(n)*100, big.mark = ",", format = "f", digits = 2), fill = "#3b738f", #ifelse(label != input$val, "#E69F00", "red"), mouseover = "#2a5674") %>% r2d3(r2d3_file) }) output$freqPlot <- renderHighchart({ hcmap(download_map_data = FALSE, data = world.n %>% filter(coded_country %in% input$sample_country_selection), value = "n", joinBy = c("iso-a2", "iso_a2"), name = "sample size", #dataLabels = list(enabled = TRUE, format = '{point.name}'), borderColor = "#FAFAFA", borderWidth = 0.1, tooltip = list(valueDecimals = 0, valuePrefix = "n = "))%>% hc_mapNavigation(enabled = TRUE) %>% hc_colorAxis(minColor = "#c4e6c3", maxColor = "#1d4f60", type = "logarithmic") }) #Color schemes: https://carto.com/carto-colors/ output$boxGov <- renderHighchart({ # for testing: # input = list(psych_country_selection = c("global")) governmentRed <- ctry.scales %>% select(coded_country, starts_with("gov")) %>% filter(coded_country %in% input$psych_country_selection) %>% mutate(mean = gov, low = gov-1.96*gov.se, high = gov+1.96*gov.se) %>% arrange(desc(mean)) tooltipJS <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + 
Math.round((this.point.mean + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.low + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.high + Number.EPSILON) * 100) / 100) }") categories <- c("0" , "1<br>unclear", "2", "3", "4", "5", "6<br>clear") highchart() %>% hc_chart(type = "bar") %>% hc_add_series(governmentRed, "errorbar", hcaes(x = coded_country, low = low, high = high), color = "#3b738f") %>% hc_add_series(governmentRed, "scatter", hcaes(x = coded_country, y = mean), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = "To what extent are you getting clear, unambiguous messages about what to do about the Coronavirus? [Mean and 95%CI]") %>% hc_xAxis(categories = as.list(governmentRed$coded_country)) %>% hc_yAxis(showFirstLabel = T, showLastLabel = T, min = 1, max = 6, #step = 1, #list(formatter = JS(gov.labs)), #rotation = 0, categories = categories, #align = "center", tickmarkPlacement = seq(1,6,1)) %>% hc_tooltip(formatter = JS(tooltipJS)) }) output$boxCom <- renderHighchart({ # for testing: # input = list(ComVars = "comRule", psych_country_selection = c("Germany", "France")) communityRed <- ctry.scales %>% dplyr::select(coded_country, n, mean = one_of(input$ComVars), #sd = one_of(paste0(input$ComVars,".sd")), se = one_of(paste0(input$ComVars,".se"))) %>% filter(coded_country %in% input$psych_country_selection) %>% mutate(low = mean - qt(1 - (0.05 / 2), n - 1) * se, high = mean + qt(1 - (0.05 / 2), n - 1) * se) %>% arrange(desc(mean)) tooltipJS <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + Math.round((this.point.mean + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.low + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.high + Number.EPSILON) * 100) / 100) }") title.txt <- list(comRule = "To 
what extent is your commmunity developing strict rules in response to the Coronavirus? [Mean and 95%CI]", comPunish = "To what extent is your commmunity punishing people who deviate from the rules that have been put in place in response to the Coronavirus? [Mean and 95%CI]", comOrg = "To what extent is your commmunity well organized in responding to the Coronavirus? [Mean and 95%CI]") categories <- c("0" , "1<br>not at all", "2", "3", "4", "5", "6<br>very much") highchart() %>% hc_chart(type = "bar") %>% hc_add_series(communityRed, "errorbar", hcaes(x = coded_country, low = low, high = high), color = "#3b738f") %>% hc_add_series(communityRed, "scatter", hcaes(x = coded_country, y = mean), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = title.txt[[input$ComVars]]) %>% hc_xAxis(categories = as.list(communityRed$coded_country)) %>% hc_yAxis(showFirstLabel = T, showLastLabel = T, min = 1, max = 6, #step = 1, #list(formatter = JS(gov.labs)), #rotation = 0, categories = categories, #align = "center", tickmarkPlacement = seq(1,6,1)) %>% hc_tooltip(formatter = JS(tooltipJS)) }) output$boxCog <- renderHighchart({ # for testing: # input = list(CogVars = "covidHope", cog_country_selection = c("Germany", "France")) cognitiveRed <- ctry.scales %>% dplyr::select(coded_country, n, mean = one_of(input$CogVars), #sd = one_of(paste0(input$ComVars,".sd")), se = one_of(paste0(input$CogVars,".se"))) %>% filter(coded_country %in% input$psych_country_selection) %>% mutate(low = mean - qt(1 - (0.05 / 2), n - 1) * se, high = mean + qt(1 - (0.05 / 2), n - 1) * se) %>% arrange(desc(mean)) tooltipJS <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + Math.round((this.point.mean + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.low + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.high + 
Number.EPSILON) * 100) / 100) }") title.txt <- list(covidHope = "I have high hopes that the situation regarding coronavirus will improve. [Mean and 95%CI]", covidEff = "I think that this country is able to fight the Coronavirus. [Mean and 95%CI]", lone = "Mean Loneliness Scores [Mean and 95%CI]", para = "Mean State Paranoia Scores [Mean and 95%CI]", consp = "Mean Conspiracy Scores [Mean and 95%CI]") y.min <- list(covidHope = -3, covidEff = -3, lone = 1, para = 0, consp = 0) y.max <- list(covidHope = 3, covidEff = 3, lone = 5, para = 10, consp = 10) lab.ticks <- list(covidHope = c("0"), covidEff = c("0"), lone = c("0"), para = c("0"), consp = c("0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%")) lab.breaks <- list(covidHope = seq(0,7,1), covidEff = seq(0,7,1), lone = seq(1,5,1), para = seq(0,10,1), consp = seq(0,10,1)) lab.ends <- list(covidHope = c("-3<br>disagree", "3<br>agree"), covidEff = c("-3<br>disagree", "3<br>agree"), lone = c("1<br>Never", "5<br>All the time"), para = c("0<br>Not at all", "10<br>Very much"), consp = c("0%", "100%")) lab.ends.js <- paste0("function(){console.log(this); if(this.isFirst){ return '", lab.ends[input$CogVars][[1]][1], "'} else if(this.isLast) {return '", lab.ends[input$CogVars][[1]][2], "'} else { return this.value } }") highchart() %>% hc_chart(type = "bar") %>% hc_add_series(cognitiveRed, "errorbar", hcaes(x = coded_country, low = low, high = high), color = "#3b738f") %>% hc_add_series(cognitiveRed, "scatter", hcaes(x = coded_country, y = mean), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = title.txt[[input$CogVars]]) %>% hc_xAxis(categories = as.list(cognitiveRed$coded_country)) %>% hc_yAxis(showFirstLabel = T, showLastLabel = T, min = y.min[[input$CogVars]], max = y.max[[input$CogVars]], tickInterval = 1, #step = 1, labels = list(formatter = JS(lab.ends.js)), #rotation = 0, categories = 
lab.ticks[[input$CogVars]], #align = "center", tickmarkPlacement = lab.breaks[[input$CogVars]]) %>% hc_tooltip(formatter = JS(tooltipJS)) }) output$boxBeh <- renderUI({ # for testing: # input = list(BehVars = "avoid", beh_country_selection = c("Germany", "France")) if (input$BehVars == "iso") { behaviorRedIso <- ctry.scales %>% dplyr::select(coded_country, n, isoPers, isoOnl, isoPers.se, isoOnl.se) %>% filter(coded_country %in% input$psych_country_selection) %>% mutate(lowPers = isoPers - qt(1 - (0.05 / 2), n - 1) * isoPers.se, highPers = isoPers + qt(1 - (0.05 / 2), n - 1) * isoPers.se, lowOnl = isoOnl - qt(1 - (0.05 / 2), n - 1) * isoOnl.se, highOnl = isoOnl + qt(1 - (0.05 / 2), n - 1) * isoOnl.se) %>% arrange(desc(isoPers)) tooltipJSPers <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + Math.round((this.point.isoPers + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.lowPers + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.highPers + Number.EPSILON) * 100) / 100) }") tooltipJSOnl <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + Math.round((this.point.isoOnl + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.lowOnl + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.highOnl + Number.EPSILON) * 100) / 100) }") hcPers <- highchart() %>% hc_chart(type = "bar") %>% hc_add_series(behaviorRedIso, "errorbar", hcaes(x = coded_country, low = lowPers, high = highPers), color = "#3b738f") %>% hc_add_series(behaviorRedIso, "scatter", hcaes(x = coded_country, y = isoPers), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = "Number of days per week with <b>in-person</b> contacts") %>% hc_xAxis(categories = as.list(behaviorRedIso$coded_country)) %>% hc_yAxis(min = 0, max = 7, 
categories = seq(0,7,1), tickmarkPlacement = seq(0,7,1)) %>% hc_tooltip(formatter = JS(tooltipJSPers)) hcOnli <- highchart() %>% hc_chart(type = "bar") %>% hc_add_series(behaviorRedIso, "errorbar", hcaes(x = coded_country, low = lowOnl, high = highOnl), color = "#3b738f") %>% hc_add_series(behaviorRedIso, "scatter", hcaes(x = coded_country, y = isoOnl), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = "Number of days per week with <b>online</b> contacts") %>% hc_xAxis(categories = as.list(behaviorRedIso$coded_country)) %>% hc_yAxis(min = 0, max = 7, categories = seq(0,7,1), tickmarkPlacement = seq(0,7,1)) %>% hc_tooltip(formatter = JS(tooltipJSOnl)) lst <- list(hcPers, hcOnli) hw_grid(lst, ncol = 2, rowheight = "400") } else { behaviorRed <- ctry.scales %>% dplyr::select(coded_country, n, mean = one_of(input$BehVars), #sd = one_of(paste0(input$ComVars,".sd")), se = one_of(paste0(input$BehVars,".se"))) %>% filter(coded_country %in% input$psych_country_selection) %>% mutate(low = mean - qt(1 - (0.05 / 2), n - 1) * se, high = mean + qt(1 - (0.05 / 2), n - 1) * se) %>% arrange(desc(mean)) tooltipJS <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> Mean: ' + Math.round((this.point.mean + Number.EPSILON) * 100) / 100 + ' <br> Lower Limit: ' + Math.round((this.point.low + Number.EPSILON) * 100) / 100 + ' <br> Upper Limit: ' + Math.round((this.point.high + Number.EPSILON) * 100) / 100) }") title.txt <- list(behWash = "To minimize my chances of getting corona virus I wash my hands more often. [Mean and 95%CI]", behAvoid = "To minimize my chances of getting corona virus I avoid crowded spaces. 
[Mean and 95%CI]") y.min <- list(behWash = -3, behAvoid = -3) y.max <- list(wash = 3, behAvoid = 3) lab.ends <- list(behWash = c("-3<br>disagree", "3<br>agree"), behAvoid = c("-3<br>disagree", "3<br>agree")) lab.ends.js <- paste0("function(){console.log(this); if(this.isFirst){ return '", lab.ends[input$BehVars][[1]][1], "'} else if(this.isLast) {return '", lab.ends[input$BehVars][[1]][2], "'} else { return this.value } }") highchart() %>% hc_chart(type = "bar") %>% hc_add_series(behaviorRed, "errorbar", hcaes(x = coded_country, low = low, high = high), color = "#3b738f") %>% hc_add_series(behaviorRed, "scatter", hcaes(x = coded_country, y = mean), color = "#3b738f", marker = list(symbol = "diamond", radius = 5, enabled = TRUE), animation = list(duration = 1900), showInLegend = FALSE) %>% hc_title(text = title.txt[[input$BehVars]]) %>% hc_xAxis(categories = as.list(behaviorRed$coded_country)) %>% hc_yAxis(showFirstLabel = T, showLastLabel = T, min = y.min[[input$BehVars]], max = y.max[[input$BehVars]], tickInterval = 1, labels = list(formatter = JS(lab.ends.js)), categories = c("0"), tickmarkPlacement = seq(0,7,1)) %>% hc_tooltip(formatter = JS(tooltipJS)) %>% hw_grid(ncol = 1, rowheight = "400") } }) output$affect <- renderChartJSRadar({ # for testing: # input = list(categorySwitch = TRUE, sample_country_affect = c("global", "Germany")) if (input$categorySwitch == TRUE) { labs <- c("High Arousal Positive", "High Arousal Negative", "Low Arousal Positive", "Low Arousal Negative") vars <- c("affHighPos", "affHighNeg", "affLowPos", "affLowNeg") } else { labs <- c("Inspired", "Excited", "Nervous", "Anxious", "Calm", "Content", "Relaxed", "Bored", "Depressed", "Exhausted", "Energetic") vars <- c("affInsp", "affExc", "affNerv", "affAnx", "affCalm", "affContent", "affRel", "affBor", "affDepr", "affExh", "affEnerg") } radar <- data.frame("label" = labs, t( ctry.scales %>% filter(coded_country %in% input$psych_country_selection) %>% dplyr::select(one_of(vars)) ) ) 
names(radar) <- c("label", input$psych_country_selection) chartJSRadar(radar, maxScale = 5, showToolTipLabel=TRUE, showLegend = T, responsive = T, labelSize = 12) }) output$cor <- renderHighchart({ # for testing: # input = list(CorX = "covidHope", CorY = "covidEff") cor.dat <- ctry.scales %>% dplyr::select(coded_country, n, xvar = one_of(input$CorX), yvar = one_of(input$CorY)) %>% filter(coded_country != "global", coded_country %in% input$cor_country_selection) varLab <- c("gov"="Clear Government Information", "comRule"="Community Rules", "comPunish"="Community Punishment", "comOrg"="Community Organization", "covidHope"="Hope", "covidEff"="Efficacy", "lone"="Loneliness", "para"="State Paranoia", "consp"="Conspiracy", "behWash"="Washing", "behAvoid"="Avoiding", "isoPers"="Isolation Offline", "isoOnl"="Isolation Online", "affAnx"="Anxious", "affBor"="Bored", "affCalm"="Calm", "affContent"="Content", "affDepr"="Depressed", "affEnerg"="Energetic", "affExc"="Excited", "affNerv"="Nervous", "affExh"="Exhausted", "affInsp"="Inspired", "affRel"="Relaxed", "affHighPos"="High Arousal Positive", "affHighNeg"="High Arousal Negative", "affLowPos"="Low Arousal Positive", "affLowNeg"="Low Arousal Negative") if (nrow(cor.dat) == 0) { highchart() %>% hc_title(text = "Select Countries to Display") } else { tooltipJS <- paste0("function(){ return ('Country: ' + this.point.coded_country + ' <br> ",varLab[input$CorX],": ' + Math.round((this.x+ Number.EPSILON) * 100) / 100 + ' <br> ",varLab[input$CorY],": ' + Math.round((this.y+ Number.EPSILON) * 100) / 100 + ' <br> Sample Size: ' + this.point.n) }") highchart() %>% hc_add_series(data = cor.dat, type = "bubble", mapping = hcaes(x = xvar, y = yvar, z = n), color = "#3b738f", alpha = 0.5, minSize = 8, maxSize = "30%", showInLegend = F ) %>% hc_title(text = "PsyCorona Bubble Chart") %>% hc_xAxis(title = list(text = as.character(varLab[input$CorX]))) %>% hc_yAxis(title = list(text = as.character(varLab[input$CorY]))) %>% hc_tooltip(formatter 
= JS(tooltipJS)) } }) observeEvent(input$reset_input_ctry, { shinyjs::reset("country_controls") }) observeEvent(input$sample_country_all, { updateMultiInput( session = session, inputId = "sample_country_selection", selected = ctry.only.red$coded_country ) }) observeEvent(input$sample_country_none, { updateMultiInput( session = session, inputId = "sample_country_selection", selected = character(0) ) }) observeEvent(input$cor_country_all, { updateMultiInput( session = session, inputId = "cor_country_selection", selected = ctry.only.red$coded_country ) }) observeEvent(input$cor_country_none, { updateMultiInput( session = session, inputId = "cor_country_selection", selected = character(0) ) }) shinyjs::onclick("menu", shinyjs::toggle(id = "sideFooter", anim = F)) shiny:::flushReact() } # Run the application shinyApp(ui = ui, server = server)
314adff505faeed032559528fb0d0eb1a0e6166b
c26390b0aaf512666dfae5003ebac48a2bad0e0a
/tests/testthat/helper-generate.R
f65ea9f84046e1cad358347e86198b393543688e
[]
no_license
cran/OrthoPanels
25edd572167c9803cbc4e7ffbc48a56aeb5c09da
e97143db293bdcc810a5d1daf5ad1c309e1d3ddf
refs/heads/master
2022-06-30T19:49:52.617343
2022-06-09T04:20:01
2022-06-09T04:20:01
48,085,305
0
0
null
null
null
null
UTF-8
R
false
false
1,052
r
helper-generate.R
#' Simulate a balanced dynamic-panel data set for testing.
#'
#' Generates N units observed at T+1 time points (t = 0..T) with K regressors,
#' an AR(1) outcome, and unit fixed effects, optionally knocking out a few
#' unit/time cells to mimic drop-outs.
#'
#' NOTE: the parameter `T` shadows the base constant `TRUE`'s alias `T` inside
#' this function; the name is kept for backward compatibility with callers.
#'
#' @param N number of cross-sectional units.
#' @param T number of time periods after the initial one (t runs 0..T).
#' @param K number of regressors.
#' @param rho AR(1) coefficient on the lagged outcome.
#' @param beta length-K coefficient vector on the regressors.
#' @param f length-N vector of unit fixed effects.
#' @param drop_outs number of units that drop out at one random t > 0.
#' @param sd.y standard deviation of the outcome noise.
#' @return list(x = (T+1) x K x N array, y = (T+1) x N matrix).
generate_data <- function(N, T, K = 1, rho = 0.5, beta = rep(0.5, K),
                          f = rep(-1:1, length = N),
                          drop_outs = 0, sd.y = 1) {
    stopifnot(length(beta) == K, drop_outs <= N)

    ## $x_i = 0.75 f + N(0, 1)$; built as N x K x (T+1)
    x <- array(0.75 * f, dim = c(N, K, T + 1)) + rnorm(N * K * (T + 1))
    # rearrange X so that T is the first dimension and N the last
    x <- aperm(x, 3:1)

    ## $y_{i,t} = \rho y_{i,t-1} + \beta x_{i,t} + f_i + N(0, sd.y)$
    ## (y_{i,-1} is taken to be 0 for the initial period)
    y <- matrix(0, T + 1, N)
    for (t in seq_len(T + 1)) {
        yy <- if (t > 1) y[t - 1, ] else 0
        y[t, ] <- f + rho * yy + beta %*% x[t, , ] + rnorm(N, sd = sd.y)
    }

    ## generate the required number of drop-outs (assume none at T=0):
    ## each sampled unit loses exactly one randomly chosen period t > 1
    for (i in sample(N, drop_outs)) {
        t <- sample(T, 1) + 1
        x[t, , i] <- NA
        y[t, i] <- NA
    }

    list(x = x, y = y)
}

#source('../../R/opm.R')
#source('../../R/OrthoPanels-package.R')
#source('../../R/rho.R')
#source('../../R/sample.R')
569f81a36e83164570862ff2ac537163a894a7e8
02451759a7ffe3d83fc2f09864e0516bbfc03ee8
/man/nsRFA-internal.Rd
6335a990f1c89196e630a0e8c9e69a7a31653e6c
[]
no_license
cran/nsRFA
feb117ac5c0041ce4595c6dcf239f81dd7680c00
fd22cb58650c024d5266df29ea263e4db6dcf5be
refs/heads/master
2020-06-06T21:58:59.800744
2020-02-26T15:50:02
2020-02-26T15:50:02
17,697,987
2
1
null
null
null
null
UTF-8
R
false
false
2,242
rd
nsRFA-internal.Rd
% 2005-09-23, Alberto Viglione % \name{nsRFA-internal} \alias{nsRFA-internal} \alias{ksampleA2} \alias{nonparboot} %\alias{test.GOFmontecarlo} %\alias{typeIerrorA2_GOFlaio} %\alias{fw2} %- Also NEED an `\alias' for EACH other topic documented here. \title{Internal functions} \description{ User-level objects which are for \sQuote{internal} use only \code{ksampleA2} provides the Anderson-Darling test statistic; \code{nonparboot} provides a non-parametric bootstrap; % \code{test.GOFmontecarlo} is used to test the type I error of goodness of fit tests \code{GOFmontecarlo}. % \code{typeIerrorA2_GOFlaio} is used to test the type I error of goodness of fit tests \code{A2_GOFlaio}. % \code{fw2} is the approximation of the probability distribution of \code{w} (first 2 terms) when \eqn{H_0} is true (Anderson-Darling, 1952); it is used by \code{A2_GOFlaio}. } \usage{ ksampleA2 (x,cod) nonparboot (z,n=length(z)) % test.GOFmontecarlo (parameters, type="NORM", alfa=.05, n=30, N=100) % typeIerrorA2_GOFlaio (n, T, alfa=0.05, dist="NORM", Nsim=1000) % fw2 (w) } %- maybe also `usage' for other objects documented here. 
\arguments{ \item{x}{vector representing data from many samples defined with \code{cod}} \item{cod}{array that defines the data subdivision among sites} \item{z}{data sample, used for bootstrap} \item{n}{length of sample (extracted in \code{nonparboot})} % \item{parameters}{vector of distribution parameters} % \item{type}{distribution type: \code{"NORM"}, \code{"GENLOGIS"}, \code{"GENPAR"}, \code{"GEV"}, \code{"LOGNORM"}, \code{"P3"}} % \item{alfa}{significance level of the test} % \item{N}{number of iterations} % \item{T}{parameters (position, scale, shape, ...)} % \item{dist}{distribution (\code{"NORM"}, \code{"LN"}, \code{"EV1"}, \code{"EV2"}, \code{"GEV"}, \code{"GAM"}, \code{"LP3"})} % \item{Nsim}{number of iterations, as \code{N}} % \item{w}{transformed test statistic (Laio, 2004)} } %\details{} %\note{.} %\references{} %\author{Alberto Viglione} \note{For information on the package and the Author, and for all the references, see \code{\link{nsRFA}}.} %\seealso{\code{\link{factor}}, \code{\link{HOMTESTS}}, \code{\link{traceWminim}}, \code{\link{roi}}.} %\examples{} \keyword{internal}
d6e2666e0dae253c2711842f1ecdfb8ffbe2064b
ae69f22a8bdcc712dbac8057504b7044bb57ace9
/R/tag-export.r
4caeb6b79d3aa082328a9fe4d648419920724382
[]
no_license
kashenfelter/roxygen3
4fa4c61031bc3f0826ffb025eacf4751e79c6325
5ebffd6f41913e4737e71fb42dc5295170c8eadc
refs/heads/master
2020-03-13T22:43:13.212468
2013-11-30T13:38:15
2013-11-30T13:38:15
null
0
0
null
null
null
null
UTF-8
R
false
false
1,832
r
tag-export.r
#' Namespace: tags for exporting objects
#'
#' Generally, you will only need to use a bare \code{@@export} - this will
#' inspect the documented object and automatically figure out the correct
#' export (which varies based on whether it's a function, s3 method, s4
#' class, s4 generic/method).
#'
#' The convention in \pkg{roxygen3} is that exporting a generic automatically
#' exports all associated methods. This happens automatically for S4 with
#' the \code{exportMethod} directive, but needs considerable work for S3.
#' For details, see \link{roxgyen_s3}.
#'
#' It's not recommend practice to use \code{@@exportPattern} (instead it's
#' better to individually label the functions to be exported), but this may
#' be useful for legacy packages.
#'
#' \code{@@S3method} exists largely for compatibility with roxygen2. Roxygen3
#' now automatically determines if an object is an S3 method, and so only
#' \code{@@export} is necessary.
#'
#' @tagUsage
#' @@export
#' @@export function name
#' @rdname tag-export
setClass("ExportTag", contains = "Tag")

# Parse the tag text: an empty @export keeps the tag as-is; otherwise the
# whitespace-separated names are stored for explicit export.
setMethod("value<-", "ExportTag", function(tag, value) {
  if (isEmpty(value)) return(tag)
  tag@text <- str_split(value, "[[:space:]]+")[[1]]
  tag
})

# For a bare @export, copy the automatically computed defaults
# (defaultExport) into the concrete export/exportMethods/exportClass/S3method
# tags on the block. An explicit @export (non-empty input) is left untouched.
setMethod("process", "ExportTag", function(input, block) {
  if (!isEmpty(input)) return(block)
  defaults <- tag(block, "defaultExport")
  if (isEmpty(defaults)) return(block)

  tag(block, "export") <- defaults@export
  tag(block, "exportMethods") <- suffix(defaults@exportMethods)
  tag(block, "exportClass") <- suffix(defaults@exportClass)
  tag(block, "S3method") <- new("S3methodTag", methods = defaults@S3method)

  block
})

# Emit one NAMESPACE export() directive per stored name.
setMethod("writeNamespace", "ExportTag", function(object) {
  ns_each("export", object@text)
})

# Tags that must be processed before this one.
setMethod("getPrereqs", "ExportTag", function(tag) {
  c("S3methodTag", "DocTypeTag")
})
1d7b0d39496273ac7d882d843a8c4679d7122bed
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/geoknife/examples/geoknife-methods.Rd.R
647fbcb48950806c4fffd595c18b6572072bece4
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
386
r
geoknife-methods.Rd.R
library(geoknife) ### Name: geoknife ### Title: geoknife ### Aliases: geoknife ### ** Examples ## Not run: ##D job <- geoknife(stencil = c(-89,42), fabric = 'prism') ##D check(job) ##D ##D #-- set up geoknife to email user when the process is complete ##D ##D job <- geoknife(webgeom("state::Wisconsin"), fabric = 'prism', email = 'fake.email@gmail.com') ##D ## End(Not run)
4d3d2ec4ab2cba71ab9d7258257b3025d943743e
f86c61f0ec83bc7ed2281c9a2636eeee450eaca7
/shiny_practice/04_radioButtons/server.R
0b337a4d6b2e1b3a40fe8674d9940865ab08c9f8
[]
no_license
j-harbin/shiny
e0f4d0465817bba0f3595f6ce6b039c76183e655
b3ba46db0ef7e148e14a5058bb72ea50fd60f030
refs/heads/main
2023-06-06T06:24:10.661523
2021-06-24T14:52:06
2021-06-24T14:52:06
321,340,874
0
0
null
null
null
null
UTF-8
R
false
false
235
r
server.R
library(shiny) shinyServer(function(input,output) { output$myname <- renderText(input$name) output$myage <- renderText(input$age) output$mygender <- renderText(input$gender) } )
a4b62b2e18331aac9374a6f686537f5f7b8e8b0f
1b622a48cf7c8853d67605cb6501aea6ef4df678
/Evolutionary Genomics/Nicolas Salamin Phylogeny/PracticalDay1_likelihood/model_selection_concat.R
9ec582cb7b55b66412f58c68610e80c1186e9de0
[]
no_license
ThibaultSchowing/unistuff
0ab6e1c3ec27dda631b0399a1cfca351e959b7fc
380506b30a69df6c82f639064b3d5700fe90de14
refs/heads/master
2020-09-08T19:43:24.176721
2020-05-14T21:59:24
2020-05-14T21:59:24
221,225,865
1
0
null
null
null
null
UTF-8
R
false
false
470
r
model_selection_concat.R
library(ape) # run phyml sequentially on each DNA model #change the execname based on what you have on your computer phyml.test<-phymltest(seqfile="clownfish_concat.phy", execname="PhyML_3.0/phyml_3.0_win32.exe -b 0 -o lr", append = FALSE, "sequential") #takes a while... #plot the results plot(phyml.test) # Save an object to a file saveRDS(phyml.test, file = "model_compare_concat.rds") # Restore the object phyml.test <- readRDS(file = "model_compare_concat.rds")
de76c87edfc7f60f8d9678875b8929c25e719e0b
c4b86ad27a16524b978bcad04d3920b98bd77aba
/no_change.R
b182908fbf98ab0da63f68d20fc63ce1f2e19ec5
[]
no_license
nripeshtrivedi/Conversion_analysis
5ec900a97ba685720356cba1b7e1f7111a867b90
d9c9ac2c7a4a2e4e46492c82f530036b256120de
refs/heads/master
2020-12-24T07:04:03.717117
2016-11-20T07:26:29
2016-11-20T07:26:29
73,382,456
0
0
null
null
null
null
UTF-8
R
false
false
1,796
r
no_change.R
listz<-rep(0,length(listi)) for(q in 1:length(listi)) { rightafter<-0 burst<-0 excecuted<-0 p<-listi[q] number<-newdata$number[p] date<-0 ct<-0 c<-1 for(i in 1:length(nmum$memID)) { if(number==nmum$memID[i]){ date[c]<-nmum$actDate[i] ct[c]<-nmum$actCount[i] c<-c+1 } } isin<-IDs$memID %in% number j<-which(isin==TRUE) lisid<-IDs[j,]$listID isin<-members$listID %in% lisid j<-which(isin==TRUE) date<- (as.POSIXct(date,origin = "1970-01-01")) date<-as.Date(date) mark_date<- (as.POSIXct(members$signupDate[j],origin = "1970-01-01")) mark_date<-as.Date(mark_date) b<-data.frame(ct, date) names(b) <- c("number","times") min_date<-min(date) n_of_weeks<-(max(date)-min_date)/7 n_int<-ceiling(n_of_weeks) plot_n<-n_int token<-0 start<-min((date)) n_start<-start+7 c<-1 for(i in 1:length(date)) { if((b[i,2]<=(n_start))&&(b[i,2]>=(start))) { token[i]<-c } else { while(b[i,2]>n_start) { c<-c+1 start<-n_start n_start<-n_start+7 } token[i]<-c } } count<-rep(0,200) for(i in 1:length(token)) { count[token[i]]<-count[token[i]]+ct[i] } count<-count[1:n_int] numberr<-1:n_int c_week<-(mark_date-min(date))/7 if(c_week<1) { list[q]<--1 next } if(length(count)>2) { m.pm<-cpt.mean(count,penalty='Manual', pen.value='(0.5)*log(n)',class=TRUE,test.stat="CUSUM",method = "AMOC") plot(m.pm,xlab="Time", ylabcp="Overall Act.") abline(v=c_week,lty=1,col="blue") excecuted<-1 } if(((length(cpts(m.pm)))==0)) { if(excecuted==1) { listz[q]<-1 print(q) } } } list2<-which(listz==1) #list of numbers with no changes
acd95047bf6df5cfa13762429cc042fd294688fe
22d114d86d77cbc042f1f651ee6b8feabb2f5149
/tests/testthat/test-bm-read-file.R
c30aec31407a31cb3c2447dfa2cc492b4f1375fa
[ "MIT" ]
permissive
lidavidm/arrowbench
544b8030ae203c2274a6c4c8a6c5ea1d76fc246b
ca08ea96678962c462dd8049511e72fbb6f3e9da
refs/heads/main
2023-04-09T23:26:06.203856
2021-04-13T22:51:41
2021-04-13T22:51:41
358,266,146
0
0
NOASSERTION
2021-04-15T13:18:41
2021-04-15T13:18:40
null
UTF-8
R
false
false
522
r
test-bm-read-file.R
test_that("read_file validation", { # read_file has a few combinations in its default arguments that aren't valid read_file_no_validate <- read_file read_file_no_validate$valid_params <- NULL params_no_validate <- default_params(read_file_no_validate) params <- default_params(read_file) expect_lt(nrow(params), nrow(params_no_validate)) # specifically feather+snappy is not a possibility expect_identical( nrow(params[params$format == "feather" & params$compression == "snappy", ]), 0L ) })
f08a2faafe9674f058154de4e46d80aaf0d2bea3
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/stm/man/optimizeDocument.Rd
e794fee41bd5d08b4a1035a110b24e732113c9ab
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
3,926
rd
optimizeDocument.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/STMlncpp.R \name{optimizeDocument} \alias{optimizeDocument} \title{Optimize Document} \usage{ optimizeDocument( document, eta, mu, beta, sigma = NULL, sigmainv = NULL, sigmaentropy = NULL, method = "BFGS", control = list(maxit = 500), posterior = TRUE ) } \arguments{ \item{document}{a single matrix containing the document in the \code{\link{stm}} format} \item{eta}{a vector of length K-1 containing the initial starting value for eta} \item{mu}{a vector of length K-1 containing the prevalence prior} \item{beta}{a matrix containing the complete topic-word distribution for the document. If using a content covariate model it is presumed that you have already passed the correct content covariate level's beta.} \item{sigma}{a K-1 by K-1 matrix containing the covariance matrix of the MVN prior. If you supply this you do not need to supply \code{sigmainv} or \code{sigmaentropy}. See below.} \item{sigmainv}{a K-1 by K-1 matrix containing the precision matrix of the MVN prior. If you supplied \code{sigma} you do not need to supply this. See below.} \item{sigmaentropy}{the entropy term calculated from sigma. If you supplied \code{sigma} you do not need to supply this. See below.} \item{method}{the method passed to \code{\link{optim}}. Uses "BFGS" by default.} \item{control}{the control argument passed to \code{\link{optim}}. Sets the maximum number of observations to 500 but can be used to set other aspects of the optimization per the instructions in \code{\link{optim}}} \item{posterior}{should the full posterior be returned? If TRUE (as it is by default) returns the full variational posterior. Otherwise just returns the point estimate.} } \value{ a list \item{phis}{A K by V* matrix containing the variational distribution for each token (where V* is the number of unique words in the given document. They are in the order of appearance in the document. 
For words repeated more than once the sum of the column is the number of times that token appeared.} \item{lambda}{A (K-1) by 1 matrix containing the mean of the variational distribution for eta. This is actually just called eta in the output of \code{\link{stm}} as it is also the point estimate.} \item{nu}{A (K-1) by (K-1) matrix containing the covariance matrix of the variational distribution for eta. This is also the inverse Hessian matrix.} \item{bound}{The value of the document-level contribution to the global approximate evidence lower bound.} } \description{ A primarily internal use function for optimizing the document-level parameters of the variational distribution. Included here for advanced users who want to design new post-processing features. This help file assumes knowledge of our notation which follows the mathematical notation used in our vignette and other papers. } \details{ This function is a small wrapper around the internal function used to complete the E-step for each document. Regarding the arguments \code{sigma}, \code{sigmainv} and \code{sigmaentropy}. In the internal version of the code we calculate \code{sigmainv} and \code{sigmaentropy} once each E-step because it is shared by all documents. If you supply the original value to \code{sigma} it will calculate these for you. If you are going to be using this to run a bunch of documents and speed is a concern, peek at the underlying code and do the calculation yourself once and then just pass the result to the function so it isn't repeated with every observation. } \examples{ # fitting to a nonsense word distribution V <- length(poliblog5k.voc) K <- 50 beta <- matrix(rgamma(V*K,shape = .1), nrow=K, ncol=V) beta <- beta/rowSums(beta) doc <- poliblog5k.docs[[1]] mu <- rep(0, K-1) sigma <- diag(1000, nrow=K-1) optimizeDocument(doc, eta=rep(0, K-1), mu=mu, beta=beta, sigma=sigma) } \seealso{ \code{\link{thetaPosterior}} }
ce1b4007d259320a55bb79cc1452ccc2915f6c37
c2d7acd783bc15a599ad8712d4ef01f00aa100aa
/man/area_pal.Rd
d34a06d5d63cfe2327b8743ff212ee8b537728ef
[]
no_license
kohske/scales
48d2f5ea4bd38e4bd312975543fd9fbf32a820d9
e774baafb25ac4ef58aadaab5ca410eaa862cc58
refs/heads/master
2020-12-24T11:37:36.642076
2011-12-02T16:17:47
2011-12-02T16:17:47
1,102,403
2
0
null
null
null
null
UTF-8
R
false
false
285
rd
area_pal.Rd
\name{area_pal} \alias{area_pal} \title{Point area palette (continuous).} \usage{ area_pal(range = c(1, 6)) } \arguments{ \item{range}{Numeric vector of length two, giving range of possible sizes. Should be greater than 0.} } \description{ Point area palette (continuous). }
d21ed5a440691f561c5d9af6ab50de7bf7027f42
08313c529a4d1ffecb9f6463534a537d670d775c
/man/coverage.yield.single.Rd
d7cdad1b1abb45046f8306deb6a44c65a552d6c3
[]
no_license
emdann/hexamerModel
5a95ac93852915f51cb4f11917919d9566586e00
5c6bf140d9841fa0dcf572b8acf789c9b82caaf0
refs/heads/master
2021-09-21T21:03:45.694694
2018-08-31T12:22:46
2018-08-31T12:22:46
null
0
0
null
null
null
null
UTF-8
R
false
true
658
rd
coverage.yield.single.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare_peaks.r \name{coverage.yield.single} \alias{coverage.yield.single} \title{Compute coverage yield} \usage{ coverage.yield.single(scaled.track, roi.track) } \arguments{ \item{scaled.track}{GRanges object with values for predicted coverage of best batch (best) and random batch (even) (scaled, so no values < 0)} \item{roi.track}{GRanges object of regions of interest} } \value{ score for coverage yield } \description{ Computes ratio between area under the curve in region of interest and outside of region of interest (sample of the same bps of the region of interest) }
960b4107ee69385957947618fa364ab456c73398
7e77947f8edd12e07325aee3f82cb591d373dc5e
/predictiveTool/PredictiveTool.R
3cb4adf983f3ed62ccf147cf63fe06e63d34696f
[]
no_license
tgowan/HabModel_PredictiveTool
73cc6f23c2a13ab826f645d413ea9c60e8ab984a
41db140509ca9dfe4f9c8be00aa866f0450b19ec
refs/heads/master
2022-12-11T14:18:46.916606
2020-09-21T20:20:17
2020-09-21T20:20:17
297,450,297
0
0
null
null
null
null
UTF-8
R
false
false
8,803
r
PredictiveTool.R
# note: sstavg and processed SST rasters are not saved/exported # note: Cannot write to PredictiveTool.gdb. And saving results as shapfile is problematic. # So will need to join 'outf.csv' or 'enviro.gpkg/HabModelEnviro' to 'HabModelEnviro' and 'HabModelPrediction' in Arc after this R script is run biweek = 'Jan20B' biweekt = 'JanB' semimonth = 4 # 1-8, corresponding to DecA-MarB #directory with SST and Cloud folders containing unzipped .hdr/.flt files setwd("C:/Users/tim.gowan/Documents/Working/SST/Jan01thru15") #setwd("R:/Data/SST/CoastWatch_CaribNode/cw1920/Biweeks/JanB/Pred_Model/2020_Jan_13-16") #directory for PredictiveTool.gdb gdb = "C:/Users/tim.gowan/Documents/Working/SST/Jan01thru15/PredictiveTool.gdb" #gdb = "R:/Projects/Habitat/PredictiveTool/PredictiveTool.gdb" ################# #install.packages('mgcv') #install packages, if necessary library(raster) library(rgdal) library(rgeos) library(mgcv) ################# # a1: Create 'sstavg' raster # list all SST (and cloud) files in directory sst <- list.files(path="SST", pattern = ".flt") cloud <- list.files(path="Cloud", pattern = ".flt") length(sst) #number of images # loop through and process each image sstList <- vector("list", length(sst)) # new list to store processed data for (t in 1:length(sst)){ a <- raster(paste0("SST/", sst[t])) #covert SST float to raster a[is.na(a)] <- 0 # set NA values as 0 c <- raster(paste0("Cloud/", sst[t])) #covert cloud float to raster c[is.na(c)] <- 0 # set NA values as 0 c[c < 0.1] <- -99 # recode values to 1 or 0 c[c > -99] <- 0 c[c == -99] <- 1 x <- a * c #multiply SST image by cloud mask x[x < 5] <- NA # set values <5 degrees C as Null sstList[[t]] <- x #save } # store each in RasterLayer b <- sstList[[1]] for (t in 2:length(sst)){ b <- stack(b, sstList[[t]]) } # Create average sstavg <- mean(b, na.rm=T) plot(sstavg) # Define projection (WGS 1984 Mercator) crs(sstavg) <- "+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs" 
################# # a2: Summarize 'sstavg' raster into fishnet grid. enviro = readOGR(gdb,"HabModelEnviro") #read in fishnet r.vals <- extract(sstavg, enviro) #Extract raster values to list object r.mean <- unlist(lapply(r.vals, mean, na.rm=TRUE)) # Use list apply to calculate mean for each grid cell enviro@data <- data.frame(enviro@data, mean=r.mean) # Join mean values to fishnet spplot(enviro, "mean") ################# # a3: Create isotherms and calculate distance to 22oC isotherm front_fish = readOGR(gdb,"front_fishnetC") #read in front fishnet f.vals <- extract(sstavg, front_fish) #Extract raster values to list object f.mean <- unlist(lapply(f.vals, mean, na.rm=TRUE)) # Use list apply to calculate mean for each grid cell front_fish@data <- data.frame(front_fish@data, mean=f.mean) # Join mean values to fishnet #convert fishnet to raster fr <- raster(xmn=min(coordinates(front_fish)[,1]), xmx=max(coordinates(front_fish)[,1]), ymn=min(coordinates(front_fish)[,2]), ymx=max(coordinates(front_fish)[,2]), res=4200) values(fr) <- 1:ncell(fr) #raster value = cell number r.polys <- rasterize(front_fish, fr, field=front_fish@data$mean, fun=mean) #create contour lines cl <- rasterToContour(r.polys, levels=c(12,16,22)) plot(r.polys) plot(cl, add=TRUE) cl22 <- cl[cl@data$level==22,] plot(cl22, add=TRUE, col='red', lwd=3) #writeOGR(cl, getwd(), "cl", driver="ESRI Shapefile", check_exists=TRUE, overwrite_layer=TRUE) #distance from 22 isotherm to centroids of grid cells centroids = readOGR(gdb,"centroids") #read in cell centroids m <- gDistance(centroids, cl22, byid=TRUE) #calculate distance centroids@data <- data.frame(centroids@data, dist=m[1,]) # Join distance values to centroids #join values to fishnet enviro@data$id <- 1:nrow(enviro@data) #to preserve order of rows in fishnet feature class out <- merge(enviro@data, centroids@data, by="FishnetID") out <- out[order(out$id), ] enviro@data <- out # change to negative value if mean >=22 degrees enviro@data$dist[enviro@data$mean 
>= 22 & !is.na(enviro@data$mean)] <- -1*enviro@data$dist[enviro@data$mean >= 22 & !is.na(enviro@data$mean)] spplot(enviro, "dist", col = "transparent", sp.layout=list(cl22, lwd=3)) ################# # steps 4,5,6: SQL query #extract data within prediction range sub <- enviro@data[(enviro@data$NEAR_DIST/1000 > 0 & enviro@data$NEAR_DIST/1000 <= 77.47) & ((enviro@data$MEAN_depth * -1) > 2 & (enviro@data$MEAN_depth * -1) < 69.86) & (enviro@data$POINT_Y >= 2966814 & enviro@data$POINT_Y <= 3743524) & (enviro@data$POINT_X >= 457819.5 & enviro@data$POINT_X <= 761227.2) & enviro@data$mean > 5 & !is.na(enviro@data$dist), c('X', 'Y', 'POINT_X', 'POINT_Y', 'Zone', 'MEAN_depth', 'MEAN_slope', 'NEAR_DIST', 'FishnetID', 'mean', 'dist')] #remove cells with missing data sub <- sub[!is.na(sub$mean),] #rename columns, convert dist to km predict <- data.frame(Long=sub$X, Lat=sub$Y, Easting=sub$POINT_X, Northing=sub$POINT_Y, Zone=sub$Zone, Depth=(sub$MEAN_depth * -1), Slope=sub$MEAN_slope, DistToShore=(sub$NEAR_DIST/1000), FishnetID=sub$FishnetID, Biweek=biweek, BiweekT=biweekt, SemiMonth=semimonth, Year='08_09', SST=sub$mean, DistTo22Iso=sub$dist/1000, Effort=250) ################# # steps 7-8: generate predictions #Read in training data data<-read.table("R:/Projects/Habitat/PredictiveTool/GroupsData1213.txt", header=T,sep="\t") #exclude Effort and Depth outliers data<-subset(data, Effort<340) data<-subset(data, Depth<70) dim(data) #should now contain 56143 rows and 19 columns head(data) #preview data frame #Run final selected GAM. k=3 limits d.f. 
to restrain wiggliness #Binomial model for presence/absence, using all training data m1<-gam(Presence~s(Effort,k=3)+s(SST,k=3)+s(DistToShore,k=3)+s(Depth,k=3)+s(DistTo22Iso,k=3)+te(SemiMonth,Northing,k=3)+Year,data=data,family=quasibinomial) summary(m1) #view results; make sure 'Deviance explained'=22.8% and 'GCV score'=0.264 #2nd GAM for # of whales, only using data with whales present whales<-subset(data, Presence>0) m3<-gam(OnWhales~te(SemiMonth,Northing,k=3)+Year+s(SST,k=3)+s(DistTo22Iso,k=3)+s(DistToShore,k=3)+s(Depth,k=3),data=whales,family=Gamma(link='log')) summary(m3) #view results; make sure 'Deviance explained'=12.2% and 'GCV score'=0.405 ###Generate predictions dim(predict) #should contain ~1600 rows and 16 columns #Predict presence/absence predict$pres<-predict(m1,newdata=predict,type="response") #Predict # of whales predict$count<-predict(m3,newdata=predict,type="response") #Calculate expected # of whales predict$abund<-predict$pres*predict$count #Calculate variance for expected # of whales predict$var_abund<-(predict$pres*predict$count)+(predict$pres*predict$count^2)*(1-predict$pres) head(predict) #note 4 new columns (pres, count, abund, and var_abund) were added to data frame #Some plots to check to results plot(predict$pres~predict$SST) plot(predict$pres~predict$Depth) ################# # step 9: join predictions to fishnet out2 <- merge(enviro@data, predict[,c('FishnetID', 'pres', 'abund')], by="FishnetID", all=TRUE) out2 <- out2[order(out2$id), ] enviro@data <- out2 enviro@data$pres <- as.numeric(enviro@data$pres) enviro@data$abund <- as.numeric(enviro@data$abund) spplot(enviro, "pres", col = "transparent") #rename columns colnames(enviro@data)[colnames(enviro@data)=='mean'] <- paste0('mean_',biweek) colnames(enviro@data)[colnames(enviro@data)=='dist'] <- paste0('Iso22',biweek) colnames(enviro@data)[colnames(enviro@data)=='pres'] <- paste0(biweek,'Pres') colnames(enviro@data)[colnames(enviro@data)=='abund'] <- paste0(biweek,'Abund') # Write 
results (Note: cannot write to ESRI file gdb, but may be possible with 'arcgisbinding') #writeOGR(enviro, getwd(), "HabModelEnviro", driver="ESRI Shapefile", check_exists=TRUE, overwrite_layer=TRUE) #export as geopackage file writeOGR(enviro, dsn="enviro.gpkg", layer="HabModelEnviro", driver="GPKG", check_exists=TRUE, overwrite_layer=TRUE) # export as csv outf <- enviro@data[,c('FishnetID', 'POINT_X', 'POINT_Y', paste0('mean_',biweek), paste0('Iso22',biweek), paste0(biweek,'Pres'), paste0(biweek,'Abund'))] write.csv(outf, 'outf.csv') ################# ## Need to run a version of 'a4_Final_FeatureClass.py' to join # meanSST and Iso22 fields to HabModelEnviro and # Pres and Abund fields to HabModelPredictions
87049e03dacec8de6b175323f3e1be95aeb90f54
c1c128884be20bd589342921036d4c1d82e5d480
/man/read_NMRSTAR_file.Rd
6c6c1fde0cfd573f25f1f157f1251c0be0476c10
[ "BSD-3-Clause" ]
permissive
billchenxi/BaMORC
6596d9444b163a2ef65f4c87583e2f7ff1f62d48
9d4beb83817fa6e08c8f669fb2cf891bfb32a699
refs/heads/master
2020-04-27T13:42:15.113201
2019-03-15T18:37:48
2019-03-15T18:37:48
174,380,290
2
0
NOASSERTION
2019-11-02T05:04:43
2019-03-07T16:22:25
R
UTF-8
R
false
true
1,008
rd
read_NMRSTAR_file.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utility.R \name{read_nmrstar_file} \alias{read_nmrstar_file} \title{Extracts data from BMRB STAR 3.0 file. \code{read_nmrstar_file()} parses BMRB STAR 3.0 file. It will extract sequence information and chemical shifts for both alpha and beta carbons.} \usage{ read_nmrstar_file(file_path) } \arguments{ \item{file_path}{File path where input chemical shifts file is located} } \value{ Protein sequence and chemical shifts dataframe. } \description{ Extracts data from BMRB STAR 3.0 file. \code{read_nmrstar_file()} parses BMRB STAR 3.0 file. It will extract sequence information and chemical shifts for both alpha and beta carbons. } \examples{ ## Download a BMRB file library(BMRBr) \dontrun{bmrb_download(id_list = "4020", output_dir = "./", verbose = F)} ## Read in BMRB file and procec file_path = "bmr4020.str" \dontrun{head(read_nmrstar_file(file_path)} ## Delete downloaded BMRB file \dontrun{unlink("./bmr4020.str")} }
3023d26c66de980f1bea19e12ff8777bfab92395
9fabf4c4f23a4c25b0d539ef442961138b8d5fdc
/man/runExample.Rd
27baa648d10c8cf1c2813c4894418293e7d4d792
[]
no_license
PCRuniversum/shinyMolBio
cc496699410dabf045c73fd3a2601706d31b0d7b
6de5b243f77a81fabae1101b20d18f1f978e9e8b
refs/heads/master
2022-06-25T15:04:09.047865
2020-05-07T09:37:28
2020-05-07T09:37:28
107,377,869
4
0
null
null
null
null
UTF-8
R
false
true
735
rd
runExample.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runExample.R \name{runExample} \alias{runExample} \title{Run shinyMolBio Example Applications} \usage{ runExample(example) } \arguments{ \item{example}{The name of the example to run, or NA (the default) to list the available examples.} } \description{ Launch shinyMolBio example applications } \examples{ ## Only run this example in interactive R sessions if (interactive()) { # List all available examples runExample() # Run one of the examples runExample("pcrPlateInput") # Print the directory containing the code for all examples system.file("shiny-examples", package="shinyMolBio") } } \author{ Konstantin A. Blagodatskikh <k.blag@yandex.ru> }
57c209fc9889bbc41f713c36ebba20ae3d8f7d5b
7206f57266b7ef6f79d9b87e726e6c387db35ef3
/Impute.R
c1b28c438a62253aa46755687251764930442703
[]
no_license
MatthewALanham/URBA2
a78a1ac5e5042fe7e2c08e5c348296655cf988e7
98bccec5550772cc2af2185bd7be74501c411510
refs/heads/master
2020-05-17T06:59:01.190699
2015-02-25T17:15:42
2015-02-25T17:15:42
31,308,610
0
1
null
null
null
null
UTF-8
R
false
false
9,868
r
Impute.R
######################################################################## # Missing value imputation # This function wraps the 'VIM' package's imputation techniques into one # function that a user can use by specifying only two inputs # 1) dataSetName # 2) ImputeTechnique (kNN, median, hotdeck, irmi, regression) # 3) Associated parameters # kNN # median # hotdeck # irmi : params = list(y1=c("x1","x2"), y2=c("x1","x3")) # regression : params = Dream+NonD~BodyWgt+BrainWgt # Author: Matthew A. Lanham # Updated: 10/31/2014 ######################################################################### ################################################################################ ## Notes on general imputation and the VIM package: ## ## Good approach to imputation ## 1) the visualization tools should be applied before imputation and the diagnostic tools afterwards. ## Calculate or plot the amount of missing/imputed values for each variable as well as in combination of variables ## ## ################################################################################# Impute = function(dataSetName, ImputeTechnique, params) { #dataSetName = sleep ## The VIM package provides tools for the visualization of missing and/or imputed values, which can require(VIM) ## be used for exploring the data and the structure of the missing and/or imputed values require(VIMGUI) ## allows an easy handling of the implemented plot methods ## http://cran.r-project.org/web/packages/VIM/VIM.pdf if (ImputeTechnique == 'kNN') { ################################################################################# ## k-Nearest Neighbour Imputation ## based on a variation of the Gower Distance for numerical, categorical, ordered and semi-continous variables dataSetName_imp = kNN( data = dataSetName #data.frame or matrix , variable = colnames(dataSetName) #variables where missing values should be imputed #, metric = NULL #metric to be used for calculating the distances between , k = 5 #number of Nearest 
Neighbours used , dist_var = colnames(dataSetName) #names or variables to be used for distance calculation , weights = NULL #weights for the variables for distance calculation , numFun = median #function for aggregating the k Nearest Neighbours in the case of a numerical variable , catFun = maxCat #function for aggregating the k Nearest Neighbours in the case of a categorical variable , makeNA = NULL #list of length equal to the number of variables, with values, that should be converted to NA for each variable , NAcond = NULL #list of length equal to the number of variables, with a condition for imputing a NA , impNA = TRUE #TRUE/FALSE whether NA should be imputed , donorcond = NULL #condition for the donors e.g. ">5" #, mixed = vector() #names of mixed variables , mixed.constant = NULL #vector with length equal to the number of semi-continuous variables specifying the point of the semi-continuous distribution with non-zero probability , trace = FALSE #TRUE/FALSE if additional information about the imputation process should be printed , imp_var = FALSE #TRUE/FALSE if a TRUE/FALSE variables for each imputed variable should be created show the imputation status , imp_suffix = "imp" #suffix for the TRUE/FALSE variables showing the imputation status , addRandom = FALSE #TRUE/FALSE if an additional random variable should be added for distance calculation ) } else if (ImputeTechnique == 'median') { ################################################################################# ## Median or kNN Imputation ## Missing values are imputed with the mean for vectors of class "numeric", with the median for ## vectors of class "integer", and with the mode for vectors of class "factor". Hence, x should be ## prepared in the following way: assign class "numeric" to numeric vectors, assign class "integer" ## to ordinal vectors, and assign class "factor" to nominal or binary vectors dataSetName_imp = initialise( x = dataSetName #a vector. 
, mixed = NULL #a character vector containing the names of variables of type mixed (semi-continous). , method = "median" #Method used for Initialization (median or kNN) , mixed.constant = NULL #vector with length equal to the number of semi-continuous variables specifying the point of the semi-continuous distribution with non-zero probability ) } else if (ImputeTechnique == 'hotdeck') { ################################################################################# ## "Hot-Deck Imputation" Implementation of the popular Sequential, Random (within a domain) hot-deck algorithm for imputation. dataSetName_imp = hotdeck( data = dataSetName#data.frame or matrix , variable = NULL #variables where missing values should be imputed , ord_var = NULL #variables for sorting the data set before imputation , domain_var = NULL #variables for building domains and impute within these domains , makeNA = NULL #list of length equal to the number of variables, with values, that should be converted to NA for each variable , NAcond = NULL #list of length equal to the number of variables, with a condition for imputing a NA , impNA = TRUE #TRUE/FALSE whether NA should be imputed , donorcond = NULL #list of length equal to the number of variables, with a donorcond condition for the donors e.g. ">5" , imp_var = FALSE #TRUE/FALSE if a TRUE/FALSE variables for each imputed variable should be created show the imputation status , imp_suffix = "imp" #suffix for the TRUE/FALSE variables showing the imputation status ) } else if (ImputeTechnique == 'irmi') { ################################################################################# ## "Iterative robust model-based imputation" (IRMI) ## In each step of the iteration, one variable is used as a response variable and the remaining variables serve as the regressors. 
dataSetName_imp = irmi( x = dataSetName #data.frame or matrix , eps = 5 #threshold for convergency , maxit = 100 #maximum number of iterations , mixed = NULL #column index of the semi-continuous variables , mixed.constant = NULL #vector with length equal to the number of semi-continuous variables specifying the point of the semi-continuous distribution with non-zero probability , count = NULL #column index of count variables , step = FALSE #a stepwise model selection is applied when the parameter is set to TRUE , robust = FALSE #if TRUE, robust regression methods will be applied , takeAll = TRUE #takes information of (initialised) missings in the response as well for regression imputation. , noise = TRUE #irmi has the option to add a random error term to the imputed values, this creates the possibility for multiple imputation. The error term has mean 0 and variance corresponding to the variance of the regression residuals. , noise.factor = 1 #amount of noise. , force = FALSE #if TRUE, the algorithm tries to find a solution in any case, possible by using different robust methods automatically. , robMethod = "MM" #regression method when the response is continuous. , force.mixed = TRUE #if TRUE, the algorithm tries to find a solution in any case, possible by using different robust methods automatically. , mi = 1 #number of multiple imputations. , addMixedFactors = FALSE #if TRUE add additional factor variable for each mixed variable as X variable in the regression , trace = FALSE #Additional information about the iterations when trace equals TRUE. 
, init.method = "kNN" #Method for initialization of missing values (kNN or median) , modelFormulas = params #a named list with the name of variables for the rhs of the formulas, which must contain a rhs formula for each variable with missing values, it should look like #list(y1=c("x1","x2"),y2=c("x1","x3")) #if factor variables for the mixed variables should be created for the regression models , multinom.method = "multinom" #Method for estimating the multinomial models (current default and only available method is multinom) ) } else if (ImputeTechnique == 'regression') { ################################################################################# ## "Regression Imputation" - Impute missing values based on a regression model. dataSetName_imp = regressionImp( formula = params #model formula to impute one variable , data = dataSetName #A data.frame or survey object containing the data , family = "AUTO" #family argument for "glm" ("AUTO" tries to choose automatically, only really tested option!!!) , robust = FALSE #TRUE/FALSE if robust regression should be used , imp_var = FALSE #TRUE/FALSE if a TRUE/FALSE variables for each imputed variable should be created show the imputation status , imp_suffix = "imp" #suffix used for TF imputation variables , mod_cat = FALSE #TRUE/FALSE if TRUE for categorical variables the level with the highest prediction probability is selected, otherwise it is sampled according to the probabilities. ) #example #data(sleep) #sleepImp1 = regressionImp(Dream + NonD ~ BodyWgt + BrainWgt, data=sleep) } else { dataSetName_imp = dataSetName } # Return imputed data set return(dataSetName_imp) }
6fba38c448c1292f4422eb2ec632c2159d053d83
cbf10668bc271a33450f2813ec3f54feb6c26333
/R/00-pairedStat.R
bf48367c442650e7b00ef58e838ec7a173bb28d7
[]
no_license
cran/NewmanOmics
06ef10a2550ed80a68cea5c1ef7e2129c97695aa
3ce614e1e23a20b4304587925284aafce864c2ec
refs/heads/master
2022-07-11T00:25:30.060241
2022-05-18T12:20:07
2022-05-18T12:20:07
236,631,708
0
0
null
null
null
null
UTF-8
R
false
false
6,363
r
00-pairedStat.R
# S4 container for the results of the Newman paired statistic. Every slot is a
# genes-by-sample-pairs matrix; validity requires all five to share dimensions.
setClass("NewmanPaired", slots = c(
  nu.statistics = "matrix",  # paired nu statistics
  p.values = "matrix",       # empirical p-values for the nu statistics
  pairedMean = "matrix",     # mean of base and perturbed log expression
  difference = "matrix",     # perturbed minus base log expression
  smoothSD = "matrix")       # loess-smoothed SD as a function of the mean
)

# Validity: every slot must have the same dimensions as nu.statistics.
validNewmanPair <- function(object) {
  all((dim(object@nu.statistics) == dim(object@p.values)) &
        (dim(object@nu.statistics) == dim(object@pairedMean)) &
        (dim(object@nu.statistics) == dim(object@difference)) &
        (dim(object@nu.statistics) == dim(object@smoothSD)))
}
setValidity("NewmanPaired", validNewmanPair)

# Subsetting applies the same [i, j] to all five slots so they stay in sync;
# drop = FALSE keeps each slot a matrix even for a single row/column.
setMethod("[", signature = "NewmanPaired",
          function(x, i, j, ..., drop = FALSE) {
  new("NewmanPaired",
      nu.statistics = x@nu.statistics[i, j, drop = FALSE],
      p.values = x@p.values[i, j, drop = FALSE],
      pairedMean = x@pairedMean[i, j, drop = FALSE],
      difference = x@difference[i, j, drop = FALSE],
      smoothSD = x@smoothSD[i, j, drop = FALSE])
})

# Dimensions of the object = dimensions of any slot (genes x sample pairs).
setMethod("dim", signature = "NewmanPaired", function(x) {
  dim(x@nu.statistics)
})

# MA-style plot per selected sample pair: difference vs. paired mean, with
# genes whose p-value is below 'low' / above 'high' highlighted and the
# smoothed SD envelope overlaid.
setMethod("plot", signature = c("NewmanPaired", "missing"),
          function(x, y, which = NULL, ask = NULL, high = 0.99, low = 0.01,
                   colset = c("red", "blue", "orange"), ...) {
  M <- dim(x)[2]
  if (is.null(which)) {
    which <- 1:M
  }
  if (any(which < 1) || any(which > M)) {
    stop("'which' must be between", 1, "and", M, "\n")
  }
  if (is.null(ask)) {
    ask <- prod(par("mfcol")) < length(which) && dev.interactive()
  }
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))
  }
  for (W in which) {
    X <- x[, W]
    ## BUG FIX: the cutoffs were hard-coded as 0.99 / 0.01, silently ignoring
    ## the 'high' and 'low' arguments (which the legend below already used).
    bigp <- X@p.values > high
    smallp <- X@p.values < low
    plot(X@pairedMean, X@difference, main = colnames(X@pairedMean),
         xlab = "Mean log expression",
         ylab = "Difference in log expression")
    points(X@pairedMean[smallp], X@difference[smallp], col = colset[1], pch = 16)
    points(X@pairedMean[bigp], X@difference[bigp], col = colset[2], pch = 16)
    points(X@pairedMean, X@smoothSD, col = colset[3])
    points(X@pairedMean, -X@smoothSD, col = colset[3])
    legend("topleft", c(paste("P <", round(low, 3)),
                        paste("P >", round(high, 3)),
                        "Smoothed SD"),
           col = colset, pch = 16)
  }
  invisible(x)
})

# One p-value histogram per selected sample pair.
setMethod("hist", signature = "NewmanPaired",
          function(x, breaks = 101, which = NULL, ask = NULL,
                   xlab = "P-value", ...) {
  M <- dim(x)[2]
  if (is.null(which)) {
    which <- 1:M
  }
  if (any(which < 1) || any(which > M)) {
    stop("'which' must be between", 1, "and", M, "\n")
  }
  if (is.null(ask)) {
    ask <- prod(par("mfcol")) < length(which) && dev.interactive()
  }
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))
  }
  for (W in which) {
    X <- x[, W]
    hist(X@p.values, breaks = breaks, xlab = xlab,
         main = colnames(X@p.values), ...)
  }
})

# Compute the Newman paired statistics for matched base/perturbed expression
# matrices (genes in rows, samples in columns).
#
# Inputs may be supplied three ways:
#   1. baseData = a list of two-column matrices (column 1 = base,
#      column 2 = perturbed), one per sample pair;
#   2. baseData and perturbedData = two matrices of matching dimensions;
#   3. baseData = one matrix plus 'pairing', a vector whose negative entries
#      mark base columns and positive entries mark perturbed columns, with
#      matching absolute values identifying each pair.
# Returns a "NewmanPaired" object.
pairedStat <- function(baseData, perturbedData = NULL, pairing = NULL) {
  if (is.list(baseData)) {
    x <- baseData
    baseData <- do.call(cbind, lapply(x, function(entry) { entry[, 1] }))
    colnames(baseData) <- sapply(x, function(A) colnames(A)[1])
    perturbedData <- do.call(cbind, lapply(x, function(entry) { entry[, 2] }))
    colnames(perturbedData) <- sapply(x, function(A) colnames(A)[2])
    rm(x)
  } else if (is.null(perturbedData)) {
    if (is.null(pairing)) {
      stop("You must supply at least one of 'perturbedData' or 'pairing'.")
    }
    pos <- which(pairing > 0)
    pos <- pos[order(pairing[pos])]
    neg <- which(pairing < 0)
    neg <- neg[order(abs(pairing[neg]))]
    x <- baseData
    baseData <- x[, neg, drop = FALSE] # 'drop' in case there is only one pair
    perturbedData <- x[, pos, drop = FALSE]
    rm(x, pos, neg)
  }
  ## KRC: Do we need to check that the two matrices are the same size?
  ## Or just let the first computation throw its own error?
  ## Matrix computation of mean of two things
  pairedMean <- (baseData + perturbedData) / 2
  ## Similar computation for SD of two things.
  pooledSD <- abs(baseData - perturbedData) / sqrt(2)
  colnames(pairedMean) <- colnames(pooledSD) <- colnames(perturbedData)
  ## For each column, smooth the pooled SD as a function of the paired mean.
  n <- dim(baseData)[1]
  s <- dim(baseData)[2]
  smoothSD <- matrix(NA, n, s) # set aside storage
  for (i in seq_len(s)) {
    l.mod <- loess(pooledSD[, i] ~ pairedMean[, i])
    smoothSD[, i] <- predict(l.mod)
  }
  colnames(smoothSD) <- colnames(pairedMean)
  ## compute the matrix of nu-statistics
  ## KRC: Why is there an absolute value?
  matNu <- abs(baseData - perturbedData) / smoothSD
  colnames(matNu) <- colnames(pairedMean)
  ## empirical p-values via simulation
  m <- mean(matNu)
  sdNu <- sd(matNu) # renamed from 'sd' to avoid shadowing stats::sd
  randNu <- randNuGen(m, sdNu)
  pValsPaired <- nu2PValPaired(matNu, as.vector(randNu))
  colnames(pValsPaired) <- colnames(pairedMean)
  new("NewmanPaired",
      nu.statistics = matNu,
      p.values = pValsPaired,
      pairedMean = pairedMean,
      difference = perturbedData - baseData,
      smoothSD = smoothSD)
}

### Generating 1 million Nu values based on the overall mean and std deviation
### of the Nu values obtained from the paired statistic. This will later be
### used to estimate the p-values.
randNuGen <- function(mu = 0, sigma = 1) {
  ## magic numbers: ngenes = 10000, ntimes = 100
  A <- matrix(rnorm(10000 * 100, mu, sigma), ncol = 100)
  B <- matrix(rnorm(10000 * 100, mu, sigma), ncol = 100)
  sdest <- mean(abs(A - B) / sqrt(2))
  abs(A - B) / sdest
}

### originally written by Chao Liu on stackoverflow at
### https://stackoverflow.com/questions/20133344/find-closest-value-in-a-vector-with-binary-search
# Return the index of the element of (sorted) 'w' closest to scalar 'x'.
NearestValueSearch <- function(x, w) {
  ## A simple binary search algorithm
  ## Assume the w vector is sorted so we can use binary search
  left <- 1
  right <- length(w)
  while (right - left > 1) {
    middle <- floor((left + right) / 2)
    if (x < w[middle]) {
      right <- middle
    } else {
      left <- middle
    }
  }
  if (abs(x - w[right]) < abs(x - w[left])) {
    return(right)
  } else {
    return(left)
  }
}

# Convert each nu statistic to an empirical upper-tail p-value: the fraction
# of the simulated reference values ('vec') at or above it, located via the
# nearest-value binary search.
nu2PValPaired <- function(nuMatrix, vec) {
  vec <- sort(vec)
  # vapply (rather than sapply) guarantees a numeric result per element.
  MatP <- matrix(vapply(nuMatrix, function(x) {
    1 - NearestValueSearch(x, vec) / length(vec)
  }, numeric(1)), nrow(nuMatrix), ncol(nuMatrix))
  return(MatP)
}
eab419dbe6095efde90d6ea1f5e0293927762edc
046e85ba3cd211e356278a30650199809e7cba67
/PhotoProcessing_24Sep.R
e7557cce3aef1dea836cdef25b151525303a1b8d
[]
no_license
jwilli038/GradSchool
462f1074c2cb3d10c731080f04ee67c54aed6e85
6cd98892956a1d0f515e8a598308cffa67f7708e
refs/heads/master
2023-01-28T18:47:05.265618
2023-01-09T01:45:37
2023-01-09T01:45:37
150,174,270
0
0
null
null
null
null
UTF-8
R
false
false
1,475
r
PhotoProcessing_24Sep.R
# Daubechies Image (DWT) data(dau) par(mfrow=c(1,1), pty="s") image(dau, col=rainbow(128)) sum(dau^2) dau.dwt <- dwt.2d(dau, "d4", 3) plot.dwt.2d(dau.dwt) sum(plot.dwt.2d(dau.dwt, plot=FALSE)^2) ## Xbox image (Reconstruction using wavelets) data(xbox) xbox.dwt <- dwt.2d(xbox, "haar", 3) par(mfrow=c(1,1), pty="s") plot.dwt.2d(xbox.dwt) par(mfrow=c(2,2), pty="s") image(1:dim(xbox)[1], 1:dim(xbox)[2], xbox, xlab="", ylab="", main="Original Image") image(1:dim(xbox)[1], 1:dim(xbox)[2], idwt.2d(xbox.dwt), xlab="", ylab="", main="Wavelet Reconstruction") image(1:dim(xbox)[1], 1:dim(xbox)[2], xbox - idwt.2d(xbox.dwt), xlab="", ylab="", main="Difference") ## Boat import #Boat from the internet library(imager) im <- load.image("I:/My Documents/Data Files/satelite_boat.jpg") thmb <-resize(im, 512,512) #Boat Reconstruction boat.dwt <- dwt.2d(thmb[,,,2], "haar", 3) par(mfrow=c(1,1), pty="s") plot.dwt.2d(boat.dwt) par(mfrow=c(2,2), pty="s") image(1:dim(thmb[,,,2])[1], 1:dim(thmb[,,,2])[2], thmb[,,,2], xlab="", ylab="", main="Original Image") image(1:dim(thmb[,,,2])[1], 1:dim(thmb[,,,2])[2], idwt.2d(boat.dwt), xlab="", ylab="", main="Wavelet Reconstruction") image(1:dim(thmb[,,,2])[1], 1:dim(thmb[,,,2])[2], thmb[,,,2] - idwt.2d(boat.dwt), xlab="", ylab="", main="Difference") # Boat DWT image(thmb[,,,2],col=rainbow(128)) boat.dwt <- dwt.2d(thmb, "haar", 3) plot.dwt.2d(boat.dwt)
6d87714596b7c9d887fc1afb53abb3e08ea2ae8b
149783948019828de27a1830bd103acc233967b5
/man/my_dmvn.Rd
aa4638ef84c134363d2a95e755bdc8ea8cf2b5c1
[]
no_license
Karagul/copulafa
10698d306815702aa62e51509f1eacdc7d23a02d
7c2b9af54972cd5483dcba336528110429da5bf3
refs/heads/master
2020-06-02T05:36:35.809114
2019-02-22T12:04:24
2019-02-22T12:04:24
null
0
0
null
null
null
null
UTF-8
R
false
true
631
rd
my_dmvn.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_dmvn.R
\name{my_dmvn}
\alias{my_dmvn}
\title{Calculate the density}
\usage{
my_dmvn(my_vec, l, logl)
}
\arguments{
\item{my_vec}{numeric vector. First \code{l} values represent the
observations, the next \code{l} values represent the predictions (means),
and the last \code{l}^2 values represent the estimated residual covariance
matrix.}

\item{l}{numeric. Number of dimensions.}

\item{logl}{logical. Calculate log density? Defaults to \code{TRUE}.}
}
\value{
numeric vector. The (log) density.
}
\description{
This function calculates the MVN density.
}
cf93b449eed1cdfff92f7ad705b6055955c722e0
d0cc0897a6a34a064588001b1aae74839c85ba23
/R/CheckAbnChr.R
56c80784339a3bb2cc4dc9e73defeacf7150866a
[]
no_license
rxseadew/iPsychCNV
5d32ab6587f69d18558da9a7c70e764f2019ec6d
1c8fa20a6440fa686917ddabbac57d13007fa44e
refs/heads/master
2022-04-07T07:22:34.497169
2017-06-27T11:59:19
2017-06-27T11:59:19
null
0
0
null
null
null
null
UTF-8
R
false
false
3,972
r
CheckAbnChr.R
##' CheckAbnChr: Estimate abnormal chromosome. ##' ##' Specifically designed to reduce false positive CNVs and handle data from amplified DNA on dried blood spots. ##' @title CheckAbnChr ##' @param Path2RawFiles: Path for the Log R Ratio (LRR) and B Allele Frequency (BAF) files. Example: "/media/NeoScreen/NeSc_home/ILMN/iPSYCH/Version2". ##' @param Cores: Number of cores to run in parallel, default = 1. ##' @param Pattern: Files pattern in the path. Example: "*.txt$". ##' @param Skip: Integer, the number of lines of the data file to be skipped before beginning to read data. Use if file has comments, default = 10. ##' @param NumFiles: Number of files to run. Example: numeric, 10 or character "All", default = All. ##' @return Data frame with the estimate copy number for each chromosome. ##' @author Marcelo Bertalan, Louise K. Hoeffding. ##' @source \url{http://biopsych.dk/iPsychCNV} ##' @export ##' @examples ##' mockCNV <- MockData(N=5, Type="Blood", Cores=1) ##' cnvs <- CheckAbnChr(PathRawData=".", Cores=1, Pattern="^MockSample*", Skip=0) CheckAbnChr <- function(Path2RawFiles="/media/NeoScreen/NeSc_home/ILMN/iPSYCH/Version2", Files=NA, Cores=1, Pattern="*.txt$", skip=10, NumFiles="All") { library(iPsychCNV) library(parallel) suppressWarnings(if(is.na(Files)) { Files <- list.files(path=PathRawData, pattern=Pattern, full.names=TRUE, recursive=recursive) }) if(NumFiles %in% "All"){ NumFiles <- length(Files) } Res2 <- mclapply(Files[1:NumFiles], mc.cores=Cores, mc.preschedule = FALSE, function(X) { File <- X SampleID <- tail(unlist(strsplit(X, "/")),n=1) cat(File, "\n") # Reading sample Sample <- ReadSample(File, skip=skip) LocalID <- tail(unlist(strsplit(File, "/")), n=1) #LocalID <- unique(Sample$Sample.ID)[1] Res <- sapply(unique(Sample$Chr), function(CHR) { Sample.Y <- subset(Sample, Chr %in% CHR) Sample.Y <- Sample.Y[order(Sample.Y$Position),] # Allele # deCODE has no Allele #Allele.Y <- apply(Sample.Y, 1, function(X){ X["Allele1"] %in% X["Allele2"] }) 
#Allele.Y.Perc <- (sum(Allele.Y)/length(Allele.Y))*100 # BAF Class.Y <- ClassNumbers(Sample.Y) LRRmean.Y <- mean(Sample.Y$Log.R.Ratio) chrY.Perc <- (sum(Sample.Y$B.Allele.Freq > 0.2 & Sample.Y$B.Allele.Freq < 0.8)/nrow(Sample.Y))*100 # Classification res2 <- data.frame(CNVmean=LRRmean.Y) BAF.Y <- EvaluateMyBAF(Class.Y, res2) Class.Y[1:7] <- Class.Y[1:7] df <- data.frame(LocalID=LocalID, Chr=CHR, chr.Perc=chrY.Perc, LRRmean=LRRmean.Y, SampleID=SampleID, NumChr=BAF.Y, stringsAsFactors=F) df <- cbind(df, Class.Y) return(df) }) df <- MatrixOrList2df(Res) # EvalChr df$ChrEval <- NA if(df$NumChr[df$Chr %in% "X"] == 1 & df$NumChr[df$Chr %in% "Y"] == 1){ df$ChrEval <- "Male" } if(df$NumChr[df$Chr %in% "X"] == 2 & df$NumChr[df$Chr %in% "Y"] == 0){ df$ChrEval <- "Female" } if(df$NumChr[df$Chr %in% "X"] == 1 & df$NumChr[df$Chr %in% "Y"] == 0){ df$ChrEval <- "Turner syndrome" } if(df$NumChr[df$Chr %in% "X"] == 3 & df$NumChr[df$Chr %in% "Y"] == 0){ df$ChrEval <- "Triple-X syndrome" } if(df$NumChr[df$Chr %in% "X"] == 2 & df$NumChr[df$Chr %in% "Y"] == 1){ df$ChrEval <- "Klinefelter syndrome" } if(df$NumChr[df$Chr %in% "X"] == 1 & df$NumChr[df$Chr %in% "Y"] == 2){ df$ChrEval <- "XYY syndrome" } if(df$NumChr[df$Chr %in% "X"] == 2 & df$NumChr[df$Chr %in% "Y"] == 2){ df$ChrEval <- "XXYY syndrome" } if(df$NumChr[df$Chr %in% "12"] == 3){ df$ChrEval <- "Trisomia 12" } if(df$NumChr[df$Chr %in% "13"] == 3){ df$ChrEval <- "Trisomia 13" } if(df$NumChr[df$Chr %in% "16"] == 3){ df$ChrEval <- "Trisomia 16" } if(df$NumChr[df$Chr %in% "17"] == 3){ df$ChrEval <- "Trisomia 17" } if(df$NumChr[df$Chr %in% "18"] == 3){ df$ChrEval <- "Trisomia 18" } if(df$NumChr[df$Chr %in% "21"] == 3){ df$ChrEval <- "Trisomia 21" } if(df$NumChr[df$Chr %in% "22"] == 3){ df$ChrEval <- "Trisomia 22" } return(df) }) df <- MatrixOrList2df(Res2) return(df) }
9956f1a44d81e0957c90aa53ed340b9658e3c988
3f927ad080842bcf0469654fb980d7100cf73b77
/Swiper/server.R
a3cbdfdf5db3bcf7affde917e4d936f3050ebfce
[]
no_license
TheHuessy/Culler
6564c705a5fae61737e4bc42aa356a5c5db019de
361e472b5629ecef138738e0e240bef571ba9ec4
refs/heads/master
2023-04-16T09:08:05.968130
2021-04-29T15:25:10
2021-04-29T15:25:10
295,886,772
0
0
null
null
null
null
UTF-8
R
false
false
12,819
r
server.R
## server.R -- Shiny backend for the "Swiper" image-culling app.
##
## NOTE(review): application state lives in GLOBAL variables mutated with
## `<<-` (work, cnt, tot_*, last_saved, ...). These globals are shared by
## every Shiny session served from one R process, so this design is only
## safe for a single-user deployment -- confirm that assumption holds.
library(shiny)
library(shinysense)
library(magick)
library(DBI)
library(RPostgreSQL)
library(yaml)
library(tools)
library(httr)
## NOTE(review): `%>%` is used below but magrittr/dplyr is never attached --
## presumably re-exported by one of the packages above; confirm.

## For the logs
the_date <- format(Sys.time(), "%Y-%m-%d:%H:%M")
print(the_date)
##############

## Credentials come from a YAML file pointed at by $CREDS_PATH; without it
## the app cannot reach Postgres or Instagram, so bail out immediately.
creds_path <<- Sys.getenv('CREDS_PATH')
if (nchar(creds_path) <= 0) {
  print("Master Variables not sourced, killing app...")
  quit(save="no")
}
creds <<- read_yaml(creds_path)
sql_driver <<- dbDriver("PostgreSQL")

# Open a connection to the 'strobot' Postgres database using the loaded creds.
# Callers are responsible for dbDisconnect().
make_sql_con <- function(sql_driver, creds){
  return(dbConnect(sql_driver,
                   host=creds$pg_host,
                   user=creds$pg_user,
                   password=creds$pg_pw,
                   dbname="strobot"
                   )
  )
}

####### PULLING IN DATA ##########
# Fetch a random batch (size from $CULLER_BATCH_SIZE) of not-yet-culled rows
# from culling_direct, shuffled again client-side.
pull_data <- function(){
  sql_con <- make_sql_con(sql_driver, creds)
  ## COMMENTING OUT EXTERNAL CULLS BECAUSE OF CONSTANT ACCESS ISSUES
  ## WAITING TO DEVELOP PATCH FOR THIS (IF POSSIBLE) IN THE FUTURE
  # ext_data <- dbGetQuery(sql_con,statement=paste("SELECT link_id, piece as base_link, keep, 'culling_external' as table_name FROM culling_external WHERE keep IS NULL ORDER BY random() LIMIT ", Sys.getenv('CULLER_BATCH_SIZE'), sep=""))
  dir_data <- dbGetQuery(sql_con,statement=paste("SELECT link_id, end_link as base_link, keep, 'culling_direct' as table_name FROM culling_direct WHERE keep IS NULL ORDER BY random() LIMIT ",Sys.getenv('CULLER_BATCH_SIZE'), sep=""))
  # work <- rbind(ext_data, dir_data) %>%
  #   .[sample(nrow(.)),]
  work <- dir_data %>%
    .[sample(nrow(.)),]
  dbDisconnect(sql_con)
  return(work)
}

# Count of rows still awaiting a keep/discard decision, formatted with commas.
pull_total <- function(){
  sql_con <- make_sql_con(sql_driver, creds)
  # tot_left <- dbGetQuery(sql_con, statement="SELECT SUM(total) FROM (SELECT COUNT(*) as total FROM culling_external WHERE keep IS NULL UNION ALL SELECT COUNT(*) as total FROM culling_direct WHERE keep IS NULL) as tbl") %>%
  #   format(big.mark = ",")
  tot_left <- dbGetQuery(sql_con, statement="SELECT COUNT(*) as total FROM culling_direct WHERE keep IS NULL") %>%
    format(big.mark = ",")
  dbDisconnect(sql_con)
  return(tot_left)
}

# Count of rows already marked "keep" (keep = 1), formatted with commas.
pull_done_number <- function(){
  sql_con <- make_sql_con(sql_driver,
                          creds)
  tot_done <- dbGetQuery(sql_con, statement="SELECT COUNT(*) as total FROM culling_direct WHERE keep = 1") %>%
    format(big.mark = ",")
  dbDisconnect(sql_con)
  return(tot_done)
}

##### ESTABLISH UNIVERSAL VARIABLES #####
# Global app state, initialised once at load time:
#   work       -- current batch data frame (cols: link_id, base_link, keep, table_name)
#   tot        -- rows in the current batch; cnt -- c(index, resolved image URL)
#   last_saved -- index of the last row flushed to the database
work <<- pull_data()
tot_left <<- pull_total()
tot_done <<- pull_done_number()
last_saved <<- 0
tot_yes <<- 0
tot_no <<- 0
yes_pct <<- ""
tot <<- nrow(work)

##### FUNCTION DEFS #####
# Extract the csrftoken value from an httr cookies data frame.
# NOTE(review): column 7 is assumed to be the cookie value -- positional and
# fragile; confirm against httr's cookie table layout.
parse_csrf <- function(cookie_table){
  output <- cookie_table[which(cookie_table$name == "csrftoken"),7]
  return(output)
}

# Log in to Instagram (browser-style AJAX login) and retry fetching full_url
# within the authenticated session. Returns the parsed page content on
# success, or the raw login response when authentication fails.
set_insta_session <- function(full_url){
  time_rn <- round(as.numeric(as.POSIXct(Sys.time())),0)
  response_data <- GET(full_url)
  csrf <- parse_csrf(response_data$cookies)
  login_link <- "https://www.instagram.com/accounts/login/"
  # Instagram's "encrypted password" browser format: plaintext password plus
  # a current timestamp.
  post_body <- list(
    username = creds$un_insta,
    enc_password = paste('#PWD_INSTAGRAM_BROWSER:0:{', time_rn, '}:', creds$pw_insta, sep = ""),
    optIntoOneTap = 'false'
  )
  post_headers <- c(
    'user-agent' = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
    'x-requested-with' = "XMLHttpRequest",
    referer = login_link,
    'x-csrftoken' = csrf
  )
  auth_post <- POST(url = paste(login_link, "ajax/", sep = ""), body = post_body, add_headers(post_headers))
  # A 'sessionid' cookie in the response means the login was accepted.
  if ("sessionid" %in% auth_post$cookies$name) {
    print("Reauthenticated!")
    ######### CHECK TO SEE IF RUNNING THE PAGE FROM HERE MAINTAINS A SESSION ###############
    page_retry <- content(GET(full_url, add_headers('x-csrftoken' = parse_csrf(auth_post$cookies))))
    if (is.null(page_retry$graphql$shortcode_media$display_url) == FALSE){
      return(page_retry)
    } else {
      print("Reuathentication did not work")
      return(page_retry)
    }
  } else {
    print("Not able to authenticate instagram!")
    return(auth_post)
  }
}

# Re-login and extract the post's image URL; falls back to a known-dead imgur
# URL so the caller's status-code check flags the row instead of crashing.
reauthenticate <- function(full_url){
  page_data <- set_insta_session(full_url)
  if (is.null(page_data$graphql$shortcode_media$display_url) == TRUE){
    return("https://www.imgur.com/thispageisactuallydead")
  } else{
    return(page_data$graphql$shortcode_media$display_url)
  }
}

# Resolve an Instagram post path (e.g. "/p/<shortcode>/") to its direct image
# URL via the "?__a=1" JSON endpoint, reauthenticating if the anonymous
# request comes back without media.
insta_fresh <- function(piece){
  full_url <- paste("https://www.instagram.com", piece, "?__a=1", sep = "")
  ## If response is bad [define in a little bit], run set_insta_sesson and then retry
  page_data <- content(GET(full_url))
  if (is.null(page_data$graphql$shortcode_media$display_url) == TRUE){
    print("reauthenticate!")
    return(reauthenticate(full_url))
  } else {
    return(page_data$graphql$shortcode_media$display_url)
  }
}

# Advance to the next displayable row at or after cnt + 1, auto-rejecting
# video files (keep = 0) and dead links (keep = 9) along the way. Returns
# c(new index, resolved image URL).
# NOTE(review): despite taking 'work' as a parameter, the `<<-` assignments
# inside write to the GLOBAL work, skipping the local binding.
get_cnt_safe <- function(work, cnt){
  #### Maybe have it just evaluate if the count is higher than the tot value, then do all the GET testing in get_link
  new_cnt <<- as.numeric(cnt) + 1
  if (new_cnt > nrow(work)){
    pull_new_cohort(new_cnt-1)
    new_cnt <<- 1
  }
  while (TRUE){
    ## Catch if the last test pushed the cnt number over the number of rows in the cohort
    if (new_cnt > nrow(work)){
      pull_new_cohort(new_cnt - 1)
      new_cnt <<- 1
    }
    # Video formats cannot be swiped on; auto-reject them.
    next_ext_bool <<- file_ext(work[new_cnt,2]) %in% c("mp4", "mkv", "gif", "avi", "m4v", "m4p", "mpg")
    if (next_ext_bool == TRUE){
      # Set this URL's keep value to 0
      work[new_cnt,3] <<- 0
      tot_no <<- tot_no + 1
      # Advance one more
      new_cnt <<- new_cnt + 1
    } else {
      tester <<- get_link(new_cnt)
      # NOTE(review): on a connection failure url_check is a "try-error"
      # object; comparing it to 200 yields FALSE, so the row is treated as
      # dead -- works, but relies on that coercion.
      url_check <<- try(status_code(GET(tester)), silent = TRUE)
      if (url_check == 200){
        ### LOGGING ###
        print(tester)
        ###############
        break
      } else {
        ### LOGGING ###
        print(paste("Link got 9'd:", tester))
        ###############
        # keep = 9 marks a dead/unreachable link.
        work[new_cnt,3] <<- 9
        tot_no <<- tot_no + 1
        new_cnt <<- new_cnt + 1
      }
    }
  }
  return(c(new_cnt, tester))
}

# Flush the keep decisions of the five rows preceding 'cnt' back to the
# database (called every fifth swipe by save_if_5).
# NOTE(review): the UPDATE is built by string pasting -- acceptable only
# because link values come from our own database, never from user input.
save_file <- function(work, cnt){
  sql_con <<- make_sql_con(sql_driver, creds)
  for (idx in (as.numeric(cnt)-5):(as.numeric(cnt)-1)){
    link <- work[idx,2]
    new_keep <- work[idx,3]
    tbl_name <- work[idx,4]
    if (tbl_name == "culling_external"){
      col_name <- "piece"
    } else {
      col_name <- "end_link"
    }
    update_string <- paste("UPDATE ",
                           tbl_name,
                           " SET keep = ",
                           new_keep,
                           " WHERE ",
                           col_name,
                           " = '",
                           link,
                           "'",
                           sep = ""
                           )
    ###################################
    print(update_string)
    ###################################
    dbExecute(sql_con, update_string)
  }
  dbDisconnect(sql_con)
  last_saved <<- as.numeric(cnt)-1
}

# Resolve row 'cnt' to a displayable URL: Instagram post paths ("/p/...")
# go through insta_fresh(); anything else is already a direct link.
get_link <- function(cnt){
  test_link <- work[cnt,2]
  test <- grep(pattern="/p/", x=test_link)
  if (length(test) == 0){
    output_link <- work[cnt,2]
  } else {
    output_link <- insta_fresh(work[cnt,2])
  }
  return(output_link)
}

# Flush any unsaved decisions since last_saved, then replace the exhausted
# batch with a fresh one and reset all counters.
pull_new_cohort <- function(cnt){
  print(paste("going into pull new cohort, last_saved value =", last_saved))
  save_diff <<- as.numeric(cnt) - last_saved
  if (save_diff != 1){
    sql_con <<- make_sql_con(sql_driver, creds)
    # last_saved starts at 0; work[0, ] yields a zero-length value that the
    # identical(numeric(0)) guard below skips.
    for (idx in (last_saved):(as.numeric(cnt)-1)){
      link <- work[idx,2]
      new_keep <- work[idx,3]
      # To catch wrong swipes and undesired values going into the db
      if (identical(new_keep, numeric(0)) || new_keep == "NA" || is.na(new_keep)){
        next
      }
      tbl_name <- work[idx,4]
      ## BELOW COMMENTED OUT TO WAIT ON INSTAGRAM DEV WORK
      # if (tbl_name == "culling_external"){
      #   col_name <- "piece"
      # } else {
      #   col_name <- "end_link"
      # }
      col_name <- "end_link"
      update_string <- paste("UPDATE ",
                             tbl_name,
                             " SET keep = ",
                             new_keep,
                             " WHERE ",
                             col_name,
                             " = '",
                             link,
                             "'",
                             sep = ""
                             )
      print(update_string)
      dbExecute(sql_con, update_string)
    }
    dbDisconnect(sql_con)
  }
  work <<- pull_data()
  tot_done <<- pull_done_number()
  tot_left <<- pull_total()
  last_saved <<- 0
  tot_yes <<- 0
  tot_no <<- 0
  yes_pct <<- ""
  tot <<- nrow(work)
}

# Placeholder image shown when the current batch is empty.
end_img <- "https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Thats_all_folks.svg/1019px-Thats_all_folks.svg.png"
#cnt <<- get_cnt_safe(work,0)

# Shiny server: renders the current image, a progress ticker, and yes/no
# stats; each swipe records a keep decision and advances to the next image.
shinyServer(function(input, output, session) {
  ###### APP DEPENDANT FUNCTIONS ######
  # cnt survives across sessions (global); only initialise it once.
  if (!exists("cnt")){
    cnt <<- get_cnt_safe(work,0)
  }
  # Every fifth swipe, persist the last five decisions; the "*" in the ticker
  # signals that a save just happened.
  save_if_5 <- function(cnt) {
    if ((as.numeric(cnt)-1) %% 5 == 0 && (as.numeric(cnt)-1) != 0){
      save_file(work, cnt)
      output$ticker <- renderUI({
        h4(paste(cnt, " of ", tot, " * (", tot_left, ")", sep = ""))
      })
    } else {
      output$ticker <- renderUI({
        h4(paste(cnt, " of ", tot, " (", tot_left, ")", sep = ""))
      })
    }
  }
  onStop(function(){
    # Put this in the log so we know where a session ended
    print("========================")
    dbDisconnect(sql_con)
  })
  # Swipe module from shinysense; object_swipe() yields "left"/"right".
  object_swipe <- callModule(shinyswipr, "swiper_object")
  # Initial image: "that's all folks" when the batch is empty, otherwise the
  # URL resolved for the current row. magick re-encodes it to a local tmp.jpg.
  if (tot == 0){
    img <- image_read(end_img) %>%
      image_write("tmp.jpg")
  } else{
    img_link <<- cnt[2]
    img <- image_read(img_link) %>%
      image_write("tmp.jpg")
  }
  output$image_output <- renderImage({
    list(src = "tmp.jpg", contentType = "image/jpeg")
  }, deleteFile = TRUE)
  output$ticker <- renderUI({
    h4(paste(cnt[1], " of ", tot, " (", tot_left, ")", sep = ""))
  })
  output$stats <- renderUI({
    h4(paste(tot_no, " | ", tot_yes, " - ", yes_pct, "%", " (", tot_done, ")", sep=""))
  })
  # Main interaction loop: record the swipe, advance, refresh UI.
  observeEvent(object_swipe(), {
    #### ASSIGN KEEP VALUE ####
    # left = discard (keep = 0), right = keep (keep = 1).
    if (object_swipe() == "left"){
      work[as.numeric(cnt[1]),3] <<- 0
      tot_no <<- tot_no + 1
    } else if (object_swipe() == "right") {
      work[as.numeric(cnt[1]),3] <<- 1
      tot_yes <<- tot_yes + 1
    }
    cnt <<- get_cnt_safe(work,as.numeric(cnt[1]))
    yes_pct <<- round(tot_yes/(tot_yes+tot_no), digits = 3)*100
    # NaN when both counters are zero (fresh cohort).
    if (is.nan(yes_pct) == TRUE){
      yes_pct <<- 0
    }
    save_if_5(as.numeric(cnt[1]))
    if (as.numeric(cnt[1]) > tot){
      pull_new_cohort(as.numeric(cnt[1]))
    } else {
      ## Update image and resave tmp
      img_link <<- cnt[2]
      img <<- image_read(img_link) %>%
        image_write("tmp.jpg")
    }
    output$image_output <- renderImage({
      list(src = "tmp.jpg", contentType = "image/jpeg")
    }, deleteFile = TRUE)
    output$stats <- renderUI({
      h4(paste(tot_no, " | ", tot_yes, " - ", yes_pct, "%", " (", tot_done, ")", sep=""))
    })
  })
})
2452cd4d06f2ccfff0192db96d36b12945a21a39
0a9b010c4e36a2beffa730405a5977ce8588b61e
/R/drive.options.test.r
a008964de09a1cad1c2410ea4cf518c2f93e6baa
[]
no_license
1beb/RGoogleDrive
3ce2f26d106368686376247f8ac420f9e6fae2d5
2fb7cc50ca234fb76d127ce00b1a443ca08cf6ba
refs/heads/master
2020-04-05T22:54:36.678404
2015-03-11T16:51:52
2015-03-11T16:51:52
14,985,867
3
3
null
2015-03-11T16:49:47
2013-12-06T15:17:25
R
UTF-8
R
false
false
752
r
drive.options.test.r
#' Tests if RGoogleDrive options are set #' #' Tests if RGoogleDrive options are set and if not, tells you which one has not been set. #' #' @export drive.options.test drive.options.test <- function() { any.not.set <- any( is.null(getOption("drive.app")), is.null(getOption("drive.secret")), is.null(getOption("drive.scope")) ) all.set <- !any.not.set drive.app.not.set <- is.null(getOption("drive.app")) drive.secret.not.set <- is.null(getOption("drive.secret")) if(drive.app.not.set) warning("drive.app not set in options, RGoogleDrive will not be able to authenticate.") if(drive.secret.not.set) warning("drive.secret not set in options, RGoogleDrive will not be able to authenticate.") return(all.set) }
d5b977392aa3fd503479797ca30fb45ac0c626c5
34030f0476a96d696c7a2843b98c80a778e5ce69
/man/FamModelFit.Rd
efcd6df2ad7dbcef740acce91debf12e27067250
[]
no_license
kinnamon-lab/FamModel
536c2ea41b954b38e02836b600a53f890728b460
d95e8559b9ef8d51d09562cff0bfd347a866e655
refs/heads/master
2023-04-14T07:46:28.209784
2021-04-19T13:24:59
2021-04-19T13:24:59
180,372,608
0
0
null
null
null
null
UTF-8
R
false
true
4,435
rd
FamModelFit.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FamModelFit.R \name{FamModelFit} \alias{FamModelFit} \title{Abstract class for results of a fitted \code{FamModel} model} \description{ An abstract R6 class defining the pattern for all \code{FamModelFit} subclasses created by fitting a model to a \code{\link{FamData}} object using one of its model-fitting methods. This class provides only a default constructor without any initializer. } \section{Methods}{ \subsection{Public methods}{ \itemize{ \item \href{#method-get_data}{\code{FamModelFit$get_data()}} \item \href{#method-get_optres}{\code{FamModelFit$get_optres()}} \item \href{#method-get_theta_hat}{\code{FamModelFit$get_theta_hat()}} \item \href{#method-get_V_theta_hat}{\code{FamModelFit$get_V_theta_hat()}} \item \href{#method-print}{\code{FamModelFit$print()}} \item \href{#method-contrast}{\code{FamModelFit$contrast()}} \item \href{#method-clone}{\code{FamModelFit$clone()}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-get_data"></a>}} \if{latex}{\out{\hypertarget{method-get_data}{}}} \subsection{Method \code{get_data()}}{ Returns the \code{\link{FamData}} object that produced this model fit. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$get_data()}\if{html}{\out{</div>}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-get_optres"></a>}} \if{latex}{\out{\hypertarget{method-get_optres}{}}} \subsection{Method \code{get_optres()}}{ Returns \code{list} of optimization results. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$get_optres()}\if{html}{\out{</div>}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-get_theta_hat"></a>}} \if{latex}{\out{\hypertarget{method-get_theta_hat}{}}} \subsection{Method \code{get_theta_hat()}}{ Returns \eqn{\hat{\theta}}, the numeric vector of parameter estimates. 
\subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$get_theta_hat()}\if{html}{\out{</div>}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-get_V_theta_hat"></a>}} \if{latex}{\out{\hypertarget{method-get_V_theta_hat}{}}} \subsection{Method \code{get_V_theta_hat()}}{ Returns the numeric \code{matrix} \eqn{\hat{V}(\hat{\theta})}, the estimated covariance matrix of the parameter estimates. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$get_V_theta_hat()}\if{html}{\out{</div>}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-print"></a>}} \if{latex}{\out{\hypertarget{method-print}{}}} \subsection{Method \code{print()}}{ Formatted printing of the \code{FamModelFit} object. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$print(...)}\if{html}{\out{</div>}} } \subsection{Arguments}{ \if{html}{\out{<div class="arguments">}} \describe{ \item{\code{...}}{Arguments passed on to \code{\link[=print_ests]{print_ests()}}.} } \if{html}{\out{</div>}} } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-contrast"></a>}} \if{latex}{\out{\hypertarget{method-contrast}{}}} \subsection{Method \code{contrast()}}{ Create a new \code{Contrast} object. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$contrast(L_mat, m)}\if{html}{\out{</div>}} } \subsection{Arguments}{ \if{html}{\out{<div class="arguments">}} \describe{ \item{\code{L_mat}}{A contrast vector (1 df) or \code{matrix} (>1 df) containing one contrast in each row. The contrast vector must have a number of elements equal to the number of model parameters. The contrast matrix must be of full row rank and have a number of columns equal the number of model parameters.} \item{\code{m}}{An optional vector containing the null value for each contrast. 
Will be set to the zero vector of length \code{nrow(L_mat)} if not specified.} } \if{html}{\out{</div>}} } \subsection{Returns}{ A \code{\link{Contrast}} object for the specified arguments and this model fit. } } \if{html}{\out{<hr>}} \if{html}{\out{<a id="method-clone"></a>}} \if{latex}{\out{\hypertarget{method-clone}{}}} \subsection{Method \code{clone()}}{ The objects of this class are cloneable with this method. \subsection{Usage}{ \if{html}{\out{<div class="r">}}\preformatted{FamModelFit$clone(deep = FALSE)}\if{html}{\out{</div>}} } \subsection{Arguments}{ \if{html}{\out{<div class="arguments">}} \describe{ \item{\code{deep}}{Whether to make a deep clone.} } \if{html}{\out{</div>}} } } }
fbf69eeca284c7ebbad59613aaffd07b48bbb0e8
31bcd33e7d1d3d80840ab31420b2f2b90e69ab70
/Exam_1/Hall_Skills_Test_1.R
8a2f76dc8f4a68fc6eb9225dde7fa9abf1026aa0
[]
no_license
Daltonah/Data_Course_HALL
9aa5a2d8e6d275eab28da96fc3f6e2e9e9b91711
b85a9d1884e7583ddf716390b7b8cdabaf965fbe
refs/heads/master
2023-02-01T21:00:32.222539
2020-12-17T05:51:56
2020-12-17T05:51:56
290,382,879
0
0
null
null
null
null
UTF-8
R
false
false
1,981
r
Hall_Skills_Test_1.R
# Load libraries (tidyverse already attaches ggplot2 and dplyr; the explicit
# calls are kept from the original for clarity).
library(tidyverse)
library(ggplot2)
library(dplyr)

# Read in the data we will be using and assign it as a data frame.
df <- read_csv("DNA_Conc_by_Extraction_Date.csv")

# I. Recreate Ben and Katy's histograms with labels.
hist(df$DNA_Concentration_Katy,
     main = "DNA Concentration Histogram",
     xlab = "Katy's DNA Concentrations")
hist(df$DNA_Concentration_Ben,
     main = "DNA Concentration Histogram",
     xlab = "Ben's DNA Concentrations")

# II. Recreate provided images (concentration by collection year).
plot(x = as.factor(df$Year_Collected), y = df$DNA_Concentration_Katy,
     main = "Katy's Extractions", ylab = "DNA Concentration", xlab = "Year")
plot(x = as.factor(df$Year_Collected), y = df$DNA_Concentration_Ben,
     main = "Ben's Extractions", ylab = "DNA Concentration", xlab = "Year")

# III. Save the plots as jpegs.
jpeg("./HALL_Plot1.jpeg")
plot(x = as.factor(df$Year_Collected), y = df$DNA_Concentration_Katy,
     main = "Katy's Extractions", ylab = "DNA Concentration", xlab = "Year")
dev.off()

jpeg("./HALL_Plot2.jpeg")
plot(x = as.factor(df$Year_Collected), y = df$DNA_Concentration_Ben,
     main = "Ben's Extractions", ylab = "DNA Concentration", xlab = "Year")
dev.off()

# IV. Year in which Ben's extraction most underperformed Katy's.
# which.min() replaces the original min()-then-which() pair and returns the
# first row of the largest shortfall directly.
conc_diff <- df$DNA_Concentration_Ben - df$DNA_Concentration_Katy
min.row <- which.min(conc_diff)
df[min.row, ]$Year_Collected

# V. Subset the data to just downstairs and make scatterplot as directed.
df_down <- df[df$Lab == "Downstairs", ]
# BUG FIX: the original called as.Date() on a line by itself and discarded
# the result; store the conversion once and reuse it for plotting.
df_down$Date_Collected <- as.Date(df_down$Date_Collected)
jpeg("./Ben_DNA_over_time.jpg")
plot(x = df_down$Date_Collected,
     y = df_down$DNA_Concentration_Ben,
     xlab = "Date_Collected",
     ylab = "DNA_Concentrations_Ben")
dev.off()

# VI. (Bonus) Data frame of Ben's data with average values per year.
Ben_avg <- aggregate(DNA_Concentration_Ben ~ Year_Collected, df, mean)
print(Ben_avg)

# Max of the averages and the year associated with it.
Ben_avg[which(Ben_avg$DNA_Concentration_Ben == max(Ben_avg$DNA_Concentration_Ben)), ]

write.csv(Ben_avg, "./Ben_Average_Conc.csv")
817ffb2d6883fa61dff9eb0244499a5a7369b345
897e7a6542bf5b0275505c179250ec9d48764fcd
/HW6/src.R
ea513a382f79e4ab9f62cdcbae22d19ae939131d
[ "MIT" ]
permissive
goldsail/BayesianHomework
405501e9507dfd99445529adaf92afda098b2407
d5506faccbf4d0b7b696c7c2bcb42d020bb0d357
refs/heads/master
2020-03-21T08:02:42.234503
2018-06-22T15:01:41
2019-07-30T15:30:55
138,315,780
0
0
null
null
null
null
UTF-8
R
false
false
1,290
r
src.R
set.seed(2018)

# Importance-sampling weight for target N(0, 3) under a t(3) proposal.
weight <- function(x) {
  return (dnorm(x, sd = sqrt(3)) / dt(x, 3))
}

for (S in c(100, 10000)) {
  # `t_draws` instead of `t` — the original name shadowed base::t().
  t_draws <- rt(S, 3)
  # dnorm()/dt() are vectorized, so apply weight() to the whole sample
  # directly instead of the original unlist(lapply(...)) round trip.
  w <- weight(t_draws)

  png(sprintf("1_weight_%d.png", S), width = 640, height = 480)
  hist(w, breaks = 100)
  dev.off()

  png(sprintf("1_log_weight_%d.png", S), width = 640, height = 480)
  hist(log(w), breaks = 100)
  dev.off()

  # Self-normalized importance-sampling estimates of E[X] and Var[X].
  first.order = mean(t_draws * w) / mean(w)
  second.order = mean(t_draws^2 * w) / mean(w)
  expectation = first.order
  variance = second.order - first.order^2
  print(sprintf("1: size = %d, expectation = %f, variance = %f", S, expectation, variance))
}

# Reverse direction: target t(3) under a N(0, 3) proposal.
weight <- function(x) {
  return (dt(x, 3) / dnorm(x, sd = sqrt(3)))
}

for (S in c(100, 10000)) {
  n_draws <- rnorm(S, sd = sqrt(3))
  w <- weight(n_draws)

  png(sprintf("2_weight_%d.png", S), width = 640, height = 480)
  hist(w, breaks = 100)
  dev.off()

  png(sprintf("2_log_weight_%d.png", S), width = 640, height = 480)
  hist(log(w), breaks = 100)
  dev.off()

  first.order = mean(n_draws * w) / mean(w)
  second.order = mean(n_draws^2 * w) / mean(w)
  expectation = first.order
  variance = second.order - first.order^2
  print(sprintf("2: size = %d, expectation = %f, variance = %f", S, expectation, variance))
}
bc298b9db1ba310f0c31ff2d826572dedd8ece19
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/sirt/examples/data.reck.Rd.R
9c1db6fc1d56fda6f42726d9fb2ac5274ec2c714
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
8,750
r
data.reck.Rd.R
library(sirt) ### Name: data.reck ### Title: Datasets from Reckase' Book _Multidimensional Item Response ### Theory_ ### Aliases: data.reck data.reck21 data.reck61DAT1 data.reck61DAT2 ### data.reck73C1a data.reck73C1b data.reck75C2 data.reck78ExA ### data.reck79ExB ### Keywords: datasets ### ** Examples ## Not run: ##D ############################################################################# ##D # EXAMPLE 1: data.reck21 dataset, Table 2.1, p. 45 ##D ############################################################################# ##D data(data.reck21) ##D ##D dat <- data.reck21$dat # extract dataset ##D ##D # items with zero guessing parameters ##D guess0 <- c( 1, 2, 3, 9,11,27,30,35,45,49,50 ) ##D I <- ncol(dat) ##D ##D #*** ##D # Model 1: 3PL estimation using rasch.mml2 ##D est.c <- est.a <- 1:I ##D est.c[ guess0 ] <- 0 ##D mod1 <- sirt::rasch.mml2( dat, est.a=est.a, est.c=est.c, mmliter=300 ) ##D summary(mod1) ##D ##D #*** ##D # Model 2: 3PL estimation using smirt ##D Q <- matrix(1,I,1) ##D mod2 <- sirt::smirt( dat, Qmatrix=Q, est.a="2PL", est.c=est.c, increment.factor=1.01) ##D summary(mod2) ##D ##D #*** ##D # Model 3: estimation in mirt package ##D library(mirt) ##D itemtype <- rep("3PL", I ) ##D itemtype[ guess0 ] <- "2PL" ##D mod3 <- mirt::mirt(dat, 1, itemtype=itemtype, verbose=TRUE) ##D summary(mod3) ##D ##D c3 <- unlist( coef(mod3) )[ 1:(4*I) ] ##D c3 <- matrix( c3, I, 4, byrow=TRUE ) ##D # compare estimates of rasch.mml2, smirt and true parameters ##D round( cbind( mod1$item$c, mod2$item$c,c3[,3],data.reck21$pars$c ), 2 ) ##D round( cbind( mod1$item$a, mod2$item$a.Dim1,c3[,1], data.reck21$pars$a ), 2 ) ##D round( cbind( mod1$item$b, mod2$item$b.Dim1 / mod2$item$a.Dim1, - c3[,2] / c3[,1], ##D data.reck21$pars$b ), 2 ) ##D ##D ############################################################################# ##D # EXAMPLE 2: data.reck61 dataset, Table 6.1, p. 
153 ##D ############################################################################# ##D ##D data(data.reck61DAT1) ##D dat <- data.reck61DAT1$data ##D ##D #*** ##D # Model 1: Exploratory factor analysis ##D ##D #-- Model 1a: tam.fa in TAM ##D library(TAM) ##D mod1a <- TAM::tam.fa( dat, irtmodel="efa", nfactors=3 ) ##D # varimax rotation ##D varimax(mod1a$B.stand) ##D ##D # Model 1b: EFA in NOHARM (Promax rotation) ##D mod1b <- sirt::R2noharm( dat=dat, model.type="EFA", dimensions=3, ##D writename="reck61__3dim_efa", noharm.path="c:/NOHARM",dec=",") ##D summary(mod1b) ##D ##D # Model 1c: EFA with noharm.sirt ##D mod1c <- sirt::noharm.sirt( dat=dat, dimensions=3 ) ##D summary(mod1c) ##D plot(mod1c) ##D ##D # Model 1d: EFA with 2 dimensions in noharm.sirt ##D mod1d <- sirt::noharm.sirt( dat=dat, dimensions=2 ) ##D summary(mod1d) ##D plot(mod1d, efa.load.min=.2) # plot loadings of at least .20 ##D ##D #*** ##D # Model 2: Confirmatory factor analysis ##D ##D #-- Model 2a: tam.fa in TAM ##D dims <- c( rep(1,10), rep(3,10), rep(2,10) ) ##D Qmatrix <- matrix( 0, nrow=30, ncol=3 ) ##D Qmatrix[ cbind( 1:30, dims) ] <- 1 ##D mod2a <- TAM::tam.mml.2pl( dat,Q=Qmatrix, ##D control=list( snodes=1000, QMC=TRUE, maxiter=200) ) ##D summary(mod2a) ##D ##D #-- Model 2b: smirt in sirt ##D mod2b <- sirt::smirt( dat,Qmatrix=Qmatrix, est.a="2PL", maxiter=20, qmcnodes=1000 ) ##D summary(mod2b) ##D ##D #-- Model 2c: rasch.mml2 in sirt ##D mod2c <- sirt::rasch.mml2( dat,Qmatrix=Qmatrix, est.a=1:30, ##D mmliter=200, theta.k=seq(-5,5,len=11) ) ##D summary(mod2c) ##D ##D #-- Model 2d: mirt in mirt ##D cmodel <- mirt::mirt.model(" ##D F1=1-10 ##D F2=21-30 ##D F3=11-20 ##D COV=F1*F2, F1*F3, F2*F3 " ) ##D mod2d <- mirt::mirt(dat, cmodel, verbose=TRUE) ##D summary(mod2d) ##D coef(mod2d) ##D ##D #-- Model 2e: CFA in NOHARM ##D # specify covariance pattern ##D P.pattern <- matrix( 1, ncol=3, nrow=3 ) ##D P.init <- .4*P.pattern ##D diag(P.pattern) <- 0 ##D diag(P.init) <- 1 ##D # fix all entries in 
the loading matrix to 1 ##D F.pattern <- matrix( 0, nrow=30, ncol=3 ) ##D F.pattern[1:10,1] <- 1 ##D F.pattern[21:30,2] <- 1 ##D F.pattern[11:20,3] <- 1 ##D F.init <- F.pattern ##D # estimate model ##D mod2e <- sirt::R2noharm( dat=dat, model.type="CFA", P.pattern=P.pattern, ##D P.init=P.init, F.pattern=F.pattern, F.init=F.init, ##D writename="reck61__3dim_cfa", noharm.path="c:/NOHARM",dec=",") ##D summary(mod2e) ##D ##D #-- Model 2f: CFA with noharm.sirt ##D mod2f <- sirt::noharm.sirt( dat=dat, Fval=F.init, Fpatt=F.pattern, ##D Pval=P.init, Ppatt=P.pattern ) ##D summary(mod2f) ##D ##D ############################################################################# ##D # EXAMPLE 3: DETECT analysis data.reck78ExA and data.reck79ExB ##D ############################################################################# ##D ##D data(data.reck78ExA) ##D data(data.reck79ExB) ##D ##D #************************ ##D # Example A ##D dat <- data.reck78ExA$data ##D #- estimate person score ##D score <- stats::qnorm( ( rowMeans( dat )+.5 ) / ( ncol(dat) + 1 ) ) ##D #- extract item cluster ##D itemcluster <- substring( colnames(dat), 1, 1 ) ##D #- confirmatory DETECT Item cluster ##D detectA <- sirt::conf.detect( data=dat, score=score, itemcluster=itemcluster ) ##D ## unweighted weighted ##D ## DETECT 0.571 0.571 ##D ## ASSI 0.523 0.523 ##D ## RATIO 0.757 0.757 ##D ##D #- exploratory DETECT analysis ##D detect_explA <- sirt::expl.detect(data=dat, score, nclusters=10, N.est=nrow(dat)/2 ) ##D ## Optimal Cluster Size is 5 (Maximum of DETECT Index) ##D ## N.Cluster N.items N.est N.val size.cluster DETECT.est ASSI.est ##D ## 1 2 50 1250 1250 31-19 0.531 0.404 ##D ## 2 3 50 1250 1250 10-19-21 0.554 0.407 ##D ## 3 4 50 1250 1250 10-19-14-7 0.630 0.509 ##D ## 4 5 50 1250 1250 10-19-3-7-11 0.653 0.546 ##D ## 5 6 50 1250 1250 10-12-7-3-7-11 0.593 0.458 ##D ## 6 7 50 1250 1250 10-12-7-3-7-9-2 0.604 0.474 ##D ## 7 8 50 1250 1250 10-12-7-3-3-9-4-2 0.608 0.481 ##D ## 8 9 50 1250 1250 
10-12-7-3-3-5-4-2-4 0.617 0.494 ##D ## 9 10 50 1250 1250 10-5-7-7-3-3-5-4-2-4 0.592 0.460 ##D ##D # cluster membership ##D cluster_membership <- detect_explA$itemcluster$cluster3 ##D # Cluster 1: ##D colnames(dat)[ cluster_membership==1 ] ##D ## [1] "A01" "A02" "A03" "A04" "A05" "A06" "A07" "A08" "A09" "A10" ##D # Cluster 2: ##D colnames(dat)[ cluster_membership==2 ] ##D ## [1] "B11" "B12" "B13" "B14" "B15" "B16" "B17" "B18" "B19" "B20" "B21" "B22" ##D ## [13] "B23" "B25" "B26" "B27" "B28" "B29" "B30" ##D # Cluster 3: ##D colnames(dat)[ cluster_membership==3 ] ##D ## [1] "B24" "C31" "C32" "C33" "C34" "C35" "C36" "C37" "C38" "C39" "C40" "C41" ##D ## [13] "C42" "C43" "C44" "C45" "C46" "C47" "C48" "C49" "C50" ##D ##D #************************ ##D # Example B ##D dat <- data.reck79ExB$data ##D #- estimate person score ##D score <- stats::qnorm( ( rowMeans( dat )+.5 ) / ( ncol(dat) + 1 ) ) ##D #- extract item cluster ##D itemcluster <- substring( colnames(dat), 1, 1 ) ##D #- confirmatory DETECT Item cluster ##D detectB <- sirt::conf.detect( data=dat, score=score, itemcluster=itemcluster ) ##D ## unweighted weighted ##D ## DETECT 0.715 0.715 ##D ## ASSI 0.624 0.624 ##D ## RATIO 0.855 0.855 ##D ##D #- exploratory DETECT analysis ##D detect_explB <- sirt::expl.detect(data=dat, score, nclusters=10, N.est=nrow(dat)/2 ) ##D ## Optimal Cluster Size is 4 (Maximum of DETECT Index) ##D ## ##D ## N.Cluster N.items N.est N.val size.cluster DETECT.est ASSI.est ##D ## 1 2 50 1250 1250 30-20 0.665 0.546 ##D ## 2 3 50 1250 1250 10-20-20 0.686 0.585 ##D ## 3 4 50 1250 1250 10-20-8-12 0.728 0.644 ##D ## 4 5 50 1250 1250 10-6-14-8-12 0.654 0.553 ##D ## 5 6 50 1250 1250 10-6-14-3-12-5 0.659 0.561 ##D ## 6 7 50 1250 1250 10-6-14-3-7-5-5 0.664 0.576 ##D ## 7 8 50 1250 1250 10-6-7-7-3-7-5-5 0.616 0.518 ##D ## 8 9 50 1250 1250 10-6-7-7-3-5-5-5-2 0.612 0.512 ##D ## 9 10 50 1250 1250 10-6-7-7-3-5-3-5-2-2 0.613 0.512 ## End(Not run)
b1201eefd4044329d6754c462d2145fd5a34e1da
7f0e3aee0dd7ea32de66f48f1cc36f4a2a144f14
/generate_initial_networks.r
2acd7318691819cc6e9452d91650d5d9aefa912d
[]
no_license
f-duchenne/Phenology-favors-persistence-of-mutualistic-communities
55a744a11114ef07ef8e7300d6345bffc6424066
94d35f8bd45eae1d012e286481f36ff1c3bc8f71
refs/heads/main
2023-06-09T17:08:48.447348
2021-07-05T08:51:11
2021-07-05T08:51:11
373,263,253
0
0
null
null
null
null
ISO-8859-2
R
false
false
2,328
r
generate_initial_networks.r
library(truncnorm)
library(bipartite)
library(plot3D)
library(Rmpfr)
library(circular)
library(CircStats)
library(phylolm)
library(ggplot2)
library(phytools)
library(ape)
library(picante)
library(doBy)
library(phylotools)
library(phangorn)
library(gridExtra)
library(doParallel)
library(foreach)

# BUG FIX: the original left makeCluster()/registerDoParallel() commented
# out, so %dopar% fell back to sequential execution with a warning, and the
# final stopCluster(cl) failed on an undefined `cl`.
cl <- makeCluster(6)
registerDoParallel(cl)

foreach(jj = 1:1000, .combine = rbind) %dopar% {
  # Each worker needs the packages attached in its own session.
  library(truncnorm)
  library(bipartite)
  library(plot3D)
  library(Rmpfr)
  library(circular)
  library(CircStats)
  library(phylolm)
  library(ggplot2)
  library(phytools)
  library(ape)
  library(picante)
  library(doBy)
  library(phylotools)
  library(phangorn)
  library(gridExtra)

  seuil <- 1e-5     # extinction cutoff
  precision <- 0.1  # precision for integrals
  digi <- 5         # round interaction matrix
  conv <- 1e-11     # convergence criterion
  dig_num <- 16     # round values for calcul

  nbsp_p <- 50      # number of pollinator species
  nbsp_f <- 50      # number of flower species
  interfp <- 0
  interff <- 0
  vec_p <- c()
  vec_f <- c()
  indi <- 0
  duree <- 70
  indi <- indi + 1

  # Phenology: mean flight/flowering dates (sorted) and their spreads.
  mu_p <- sort(rnorm(nbsp_p, 190, duree))
  sd_p <- runif(nbsp_p, 5, 40)
  mu_f <- sort(rnorm(nbsp_f, 190, duree))
  sd_f <- runif(nbsp_f, 5, 40)

  # Initial abundances and trait distributions.
  N_p <- runif(nbsp_p, 0.1, 4)
  N_f <- runif(nbsp_f, 5, 50)
  genmu_p <- runif(nbsp_p, -1.5, 1.5)
  genmu_f <- runif(nbsp_f, -1.5, 1.5)
  gensd_f <- runif(nbsp_f, 0.1, 0.9)
  gensd_p <- runif(nbsp_p, 0.1, 0.9)
  names(genmu_p) <- paste0("p", 1:nbsp_p)
  # BUG FIX: the original used paste("f", ...) (default sep = " "), which
  # produced labels like "f 1" inconsistent with the "p1" pollinator labels.
  names(genmu_f) <- paste0("f", 1:nbsp_f)

  # Demographic parameter vectors (pollinators, then flowers).
  rmax <- runif(nbsp_p, 1, 1); rmax2 <- runif(nbsp_f, 1, 1)
  K <- runif(nbsp_p, 1, 60); K2 <- runif(nbsp_f, 10, 600)
  Nini <- c(N_p, N_f)
  m <- runif(nbsp_p, 0.8, 1); m2 <- runif(nbsp_f, 0.2, 0.4)
  cs <- rep(0.3, nbsp_p); cs2 <- rep(0.7, nbsp_f)
  hangling <- runif(nbsp_f + nbsp_p, 0.9, 0.9)
  efficience <- runif(nbsp_f + nbsp_p, 0.8, 1)

  ##### SPECIES LEVEL INFORMATIONS TO EXPORT
  final <- data.frame(sp = c(names(genmu_p), names(genmu_f)),
                      type = c(rep("poll", nbsp_p), rep("flow", nbsp_f)),
                      mfd = c(mu_p, mu_f),
                      sd = c(sd_p, sd_f),
                      trait_sd = c(gensd_p, gensd_f),
                      trait_mu = c(genmu_p, genmu_f),
                      N = c(N_p, N_f), K = c(K, K2),
                      m = c(m, m2), cs = c(cs, cs2),
                      interf = c(rep(interfp, nbsp_p), rep(interff, nbsp_f)),
                      hangling = hangling, efficience = efficience)
  final$stade <- "initial"
  final$random <- jj

  # Write through an absolute path instead of setwd() so the worker's
  # working directory is never mutated.
  out_dir <- paste0("C:/Users/Francois/Documents/modélisation part1/initial_networks_",
                    nbsp_p, "x", nbsp_f)
  write.table(final, file.path(out_dir, paste("pops_eq_i_", jj, ".txt", sep = "")),
              sep = "\t", row.names = FALSE)
}

stopCluster(cl)
576f82c7a985965b7b49552411a8c8956d67f345
ea524efd69aaa01a698112d4eb3ee4bf0db35988
/R/test-example.R
5b7fbc432d21299978dbdc67a0c1d047723f943f
[ "MIT" ]
permissive
r-lib/testthat
92f317432e9e8097a5e5c21455f67563c923765f
29018e067f87b07805e55178f387d2a04ff8311f
refs/heads/main
2023-08-31T02:50:55.045661
2023-08-08T12:17:23
2023-08-08T12:17:23
295,311
452
217
NOASSERTION
2023-08-29T10:51:30
2009-09-02T12:51:44
R
UTF-8
R
false
false
1,889
r
test-example.R
#' Test package examples
#'
#' These helper functions make it easier to test the examples in a package.
#' Each example counts as one test, and it succeeds if the code runs without
#' an error. Generally, this is redundant with R CMD check, and is not
#' recommended in routine practice.
#'
#' @keywords internal
#' @param path For `test_examples()`, path to directory containing Rd files.
#'   For `test_example()`, path to a single Rd file. Remember the working
#'   directory for tests is `tests/testthat`.
#' @param title Test title to use
#' @param rd A parsed Rd object, obtained from [tools::Rd_db()] or otherwise.
#' @export
test_examples <- function(path = "../..") {
  # Prefer examples from the source tree; if that yields nothing, fall back
  # to the installed package's Rd database.
  res <- test_examples_source(path) %||% test_examples_installed()
  if (is.null(res)) {
    stop("Could not find examples", call. = FALSE)
  }
  invisible(res)
}

# Run examples found in a source package's man/ directory.
# Returns NULL when there is no man/ directory or it contains no Rd files,
# so test_examples() can fall through to the installed package.
test_examples_source <- function(path = "../..") {
  if (!dir.exists(file.path(path, "man"))) {
    return()
  }

  Rd <- tools::Rd_db(dir = path)
  if (length(Rd) == 0) {
    return()
  }

  lapply(Rd, test_rd)
}

# Run examples from the installed copy of the package under test.
# Returns NULL when the package name is unknown or it ships no Rd files.
test_examples_installed <- function(package = testing_package()) {
  if (identical(package, "") || is.null(package)) {
    return()
  }

  Rd <- tools::Rd_db(package = package)
  if (length(Rd) == 0) {
    return()
  }

  lapply(Rd, test_rd)
}

#' @export
#' @rdname test_examples
test_rd <- function(rd, title = attr(rd, "Rdfile")) {
  test_example(rd, title)
}

#' @export
#' @rdname test_examples
test_example <- function(path, title = path) {
  # Extract the \examples section into a temporary R script; Rd2ex creates
  # no file at all when the Rd object has no examples.
  ex_path <- tempfile(fileext = ".R")
  tools::Rd2ex(path, ex_path)

  if (!file.exists(ex_path)) {
    return(invisible(FALSE))
  }

  # Evaluate in a child of the global environment so examples behave as if
  # run at top level, while keeping their bindings out of globalenv().
  env <- new.env(parent = globalenv())
  ok <- test_code(
    test = title,
    code = parse(ex_path, encoding = "UTF-8"),
    env = env,
    default_reporter = StopReporter$new(),
    skip_on_empty = FALSE
  )
  if (ok) succeed(path)
  invisible(ok)
}
e0d802afb34fc54591c93a3cd009404e942c7434
ab6987f85e3524dc0da16f376aea9ced88967722
/plot3.R
09badc8f7e8bf3e7847eee12ae9cea11e6fb90fc
[]
no_license
ddgthub/ExData_Plotting1
346d907104c0e8e63a33c5687eba16e5390e950c
e884308fcc3aafd53fa691d435cb0ab8ebfb9d71
refs/heads/master
2021-01-18T17:40:24.884666
2014-06-07T14:56:42
2014-06-07T14:56:42
null
0
0
null
null
null
null
UTF-8
R
false
false
2,331
r
plot3.R
###################################################################
#### Exploratory Data Analysis - Coursera - Project 1 - Plot 3 ####
###################################################################

## Download the raw data archive on first run only.
if (!file.exists("raw_data.zip")) {
  print("Downloading raw data, please wait as it may take a while depending on your connection... :)")
  data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(data_url, "raw_data.zip", method = "curl", mode = "wb")
}

## Extract the text file from the archive if it is not already present.
if (!file.exists("household_power_consumption.txt")) {
  print("Unzipping the raw data, please wait...")
  unzip("raw_data.zip")
}

## Keep only the header line plus the observations for 1-2 Feb 2007.
print("Reading the raw data, please wait...")
if (!file.exists("filtered.txt")) {
  ## Credit to Andreas from discussion forums for the grep part :)
  raw_conn <- file("household_power_consumption.txt", "r")
  cat(grep("(^Date)|(^[1|2]/2/2007)", readLines(raw_conn), value = TRUE),
      sep = "\n", file = "filtered.txt")
  ## Remove raw unfiltered data from the environment
  close(raw_conn)
}

## Build the data frame from the filtered file.
power_df <- read.table("filtered.txt", header = TRUE, colClasses = "character",
                       sep = ";", na.strings = "?")

## Revert the three sub-metering columns to numeric.
sub_met1 <- as.numeric(power_df$Sub_metering_1)
sub_met2 <- as.numeric(power_df$Sub_metering_2)
sub_met3 <- as.numeric(power_df$Sub_metering_3)

## Combine the date and time columns into POSIX timestamps.
timestamps <- strptime(paste(power_df$Date, power_df$Time),
                       format = "%d/%m/%Y %H:%M:%S")

## Use the English locale so axis labels are not localized.
Sys.setlocale("LC_TIME", "C")

## Plotting - no need to specify dimensions as 480x480px is default;
## background intentionally left white (also default) for legibility.
## 'antialias = "cleartype"' renders the font slightly better on PC.
print("Graph building, please wait...")
png("plot3.png", antialias = "cleartype")
plot(timestamps, sub_met1, type = "n", xlab = "",
     ylab = "Energy sub metering", lwd = 1)
lines(timestamps, sub_met1, col = "black")
lines(timestamps, sub_met2, col = "red")
lines(timestamps, sub_met3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1)
## Closing the device
dev.off()
227068be6bb1704594015ed3ec209853eb9f5db0
e5a9fb540ba9c62b9cf34999b17a0cdf4702c222
/makeskel.R
3b77d94dace748482b3343175f019ff56e76e612
[]
no_license
ubiquitousidea/stat662
1e13065a71151dada863c7355fdf6573332f95e7
1404f3803f18c36b04f12df1a33f5e5930eb7028
refs/heads/master
2021-05-03T22:24:21.087166
2018-02-20T04:25:54
2018-02-20T04:25:54
120,393,249
0
1
null
null
null
null
UTF-8
R
false
false
216
r
makeskel.R
# Build the package skeleton for CourseCollider from its source file.
library(roxygen2)

code_file <- "displaytimes.R"
source(code_file)

# Generate the package layout, then let roxygen write the docs.
package.skeleton("CourseCollider", code_files = code_file, force = TRUE)
roxygenise()
e45b5ec3fe2a2e636f47cc5c87549c673484ba87
a2175f2f375199359aca44d5e3b6815921d74f01
/man/cookie_server.Rd
d1a73d14e1d0fea1b23ffe55c4d0455650ce027d
[]
no_license
djfunksalot/cognitoR
f4d1d23b7634091f4ade5ba2a3a59fb331adc9ce
b455c49baa6da8a470fb42107a5c493ab9d85240
refs/heads/master
2022-12-13T07:13:19.009142
2020-08-21T00:47:54
2020-08-21T00:47:54
297,429,634
1
0
null
2020-09-21T18:40:12
2020-09-21T18:40:11
null
UTF-8
R
false
true
953
rd
cookie_server.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cookie_server.R
\name{cookie_server}
\alias{cookie_server}
\title{Cookie Shiny Module Server}
\usage{
cookie_server(input, output, session, cookie_name = "user", cookie_expire = 7)
}
\arguments{
\item{input}{shiny input}

\item{output}{shiny output}

\item{session}{shiny session}

\item{cookie_name}{character - Name of cookie to create}

\item{cookie_expire}{numeric - Expiration time of cookie}
}
\value{
list of reactive elements and functions to manage cookies
}
\description{
This server module returns a list with methods to interact with a cookie
created via JS. You have:
}
\details{
\itemize{
\item getCookie - Reactive function; returns the content of the cookie if it
exists, otherwise returns FALSE.
\item setCookie - Sets the content for a cookie. (Required param: list())
\item rmCookie - Removes a cookie
}
}
\examples{
cookie_server(NULL,NULL,NULL)
}
\author{
Pablo Pagnone
}
e5aca371a5bf9ef17c20824429e22da6aeb0156d
70dd6507c11fa1ecabe84d781e56dbd4ee93097b
/R/get_modern_co2.R
776ca4b48541e5971629cc7bc6d17243eaa74d68
[]
no_license
jrpriceUPS/Math160UPS
4604d17f810be65ccd1d2cd03e294da9c345ce4f
d0ff7cec6eda8d8da860d7d4b8d6c6fe9185683d
refs/heads/master
2023-06-09T11:55:19.386088
2023-06-08T18:11:25
2023-06-08T18:11:25
190,257,509
0
0
null
null
null
null
UTF-8
R
false
false
339
r
get_modern_co2.R
#' Get Modern CO2
#'
#' Loads the modern monthly CO2 record from the NOAA website, falling back
#' to the bundled backup data when the download fails
#' (Penny Rowe - Polar Project).
#'
#' @return The modern CO2 data as produced by `try_read_url()` — the parsed
#'   NOAA record on success, or `modern_co2_backup` on failure. TODO confirm
#'   the exact structure against `read_co2_url()`.
#' @export
get_modern_co2 <- function() {
  # .. Get the modern CO2 data from the NOAA website
  website_co2 <- "ftp://aftp.cmdl.noaa.gov/products/trends/co2/co2_mm_mlo.txt"
  # Return the result directly: the original ended on an assignment
  # (`modern_co2 <- ...`), which made the function's return value invisible.
  try_read_url(website_co2, read_co2_url, modern_co2_backup)
}
0ee9d66b3d5b3261f3858b397b1dfce37128a81f
15d417ccfacf20314589ed086c0b8daa2245cc1f
/libxti/t_listen/man.r
fa5627620f90d0b07b7cfeb0209cecf6897532e9
[]
no_license
paulohrpinheiro/tropix-libs
17e7faf0715b104fbf6f305074de76bd1cf08fc5
c41d33a6f95064ec6c0567a801048896be28c626
refs/heads/master
2021-12-03T19:21:09.621584
2014-11-06T17:47:26
2014-11-06T17:47:26
null
0
0
null
null
null
null
ISO-8859-1
R
false
false
3,520
r
man.r
.bp .he 'T_LISTEN (xti)'TROPIX: Manual de Referência'T_LISTEN (xti)' .fo 'Escrito em 27.07.92'Versão 3.0'Pag. %' .b NOME .in 5 .wo "t_listen -" recebe pedidos de conexão .br .in .sp .b SINTAXE .in 5 .(l #include <xti.h> int t_listen (int fd, T_CALL *call); .)l .in .sp .b DESCRIÇÃO .in 5 A função "t_listen" recebe pedidos de conexão de usuários remotos. .sp .(l +--------------------+------------+------------+ | Argumentos | Na chamada | No retorno | +--------------------+------------+------------+ | fd | x | / | | | | | | call->addr.maxlen | x | / | | call->addr.len | / | x | | call->addr.buf | x | (x) | | | | | | call->opt.maxlen | x | / | | call->opt.len | / | x | | call->opt.buf | x | (x) | | | | | | call->udata.maxlen | x | / | | call->udata.len | / | x | | call->udata.buf | x | (?) | | | | | | call->sequence | / | x | +--------------------+------------+------------+ .)l .sp O argumento "fd" indica o "endpoint" local que receberá os pedidos de conexão, e no retorno o argumento "call" conterá informações descrevendo o pedido recebido. O argumento "call" deve apontar para uma estrutura "T_CALL" que contém os seguintes membros: .sp .(l NETBUF addr; NETBUF opt; NETBUF udata; int sequence; .)l .sp No retorno, "addr" contém o endereço ("port" e endereço INTERNET) do usuário remoto que enviou o pedido, "opt" contém parâmetros específicos do protocolo associados à conexão requisitada, "udata" contém dados recebidos juntamente com o pedido de conexão, e "sequence" é um número que identifica univocamente o pedido de conexão. O valor de "sequence" possibilita o usuário a receber vários pedidos de conexão antes de responder a qualquer um deles. .sp Como esta função retorna valores nos campos "addr", "opt" e "udata" da estrutura "call", o campo "maxlen" de cada um deles deve ser inicializado antes da chamada à função, para indicar o tamanho disponível de cada uma das áreas. 
.sp Normalmente, "t_listen" executa no modo síncrono e espera a chegada de um pedido de conexão antes de retornar. No entanto, se o indicador O_NONBLOCK foi ligado (através de "t_open" ou "fcntl" (sys)), "t_listen" executa no modo assíncrono, apenas testando a presença de um pedido de conexão. Se nenhum está presente, retorna -1 e atribui o valor [TNODATA] a "t_errno". .in .sp .b 'DETALHES DE IMPLEMENTAÇÃO' .in 5 Esta implementação NÃO suporta a negociação de parâmetros específicos de protocolo nem a transmissão de dados durante o estabelecimento da conexão; portanto, no retorno "call->udata.len" e "call->opt.len" serão sempre NULOs. .in .sp .b VALOR DE RETORNO .r .in 5 Em caso de sucesso a função devolve o valor 0. Em caso contrário, devolve -1 e indica em "t_errno" a causa do erro. .in .sp .b VEJA TAMBÉM .r .in 5 .wo "(sys): " fcntl .br .wo "(xti): " t_accept, t_alloc, t_bind, t_connect, t_open, .br .wo " t_optmgmt, t_rcvconnect" .br .in .sp .(t .b REFERÊNCIAS .r .in 5 "X/OPEN Portability Guide: Networking Services", X/OPEN Company Ltd., Prentice Hall, New Jersey, 1st Edition, 1988. .)t .in .sp .b ESTADO .in 5 Efetivo. .in
56bd6905df2969ef7d963133cc2bf684595aec2e
525f2d1895f11e0e7ed15595f94d4e625c9e1c4d
/tests/testthat/test_OEIS_check.R
bcb95231524415795ece55cf96796d259448d715
[ "MIT" ]
permissive
EnriquePH/OEIS.R
fc391284392703905154a148992e25b9dd06c389
e9ad797dbd1e1f1529ce92dd5db085512e12a6b6
refs/heads/master
2023-03-17T05:48:45.987359
2023-03-12T13:28:38
2023-03-12T13:28:38
91,134,219
13
3
null
null
null
null
UTF-8
R
false
false
353
r
test_OEIS_check.R
# Tests for OEIS_check(): rejection of malformed IDs, acceptance of valid
# ones, and the class of the returned object. IDs are inlined rather than
# bound to file-level variables.
test_that("OEIS_check shows an error", {
  testthat::expect_error(OEIS_check("A002000Z"))
})

test_that("OEIS_check works", {
  testthat::expect_error(OEIS_check("A002000"), NA)
})

test_that("OEIS_check returns an object of the class OEIS_ID", {
  checked <- OEIS_check("A002000")
  testthat::expect_identical(class(checked), "OEIS_ID")
})
8ae2d201d2680b151ab762fbd91dbf891dc1cdb1
a9c6f8ceabbaa9e49708d18d3495e4a88a5800bd
/bioinfo.practice/man/reexports.Rd
00ea69dc79d1ee649b5b2722e83be5cb9f5dc1e4
[]
no_license
ajayarunachalam/course_bioinfo_training
20cf9bdbae0e947fcc65fc5faa08b8c0aa5e09e8
5f04bf80d3b158061c97d24fc7b82e6e69621144
refs/heads/master
2022-03-21T03:10:12.263617
2019-10-01T06:02:18
2019-10-01T06:02:18
null
0
0
null
null
null
null
UTF-8
R
false
true
462
rd
reexports.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fasta.R \docType{import} \name{reexports} \alias{reexports} \alias{read.fasta} \alias{base.freq} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{ape}{\code{\link[ape]{base.freq}}} \item{treeio}{\code{\link[treeio]{read.fasta}}} }}
38e0f25a05f0a58b2bf5091bada1df92e122ea1e
6388ffa8fb0df6e4b9ab2e5a977dda03abd6dd2a
/QuizWeek3.R
23a471c4e931f14cf27d08dc356c96443a74ed58
[]
no_license
harbutler/datasciencecoursera
e240a3c0e9640a54a3b921a40858d920f7e746a1
45bbbfa2ccc0f93a26f62335fea010fe40368053
refs/heads/master
2021-01-15T14:32:00.402611
2014-09-21T21:17:40
2014-09-21T21:17:40
null
0
0
null
null
null
null
UTF-8
R
false
false
740
r
QuizWeek3.R
library(datasets)
data(iris)
?iris

# Split iris by species, then look at virginica's Sepal.Length mean.
by_species <- split(iris, iris$Species)
mean(by_species$virginica$Sepal.Length)
lapply(by_species$virginica, mean, na.rm = TRUE)

# Column means of the four numeric measurements in one call.
apply(iris[, 1:4], 2, mean)

library(datasets)
data(mtcars)
?mtcars

# Average miles per gallon (mpg) for each cylinder count (cyl).
sapply(split(mtcars$mpg, mtcars$cyl), mean)

# Average horsepower (hp) for each cylinder count (cyl).
avghp <- sapply(split(mtcars$hp, mtcars$cyl), mean)

# Absolute difference between the 8-cylinder and 4-cylinder averages.
avghp[["8"]] - avghp[["4"]]
617ff9387a9a4dfa18d522cfc62cff033f5a8011
65938f7359b84cb41f0d1d5dc1ee5964bc7f6147
/Algorithms/Regression/MultiLinear Regression/Computer Data/Computer_MultiLinear.R
96a23f3eb28133fca79fb83e0794191f7287c046
[]
no_license
Rashmi1404/Data-Science-R-code
d9565ad1d0fe62dfd25380b9367dd5cff17a769b
d7e1120569ba194bab7af947993e87c43eff6ef6
refs/heads/master
2020-12-29T14:52:21.155154
2020-08-07T15:17:16
2020-08-07T15:17:16
238,642,421
1
0
null
null
null
null
UTF-8
R
false
false
2,201
r
Computer_MultiLinear.R
# Multiple linear regression: predict computer price from its specifications.

library(ggplot2)

# Load the dataset and drop the first column (it is just a row id).
pc <- read.csv("C:\\Users\\Rashmi\\Desktop\\Data Science\\ASSIGNMENTS\\multilinear\\Computer_Data.csv")
pc <- pc[, -1]
attach(pc)

# Quick exploration.
head(pc)
str(pc)
cor(pc[, -c(6, 7, 8, 10)])  # correlations over the numeric columns only
tail(pc)
dim(pc)      # number of rows and columns
names(pc)    # column names
summary(pc)

# Standard deviations of the numeric variables.
sd(price)
sd(speed)
sd(hd)
sd(ram)
sd(screen)

# Variances of the numeric variables.
var(price)
var(speed)
var(hd)
var(ram)
var(screen)

windows()
pairs(pc)

# Encode the yes/no columns as factors on the data frame itself.
# (The original assigned to bare names, which only created detached
# globals that the models never saw.)
pc$cd <- as.factor(pc$cd)
pc$multi <- as.factor(pc$multi)
pc$premium <- as.factor(pc$premium)

# Distribution plots.
ggplot(pc) + geom_histogram(aes(hd), binwidth = 50, fill = "purple")  # right skewed
ggplot(pc) + geom_histogram(aes(trend), binwidth = 0.5, fill = "black") + xlab("Trend")  # roughly normal
ggplot(pc) + geom_histogram(aes(ram), binwidth = 0.9, fill = "black") + xlab("ram")
ggplot(pc) + geom_histogram(aes(price), fill = "green", binwidth = 50) + xlab("Price")

# Baseline multiple linear model on the raw price.
model <- lm(price ~ ., data = pc)
summary(model)
# BUG FIX: the RMSE was computed against an undefined `Profit` variable;
# the response in this dataset is `price`.
model_rmse <- sqrt(mean((price - model$fit)^2, na.rm = TRUE))
model_rmse

# Log-transformed response.
log_model <- lm(log(price) ~ ., data = pc)
summary(log_model)
log_model_rmse <- sqrt(mean((price - exp(log_model$fit))^2, na.rm = TRUE))
log_model_rmse

# "SQRT" model.
# NOTE(review): despite the name this refits the raw price exactly like
# `model`; presumably lm(sqrt(price) ~ .) was intended -- confirm before
# relying on this comparison.
sqrt_model <- lm(price ~ ., data = pc)
summary(sqrt_model)
sqrt_model_rmse <- sqrt(mean((price - sqrt_model$fitted.values)^2))
sqrt_model_rmse

# Compare the candidate models by RMSE.
rmse_table <- data.frame(
  "Model Name" = c("Multi Linear Model", "Log_Model", "sqrt_model"),
  "RMSE" = c(model_rmse, log_model_rmse, sqrt_model_rmse)
)
rmse_table
# The log model has the lowest RMSE, so it is preferred for prediction.
87fbe41c23cb4b6c81a4ca7c9ed8ead2c392ee73
326a2d966d5f46b463e6a5771e12157aa4d4ae31
/R/gen.graph.R
02d50aef59b9861d924b76f9d8e3e3f7595f007c
[]
no_license
frenchja/SAPATools
7abcbb27c5b238ecd523e55065bc1d49b6520431
513ce15101947773c09fe26ef17dd0c8f2363688
refs/heads/master
2020-06-05T00:39:30.210915
2015-02-19T04:28:40
2015-02-19T04:28:40
10,363,871
0
0
null
null
null
null
UTF-8
R
false
false
1,167
r
gen.graph.R
generate.scatter <- function(cor, n.obs = 500, add.text = FALSE, save = FALSE) { # Author: Jason A. French # Args: # cor = List of desired correlations # n.obs = Number of observations # add.text = Annotate correlation to graph # save = Save graph as PNG to current directory n.obs <- n.obs # Generate random correlation if not specified if(missing(cor)){ cor <- runif(n = 1, min = -1, max = 1) } lapply(X = cor,FUN = function(cor, ...){ # Generate matrix code modified from r-bloggers.com R = matrix(cbind(1,cor,cor,1),nrow=2) U = t(chol(R)) nvars = dim(U)[1] random.normal = matrix(rnorm(nvars*n.obs,0,1), nrow=nvars, ncol=n.obs); X = U %*% random.normal newX = t(X) raw = as.data.frame(newX) library(ggplot2) p <- ggplot(raw, aes(x = V1, y = V2)) + geom_point() if(isTRUE(add.text)){ cor.text <- paste('R = ',as.character(round(cor(raw)[2],2)),sep='') p <- p + annotate("text", x = 2, y = -1, label = cor.text,size=10) } print(cor(raw)[2]) if(isTRUE(save)){ ggsave(filename = paste(cor,'.png',sep=''),plot = p) } else { return(p) } }) }
a8db72c76d74d7b1a238b9c7b49e6cc737fee34a
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/phenocamr/examples/daylength.Rd.R
06009f236d0450e4bac0e5a52269a9246c329e68
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
343
r
daylength.Rd.R
library(phenocamr) ### Name: daylength ### Title: Calculates day length (in hours) and the solar elevation ### Aliases: daylength ### Keywords: ephemerids solar, ### ** Examples ## No test: # calcualte the hours of sunlight and solar elevation on day of year 1 # and latitude 51 ephem <- daylength(1, 51) print(ephem) ## End(No test)
0d09db81713053468e0371b69ca60951e7690420
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.compute/man/proton_get_template_sync_config.Rd
4330b7b05689c6c4d3303771ff539ead97330111
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
608
rd
proton_get_template_sync_config.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/proton_operations.R \name{proton_get_template_sync_config} \alias{proton_get_template_sync_config} \title{Get detail data for a template sync configuration} \usage{ proton_get_template_sync_config(templateName, templateType) } \arguments{ \item{templateName}{[required] The template name.} \item{templateType}{[required] The template type.} } \description{ Get detail data for a template sync configuration. See \url{https://www.paws-r-sdk.com/docs/proton_get_template_sync_config/} for full documentation. } \keyword{internal}
fbf12a0b8327604460e56df8b9f402589a4778bf
9ded8c1e3116b174bcc4a00b0fdab700f2f9ce3c
/R/mixture.R
2715bbbd72f3e3feb6ec12cb6b263e7df2198d8c
[ "Apache-2.0" ]
permissive
njtierney/greta
3bcf8d69b86caf555aac11924f3a48a1232d178c
93aaf361c04591a0d20f73c25b6e6693023482fd
refs/heads/master
2023-04-29T03:07:48.845793
2022-09-09T02:37:39
2022-09-09T02:37:39
303,113,354
3
0
NOASSERTION
2023-04-17T02:08:28
2020-10-11T12:14:11
R
UTF-8
R
false
false
11,842
r
mixture.R
#' @name mixture #' @title mixtures of probability distributions #' #' @description `mixture` combines other probability distributions into a #' single mixture distribution, either over a variable, or for fixed data. #' #' @param ... variable greta arrays following probability distributions (see #' [distributions()]); the component distributions in a mixture #' distribution. #' #' @param weights a column vector or array of mixture weights, which must be #' positive, but need not sum to one. The first dimension must be the number #' of distributions, the remaining dimensions must either be 1 or match the #' distribution dimension. #' #' @param dim the dimensions of the greta array to be returned, either a scalar #' or a vector of positive integers. #' #' @details The `weights` are rescaled to sum to one along the first #' dimension, and are then used as the mixing weights of the distribution. #' I.e. the probability density is calculated as a weighted sum of the #' component probability distributions passed in via `\dots` #' #' The component probability distributions must all be either continuous or #' discrete, and must have the same dimensions. 
#' #' @export #' @examples #' \dontrun{ #' # a scalar variable following a strange bimodal distibution #' weights <- uniform(0, 1, dim = 3) #' a <- mixture(normal(-3, 0.5), #' normal(3, 0.5), #' normal(0, 3), #' weights = weights #' ) #' m <- model(a) #' plot(mcmc(m, n_samples = 500)) #' #' # simulate a mixture of poisson random variables and try to recover the #' # parameters with a Bayesian model #' x <- c( #' rpois(800, 3), #' rpois(200, 10) #' ) #' #' weights <- uniform(0, 1, dim = 2) #' rates <- normal(0, 10, truncation = c(0, Inf), dim = 2) #' distribution(x) <- mixture(poisson(rates[1]), #' poisson(rates[2]), #' weights = weights #' ) #' m <- model(rates) #' draws_rates <- mcmc(m, n_samples = 500) #' #' # check the mixing probabilities after fitting using calculate() #' # (you could also do this within the model) #' normalized_weights <- weights / sum(weights) #' draws_weights <- calculate(normalized_weights, draws_rates) #' #' # get the posterior means #' summary(draws_rates)$statistics[, "Mean"] #' summary(draws_weights)$statistics[, "Mean"] #' #' # weights can also be an array, giving different mixing weights #' # for each observation (first dimension must be number of components) #' dim <- c(5, 4) #' weights <- uniform(0, 1, dim = c(2, dim)) #' b <- mixture(normal(1, 1, dim = dim), #' normal(-1, 1, dim = dim), #' weights = weights #' ) #' } mixture <- function(..., weights, dim = NULL) { distrib("mixture", list(...), weights, dim) } mixture_distribution <- R6Class( "mixture_distribution", inherit = distribution_node, public = list( weights_is_log = FALSE, initialize = function(dots, weights, dim) { n_distributions <- length(dots) if (n_distributions < 2) { msg <- cli::format_error( c( "{.fun mixture} must be passed at least two distributions", "The number of distributions passed was: {.val {n_distributions}}" ) ) stop( msg, call. 
= FALSE ) } # check the dimensions of the variables in dots dim <- do.call(check_dims, c(dots, target_dim = dim)) weights <- as.greta_array(weights) weights_dim <- dim(weights) # use log weights if available if (has_representation(weights, "log")) { weights <- representation(weights, "log") self$weights_is_log <- TRUE } # weights should have n_distributions as the first dimension if (weights_dim[1] != n_distributions) { msg <- cli::format_error( c( "the first dimension of weights must be the number of \\ distributions in the mixture ({.val {n_distributions}})", "However it was {.val {weights_dim[1]}}" ) ) stop( msg, call. = FALSE ) } # drop a trailing 1 from dim, so user doesn't need to deal with it # Ugh, need to get rid of column vector thing soon. weights_extra_dim <- dim n_extra_dim <- length(weights_extra_dim) if (weights_extra_dim[n_extra_dim] == 1) { weights_extra_dim <- weights_extra_dim[-n_extra_dim] } # remainder should be 1 or match weights_extra_dim w_dim <- weights_dim[-1] dim_1 <- length(w_dim) == 1 && w_dim == 1 dim_same <- all(w_dim == weights_extra_dim) if (!(dim_1 | dim_same)) { msg <- cli::format_error( c( "the dimension of weights must be either \\ {.val {n_distributions}x1} or \\ {.val {n_distributions}x{paste(dim, collapse = 'x')}}", " but was {.val {paste(weights_dim, collapse = 'x')}}" ) ) stop( msg, call. = FALSE ) } dot_nodes <- lapply(dots, get_node) # get the distributions and strip away their variables distribs <- lapply(dot_nodes, member, "distribution") lapply(distribs, function(x) x$remove_target()) # also strip the distributions from the variables lapply(dot_nodes, function(x) x$distribution <- NULL) # check the distributions are all either discrete or continuous discrete <- vapply(distribs, member, "discrete", FUN.VALUE = logical(1)) if (!all(discrete) & !all(!discrete)) { msg <- cli::format_error( "cannot construct a mixture from a combination of discrete and \\ continuous distributions" ) stop( msg, call. 
= FALSE ) } # check the distributions are all either multivariate or univariate multivariate <- vapply(distribs, member, "multivariate", FUN.VALUE = logical(1) ) if (!all(multivariate) & !all(!multivariate)) { msg <- cli::format_error( "cannot construct a mixture from a combination of multivariate and \\ univariate distributions" ) stop( msg, call. = FALSE ) } # ensure the support and bounds of each of the distributions is the same truncations <- lapply(distribs, member, "truncation") bounds <- lapply(distribs, member, "bounds") truncated <- !vapply(truncations, is.null, logical(1)) supports <- bounds supports[truncated] <- truncations[truncated] n_supports <- length(unique(supports)) if (n_supports != 1) { supports_text <- vapply( X = unique(supports), FUN = paste, collapse = " to ", FUN.VALUE = character(1) ) msg <- cli::format_error( c( "component distributions must have the same support", "However the component distributions have different support:", "{.val {paste(supports_text, collapse = ' vs. ')}}" ) ) stop( msg, call. 
= FALSE ) } # get the maximal bounds for all component distributions bounds <- c( do.call(min, bounds), do.call(max, bounds) ) # if the support is smaller than this, treat the distribution as truncated support <- supports[[1]] if (identical(support, bounds)) { truncation <- NULL } else { truncation <- support } self$bounds <- support # for any discrete ones, tell them they are fixed super$initialize("mixture", dim, discrete = discrete[1], multivariate = multivariate[1] ) for (i in seq_len(n_distributions)) { self$add_parameter(distribs[[i]], glue::glue("distribution {i}"), shape_matches_output = FALSE ) } self$add_parameter(weights, "weights") }, create_target = function(truncation) { vble(self$bounds, dim = self$dim) }, tf_distrib = function(parameters, dag) { # get information from the *nodes* for component distributions, not the tf # objects passed in here # get tfp distributions, truncations, & bounds of component distributions distribution_nodes <- self$parameters[names(self$parameters) != "weights"] truncations <- lapply(distribution_nodes, member, "truncation") bounds <- lapply(distribution_nodes, member, "bounds") tfp_distributions <- lapply(distribution_nodes, dag$get_tfp_distribution) names(tfp_distributions) <- NULL weights <- parameters$weights # use log weights if available if (self$weights_is_log) { log_weights <- weights } else { log_weights <- tf$math$log(weights) } # normalise weights on log scale log_weights_sum <- tf$reduce_logsumexp( log_weights, axis = 1L, keepdims = TRUE ) log_weights <- log_weights - log_weights_sum log_prob <- function(x) { # get component densities in an array log_probs <- mapply( dag$tf_evaluate_density, tfp_distribution = tfp_distributions, truncation = truncations, bounds = bounds, MoreArgs = list(tf_target = x), SIMPLIFY = FALSE ) log_probs_arr <- tf$stack(log_probs, 1L) # massage log_weights into the same shape as log_probs_arr dim_weights <- dim(log_weights) extra_dims <- 
unlist(dim(log_probs_arr)[-seq_along(dim_weights)]) for (dim in extra_dims) { ndim <- length(dim(log_weights)) log_weights <- tf$expand_dims(log_weights, ndim) if (dim > 1L) { tiling <- c(rep(1L, ndim), dim) tf_tiling <- tf$constant(tiling, shape = list(ndim + 1)) log_weights <- tf$tile(log_weights, tf_tiling) } } # do elementwise addition, then collapse along the mixture dimension log_probs_weighted_arr <- log_probs_arr + log_weights tf$reduce_logsumexp(log_probs_weighted_arr, axis = 1L) } sample <- function(seed) { # draw samples from each component samples <- lapply(distribution_nodes, dag$draw_sample) names(samples) <- NULL ndim <- length(self$dim) # in univariate case, tile log_weights to match dim, so each element can # be selected independently (otherwise each row is kept together) log_weights <- tf$squeeze(log_weights, 2L) if (!self$multivariate) { for (i in seq_len(ndim)) { log_weights <- tf$expand_dims(log_weights, 1L) } log_weights <- tf$tile(log_weights, c(1L, self$dim, 1L)) } # for each observation, select a random component to sample from cat <- tfp$distributions$Categorical(logits = log_weights) indices <- cat$sample(seed = seed) # how many dimensions to consider a batch differs beetween multivariate # and univariate collapse_axis <- ndim + 1L n_batches <- ifelse(self$multivariate, 1L, collapse_axis) # combine the random components on an extra last dimension samples_padded <- lapply(samples, tf$expand_dims, axis = collapse_axis) samples_array <- tf$concat(samples_padded, axis = collapse_axis) # extract the relevant component indices <- tf$expand_dims(indices, n_batches) draws <- tf$gather(samples_array, indices, axis = collapse_axis, batch_dims = n_batches ) draws <- tf$squeeze(draws, collapse_axis) draws } list(log_prob = log_prob, sample = sample) } ) ) mixture_module <- module(mixture_distribution = mixture_distribution)
f3a69931ce9edba0a51e165dccfcbdd1b1b14a0a
320ddefc84d992475db8b83befde46e2b780314f
/R/Kullback.R
bee304319ea1e80ee9721cd92276edb3aa9d06c2
[]
no_license
cran/asbio
bca146e402058cd67ff5fc42423cb0c0544f942b
3cb01b7cb1a8dec60a5f809f91bc460a6566954d
refs/heads/master
2023-08-31T23:43:08.304864
2023-08-20T02:22:36
2023-08-20T04:30:48
17,694,492
3
2
null
null
null
null
UTF-8
R
false
false
1,239
r
Kullback.R
Kullback <- function(Y,X) { X <- factor(X) n <- length(X) # n.j <- as.matrix(summary(X)) n.j <- summary(X) p <- ncol(Y) g <- nlevels(X) lev.X <- levels(X) # # Vj will contain the within-group dispersion matrices of individual groups Vj <- vector("list", g) log.ratios <- NA det.V.j <- NA # Compute |V| V <- matrix(0,p,p) for(k in 1:g) V <- V + cov(Y[X==lev.X[k],])*(n.j[k]-1) V <- V/(n-g) det.V <- det(V) # Compute Kulback chi-square Chi <- 0 for(k in 1:g) { V.group <- cov(Y[X==lev.X[k],]) det.V.gr <- det(V.group) log.rat <- log(det.V/det.V.gr) Chi <- Chi + log.rat*((n.j[k]-1)/2) Vj[k] <- list(V.group) log.ratios <- c(log.ratios, log.rat) det.V.gr <- c(det.V.j, det.V.gr) } # p.val <- pchisq(Chi,(g-1)*p*(p+1)/2,lower.tail=FALSE) res <- data.frame(Chi=Chi,df=(g-1)*p*(p+1)/2,p.val=p.val) names(res) <- c("Chi*","df","P(Chi>Chi*)") head<-"Kullback test for equal covariance matrices" out <- list(Kullback=res, V=V, Vj=Vj, log.ratios=log.ratios[-1], det.V=det.V, det.V.j=det.V.j[-1], head=head) class(out)<-"kback" out } print.kback<-function(x,digits= max(3, getOption("digits")), ...){ cat("\n") cat(x$head,"\n") rq<-x$Kullback print(rq,digits=digits) cat("\n") invisible(x) }
0c445963e06693d63510ee184e5d2a4b1c61df6a
fea3aca116112b1f7f0fbad2781c01d2edef213f
/libs/r/tests/testthat/test_text_fragment_dto.R
bf81a8e9895b25dd58d02765db4474c810f7f9ce
[ "MIT" ]
permissive
Scripta-Qumranica-Electronica/SQE_API_Connectors
a708c0d6f185a047bb1d47f7099abe0f2155171a
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
refs/heads/main
2022-12-28T00:19:32.576336
2020-10-14T09:26:48
2020-10-14T09:26:48
302,660,443
0
0
null
null
null
null
UTF-8
R
false
false
960
r
test_text_fragment_dto.R
# Automatically generated by openapi-generator (https://openapi-generator.tech) # Please update as you see appropriate context("Test TextFragmentDTO") model.instance <- TextFragmentDTO$new() test_that("textFragmentId", { # tests for the property `textFragmentId` (integer) # uncomment below to test the property #expect_equal(model.instance$`textFragmentId`, "EXPECTED_RESULT") }) test_that("textFragmentName", { # tests for the property `textFragmentName` (character) # uncomment below to test the property #expect_equal(model.instance$`textFragmentName`, "EXPECTED_RESULT") }) test_that("editorId", { # tests for the property `editorId` (integer) # uncomment below to test the property #expect_equal(model.instance$`editorId`, "EXPECTED_RESULT") }) test_that("lines", { # tests for the property `lines` (array[LineDTO]) # uncomment below to test the property #expect_equal(model.instance$`lines`, "EXPECTED_RESULT") })
371e08c6772cf12c22144ac5fb716d4cc69848de
24739a6f3be1ea9e4fc14b473a1f870de86fea8f
/KMR0218/main3.R
7380464bf251bbbdefd78c71715027c7dbfec4c9
[]
no_license
IrisTeng/CVEK-boot
46483139bba153004c4f9a8c514d152004802be1
e6a7defa34313ce6370137cdcdecfe6f37d94c89
refs/heads/master
2021-07-06T03:09:24.542339
2019-04-01T19:52:14
2019-04-01T19:52:14
114,708,834
3
1
null
2017-12-25T04:54:17
2017-12-19T02:18:57
R
UTF-8
R
false
false
3,349
r
main3.R
# Simulation driver for the CVEK kernel interaction tests, run in parallel
# with snowfall. Depends on project sources for KernelBoot / gpr / helpers.

library(mvtnorm)
library(MASS)
library(snowfall)
library(psych)
source('./source/KernelGenerate.R')
source('./source/LooCV.R')
source('./source/gpr.R')
source('./source/util.R')

# One simulation replicate. `temptemp` is a dummy argument so the function
# can be driven by sfSapply over 1:M.
# NOTE(review): reads the globals `data`, `n`, `formula`, `label.names`,
# `method`, `l`, `B` that are sfExport-ed to the workers below -- confirm
# the export list stays in sync.
verify <- function(temptemp){
  rawData <- data
  # Jitter the response so each replicate sees a fresh noise draw.
  rawData[, 1] <- rawData[, 1] + rnorm(n, 0, 0.01)
  res <- KernelBoot(formula, label.names, data = rawData,
                    method = method, l=l,
                    lambda = exp(seq(-5, 5, 1)), B = B)
  #gaussian.pvalue <- res[[1]]
  result.test <- res[[2]]
  bs.test <- result.test[1, ]       # bootstrap-test row
  perturb.test <- result.test[2, ]  # perturbation-test row
  original.test <- res[[3]]
  return(c(original.test,bs.test,perturb.test))
}

# produce plot similar to the CVEK paper.
# (x-axis: interaction effect size, y-axis: ratio of p-value smaller than 0.05).
# vary the interaction effect size between seq(0, 0.1, 0.02),
# vary the effect size multiplication factor within (1, 3, 5, 10),
# vary the noise level within (0.05, 0.1, 0.25, 0.5, 1).
# Keep bootstrap sample size at 200. Keep b11 = 2 (lower bound of effect size).
# number of repetition M fix at 1000.
# Repeat such experiment for linear and ridge regression.

sfInit(parallel = T, cpus = 20)
sfLibrary(mvtnorm)
sfLibrary(MASS)
sfLibrary(psych)
sfSource('./source/KernelGenerate.R')
sfSource('./source/LooCV.R')
sfSource('./source/gpr.R')
sfSource('./source/util.R')

formula <- Y ~ X1 + X2
label.names <- list(X1 = c("x1", "x2"), X2 = c("x3", "x4"))
result <- NULL

# Sweep over sample sizes; the single-valued inner loops are placeholders
# kept so more settings (M, method, effect size, ...) can be added later.
for(n in c(50,100,200,300,400,500)){
  for (M in c(100)){
    for (method in c("quadratic")){
      for(int.effect in 0){
        for(l in 1){
          for (B in c(100)){
            l <- 1
            # Simulate two independent blocks of standard-normal covariates.
            X1 <- rmvnorm(n, mean = rep(0, length(label.names[[1]])),
                          sigma = diag(length(label.names[[1]])))
            X2 <- rmvnorm(n, mean = rep(0, length(label.names[[2]])),
                          sigma = diag(length(label.names[[2]])))
            Kern <- KernelGenerate(method, l = 1)
            w1 <- rnorm(n)
            w2 <- w1
            K1 <- Kern(X1, X1)
            K2 <- Kern(X2, X2)
            # Main-effects-only signal (int.effect = 0), scaled to unit norm.
            h0 <- K1 %*% w1 + K2 %*% w2
            h0 <- h0 / sqrt(sum(h0 ^ 2))
            bias <- rnorm(1)
            Y <- h0 + bias + rnorm(n, 0, 0.01)
            data<- as.data.frame(cbind(Y, X1, X2))
            colnames(data) <- c("Y", label.names[[1]], label.names[[2]])
            # Ship everything the workers need, then run M replicates.
            sfExport("formula", "label.names", "int.effect", "n", "B", "M",
                     "l", "method", "KernelBoot", "OriginalData2",
                     "GenericFormula", "gpr", "MultiScore2", "data")
            system.time(res <- sfSapply(1 : M, verify))
            # Append raw replicate results (one row per replicate).
            write.table(t(res), file = "quadratic_stat.txt",
                        row.names = F, col.names = F, append = T)
            #res2 <- apply(res, 1, function(x){sum(x < 0.05) / M})
            #result <- rbind(result, c(int.effect, l, method, n, res2))
            #cat(c(int.effect, l, method, n, res2),
            #    file = "simulation_power.txt", append = T, "\n")
            cat("Finished:interaction effect size=", int.effect,
                "n=", n, "l=", l, "\n")
          }
        }
      }
    }
  }
}
#write.csv(result, file = "simulation_power.csv", row.names = F, quote = F)
99d488dcfb00e24193623766a3e14dc29381d669
0a906cf8b1b7da2aea87de958e3662870df49727
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609958462-test.R
6ced99da30c9fb48dc13a30f4fd3a1def0407bf3
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
244
r
1609958462-test.R
testlist <- list(x = structure(c(1.25160412001172e-308, -Inf, -Inf, 1.25214515436931e-308, 8.27634313663787e-317, 2.70270605950211e+257, 1.02078771303499e+258 ), .Dim = c(7L, 1L))) result <- do.call(borrowr:::matchesToCor,testlist) str(result)
964b1cea8415026529f715d0223d47fc3cf65806
9b13fa9aa2ea7ddb9f3bf5900b322d06771d2932
/Q8.R
94bac76123c4e0d61066659e5aaf9e8f95cd5c1f
[]
no_license
driegert/stat854project
405eabaa5011c898ff6bf245772d5c111a65cd95
6004fd28f22fb271fccee0676ee5a3edb2c86f3e
refs/heads/master
2021-01-23T07:16:05.664165
2012-05-29T14:41:13
2012-05-29T14:41:13
null
0
0
null
null
null
null
UTF-8
R
false
false
507
r
Q8.R
# Question 8 ############# # This function ONLY works for an AR2 ... # I probably should have generalized it. Q8 <- function(y.t, phi){ N <- length(y.t) M <- 2^(floor(log2(N)) + 2) est <- mt( y.t, N = N, NW = 10, K = 20 ) f <- seq(0, 1/(2*10), 1/(10*M)) corr <- 1 - (2*phi[1]*cos(2*pi*f)) - (2*phi[2]*cos(4*pi*f)) + (phi[1])^2 + (phi[2])^2 + (2*phi[1]*phi[2]*cos(2*pi*f)) spec.x <- est$data.mt / corr H <- est$data.mt / spec.x list(freq=f, res.spec=est, data.corr=spec.x, H=H) }
406e6eeeec51b4c07a134c20e040eb0dd23a1953
a4909a7ff2801276c55b597f117c3afad6422424
/2011MC04_Assignment_5/Q2.1.R
898432385bcda8ac012f6295adcbb69f1d2e99b0
[]
no_license
Avinash531997/Simulation-with-R-
bc962aa91b66ab677773e8a400108d9d9364f106
86cc5284103d205e5ed9ff68e39ff6771e05a3d1
refs/heads/main
2023-04-01T23:04:17.908538
2021-04-03T17:34:55
2021-04-03T17:34:55
342,793,433
0
0
null
null
null
null
UTF-8
R
false
false
861
r
Q2.1.R
#2011MC04 #Avinash Singh n = 5000 x =numeric(0) X1 =numeric(0) X2 =numeric(0) Sample_Generation <- function(){ for(i in 1:n){ u = runif(1) x[i] <- u } return(x) } Generated_Data1 <- Sample_Generation() options(max.print=999999) print(Generated_Data1) Generated_Data2 <- Sample_Generation() options(max.print=999999) print(Generated_Data2) for(i in 1:length(Generated_Data1)) { X1[i]<- ((-2*log(Generated_Data1[i],base=exp(1)))**0.5)*cos(2*3.14*Generated_Data2[i]) X2[i]<- ((-2*log(Generated_Data1[i],base=exp(1)))**0.5)*sin(2*3.14*Generated_Data2[i]) } X1 X2 Y1 = dnorm(X1, mean(X1), sd(X1)) # Plot the graph. plot(X1, Y1) Y2 = dnorm(X2, mean(X2), sd(X2)) # Plot the graph. plot(X2, Y2) #Sample Mean and Variance mean(X1) var(X1) #Sample Mean and Variance mean(X2) var(X2)
2d24a7e2f0161aad733fe3272c805a4e1d12aa00
5dba9ec7bfa32fd4b2c0e24dd2a00e1663e75429
/data/functions/find_vertex_based_on_centroid_function.R
a33dfc039368a0dc63952ac461dd558fa1ead774
[]
no_license
JRunnebaum/habitat_overlap
2a0086d15062e3b3647e3be0e37d14d1676a39f3
de764e7c8d6b03ebfc2dd0f1158765cfc48f96b6
refs/heads/master
2022-03-12T14:35:24.860140
2019-11-11T13:18:33
2019-11-11T13:18:33
175,312,771
0
0
null
null
null
null
UTF-8
R
false
false
892
r
find_vertex_based_on_centroid_function.R
#find the corners of the grid find_vertex_based_on_centroid = function(x, lon.length = 0.05, lat.width = 0.05) { #this function is only for finding vertex of a square based on its centroid #x, longitude/latitude of the center points, #which must be a matrix or data frame of 2 columns(first one is lon, second is lat) #lon.length and lat.width are the length and width of the square vertex.dir = matrix(c(1, 1, 1, -1, -1, -1, -1, 1, 1, 1), nr = 5, byrow = T) vertex.loc = c(lon.length, lat.width) * vertex.dir / 2 vertex.coors = as.matrix(x) + matrix(vertex.loc[1,], nr = dim(x)[1], nc = 2, byrow = T) for (l in 2:5){ Temp = x + matrix(vertex.loc[l,], nr = dim(x)[1], nc = 2, byrow = T) vertex.coors = cbind(vertex.coors, Temp) } colnames(vertex.coors) = c(paste0("vertex", rep(1:5, each = 2), rep(c("Lon", "Lat"), times = 5))) return(as.matrix(vertex.coors)) }
9e92aefaad2adb7706b44c32b2ac56808fed1330
b7ea8c0a96aedca03fd24d0548d3a0274db50fc4
/affair.R
99aba28e217c354bfc57589edce2a0b35ea74d51
[]
no_license
aksaannamathew/Logistic-
49a56221c27bda0c6cd2184475e28efca545f419
5fe765aa8346d8ad3bdad4031032fea1a3d3941e
refs/heads/master
2022-12-26T09:29:46.436473
2020-09-03T00:03:29
2020-09-03T00:03:29
276,342,059
0
1
null
2020-10-01T05:24:01
2020-07-01T10:00:56
R
UTF-8
R
false
false
2,429
r
affair.R
# Logistic regression on the affairs dataset, with class rebalancing (ROSE)
# and ROC evaluation (ROCR).

install.packages("readr")
library(readr)
install.packages("ROSE")
library(ROSE)
install.packages("ROCR")
library(ROCR)

aff <- read.csv("C:\\Users\\91755\\Desktop\\Assignment\\8 - Logistic\\Logistic regression\\affairs.csv")
attach(aff)
View(aff)

sum(is.na(aff$affairs))  # no missing values
str(aff)
summary(aff)

# Creating dummy variables for gender and children.
aff['Gend'] <- as.integer(factor(aff$gender, levels = c("male", "female"), labels = c(1, 2)))
aff['Child'] <- as.integer(factor(aff$children, levels = c("yes", "no"), labels = c(1, 2)))
View(aff)

# Binary target: any affair ('1') vs none ('0').
aff["aff.air"] <- ifelse(aff$affairs > 0, '1', '0')
aff <- subset(aff, select = -c(affairs, gender, children))
attach(aff)
View(aff)

table(aff$aff.air)
prop.table(table(aff$aff.air))  # imbalanced data

# Rebalance via over-, under-, and combined sampling.
aff_balanced_over <- ovun.sample(aff.air ~ ., data = aff, method = "over", N = 902)$data
table(aff_balanced_over$aff.air)
aff_balanced_under <- ovun.sample(aff.air ~ ., data = aff, method = "under", N = 300, seed = 1)$data
table(aff_balanced_under$aff.air)
data_balanced_both <- ovun.sample(aff.air ~ ., data = aff, method = "both", p = 0.5, N = 601, seed = 222)$data
table(data_balanced_both$aff.air)
summary(data_balanced_both)
attach(data_balanced_both)
plot(data_balanced_both)  # graphical representation

# Model building.
model <- glm(factor(aff.air) ~ ., data = aff, family = "binomial")  # redundant double factor() removed
summary(model)
model_aff1 <- glm(factor(aff.air) ~ yearsmarried + religiousness + rating, family = "binomial")
summary(model_aff1)
model_aff2 <- glm(factor(aff.air) ~ ., data = data_balanced_both, family = "binomial")
summary(model_aff2)
model_aff3 <- glm(factor(aff.air) ~ religiousness + rating + Child, data = data_balanced_both, family = "binomial")
summary(model_aff3)
exp(coef(model_aff3))  # odds ratios
table(data_balanced_both$aff.air)

# Confusion matrix at a 0.5 cutoff.
# BUG FIX: predict.glm takes `newdata`, not `data`; the misspelled argument
# was silently ignored (harmless here only because model_aff3 was fitted on
# data_balanced_both anyway).
prob <- predict(model_aff3, type = c("response"), newdata = data_balanced_both)
confusion <- table(prob > 0.5, data_balanced_both$aff.air)
confusion
TPR = 208/(98+208)
TPR  # 0.6797386
FPR = 116/(116+179)
FPR  # 0.3932203

# Model accuracy.
Accuracy <- sum(diag(confusion)) / sum(confusion)
Accuracy  # 0.6439268
Error <- 1 - Accuracy
Error  # 0.3560732

# ROC curve.
rocrpred <- prediction(prob, data_balanced_both$aff.air)
rocrperf <- performance(rocrpred, 'tpr', 'fpr')
plot(rocrperf, colorize = TRUE)

# Area under the curve.
# BUG FIX: the original requested the 'acc' (accuracy) measure while the
# comment promised AUC; use the 'auc' measure.
auc <- performance(rocrpred, measure = 'auc')
auc
181750ee4a819ca4a0b388a482ff590d476c2625
639b2aeb9de96fa6ef5053804c12fc0eab12cd44
/run_analysis.R
1c6058bb05e8ba940d20361ec01435f5c649a65d
[]
no_license
sbanerj3/GettingandCleaningData
67ba705e109c5d30bf5e98f14d6ef85bea720241
25bb4d5b1824b33f0c5908647b0cf547fbe496f3
refs/heads/master
2021-01-15T21:44:35.784881
2015-08-22T01:21:17
2015-08-22T01:21:17
41,180,026
0
0
null
null
null
null
UTF-8
R
false
false
3,229
r
run_analysis.R
#loading Features and Test files in R Features <- read.table("./UCI HAR Dataset/features.txt",sep=' ', header=F,fill=T) Test <- read.table("./UCI HAR Dataset/test/X_test.txt", header=F,fill=T) #using the Features file to change the column names for Test file names(Test) <- Features[,2] #loading Activity and Subjects for the Test set Test_y <- read.table("./UCI HAR Dataset/test/y_test.txt", header=F,fill=T) Subject_test <- read.table( "./UCI HAR Dataset/test/subject_test.txt",sep=' ', header=F,fill=T) #renamed column name in the subject test file to "Subject" colnames(Subject_test) = "Subject" #loading Train files in R Train <- read.table("./UCI HAR Dataset/train/X_train.txt", header=F,fill=T) #using the Features file to change the column names for Train file names(Train) <- Features[,2] #loading Activity and Subjects for the Train set Train_y <- read.table("./UCI HAR Dataset/train/y_train.txt",sep=' ', header=F,fill=T) Subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt",sep=' ', header=F,fill=T) #renamed column name in the subject train file to "Subject" colnames(Subject_train) = "Subject" #loaded Activity labels Activity_Lbl <- read.table("./UCI HAR Dataset/activity_labels.txt",sep=' ', header=F,fill=T) #combining Training set and Activity Train_com <- cbind(Train,Train_y) #created new dataset by keeping only the relevant column names mean and std. Train_com2 <- cbind(Train_com[grep("mean", colnames(Train_com))],Train_com[grep("std", colnames(Train_com))],Train_com$V1,Subject_train$Subject) #combining Testing set and Activity Test_com <- cbind(Test,Test_y) #created new dataset by keeping only the relevant column names mean and std. 
Test_com2 <- cbind(Test_com[grep("mean", colnames(Test_com))],Test_com[grep("std", colnames(Test_com))],Test_com$V1,Subject_test$Subject) #merged the test files with Activity labels Test_final <- merge(Test_com2,Activity_Lbl,by.x ="Test_com$V1",by.y="V1") #merged the train files with Activity labels Train_final <- merge(Train_com2,Activity_Lbl,by.x ="Train_com$V1",by.y="V1") #Deleted unwanted columns from the data Test_final$`Test_com$V1` <- NULL Train_final$`Train_com$V1` <- NULL #Renamed column names to make them meaningful names(Train_final)[names(Train_final) == 'Subject_train$Subject'] <- 'Subject' names(Test_final)[names(Test_final) == 'Subject_test$Subject'] <- 'Subject' #Updated columnname to Activity names(Train_final)[names(Train_final) == 'V2'] <- 'Activity' names(Test_final)[names(Test_final) == 'V2'] <- 'Activity' #ordered the data by column names Test_final<- Test_final[,order(names(Test_final))] Train_final<- Train_final[,order(names(Train_final))] #combined test and train data Final <- rbind(Train_final,Test_final) #using the aggregate function created the mean of all measures in the data set #grouping them by Subject and Activity ag <- aggregate(.~Subject+Activity,Final,mean) #cleaned the column names of the final aggregated dataset colnames(ag) <- tolower(gsub('[-()]','',colnames(ag))) colnames(ag) <- gsub('mean','Mean',colnames(ag)) colnames(ag) <- gsub('std','Std',colnames(ag)) #create the output file write.table(ag,file="Output.txt",sep=" ",row.names = FALSE)
a1f7c3748bf79823d0744632f4d78fe86ecbda43
6c498ad3e5c3b97cc4c8cb6a26b9c15d68d6a933
/r/R/model_predict.r
5e650dfe3cf81e5ca5187972a895c930ae29bf56
[]
no_license
ModelOriented/weles
6b327dcf4c97f0a2df637fed3b28a7bfb48aea1b
2b76fb0ed6b42c0395426eebdb8fcb9765dc43e7
refs/heads/master
2020-07-27T09:06:29.253125
2019-11-30T14:08:21
2019-11-30T14:08:21
209,040,281
9
4
null
2019-09-18T14:55:07
2019-09-17T11:53:01
HTML
UTF-8
R
false
false
2,349
r
model_predict.r
#' @title Make a prediction using weles model #' #' @description #' This tool allows you to make a prediction with model in weles. #' #' @param model_name name of the model in weles #' @param X data to make a prediction of, must have named columns, may be path to *.csv* file (must contatin **/** sign) or *hash* of already uploaded data, #' if X is an object and prepare_columns is True, columns' names will be fetched automatically #' @param pred_type type of prediction, 'exact' or 'prob' #' @param prepare_columns if X is an object then columns' names will be fetched automatically #' #' @references #' \href{http://192.168.137.64/models}{\bold{models}} #' \href{http://192.168.137.64/datasets}{\bold{datasets}} #' #' @examples #' \code { #' library("weles") #' #' model_predict("example_model", iris[,-5]) #' } #' #' @export model_predict <- function(model_name, X, pred_type = 'exact', prepare_columns = TRUE) { # checking input stopifnot(class(model_name) == 'character') stopifnot(class(X) == 'data.frame' || class(X) == 'character') stopifnot(class(pred_type) == 'character') stopifnot(class(prepare_columns) == 'logical') # making the hash for temporary files h = digest::digest(c(model_name, Sys.time())) # url url = paste0('http://192.168.137.64/models/', model_name, '/predict/', pred_type) # body for the request body = list() # uploading data if(class(X) == "character" && !grepl("/", X)) { # case when X is a hash body[['is_hash']] = 1 body[['hash']] = X } else if(class(X) == "character") { # case when X is a path body[['is_hash']] = 0 data = read.csv(X) body[['data']] = paste0(c(paste0(colnames(data), collapse=','), paste0(apply(data,1, paste0, collapse=','), collapse='\n')), collapse='\n') } else { # case when X is an object # fetching columns if(prepare_columns) { info = models_info(model_name) columns = info$columns target = info$model$target columns = columns[order(columns$id), 'name'] columns = columns[columns != target] names(X) = columns } data = 
paste0(c(paste0(colnames(X), collapse=','), paste0(apply(X,1, paste0, collapse=','), collapse='\n')), collapse='\n') body[['is_hash']] = 0 body[['data']] = data } # uploading r = httr::content(httr::POST(url = url, body = body), as='text') # return read.csv(text = r, header = F)[,1] }
21899f61764645156bf45c63ae2f0f6ece2042f4
8cd909e0f66fb5119f63b81528f0d7dd4f96bac4
/R/1_data_import.R
1845980685a1c49eb803d4af8e7959a1139b8351
[]
no_license
strnda/hydrological_modelling
360257c81a2338ca1cd96b2e7272e8f75e231b79
d09f78c95c7ca6427ad360f72582d1ed8e4969c6
refs/heads/master
2021-04-29T21:19:59.815272
2021-02-11T18:55:57
2021-02-11T18:55:57
121,613,038
2
4
null
null
null
null
UTF-8
R
false
false
759
r
1_data_import.R
source('R/aux_fun/aux_functions.R') lop <- c('data.table', 'ggplot2', 'DEoptim', 'AMORE', 'Rcpp') to.instal <- lop[which(!lop %in% installed.packages()[,'Package'])] if(length(to.instal) != 0) install.packages(to.instal) temp <- lapply(lop, library, character.only = T) rm(temp) id <- '04201500' dta <- as.data.table(read.fwf(sprintf('https://hydrology.nws.noaa.gov/pub/gcip/mopex/US_Data/Us_438_Daily/%s.dly', id), widths = c(8,10,10,10,10,10))) names(dta) <- c('DTM', 'P', 'E', 'Q', 'Tmax', 'Tmin') dta[, DTM := as.Date(gsub(' ','0', DTM), format = '%Y%m%d')] ggplot(dta) + geom_line(aes(x = DTM, y = Q), colour = 'steelblue4') + theme_bw() + labs(x = 'Time', y = 'Discharge', title = id) DTA <- periods(dta)
2e92417ab8bda659b37183af3e7bcb4d9a76ad74
58759b651f5db231a118875eb0a7fba2a236a995
/man/summarise_case_forecast.Rd
77bf9a969536ba395036ec516674bedec274aee8
[ "MIT" ]
permissive
epiforecasts/EpiSoon
565223d175a749391cb9ca41f298c50634861f5a
d8eb427000f403ca09543ce9eab54c35855256a1
refs/heads/main
2023-07-20T14:46:29.266919
2023-03-03T15:31:23
2023-03-03T15:31:23
248,311,916
6
6
NOASSERTION
2023-07-14T14:05:10
2020-03-18T18:29:07
HTML
UTF-8
R
false
true
889
rd
summarise_case_forecast.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summarise.R \name{summarise_case_forecast} \alias{summarise_case_forecast} \title{Summarise Forecast Cases} \usage{ summarise_case_forecast(pred_cases) } \arguments{ \item{pred_cases}{A dataframe as produced by \code{EpiSoon::forecast_cases}.} } \value{ A summarised dataframe. } \description{ Summarise Forecast Cases } \examples{ \dontrun{ ## Example forecast forecast <- forecast_rt(EpiSoon::example_obs_rts, model = function(...) { EpiSoon::bsts_model( model = function(ss, y) { bsts::AddSemilocalLinearTrend(ss, y = y) }, ... ) }, horizon = 7, samples = 10 ) ## Forecast cases case_forecast <- forecast_cases( EpiSoon::example_obs_cases, forecast, EpiSoon::example_serial_interval ) ## Summarise case forecast summarise_case_forecast(case_forecast) } }
47e4e97b21304c6cbf78ddcb33f692fa0497e5a5
31ba946940e6da0cf05cfe91ac6beb6dd5cd2df8
/R/overlapping.R
5179fea1f4869cfc23982e513e94586574d2e92f
[ "MIT" ]
permissive
CodingLifeV/ImbCoL
03b70a62b61fc274c7ba359385201f8cfca7a400
be7682f1a4b9cbcecdf419f652de49d0fb95329f
refs/heads/master
2023-06-14T10:48:11.263970
2021-05-14T20:21:06
2021-05-14T20:21:06
null
0
0
null
null
null
null
UTF-8
R
false
false
9,393
r
overlapping.R
#' Measures of overlapping #' #' Classification task. The overlapping measures evaluate how informative the #' available features are to separate the classes. If there is at least one very #' discriminative feature in the dataset, the problem can be considered simpler #' than if there is no such an attribute. #' #' @family complexity-measures #' @param x A data.frame contained only the input attributes. #' @param y A factor response vector with one label for each row/component of x. #' @param measures A list of measures names or \code{"all"} to include all them. #' @param formula A formula to define the class column. #' @param data A data.frame dataset contained the input attributes and class. #' @param ... Not used. #' @details #' The following measures are allowed for this method: #' \describe{ #' \item{"F1"}{Maximum Fisher's Discriminant Ratio (F1) measures the overlap #' between the values of the features and takes the value of the largest #' discriminant ratio among all the available features.} #' \item{"F1v"}{Directional-vector maximum Fisher's discriminant ratio (F1v) #' complements F1 by searching for a vector able to separate two classes #' after the training examples have been projected into it.} #' \item{"F2"}{Volume of the overlapping region (F2) computes the overlap of #' the distributions of the features values within the classes. F2 can be #' determined by finding, for each feature its minimum and maximum values #' in the classes.} #' \item{"F3"}{The maximum individual feature efficiency (F3) of each #' feature is given by the ratio between the number of examples that are #' not in the overlapping region of two classes and the total number of #' examples. This measure returns the maximum of the values found among #' the input features.} #' \item{"F4"}{Collective feature efficiency (F4) get an overview on how #' various features may work together in data separation. 
First the most #' discriminative feature according to F3 is selected and all examples that #' can be separated by this feature are removed from the dataset. The #' previous step is repeated on the remaining dataset until all the #' features have been considered or no example remains. F4 returns the #' ratio of examples that have been discriminated.} #' \item{"FX_partial"}{X \in [2,3,4]. It is the decomposed version of the #' correponding FX function. Instead of giving a single complexity value #' for the dataset, it returns one complexity value per class.} #' } #' @return A list named by the requested overlapping measure. #' #' @examples #' ## Extract all overlapping measures #' data(iris) #' overlapping(Species ~ ., iris) #' @export overlapping <- function(...) { UseMethod("overlapping") } #' @rdname overlapping #' @export overlapping.default <- function(x, y, measures="all", ...) { if(!is.data.frame(x)) { stop("data argument must be a data.frame") } if(is.data.frame(y)) { y <- y[, 1] } y <- as.factor(y) if(min(table(y)) < 2) { stop("number of examples in the minority class should be >= 2") } if(nrow(x) != length(y)) { stop("x and y must have same number of rows") } if(measures[1] == "all") { measures <- ls.overlapping() } measures <- match.arg(measures, ls.overlapping(), TRUE) colnames(x) <- make.names(colnames(x)) x <- binarize(x) data <- data.frame(x, class=y) sapply(measures, function(f) { eval(call(paste("c", f, sep="."), data=data)) }, simplify = F) } #' @rdname overlapping #' @export overlapping.formula <- function(formula, data, measures="all", ...) { if(!inherits(formula, "formula")) { stop("method is only for formula datas") } if(!is.data.frame(data)) { stop("data argument must be a data.frame") } modFrame <- stats::model.frame(formula, data) attr(modFrame, "terms") <- NULL overlapping.default(modFrame[, -1, drop=FALSE], modFrame[, 1, drop=FALSE], measures, ...) 
} ls.overlapping <- function() { c("F2_partial","F3_partial","F4_partial") } branch <- function(data, j) { data[data$class == j, -ncol(data), drop=FALSE] } num <- function(data, j) { tmp <- branch(data, j) aux <- nrow(tmp) * (colMeans(tmp) - colMeans(data[,-ncol(data), drop=FALSE]))^2 return(aux) } den <- function(data, j) { tmp <- branch(data, j) aux <- rowSums((t(tmp) - colMeans(tmp))^2) return(aux) } numerator <- function(j, data) { tmp <- branch(data, j) aux <- nrow(tmp) * (colMeans(tmp) - colMeans(data[,-ncol(data), drop=FALSE]))^2 return(aux) } denominator <- function(j, data) { tmp <- branch(data, j) aux <- rowSums((t(tmp) - colMeans(tmp))^2) return(aux) } c.F1 <- function(data) { num <- lapply(levels(data$class), numerator, data) den <- lapply(levels(data$class), denominator, data) aux <- rowSums(do.call("cbind", num)) / rowSums(do.call("cbind", den)) aux <- 1 / (aux + 1) return(min(aux, na.rm=TRUE)) } dvector <- function(data) { l <- levels(data$class) a <- branch(data, l[1]) b <- branch(data, l[2]) c1 <- colMeans(a) c2 <- colMeans(b) W <- (nrow(a)/nrow(data)) * stats::cov(a) + (nrow(b)/nrow(data)) * stats::cov(b) B <- (c1 - c2) %*% t(c1 - c2) d <- MASS::ginv(W) %*% (c1 - c2) aux <- (t(d) %*% B %*% d)/(t(d) %*% W %*% d) return(aux) } c.F1v <- function(data) { data <- ovo(data) aux <- unlist(lapply(data, dvector)) return(mean(aux)) } regionOver <- function(data) { l <- levels(data$class) a <- branch(data, l[1]) b <- branch(data, l[2]) maxmax <- rbind(colMax(a), colMax(b)) minmin <- rbind(colMin(a), colMin(b)) over <- colMax(rbind(colMin(maxmax) - colMax(minmin), 0)) rang <- colMax(maxmax) - colMin(minmin) aux <- prod(over/rang, na.rm=TRUE) return(aux) } c.F2 <- function(data) { data <- ovo(data) aux <- unlist(lapply(data, regionOver)) return(mean(aux)) } nonOverlap <- function(data) { l <- levels(data$class) a <- branch(data, l[1]) b <- branch(data, l[2]) minmax <- colMin(rbind(colMax(a), colMax(b))) maxmin <- colMax(rbind(colMin(a), colMin(b))) aux <- 
do.call("cbind", lapply(1:(ncol(data)-1), function(i) { data[,i] < maxmin[i] | data[,i] > minmax[i] }) ) aux <- data.frame(aux) rownames(aux) <- rownames(data) return(aux) } c.F3 <- function(data) { data <- ovo(data) aux <- mapply(function(d) { colSums(nonOverlap(d))/nrow(d) }, d=data) aux <- data.frame(aux) aux <- mean(colMax(aux)) aux = 1 - aux return(aux) } removing <- function(data) { repeat { tmp <- nonOverlap(data) col <- which.max(colSums(tmp)) aux <- rownames(tmp[tmp[,col] != TRUE, , drop=FALSE]) data <- data[aux,- col, drop=FALSE] if(nrow(data) == 0 | ncol(data) == 1 | length(unique(data$class)) == 1) break } return(data) } c.F4 <- function(data) { data <- ovo(data) aux <- mapply(function(d) { n <- removing(d) (nrow(d) - nrow(n))/nrow(d) }, d=data) aux <- mean(aux) aux <- 1 - aux return(aux) } branch_partial <- function(data, j) { do.call(rbind,lapply(1:length(j), FUN = function(l) data[data$class == j[l], -ncol(data), drop=FALSE])) } regionOver_partial <- function(i,data) { l <- levels(data$class) a <- branch_partial(data, l[i]) b <- branch_partial(data, l[-i]) maxmax <- rbind(colMax(a), colMax(b)) minmin <- rbind(colMin(a), colMin(b)) over <- colMax(rbind(colMin(maxmax) - colMax(minmin), 0)) rang <- maxmax[1,] - minmin[1,] aux <- prod(over/rang, na.rm=TRUE) return(aux) } c.F2_partial <- function(data) { aux <- sapply(1:length(levels(data$class)), FUN = regionOver_partial, data = data) names(aux) <- levels(data$class) return(aux) } nonOverlap_partial <- function(i,data) { l <- levels(data$class) a <- branch_partial(data, l[i]) b <- branch_partial(data, l[-i]) minmax <- colMin(rbind(colMax(a), colMax(b))) maxmin <- colMax(rbind(colMin(a), colMin(b))) aux <- do.call("cbind", lapply(1:ncol(a), function(i) { a[,i, drop=FALSE] < maxmin[i] | a[,i, drop=FALSE] > minmax[i] }) ) aux <- data.frame(aux) return(aux) } c.F3_partial <- function(data) { aux <- sapply(1:length(levels(data$class)), function(d) { colSums(nonOverlap_partial(d,data))/sum(data$class == 
levels(data$class)[d]) }) aux <- rbind(aux,0) aux <- data.frame(aux) aux <- colMax(aux) aux <- 1 - aux names(aux) <- levels(data$class) return(aux) } removing_partial <- function(i,data) { repeat { tmp <- nonOverlap_partial(i,data) col <- which.max(colSums(tmp)) aux <- rownames(tmp[tmp[,col] != TRUE, , drop=FALSE]) data <- data[aux,- col, drop=FALSE] if(nrow(data) == 0 | ncol(data) == 1 | length(unique(data$class)) == 1) break } return(data) } c.F4_partial <- function(data) { aux <- sapply(1:length(levels(data$class)), FUN = function(d) { n <- removing_partial(d,data) nclass = sum(data$class == levels(data$class)[d]) (nclass - nrow(n))/nclass }) aux <- 1 - aux names(aux) <- levels(data$class) return(aux) }
45d21da255ea88a23273dabd3692f36c55ed3aed
5a5bc9e1b0d59859b4e213b092e19afe232819e1
/R/wind/wind_downscale2_orig.R
45492a6b167f93633d378775dc1ed70ad9c92b7b
[]
no_license
jrmosedale/microclimates
bf469e07b688e9342c0a8d767db84ee428e778f3
ae2e61969631506c523bd618c9106a61b00355dd
refs/heads/master
2021-04-30T15:18:19.091728
2018-02-12T11:31:16
2018-02-12T11:31:16
121,236,443
0
0
null
null
null
null
UTF-8
R
false
false
10,297
r
wind_downscale2_orig.R
library(ncdf4) library(raster) library(rgdal) # Function for computing Julian data # In this instance just used to work out number of days after 1st Jan 1950, # so that correct element of array can be extracted # Inputs: # day: the day of the month (0-31) # month: the month of the year (numeric: 0-12) # year: any year from 1950 to 2014 # Output: the Julian day (https://en.wikipedia.org/wiki/Julian_day) JD<-function(day,month,year){ a<-(14-month)/12 y<-year+4800-a m<-month+12*a-3 JDN<-floor((153*m+2)/5) + 365*y + floor(y/4) - floor(y/100) + floor(y/400) - 32045 + day JDN } # Based on hour, day, month and year, extracts the required value form the array of values stored by wind_downscale1.R # Inputs: # hr: the hour (0-23) 0 = midnight # day: the day of the month (0-31) # month: the month of the year (numeric: 0-12) # year: any year from 1950 to 2014 # Output: the element of the array stored by wind_downscale1.R that corresponds to either that hour, or the latest # period immediatly before that hour (data only available 6-hourly) array.val<-function(hr,day,month,yr) { jd.base=JD(1,1,1950) jd<-JD(day,month,yr) dval<-(jd-jd.base)*4 hval<-floor(hr/6) val<-dval+hval+1 val } # Works out the angle to the horizon in a specified direction (used to calculate the shelter coefficient) # Inputs: # dtm = a digital eleveation model stored as a matrix # NB the rotation of the digital elevetation data is important. 
This is designed to be used for a matrix # extracted from a raster (see raster package) as follows: my.matrix<-getValues(my.raster,format="matrix") horizonangle <- function(dtm,azimuth,res=100,steps=40) { azimuth<-azimuth-90 azi <- azimuth * (pi/180) horizon <- array(0,dim(dtm)) dtm3 <- array(0,dim(dtm)+200) x <- dim(dtm)[1] y <- dim(dtm)[2] dtm3[101:(x+100),101:(y+100)] <- dtm m<-10^2/steps^2 for (step in 1:steps) { horizon[1:x,1:y] <- pmax(horizon[1:x,1:y], (dtm3[(101+sin(azi)*m*step^2):(x+100+sin(azi)*m*step^2),(101+cos(azi)*m*step^2):(y+100+cos(azi)*m*step^2)]-dtm3[101:(x+100),101:(y+100)])/(m*res*step^2)) } horizon } windindex <- function(dtm,direction) { index <- 1 - atan(0.17 * 100 * horizonangle(dtm,direction))/1.65 index } #################################################### # # # This bit downscales the wind #################################################### # NB running this for 1 month takes ~30 hours # # # Stages: # (1) get wind values for a given hour, day, month and year # (2) convert to 100m resolution raster OSGB grid reference # (3) adjust based on altitude # (4) adjust based on shelter coefficient # loads data output by wind_downscale1.R # data are arrays of easterly and northerly wind components at spatial resolution of 2.5 degrees and temporal resolution of 4x daily # data automatically assigned name when written out: wind_u and wind_v load(file="C:/Jonathanmodel/wind/newdata/uwind.r") load(file="C:/Jonathanmodel/wind/newdata/vwind.r") # Specify month and year for which data are required yr=2014 month=1 # set period for which you want to create 100m resolution wind data # Set to do all hours in January 2014 for (day in 1:31){ for (hr in 0:23){ # Can be quite slow. 
Allows you to keep tabs on progress by printing hour, day, month & year tp<-paste("year=",yr," month=",month," day=",day," hour=",hr,sep="") print(tp) ############# # Stage 1: get wind values for a given day month and year ############# # As original data are 4x daily, but data are required for each hour, # this bit reads in the data for the periods immediatly before after for which there are data and calculates # weighted mean av1<-array.val(hr,day,month,yr) av2<-av1+1 rem<-hr/6-floor(hr/6) uwind1<-wind_u[,,av1] uwind2<-wind_u[,,av2] vwind1<-wind_v[,,av1] vwind2<-wind_v[,,av2] uwind<-(1-rem)*uwind1+rem*uwind2 vwind<-(1-rem)*vwind1+rem*vwind2 ############# # Stage 2: convert to 100m resolution raster OSGB grid reference ############# # Convert to raster (original lat long format and resolution uwind.r<-raster(uwind,xmn=-7.5,xmx=-2.5,ymn=47.5,ymx=52.5) vwind.r<-raster(vwind,xmn=-7.5,xmx=-2.5,ymn=47.5,ymx=52.5) # Reproject in OSGB projection crs(uwind.r)<-"+init=epsg:4326" crs(vwind.r)<-"+init=epsg:4326" u_osgb<-projectRaster(uwind.r,crs="+init=epsg:27700") v_osgb<-projectRaster(vwind.r,crs="+init=epsg:27700") # Trim to desired area and resample the data at a 100m resolution. Interpolation is set at default simple bilinear e<-extent(u_osgb) xmn=round(e@xmin/100)*100 xmx=round(e@xmax/100)*100 ymn=round(e@ymin/100)*100 ymx=round(e@ymax/100)*100 template<-raster(xmn=xmn,xmx=xmx,ymn=ymn,ymx=ymx,resolution=100) u_100<-resample(u_osgb,template) v_100<-resample(v_osgb,template) # read in Digital Eelevation data - I've chopped this to just cover SW Britain # We probably need to make a definative decision as to what area we want to cover and adjust # areas accordingly. 
dem<-raster("C:/Jonathanmodel/wind/demsw.asc") e<-extent(dem) # Crop areas so that it neatly has whole 100m grid cells around the edge # Dem and wind rasters set to same extent here as well xmn=round(e@xmin/100)*100 xmx=round(e@xmax/100)*100 ymn=round(e@ymin/100)*100 ymx=round(e@ymax/100)*100 e<-extent(c(xmn,xmx,ymn,ymx)) u_100<-crop(u_100,e) v_100<-crop(v_100,e) dem<-crop(dem,e) ############# # Stage 3: adjust based on altitude of # NB Height adjustment based on wind spped values at different pressures downloaded Earth System Research Lab # Typical heights at different pressures calculated from Allen et al 1998 http://www.fao.org/docrep/x0490e/x0490e07.htm#atmospheric pressure (p) # Quadratic function fitted - NB this works well for heights up to ~1800m. IT won't work above ~2000m # Function was first derived by comparing values at different pressures (heights) over the course of a year (2014) ############# # adjust wind speeds by height of dem # convert to matrices uwind.m<-getValues(u_100,format="matrix") vwind.m<-getValues(v_100,format="matrix") dem.m<-getValues(dem,format="matrix") # adjust wind by height ustr<-sqrt(uwind.m^2) # wind strength vstr<-sqrt(vwind.m^2) # wind strength udir<-ifelse(uwind.m>0,1,-1) # positive or negative vdir<-ifelse(vwind.m>0,1,-1) # positive or negative u.adj<-ustr*((-0.000000108025)*dem.m^2+0.000408692*dem.m+0.956139) # NB don't worry about warnings. Calculation assigns NAs to the sea v.adj<-vstr*((-0.000000108025)*dem.m^2+0.000408692*dem.m+0.956139) # NB don't worry about warnings. 
Calculation assigns NAs to the sea # adjust values to correspond to wind speed 1m above theground # rescaling factor first derived by comparing values ot Culdrose wind data using wind_downscale3 # however, in line wiht what you'd expect from: http://www.fao.org/docrep/x0490e/x0490e07.htm#wind profile relationship u.adj<-u.adj*0.373686439 v.adj<-v.adj*0.373686439 u.adj<-u.adj*udir v.adj<-v.adj*vdir ru<-raster(u.adj,template=u_100) rv<-raster(v.adj,template=v_100) # Some code here for plotting altitude adjusted values, currently commmented out #par(mfrow=c(2,2)) #plot(ru,main="altitude adjusted wind u") #plot(rv,main="altitude adjusted wind v") # calculate wind direction direction = (180/pi)*(atan2(u.adj,v.adj)) # NB this is direction in which wind blows to direction<-ifelse(direction<=180,direction+180,direction-180) # NB this direction from which wind originates rd<-raster(direction,template=u_100) #plot(rd,main="wind direction") ############# # Stage 4: height adjustments done using a shelter coefficient based on topography and wind direction ############# # go through in 10 km blocks and adjust by shelter coefficient # note, however that actually it actually selects 30km x 30 km area to allow for sheltering effects # that operate outside the area of each block. 
The 10km x 10km centre of the block is then selected # Programme could probably be speeded up without major loss of accuracy by setting buffer to ~5km instead of 10km # NB chopping into 10km blocks is necessary, as the function for calculating # the shelter coefficient assumes a single wind direction, a fairly safe assumption over 10km, but not over entire study region # first extend rasters to match 10km cells e<-extent(c(60000,360000,-10000,170000)) ru<-extend(ru,e) rv<-extend(rv,e) dem<-extend(dem,e) rd<-extend(rd,e) m.u<-getValues(ru,format="matrix") m.v<-getValues(rv,format="matrix") m.dem<-getValues(dem,format="matrix") m.d<-getValues(rd,format="matrix") # creates matrix for storing values windstrength<-matrix(NA,nrow=1800,ncol=3000) # matrix for storing all values # Goes through and does each 10km block for (rws in 1:16) { for (cls in 1:28) { xmn<-rws*100+1-100 ymn<-cls*100+1-100 xmx=xmn+300-1 ymx=ymn+300-1 b.u<-m.u[xmn:xmx,ymn:ymx] b.v<-m.v[xmn:xmx,ymn:ymx] b.dem<-m.dem[xmn:xmx,ymn:ymx] sel<-which(is.na(b.dem)==T) b.dem[sel]<-0 # calculates mean direction b.dir<-mean(m.d[xmn:xmx,ymn:ymx],na.rm=T) # Calculates wind strength from u and v components m.str<-sqrt(b.u^2+b.v^2) wcoef<-matrix(0,nrow=300,ncol=300) # Applies shelter coefficient if wind direction not NA. 
# Wind direction would be NA if all values within 10km block are NA, which happens if the entire 10km block is sea # (for which values nOt required) if (is.na(b.dir)==F) wcoef<-windindex(b.dem,b.dir) m.str<-m.str*wcoef # selects data for just the 10km x 10km centre of each 30km x 30km block windstrength[(xmn+100):(xmx-100),(ymn+100):(ymx-100)]<-m.str[101:200,101:200] } } # converts to raster and crops to desired extent r<-raster(windstrength,template=dem) e<-extent(c(79400,343500,0,159300)) r<-crop(r,e) rd<-crop(rd,e) #plot(r,main="wind speed") # converts raster to matrix and saves matrix as R dataset # seperate datasets saved for each hour (entire study area) m1.out<-getValues(r,format="matrix") m2.out<-getValues(rd,format="matrix") fileout.1<-paste("C:/Jonathanmodel/wind/dataout/strength_",yr,"_",month,"_",day,"_",hr,".r",sep="") fileout.2<-paste("C:/Jonathanmodel/wind/dataout/direction_",yr,"_",month,"_",day,"_",hr,".r",sep="") save(m1.out,file=fileout.1) save(m2.out,file=fileout.2) }}
30ca6cc11cba520e95752d32c0cd87e8a1c5bf69
66fac4228a78eb2d8c59a136ee8dd82636f4b3c4
/plot4.R
ffa338137d60601855666f3f0bd335724376babd
[]
no_license
MohamedAMKamal/Assignement2-
985f0c8b581ba19e3c1b03995b91bb4a51d77dad
8bef431370c2013005b7259ff611553d0a9707ae
refs/heads/master
2022-11-07T03:02:48.872587
2020-06-21T06:17:06
2020-06-21T06:17:06
273,845,923
0
0
null
null
null
null
UTF-8
R
false
false
1,108
r
plot4.R
plot4 <- function(...){ library(dplyr) library(data.table) library(ggplot2) NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") SCC$SCC <- as.character(SCC$SCC) NEI_SCC <- left_join(NEI, SCC, by="SCC") NEI_SCC <- data.table(NEI_SCC) ## filter to coal specific type labels in SCC Level Three subtypes <- unique(SCC$SCC.Level.Three) coalsubtypes <- grep("Coal", subtypes) subtypes <- data.table(subtypes) coaltypenames <- subtypes[coalsubtypes,] coaltypenames2 <- as.character(coaltypenames$subtypes) ## Manipulate from main data table to subset coalsources <- NEI_SCC[SCC.Level.Three %in% coaltypenames2] totals <- coalsources[,.(Emissions.Sum=sum(Emissions)), by=year] qplot(year, Emissions.Sum, data=totals, xlab="Observation Year", ylab="Total PM2.5 Emissions in Tons", main="Emissions from Coal Sources") + geom_smooth(method=lm) ## Crate plot file ggsave("plot4.png", width=8, height=5) }