blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
e429cfd514469db5f198e16c37d4ac77acbbbc5e
ee036beea6789336117c6d05b0c9a4c622bf65c4
/RCode/ts_3a.R
cfecc0c7da7fc1ea62cdfcaf56702cbe00803d25
[]
no_license
ibrahim85/Thesis
17d69ca75b05cd1e4446564b89c90cf141e91696
87ec5a2f7d0341a4467a99e8f8096de08d0d6cb5
refs/heads/master
2020-03-22T18:14:07.821060
2014-09-03T06:44:27
2014-09-03T06:44:27
null
0
0
null
null
null
null
UTF-8
R
false
false
1,235
r
ts_3a.R
ts_3a <- function(Mkt, SLoss, MktName){ # # # Mkt: market data # SLoss: stop loss # MktName: market's name for print out # # Returns: # results vector. results <- createResultsVector(MktName, SLoss) #browser() Mkt$v <- as.numeric(Mkt$p) lvl <- min(Mkt$v) + ((max(Mkt$v) - min(Mkt$v))/2) # Trade Long Mkt$Long <- ifelse(Mkt$v > lvl, Mkt$Close - Mkt$Open, NA) results["LongPL"] <- round(sum(Mkt$Long, na.rm=TRUE)) #Adj for SLoss if (SLoss < 0) { Mkt$Long <- ifelse(Mkt$v > lvl, ifelse((Mkt$Low-Mkt$Open) < SLoss, SLoss, Mkt$Long), Mkt$Long) results["LongPL"] <- round(sum(Mkt$Long, na.rm=TRUE)) } # Trade Short Mkt$Short <- ifelse(Mkt$v < lvl, Mkt$Open - Mkt$Close, NA) results["ShortPL"] <- round(sum(Mkt$Short, na.rm=TRUE)) #Adj for SLoss if (SLoss < 0){ Mkt$Short <- ifelse(Mkt$v < lvl, ifelse((Mkt$Open-Mkt$High) < SLoss, SLoss, Mkt$Short), Mkt$Short) results["ShortPL"] <- round(sum(Mkt$Short, na.rm=TRUE)) } Stats <- calcStats2(Mkt$Long) results[5:7] <- Stats Stats <- calcStats2(Mkt$Short) results[8:10] <- Stats return(results) }
0d872f9c615faf99ecd65857981358a8e33d1847
051880099402393c9249d41526a5ac162f822f8d
/man/tg.sampleProblem.Rd
bf24409e8b8c098712d3e86910532e69d30007c5
[ "MIT" ]
permissive
bbTomas/rPraat
cd2b309e39e0ee784be4d83a980da60946f4c822
4c516e1309377e370c7d05245f6a396b6d4d4b03
refs/heads/master
2021-12-13T19:32:38.439214
2021-12-09T18:42:48
2021-12-09T18:42:48
54,803,225
21
7
null
null
null
null
UTF-8
R
false
true
445
rd
tg.sampleProblem.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rpraat_sampleData.R \name{tg.sampleProblem} \alias{tg.sampleProblem} \title{tg.sampleProblem} \usage{ tg.sampleProblem() } \value{ TextGrid } \description{ Returns sample TextGrid with continuity problem. } \examples{ tg <- tg.sampleProblem() tg2 <- tg.repairContinuity(tg) tg2 <- tg.repairContinuity(tg2) tg.plot(tg2) } \seealso{ \code{\link{tg.repairContinuity}} }
f19528d0916d1c77841ba2d6a725119b67f4baeb
825aae59c1c325e658cee4b7b9dd6101328f733e
/plot1.R
b913fe4a24f35e0fcabd2291e624ee701e6492fd
[]
no_license
prebrov/ExData_Project2
bc9071076f0c5e760350e8c2d91e94f5e7784542
a0659d994024d30dff8f72457739b9faf1475978
refs/heads/master
2021-01-22T09:27:05.836506
2014-09-21T19:04:38
2014-09-21T19:04:38
null
0
0
null
null
null
null
UTF-8
R
false
false
694
r
plot1.R
## Q1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? Using the base plotting system, make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008. ## Read data sets NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") ## Aggregate total emissions for each year totalPerYear <- aggregate(Emissions ~ year, NEI, sum) ## Plot the line to see if emissions have decreased png(filename = "plot1.png", width = 480, height = 480) plot(totalPerYear, type = "l", col = "darkblue", xlab = "Year", ylab = "Total Emissions", main = "Total emissions (1999 - 2008)") dev.off()
556220f30f91425d1a0e4c1c787e13899c27e32e
90c5c8a79cb01f1a2475f01f8c0e4ba539492956
/Scripts/R_Scripts/build_CMV_predictor.R
a33c638a1194d56df41b506f2bf74748a37f9d4b
[]
no_license
JacobBergstedt/MIMETH
626725179fb37adf3853adafd19ccf33c4c1623a
c475440ee5bb3389fae72f1684d270641884ce0a
refs/heads/main
2023-04-15T03:18:12.731765
2022-08-23T13:36:50
2022-08-23T13:36:50
527,968,587
2
0
null
null
null
null
UTF-8
R
false
false
1,160
r
build_CMV_predictor.R
# Initialize -------------------------------------------------------------- library(tidyverse) library(stabs) library(glmnet) library(parallel) source("./Scripts/R_scripts/Libraries/functions_for_CMV_prediction.R") # Load data --------------------------------------------------------------- meth <- readRDS("./Data/RData/Methylation/MIMETH.minfi.MMatrix.noob_969.ComBat2.rds") ss <- readRDS("./Data/RData/Methylation/Annotation/MIMETH.969_sample_sheet.rds") meth <- meth[, ss$SentrixID] colnames(meth) <- ss$SUBJID meth <- t(meth) covs <- data.frame(SUBJID = rownames(meth)) %>% left_join(readRDS("./Data/RData/Environment/covariates_all_samples.rds") ) y <- covs$CMV_serostatus # Define globals ---------------------------------------------------------- q <- 50 tol <- 2 alpha <- 0.95 n_rep <- 4 fold_list <- replicate(n = n_rep, sample.int(10, length(y), replace = TRUE), simplify = FALSE) # Run cross validation ---------------------------------------------------- selection_runs <- map_dfr(fold_list, cv_stability_selection, meth, y, alpha = alpha, q = q, tol = tol) saveRDS(selection_runs, "./Data/RData/CMV_estimation_accuracy_stabsel.rds")
6e16373a61b3e27ab9a2067a0ad0f831b02e2d33
5efdf8e274a4a34a4f73645a59bb67995b6e3f4d
/cachematrix.R
7e7534e291b2932fa6af6877dd9cf85d60fdad4a
[]
no_license
mjgrav2001/ProgrammingAssignment2
c658587c679790e10bf66c76f76361e9ea39592a
12c6bf4c3a5393c095462b0ce0a02ad7775893db
refs/heads/master
2021-01-18T12:44:19.582237
2015-04-25T00:57:14
2015-04-25T00:57:14
34,548,283
0
0
null
2015-04-25T00:20:00
2015-04-25T00:20:00
null
UTF-8
R
false
false
1,927
r
cachematrix.R
## Caching the Inverse of a Matrix: ## ## This .R files contains two functions to compute the inverse of a given "matrix" ## but caching the inverse of the matrix rather than computing it repeatedly. ## The file contains following two functions: ## ## 1. makeCacheMatrix: This function creates a special "matrix" object ## that can cache its inverse. ## 2. cacheSolve: This function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. If the inverse has already been calculated ## (and the matrix has not changed), then the function cacheSolve retrieves ## the inverse from the cache. The inverse of a square matrix is done ## with the solve function in R. ## The function makeCacheMatrix creates a special "matrix", ## which is a list containing a function to ## a) set the values of the matrix (set) ## b) get the values of the matrix (get) ## c) set the values of the inverse of the matrix (setInv) ## d) get the values of the inverse of the matrix (getInv) makeCacheMatrix <- function(x = matrix()) { XI <- NULL set <- function(Y) { x <<- Y XI <<- NULL } get <- function() x setInv <- function(Xinv) XI <<- Xinv getInv <- function() XI list(set = set, get = get, setInv = setInv, getInv = getInv) } ## The following function calculates the inverse of a "matrix" created ## with the above function. It first checks to see if the inverse of the matrix ## has already been calculated. If so, it gets the inverse from the cache ## and skips the computation. Otherwise, it calculates the inverse of the data ## and sets the value of the inverse in the cache via the setInv function. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' XI <- x$getInv() if(!is.null(XI)) { message("getting cached data") return(XI) } data <- x$get() XI <- solve(data, ...) x$setInv(XI) XI }
be7070a1c8a1fb41e3c15a94bd6e7d80e36bf5a0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/mdsstat/examples/test_as_row.Rd.R
5073657c970e580f215d256b2cbd69d58a9763e4
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
168
r
test_as_row.Rd.R
library(mdsstat) ### Name: test_as_row ### Title: Coerce mdsstat Test to 1-Row Data Frame ### Aliases: test_as_row ### ** Examples test_as_row(prr(mds_ts[[3]]))
477a7255de3a954a00f43a8d7dbd385f65321b5a
3eace8d25635ebbc9c9d498def0f32aae85d7d88
/man/mc.calc.bca.Rd
5e5f0f21d265b34e21e486c3e7bbc3c3e94a2e49
[]
no_license
piodag/mcr
4b7c10d61188e63afe1af5b8109e19bec8c59862
41b5f5b2c526b2da11568471d7101e965d4d0a30
refs/heads/main
2023-04-12T13:48:48.175023
2021-05-10T22:26:11
2021-05-10T22:26:11
360,986,672
0
0
null
null
null
null
UTF-8
R
false
false
1,186
rd
mc.calc.bca.Rd
\name{mc.calc.bca} \alias{mc.calc.bca} \title{Bias Corrected and Accelerated Resampling Confidence Interval} \usage{ mc.calc.bca(Xboot, Xjack, xhat, alpha) } \arguments{ \item{Xboot}{vector of point estimates for bootstrap samples. The i-th element contains point estimate of the i-th bootstrap sample.} \item{Xjack}{vector of point estimates for jackknife samples. The i-th element contains point estimate of the dataset without i-th observation.} \item{xhat}{point estimate for the complete data set (scalar).} \item{alpha}{numeric value specifying the 100(1-alpha)\% confidence level for the confidence interval (Default is 0.05).} } \value{ a list with elements \item{est}{point estimate for the complete data set (xhat).} \item{CI}{confidence interval for point estimate.} } \description{ Calculate resampling BCa confidence intervals for intercept, slope or bias given a vector of bootstrap and jackknife point estimates. } \references{ Carpenter, J., Bithell, J. (2000) Bootstrap confidence intervals: when, which, what? A practical guide for medical statisticians. \emph{Stat Med}, \bold{19 (9)}, 1141--1164. }
7816cc68b30197a4a27a521888e43095a9e6d738
cfe01977ef19f9f5ae8e39d7835cf979c9b67901
/man/default_col_pal.Rd
c8f934bb57b24710122885a0acb3ec4914bd24a7
[ "MIT" ]
permissive
InseeFr/disaggR
27aecbf65f4fc65e539e90720660902a561d7d0c
f6c88857c7beb0a4d5c990bb253ed050302ba8c3
refs/heads/master
2023-08-22T13:44:32.143164
2023-08-05T17:42:55
2023-08-05T17:42:55
238,296,894
15
4
NOASSERTION
2023-09-06T16:29:02
2020-02-04T20:14:08
R
UTF-8
R
false
true
361
rd
default_col_pal.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{default_col_pal} \alias{default_col_pal} \title{Default color palette} \usage{ default_col_pal(object) } \description{ The default color palette for the graphics, inspired from the package \pkg{scales} whose scales can also be used as alternatives. } \keyword{internal}
c55aa2c5d221527f538bcac82ba29c13b3e988d8
277dbb992966a549176e2b7f526715574b421440
/R_training/실습제출/신현정/lab_08.R
06021ff08e122e33483a7e1dcaf3b1ea9bd60594
[]
no_license
BaeYS-marketing/R
58bc7f448d7486510218035a3e09d1dd562bca4b
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
refs/heads/master
2020-12-11T04:30:28.034460
2020-01-17T08:47:38
2020-01-17T08:47:38
227,819,378
0
0
null
2019-12-13T12:06:33
2019-12-13T10:56:18
C++
UTF-8
R
false
false
875
r
lab_08.R
#문제 1 mySum <-function(...) #(1) { data = c(...) oddSum= 0 evenSum = 0 print(class(data)) if(!is.numeric(data)){ return(NULL) }else{ for(i in data){ if(i %% 2 == 0) }else{ } } data = list(evenSum,) } return(data) } testwarn() <- function(x){ if(any(!NA)) } return(min) ; #(3) mySum<- function(x){ if(all(is.na(x))) return("NA를 최저값으로 변경하여 처리함!!") else (is.na(NULL)) return("NULL")} #문제 2 myExpr = function(x){ if(is.function(x)){ result = sample(1:45,6) cat(result,"\n") } else{ stop("수행 안할꺼임!!") } return(x(result)) } myExpr(max) # 함수 명을 넣어야함 #문제 4 d <- scan("data/iotest1.txt") sort(d) sort(d, decreasing = TRUE) sum(d) mean(d) #문제 5
be3613f16524981778810b28e252c501137175b9
ced0e8a0e6e8c40b64424b786356c34ef9c81a70
/Rcode_clinicalDataNew/rubins_rule.R
d02cf69423b323cf9dd39c25c2cfdf1423b84a3d
[ "MIT" ]
permissive
AnacletoLAB/DataAnalysisR
59bff492b3cc945aa42f01c1edf154323cd6df7e
8c09903be89a1199869f0408f35378df77a0ebdc
refs/heads/main
2023-03-22T09:37:57.271192
2021-03-17T18:46:06
2021-03-17T18:46:06
316,894,320
2
0
null
null
null
null
UTF-8
R
false
false
2,268
r
rubins_rule.R
rubin_rule_pool <- function(imp){ #https://thomasleeper.com/Rcourse/Tutorials/mi.html # imp is a list with num_imputations elements # each element is a dataframe/matrix where rows are the repetitions of the classifier and each column is # related to a different value (either performance value or importance of the feature) source(file.path('.', 'Utils_postprocess.R')) num_imputations = length(imp) # with the following function I get a matrix with # num_imputation columns - one for each imputation run. # each row is related to one of the values in the imp dataframe/matrix # (essentially, for each value I compute the mean over all the repetitions of the algorithm) mean_estimates <- sapply(imp, mean_on_columns) #Now I compute the mean over all the imputations grandm <- apply(mean_estimates,1,mean) grandm # now for each imputation, I compute the standard error of the classifiers' runs ses <- sapply(imp, var_on_columns) stderrs <- sapply(imp, se_on_columns) #To get the standard error of our multiple imputation estimate, #we need to combine the standard errors of each of our estimates, #so that estimates we need to start by getting the SEs of each imputed vector: #The within variance is the mean of the se for all the imputations within <- apply(ses,1, mean) within_se <- apply(stderrs,1, mean) #To calculate the between-imputation VARIANCE, #we calculate the sum of squared deviations of each imputed mean from the grand mean estimate: # FOR EACH IMPUTATION COMPUTINE THE SAMPLE VARIANCE between = within-within for (nv in 1:nrow(mean_estimates)) { between[nv] = sum((mean_estimates[nv,]-grandm[nv])^2)* (1/(num_imputations-1)) } between_se = sqrt(between)/sqrt(num_imputations) #Then we sum the within- and between-imputation variances (multiply the latter by a small correction): # cat(between, '\n') grandvar <- within + (1+1/num_imputations)*between grandse <- within_se + (1+1/num_imputations)*between_se return(list("mean" = grandm, "var" = grandvar, "se" = grandse, "between" = 
between, "within" = within)) } mean_on_cols <- function(df_mat){ return(apply(df_mat, 2, mean)) }
59429d3261b29a2211f5f8a8f5392c030594d822
ae782ac681b8e5bbfc68561bb06c0f7786cad1a0
/simulation_random_arrangement_successes_failures.R
4827ec4df8959c6ca081913ffd7ed560a40c4812
[]
no_license
SirRichter/R-code
dea56bd746b8ba8d087e53d244c07679eda2351c
719aca4ae5779f07463ae36a96e0887e9249db5d
refs/heads/master
2018-11-29T17:18:29.442930
2018-09-05T14:57:50
2018-09-05T14:57:50
117,899,338
0
0
null
null
null
null
UTF-8
R
false
false
566
r
simulation_random_arrangement_successes_failures.R
success <- 21 attempt <- 30 fail <- attempt - success streak <- function() { success.pos <- sort(sample(1:attempt, success)) fail.pos <- sort((1:attempt)[-success.pos]) streak <- 0 prev <- success.pos[1] for(i in success.pos[-1]) { current <- i if(prev == current-1) { streak <- streak + 1 } prev <- current } prev <- fail.pos[1] for(i in fail.pos[-1]) { current <- i if(prev == current-1) { streak <- streak + 1 } prev <- current } return(streak/attempt) }
74f45c8425f84efb41772de56bdb1de89320d82f
34a646deb8254171bd8e4882e3ea3c7e13fb63fb
/man/get_form_data.Rd
d97dc58a98a90401c7615b35788161dd84b26324
[ "MIT" ]
permissive
mattmalcher/lift.tracker
0e516bddb9570f1cdb4c83780f3e1c3c804291d2
bfcdce90eb2dc9f97e5ce9a1107a7286ad192679
refs/heads/main
2023-02-05T11:33:10.629001
2020-10-13T10:45:22
2020-10-13T10:45:22
303,187,016
0
0
null
null
null
null
UTF-8
R
false
true
383
rd
get_form_data.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_form_data.R \name{get_form_data} \alias{get_form_data} \title{Get Form Data} \usage{ get_form_data(form_url) } \arguments{ \item{form_url}{\itemize{ \item the share link for the sheet which the google form drops data into }} } \value{ a dataframe of lift breakage data } \description{ Get Form Data }
4f052962f0f61ec548127c2746febf28617ad29c
7e38971daf48e04414cb0c65facd38c98648142b
/R/cleanAQRQMJDaily.R
5e4b1c592daf3938205c61207456762fcef36ea6
[]
no_license
yn1/FFAQR
338800ec7209a10b33d79630ae2c082d7df1f22c
c173309a9415f9d83130650b8c7e4f5132fabdf4
refs/heads/master
2021-01-02T23:02:57.359761
2015-02-06T21:49:02
2015-02-06T21:49:02
29,973,189
0
1
null
null
null
null
UTF-8
R
false
false
740
r
cleanAQRQMJDaily.R
#' Reads in, cleans, and subdivides daily QMJ data set in data folder. #' Currently disabled, Java heap runs out of memory when reading xlsx. cleanAQRQMJDaily <- function() { temp <- tempfile() QMJDaily <- "https://www.aqr.com/~/media/files/data-sets/quality-minus-junk-factors-daily.xlsx" download.file(QMJDaily, temp, method = "curl") # Imports QMJ data AQRQMJFactorsDaily <- read.xlsx(temp, "QMJ Factors", startRow=19, colIndex=c(1:30)) row.names(AQRQMJFactorsDaily) <- NULL names(AQRQMJFactorsDaily)[1] <- "Date" AQRQMJFactorsDaily[,1] <- ymd(AQRQMJFactorsDaily[,1]) unlink(temp) start <- system.file(package="FFAQR") save(AQRQMJFactorsDaily, file=paste0(start, "/data/AQRQMJFactorsDaily.Rdata")) }
3f64c49c2fa4bc456019de2b8bce019d975e3a28
9916af82f94b822f233475296447adb486db56c1
/R/cSimulator.R
156762e26efc86010ef6f24e908f1aeeb4c01807
[]
no_license
saezlab/CellNOptR
865bc6cf866e00faf8718f1f61388fde7fbb58b8
4660813a35227bab86359d51f9f61a9e3deb0298
refs/heads/master
2022-05-20T06:58:25.273631
2022-05-11T08:59:33
2022-05-11T08:59:33
116,661,047
8
2
null
2022-03-21T13:28:13
2018-01-08T10:12:00
R
UTF-8
R
false
false
2,343
r
cSimulator.R
# # This file is part of the CNO software # # Copyright (c) 2011-2012 - EMBL - European Bioinformatics Institute # # File author(s): CNO developers (cno-dev@ebi.ac.uk) # # Distributed under the GPLv3 License. # See accompanying file LICENSE.txt or copy at # http://www.gnu.org/licenses/gpl-3.0.html # # CNO website: http://www.cellnopt.org # ############################################################################## # $Id$ cSimulator <- function(CNOlist, model, simList, indexList, mode=1) { if (!is(CNOlist,"CNOlist")){ CNOlist = CellNOptR::CNOlist(CNOlist) } # check the structures if(is.null(CNOlist@stimuli) || is.null(CNOlist@inhibitors)) { stop("This function needs 'valueStimuli' and 'valueInhibitors' in CNOlist") } if(is.null(model$reacID) || is.null(model$namesSpecies)) { stop("This function needs 'reacID' and 'namesSpecies' in model") } # variables nStimuli <- as.integer(length(indexList$stimulated)) nInhibitors <- as.integer(length(indexList$inhibited)) nCond <- as.integer(dim(CNOlist@stimuli)[1]) nReacs <- as.integer(length(model$reacID)) nSpecies <- as.integer(length(model$namesSpecies)) nMaxInputs <- as.integer(dim(simList$finalCube)[2]) # simList # used to be # >>> finalCube = as.integer(as.vector(t(simList$finalCube))-1) # but as.vector(t is slow and can be replaced by just as.integer albeit # appropriate C modifications finalCube = as.integer(simList$finalCube-1) ixNeg = as.integer(simList$ixNeg) ignoreCube = as.integer(simList$ignoreCube) maxIx = as.integer(simList$maxIx-1) # index indexSignals <- as.integer(indexList$signals-1) indexStimuli <- as.integer(indexList$stimulated-1) indexInhibitors <- as.integer(indexList$inhibited-1) nSignals <- length(indexSignals) # cnolist valueInhibitors <- as.integer(CNOlist@inhibitors) valueStimuli <- as.integer(CNOlist@stimuli) res = .Call("simulatorT1", # variables nStimuli, nInhibitors, nCond, nReacs, nSpecies, nSignals, nMaxInputs, # simList finalCube, ixNeg, ignoreCube, maxIx, # index indexSignals, 
indexStimuli, indexInhibitors, # cnolist valueInhibitors, valueStimuli, as.integer(mode) ) # should not be cut because it is used in simulateTN as an input # res = res[,indexList$signals] return(res) }
9a307cb1df0b403439627a169cdb1784f9f5ca60
6f7ae9c734fda6fbd7338ce1af3945c65f609088
/02_create_main_analysis_datasets/04_compute_market_access/03a_woreda_traveltime_dataset.R
7ff8f68983a038cf367f5f1ee657ad25ea5382d4
[]
no_license
mohammed-seid/Ethiopia-Corridors-IE
d9b16014f866d529f39720f434a5f5dda433f9eb
401836d71d60a81faded25a18c19a5f14e082701
refs/heads/master
2022-11-09T01:23:41.471170
2020-06-15T22:00:30
2020-06-15T22:00:30
null
0
0
null
null
null
null
UTF-8
R
false
false
6,703
r
03a_woreda_traveltime_dataset.R
# Travel Time #source("~/Documents/Github/Ethiopia-Corridors-IE/Code/_ethiopia_ie_master.R") SEP_ROAD_SHAPEFILES <- T # Use separate road shapefiles RESOLUTION_KM <- 3 WALKING_SPEED <- 5 for(SEP_ROAD_SHAPEFILES in c(TRUE, FALSE)){ # Load Data -------------------------------------------------------------------- woreda_wgs84 <- readRDS(file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", "points.Rds")) gpw <- raster(file.path(rawdata_file_path, "gpw-v4-population-density-2000", "gpw-v4-population-density_2000.tif")) gpw <- gpw %>% crop(woreda_wgs84) # Location with largest population with woreda --------------------------------- woreda_points <- lapply(1:nrow(woreda_wgs84), function(i){ print(i) gpw_i <- gpw %>% crop(woreda_wgs84[i,]) %>% mask(woreda_wgs84[i,]) df <- gpw_i %>% coordinates() %>% as.data.frame() df$pop <- gpw_i[] loc_df <- df[which.max(df$pop),] %>% dplyr::select(x,y) if(nrow(loc_df) %in% 0){ loc_df <- coordinates(woreda_wgs84[i,]) %>% as.data.frame() %>% dplyr::rename(x= V1, y= V2) } return(loc_df) }) %>% bind_rows() woreda_points$uid <- woreda_wgs84$uid coordinates(woreda_points) <- ~x+y crs(woreda_points) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0") # Reproject to Ethiopia Projection --------------------------------------------- # Reproject to UTM. Better for distance calculations (eg, for setting grid cell size) woreda_points <- spTransform(woreda_points, UTM_ETH) woreda <- spTransform(woreda_wgs84, UTM_ETH) # Crete Raster BaseLayer ------------------------------------------------------- r <- raster(xmn=woreda@bbox[1,1], xmx=woreda@bbox[1,2], ymn=woreda@bbox[2,1], ymx=woreda@bbox[2,2], crs=UTM_ETH, resolution = RESOLUTION_KM*1000) # Function for Travel Times ---------------------------------------------------- calc_travel_time <- function(year, woreda_points, SEP_ROAD_SHAPEFILES){ # If SEP_ROAD_SHAPEFILES=T, then "roads" is ignored, as loads roads within # the function. 
print(paste(year, "--------------------------------------------------------")) #### Load/Prep Roads if(SEP_ROAD_SHAPEFILES){ # If road isn't even (ie, odd), use previous year if((year %% 2) %in% 0){ year_road <- year } else{ year_road <- year - 1 } # Load Roads roads <- readOGR(dsn = file.path(project_file_path, "Data", "RawData", "RoadNetworkPanelDataV3_1996_2016_Revised"), layer = paste0("All_Network_", year_road)) if("Speed2006a" %in% names(roads)) roads$Speed2006 <- roads$Speed2006a } else{ roads <- readRDS(file.path(project_file_path, "Data", "FinalData", "roads", "RoadNetworkPanelData_1996_2016.Rds")) year_road <- year } roads <- spTransform(roads, UTM_ETH) speed_var <- paste0("Speed", year_road) roads$SpeedYYYY <- roads[[speed_var]] roads$SpeedYYYY[roads$SpeedYYYY %in% 0] <- WALKING_SPEED #### Sort by Speed # If multiple polylines interesect with a cell, velox uses the last polygon from # the spatial polygons dataframe. Consequently, we sort by speeds from slowest to # fastest so that velox uses the fastest speed. roads <- roads[order(roads$SpeedYYYY),] #### Rasterize roads_r <- r roads_r[] <- 0 roads_r_vx <- velox(roads_r) roads_r_vx$rasterize(roads, field="SpeedYYYY", background=WALKING_SPEED) # background should be walking speed (5km/hr); https://en.wikipedia.org/wiki/Preferred_walking_speed roads_r <- roads_r_vx$as.RasterLayer() #### Make Transition Layer # Roads is currently speed; calculate how long it takes to move across cell Now, values are the number # of hours it takes to cross the cell. roads_r[] <- RESOLUTION_KM/roads_r[] cost_t <- transition(roads_r, function(x) 1/mean(x), directions=8) #cost_t <- geoCorrection(cost_t, type="c") #### Calculate Travel Time for Each Location tt_df <- lapply(1:nrow(woreda_points), function(i){ if((i %% 10) %in% 0) print(i) tt <- costDistance(cost_t, woreda_points[i,], woreda_points) %>% as.numeric() tt <- tt * RESOLUTION_KM # to get more accurate travel time???? 
TODO #### TESTING #tt <- costDistance(cost_t, # woreda_points[1,], # woreda_points[100,]) %>% as.numeric() #tt1 <- shortestPath(cost_t, # woreda_points[1,], # woreda_points[100,], # output = "SpatialLines") #coordinates(woreda_points[1,] %>% spTransform(CRS("+init=epsg:4326"))) %>% rev() #coordinates(woreda_points[100,] %>% spTransform(CRS("+init=epsg:4326"))) %>% rev() #plot(tt1) #plot(roads_r,add=T) #plot(tt1,add=T) df_out <- data.frame(dest_uid = woreda_points$uid, travel_time = tt) df_out$orig_uid <- woreda_points$uid[i] return(df_out) }) %>% bind_rows tt_df$year <- year return(tt_df) } location_traveltimes <- lapply(1996:2016, calc_travel_time, woreda_points, SEP_ROAD_SHAPEFILES) %>% bind_rows() %>% as.data.table() # Calculate Linear Distance ---------------------------------------------------- distance_df <- lapply(1:nrow(woreda_points), function(i){ if((i %% 100) %in% 0) print(i) distance <- gDistance(woreda_points[i,], woreda_points, byid=T) %>% as.vector() df_out <- data.frame(dest_uid = woreda_points$uid, distance = distance) df_out$orig_uid <- woreda_points$uid[i] return(df_out) }) %>% bind_rows %>% as.data.table() location_traveltimes <- merge(location_traveltimes, distance_df, by=c("orig_uid", "dest_uid")) # Export ----------------------------------------------------------------------- if(SEP_ROAD_SHAPEFILES){ out_add <- "_rdsep" } else{ out_add <- "" } saveRDS(location_traveltimes, file.path(finaldata_file_path, DATASET_TYPE, "individual_datasets", paste0("woreda_traveltimes_distances",out_add,".Rds"))) }
18f23d87f61b709e03d499d50bb1aab16a58dbd8
d16d3a64c5707acf1faa5ab5db1ddf50f5bc48d6
/ACE2_analysis/3_dseq2.R
408dc70f892fbaaf3d56a6f5c0eb22fc6e2c2730
[]
no_license
takeonaito/rnaseq
80782d7e0366f50021b4bdef2469bfa21f871de1
39ce4cc8d92d1d9384ce46987b1bf3dadc43548e
refs/heads/master
2021-03-04T22:41:43.103095
2020-04-08T22:44:18
2020-04-08T22:44:18
246,072,279
0
0
null
null
null
null
UTF-8
R
false
false
4,947
r
3_dseq2.R
library(ggrepel) library(DESeq2) library(BiocParallel) library(tidyverse) library(readr) library(data.table) library(readxl) library(AnnotationDbi) library(org.Hs.eg.db) library(ggbeeswarm) # read necessary files (count, sample and serology) count <- read_tsv("/home/takeo/rnaseq/WashU/data/all.gene_counts.xls") sample <- read_xlsx("/home/takeo/rnaseq/WashU/data/WashU_BMI_RNAseq_IDlink.xlsx") sample$Genetic_ID <- str_replace(sample$Genetic_ID,"10-0441/10-1045","10-0441") serology<- read_tsv("/home/takeo/rnaseq/WashU/data/serology_updated_dalin.txt") # read necessary files (disease type and phenotype) location <- read_tsv("/home/takeo/rnaseq/WashU/data/cd_clean.txt") disease <- read_xls("/home/takeo/rnaseq/WashU/data/Copy of Genetics 01_02_2019.xls") colnames(disease) <- make.names(colnames(disease)) # merge disease type and phenotype data to target file target <- sample %>% dplyr::select(Genetic_ID,RNAseq_ID) # exclude non CD and caucasian target <- target %>% left_join(disease,by = c("Genetic_ID" = "Genetic.ID")) %>% dplyr::filter(Race == "Caucasian") target1 <- target %>% left_join(location,by = c("Genetic_ID" = "genetic_id")) target1$RNAseq_ID <- str_replace(target1$RNAseq_ID,"-","_") # make RNAseq_ID in sample file match to count files. target1$RNAseq_ID <- paste0("sample.",target1$RNAseq_ID) %>% base::tolower() # confirm complete match table( target1$RNAseq_ID %in% colnames(count)) # make sampleTable which contain serology and Genetic ID serology1 <- serology %>% drop_na(Genetic.ID) sampleTable <- target1 %>% inner_join(serology1,by = c("Genetic_ID" = "Genetic.ID")) ## there is a duplication in cc -89( genetcid = 97-0329) --> ask dalin. 
sampleTable <- sampleTable[-73,] # I excluded old data of 97-0329 # omit subjects whose serology are NA sampleTable$hensuu <- as.numeric(sampleTable$Age.at.Collection) sampleTable$hensuu <- as.factor(sampleTable$Gender) sampleTable1 <- sampleTable %>% drop_na(hensuu) %>% dplyr::select(-Genetic_ID) # extract count data whose serology data are available kouho <- sampleTable1$RNAseq_ID count1 <- count %>% dplyr::select(ensembl_gene_id,kouho) %>% as.data.frame() row.names(count1) <- count1$ensembl_gene_id count1 <- count1[,-1] # confirm sample order match identical(colnames(count1),sampleTable1$RNAseq_ID) # make dds object for DESeq2 dds <- DESeqDataSetFromMatrix(countData = count1, colData = sampleTable1, design = ~hensuu ) keep <- rowSums(counts(dds)>10) > 10 dds <- dds[ keep, ] nrow(dds) # do variance stabilizing transformation (VST) for visualization of data. vsd <- vst(dds, blind = FALSE) # make PCA for visualizing data set. plotPCA(vsd, intgroup = c("hensuu")) pcaData <- plotPCA(vsd, intgroup = c("hensuu"), returnData = TRUE) pcaData$order <- c(1:dim(vsd)[2]) p <- ggplot(pcaData, aes(x = PC1, y = PC2, color = hensuu)) + geom_point(size =3) p + geom_text_repel(data = pcaData,aes(label = order)) # exclude outliner CC_89 exclude <- which(row.names(colData(dds)) == "sample.cc_89") dds <- dds[,-exclude] # do variance stabilizing transformation (VST) for visualization of data after exclusion vsd <- vst(dds, blind = FALSE) # make PCA for visualizing data set after exclude outliner. 
plotPCA(vsd, intgroup = c("hensuu")) # do analysis of DEG --> this will take a few minutes register(MulticoreParam(workers = 10)) dds1 <- DESeq(dds,parallel = TRUE,minReplicatesForReplace = Inf) res <- results(dds1,alpha = 0.05,contrast=c("hensuu","F","M")) res <- results(dds1,alpha = 0.05) res$ensemble = row.names(res) res$symbol <- mapIds(org.Hs.eg.db, keys=row.names(res), column="SYMBOL", keytype="ENSEMBL", multiVals="first") res$entrez <- mapIds(org.Hs.eg.db, keys=row.names(res), column="ENTREZID", keytype="ENSEMBL", multiVals="first") res %>% data.frame(res) %>% filter(ensemble == 'ENSG00000130234') # subset only significant genes resSig <- subset(res, padj < 0.05) resSig <- subset(resSig,abs(log2FoldChange) > 1 ) resSig <- resSig[order(resSig$pvalue),] resSig # check the cooks distance of resSig kouho <- which(row.names(res) %in% row.names(resSig)) round(apply(assays(dds1)[["cooks"]][kouho,],1,max),2) # make plot of top hit gene topGene <- rownames(res)[which.min(res$padj)] geneCounts <- plotCounts(dds1, gene =topGene, intgroup = c("hensuu"), returnData = T,normalized = T) geneCounts <- plotCounts(dds1, gene ="ENSG00000130234", intgroup = c("hensuu"), returnData = T,normalized = T) ggplot(geneCounts, aes(x = hensuu, y = count,color = hensuu)) + scale_y_log10() + geom_beeswarm(cex = 3) ggplot(geneCounts,aes(x = hensuu, y = count)) +scale_y_log10() + geom_point() mode(geneCounts)
8f3d23f9001aeb68a94a6dd0d29f817664816702
3a882c3eb6867a5ce5081747c9c538aec0d08705
/man/read.txt.Renishaw.Rd
695b3b26fabb58f39be21b934421849e54fc4e2d
[]
no_license
cran/hyperSpec
02c327c0ea66014936de3af2cb188e9e30a4e6f7
4fc1e239f548e98f3a295e0521a2f99a5b84316d
refs/heads/master
2021-09-22T07:57:28.497828
2021-09-13T12:00:02
2021-09-13T12:00:02
17,696,713
3
10
null
2016-10-31T16:36:46
2014-03-13T05:00:53
R
UTF-8
R
false
true
2,540
rd
read.txt.Renishaw.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprecated.R, R/read.txt.Renishaw.R \name{scan.txt.Renishaw} \alias{scan.txt.Renishaw} \alias{scan.zip.Renishaw} \alias{read.txt.Renishaw} \alias{read.zip.Renishaw} \title{import Raman measurements from Renishaw ASCII-files} \usage{ scan.txt.Renishaw(...) scan.zip.Renishaw(...) read.txt.Renishaw( file = stop("file is required"), data = "xyspc", nlines = 0, nspc = NULL ) read.zip.Renishaw( file = stop("filename is required"), txt.file = sub("[.]zip", ".txt", basename(file)), ... ) } \arguments{ \item{...}{Arguments for \code{read.txt.Renishaw}} \item{file}{file name or connection} \item{data}{type of file, one of "spc", "xyspc", "zspc", "depth", "ts", see details.} \item{nlines}{number of lines to read in each chunk, if 0 or less read whole file at once. \code{nlines} must cover at least one complete spectrum,i.e. \code{nlines} must be at least the number of data points per spectrum. Reasonable values start at \code{1e6}.} \item{nspc}{number of spectra in the file} \item{txt.file}{name of the .txt file in the .zip archive. Defaults to zip file's name with suffix .txt instead of .zip} } \value{ the \code{hyperSpec} object } \description{ import Raman measurements from Renishaw (possibly compressed) .txt file. } \details{ The file may be of any file type that can be read by \code{\link[base]{gzfile}} (i.e. text, or zipped by gzip, bzip2, xz or lzma). .zip zipped files need to be read using \code{read.zip.Renishaw}. Renishaw .wxd files are converted to .txt ASCII files by their batch converter. They come in a "long" format with columns (y x | time | z)? wavelength intensity. The first columns depend on the data type. 
The corresponding possibilities for the \code{data} argument are: \tabular{lll}{ \code{data} \tab columns \tab \cr \code{"spc"} \tab wl int \tab single spectrum \cr \code{"zspc"}, \code{"depth"} \tab z wl int \tab depth profile\cr \code{"ts"} \tab t wl int \tab time series\cr \code{"xyspc"} \tab y x wl int \tab 2d map\cr } This function allows reading very large ASCII files, but it does not work on files with missing values (\code{NA}s are allowed). If the file is so large that it sould be read in chunks and \code{nspc} is not given, \code{read.txt.Renishaw} tries to guess it by using \code{wc} (if installed). } \seealso{ \code{\link{read.txt.long}}, \code{\link{read.txt.wide}}, \code{\link[base]{scan}} } \author{ C. Beleites } \keyword{IO} \keyword{file} \keyword{internal}
cde8a7b882eb3dd2b5ef1823cdcd53a07a3a8171
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/chebpol/examples/chebcoef.Rd.R
1d240bfeacc06be555c83c69777040cc3549d436
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
238
r
chebcoef.Rd.R
library(chebpol) ### Name: chebcoef ### Title: Compute Chebyshev-coefficients given values on a Chebyshev grid ### Aliases: chebcoef ### ** Examples ## Coefficients for a 2x3x4 grid a <- array(rnorm(24),dim=c(2,3,4)) chebcoef(a)
eb7e2ffbb6211b7dea202b78068e3676e7069fbb
0ce3453dd3ea67d3d162486b1b78427a41d163d4
/man/sea_ice_area.Rd
e4485107590970e368a3886c02c065e321d41ba2
[ "MIT" ]
permissive
coolbutuseless/emphatic
0d79045a4893285d0cb5028a60c5ec772457e88a
32488dc0a91b7b461c6f3c592fdd1d2c1123b7dd
refs/heads/main
2023-09-01T12:58:08.954192
2023-08-30T05:59:10
2023-08-30T05:59:10
308,440,058
100
3
MIT
2020-12-17T22:53:18
2020-10-29T20:17:47
R
UTF-8
R
false
true
492
rd
sea_ice_area.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-sets.R \docType{data} \name{sea_ice_area} \alias{sea_ice_area} \title{Monthly Southern Sea Ice Area over the last 40 years} \format{ Matrix of sea ice area, monthly from 1978 to 2020. } \source{ \url{ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/south/monthly/data/} } \usage{ sea_ice_area } \description{ From the 'National Snow and Ice Data Center' \url{https://nsidc.org/data/g02135} } \keyword{datasets}
7a99d08ebe73bfb859ac32e51aa2763db81adcf5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/BIEN/examples/BIEN_metadata_citation.Rd.R
ba11ed8c8b0aa8648fd043c77a12ab84f8b810e8
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
444
r
BIEN_metadata_citation.Rd.R
library(BIEN) ### Name: BIEN_metadata_citation ### Title: Generate citations for data extracted from BIEN. ### Aliases: BIEN_metadata_citation ### ** Examples ## Not run: ##D BIEN_metadata_citation()#If you are referencing the phylogeny or range maps. ##D Xanthium_data<-BIEN_occurrence_species("Xanthium strumarium") ##D citations<-BIEN_metadata_citation(dataframe=Xanthium_data)#If you are referencing occurrence data ## End(Not run)
2047a5561a3c7c7d51847fd4c2bcf2b0d8772260
6e8d099d91bc467c36a8a5e3b609de0b44380603
/R/setTypes.R
f7f9ee829795f1d48b0bcf34e57c92eb8d3c60c4
[]
no_license
Peder2911/Unfed_Gnostic
4fb8bcb46880c978cd913d4c44a0b381cb2e8edf
aac66c233195a02447474d1c4f970a62680c250e
refs/heads/master
2020-04-04T03:47:02.053555
2018-11-01T14:15:30
2018-11-01T14:15:30
155,725,757
0
0
null
null
null
null
UTF-8
R
false
false
579
r
setTypes.R
#' Set Types By Vector #' #' Set the data types of a data frame using a character vector. #' The vector can be created by sapply(data,class) #' @param df A data frame #' @param types A character vector of length ncol(df) #' @keywords types metaprogramming #' @export #' @importFrom magrittr "%>%" #' @examples #' setTypes(mtcars,rep('character',11)) setTypes <- function(df,types){ expressions <- sapply(types,function(x){ paste('as.',x,sep='')%>% parse(text = .) }) i = 1 for(e in expressions){ df[[i]] <- eval(e)(df[[i]]) i <- i + 1 } df }
682d60245ed92ad6f2d010f4593f68d795faa212
bfe324beb0c335272362e7514938a82c08a9cc40
/tests/testthat/test_calc_water_tax.R
d0c2e70b3b14a8f16b9ac76b9532d4b74201919e
[]
no_license
jkmiller-wildlife/PrecipPackage
79157b7d394835f34a2999bdbaf380c74b09a7d0
070035c67dbdb496716c60e5d598b8315b7a4e3a
refs/heads/master
2020-06-01T12:07:27.458399
2019-06-16T03:43:21
2019-06-16T03:43:21
190,774,172
0
0
null
null
null
null
UTF-8
R
false
false
180
r
test_calc_water_tax.R
test_that( "Tax is positive and higher than zero", { data(monthly_precip) expect_true( calc_water_tax(monthly_precip, 100, "SANTA BARBARA", "OCT", 2012) > 0 ) } )
66a640ef620d17507b83d958e8b4f676f8b2f82d
7db5e131633d086c7a30675857c114615a9efe6f
/man/reverseList.Rd
df3a5b2428de4f4bb8ff567fc776a6045c3f4911
[]
no_license
guokai8/rcellmarker
969b13d4af8019fda574748064af4d72b77e58e6
32d3a12a4b84310a36f3eb645c7e364909257bd9
refs/heads/master
2022-05-04T00:48:55.315592
2022-03-30T17:27:52
2022-03-30T17:27:52
244,527,840
9
2
null
null
null
null
UTF-8
R
false
true
262
rd
reverseList.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{reverseList} \alias{reverseList} \title{reverse List} \usage{ reverseList(lhs) } \arguments{ \item{lhs}{list with names} } \description{ reverse List } \author{ Kai Guo }
23f3d400f67be1d0e7d8141b9d82f32d179ed289
1ff45a674ca54329a98451899d834a094e70b115
/asian-pacific-heritage/process-acs-data-not-used-yet.R
6226b7f5e5da562dccfd4d138221263cc89ee796
[]
no_license
psrc/equity-data-tools
7642185d3c1d70e2ab71df489a9271c87356965c
d0bea587ec63731468c52895146fa3d7346ae321
refs/heads/main
2023-05-05T23:55:47.299847
2021-05-18T22:12:56
2021-05-18T22:12:56
359,532,801
1
0
null
null
null
null
UTF-8
R
false
false
6,761
r
process-acs-data-not-used-yet.R
# Inputs ------------------------------------------------------------------ library(tidycensus) library(tidyverse) Sys.setenv(CENSUS_KEY='c4780eb03010d73b7ae4e6894c1592375e545a21') census_api_key('c4780eb03010d73b7ae4e6894c1592375e545a21') acs <- "acs1" yrs <- c(seq(2010,2019,1)) psrc.county <- c("53033","53035","53053","53061") psrc.msa <- c("14740","42660") pop.tbl <- "B03002" pop.vars <- c("001","003","004","005","006","007","008","009","012") inc.tbl <- "S1903" inc.vars <- c("001","003","004","005","006","007","008","009","010") edu.tbl <- "S1501" edu.vars <- c("031","032","033","034","035","036","037","038","039", "040","041","042","043","044","045","046","047","048","049", "050","051","052","053","054") ownership <- "S2502" # Population --------------------------------------------------------- acs.population <- NULL census.tbl <- pop.tbl keep.vars <- paste0(pop.tbl,"_",pop.vars) total.var <- paste0(pop.tbl,"_001") # Download the list of variables from the latest data year variable.labels <- load_variables(max(yrs), acs, cache = TRUE) %>% rename(variable = name) for (c.yr in yrs) { # Download County Level Data census.download <- get_acs(geography = "county", state="53", year=c.yr, survey = acs, table = census.tbl) %>% mutate(NAME = gsub(", Washington", "", NAME)) %>% filter(GEOID %in% psrc.county, variable %in% keep.vars) # Get a region total from the county data temp <- census.download %>% select(variable, estimate, moe) %>% group_by(variable) %>% summarize(sumest = sum(estimate), summoe = moe_sum(moe, estimate)) %>% rename(estimate=sumest, moe=summoe) %>% mutate(GEOID="53033035053061", NAME="Region", year=c.yr) # Calculate Total population by geography totals <- temp %>% filter(variable==total.var) %>% select(NAME,estimate) %>% rename(total=estimate) # Add totals and calculate share of total by race temp <- left_join(temp, totals, by=c("NAME")) %>% mutate(share=estimate/total) %>% select(-total) # Combine with other data years if (is.null(acs.population)) 
{acs.population <- temp} else {acs.population <- bind_rows(list(acs.population, temp))} rm(census.download,temp,totals) } # Add labels from the latest census data year downloaded and clean up labels acs.population <- left_join(acs.population,variable.labels,by=c("variable")) %>% mutate(concept="Population by Race", label = str_extract(label, "(?<=!!)[^!!]*$"), label = gsub(" alone", "", label), label = gsub(":", "", label)) %>% rename(race=label) %>% mutate(category="Population") %>% mutate(race=gsub("White","White, not Hispanic or Latino",race)) # Median Income ------------------------------------------------------------------ acs.income <- NULL census.tbl <- inc.tbl total.var <- paste0(inc.tbl,"_C03_001") # Download the list of variables from the latest data year variable.labels <- load_variables(max(yrs), paste0(acs,"/subject"), cache = TRUE) %>% rename(variable = name) for (c.yr in yrs) { # Variables for income table changed in 2017 so make the keep lsit consistent depending on year if (c.yr <2017) { keep.vars <- paste0(inc.tbl,"_C02_",inc.vars) } else { keep.vars <- paste0(inc.tbl,"_C03_",inc.vars) } # Download Census Data by MSA for Median Income since we can't combine counties census.download <- get_acs(geography = "metropolitan statistical area/micropolitan statistical area", year=c.yr, survey = acs, table = census.tbl) %>% mutate(NAME = gsub(", WA Metro Area", " MSA", NAME)) %>% filter(GEOID %in% psrc.msa, variable %in% keep.vars) # Variable names changed in 2017 so adjust pre-2017 variable names to match so the labels align correctly if (c.yr <2017) { census.download <- census.download %>% mutate(variable = gsub("C02","C03",variable)) } # Calculate Total population by geography totals <- census.download %>% filter(variable==total.var) %>% select(NAME,estimate) %>% rename(total=estimate) # Add totals and calculate share of total by race temp <- left_join(census.download, totals, by=c("NAME")) %>% mutate(share=estimate/total) %>% select(-total) # Combine 
with other data years if (is.null(acs.income)) {acs.income <- temp} else {acs.income <- bind_rows(list(acs.income, temp))} rm(census.download,temp,totals) } # Add labels from the latest census data year downloaded and clean up labels acs.income <- left_join(acs.income,variable.labels,by=c("variable")) %>% mutate(year=c.yr, concept="Median Income by Race") %>% mutate(race=label) %>% mutate(race = str_extract(race, "(?<=!!)[^!!]*$"), race = gsub("Households","Total",race), label="Median Income") %>% rename(category=label) # Education --------------------------------------------------------- acs.education <- NULL census.tbl <- edu.tbl keep.vars <- paste0(edu.tbl,"_C01_",edu.vars) # Download the list of variables from the latest data year variable.labels <- load_variables(max(yrs), paste0(acs,"/subject"), cache = TRUE) %>% rename(variable = name) for (c.yr in yrs) { # Download County Level Data census.download <- get_acs(geography = "county", state="53", year=c.yr, survey = acs, table = census.tbl) %>% mutate(NAME = gsub(", Washington", "", NAME)) %>% filter(GEOID %in% psrc.county, variable %in% keep.vars) # Get a region total from the county data temp <- census.download %>% select(variable, estimate, moe) %>% group_by(variable) %>% summarize(sumest = sum(estimate), summoe = moe_sum(moe, estimate)) %>% rename(estimate=sumest, moe=summoe) %>% mutate(GEOID="53033035053061", NAME="Region", year=c.yr) # Add Labels temp <- left_join(temp,variable.labels,by=c("variable")) # Calculate Total population by geography totals <- temp %>% filter(!grepl("Bachelor's", label),!grepl("High school", label)) %>% select(NAME,estimate) %>% rename(total=estimate) # Add totals and calculate share of total by race temp <- left_join(temp, totals, by=c("NAME")) %>% mutate(share=estimate/total) %>% select(-total) # Combine with other data years if (is.null(acs.population)) {acs.population <- temp} else {acs.population <- bind_rows(list(acs.population, temp))} rm(census.download,temp,totals) } # 
Add labels from the latest census data year downloaded and clean up labels acs.population <- left_join(acs.population,variable.labels,by=c("variable")) %>% mutate(concept="Population by Race", label = str_extract(label, "(?<=!!)[^!!]*$"), label = gsub(" alone", "", label), label = gsub(":", "", label)) %>% rename(race=label) %>% mutate(category="Population") %>% mutate(race=gsub("White","White, not Hispanic or Latino",race))
6f208d4aba17a090405c66c02b05c936b2896949
a330829b1c70080a8a3b221b093e5dccfa71af90
/SDM_Project_HillsboroughCountyHomes.R
11ea2c492bdceead2d0fa8260e66944449127454
[]
no_license
erichmccartney/SDM_Project_HillsboroughCountyRealestate
917c8954789877a7c1259b292ddec48cb4b4098a
3cbd36ac50e3c3fe86034255c50c2933eb060b84
refs/heads/main
2023-08-28T17:40:52.587923
2021-10-24T13:31:42
2021-10-24T13:31:42
363,657,217
0
0
null
2021-05-10T00:43:51
2021-05-02T13:26:38
R
UTF-8
R
false
false
9,114
r
SDM_Project_HillsboroughCountyHomes.R
#' SDM Project: Hillsborough County Real Estate install.packages("rio") install.packages("moments") install.packages("car") install.packages("readxl") install.packages("corrplot") install.packages("reshape2") rm(list=ls()) library(rio) library(readxl) library(lattice) library(dplyr) library(ggplot2) library(corrplot) library(readxl) library(openxlsx) library(lubridate) library(reshape2) library(stargazer) library(lme4) library(survival) library(PerformanceAnalytics) setwd("~/GitHub/SDM_Project_HillsboroughCountyRealestate") df <- read.csv("HillsboroughCountyData.csv") str(df) View(df) #Feature engineering df$BuildingAge = 2021 - df$YearBuilt df$PricePerHeatedArea = df$JustValue/df$TotalHeatedAreaSqFt df$HeatedAreaProportion = df$TotalHeatedAreaSqFt/(df$Acreage*43560) df$LastSaleDate = as.Date(df$LastSaleDate, format = "%m/%d/%y" ) df$LengthOwnershipProportion = df$YearsSinceTurnover/df$BuildingAge #' Data visualizations hist(df$YearsSinceTurnover) hist(log(df$YearsSinceTurnover)) # Misleading histogram: has different varieties #DensityPlot densityplot(~YearsSinceTurnover | Avg_GradePoint2019, data=df) #Linear Regression #Summary: linearMod <- lm(YearsSinceTurnover ~ Avg_GradePoint2019 + Avg_GradePoint2018 + Avg_GradePoint2017, data=df) print(linearMod) #' OLS model (pooled) ols1 <- lm(YearsSinceTurnover ~ PropertyType*Neighborhood, data=df) summary(ols1) ols2 <- lm(YearsSinceTurnover ~ Avg_GradePoint2019 + Avg_GradePoint2018 + Avg_GradePoint2017*LastSalePrice, data=df) summary(ols2) # Fixed Effects Model fe1 <- lm(YearsSinceTurnover ~ Avg_GradePoint2019*LastSalePrice + Avg_GradePoint2019*SchoolZipCodeGroup + as.factor(Neighborhood), data=df) summary(fe1) confint(fe2) fe2 <- lm(LastSalePrice ~ PropertyType*Neighborhood + SiteCity, data=df) summary(fe2) confint(fe2) options(max.print = 60000) stargazer(linearMod, ols1, ols2, fe1, fe2, type="text", single.row=TRUE) # Random Effects Model re <- lmer(LastSalePrice ~ Neighborhood*PropertyType + (1 | SiteZip), data=df, 
REML=FALSE) summary(re) confint(re) AIC(re) fixef(re) # Magnitude of fixed effects ranef(re) # Magnitude of random effects coef(re) # Magnitude of total effects ggplot(df, aes(x=LastSalePrice, y = PropertyType)) + geom_bar(stat = "Identity", width = 0.10) ggplot(df, aes(x= LastSalePrice, y = PropertyType, fill = Neighborhood)) + geom_bar(stat = "Identity") stargazer(ols1, ols2, fe1, fe2, re, type="text", single.row=TRUE) AIC(ols1, ols2, fe1, fe2, re) #Test for Assumptions hist(ols1$res) ols1$fit hist(ols2$res) ols2$fit #ResidualPlot plot(fe1$res ~ fe1$fit) plot(fe2$res ~ fe2$fit) #QQPlot qqnorm(fe1$res) qqline(fe1$res, col="red") qqnorm(fe2$res) qqline(fe2$res, col="red") #Shapiro-Wilk's Test inconclusive sample size must be between 3 and 5000 shapiro.test(fe1$res) shapiro.test(fe2$res) # Group by neighborhood (unit of analysis) neighborhood_df = df %>% group_by(Neighborhood) %>% summarize(stories_avg = mean(TotalStories, na.rm = TRUE), bedrooms_avg = mean(TotalBedrooms, na.rm = TRUE), bathrooms_avg = mean(TotalBathrooms, na.rm = TRUE), building_age_avg = mean(BuildingAge, na.rm = TRUE), price_avg = mean(PricePerHeatedArea, na.rm = TRUE), heated_area_proportion_avg = mean(HeatedAreaProportion, na.rm = TRUE), grade_point_2019 = mean(Avg_GradePoint2019, na.rm = TRUE), minority_percentage = mean(Avg_Percentage.of.Minority.Students, na.rm=TRUE), economically_disadvantaged_percentage = mean(Avg_Percentage.of.Economically.Disadvanteged.Students, na.rm = TRUE), length_of_ownership = mean(YearsSinceTurnover, na.rm=TRUE) ) # Checking missing values summary(neighborhood_df) summary(df$TotalHeatedAreaSqFt) df$TotalHeatedAreaSqFt == 0 # We discovered that 361 observations did not have values for TotalHeatedAreaSqFt # and 1832 did not have values for Acreage and were dropped from the analysis # It represents 5.6823% of our dataset # It caused us to drop 2 neighborhoods nrow(filter(df, Acreage == 0 | TotalHeatedAreaSqFt == 0)) nrow(filter(df, Acreage == 0 | TotalHeatedAreaSqFt 
== 0))/38945 df2 = filter(df, TotalHeatedAreaSqFt != 0) df2 = filter(df2, Acreage != 0) neighborhood_df2 = df2 %>% group_by(Neighborhood) %>% summarize(stories_avg = mean(TotalStories, na.rm = TRUE), bedrooms_avg = mean(TotalBedrooms, na.rm = TRUE), bathrooms_avg = mean(TotalBathrooms, na.rm = TRUE), building_age_avg = mean(BuildingAge, na.rm = TRUE), price_avg = mean(PricePerHeatedArea, na.rm = TRUE), heated_area_proportion_avg = mean(HeatedAreaProportion, na.rm = TRUE), grade_point_2019 = mean(Avg_GradePoint2019, na.rm = TRUE), minority_percentage = mean(Avg_Percentage.of.Minority.Students, na.rm=TRUE), economically_disadvantaged_percentage = mean(Avg_Percentage.of.Economically.Disadvanteged.Students, na.rm = TRUE), length_of_ownership = mean(YearsSinceTurnover, na.rm=TRUE), length_of_ownership_proportion = mean(LengthOwnershipProportion, na.rm=TRUE) ) summary(neighborhood_df2) # Create Visualizations attach(neighborhood_df2) par(mfrow=c(3,4)) hist(stories_avg) hist(bedrooms_avg) hist(bathrooms_avg) hist(building_age_avg) hist(price_avg) hist(grade_point_2019) hist(heated_area_proportion_avg) hist(minority_percentage) hist(economically_disadvantaged_percentage) hist(length_of_ownership) hist(length_of_ownership_proportion) # Check for extremely high correlations par(mfrow=c(1,1)) cor = cor(neighborhood_df2[,c(-1)]) cor corrplot(cor, method = "circle") # we found that the percentage of minority and economically disadvantage # percentage are highly and negatively correlated to grade point, and so it will # be dropped from our analysis to avoid multicollinearity colnames(neighborhood_df2) # Statistical Analysis model1 = lm(length_of_ownership~stories_avg + bedrooms_avg + bathrooms_avg + price_avg + heated_area_proportion_avg + grade_point_2019 + building_age_avg, neighborhood_df2) model2 = lm(length_of_ownership~stories_avg + bedrooms_avg + bathrooms_avg + price_avg + heated_area_proportion_avg + grade_point_2019 + length_of_ownership_proportion, neighborhood_df2) 
model3 = lm(length_of_ownership~stories_avg + bedrooms_avg + bathrooms_avg + log(price_avg) + heated_area_proportion_avg + grade_point_2019 + building_age_avg, neighborhood_df2) summary(model1) summary(model2) summary(model3) stargazer(model1, model2, model3, type="text") #Linearity par(mfrow=c(1,1)) par(mar=c(5.1,4.1,4.1,2.1)) plot(neighborhood_df2$length_of_ownership,model1$fitted.values, pch=19,main="Length of Ownership Actuals v. Fitted") abline(0,1,col="red",lwd=3) #Normality par(mar=c(5.1,4.1,4.1,2.1)) qqnorm(model1$residuals,pch=19, main="Length of Ownership Normality Plot") qqline(model1$residuals,lwd=3,col="red") #Equality of Variances par(mar=c(5.1,4.1,4.1,2.1)) plot(neighborhood_df2$length_of_ownership,rstandard(model1), pch=19,main="Model 1 Residual Plot") abline(0,0,col="red",lwd=3) #It looks like we have some heteroskedasticety model4 = lm(1+log(length_of_ownership)~stories_avg + bedrooms_avg + bathrooms_avg + log(price_avg) + heated_area_proportion_avg + grade_point_2019 + building_age_avg, neighborhood_df2) #Equality of Variances par(mar=c(5.1,4.1,4.1,2.1)) plot(neighborhood_df2$length_of_ownership,rstandard(model4), pch=19,main="Model 4 Residual Plot") abline(0,0,col="red",lwd=3) #Identifying high leverage points. 
lev=hat(model.matrix(model1)) plot(lev,pch=19,ylim=c(0,.5), main="High leverage points") abline(3*mean(lev),0,col="red",lwd=3) neighborhood_df2[lev>(3*mean(lev)),] ##identifying which data points are 3 times higher than the mean leverage neighborhood_df2[lev>(3*mean(lev)),1] outliers = which(lev>(3*mean(lev)),1) df_no_outliers = neighborhood_df2[-outliers,] model5 = lm(length_of_ownership~stories_avg + bedrooms_avg + bathrooms_avg + price_avg + heated_area_proportion_avg + grade_point_2019 + building_age_avg, df_no_outliers) summary(model5) par(mar=c(5.1,4.1,4.1,2.1)) plot(df_no_outliers$length_of_ownership,rstandard(model5), pch=19,main="Model 5 Residual Plot") abline(0,0,col="red",lwd=3) model6 = lm(1+log(length_of_ownership)~stories_avg + bedrooms_avg + bathrooms_avg + price_avg + heated_area_proportion_avg + grade_point_2019 + building_age_avg, df_no_outliers) par(mar=c(5.1,4.1,4.1,2.1)) plot(df_no_outliers$length_of_ownership,rstandard(model6), pch=19,main="Model 6 Residual Plot") abline(0,0,col="red",lwd=3) stargazer(model1, model2, model3, model4, model5, model6, type="text") stargazer(ols2, model1, model2, type="text")
6f15eef9258b1c95edcd019951cb83520db8b29b
c200977129e98de598e665a37508d150f564d286
/get_allele_id_to_query.R
3312f56ab4250f818892d5efe663d8da16372766
[]
no_license
jenjohnson7/CS702
ba9b6cf029462a47168b297ed41001e42dec2138
78647a03f3237851b8985212f63631849fdbe385
refs/heads/master
2020-03-11T15:04:14.432669
2018-05-21T13:12:39
2018-05-21T13:12:39
123,441,033
0
0
null
null
null
null
UTF-8
R
false
false
475
r
get_allele_id_to_query.R
library(tidyverse) # get clinvar data AD_clinvar_data <- read.delim('data/AD_clinvar_result.txt', header = TRUE) AR_clinvar_data <- read.delim('data/AR_clinvar_result.txt', header = TRUE) XLR_clinvar_data <- read.delim('data/XLR_clinvar_result.txt', header = TRUE) #rowbind into total_data total_data <- rbind(AD_clinvar_data, AR_clinvar_data, XLR_clinvar_data) # list of allele_ids write.table(total_data$AlleleID.s., "data/allele_ids_to_query.txt", row.names = FALSE)
780ac1d9b7570ef64a887a3b797549c6f69457f5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/recluster/examples/recluster.group.col.Rd.R
c3d818b5873635061e2aad2bcab62bc018f74210
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
417
r
recluster.group.col.Rd.R
library(recluster) ### Name: recluster.group.col ### Title: Compute mean coordinate values and RGB colours. ### Aliases: recluster.group.col ### Keywords: cluster ### ** Examples data(datamod) sordiss<- recluster.dist(datamod,dist="sorensen") points<-metaMDS(sordiss, center=TRUE)$points col<-recluster.col(points) group<-c(1,2,3,3,3,1,2,1,2) ncol<-recluster.group.col(col,group) recluster.plot.col(ncol$aggr)
3cd4223bd99d8fed8dc979d0288900d1a8ee733f
9822e0e83895f17a69d529c2c5a54097e2260af2
/rf_AirBNB.R
45018aaaec3ba36e5108e153499844cee67b0609
[]
no_license
tboats/kaggle_AirBNB
e308a5938af514ea8793d48a71f27e31a298bb63
332378708251c136742d7e82f643287e3b80792b
refs/heads/master
2021-01-10T02:52:35.610412
2016-03-06T07:14:29
2016-03-06T07:14:29
49,351,586
0
0
null
null
null
null
UTF-8
R
false
false
6,433
r
rf_AirBNB.R
###################################################################### # Goal of script: create a random forest model on AirBNB data to predict country of visit # # # date of start: 01/09/2016 # # ###################################################################### ## load libraries library(plyr) library(dplyr) library(ggplot2) library(reshape2) library(caret) library(lubridate) library(randomForest) #library(ROCR) ## load input from "exploration_AirBNB.R" filename <- "dfTrain_sessionStats1.csv" dfSessionStats <- read.csv(filename) statCols <- c("sum", "mean", "sd", "max", "min") dfSessionStats[dfSessionStats$N == 1, statCols] <- 0 dfSessionStats[dfSessionStats$N == 2, "sd"] <- 0 ## load other data dfCountries <- read.csv("../data/countries.csv") ## merge data sets df <- full_join(x = dfSessionStats, y = dfCountries) #, by = c("country_destination"="country_destination") # clean up the data formats df$date_account_created <- ymd(df$date_account_created) df$date_first_booking <- ymd(df$date_first_booking) df$country_destination <- as.factor(df$country_destination) df <- mutate(df, travel = as.factor(1.*(country_destination != "NDF"))) df <- mutate(df, age = as.numeric(age)) df$age[is.na(df$age)] <- 0 df$timestamp_first_active <- ymd_hms(df$timestamp_first_active) colsExclude <- c("X", "id", "date_first_booking", "lat_destination", "lng_destination", "distance_km", "destination_km2", "destination_language", "language_levenshtein_distance", "travel") #,"timestamp_first_active" goodCols <- !(names(df) %in% colsExclude) #naCols <- names(which(colSums(is.na(df))>0)) #notNACols <- !unname(colSums(is.na(df))>0) y1Cols <- (names(df) %in% "country_destination") #travelCol <- (names(df) %in% "travel") ##################################################################### ## split into training, validation, and test sets set.seed(1245) trainFraction <- 0.7 trainIndex <- createDataPartition(df$country_destination, p = trainFraction, list = FALSE) dfTrain1 <- 
df[trainIndex,goodCols] # & !y1Cols] & notNACols dfTest1 <- df[-trainIndex,] # & !y1Cols] # idTrain <- df[trainIndex, "id"] idTest <- df[-trainIndex, "id"] travel1Col <- (names(dfTrain1) %in% "travel") y1Col <- (names(dfTrain1) %in% "country_destination") #################################################################### ## train random forest on travel/no travel # tr <- na.omit(dfTrain) # find best value of "mtry" # Start the clock! ptm <- proc.time() # sample the full data set to check how long it will run nsamp <- dim(dfTrain1)[1]#200 #10000 # samp <- sample(1:dim(dfTrain1)[1], nsamp) dfTrain1_s <- dfTrain1[samp,!travel1Col] dfTrain1_s$country_destination <- as.factor(as.character(dfTrain1_s$country_destination)) y1Col_s <- (names(dfTrain1_s) %in% "country_destination") # computeMtry <- FALSE if (computeMtry == TRUE){ bestMtry <- tuneRF(dfTrain1_s[,!y1Col_s], dfTrain1_s[,y1Col_s], mtryStart = 5, ntreeTry = 100, stepFactor = 1.5, improve = 0.10) m1 <- bestMtry[which(bestMtry[,"OOBError"] == min(bestMtry[,"OOBError"])), "mtry"] } else { m1 <- 4 } ntree <- 100 #rf <- randomForest(dfTrain1_s[,!y1Col_s], dfTrain1_s[,y1Col_s], data = dfTrain1_s, # use classwt to weight the classes freq <- table(dfTrain1_s$country_destination) wt <- unname(1/freq) # impute missing ages # cs <- dfTrain1_s[,!(names(dfTrain1_s) %in% c("date_account_created"))] # dfTrain1_s.imputed <- rfImpute(country_destination ~ ., data = cs) # dfTrain1_s.imputed <- mutate(dfTrain1_s.imputed, date_account_created = dfTrain1_s$date_account_created) # dfTrain1_s.imputed <- dfTrain1_s # dfTrain1_s.imputed$age[is.na(dfTrain1_s.imputed$age)] <- 0 # train random forest rf <- randomForest(country_destination ~ ., data = dfTrain1_s, mtry=m1, classwt=wt, ntree=ntree, keep.forest = TRUE, importance = TRUE) #, test = dfTest1 # plot the most important variables varImpPlot(rf) # how well does classifier perform on training set? 
p1tr <- predict(rf, dfTrain1_s) table(p1tr, dfTrain1_s$country_destination) # how well does classifier perform on test set? # impute age # cs <- dfTest1[,!(names(dfTest1) %in% c("date_account_created"))] # dfTest1.imputed <- rfImpute(country_destination ~ ., data = cs) # dfTest1.imputed <- mutate(dfTest1.imputed, date_account_created = dfTest1$date_account_created) p1prob <- predict(rf, dfTest1, type="prob") p1 <- predict(rf, dfTest1) table(p1, dfTest1$country_destination) # Stop the clock proc.time() - ptm ## create data frame to save predictions p1df <- data.frame(country_destination = dfTest1$country_destination) p1df <- cbind(p1df, data.frame(p1prob)) # head(p1df) ## generate top 5 predictions p1prob_5 <- t(apply(p1prob, 1, predictionSort)) df_p1prob_5 <- data.frame(p1prob_5) names(df_p1prob_5) <- c("C1", "C2", "C3", "C4", "C5") #names of top 5 countries df1_p1prob_5_ans <- mutate(df_p1prob_5, country_destination = dfTest1$country_destination) #evals <- t(apply(df1_p1prob_5[,1:5], 1, DCG, df1_p1prob_5[,6])) evals <- (apply(df1_p1prob_5_ans, 1, DCG)) print(paste("mean DCG: ", mean(evals))) ###################################################################### ## format for text output dfOut <- mutate(df_p1prob_5, id=idTest) dfOutm <- melt(dfOut, id = c("id")) dfOutm <- with(dfOutm, dfOutm[order(id, variable),]) saveCols <- c("id", "value") dfOutm <- dfOutm[,names(dfOutm) %in% saveCols] source('E:/Dropbox/R/general/timestamp.R') ts <- timestamp() outputName <- paste("output_", ts, '.txt', sep="") fileConn <- file(outputName) # writeLines(c("id,country\n"), fileConn) cat(c("id,country"), file=fileConn,sep="\n") nLines <- dim(dfOutm)[1] for (i in 1:100){ line <- paste(dfOutm[i,"id"], dfOutm[i,"value"],sep=",") #writeLines(line, fileConn) cat(line, file=outputName, sep="\n", append=TRUE) if (i %% 1000 == 0){ print(paste("line ", i, sep="")) } } close(fileConn) # #################################################################### # ## train random forest # tr <- 
na.omit(dfTrain) # tr$country_destination <- as.factor(as.character(tr$country_destination)) # rf <- randomForest(country_destination ~ ., data = tr, # mtry=2, # ntree=1000, # keep.forest = TRUE, # importance = TRUE, # test = dfTest)
8550fe62ced39cac1c4422a6c52187b8dccd5414
befe41bcf631337c50bb65c50d9f4c6ab548e0a5
/server.R
0c545b04c73d7c35bf040a889693f479dfae0c91
[]
no_license
Tutuchan/parisdata
ce2003ca8e1cb3740887c8b826bb44644efec787
1c9cbfd13a6e1de3f5c02fa5927598ba526ce01e
refs/heads/master
2021-01-10T15:21:12.491490
2015-09-30T13:04:40
2015-09-30T13:04:40
43,299,477
0
0
null
null
null
null
UTF-8
R
false
false
1,507
r
server.R
library(shinydashboard) library(shiny) source("global.R") shinyServer(function(input, output, session) { output$textTest <- renderPrint({ # spPolygons@data$insee[input$mainMap_shape_click$id]-100 }) output$mainMap <- renderLeaflet({ pal = colorNumeric("RdBu", dfNbAccs$n) leaflet(spPolygons) %>% addTiles() %>% addProviderTiles("Acetate.terrain") %>% addPolygons(stroke = TRUE, weight = 2, color = "black", fillColor = "blue", smoothFactor = 0.2, fillOpacity = 0.6, popup = ~arrs, layerId = 1:20) }) dataClick <- reactive({ validate( need(!is.null(input$mainMap_shape_click), "Choisissez un arrondissement.") ) arr = spPolygons@data$insee[input$mainMap_shape_click$id]-100 dfDataAccidents %>% filter(cp == arr) }) output$plotNbAccMois <- renderPlot({ dfPlot <- dataClick() %>% select(date, starts_with("vehic")) %>% mutate(date = as.Date(date), mois = format(date, "%m"), annee = format(date, "%Y")) %>% count(annee, mois) %>% mutate(date = as.Date(paste(annee, mois, "01", sep = "/"))) ggplot(dfPlot, aes(date, n)) + geom_bar(stat = "identity", fill = RColorBrewer::brewer.pal(4, "Paired")[2]) + theme_linedraw() + xlab("") + ylab("") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) + scale_x_date(breaks = date_breaks("month"), labels = date_format("%m/%Y")) }) })
d6431db5315b33693e9f1be406abcb0b880b26d4
e9bec00a92cfb7b91186d458288784b4464a07d3
/test_dplyr.R
70665c5da4df6cd88b7223d674e54c2b7c48e60f
[ "MIT" ]
permissive
15210280436/thomas
4d6b296f8381f29d8a8df35a1b0d8d31e28cca4a
7010af4e5421decc965fb379ca810340a776c624
refs/heads/master
2020-04-22T20:46:58.318338
2019-02-15T06:08:27
2019-02-15T06:08:27
170,652,009
0
0
null
2019-02-14T08:05:13
2019-02-14T08:05:13
null
UTF-8
R
false
false
8,111
r
test_dplyr.R
# Scratch/tutorial script exercising dplyr, tidyr, ggplot2 and DBI against
# the nycflights13 / mpg / diamonds example data sets and one PostgreSQL
# table. Each pipeline below is a stand-alone exploration; nothing depends
# on execution order except the `con`/`student_1` and `df` definitions.
#
# NOTE(review): database credentials are hard-coded below -- move them to
# environment variables or a config file before sharing this script.
library(tidyverse)
library(nycflights13)
library(ggplot2)
library(DBI)
library(modelr)
# library(charlatan)
library(stringr)

# Open a PostgreSQL connection, pull the first 100 rows of o_student into
# memory (collect()), then close the connection.
con <- DBI::dbConnect(RPostgreSQL::PostgreSQL(),
                      host = "127.0.0.1",
                      dbname = "octopus_susuan",
                      user = "wangjf",
                      password = "d4aSXN2P",
                      port = 5000)
student <- tbl(con,"o_student")
student_1 <- head(student,n=100) %>% collect()
student_1
dbDisconnect(con)

# Basic ggplot2 scatter plots and faceting on the mpg data set.
mpg
ggplot(data = mpg) + geom_point(mapping = aes(x = displ, y = hwy))
ggplot(data = mpg) + geom_point( mapping = aes(x = displ, y = hwy), color = "blue")
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy),color = "orange") +
  facet_wrap(~ class,nrow = 3)
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy),color = "orange") +
  facet_grid(drv ~ cyl)
mpg %>% distinct(cyl)

# dplyr single-table verbs on flights.
flights %>% filter(month==1 & day==1,is.na(dep_time)) # is.na() selects rows with missing values
flights %>% arrange(desc(is.na(dep_time)))
df <- tibble(x = c(5, 2, NA))
arrange(df, x)
flights %>% rename(year_1=year)
flights %>% mutate(strptime(dep_time,'%H:%M:%S %Y')) # all columns between the two fields
flights %>% transmute(dep_time,hour=dep_time %/% 60,minit=dep_time %%60) # convert the time
flights %>% select(-(year:dep_delay)) # all columns NOT between the two fields
flights_sml <- flights %>%
  select(year:day,ends_with("delay"),distance,air_time) # ends_with(): columns whose names end in "delay"
flights_sml %>% mutate(gain=air_time-dep_delay, speed=distance/air_time*60)
flights_sml %>% transmute(gain=air_time-dep_delay, # transmute keeps only the newly created variables
                          speed=distance/air_time*60)
flights %>% select(origin,tailnum,dep_time) %>% group_by(origin,tailnum) %>% arrange(desc(dep_time))
flights %>% group_by(year,month,day) %>% summarise(delay=mean(dep_delay,na.rm = TRUE)) # na.rm drops missing values
student_1 %>% filter(grade<8) %>% ggplot(mapping=aes(x=grade,color=grade))+ geom_bar()
# geom_point and geom_smooth used together show the points plus the trend
student_1 %>% filter(grade<8) %>% ggplot(mapping=aes(x=grade,y=province_id))+ geom_boxplot()
seq(1,10)

# Mean delay per destination, filtered and plotted against distance.
by_dest <- flights %>% group_by(dest)
delay <- by_dest %>%
  summarise(count=n(),dist=mean(distance,na.rm=TRUE),delay=mean(arr_delay,na.rm=TRUE))
delay <- filter(delay,count>20,dest!="HNL")
delay %>% filter(dist<750) %>%
  ggplot(mapping = aes(x=dist,y=delay))+
  geom_point(aes(size=count),alpha=1/3)+
  geom_smooth(se=FALSE)
flights %>% group_by(tailnum) %>%
  summarise(delay=mean(arr_delay,na.rm=TRUE),n=n()) %>%
  ggplot(mapping=aes(x=n,y=delay))+
  geom_point(alpha=1/10)
diamonds %>% filter(carat<3) %>%
  ggplot(mapping = aes(x=carat,color=cut))+
  geom_freqpoly(binwidth=0.1)
diamonds %>% # histogram + density plot
  filter(carat<3) %>%
  ggplot(mapping = aes(x=carat,y=..density..))+
  geom_histogram(binwidth=0.1)+
  geom_density(alpha=.7)

# (duplicated from above)
flights %>% select(origin,tailnum,dep_time) %>% group_by(origin,tailnum) %>% arrange(desc(dep_time))
flights %>% group_by(year,month,day) %>% summarise(delay=mean(dep_delay,na.rm = TRUE)) # na.rm drops missing values

# Replace implausible y values (outliers) with NA before plotting.
diamonds1 <- diamonds %>% mutate(y_1=ifelse(y<3 | y>20,NA,y))
diamonds1 %>% ggplot(mapping = aes(x=x,y=y_1))+ geom_point()

# Sample sizes of cancelled vs. non-cancelled flights.
flights %>%
  mutate(
    cancelled=is.na(dep_time), # is.na = TRUE marks cancelled flights (dep_time is empty)
    sched_hour=sched_dep_time %/% 100,
    sched_min=sched_dep_time %% 100,
    sched_dep_time=sched_hour+sched_min/60
  ) %>%
  group_by(cancelled) %>%
  summarise(n=n())
# Plot the delay-time difference between cancelled and non-cancelled flights;
# non-cancelled flights far outnumber cancelled ones, so the raw counts alone
# cannot show a difference between the two groups.
flights %>%
  mutate(
    cancelled=is.na(dep_time),
    sched_hour=sched_dep_time %/% 100,
    sched_min=sched_dep_time %% 100,
    sched_dep_time=sched_hour+sched_min/60
  ) %>%
  ggplot(mapping = aes(x=sched_dep_time,color=cancelled))+
  geom_freqpoly(binwidth=1/4)
# Use densities to compare the delay times of the two samples.
flights %>%
  mutate(
    cancelled=is.na(dep_time),
    sched_hour=sched_dep_time %/% 100,
    sched_min=sched_dep_time %% 100,
    sched_dep_time=sched_hour+sched_min/60
  ) %>%
  ggplot(mapping = aes(x=sched_dep_time,y=..density..,color=cancelled))+
  geom_freqpoly(binwidth=1/4)+
  geom_density(alpha=.7)
flights %>%
  mutate(
    cancelled=is.na(dep_time),
    sched_hour=sched_dep_time %/% 100,
    sched_min=sched_dep_time %% 100,
    sched_dep_time=sched_hour+sched_min/60
  ) %>%
  ggplot(mapping = aes(x=origin,y=sched_dep_time))+
  geom_boxplot()

diamonds %>% count(color, cut) %>% # equivalent to group_by + summarise
  ggplot(mapping = aes(x=color,y=cut,fill=n))+
  geom_tile() # count of observations for each combination of the two variables
diamonds %>%
  ggplot(mapping = aes(x = x, y = y)) +
  geom_point() +
  coord_cartesian(xlim = c(4, 11), ylim = c(4, 11))
# The scatter plot shows x and y concentrated in 4-11, so coord_cartesian
# restricts the view to make the plot clearer.
ggsave("diamonds.pdf") # save to PDF; write_csv(diamonds, "diamonds.csv") saves to csv
faithful %>% ggplot(mapping = aes(x=eruptions,y=waiting))+ geom_point()
readxl::read_xls("trial_class.xls", col_names = FALSE) # col_names = FALSE: do not treat the first row as column headers

# Handle character encodings with readr's parse_character().
x1 <- "El Ni\xf1o was particularly bad this year"
x2 <- "\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd"
parse_character(x1, locale = locale(encoding = "Latin1"))
parse_character(x2, locale = locale(encoding = "Shift-JIS")) # handle character encodings
planes %>% count(tailnum) %>% filter(n>1)

#Generate dummy dataset
# set seed
set.seed(1212) # data is random, so a seed is needed for reproducibility
# fake data (ch_name/ch_job come from the charlatan package, see commented
# library() call at the top)
df <- data_frame(
  name = ch_name(30), # ch_name(30) creates 30 random fake names
  country = rep(c("US", "UK", "CN"), 10) %>% sample(), # rep() limits how often each value repeats before shuffling
  job = sample(ch_job(3), 30, replace = TRUE), # ch_job(3) creates 3 fake jobs; sample() shuffles them
  spending = rnorm(30, mean = 100, sd = 20), # 30 random values with mean 100 and sd 20
  item = sample(1:3, 30, replace = TRUE) # sample 1-3 at random, 30 times
)
glimpse(df) # transposed overview of the data

# common tools
df %>%
  filter(country=="US") %>%
  mutate(per_item_spending=spending/item) %>%
  group_by(job) %>%
  summarise(total_spending=sum(spending),
            max_item = max(item),
            per_item_mu = mean(per_item_spending) ) %>%
  arrange(total_spending)

# use select drop column
df %>%
  #select(-country,-job)
  #select(-c(country, job))
  select(item, spending, name)
df %>% rename(person = name, amount = spending, quantity = item)
df %>% select(contains("ing")) # select columns whose name contains "ing"
df %>% select(one_of("name", "item"))

# use function from other package
df %>% # from stringr package
  mutate(first_name = str_extract(name, "^\\w*"), last_name = str_extract(name, "\\w*$")) %>%
  select(contains("name"))

# using if-else
df %>%
  mutate(one_item = ifelse(item == 1, "Yes", "No")) %>% # ifelse adds the condition
  select(contains("item"))

# using case-when
df %>%
  mutate(
    spending_cat = case_when(
      spending > 100 ~ "above 100",
      spending > 50 ~ "above 50",
      TRUE ~ "below 50"
    )
  ) %>%
  select(contains("spending"))

# filter multiple conditions
df %>% filter(country == "CN", item != 1, spending >= 100) # filter on multiple conditions

# find spending mean
df %>% summarise(spending_mean = mean(spending),spending_sd=sd(spending))

# combine group by with filter or mutate
df %>%
  group_by(job) %>%
  filter(spending < mean(spending)) %>%
  mutate(cumsum_spending = cumsum(spending))

k <- 2:10
k %>% map_dbl(sqrt)
# map()      # returns a list
# map_lgl()  # returns a logical vector
# map_int()  # returns an integer vector
# map_dbl()  # returns a double vector
# map_chr()  # returns a character vector
c242f43d75b131c2326bf536cc327727899fd068
0a906cf8b1b7da2aea87de958e3662870df49727
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955978-test.R
f5b798eae411a36083c7786640d198468ec8607a
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
1,511
r
1609955978-test.R
testlist <- list(id = c(60821L, -8748155L, 590633780L, 2051080192L, 0L, -8716289L, -16776961L, -1L, -1L, -13534598L, 2054881025L, 0L, 0L, 0L, 0L, 255L, 2054815744L, 0L), x = c(9.70418706716128e-101, 9.70418706716128e-101, -3.57143978277452e+250, -3.57143978277452e+250, -3.57143978277452e+250, -3.57143978277452e+250, -3.57143978277452e+250, 7.89897283195045e-317, 0, -8.73989987746428e+245, -3.57143978277452e+250, -3.57143978277452e+250, -3.57143978277452e+250, 3.60189371937265e-275, -3.57143978277452e+250, Inf, -3.57143978277452e+250, 1.20057951939423e-321, -3.57077350460498e+250, -3.57077349397652e+250, 3.24586890601023e-298, 3.30036915594569e-296, 9.6134979878195e+281, -1.16450119635426e+70, 1.01522932745225e-314, 2.5990303916246e-312, -5.82900159111767e+303, -8.77779432448941e+304, -5.48612657193497e+303, 2.59894765607829e-312, 2.40273209939742e-306, 2.1008587222514e-312, 7.11756791544715e-304, -8.50349230663022e+304, 1.01522935857838e-314, 4.17201344146859e-309, 7.56414503782948e-320, 9.61276248427429e+281, 6.02760087926321e-322, 0, -1.07927704458837e+304, 7.11756792194462e-304, 8.07404892631684e-315, -1.1031304526204e+217, NaN, 6.05127750601865e-307, 4.66602416025939e-299, NaN, NaN, 9.50322928411057e-314, 7.06416447240789e-304, -5.48612406882363e+303, 8.25679295903193e-317, 1.25986739689518e-321, -5.48612541614556e+303, 3.61247587838313e-67, 5.42745243716277e-315, 0, 0, 0), y = 2.01158338396807e+131) result <- do.call(ggforce:::enclose_points,testlist) str(result)
894bec73cae39486bdb2fb9689e5106bdd1a15b0
e3fd2e053b75918b8d39403dae93d735dbe47381
/setupcode.R
79154c4a1987a2c815c470aae7f5dd0cd0ed8f18
[]
no_license
jeagleso/website
00bfbb85f4218a7b5d22f682d4e15f5c50498617
51b35fbfa09fa51a57ab41da48c62746d17e65eb
refs/heads/main
2023-04-20T14:17:20.126198
2021-05-01T16:30:22
2021-05-01T16:30:22
353,528,605
1
0
null
null
null
null
UTF-8
R
false
false
512
r
setupcode.R
# install packages # install.packages(c("distill", "postcards", "fontawesome")) # load packages library(distill) library(postcards) library(fontawesome) # create website files distill::create_website(dir = ".", title = "Jenna Eagleson", gh_pages = TRUE) # create postcard for homepage distill::create_article(file = "postcard", template = "jolla", package = "postcards") # create a theme distill::create_theme()
29fc792577d6bfa728af923109b38a4a1ef0aaa3
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
/RGtk2/man/gtkPaperSizeGetHeight.Rd
cb8f0a47e6a3deaa73801a5bff8beefde8379478
[]
no_license
lawremi/RGtk2
d2412ccedf2d2bc12888618b42486f7e9cceee43
eb315232f75c3bed73bae9584510018293ba6b83
refs/heads/master
2023-03-05T01:13:14.484107
2023-02-25T15:19:06
2023-02-25T15:20:41
2,554,865
14
9
null
2023-02-06T21:28:56
2011-10-11T11:50:22
R
UTF-8
R
false
false
479
rd
gtkPaperSizeGetHeight.Rd
\alias{gtkPaperSizeGetHeight} \name{gtkPaperSizeGetHeight} \title{gtkPaperSizeGetHeight} \description{Gets the paper height of the \code{\link{GtkPaperSize}}, in units of \code{unit}.} \usage{gtkPaperSizeGetHeight(object, unit)} \arguments{ \item{\verb{object}}{a \code{\link{GtkPaperSize}} object} \item{\verb{unit}}{the unit for the return value} } \details{Since 2.10} \value{[numeric] the paper height} \author{Derived by RGtkGen from GTK+ documentation} \keyword{internal}
7a2f8ba3f96550e27ef3c12e8230c90b554dac9b
6cd15fd0e072741b5db8284ca20bf6534e495a20
/man/mlpca_b.Rd
0c39e54ccb3fe3f32450e26071ab12534903e55b
[ "MIT" ]
permissive
renands/RMLPCA
fffbd18c502e2e3ccfafaa4be677159877cb831b
039d34002fe4b98688869184e5139a3b842bfa00
refs/heads/master
2023-05-09T07:34:03.769415
2021-05-31T19:22:13
2021-05-31T19:22:13
273,766,066
1
0
null
null
null
null
UTF-8
R
false
true
1,871
rd
mlpca_b.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlpca_b.R
\name{mlpca_b}
\alias{mlpca_b}
\title{Maximum likelihood principal component analysis for mode B error conditions}
\usage{
mlpca_b(X, Xsd, p)
}
\arguments{
\item{X}{MxN matrix of measurements.}

\item{Xsd}{MxN matrix of measurements error standard deviations.}

\item{p}{Rank of the model's subspace, p must be less than the minimum of M and N.}
}
\value{
The parameters returned are the results of SVD on the estimated subspace.
The quantity Ssq represents the sum of squares of weighted residuals.
All the results are nested in a list format.
}
\description{
Performs maximum likelihood principal components analysis for mode B
error conditions (independent errors, homoscedastic within a column).
Equivalent to performing PCA on data scaled by the error SD, but results
are rescaled to the original space.
}
\details{
The returned parameters, U, S and V, are analogs to the truncated SVD
solution, but have somewhat different properties since they represent the
MLPCA solution. In particular, the solutions for different values of p are
not necessarily nested (the rank 1 solution may not be in the space of the
rank 2 solution) and the eigenvectors do not necessarily account for
decreasing amounts of variance, since MLPCA is a subspace modeling
technique and not a variance modeling technique.
}
\examples{
library(RMLPCA)
data(data_clean)
data(data_error_b)
data(sds_b)
# data that you will usually have on hands
data_noisy <- data_clean + data_error_b
# run mlpca_b with rank p = 2
results <- RMLPCA::mlpca_b(
  X = data_noisy,
  Xsd = sds_b,
  p = 2
)
# estimated clean dataset
data_cleaned_mlpca <- results$U \%*\% results$S \%*\% t(results$V)
}
\references{
Wentzell, P. D. "Other topics in soft-modeling: maximum likelihood-based
soft-modeling methods." (2009): 507-558.
}
d15317af5c6172533dc950da58f7d0bf13b070eb
7bba3974e2c51fa744ae2c8e5fc5cc8d46bde10b
/SM7 - R_script_heatmaps.R
38d819674be6e935afb5c946ed3839c2a340ba5f
[]
no_license
pochotustra/genomics_endophytes
07404aaa857f9d33dd3378ee448449fffdbde0c9
407c4e260dda6288b5da066a86acb32391e3a809
refs/heads/master
2020-05-16T21:09:32.077734
2015-03-25T13:26:14
2015-03-25T13:26:14
32,863,947
0
0
null
null
null
null
ISO-8859-1
R
false
false
5,535
r
SM7 - R_script_heatmaps.R
# Build a presence/absence ("heatmap") table of secretion-system activity for
# strain 434 against four Erwinia reference genomes.
#
# Input:  four Excel comparison tables (one workbook per reference genome).
# Output: Heatmap_Erwnia.txt, a tab-separated table with one row per
#         Category/Role combination and two 0/1 activity columns per genome.
#
# NOTE(review): `rm(list = ls())`, `setwd()` and unconditional
# `install.packages()` calls are kept from the original interactive workflow,
# but they are poor practice in shared code -- consider removing them.
rm(list=ls(all=TRUE))
install.packages("XLConnect") # needed only the first time this script is run
install.packages("plyr")      # needed only the first time this script is run

setwd("C:/Users/lopezfernj/Desktop/heatmap") # folder holding the input tables

require(XLConnect) # must be loaded every session
library(plyr)      # for revalue()

# Load the four comparison workbooks (one per reference genome).
wb  <- loadWorkbook("./434_vs_Erwinia amylovora ATCC 49946.xls")
wb2 <- loadWorkbook("./434_vs_Erwinia billingiae Eb661.xls")
wb3 <- loadWorkbook("./434_vs_Erwinia pyrifoliae Ep1_96.xls")
wb4 <- loadWorkbook("./434_vs_Erwinia sp. Ejp617.xls")

# Read the first sheet, keeping only the columns used below
# (column 1 = Category, column 4 = Role, columns 5/6 = SS.active.A/B).
read_comparison <- function(workbook) {
  readWorksheet(workbook, sheet = 1, header = TRUE)[, c(2, 3, 4, 5, 7, 9)]
}
new.table  <- read_comparison(wb)
new.table2 <- read_comparison(wb2)
new.table3 <- read_comparison(wb3)
new.table4 <- read_comparison(wb4)
head(new.table)

# Collect the union of all "Role" values across the four tables, walking the
# categories of table 1, and remember how many new roles each category adds.
list.Roles <- c()
number_of_data <- c()
for (x in unique(new.table[, 1])) {
  data_list  <- new.table[new.table$Category == x, 4]
  data_list2 <- new.table2[new.table2$Category == x, 4]
  # BUGFIX: the original filtered tables 3 and 4 with new.table2$Category,
  # which misaligns the row masks; each table must use its own column.
  data_list3 <- new.table3[new.table3$Category == x, 4]
  data_list4 <- new.table4[new.table4$Category == x, 4]
  count_data <- c()
  for (l in c(data_list, data_list2, data_list3, data_list4)) {
    if (!(l %in% list.Roles)) {
      list.Roles <- c(list.Roles, l)
      count_data[length(count_data) + 1] <- 1
    }
  }
  number_of_data[x] <- length(count_data)
}

# Repeat each category label once per role it contributed, so the labels
# line up with list.Roles in the final table.
Category_data <- c()
for (x in unique(new.table[, 1])) {
  Category_data <- c(Category_data, rep(x, number_of_data[x]))
}

# For one comparison table, look up the two activity flags for every role in
# `roles`; roles absent from the table count as inactive ("no"/"no"), and the
# yes/no flags are recoded to 1/0. The first all-zero row is a placeholder
# that the caller drops with [-1, ].
build_results_table <- function(src.table, roles) {
  res <- matrix(c(0, 0), nrow = 1, ncol = 2)
  colnames(res) <- c("SS.active.A", "SS.active.B")
  for (x in roles) {
    if (x %in% src.table[, 4]) {
      element <- which(src.table[, 4] == x)[1]  # first matching row only
      activeResult <- src.table[element, c(5, 6)]
    } else {
      activeResult <- c("no", "no")
    }
    res <- rbind(res, activeResult)
  }
  res$SS.active.A <- revalue(res$SS.active.A, c("yes" = 1, "no" = 0))
  res$SS.active.B <- revalue(res$SS.active.B, c("yes" = 1, "no" = 0))
  res
}

# BUGFIX: the original script built results_table3 and results_table4 from
# `new.table` (an uncorrected copy-paste of the table 1 block); they must be
# built from new.table3 and new.table4 respectively.
results_table1 <- build_results_table(new.table,  list.Roles)
results_table2 <- build_results_table(new.table2, list.Roles)
results_table3 <- build_results_table(new.table3, list.Roles)
results_table4 <- build_results_table(new.table4, list.Roles)

# Join everything into the final table; [-1, ] drops the placeholder rows.
final_table <- cbind(Category = Category_data, Role = list.Roles,
                     results_table1[-1, ], results_table2[-1, ],
                     results_table3[-1, ], results_table4[-1, ])
head(final_table)

# Save the final table as a tab-separated text file openable in Excel.
write.table(final_table, file = "Heatmap_Erwnia.txt", sep = "\t",
            row.names = F, col.names = T)
ae12163e43ceebb63cb864318ddd2750ccdd5767
29585dff702209dd446c0ab52ceea046c58e384e
/QCAGUI/R/findTh.R
593e9ae669f908035de58285a8a8616b44d236c3
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
701
r
findTh.R
`findTh` <- function(x, n = 1, hclustm = "ward.D2", distm = "canberra", ...) { if (!isNamespaceLoaded("QCA")) { requireNamespace("QCA", quietly = TRUE) } other.args <- list(...) ### ### ### backwards compatibility ### if ("groups" %in% names(other.args)) { n <- other.args$groups - 1 } ### ### ### backwards compatibility ### x <- sort(x) cutpoints <- cumsum(rle(cutree(hclust(dist(x, method = distm), method = hclustm), k = n + 1))[[1]]) values <- rep(NA, n) for (i in seq(length(values))) { values[i] <- mean(x[seq(cutpoints[i], cutpoints[i] + 1)]) } return(values) }
20ca84e3a554efc46edf0c31873e9507bc6e281b
cd521a577c838a89b9ea0bd0db03ed13588ee5b2
/server.R
f12b1afc2ceea035855ef3c96b51ffecb077d4c5
[]
no_license
chessstats/motion
23490eb8644cf79aa1ab4bd7c93a67f5b5576de0
419aec8a82ee86291c7097f38baa8a5273457616
refs/heads/master
2021-01-10T13:35:05.155726
2016-02-16T05:26:54
2016-02-16T05:26:54
51,384,996
0
0
null
null
null
null
UTF-8
R
false
false
2,978
r
server.R
# Shiny server for a chess rating-history viewer: a plotly line chart of
# per-player ratings over time plus a DT ranking table for a chosen list.
library(shiny)
library(plotly)
library(DT)

# Load the rating history (one column per player, prefixed "X<fideid>")
# and the fideid -> player-name lookup.
dt<-read.table("ratinghist.txt")
lists<-sprintf("%04d",dt$date)
load("players.dat")
names(players)<-paste("X",as.character(names(players)),sep="")
fideids<-names(players)
playernames<-as.character(players)
#cat("ids",fideids[1:5],"names",playernames[1:5])
dt$year=2000+dt$year/100

# Chart settings shared across sessions; mutated below via <<- from the
# reqchart handler. NOTE(review): app-global mutable state -- all connected
# sessions share these values.
chartsize<<-800
startyear<<-2001

shinyServer(function(input, output, session){
  # (Re)build the rating-trend plot for the players currently selected in
  # input$control, starting at the first row whose year >= startyear.
  drawchart<- function(){
    output$trendPlot <- renderPlotly({
      selp<-input$control
      if(length(selp)==0) {
        # Default selection when nothing is chosen yet (two fide ids).
        selp=c("X1503014","X2016192")
      }
      # Find the first row at or after the configured start year.
      starti<-1
      for(j in 1:nrow(dt)){
        if(dt$year[j]>=startyear){
          starti<-j
          break
        }
      }
      dteff<-dt[starti:nrow(dt),]
      # NOTE(review): the plotly calls are assembled as strings and run via
      # eval(parse(...)) to inject the dynamic column name; plot_ly can take
      # the column directly (e.g. y = dteff[[selp[1]]]), which would avoid
      # eval/parse entirely -- consider refactoring.
      command<-paste("p<-plot_ly(dteff,type='line',x=year,y=",selp[1],",name='",players[[selp[1]]],"')",sep="")
      #print(command)
      eval(parse(text=command))
      # Add one trace per additional selected player.
      if(length(selp)>1) for(j in 2:length(selp)) {
        command=paste("p<-add_trace(p,type='line',x=year,y=",selp[j],",name='",players[[selp[j]]],"')",sep="")
        #print(command)
        eval(parse(text=command))
      }
      p<-layout(p,height=round(chartsize/1.5),width=chartsize,xaxis=list(title=''),yaxis=list(title=''),margins=list(pad=10))
      p
    })
  }

  # Client asked for the player/list metadata: push it via a custom message.
  observeEvent(input$rplayers,{
    #print("rplayers")
    session$sendCustomMessage(type="setPlayers",message=list(fideids,playernames,lists))
  })

  # Client picked a rating list (0-based index): render the ranking table
  # for that date, players ordered by descending rating.
  observeEvent(input$rlist,{
    #print("rlist")
    i<-input$rlist+1
    date<-dt$date[i]
    dt2<-dt[,2:length(fideids)]
    dt2<-dt2[,order(-dt2[i,])]
    playersordered=players[colnames(dt2)]
    ranklistnames=as.character(playersordered)
    ranklistrtgs=as.character(dt2[i,])
    ranklist<-data.frame(ranklistnames,ranklistrtgs)
    #print("render")
    dto<-datatable(ranklist)
    output$dataTable<- renderDataTable(dto)
  })

  # Client changed the chart parameters: values <= 0 mean "keep current".
  observeEvent(input$reqchart,{
    cparams<-input$reqchart
    setchartsize<-cparams[1]
    setstartyear<-cparams[2]
    if(setchartsize>0){
      chartsize<<-setchartsize
    }
    if(setstartyear>0){
      startyear<<-setstartyear
    }
    drawchart()
  })

  # Player selection changed: redraw. The if(FALSE) block below is dead
  # code kept as a reference for the custom-message API (table, plot, SVG).
  observeEvent(input$control,{
    drawchart()
    if(FALSE){
      tablestr="<table id='mytable' class='display'><thead><tr><th>head</th></tr></thead><tbody><tr><td>body</td></tr></tbody>"
      session$sendCustomMessage(type="setTable",message=list("tablecont",tablestr,"mytable"))
      plotly='[{"x": [1, 2, 3, 4, 5], "y": [1, 2, 4, 8, 16] },{"x": [6, 2, 3, 4, 5], "y": [10, 3, 5, 7, 16] }]'
      layout='{"margin": { "t": 0 } }'
      session$sendCustomMessage(type="setPlot",message=list("plotlycont",plotly,layout))
      svgcontent='<svg width="400" height="180"><rect x="50" y="20" rx="20" ry="20" width="150" height="150" style="fill:red;stroke:black;stroke-width:5;opacity:0.5" /></svg>'
      session$sendCustomMessage(type="setInnerHTML",message=list("svgcont",svgcontent))
    }
  })
})
f1e70d60591d8ed3a780be7dc9599c7c2d47e16a
6aa307176ec4899e13015d4f20aa9a4fdaef46f7
/man/powermcpt.Rd
46c112a366b434258d2e626f51a7a369d9690e2e
[]
no_license
cran/MCPAN
7a56bec761537a8bd3cf9cd66f1dd0b8d90bb680
dcfedcf90abc5df5974e72fcf9b84c487f937e98
refs/heads/master
2020-12-24T15:50:00.850274
2018-03-22T11:22:58
2018-03-22T11:22:58
17,691,867
1
2
null
null
null
null
UTF-8
R
false
false
7,963
rd
powermcpt.Rd
\name{powermcpt}
\alias{powermcpt}
\title{
Test version. Power calculation for multiple contrast tests (1-way ANOVA model)
}
\description{Test version. Calculate the power of multiple contrast tests of
k means in a model with homogeneous Gaussian errors, using the function
pmvt(mvtnorm) to calculate multivariate t probabilities. Different options of
power definition are "global": the overall rejection probability (the
probability that the elementary null is rejected for at least one contrast,
irrespective of being under the elementary null or alternative), "anypair":
the probability to reject any of the elementary null hypotheses for those
contrasts that are under the elementary alternatives, and "allpair": the
probability that all elementary nulls are rejected which are indeed under the
elementary alternatives. See Sections 'Details' and 'Warnings'!
}
\usage{powermcpt(mu, n, sd, cmat = NULL, rhs=0, type = "Dunnett",
 alternative = c("two.sided", "less", "greater"),
 alpha = 0.05, ptype = c("global", "anypair", "allpair"), crit = NULL, ...)
} \arguments{ \item{mu}{a numeric vector of expected values in the k treatment groups} \item{n}{a numeric vector of sample sizes in the k treatment groups} \item{sd}{a single numeric value, specifying the expected standard deviation of the residual error} \item{cmat}{optional specification of a contrast matrix; if specified, it should have as many columns as there are groups in arguments \code{mu} and \code{n} and it should have at least 2 rows, if specified, argument \code{type} is ignored, if not specified, the contrast is determined by argument \code{type}} \item{rhs}{numeric vector, specifying the right hand side of the hyptheses to test, defaults to 0, other specifications lead to tests of non-inferiority and superiority.} \item{type}{ a single character string, naming one of the contrast types available in \code{contrMat(multcomp)}; argument is ignored if \code{cmat} is specified} \item{alternative}{ a single character string, specifying the direction of the alternative hypothesis, one of \code{"two.sided","less","greater"}. Note that this argument governs how the multivariate t probabilities are evaluated as well as the computation of the critical value if none is specified (i.e. default \code{crit=NULL})} \item{alpha}{ a single numeric value, familywise type I error to be controlled, is ignored if argument \code{crit} is specified} \item{ptype}{ a single character string, naming the type of rejection probability to be computed; options are \code{"global"} for the global rejection probability, \code{"anypair"} for the rejection probability considering only those contrasts under the alternative, \code{"global"} for the probability that all elementary alternatives are rejected. 
} \item{crit}{ a single numeric value to serve as equicoordinate critical point in the multiple test; if it is not specified, it is computed as a quantile of the multivariate t distribution based on the specifications in arguments \code{n}, \code{cmat} (or \code{type}); note that for alternatives \code{'two.sided'} and \code{'greater'}, \code{crit} should be a single positive value, while for alternative \code{'less'}, \code{crit} should be a single negative value. } \item{\dots}{ further arguments, which are passed to the functions \code{qmvt} and \code{pmvt}, mainly to control the computation errors, see help \code{GenzBretz(mvtnorm)} for details} } \details{ In a homoscedastic Gaussian model with k possibly different means compared by (user-defined) multiple contrast tests, different types of rejection probabilities in the multiple testing problem can be computed. Based on a central multivariate t distribution with df=sum(n)-k appropriate equicoordinate critical points for the test are computed, different critical points can be specified in \code{crit} Computing probabilities of non-central multivariate t distributions \code{pmvt(mvtnorm)} one can calculate: The global rejection probability (\code{power="global"}), i.e. the probability that at least one of the elementary null hypotheses is rejected, irrespective, whether this (or any contrast!) is under the corresponding elementary alternative). As a consequence this probability involves elementary type-II-errors for those contrasts which are under their elementary null hypothesis. The probability to reject at least one of those elementary null hypotheses which are indeed under their corresponding elementary alternatives (\code{power="anypair"}). Technically, this is achieved by omitting those contrasts under the elementary null and compute the rejection probability for a given criticla value from a multivariate t distribution with reduced dimension. 
Note that for \code{'two-sided'} alternatives, type III-errors (rejection of the two-sided null in favor of the wrong direction) are included in the power. The probability to reject all elementary null hypotheses which are indeed under their corresponding elementary alternatives (\code{power="allpair"}). Also here, for 'two-sided' alternatives type III-error contribute to the computed 'allpair power'. Note further that two-sided allpair power is simulated based on multivariate t random numbers. } \value{ A list consisting of the following items: \item{power}{a numeric value the computed power, with estimated computational error as an attribute} \item{mu}{the input vector of expected values of group means} \item{n}{the input vector of group sample sizes} \item{conexp}{a data frame containing the contrast matrix, the expected values of the contrasts given mu (expContrast), the right hand sides of the hypotheses (rhs, as input), the expected values of the test statistics corresponding to the contrasts and rhs, and a column of logical values indicating whether the corresponding contrasts was under the alternative (under HA)} \item{crit}{a single numeric value, the critical value used for power computation} \item{alternative}{a single character string, as input} \item{ptype}{a single character string, as input} \item{alpha}{a single numeric value, as input} } \references{ \emph{Genz A, Bretz F (1999):} Numerical computation of multivariate t-probabilities with application to power calculation of multiple contrasts. Journal of Statistical Computation and Simulation, 63, 4, 361-378. \emph{Bretz F, Hothorn LA (2002):} Detecting dose-response using contrasts: asymptotic power and sample size determination for binomial data. Statistics in Medicine, 21, 22, 3325-3335. \emph{Bretz F, Hayter AJ and Genz A (2001):} Critical point and power calculations for the studentized range test for generally correlated means. Journal of Statistical Computation and Simulation, 71, 2, 85-97. 
\emph{Dilba G, Bretz F, Hothorn LA, Guiard V (2006):} Power and sample size computations in simultaneous tests for non-inferiority based on relative margins. Statistics in Medicien 25, 1131-1147. } \author{ Frank Schaarschmidt } \section{Warning}{This is a test version, which has roughly (but not for an extensive number of settings) been checked by simulation. Any reports of errors/odd behaviour/amendments are welcome.} \examples{ powermcpt(mu=c(3,3,5,7), n=c(10,10,10,10), sd=2, type = "Dunnett", alternative ="greater", ptype = "global") powermcpt(mu=c(3,3,5,7), n=c(10,10,10,10), sd=2, type = "Williams", alternative ="greater", ptype = "global") powermcpt(mu=c(3,3,5,7), n=c(10,10,10,10), sd=2, type = "Dunnett", alternative ="greater", ptype = "anypair") powermcpt(mu=c(3,3,5,7), n=c(10,10,10,10), sd=2, type = "Williams", alternative ="greater", ptype = "anypair") powermcpt(mu=c(3,4,5,7), n=c(10,10,10,10), sd=2, type = "Dunnett", alternative ="greater", ptype = "allpair") powermcpt(mu=c(3,2,1,-1), n=c(10,10,10,10), sd=2, type = "Dunnett", alternative ="greater", ptype = "allpair") } \keyword{htest} \concept{power}
05cf05a83598b48f4043c9628f5e6d8d1f02a295
251c9dd59afa6d9ca96339d2b94eb72d6dd37179
/man/readDcpRectangle.Rd
90f7255ae88603c657ae01f4d735592e7ffcf90b
[]
no_license
KUNJU-PITT/dChipIO
ebcd315e18e6a52b6cfb25dc9d47677c0bee3c0e
85080e5d289646f89c7ad96051cef3c155dfb7dd
refs/heads/master
2020-03-31T09:50:11.248755
2016-01-14T07:13:13
2016-01-14T07:13:13
null
0
0
null
null
null
null
UTF-8
R
false
false
2,113
rd
readDcpRectangle.Rd
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%  readDcpRectangle.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\name{readDcpRectangle}
\alias{readDcpRectangle}

\title{Reads a spatial subset of probe-level data from a dChip DCP file}

\usage{
readDcpRectangle(filename, fields=c("rawIntensities", "normalizedIntensities"),
 xrange=c(0, Inf), yrange=c(0, Inf), ..., asMatrix=TRUE)
}

\description{
 Reads a spatial subset of probe-level data from a dChip DCP file.
}

\arguments{
 \item{filename}{The pathname of the DCP file.}
 \item{fields}{The cell fields to be read.}
 \item{xrange}{A \code{\link[base]{numeric}} \code{\link[base]{vector}} of length two giving the left
   and right coordinates of the cells to be returned.}
 \item{yrange}{A \code{\link[base]{numeric}} \code{\link[base]{vector}} of length two giving the top
   and bottom coordinates of the cells to be returned.}
 \item{asMatrix}{If \code{\link[base:logical]{TRUE}}, the CEL data fields are returned as matrices
   with element (1,1) corresponding to cell (xrange[1],yrange[1]).}
 \item{...}{Additional arguments passed to \code{\link{readDcp}}().}
}

\value{
 A named \code{\link[base]{list}} CEL structure similar to what
 \code{\link{readDcp}}() returns. In addition, if \code{asMatrix} is
 \code{\link[base:logical]{TRUE}}, the CEL data fields are returned as
 matrices, otherwise not.
}

\author{Henrik Bengtsson}

\examples{
path <- system.file("exData", package="dChipIO")

filename <- "Test3-1-121502.dcp"
pathname <- file.path(path, filename)

data <- readDcpRectangle(pathname)

layout(matrix(1:4, nrow=2, byrow=TRUE))
image(data$rawIntensities, main="Raw probe signals")
image(data$normalizedIntensities, main="Normalized probe signals")
}

\seealso{
 The \code{\link{readDcp}}() method is used internally.
This method was inspired by \code{readCelRectangle()} of the \pkg{affxparser} package. } \keyword{file} \keyword{IO}
92fcd2cfd7af7565626d2eb2acf8afd88b92efa6
12a97000d7e61c7d5ddaaa05873ff98ebd90b34e
/man/fit_model.Rd
eb1803fa31eef762d3dd55260d272894771cad2b
[]
no_license
ThierryO/testlme4
0900472348a12af209d8020ae83362b0974f1deb
d1ee848b719760a4896acbc5e35f69f6c1d80914
refs/heads/master
2020-04-06T04:30:37.865206
2015-03-23T16:42:08
2015-03-23T16:42:08
32,745,270
0
0
null
null
null
null
UTF-8
R
false
false
313
rd
fit_model.Rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/fit_model.R \name{fit_model} \alias{fit_model} \title{Fit a poisson glmer} \usage{ fit_model(formula, dataset) } \arguments{ \item{formula}{the glmer formula} \item{dataset}{the dataset} } \description{ Fit a poisson glmer }
8083738b8c02f6e27aa44f7fa151955127141186
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/genasis/examples/genplot.Rd.R
bced644e7bbcb452ea9b2ac6f1c68a3ce7e7865a
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,048
r
genplot.Rd.R
library(genasis)

### Name: genplot
### Title: Plot of concentration time series
### Aliases: genplot
### Keywords: genplot

### ** Examples

## Simple example data: four fluorene concentrations (c1) with their
## sampling start dates (c3) and end dates (c4).
c1 <- c(0.386, 0.256, 0.182, 0.254)
c2 <- "fluorene"
c3 <- c("2013-05-01", "2013-06-03", "2013-07-05", "2013-08-07")
c4 <- c("2013-05-08", "2013-06-10", "2013-07-12", "2013-08-14")
sample_genasis <- data.frame(c1, c2, c3, c4)
sample_openair <- data.frame(c4, c1)
colnames(sample_openair) <- c("date", "fluorene")

## Different ways of calling genplot():
genplot(sample_openair, input = "openair", pollutant = "fluorene",
        distr = "lnorm", n = 10, ci = "gradient", col = "black",
        col.points = "red", pch = 15)
genplot(sample_genasis, input = "genasis", n = 10, col = "blue")
genplot(c1, c3, ci = FALSE, pch = 1, main = "fluorene")

## Example data shipped with the package:
data(kosetice.pas.openair)
genplot(kosetice.pas.openair[, 1:8], col = "orange", il = "ts", ci = FALSE)
data(kosetice.pas.genasis)
## Not run:
##D genplot(kosetice.pas.genasis[1:208,],input="genasis",
##D distr="lnorm",ci="gradient",col="orange")
## End(Not run)
5bc84bdfff86192626e3c05ab84f9d209f7918f9
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
/man/undocumented.Rd
8a801cc3b816b6c577ed58b8d8b61da11f967a25
[]
no_license
rpruim/fastR2
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
refs/heads/main
2022-05-05T23:24:55.024994
2022-03-15T23:06:08
2022-03-15T23:06:08
3,821,177
11
8
null
null
null
null
UTF-8
R
false
true
437
rd
undocumented.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/undocumented.R \name{undocumented} \alias{undocumented} \alias{grid.identify.points} \alias{funvec} \title{Undocumented functions} \description{ These objects are undocumented. } \details{ Some are left-overs from a previous version of the book and package. In other cases, the functions are of limited suitability for general use. } \author{ Randall Pruim }
140cd41d4df5099e02c64e373212f235f7a3e3a5
05a9f722bfd91a75144ebf840f296f932b5baf20
/BrestNewlyn/keyWestACREannualSLP.R
5f4856d2ae3435395fff61f9b4a04812b10b7931
[]
no_license
simonholgate/R-Scripts
05118e4e92118a506eaf29bf6fa9aca4ea3f9477
89ab9ee9da1bbce10f4dc9a422259dda64748689
refs/heads/master
2020-05-17T14:48:02.345740
2012-06-21T11:19:58
2012-06-21T11:19:58
4,738,138
1
0
null
null
null
null
UTF-8
R
false
false
2,180
r
keyWestACREannualSLP.R
##**********************************************************************##
## Calculate annual mean SLP for Key West and San Francisco area using
## ACRE SLP data.
##**********************************************************************##

##----------------------------------------------------------------------##
## Functions for use below                                              ##
##----------------------------------------------------------------------##

## Collapse a 2D array of monthly values into annual means.
##
## slpArray: nmon rows (months) x nstns columns (stations), where nmon is
##           a multiple of 12.
## Returns:  an (nmon/12) x nstns array of annual means, one row per year.
annual.2d.slp <- function(slpArray) {
  n.stn <- dim(slpArray)[2]
  n.yr <- dim(slpArray)[1] / 12
  annual <- array(NA, dim = c(n.yr, n.stn))
  for (stn in seq_len(n.stn)) {
    ## Reshape the station's monthly series to 12 x n.yr so that each
    ## column holds one year, then average the columns.
    annual[, stn] <- colMeans(matrix(slpArray[, stn], nrow = 12, ncol = n.yr))
  }
  annual
}

##----------------------------------------------------------------------##
## Non-functional part                                                  ##
##----------------------------------------------------------------------##

# library(fields)
nyr <- 138
slp.yrs <- 1871:2008

## Key West / San Francisco annual pressure.
## Monthly data, 2 stations: first Key West, then San Francisco.  The
## loaded variable is named slpKeyWestStns.
load("~/diskx/polcoms/brestNewlyn/analysis/paper/keyWestEofACRE/keyWestACREslp.RData")

# Convert Pa to Mb
slpKeyWestStns <- slpKeyWestStns / 100
nstns <- 2

slpAnnArray <- annual.2d.slp(slpKeyWestStns)
save(slpAnnArray, file = "keyWestACREannualSLP.RData")
9f39f61b6d49cf8094a458f943ec5b6302ced7ed
1ea0969c88f299c5f97fd426cb0befe285adfc7e
/man/get_all_commits.Rd
ee71f03839be1aabdb57153bc1996d8002c8dd3e
[]
no_license
chapmandu2/gitscraper
b1e180fe46fc01314a79e969d9e1bbe8b47fb7b5
2f6bad9a705d79071ef829deb6bfa93ef1e0ce23
refs/heads/master
2020-04-21T18:35:33.218985
2019-02-08T12:04:14
2019-02-08T12:04:14
169,775,658
0
0
null
null
null
null
UTF-8
R
false
true
322
rd
get_all_commits.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_all_commits.R \name{get_all_commits} \alias{get_all_commits} \title{Get all commits} \usage{ get_all_commits(git_repo) } \arguments{ \item{git_repo}{path to git repo} } \value{ data frame } \description{ Get all commits } \examples{ NULL }
6c6bae60d8292f3208064c9e8f939f529a4a853d
fe6cc44c3444421c510ec380118e92deb55b1564
/class3-master/code.R
be0781817c6000577ab13049b1a6841aa62a2699
[]
no_license
IlgizMurzakhanov/BDA
d5a9f0fe09f45a4fffd4eff256cc603eb6eabdc0
87cf6fbdde52fa53296488f269704e26fcb433a4
refs/heads/master
2021-04-08T05:41:47.741798
2015-12-19T19:50:10
2015-12-19T19:50:10
null
0
0
null
null
null
null
UTF-8
R
false
false
1,214
r
code.R
# Template for fitting a Stan model to dumped data:
# data <- read_rdump('data_file.data.R')
# fit <- stan("model_file.stan", data = data)
# print(fit)
# plot(fit)

library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
options(width = 160)

# Fit a simple normal model, then refit the compiled model on new data.
data <- read_rdump('normal1.data.R')
fit <- stan("normal.stan", data = data)
print(fit)
plot(fit)

data <- read_rdump('normal2.data.R')
fit <- stan(fit = fit, data = data)
print(fit)

# Not enough data or add priors
fit <- stan("compiletime-error1.stan", data = data)

# Needed semicolon
fit <- stan("compiletime-error2.stan", data = data)

# Need to declare all variables before use
# real x[N, M];   x[1]: real[M]
# matrix[N, M] x; x[1]: vector[M]
# vector[N] x[M];
# All same dims
fit <- stan("compiletime-error3.stan", data = data)

# Samples from Bernoulli are ints not reals
fit <- stan("runtime-error1.stan", data = read_rdump('runtime-error1.data.R'))

# Y data is greater than 1
fit <- stan("runtime-error2.stan", data = read_rdump("runtime-error2.data.R"))

# J is missing from data
fit <- stan("runtime-error3.stan", data = read_rdump("runtime-error3.data.R"))

# Use sqrt(sigma) instead so not sampling wrong values
data <- read_rdump('normal1.data.R')
fit <- stan("normal2.stan", data = data)
75d622b731178ad5c092297008f910d7fe0d11ef
fefc5800250818b2a53e1211cb83723af03ec82b
/reveals_handmade.R
5d06baa0eb445be0e8741212514fc0fe12db085e
[]
no_license
mtrachs/reveals_test
46c8b4968b251cd8ab4057035c25886672412d79
ee4088ad812c45e79a2cf43585b21c7d194008be
refs/heads/master
2020-03-19T08:17:00.630413
2018-06-05T15:33:17
2018-06-05T15:33:17
136,191,095
0
0
null
null
null
null
UTF-8
R
false
false
3,335
r
reveals_handmade.R
library(zipfR)
library(rioja)

# NOTE(review): setwd() in a script is fragile; kept only for backward
# compatibility with the original workflow.
setwd('~/Reveals_NEUS/')

#---------------------------------------------------------------------------
# Species-dependent deposition coefficient K_i.
#
# b    - species dispersal parameter, 4/sqrt(pi) * fall_speed / (n*u*c).
# R    - radius of the sampling basin (same length unit as zmax).
# zmax - maximum dispersal distance considered.
# Returns the deposition coefficient for one species.
Ki <- function(b, R, zmax = 400000) {
  ul1 <- b * (zmax - R)^(1/8)
  ul2 <- b * (zmax + R)^(1/8)
  ul3 <- b * (2 * R)^(1/8)
  # Incomplete-gamma pieces of the integrated dispersal kernel.
  gamma_Ki <- Igamma(8, ul1, lower = TRUE) - Igamma(8, ul2, lower = TRUE) +
    Igamma(8, ul3, lower = TRUE)
  return(4 * pi * R / b^8 * gamma_Ki)
}

#---------------------------------------------------------------------------
# REVEALS estimate of vegetation proportions (eq. 5 in Sugita 2007a).
#
# pollen     - pollen values for one sample, one entry per taxon.
# fall_speed - pollen fall speeds, one per taxon.
# ppes       - pollen productivity estimates, one per taxon.
# R, zmax    - basin radius and maximum dispersal distance.
# n, u, c    - dispersal-model constants.
# Returns a numeric vector of estimated vegetation proportions.
REVEALS_gamma <- function(pollen, fall_speed, ppes, R, zmax, n, u, c) {
  # BUG FIX: the original body read the *global* `fall.speed` instead of the
  # `fall_speed` argument, so whatever the caller passed was silently ignored.
  b <- 4 / sqrt(pi) * fall_speed / (n * u * c)
  # Species-specific deposition coefficient.
  K_species <- sapply(b, function(x) {
    Ki(x[[1]], R = R, zmax = zmax)
  })
  # eq (5) in Sugita (2007a)
  weighted_pollen <- pollen / (ppes * K_species)
  veg_proportion <- weighted_pollen / sum(weighted_pollen)
  return(as.numeric(veg_proportion))
}

#---------------------------------------------------------------------------
# Load data
pollen <- read.csv("data/reveals_input.csv")
pollen <- pollen[-1]
names(pollen) <- unlist(strsplit(names(pollen), '[.]'))[seq(2, (2 * ncol(pollen)), 2)]

reveals.params <- read.csv('data/reveals_input_params_variable.csv')
taxa <- reveals.params$species

# Fall speed
fall.speed <- reveals.params$fallspeed
names(fall.speed) <- taxa
fall.speed <- as.data.frame(t(fall.speed))

# PPE
ppes <- reveals.params$PPEs
names(ppes) <- taxa

# Keep only taxa present in the pollen table.
fall.speed <- fall.speed[names(fall.speed) %in% names(pollen)]
ppes <- ppes[names(ppes) %in% names(pollen)]

# Massive difference depending on use of meters or km.
# (Calls now pass the `fall.speed` object that actually exists; the original
# passed the undefined name `fall_speed`, which only "worked" because lazy
# evaluation meant the argument was never touched.)
REVEALS_gamma(pollen, fall.speed, ppes, R = 1, zmax = 100, n = 0.25, u = 3, c = 0.12)
REVEALS_gamma(pollen, fall.speed, ppes, R = 1000, zmax = 100000, n = 0.25, u = 3, c = 0.12)

#---------------------------------------------------------------------------
# Test sensitivity to maximum dispersal
distances <- c(seq(10, 100, 10), seq(200, 1000, 100))
sensitivity_reveals_gamma <- sapply(distances, function(x) {
  REVEALS_gamma(pollen, fall.speed, ppes, R = 1, zmax = x, n = 0.25, u = 3, c = 0.12)
})
colnames(sensitivity_reveals_gamma) <- distances

# Distances between selected zmax scenarios.
dist(t(sqrt(sensitivity_reveals_gamma[, c("50", "100", "400")])))^2
paldist(t(sensitivity_reveals_gamma[, c("50", "100", "400")]))

#---------------------------------------------------------------------------
# Load new pollen data
#---------------------------------------------------------------------------
load('~/workflow_stepps_calibration/vegetation/data_nb/prediction_13_taxa_6796_cells_120_knots_cal_pl_Ka_Kgamma_EPs_79_sites_final.rdata')
pollen <- y
colnames(pollen)[grep("Other", colnames(pollen))] <- c('Other_conifer', 'Other_hardwood')
pollen <- pollen[, colnames(pollen) %in% names(ppes)]

sensitivity_all <- lapply(distances, function(distan) {
  pred_reveals <- apply(pollen, 1, function(x) {
    REVEALS_gamma(x, fall.speed, ppes, R = 1, zmax = distan, n = 0.25, u = 3, c = 0.12)
  })
  pred_reveals <- t(pred_reveals)
  colnames(pred_reveals) <- colnames(pollen)
  round(pred_reveals, 3)
})
names(sensitivity_all) <- distances
3df9bbe5db4f5f443f36854beb8774e1e408b370
06fc7cc5aa8aae3ded43c73a854c99be37ed25be
/RandomVariable/man/draw_nogen.Rd
84a6c5c093e0740a4a260de4b08f03dd375d8b8c
[]
no_license
emadsalehi/R-package
70c97acfa021f57c11365f3df9d74fb0bfd26003
1529223f72be9765d64bd46549fd0b924c25af40
refs/heads/master
2020-03-19T05:17:01.109380
2018-06-06T13:02:10
2018-06-06T13:02:10
135,916,615
0
0
null
null
null
null
UTF-8
R
false
false
266
rd
draw_nogen.Rd
\name{draw_nogen} \alias{draw_nogen} \title{draw density plot of normal random variable} \usage{ draw_nogen(u, s) } \description{ draw density plot of normal random variable with mean u and variance s: draw_nogen(u, s) } \examples{ draw_nogen(60, 16) }
a71d7132bff0c031d32885198bd7c516c04095ef
cd58a7407a07c4b846465c489e9f7c5ff41fc002
/R/root_tree_in_outgroup.R
e38b35227f0007b07a6acd6419c30c00c4af3042
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
simeross/CuPhyR
e09c25c63bb5f8cf8800a87ecbafb971fbdca923
a69819cb48d6e5f5ed982ae9ad8bb85f6c1d9347
refs/heads/master
2022-04-13T06:49:33.264968
2020-03-31T08:38:19
2020-03-31T08:38:19
241,898,395
0
0
null
null
null
null
UTF-8
R
false
false
1,760
r
root_tree_in_outgroup.R
#' Root phylogenetic tree of a phyloseq object
#'
#' @description This function defines the leaf with the longest terminal
#' branch as the root of the phylogenetic tree. This makes results
#' reproducible by avoiding the behaviour of some functions that would
#' otherwise pick a random leaf as the root of an unrooted phylogenetic tree.
#' Based on answers in https://github.com/joey711/phyloseq/issues/597. The
#' function requires the packages 'ape' and 'data.table' to be installed.
#' @author Simeon Rossmann
#' @seealso Discussion and answers in [related GitHub thread](https://github.com/joey711/phyloseq/issues/597)
#'
#' @param physeq a phyloseq object containing a phylogenetic tree to be rooted in an outgroup.
#'
#' @return a rooted phylogenetic tree.
#'
#' @examples
#' phyloseq::phy_tree(ps) <- root_tree_in_outgroup(physeq = ps)
#'
#' @export
root_tree_in_outgroup <- function(physeq = ps){
  # BUG FIX: requireNamespace() only inspects the first element of a
  # character vector, so the original call never actually checked
  # 'data.table'. Check each required package individually instead.
  required <- c("ape", "data.table")
  available <- vapply(required, requireNamespace, logical(1), quietly = TRUE)
  if (all(available)) {
    phylo_tree <- phyloseq::phy_tree(physeq)
    tips <- ape::Ntip(phylo_tree)
    # Pair each edge with its length; the first `tips` rows are taken as
    # the terminal (tip) edges, matching the approach from the phyloseq
    # GitHub discussion.
    tree_data <- base::cbind(
      data.table::data.table(phylo_tree$edge),
      data.table::data.table(length = phylo_tree$edge.length))[1:tips, ]
    tree_data <- base::cbind(tree_data,
                             data.table::data.table(id = phylo_tree$tip.label))
    # Longest terminal branch becomes the outgroup.
    # BUG FIX: the original used unqualified dplyr::slice/select and `%>%`
    # although only 'ape' and 'data.table' were checked; use base
    # subsetting so no extra packages are needed.
    out_group <- as.character(tree_data$id[which.max(tree_data$length)])
    new_tree <- ape::root(phylo_tree, outgroup = out_group, resolve.root = TRUE)
    message("Tree successfully rooted.")
  } else {
    stop("The function 'root_tree_in_outgroup' requires the packages 'ape' and 'data.table' to be installed. Please make sure those packages can be loaded.")
  }
  return(new_tree)
}
7fdea95ec3c5ac915a9cab15da9d54daca384a80
ba0d52a9447cc2cedcaacafd8349fc50a32363b5
/man/plotSurvGenderSeverity.Rd
701375eda5afa3238f16c8df1e0b051a43d44140
[ "CC0-1.0" ]
permissive
robschick/tangled
49590a754531b8e50294abb4d86fcd9cc85d037c
e4c0e49fa87802dd39fba01dc4fba5cef25e7b31
refs/heads/master
2023-04-07T19:24:43.838552
2022-05-04T19:11:30
2022-05-04T19:11:30
33,547,111
0
0
null
null
null
null
UTF-8
R
false
true
2,303
rd
plotSurvGenderSeverity.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotSurvGenderSeverity.R \name{plotSurvGenderSeverity} \alias{plotSurvGenderSeverity} \title{Plot of Kaplan-Meier survival curve for all entangled whales.} \usage{ plotSurvGenderSeverity(kmlines, censTicks, yearEnd, increment, legendLabs) } \arguments{ \item{kmlines}{A list with \code{nboot} components that contains a data frame in each component. The data frame is the KM curve based on samples of death times for the presumed dead animals. The first component of the list is the median estimate of survivorship. This will be used as the main KM curve. The other components will be used to display uncertainty.} \item{yearEnd}{A matrix where each row contains the estimated death times for each animal. Times are in the columns from 1:bnt, which allows for the animal to be alive at the time modelling end.} \item{increment}{Scalar representing the temporal unit at which we're showing survival.} \item{legendLabs}{Character vector to be used in plotting the legend.} \item{censTicks}{A list with \code{nboot} components that contains a data frame in each component. The data frame contains information on when the animal is censored. This changes in each iteration, and right now the function is set up to just plot the censored marks from the most probable censored year.} } \value{ A ggplot2 object that can be used to create the output plot } \description{ \code{plotSurvGenderSeverity} returns a plot of survivorship by severity and gender } \details{ This is a function that will build a \code{ggplot2} object that displays the KM curve along with the times animals get censored. The median estimates of survivorship and censored times are used to make the main line. This comes from the first element of each list that is passed to the function. The idea behind this function is to show the uncertainty in survivorship that arises from the different estimates of death in each animal.
The main difference from \code{plotSurv} is that this breaks out overall survival and produces three lines for each of the three entanglement categories as well as plotting facets for each gender } \examples{ \dontrun{ plotSurvGenderSeverity(kmlines, censTicks, 7) } }
6f8c6cb2e0604c0ff474d789c311f68ec2c169c1
e646416a1bbc302f73d2fdcbe78c5a8069e40fc8
/random_foodwebs/info_food_webs.R
fb2e5e63391444e6e8c1ba18a27bfdf9fdf60e72
[ "MIT" ]
permissive
jusinowicz/info_theory_eco
c0ef0c0f94eca2df3b7308098f05b72233261c43
b0770b10464732aa32d13f46ba3c5ef958a74dcc
refs/heads/master
2022-05-28T19:34:50.642858
2022-05-05T17:37:19
2022-05-05T17:37:19
140,295,569
2
0
null
null
null
null
UTF-8
R
false
false
36,627
r
info_food_webs.R
#============================================================================= # R code to create to explore the Information Theoretic properties of # simple food webs. This creates a simple food web with an underlying dynamic # model. # 1. Food-web includes resource, herbivore, and predator: # A. Resource is based on a consumer-resource model, with added predators # 1. Competition between consumers and resources emerges from consumption # 2. Parameters at each level can be made a function of temperature. # B. Resources can be stochastic due to environmental fluctuations. # C. Relative non-linearity allows 2 consumers per Resource # 2. Generate a bunch of random food webs # 3. Use information theory to track the resulting food-web structures. # 4. This file has a lot of code for visualizing output of both the foodweb # its information theoretic properties after the main loop. #============================================================================= #============================================================================= # load libraries #============================================================================= library(deSolve) library(fields) source("../info_theory_functions/food_web_functions.R") source("../info_theory_functions/info_theory_functions.R") #============================================================================= # Outer loop. Set the number of trials and determine how to generate # combinations of species and parameters. 
#============================================================================= #Length and time steps of each model run tend = 200 delta1 = 0.01 tl=tend/delta1 #The maximum block depth for dynamic info metrics (larger is more accurate, but #slower and could cause crashing if too large) k= 5 #Number of food webs to generate nwebs = 20 #Output of each web out1 = vector("list",nwebs) #Converting the web to Rutledge's compartment model and calculating the information #theoretic quantities: Shannon Entropy, Mutual Information, Conditional Entropy rweb1 = vector("list",nwebs) #Dynamic information metrics calculated from the (discretized) time series di_web = vector("list",nwebs) #Track the average transfer entropy and separable information between each pair of #species as a way to build a network of information flow through the network. te_web = vector("list",nwebs) si_web = vector("list",nwebs) #Random resources: c = 0.1 amp = 1 res_R = c(amp,c) for (w in 1:nwebs){ print(w) #Assume 3 trophic levels unless otherwise specified. nRsp = ceiling(runif(1)*30) nCsp = ceiling(runif(1)*20) nPsp = ceiling(runif(1)*10) nspp = nRsp+nCsp+nPsp #Randomly generate the species parameters for the model as well: spp_prms = NULL #Resource: Nearly identical resource dynamics: spp_prms$rR = matrix(rnorm(nRsp,300,10), nRsp, 1) #intrinsic growth spp_prms$Ki = matrix(rnorm(nRsp,500,10), nRsp, 1) #carrying capacity #Consumers: spp_prms$rC = matrix(rnorm(nCsp,.5,0.2), nCsp, 1) #intrisic growth spp_prms$eFc = matrix(1,nCsp,nRsp) # just make the efficiency for everything 1 for now spp_prms$muC = matrix(rnorm(nCsp,0.6,0.1), nCsp, 1) #mortality rates #Consumption rates: #Generate a hierarchy where each species predominantly feeds on particular resource. 
dspp = abs((nCsp - nRsp)) hier1= seq(1/nRsp, (1-1/nRsp), length=nRsp) spp_prms$cC = hier1 for( n in 1:nCsp) { spp_prms$cC = cbind(spp_prms$cC, shifter(hier1,n)) } spp_prms$cC = matrix(spp_prms$cC[1:nRsp,1:nCsp ],nRsp,nCsp) #Predators: spp_prms$rP = matrix(rnorm(nPsp,0.5,0.2), nPsp, 1) #intrisic growth spp_prms$eFp = matrix(1,nPsp,nCsp) # just make the efficiency for everything 1 for now spp_prms$muP = matrix(rnorm(nPsp,0.6,0.1), nPsp, 1) #mortality rates #Consumption rates: #Generate a hierarchy where each species predominantly feeds on particular resource. dspp = ((nPsp - nCsp)) if(dspp<0){dspp = 0 } hier1= seq(1/nCsp, (1-1/nCsp), length = nCsp) spp_prms$cP = hier1 for( n in 1:nPsp) { spp_prms$cP = cbind(spp_prms$cP, shifter(hier1,n)) } spp_prms$cP = matrix(spp_prms$cP[1:nCsp,1:nPsp],nCsp,nPsp) #============================================================================= # Inner loop. Run the food web model, calculate information theoretic # quantities. #============================================================================= #============================================================================= # This function gives: # out The time series for of population growth for each species in the web # This can be set to just give the final 2 time steps of the web with # "final = TRUE" # spp_prms The parameters of all species in the food web #============================================================================= # tryCatch( {out1[w] = list(food_web_dynamics (spp_list = c(nRsp,nCsp,nPsp), spp_prms = spp_prms, # tend, delta1, res_R = NULL,final = FALSE ))}, error = function(e){}) #Random resource fluctuations: tryCatch( {out1[w] = list(food_web_dynamics (spp_list = c(nRsp,nCsp,nPsp), spp_prms = spp_prms, tend, delta1, res_R = res_R) ) print( paste( "nRsp", sum(out1[[w]]$out[tl,1:nRsp]>1) ) ) print( paste( "nCsp", sum(out1[[w]]$out[tl,(nRsp+1):nCsp]>1) ) ) print( paste( "nPsp", sum(out1[[w]]$out[tl,(nCsp+1):nPsp]>1) ) ) # plot(out1[[w]]$out[,1], t="l", 
ylim = c(0, max(out1[[w]]$out[tl,],na.rm=T) ) ) # for(n in 2:nRsp){ lines(out1[[w]]$out[,n], col ="red") } # for(n in (nRsp+1):(nCsp) ){ lines(out1[[w]]$out[,n], col ="blue") } # for(n in (nCsp+1):(nPsp) ){ lines(out1[[w]]$out[,n]) } }, error = function(e){}) #============================================================================= # Information theoretic assessment of the foodweb. #============================================================================= #============================================================================= # This section is as per Rutledge, Basore, and Mulholland 1976 #============================================================================= ## This code takes the ODEs and converts them to a biomass balance matrix and ## transition matrix. ## This version creates a compartment for each "event" where biomass is gained ## or loss. This includes birth, death, and "inefficiency" in the form of the ## way that biomass consumed translates to new population biomass. #============================================================================= # This function gives: # Qi(t) Biomass proportion flow through a node at time t # fij(t) Probability of biomass flow between i and j at t # fijQi(t) Total biomass flowing from i to j at t # sD Shannon entropy # mI_mean Average mutual information # mI_per Mutual information per interaction # ce Conditional entropy #============================================================================= # rweb1[w] = list(rutledge_web( spp_list=c(nRsp,nCsp,nPsp), pop_ts = out1[[w]]$out[,2:(nspp+1)], # spp_prms = out1[[w]]$spp_prms) ) #============================================================================= # Information processing networks #============================================================================= ## This code takes the population time-series counts output by the ODEs and ## calculates Excess Entropy, Active Information Storage, and Transfer Entropy. 
## Each quantity is calculated at both the average and local level. #============================================================================= # This function gives: # EE_mean Average mutual information per species # AI_mean Average active information per species # TE_mean Average transfer entropy per species # # EE_local Local mutual information per species # AI_local Local active information per species # TE_local Local transfer entropy per species #============================================================================= nt1 = 1 nt2 = tl di_web[w] = list(get_info_dynamics(pop_ts = floor(out1[[w]]$out[nt1:tl,2:(nspp+1)]), k=k,with_blocks=TRUE)) ## This code takes the population time-series counts output by the ODEs and ## calculates the average Transfer Entropy from each species to every other ## species. The goal is to get an overview of the major information pathways ## in the web. #============================================================================= # This function gives: # te_web Average transfer entropy per species as a pairwise matrix #============================================================================= te_web[w] = list( get_te_web( pop_ts = floor(out1[[w]]$out[nt1:tl,2:(nspp+1)]), k=k) ) ## This code takes the population time-series counts output by the ODEs and ## calculates the average Separable Information from each species to every other ## species. The goal is to get an overview of the major information pathways ## in the web. 
#============================================================================= # This function gives: # si_web Average separable information per species as a pairwise matrix #============================================================================= si_web[w] = list( get_si_web( pop_ts = floor(out1[[w]]$out[nt1:tl,2:(nspp+1)]), k=k) ) } #============================================================================= # Examine a particular food web more closely: #============================================================================= library(viridis) library(fields) library(igraph) library(visNetwork) w=1 #============================================================================= #Export parameters into csv tables for easier reading. # !!! Make sure to set the name of the excel file below!!!! #============================================================================= library(xlsx) var_load = out1[[w]]$spp_prms[5] #These start at variable 5 and go to 14 write.xlsx(var_load, file="spp_prms_rweb1.xlsx", sheetName="sheet1", row.names=FALSE) for (n in 6:14){ var_load = out1[[w]]$spp_prms[n] sheet = paste("sheet",n-4, sep='') write.xlsx(var_load, file="spp_prms_rweb1.xlsx", sheetName=sheet, append=TRUE,row.names=FALSE) } #============================================================================= #Export the average information theoretic quantities into tables. # !!! Make sure to set the name of the excel file below!!!! 
#============================================================================= library(xlsx) var_load = di_web[[w]]$ee_means #These start at variable 5 and go to 14 write.xlsx(var_load, file="avg_dit_rweb1.xlsx", sheetName="sheet1", row.names=FALSE) var_load = di_web[[w]]$ai_means #These start at variable 5 and go to 14 write.xlsx(var_load, file="avg_dit_rweb1.xlsx", sheetName="sheet2",append=TRUE,row.names=FALSE) var_load = di_web[[w]]$te_means #These start at variable 5 and go to 14 write.xlsx(var_load, file="avg_dit_rweb1.xlsx", sheetName="sheet3",append=TRUE,row.names=FALSE) var_load = di_web[[w]]$si_means #These start at variable 5 and go to 14 write.xlsx(var_load, file="avg_dit_rweb1.xlsx", sheetName="sheet4",append=TRUE,row.names=FALSE) #============================================================================= # Plot each of the average information theoretic metrics as a bar graph #============================================================================= fig.name = paste("average_dynamics_rweb1.pdf",sep="") pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', pointsize=16) layout.matrix=matrix(c(1:4), nrow = 2, ncol = 2) layout(mat = layout.matrix, heights = c(5,5), # Heights of the rows widths = c(5,5)) # Widths of columns #layout.show(4) barplot(di_web[[w]]$ee_means,cex.lab =1.3, beside = TRUE,ylab="Bits of information", xlab = "") abline(v =out1[[w]]$spp_prms$nRsp+1,col="red") mtext("Resour", side=1, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(v =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp +1,col="blue" ) mtext("Consum", side=1, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Pred", side=1, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) barplot(di_web[[w]]$ai_means,cex.lab =1.3, beside = TRUE,ylab="Bits of information", xlab = "Species #") abline(v =out1[[w]]$spp_prms$nRsp+1,col="red" ) mtext("Resour", side=1, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(v 
=out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp +1,col="blue" ) mtext("Consum", side=1, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Pred", side=1, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) mtext("Average Information Storage", side = 3, line =4) barplot(di_web[[w]]$te_means,cex.lab =1.3, beside = TRUE,ylab="", xlab = "") abline(v =out1[[w]]$spp_prms$nRsp+1,col="red" ) mtext("Resour", side=1, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(v =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp +1,col="blue" ) mtext("Consum", side=1, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Pred", side=1, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) mtext("Average Information Transfer", side = 3, line = 2) barplot(di_web[[w]]$si_means,cex.lab =1.3, beside = TRUE,ylab="", xlab = "Species #") abline(v =out1[[w]]$spp_prms$nRsp+1,col="red" ) mtext("Resour", side=1, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(v =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp +1 ,col="blue" ) mtext("Consum", side=1, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Pred", side=1, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) mtext("Average Information Modification", side = 3, line = 2) dev.off() #============================================================================= # Plot the population dynamics #============================================================================= out = out1[[w]]$out nspp = out1[[w]]$spp_prms$nspp nRsp = out1[[w]]$spp_prms$nRsp nCsp = out1[[w]]$spp_prms$nCsp nPsp = out1[[w]]$spp_prms$nPsp tl = tend/delta1 par(mfrow=c(3,1)) #Resource species in RED plot(out[,"1"],t="l",col="red",ylim = c(0,max(out[tl,2:(nRsp+1)],na.rm=T))) for( n in 1:(nRsp) ) { lines(out[,paste(n)],t="l",col="red") } #Consumer species in BLUE plot(out[,paste(nRsp+2)],t="l",col="blue",ylim = c(0,max(out[tl,(nRsp+2):(nRsp+nCsp+1)],na.rm=T))) for( n in ( 
(nRsp+1):(nRsp+nCsp) ) ) { lines(out[,paste(n)],t="l",col="blue") } #Predator species in BLACK plot(out[,paste(nRsp+nCsp+2)],t="l",ylim = c(0,max(out[tl,(nRsp+nCsp+2):(nspp+1)],na.rm=T))) for( n in ((nRsp+nCsp+1):(nspp) ) ) { lines(out[3900:4000,paste(n)],t="l") } #============================================================================= # Plot the dynamic information metrics with time #============================================================================= #Local excess entropy nt_use = dim(di_web[[w]]$ee_local)[1] image.plot( 1:nt_use, 1:nspp, di_web[[w]]$ee_local, ylab="Species number", xlab="Time" ) abline(h =out1[[w]]$spp_prms$nRsp ) mtext("Resources", side=2, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(h =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp ) mtext("Consumers", side=2, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Predators", side=2, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) #Local active information storage nt_use = dim(di_web[[w]]$ai_local)[1] image.plot( 1:nt_use, 1:nspp, di_web[[w]]$ai_local, ylab="Species number", xlab="Time" ) abline(h =out1[[w]]$spp_prms$nRsp ) mtext("Resources", side=2, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(h =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp ) mtext("Consumers", side=2, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Predators", side=2, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) #Local transfer entropy nt_use = dim(di_web[[w]]$te_local)[1] image.plot( 1:nt_use, 1:nspp, di_web[[w]]$te_local, ylab="Species number", xlab="Time" ) abline(h =out1[[w]]$spp_prms$nRsp ) mtext("Resources", side=2, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(h =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp ) mtext("Consumers", side=2, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Predators", side=2, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) #Local 
separable information nt_use = dim(di_web[[w]]$si_local)[1] image.plot( 1:nt_use, 1:nspp, di_web[[w]]$si_local, ylab="Species number", xlab="Time" ) abline(h =out1[[w]]$spp_prms$nRsp ) mtext("Resources", side=2, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(h =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp ) mtext("Consumers", side=2, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Predators", side=2, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) #============================================================================= # Network plots of information storage. # The local exess entropy or active information could be used to show the # dominant cycles involved in information storage... #============================================================================= #============================================================================= # Network plots of information transfer. # This uses the average Transfer Entropy between each species pair to create # a directed network of information transfers. #============================================================================= ###This shows the network, but only highlights the largest link between each ###node #Pair down the graph by removing species that have essentially gone extinct #from the system. 
spp_use = (1:nspp)[out1[[w]]$out[10000,2:nspp]>1e-5] te_web1 = te_web[[w]][spp_use,spp_use] #Make an igraph object te_gr = graph_from_adjacency_matrix(te_web1, mode="directed", weighted=T) #Convert to VisNetwork list te_visn = toVisNetworkData(te_gr) te_visn$nodes$value = te_visn$nodes$id #Copy column "weight" to new column "value" in list "edges" te_visn$edges$value = te_visn$edges$weight #Further prune links that are smaller than the 95% interval m1 = mean(c(log(te_visn$edges$value))) sd1 = sqrt(var(c(log(te_visn$edges$value)))) te_visn$edges =te_visn$edges[log(te_visn$edges$value) > (m1-sd1), ] #Color code the nodes by trophic level spp_colors= c( matrix("red",nRsp,1),matrix("blue",nCsp,1), matrix("black",nPsp,1) ) spp_colors = spp_colors [spp_use] te_visn$nodes$color = spp_colors #Plot this as an HTML object #Add arrows to show direction #Add an option that when a node is clicked on only the "from" arrows are shown visNetwork(te_visn$nodes, te_visn$edges) %>% visEdges(arrows="to", arrowStrikethrough =FALSE ) %>% visOptions(highlightNearest = list(enabled =TRUE, degree =0) )%>% visIgraphLayout(layout = "layout_in_circle") %>% #visSave(file="te_graph1p.html", selfcontained = FALSE, background = "white") visExport( type = "pdf", name = "te_web_biggest_1") ###################################################### # Add information storage (AIS or EE) as a self-loop!# ###################################################### edges_tmp = data.frame(from = c(1:length(spp_use)), to =(1:length(spp_use)),weight =(1:length(spp_use)) ) edges_tmp$value = di_web[[1]]$ai_means[spp_use] te_visn$edges=rbind(te_visn$edges,edges_tmp) visNetwork(te_visn$nodes, te_visn$edges) %>% visEdges(arrows="to", arrowStrikethrough =FALSE ) %>% visOptions(highlightNearest = list(enabled =TRUE, degree =0) )%>% visIgraphLayout(layout = "layout_in_circle") %>% visSave(file="ai_te_graph1.html", selfcontained = FALSE, background = "white") #visExport( type = "pdf", name = "te_web_biggest_1") 
###################################### #Because transfer can be asymmetrical, make 2 different graphs showing direction #of flows. te_gr1 = graph_from_adjacency_matrix( (te_web[[w]]*lower.tri(te_web[[w]])), mode="directed", weighted=T) te_gr2 = graph_from_adjacency_matrix( (te_web[[w]]*upper.tri(te_web[[w]])), mode="directed", weighted=T) #Convert to VisNetwork list te_visn1 = toVisNetworkData(te_gr1) te_visn2 = toVisNetworkData(te_gr2) #Copy column "weight" to new column "value" in list "edges" te_visn1$edges$value = te_visn1$edges$weight te_visn2$edges$value = te_visn2$edges$weight #Color code the nodes by trophic level te_visn1$nodes$color = c( matrix("red",nRsp,1),matrix("blue",nCsp,1), matrix("black",nPsp,1) ) te_visn2$nodes$color = c( matrix("red",nRsp,1),matrix("blue",nCsp,1), matrix("black",nPsp,1) ) #te_visn1$nodes$color = c( matrix(c("red","blue","black"),9,1) ) #te_visn2$nodes$color = c( matrix(c("red","blue","black"),9,1) ) #Plot this as an HTML object #Add arrows to show direction: te_visn1$edges$arrows = c(matrix("to",dim(te_visn1$edges)[1])) te_visn2$edges$arrows = c(matrix("to",dim(te_visn2$edges)[1])) visNetwork(te_visn1$nodes, te_visn1$edges) %>% visIgraphLayout(layout = "layout_in_circle") %>% visExport( type = "pdf", name = "te_web_clock_1") visNetwork(te_visn2$nodes, te_visn2$edges) %>% visIgraphLayout(layout = "layout_in_circle") %>% visExport( type = "pdf", name = "te_web_clock_1") #============================================================================= # Network plots of information modification. # This uses the average Separable Information between each species pair to create # a directed network of information transfers. #============================================================================= ###This shows the network, but only highlights the largest link between each ###node #Pair down the graph by removing species that have essentially gone extinct #from the system. 
spp_use = (1:nspp)[out1[[w]]$out[10000,2:nspp]>1e-5] si_web1 = si_web[[w]][spp_use,spp_use] #Make an igraph object si_gr = graph_from_adjacency_matrix(si_web1, mode="directed", weighted=T) #Convert to VisNetwork list si_visn = toVisNetworkData(si_gr) si_visn$nodes$value = si_visn$nodes$id #Copy column "weight" to new column "value" in list "edges" si_visn$edges$value = si_visn$edges$weight #Color code the nodes by trophic level spp_colors= c( matrix("red",nRsp,1),matrix("blue",nCsp,1), matrix("black",nPsp,1) ) spp_colors = spp_colors [spp_use] si_visn$nodes$color = spp_colors #Plot this as an HTML object #Add arrows to show direction #Add an option that when a node is clicked on only the "from" arrows are shown visNetwork(si_visn$nodes, si_visn$edges) %>% visEdges(arrows="to", arrowStrikethrough =FALSE ) %>% visOptions(highlightNearest = list(enabled =TRUE, degree =0) )%>% visIgraphLayout(layout = "layout_in_circle") %>% visSave(file="si_graph1.html", selfcontained = FALSE, background = "white") #visExport( type = "pdf", name = "si_web_biggest_1") #============================================================================= # Make combined plots of population and dynamic information metrics with time #============================================================================= #===========================================# #plot1: Info storage (Excess Entropy or AIS) #===========================================# # fig.name = paste("dynamic_info_AIS_rweb1.pdf",sep="") # pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', pointsize=16) #When the figure is only over a subset of the time to show transient dynamics: fig.name = paste("dynamic_info_AIS_rweb1_sub.pdf",sep="") pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', pointsize=16) layout.matrix=matrix(c(1:12), nrow = 6, ncol = 2) layout(mat = layout.matrix, heights = c(1.5, 3.5,1.5, 3.5, 1.5, 3.5, 1.5, 3.5,1.5, 3.5, 1.5, 3.5), # Heights of the rows widths = c(12,1)) # 
Widths of columns #layout.show(12) #par(mfrow=c(2,1),mai= c( 0.0, 0.2, 0.0, 0.2), omi=c(0.5,0.75,0.5,0.75)) #,mai= c( 1, 0, 0.2, 0), omi=c(2,0.75,2,0.75)) ###Common figure properties t1 = 5840 nlevel = 64 #For viridis color scheme #nt_use = dim(di_web[[w]]$ai_local)[1] nt_use = 5940 rs1 = 450 #lower bound for Resource population plot par(oma = c(3,2,3,3) ) #===========================================# #===========================================# ###Predator species par( mar = c(0.5,4,0,4) ) plot(out[t1:nt_use,paste(nRsp+nCsp+2)],t="l",ylim = c(0,max(out[t1:nt_use,(nRsp+nCsp+2):(nspp+1)],na.rm=T)), ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ((nRsp+nCsp+1):(nspp) ) ) { lines(out[t1:nt_use,paste(n)],t="l") } mtext("Local Information Storage", side = 3, line = 0, outer = TRUE) #Local excess entropy #par( mar = c(2,4,0,4) ) # nt_use = dim(di_web[[w]]$ee_local)[1] #image( 1:nt_use, 1:nCsp, di_web[[w]]$ee_local[,(nRsp+nCsp+1):(nspp)], ylab="Species number", # xlab="Time",col=viridis(nlevel) ) #Local active information storage par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nPsp, di_web[[w]]$ai_local[t1:nt_use,(nRsp+nCsp+1):(nspp)], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3) ###Consumer species par( mar = c(0.5,4,0,4) ) #Consumer species in BLUE plot(out[t1:nt_use,paste(nRsp+2)],t="l",col="blue",ylim = c(0,max(out[t1:nt_use,(nRsp+2):(nRsp+nCsp+1)],na.rm=T)) , ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ( (nRsp+1):(nRsp+nCsp) ) ) { lines(out[t1:nt_use,paste(n)],t="l",col="blue") } #Local excess entropy #par( mar = c(2,4,0,4) ) # nt_use = dim(di_web[[w]]$ee_local)[1] #image( 1:nt_use, 1:nCsp, di_web[[w]]$ee_local[,(nRsp+1):(nRsp+nCsp)], ylab="Species number", # xlab="Time",col=viridis(nlevel) ) #Local active information storage par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nCsp, di_web[[w]]$ai_local[t1:nt_use,(nRsp+1):(nRsp+nCsp)], ylab="Species #", 
xlab="Time",col=viridis(nlevel),,cex.main=1.3,cex.lab=1.3 ) ###Resource Species par( mar = c(0.5,4,0,4) ) #Resource species in RED plot(out[t1:nt_use,"1"],t="l",col="red",ylim = c(rs1,max(out[t1:nt_use,2:(nRsp+1)],na.rm=T)), ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2, ) for( n in 1:(nRsp) ) { lines(out[t1:nt_use,paste(n)],t="l",col="red") } #Local excess entropy #par( mar = c(2,4,0,4) ) # nt_use = dim(di_web[[w]]$ee_local)[1] #image( 1:nt_use, 1:nRsp, di_web[[w]]$ee_local[,1:nRsp], ylab="Species number", # xlab="Time",col=viridis(nlevel) ) #Local active information storage par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nRsp, di_web[[w]]$ai_local[t1:nt_use,1:nRsp], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3 ) ###Plot color bars for image plots: #Color bar 1 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$ai_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 2 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$ai_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 3 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$ai_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) dev.off() #===========================================# #plot2: Information transmission (TE) #===========================================# # fig.name = paste("dynamic_info_TE_rweb1.pdf",sep="") # pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', 
pointsize=16) #When the figure is only over a subset of the time to show transient dynamics: fig.name = paste("dynamic_info_TE_rweb1_sub.pdf",sep="") pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', pointsize=16) layout.matrix=matrix(c(1:12), nrow = 6, ncol = 2) layout(mat = layout.matrix, heights = c(1.5, 3.5,1.5, 3.5, 1.5, 3.5, 1.5, 3.5,1.5, 3.5, 1.5, 3.5), # Heights of the rows widths = c(12,1)) # Widths of columns #layout.show(12) #par(mfrow=c(2,1),mai= c( 0.0, 0.2, 0.0, 0.2), omi=c(0.5,0.75,0.5,0.75)) #,mai= c( 1, 0, 0.2, 0), omi=c(2,0.75,2,0.75)) ###Common figure properties nlevel = 64 #For viridis color t1 = 5840 nlevel = 64 #For viridis color scheme #nt_use = dim(di_web[[w]]$ai_local)[1] nt_use = 5940 rs1 = 450 #lower bound for Resource population plot par(oma = c(3,2,3,3) ) #===========================================# #===========================================# ###Predator species par( mar = c(0.5,4,0,4) ) plot(out[t1:nt_use,paste(nRsp+nCsp+2)],t="l",ylim = c(0,max(out[t1:nt_use,(nRsp+nCsp+2):(nspp+1)],na.rm=T)), ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ((nRsp+nCsp+1):(nspp) ) ) { lines(out[t1:nt_use,paste(n)],t="l") } mtext("Local Transfer Entropy", side = 3, line = 0, outer = TRUE) #Local Transfer Entropy par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nPsp, di_web[[w]]$te_local[t1:nt_use,(nRsp+nCsp+1):(nspp)], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3) ###Consumer species par( mar = c(0.5,4,0,4) ) #Consumer species in BLUE plot(out[t1:nt_use,paste(nRsp+2)],t="l",col="blue",ylim = c(0,max(out[t1:nt_use,(nRsp+2):(nRsp+nCsp+1)],na.rm=T)) , ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ( (nRsp+1):(nRsp+nCsp) ) ) { lines(out[t1:nt_use,paste(n)],t="l",col="blue") } #Local transfer entropy par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nCsp, di_web[[w]]$te_local[t1:nt_use,(nRsp+1):(nRsp+nCsp)], ylab="Species #", 
xlab="Time",col=viridis(nlevel),,cex.main=1.3,cex.lab=1.3 ) ###Resource Species par( mar = c(0.5,4,0,4) ) #Resource species in RED plot(out[1:tl,"1"],t="l",col="red",ylim = c(rs1,max(out[t1:nt_use,2:(nRsp+1)],na.rm=T)), ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2, ) for( n in 1:(nRsp) ) { lines(out[t1:nt_use,paste(n)],t="l",col="red") } #local transfer entropy par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nRsp, di_web[[w]]$te_local[t1:nt_use,1:nRsp], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3 ) ###Plot color bars for image plots: #Color bar 1 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$te_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 2 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$te_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 3 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$te_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) dev.off() #===========================================# #plot3: Information modification (SI) #===========================================# # fig.name = paste("dynamic_info_SI_rweb1.pdf",sep="") # pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', pointsize=16) #When the figure is only over a subset of the time to show transient dynamics: fig.name = paste("dynamic_info_SI_rweb1_sub.pdf",sep="") pdf(file=fig.name, height=8, width=8, onefile=TRUE, family='Helvetica', 
pointsize=16) layout.matrix=matrix(c(1:12), nrow = 6, ncol = 2) layout(mat = layout.matrix, heights = c(1.5, 3.5,1.5, 3.5, 1.5, 3.5, 1.5, 3.5,1.5, 3.5, 1.5, 3.5), # Heights of the rows widths = c(12,1)) # Widths of columns #layout.show(12) #par(mfrow=c(2,1),mai= c( 0.0, 0.2, 0.0, 0.2), omi=c(0.5,0.75,0.5,0.75)) #,mai= c( 1, 0, 0.2, 0), omi=c(2,0.75,2,0.75)) ###Common figure properties nlevel = 64 #For viridis color t1 = 5840 nlevel = 64 #For viridis color scheme #nt_use = dim(di_web[[w]]$ai_local)[1] nt_use = 5940 rs1 = 450 #lower bound for Resource population plot par(oma = c(3,2,3,3) ) #===========================================# #===========================================# ###Predator species par( mar = c(0.5,4,0,4) ) plot(out[t1:nt_use,paste(nRsp+nCsp+2)],t="l",ylim = c(0,max(out[tl,(nRsp+nCsp+2):(nspp+1)],na.rm=T)), ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ((nRsp+nCsp+1):(nspp) ) ) { lines(out[t1:nt_use,paste(n)],t="l") } mtext("Local Seprable Information", side = 3, line = 0, outer = TRUE) #Local seprable informatio par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nPsp, di_web[[w]]$si_local[t1:nt_use,(nRsp+nCsp+1):(nspp)], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3) ###Consumer species par( mar = c(0.5,4,0,4) ) #Consumer species in BLUE plot(out[t1:nt_use,paste(nRsp+2)],t="l",col="blue",ylim = c(0,max(out[t1:nt_use,(nRsp+2):(nRsp+nCsp+1)],na.rm=T)) , ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2) for( n in ( (nRsp+1):(nRsp+nCsp) ) ) { lines(out[t1:nt_use,paste(n)],t="l",col="blue") } #Local separable information par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nCsp, di_web[[w]]$si_local[t1:nt_use,(nRsp+1):(nRsp+nCsp)], ylab="Species #", xlab="Time",col=viridis(nlevel),,cex.main=1.3,cex.lab=1.3 ) ###Resource Species par( mar = c(0.5,4,0,4) ) #Resource species in RED plot(out[t1:nt_use,"1"],t="l",col="red",ylim = c(rs1,max(out[tl,2:(nRsp+1)],na.rm=T)), 
ylab="Population", xlab="", xaxs="i", xaxt="n",yaxs="i",cex.main=1.2,cex.lab=1.2, ) for( n in 1:(nRsp) ) { lines(out[t1:nt_use,paste(n)],t="l",col="red") } #Local separable information par( mar = c(2,4,0,4) ) image( t1:nt_use, 1:nRsp, di_web[[w]]$si_local[t1:nt_use,1:nRsp], ylab="Species #", xlab="Time",col=viridis(nlevel),cex.main=1.3,cex.lab=1.3 ) ###Plot color bars for image plots: #Color bar 1 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$si_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 2 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$si_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) #Color bar 3 par( mar = c(0.5,0.5,0.5,0.5) ) frame() par( mar = c(3,0,0,2) ) var_dist = di_web[[w]]$si_local[t1:nt_use,(nRsp+nCsp+1):(nspp)] image(1,(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), t(seq(min(var_dist),max(var_dist),max(var_dist)/nlevel)), ylab="",xaxt='n',col=viridis(nlevel)) dev.off() #=============================================================================== #=============================================================================== #=============================================================================== ###Or plot a subset of the data: nt1 = 5000 nt2 = tl-50 image.plot( nt1:nt2, 1:nspp, di_web[[w]]$ee_local[nt1:nt2,], ylab="Species number", xlab="Time" ) #Local active information storage image.plot( nt1:nt2, 1:nspp, di_web[[w]]$ai_local[nt1:nt2,], ylab="Species number", xlab="Time" ) #Local transfer entropy image.plot( nt1:nt2, 1:nspp, di_web[[w]]$te_local[nt1:nt2,], ylab="Species number", xlab="Time" ) #Local separable information image.plot( 
nt1:nt2, 1:nspp, di_web[[w]]$si_local[nt1:nt2,], ylab="Species number", xlab="Time" ) abline(h =out1[[w]]$spp_prms$nRsp ) mtext("Resources", side=2, at = c( out1[[w]]$spp_prms$nRsp/2 ) ) abline(h =out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp ) mtext("Consumers", side=2, at = c( (out1[[w]]$spp_prms$nCsp+out1[[w]]$spp_prms$nRsp )-(out1[[w]]$spp_prms$nCsp)/2 ) ) mtext("Predators", side=2, at = c( nspp-(out1[[w]]$spp_prms$nPsp)/2 ) ) out1[[w]]$out[10000,]>1e-5 #Generate quantities for the maximum entropy distribution, i.e. uniform: pop_me = runif(nspp) me_freq = pop_me/matrix(sum(pop_me),length(pop_me),1)
9c1c990cfa4dffa7fbe1b7a45b41c8b6e7b5b489
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/biogeo/examples/edat.Rd.R
33baa36668112e43aa008500b818a87bd4ad4fa5
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
202
r
edat.Rd.R
library(biogeo) ### Name: edat ### Title: Species collection records dataset and environmental variables ### data ### Aliases: edat ### Keywords: datasets ### ** Examples data(edat) head(edat)
99695970907a3682c3e333eddff2a71cec5280f9
283409d2a37155d58855bc9be3b78e0ad7cdacb8
/Assignment-1/5.R
7a3759b15e659430d9187dbbe15349b6ba4cd7a7
[]
no_license
VivianeLovatel/Brasil_2019
ddca243336145336c94ce09ff97d1918bf67e95d
82128b52ed7fa47d343cfaccf6da698f44e9883a
refs/heads/master
2020-07-07T00:45:58.305182
2019-08-20T15:20:36
2019-08-20T15:20:36
203,190,340
0
0
null
null
null
null
UTF-8
R
false
false
166
r
5.R
numLions = 42 numTigers = 17 country ="South Africa" a="is" b ="The number of lions in" c<-"The number of tigers in" paste(b,country,a,numLions,c,country,a,numTigers)
20e9eecec8e25b8ed149eb6a6dc01010cb908e5c
541b8e18f977371bc002aa506489d6ab0dc6b165
/man/r18S_cov_tbl.Rd
c5040f3377fa1a2697c280416db730282f8da0b7
[]
no_license
hesselberthlab/endoU
2c1953418eebac8f1bcca01bd6d64b59e67d0032
bc926ec7b7368ccab93916a19c8d2a241b3c4eee
refs/heads/master
2022-12-31T14:09:14.934835
2020-10-23T22:45:18
2020-10-23T22:45:18
151,328,734
0
0
null
null
null
null
UTF-8
R
false
true
364
rd
r18S_cov_tbl.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{r18S_cov_tbl} \alias{r18S_cov_tbl} \title{18S coverage table} \format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 65736 rows and 7 columns.} \usage{ r18S_cov_tbl } \description{ 18S coverage table } \keyword{datasets}
7a0af2899b302027e26c3203dbf54125c46a77f0
9d9062f4972ca4eda10966882e8a35d7745b619c
/figures/defense/r-scripts/plotTraceUQPosterioTC216.R
708b4bdf644d22065489dbf83bab915f664258db
[]
no_license
damar-wicaksono/wd41-thesis
651abda4d1565745b15993f9d3c1ebd349e83bcc
7af132b0d5755a702490970e64a8c2986bd00f45
refs/heads/master
2021-01-17T05:26:23.472518
2019-07-12T22:34:35
2019-07-12T22:34:35
60,852,652
5
1
null
null
null
null
UTF-8
R
false
false
2,954
r
plotTraceUQPosterioTC216.R
# # title : plotTraceUQPosterioTC216.R # purpose : R script to create plot of UQ propagation using posterior samples # : for TC output # : FEBA Test No. 216 # author : WD41, LRS/EPFL/PSI # date : Jan. 2018 # # Load required libraries ----------------------------------------------------- library(ggplot2) # Global variables ------------------------------------------------------------ # FEBA Test No feba_test <- 216 # Input filename rds_tidy_prior_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/srs/febaTrans", feba_test, "-febaVars12Influential-srs_1000_12-tc-tidy.Rds") # Graphic variables fig_size <- c(9.75, 4.0) # Make the plot --------------------------------------------------------------- # w/ Bias # Output filename otpfullname <- paste0("./figures/plotTraceUQPosteriorAllDiscCenteredTC", feba_test, ".png") # Input filenames, posterior samples, correlated and independent rds_tidy_corr_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/disc/centered/all-params/correlated/febaTrans", feba_test, "-febaVars12Influential-mcmcAllDiscCentered_1000_12-tc-tidy.RDs") rds_tidy_ind_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/disc/centered/all-params/independent/febaTrans", feba_test, "-febaVars12Influential-mcmcAllDiscCenteredInd_1000_12-tc-tidy.RDs") # Make the plot source("./r-scripts/plotTraceUQPosteriorTC.R") # w/o Bias # Output filename otpfullname <- paste0("./figures/plotTraceUQPosteriorAllNoDiscNoBCTC", feba_test, ".png") # Input filenames, posterior samples, correlated and independent rds_tidy_corr_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/nodisc/not-centered/fix-bc/correlated/febaTrans", feba_test, "-febaVars12Influential-mcmcAllNoDiscNoBC_1000_12-tc-tidy.RDs") rds_tidy_ind_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/nodisc/not-centered/fix-bc/independent/febaTrans", feba_test, "-febaVars12Influential-mcmcAllNoDiscNoBCInd_1000_12-tc-tidy.RDs") 
# Make the plot source("./r-scripts/plotTraceUQPosteriorTC.R") # w/o Parameter 8 (dffbVIHTC) # Output filename otpfullname <- paste0( "./figures/plotTraceUQPosteriorAllDiscCenteredNoParam8TC", feba_test, ".png") # Input filenames, posterior samples, correlated and independent rds_tidy_corr_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/disc/centered/no-param8/correlated/febaTrans", feba_test, "-febaVars12Influential-mcmcAllDiscCenteredNoParam8_1000_12-tc-tidy.RDs") rds_tidy_ind_fullname <- paste0( "../../../wd41-thesis/figures/data-support/postpro/disc/centered/no-param8/independent/febaTrans", feba_test, "-febaVars12Influential-mcmcAllDiscCenteredNoParam8Ind_1000_12-tc-tidy.RDs") # Make the plot source("./r-scripts/plotTraceUQPosteriorTC.R")
eade0e4215ccc753881a36952489f5958d5d492f
37db44bf803a83936031efd79a3727f8c5c2ab51
/PK/server.R
a14fe31291029c8873b76fc53da2bf0372036d34
[]
no_license
jeffwzhong1994/R-shiny-web
81b226d03c1b491c5ee4472f8b15507c12d4d967
b5c082de47593e2d3fdc6d8253cbc8bdf4d66729
refs/heads/master
2020-09-20T03:15:33.284097
2019-11-28T00:57:34
2019-11-28T00:57:34
224,364,866
0
0
null
null
null
null
UTF-8
R
false
false
717
r
server.R
server=function(input,output){ output$pk=renderDataTable({ library(dplyr) library(lubridate) data=read.csv("pk records.csv") data$a.rtime=ymd_hms(data$a.rtime)-hours(16) data$year = paste(year(data$a.rtime),"-",month(data$a.rtime)) data$year data$a.rtime= as.Date(data$a.rtime) data$a.rtime summary(data) data%>% filter(a.uid == input$UID)%>% filter(a.rtime==input$Date)%>% mutate(PK_points=a.recvbeans*10)%>% mutate(Opponent_PK_points=a.peerrecvbeans*10)%>% select( time = a.rtime, uid=a.uid, PK_points, OpponentUID=a.peeruid, Opponent_PK_points) }) } app <- shinyApp(ui = ui, server = server) runApp(app, host ="0.0.0.0", port = 80)
b5f22dfc2630d855b9f02170ee45be598d4381a5
6b955291e90d4097e13c3808523e2d20b3a71398
/man/Gini.Rd
94b6cc85bb3020052875d8743c9c48f707053a89
[]
no_license
cran/shipunov
640f34408ae65c59a8fa655c23d01e5e86af38bc
8cd6acac881f048a17ddafcfc414a4894fa02f63
refs/heads/master
2023-03-16T23:19:10.341396
2023-02-05T13:42:56
2023-02-05T13:42:56
185,279,307
0
1
null
null
null
null
UTF-8
R
false
false
1,269
rd
Gini.Rd
\name{Gini} \alias{Gini} \title{Compute the simple Gini coefficient} \description{ Computes the simple Gini coefficient of unequality } \usage{ Gini(x) } \arguments{ \item{x}{a numeric vector with non-negative elements} } \details{ Gini coefficient is a common measure of inequality. Here it presents only for the convenience to have this calculation "outside" of social science R packages (where it commonly presents). Please read elsewhere of its meaning and uses. Code is based on the 'reldist' package from Mark S. Handcock but simplified to revome the using of weights (as a sideway result, it should be slightly faster). } \value{ The Gini coefficient (number between 0 and 1). } \references{ \emph{Relative Distribution Methods in the Social Sciences}, by Mark S. Handcock and Martina Morris, Springer-Verlag, Inc., New York, 1999. ISBN 0387987789. } \author{Alexey Shipunov} % \seealso{} \examples{ salary <- c(21, 19, 27, 11, 102, 25, 21) Gini(salary) new.1000 <- sample((median(salary) - IQR(salary)) : (median(salary) + IQR(salary)), 1000, replace=TRUE) salary2 <- c(salary, new.1000) Gini(salary2) salary3 <- salary[-which.max(salary)] salary3 Gini(salary3) salary4 <- c(salary3, 1010) salary4 Gini(salary4) } \keyword{univar}
34662d734a7fd2b67d6687211666ed3eb519f1a1
3ef1867d88291165d60c1189f84a27fb04ab1b7c
/exam/exam2_Q2.R
41a5536f448d5eb1e789840f12d467db3a80f51b
[]
no_license
dalsgit/510
d319476b7ba1e2319c91c55f9d010749bb926a65
04af567cf4339edebd2d09ca8982183a59d01d3f
refs/heads/master
2021-01-19T09:03:52.182561
2017-05-15T20:11:40
2017-05-15T20:11:40
82,082,671
0
0
null
null
null
null
UTF-8
R
false
false
825
r
exam2_Q2.R
setwd("C:/study/psu/git/510/exam") library(astsa) y=ts(scan("e2q2.txt")) plot(y,type="b") diff1 = diff(y,1) plot(diff1,type="b") model = ts.intersect(y, lag1y=lag(y,-1)) x = model[,1] P = model[,2] c = -86 ## Threshold value ##Regression for values below the threshold less = (P<c) x1 = x[less] P1 = P[less] out1 = lm(x1~P1) summary(out1) ##Regression for values above the threshold greater = (P>=c) x2 = x[greater] P2 = P[greater] out2 = lm(x2~P2) summary(out2) ##Residuals res1 = residuals(out1) res2 = residuals(out2) less[less==1]= res1 greater[greater==1] = res2 resid = less + greater acf2(resid) ##Predicted values less = (P<c) greater = (P>=c) fit1 = predict(out1) fit2 = predict(out2) less[less==1]= fit1 greater[greater==1] = fit2 fit = less + greater plot(y, type="o") lines(fit, col = "red", lty="dashed")
b397c046b66223ac297a1119571f15956cd1a9a7
22c8c61fd3f43093dba2ca6320804ec726c7a7e5
/1_linear_model_miRNA_mRNA.R
40c5c51b1a4ca0fa907989acff08c7c782b28bc6
[ "MIT" ]
permissive
rwindsor1/miRNA_hallmarks_of_cancer
add8d92ba2f143ac282604377a25e355831f7733
0c6e504033ef3d79ef642fa8a8f339feb4f20ad6
refs/heads/master
2020-04-23T07:14:32.169467
2018-10-10T01:53:38
2018-10-10T01:53:38
null
0
0
null
null
null
null
UTF-8
R
false
false
17,928
r
1_linear_model_miRNA_mRNA.R
#linear model of miRNAs predicting various signature scores library(RankProd) library(reshape2) library(penalized) cancer_types_list <- list(); cancer_types_list[[1]] <- c('BRCA','UCEC','HNSC') cancer_types_list[[2]] <- c('KIRC','LUAD','THCA') cancer_types_list[[3]] <- c('PRAD','LUSC','OV') cancer_types_list[[4]] <- c('STAD','BLCA','COAD') cancer_types_list[[5]] <- c('LIHC','CESC','KIRP') all_cancer_types <- melt(cancer_types_list)$value #load the signatures sig_fnames_list <- list(); sig_names_list <- list(); categories_of_sigs <- c('invasion','energetics','immortality','growth_suppressors','genome_instability','angiogenesis','apoptosis','proliferation','inflammation') sig_fnames_list[['invasion']] <- c('HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION.txt','invasiveness_gene_sig_entrez_marsan2014.txt') sig_names_list[['invasion']] <- c('Hallmark: Epithelial Mesenchymal Transition','Invasiveness, Marsan 2014') sig_fnames_list[['energetics']] <- c('HALLMARK_OXIDATIVE_PHOSPHORYLATION.txt','HALLMARK_REACTIVE_OXIGEN_SPECIES_PATHWAY.txt') sig_names_list[['energetics']] <- c('Hallmark: Oxidative Phosphorylation','Hallmark: Reactive Oxygen Species Pathway') sig_fnames_list[['immortality']] <- c('HALLMARK_G2M_CHECKPOINT.txt') sig_names_list[['immortality']] <- c('Hallmark: G2M Checkpoint') sig_fnames_list[['growth_suppressors']] <- c('HALLMARK_PI3K_AKT_MTOR_SIGNALING.txt','HALLMARK_XENOBIOTIC_METABOLISM.txt') sig_names_list[['growth_suppressors']] <- c('Hallmark: PI3K AKT MTOR Signaling','Hallmark: Xenobiotic Metabolism') sig_fnames_list[['genome_instability']] <- c('HALLMARK_DNA_REPAIR.txt','HALLMARK_P53_PATHWAY.txt') sig_names_list[['genome_instability']] <- c('Hallmark: DNA Repair','Hallmark: p53 Pathway') sig_fnames_list[['angiogenesis']] <- c('hypoxia_gene_sig_entrez_probes.txt','HALLMARK_ANGIOGENESIS.txt','HALLMARK_HYPOXIA.txt','angiogenesis_gene_sig_entrez_desmedt2008_pos.txt','Masiero2013angiogenesisENTREZ.txt') sig_names_list[['angiogenesis']] <- c('Hypoxia, Buffa 
2010','Hallmark: Angiogenesis','Hallmark: Hypoxia','Angiogenesis, Desmedt 2008','Angiogenesis, Masiero 2013') sig_fnames_list[['apoptosis']] <- c('HALLMARK_APOPTOSIS.txt','apoptosis_gene_sig_entrez_desmedt2008_pos.txt') sig_names_list[['apoptosis']] <- c('Hallmark: Apoptosis','Apoptosis, Desmedt 2008') sig_fnames_list[['proliferation']] <-c('proliferation_gene_sig_entrez_desmedt2008_pos.txt','HALLMARK_KRAS_SIGNALING_UP.txt') sig_names_list[['proliferation']] <-c('Proliferation, Desmedt 2008','Hallmark: KRAS Signaling Up') sig_fnames_list[['inflammation']] <- c('HALLMARK_INFLAMMATORY_RESPONSE.txt','HALLMARK_IL2_STAT5_SIGNALING.txt','HALLMARK_IL6_JAK_STAT3_SIGNALING.txt','HALLMARK_TGF_BETA_SIGNALING.txt','HALLMARK_TNFA_SIGNALING_VIA_NFKB.txt','immune_gene_sig_entrez_desmedt2008_pos.txt') sig_names_list[['inflammation']] <- c('Hallmark: Inflammatory Response','Hallmark: IL2 STAT5 Signaling','Hallmark: IL6 JAK STAT3 Signaling','Hallmark: TGF Beta Signaling','Hallmark: TNFa Signaling via NFKB','Immune, Desmedt 2008') sigs_list_by_cat <- list(); for(sig_category in categories_of_sigs){ sigs_list_by_cat[[sig_category]] <- list(); for(i in 1:length(sig_fnames_list[[sig_category]])){ fname <- sig_fnames_list[[sig_category]][i] genes = read.csv(paste0('gene_signatures/',fname), header=F, stringsAsFactors=F, colClasses = "character") # print(genes) sigs_list_by_cat[[sig_category]][[sig_names_list[[sig_category]][i]]]<- genes } } #load the datasets all_mRNA_datasets <- list(); for (cancer_type in all_cancer_types){ print(cancer_type) if(cancer_type!='BRCA'){ fname_mrna <- paste0('../Reprocessed GDAC data/',cancer_type,'/mRNA/tumour/cleaned_mRNA.txt') }else{ fname_mrna <- paste0('../Reprocessed GDAC data/',cancer_type,'/mRNA/tumour/cleaned_mRNA_ductal.txt') } all_mRNA_datasets[[cancer_type]] <- read.table(fname_mrna, sep='\t',stringsAsFactors = FALSE, header=TRUE,quote="") colnames(all_mRNA_datasets[[cancer_type]]) <- gsub('[.]','-',colnames(all_mRNA_datasets[[cancer_type]])) # 
want log2 data all_mRNA_datasets[[cancer_type]] <- log2(all_mRNA_datasets[[cancer_type]]+1) all_mRNA_datasets[[cancer_type]][!is.finite(as.matrix(all_mRNA_datasets[[cancer_type]]))] <- NA } #load the miRNA all_miRNA_datasets <- list(); all_miRNA <- c() for (cancer_type in all_cancer_types){ fname_miRNA <- paste0('../Reprocessed GDAC data/',cancer_type,'/miRNA/tumour/cleaned_miRNA_mature.txt') all_miRNA_datasets[[cancer_type]] <- read.table(fname_miRNA, sep='\t',stringsAsFactors = FALSE, header=TRUE,quote="") colnames(all_miRNA_datasets[[cancer_type]]) <- gsub('[.]','-',colnames(all_miRNA_datasets[[cancer_type]])) all_miRNA <- unique(c(all_miRNA,rownames(all_miRNA_datasets[[cancer_type]]))) } all_coeffs <- list(); all_rank_product_matrices <- list(); for (category in categories_of_sigs){ count <- 1 for (gene_sig in sigs_list_by_cat[[category]]){ sig_name <- sig_names_list[[category]][count] print(sig_name) all_coeffs[[sig_name]] <- matrix(0,nrow=length(all_miRNA),ncol=length(all_cancer_types)) row.names(all_coeffs[[sig_name]]) <- all_miRNA colnames(all_coeffs[[sig_name]]) <- all_cancer_types for (cancer_type in all_cancer_types){ print(cancer_type) genes_present <- intersect(rownames(all_mRNA_datasets[[cancer_type]]),gene_sig$V1) #compute and score the scores scores <- apply(all_mRNA_datasets[[cancer_type]][genes_present,], 2, function(x) median(x,na.rm=T)) #cross-validated linear model coeffs <- get_coefficients_pre_filter(cancer_type,scores) #store the miRNA results all_coeffs[[sig_name]][names(coeffs),cancer_type] <- coeffs } # #compute the rank-product matrix # all_rank_product_matrices[[sig_name]] <- make_rank_prod_matrix(all_coeffs[[sig_name]]) count <- count + 1 } } #the above code is run on a server for each signature in parallel, and the files are saved into a folder #called 'server_data.' 
Using this, we re-load everythng in R and then compute the overall rank prod matrices #the following is the code to load in from all signatures the files from the code running on server all_signatures <- melt(sig_names_list)$value rank_prod_tables <- list(); RP_out_values <- list(); all_coeffs_tmp <- list(); for (sig_name in all_signatures){ load(paste0('server_data/all_coeffs_',sig_name,'.rda')) all_coeffs_tmp[[sig_name]] <- all_coeffs[[sig_name]] } all_coeffs <- all_coeffs_tmp #library(rankProd) rank_prod_tables <- list(); RP_out_values <- list(); # for (category in categories_of_sigs){ # count <- 1 # for (gene_sig in sigs_list_by_cat[[category]]){ # sig_name <- sig_names_list[[category]][count] #here we need to do the rankprod for (sig_name in all_signatures){ all_coeffs[[sig_name]] <- all_coeffs[[sig_name]][which(rowSums(all_coeffs[[sig_name]]==0) < length(colnames(all_coeffs[[sig_name]]))),] print(dim(all_coeffs[[sig_name]])) RP.out <- RP(all_coeffs[[sig_name]],rep(1,15)) RP_out_values[[sig_name]] <- RP.out rank_prod_tables[[sig_name]] <- topGene(RP.out,cutoff = 0.05,method="pfp",gene.names=rownames(all_coeffs[[sig_name]])) # count <- count + 1 } # } #then save the outputs save(file='rank_prod_output_pre_filtered.rda',RP_out_values) save(file='rank_prod_tables_out_pre_filtered.rda',rank_prod_tables) for (sig_name in all_signatures){ #for each sig let's save the heatmap of the miRNA coefficients to see whether the cancers act the same all_coeffs_tmp_mod <- all_coeffs[[sig_name]][which(rowSums(all_coeffs[[sig_name]]!=0)!=0),] gplots::heatmap.2( all_coeffs_tmp_mod, col = gplots::colorpanel(100,"blue","white","red"),#gplots::colorpanel(100,"white","red"),#gplots::redgreen(100),#gplots::colorpanel(100,"blue","white","red"), #redgreen(100),#colorpanel(100,"red","yellow","green"), trace = "none", xlab = "Gene ID", ylab="Gene ID", na.color="grey", #labRow=rownames(autocors), #labCol=colnames(autocors),#gene_sig, main = paste0("\n\n", sig_name), dendrogram = "both", 
#symbreaks = T, Rowv = T,Colv=T ,key.xlab='Rho',key.ylab=NA, key.title=NA,margins=c(7,7),cexRow=0.15,cexCol=0.45) dev.copy(pdf,paste0('miRNA_hmap_preFiltered_',sig_name,'.pdf'),width=12,height=12) dev.off() } # #----------the following is to make a heatmap but for the miRNA that recur among cancer types for each signature themselves, not the families: # all_sigs_miRNA_list <- list() # for (sig_name in all_signatures){ # cur_miRNAs_list <- c() # for (cancer_type in all_cancer_types){ # cur_miRNAs_list <- c(cur_miRNAs_list,rownames(all_coeffs[[sig_name]])[which(all_coeffs[[sig_name]][,cancer_type] < 0)]) # } # print(table(cur_miRNAs_list)) # all_sigs_miRNA_list[[sig_name]] <- table(cur_miRNAs_list) # } #counts frequency of the miRNA occurring across all cancer types as significant # heatmap_matrix <- matrix(0, nrow=length(unique(melt(all_sigs_miRNA_list)[,1])),ncol=length(all_signatures)) # row.names(heatmap_matrix) <- unique(melt(all_sigs_miRNA_list)[,1]) # colnames(heatmap_matrix) <- all_signatures # for (sig_name in all_signatures){ # heatmap_matrix[names(all_sigs_miRNA_list[[sig_name]]),sig_name] <- as.numeric(all_sigs_miRNA_list[[sig_name]]) # } # gplots::heatmap.2( heatmap_matrix, # col = gplots::colorpanel(100,"white","red"),#gplots::colorpanel(100,"white","red"),#gplots::redgreen(100),#gplots::colorpanel(100,"blue","white","red"), #redgreen(100),#colorpanel(100,"red","yellow","green"), # trace = "none", # # xlab = "Gene ID", # # ylab="Gene ID", # na.color="grey", # #labRow=rownames(autocors), # #labCol=colnames(autocors),#gene_sig, # main = paste0("\n", "miRNA down freq \nof occurrence"), # dendrogram = "both", # #symbreaks = T, # Rowv = T,Colv=T ,key.xlab='Rho',key.ylab=NA, key.title=NA,margins=c(7,7),cexRow=0.11,cexCol=0.35) # dev.copy(pdf,paste0('miRNA_freq_preFiltered_DOWN_all_sigs.pdf'),width=12,height=12) # dev.off() # #-------------------------------------------------------------------------------------------------- get_coefficients <- 
function(cancer_type,scores){ #load in the miRNA data #fname_miRNA <- paste0('../Reprocessed GDAC data/',cancer_type,'/miRNA/tumour/cleaned_miRNA_mature_log2.txt') miRNA_data <- all_miRNA_datasets[[cancer_type]]#read.table(miRNA_fName, sep='\t',stringsAsFactors = FALSE, header=TRUE,quote="") #colnames(miRNA_data) <- gsub('[.]','-',colnames(miRNA_data)) #take only common subset of miRNA and scores common_colNames <- intersect(colnames(miRNA_data),names(scores)) #take just the common pieces miRNA_data <- miRNA_data[,common_colNames] scores <- scores[common_colNames] #z-transform the scores scores <- as.numeric(scores) - mean(as.numeric(scores))/sd(as.numeric(scores)) print(sum(is.na(scores))) #expression filter for miRNA expression_threshold <- 0.80 # means that at least 10% of samples must have a nonzero value of the mRNA miRNA_data <-miRNA_data[which((rowSums(miRNA_data==0)) < ((1-expression_threshold) * length(colnames(miRNA_data)))),] #remove NA values from miRNA data # expression_threshold <- 0.5 # miRNA_data <-miRNA_data[which((rowSums(is.na(miRNA_data)==0)) < ((1-expression_threshold) * length(colnames(miRNA_data)))),] # miRNA_data <-miRNA_data[which(rowSums(is.na(miRNA_data))==0),] miRNA_data <- as.matrix(log2(miRNA_data)) miRNA_data[!(is.finite(miRNA_data))] <- NA #z-transform the miRNA data for (j in 1:length(rownames(miRNA_data))){ miRNA_data[j,] <- (as.numeric(miRNA_data[j,]) - mean(as.numeric(miRNA_data[j,])))/sd(as.numeric(miRNA_data[j,])) } print(paste0("mirna " , sum(is.na(miRNA_data)))) #penalised linear regression new_df <- na.omit(t(rbind(scores,miRNA_data))) colnames(new_df) <- c('scores',rownames(miRNA_data)) print(new_df[1:4,1:4]) lambda_2_values <- c(0, 0.01, 0.1,1,10,100) max_likelihood <- -9999999999 for (lambda2_val in lambda_2_values){ cross_val_model <- optL1(response = new_df[,1],penalized = new_df[,2:length(colnames(new_df))], lambda2 = 
lambda2_val,data=as.data.frame(new_df),model="linear",fold=10,trace=F)#,trace=F,maxiter=1000,tol=.Machine$double.eps^0.23) # cross_val_model <- optL2(response = all_sig_scores[,1],penalized = all_sig_scores[,2:length(colnames(all_sig_scores))], minlambda2 = 0,maxlambda2=100,data=all_sig_scores[,2:length(colnames(all_sig_scores))],model="linear",fold=10)#lambda2 = lambda2_val,data=all_sig_scores,model="linear",fold=10) if ((cross_val_model$fullfit)@loglik > max_likelihood){ best_model <<- cross_val_model best_lambda <- lambda2_val } } miRNA_names_reported <- intersect(names(coef(best_model$fullfit)), rownames(miRNA_data)) #best_coef_matrix[rownames(miRNA_matrix)[i],mRNA_names_reported] <- coef(best_model$fullfit)[mRNA_names_reported] #return the coefficients coef(best_model$fullfit)[miRNA_names_reported] } get_coefficients_pre_filter <- function(cancer_type,scores){ #load in the miRNA data #fname_miRNA <- paste0('../Reprocessed GDAC data/',cancer_type,'/miRNA/tumour/cleaned_miRNA_mature_log2.txt') miRNA_data <- all_miRNA_datasets[[cancer_type]]#read.table(miRNA_fName, sep='\t',stringsAsFactors = FALSE, header=TRUE,quote="") #colnames(miRNA_data) <- gsub('[.]','-',colnames(miRNA_data)) #take only common subset of miRNA and scores common_colNames <- intersect(colnames(miRNA_data),names(scores)) #take just the common pieces miRNA_data <- miRNA_data[,common_colNames] scores <- scores[common_colNames] #z-transform the scores scores <- as.numeric(scores) - mean(as.numeric(scores))/sd(as.numeric(scores)) print(sum(is.na(scores))) #expression filter for miRNA expression_threshold <- 0.80 # means that at least 10% of samples must have a nonzero value of the mRNA miRNA_data <-miRNA_data[which((rowSums(miRNA_data==0)) < ((1-expression_threshold) * length(colnames(miRNA_data)))),] #remove NA values from miRNA data # expression_threshold <- 0.5 # miRNA_data <-miRNA_data[which((rowSums(is.na(miRNA_data)==0)) < ((1-expression_threshold) * length(colnames(miRNA_data)))),] # 
miRNA_data <-miRNA_data[which(rowSums(is.na(miRNA_data))==0),] miRNA_data <- as.matrix(log2(miRNA_data)) miRNA_data[!(is.finite(miRNA_data))] <- NA #z-transform the miRNA data for (j in 1:length(rownames(miRNA_data))){ miRNA_data[j,] <- (as.numeric(miRNA_data[j,]) - mean(as.numeric(miRNA_data[j,])))/sd(as.numeric(miRNA_data[j,])) } print(paste0("mirna " , sum(is.na(miRNA_data)))) #first we need to subset the data into folds new_df <- na.omit(t(rbind(scores,miRNA_data))) colnames(new_df) <- c('scores',rownames(miRNA_data)) folds <- 10 nrows_combined_df <- 1:dim(new_df)[1] best_overall_error <- 99999999 for (i in 0:(folds-1)){ new_df_subset <- as.data.frame(new_df[!(nrows_combined_df%%folds==i),]) #takes out the 1/nth row of the data set #train the univaraite model #put these as inputs to the penalized model linear_models_miRNA <- matrix(,nrow=length(rownames(miRNA_data)),ncol=1) row.names(linear_models_miRNA) <- rownames(miRNA_data) for (j in 1:length(rownames(miRNA_data))){ univariate_data <- as.data.frame(cbind(new_df_subset[,1],new_df_subset[,(j+1)])) colnames(univariate_data) <- c('sig_score','miRNA') # print(univariate_data) univariate_model <- lm(formula = sig_score ~ miRNA,data = univariate_data) # print(summary(univariate_model)) linear_models_miRNA[j] <- (summary(univariate_model)$coefficients)[2,4] #tmp_model <- coef(summary(coxph(Surv(as.numeric(combined_df_subsetted$times),as.numeric(combined_df_subsetted$events)) ~ combined_df_subsetted[,j+2]))) #cox_models_circRNA[j] <- tmp_model[5] #c(tmp_model[2],tmp_model[5]) } #significant miRNAs are those w p < 0.2: significant_miRNAs <- rownames(linear_models_miRNA)[which(linear_models_miRNA < 0.2 & !is.nan(linear_models_miRNA))] # print("sig MiRNA") # print(significant_miRNAs) #penalised linear regression # print(new_df_subset[1:4,1:4]) lambda_2_values <- c(0, 0.01, 0.1,1,10,100) max_likelihood <- -9999999999 for (lambda2_val in lambda_2_values){ cross_val_model <- optL1(response = new_df_subset[,1],penalized = 
new_df_subset[,significant_miRNAs], lambda2 = lambda2_val,data=as.data.frame(new_df_subset),model="linear",fold=10,trace=F)#,trace=F,maxiter=1000,tol=.Machine$double.eps^0.23) # cross_val_model <- optL2(response = all_sig_scores[,1],penalized = all_sig_scores[,2:length(colnames(all_sig_scores))], minlambda2 = 0,maxlambda2=100,data=all_sig_scores[,2:length(colnames(all_sig_scores))],model="linear",fold=10)#lambda2 = lambda2_val,data=all_sig_scores,model="linear",fold=10) if ((cross_val_model$fullfit)@loglik > max_likelihood){ best_model <<- cross_val_model best_lambda <- lambda2_val } } #now that we know the best model, let's test it on the other 1/n of the data, and record the error unused_df <- as.data.frame(new_df[(nrows_combined_df%%folds==i),]) current_predictions <- predict(best_model$fullfit, penalized=unused_df[,significant_miRNAs],data=unused_df) cur_error <- norm((as.numeric(unused_df[,1]) - as.numeric(current_predictions)),type="2") # print(cur_error) if (cur_error < best_overall_error){ best_overall_error <- cur_error best_overall_model <- best_model best_overall_lambda <- best_lambda } } miRNA_names_reported <- intersect(names(coef(best_overall_model$fullfit)), rownames(miRNA_data)) #best_coef_matrix[rownames(miRNA_matrix)[i],mRNA_names_reported] <- coef(best_model$fullfit)[mRNA_names_reported] #return the coefficients coef(best_overall_model$fullfit)[miRNA_names_reported] }
5b9bf8a8d356d8ab6a96eeba958404572ce555c0
1cb097e8ead264823dac5e5e8821ebd6132da64d
/R/utils.R
5deb022b80ae47994062a0a2d62a0eaa40bb897e
[]
no_license
impact-initiatives/koboAPI
2cd41c1655364ae8977e12a5ae4255f5908f5a2b
91b802a44edf92ba6d9eb192148d7267bce87b1b
refs/heads/master
2021-05-19T07:04:23.139014
2020-01-08T14:57:46
2020-01-08T14:57:46
251,577,490
0
1
null
2020-03-31T11:00:55
2020-03-31T11:00:54
null
UTF-8
R
false
false
488
r
utils.R
#' @name nullToNA #' @rdname nullToNA #' @title Replaces NULL values by NAs #' @description Replaces NULL values by NAs #' @param x Vector to be treated #' @return Returns the vector with NULL replaced #' @author Elliott Messeiller #' #' @export AddstartCol_SelectMultiple nullToNA <- function(x) { x[sapply(x, is.null)] <- NA return(x) } replace_x <- function(x, replacement = NA_character_) { if (length(x) == 0 || length(x[[1]]) == 0) { replacement } else { x } }
c85c1756b40b634060fb71a7ba7ee614664c8077
e7ea72b1750bb43c6ec4ad9c12479cb9c93681ba
/week2/moneyball.R
a8db134964ea82824baec086862307bc99e4dff0
[]
no_license
scholarly/ae
0ea867c7b53f5a396bc536df6950fc3e3888ad82
566ebcfd1419fba25698b7835554cbe46f308edf
refs/heads/master
2021-01-01T19:46:33.204250
2015-04-27T23:55:43
2015-04-27T23:55:43
31,639,688
0
0
null
null
null
null
UTF-8
R
false
false
782
r
moneyball.R
baseurl = "https://courses.edx.org/c4x/MITx/15.071x_2/asset/" files = c( "baseball.csv") get_data = function(url,local){ if(!file.exists(local)){ download.file(url,local,"curl") } read.csv(local) } data_dir = function(fname){ paste("data",fname,sep="/") } getm = function(file){ get_data(paste(baseurl,file,sep=""),data_dir(file)) } bb = getm(files) mb = subset(bb,Year<2002) mb$RD = mb$RS - mb$RA WinsReg = lm(W ~ RD, data=mb) RunsReg = lm(RS ~ OBP + SLG, data=mb) ORunsReg = lm(RA ~ OOBP + OSLG, data=mb) teamRank = c(1,2,3,3,4,4,4,4,5,5) wr12 =c(SFG=94,DET=88,NYY=95,STL=88,BAL=93,OAK=94,WSN=98,CIN=97,TEX=93,ATL=94) wr13 = c(BRS=97,STL=97,LAD=92,DET=93,TBR=92,OAK=96,PTP=94,ATL=96,CLV=92,CIN=90) print(cor(teamRank,wr12)) print(cor(teamRank,wr13))
3d2d73da9b69160e6c5122e8a2745b8a942620a4
860979ede989eec54b804e18c023a84f1b196ceb
/cachematrix.R
17966d1ffce838cba09eb5b222feceb3a2ebb0a2
[]
no_license
oveedl/ProgrammingAssignment2
705366e8aac50d9f34b35f6045a99365ff43a356
c691d388b1a82a1dd6be98c28dbf8f35f60124f6
refs/heads/master
2021-01-14T11:25:52.433109
2015-10-18T08:21:15
2015-10-18T08:21:15
44,273,977
0
0
null
2015-10-14T20:18:52
2015-10-14T20:18:52
null
UTF-8
R
false
false
1,321
r
cachematrix.R
## This set of functions handles an object for storing a matrix and ## and its inverse. The inverse is only calculated when it is asked ## for, and then stored in a cache to avoid more computational work ## if it should be needed again. ## Create a CacheMatrix object containing a matrix and an empty ## placeholder for the inverse. Given an object created with ## xCache <- makeCacheMatrix(x) ## the matrix x can be extracted with xCache$get(), ## and it can be exchanged for another one with xCache$set(x). makeCacheMatrix <- function(x = matrix()) { invx <- NULL set <- function(y) { x <<- y invx <<- NULL } get <- function() x setinv <- function(theinv) invx <<- theinv getinv <- function() invx list(set = set, get = get, setinv = setinv, getinv = getinv) } ## Use this function to extract the inverse from a CacheMatrix ## object. If xCache is such an object, the inverse of the matrix ## inside is retrieved with invx <- cacheSolve(xCache). If the ## inverse is not present, it is automatically calculated and ## stored in xCache, before beeing returned. cacheSolve <- function(x, ...) { invx <- x$getinv() if(!is.null(invx)) { return(invx) } xmatrix <- x$get() invx <- solve(xmatrix, ...) x$setinv(invx) invx }
313bf207b976254ddd1155cde46e56f8ff057cb9
fafb9d8b9c02b4a5dc6bad0021107feb8abdbef6
/man/change_speed.Rd
268f8bb19f8cd6bc7641532e2b186f565418a3c4
[ "MIT" ]
permissive
UBC-MDS/AudioFilters_R
e8bdf710796961bd9eef13ba99962ea6d5318259
ffd77322b38d889104692152d7ca941cd3cbd2de
refs/heads/master
2020-04-21T04:42:03.806263
2019-03-09T04:11:31
2019-03-09T04:11:31
169,320,277
1
2
MIT
2019-03-07T18:22:18
2019-02-05T22:02:43
R
UTF-8
R
false
true
587
rd
change_speed.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/change_speed.R \name{change_speed} \alias{change_speed} \title{Change the playback speed of an audio signal} \usage{ change_speed(input_signal, rate) } \arguments{ \item{input_signal}{numeric} \item{rate}{numeric, desired rate of change to the speed. To increase the speed, pass in a value greater than 1.0. To decrease the speed, pass in a value between 0.0 and 1.0.} } \value{ numeric, vector representing the audio signal with changed speed. } \description{ Change the playback speed of an audio signal }
8b30374a89723d3185be008a19e72c56841f685b
d4638aa62f44afebf5234eff1e6977b3c0738d3d
/man/market.api.process.Rd
e727cb82d424e9f60871a8590add6e448e785be7
[ "MIT" ]
permissive
cran/Rbitcoin
549137db2d433674021ee54908e5882d0da2016e
a3f72a513fa20076daff1c1d8d4fdf9014e41243
refs/heads/master
2020-05-20T09:22:30.013880
2014-09-01T00:00:00
2014-09-01T00:00:00
17,693,080
0
2
null
null
null
null
UTF-8
R
false
false
6,930
rd
market.api.process.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{market.api.process} \alias{market.api.process} \title{Process market API} \usage{ market.api.process(market, currency_pair, action, req = list(), ..., verbose = getOption("Rbitcoin.verbose", 0), on.market.error = expression(stop(e[["message"]], call. = FALSE)), on.error = expression(stop(e[["message"]], call. = FALSE)), api.dict = NULL, raw.query.res = FALSE) } \arguments{ \item{market}{character, example: \code{'kraken'}.} \item{currency_pair}{character vector of length 2, ex. \code{c(base = 'BTC', quote = 'EUR')}. Order does matter.} \item{action}{character, defined process to get organized data.} \item{req}{list with action details (price, amount, tid, oid, etc.) unified across the markets specific per action, see examples.} \item{\dots}{objects to be passed to \code{\link{market.api.query}} \itemize{ \item auth params: \code{key}, \code{secret}, \code{client_id} (last one used on bitstamp), }} \item{verbose}{integer. Rbitcoin processing messages, print to console if \code{verbose > 0}, each subfunction reduce \code{verbose} by 1. If missing then \code{getOption("Rbitcoin.verbose",0)} is used, by default \code{0}.} \item{on.market.error}{expression to be evaluated on market level error. Rules specified in \code{\link{api.dict}}.} \item{on.error}{expression to be evaluated on R level error related to \code{market.api.query}. For details read \code{\link{market.api.query}}.} \item{api.dict}{data.table user custom API dictionary definition, if not provided function will use default Rbitcoin \code{\link{api.dict}}.} \item{raw.query.res}{logical skip post-processing are return results only after \code{fromJSON} processing. Useful in case of change results structure from market API. It can always be manually post-processed as a workaround till the Rbitcoin update.} } \value{ Returned value depends on the \code{action} param. 
All actions will return market, currency pair (except \code{wallet} and \code{open_orders} which returns all currencies), R timestamp, market timestamp and below data (in case if market not provide particular data, it will result \code{NA} value): \itemize{ \item \code{'ticker'} returns \code{data.table} with fields: \code{last}, \code{vwap}, \code{volume}, \code{ask}, \code{bid}. \item \code{'wallet'} returns \code{data.table} with fields: \code{currency}, \code{amount}, \code{fee}. \item \code{'order_book'} returns \code{list} with API call level attributes and sub elements \code{[['asks']]} and \code{[['bids']]} as \code{data.table} objects with order book including already calculated cumulative \code{amount}, \code{price} and \code{value}. \item \code{'open_orders'} returns \code{data.table} with fields: \code{oid}, \code{type}, \code{price}, \code{amount}. \item \code{'place_limit_order'} returns \code{data.table} with fields: \code{oid}, \code{type}, \code{price}, \code{amount}. \item \code{'cancel_order'} returns \code{data.table} with fields: \code{oid}. \item \code{'trades'} returns \code{list} with API call level attributes and sub element \code{[['trades']]} as \code{data.table} (ASC order) with fields: \code{date}, \code{price}, \code{amount}, \code{tid}, \code{type}. } } \description{ Unified processing of API call according to API dictionary \code{\link{api.dict}}. Limited to markets and currency processing defined in \code{api.dict}, in case of currency pairs and methods not availble in dictionary use \code{\link{market.api.query}} directly. This function perform pre processing of request and post processing of API call results to unified structure across markets. It will result truncation of most (not common across the markets) attributes returned. If you need the full set of data returned by market's API you should use \code{\link{market.api.query}}. } \details{ To do not spam market's API, use \code{Sys.sleep(10)} between API calls. 
} \note{ The api dictionary was not fully tested, please follow the examples, if you find any bugs please report. Use only api dictionary \code{\link{api.dict}} from trusted source, in case if you use other \code{api.dict} it is advised to review pre-process, post-process and catch_market_error functions for markets and currency pairs you are going to use. Market level error handling might not fully work as not all markets returns API call status information. } \examples{ \dontrun{ # get ticker from market market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action='ticker') # get ticker from all markets and combine ticker_all <- rbindlist(list( market.api.process(market = 'bitstamp', currency_pair = c('BTC', 'USD'), action='ticker') ,market.api.process(market = 'btce', currency_pair = c('LTC', 'USD'), action='ticker') ,{Sys.sleep(10); market.api.process(market = 'btce', currency_pair = c('LTC', 'BTC'), action='ticker')} ,{Sys.sleep(10); market.api.process(market = 'btce', currency_pair = c('NMC', 'BTC'), action='ticker')} ,market.api.process(market = 'kraken', currency_pair = c('BTC','EUR'), action='ticker') ,{Sys.sleep(10); market.api.process(market = 'kraken', currency_pair = c('LTC','EUR'), action='ticker')} ,{Sys.sleep(10); market.api.process(market = 'kraken', currency_pair = c('BTC','LTC'), action='ticker')} )) print(ticker_all) # get wallet from market market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'wallet', key = '', secret = '') # get wallet from all markets and combine wallet_all <- rbindlist(list( market.api.process(market = 'bitstamp', currency_pair = c('BTC', 'USD'), action = 'wallet', client_id = '', key = '', secret = ''), market.api.process(market = 'btce', currency_pair = c('LTC', 'USD'), action = 'wallet', method = '', key = '', secret = ''), market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'wallet', key = '', secret = '') )) print(wallet_all) # get order book from 
market market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'order_book') # get open orders from market market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'open_orders', key = '', secret = '') # place limit order market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'place_limit_order', req = list(type = 'sell', amount = 1, price = 8000), # sell 1 btc for 8000 eur key = '', secret = '') # cancel order market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'cancel_order, req = list(oid = 'oid_from_open_orders'), key = '', secret = '') # get trades market.api.process(market = 'kraken', currency_pair = c('BTC', 'EUR'), action = 'trades') } } \seealso{ \code{\link{market.api.query}} }
584fbe394dbdb8ced0e7c1e8fa52de6de982d857
6526ee470658c2f1d6837f7dc86a81a0fbdcffd5
/man/setPodsMatrix.mwIPM.Rd
8747d4d60998f9995ac616cf0142994b466f3bde
[]
no_license
mdlama/milkweed
c7e8a24021a35eb6fbef13360400d2d4069b4649
b791c8b39802f33471f8e827f369afa47c06d6af
refs/heads/master
2023-09-06T03:00:45.554997
2022-09-14T15:25:58
2022-09-14T15:25:58
76,479,540
0
0
null
2021-09-21T19:04:44
2016-12-14T16:59:01
R
UTF-8
R
false
true
477
rd
setPodsMatrix.mwIPM.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mw_ipm.R \name{setPodsMatrix.mwIPM} \alias{setPodsMatrix.mwIPM} \title{Create pods matrix.} \usage{ \method{setPodsMatrix}{mwIPM}(obj, update = TRUE, perturb = rep(0, 4)) } \arguments{ \item{obj}{A mwIPM model object.} \item{update}{Update dependencies?} \item{perturb}{Parameter perturbation vector for sensitivity analysis.} } \value{ A mwIPM model object. } \description{ Create pods matrix. }
516e27e03cf2674661955a26536217ed968da41f
6c321997b2237e3432ebc89866e47c5636e8ccde
/man/stratifiedSamplingForCV.Rd
020b6b8cd1e59f3ae5f1abe60a61dd72f47b3ac8
[]
no_license
cran/coca
e37d4a524d58e47400158ac4cfea0ea10570038e
2baeffda08df37be4aa3b0638f99e00869a49a37
refs/heads/master
2021-05-16T23:21:41.927083
2020-07-06T16:00:09
2020-07-06T16:00:09
250,513,558
1
0
null
null
null
null
UTF-8
R
false
true
645
rd
stratifiedSamplingForCV.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fill-moc.R \name{stratifiedSamplingForCV} \alias{stratifiedSamplingForCV} \title{Divide data into 5 subsets using stratified sampling} \usage{ stratifiedSamplingForCV(response) } \arguments{ \item{response}{Vector of categorical responses} } \value{ The function returns a vector of labels to assign each observation to a different fold } \description{ This function is used to do stratified subsampling based on the number of observations in each group in the response } \author{ Alessandra Cabassi \email{alessandra.cabassi@mrc-bsu.cam.ac.uk} } \keyword{internal}
67d3bec99b99b3d4407f9dd1199c4febb1ef612b
1d8ca36b20ffe9dc150803662434fe8e04c52b5d
/607/Projects/Porject 3/Data-607-Project-Three-Dan-Branch/Data-607-Project-Three-Dan-Branch/textmining 210322 2032.R
df8d450dd54be9f4d006617fbc0743e15c187069
[]
no_license
zachsfr/Cuny-SPS
5842c51b7594b2e8da6f90125ce712ec78eed6e6
de707926e72996622f9eb63de38713f03b5f9db6
refs/heads/main
2023-05-27T14:08:29.826608
2021-06-07T19:27:21
2021-06-07T19:27:21
336,085,896
0
0
null
null
null
null
UTF-8
R
false
false
3,577
r
textmining 210322 2032.R
#This script takes atlanta, a folder of txt job descriptions for data scientists. It generates ds_skills_df, a dataframe. ds_skills_df has 1 row for each job listing in atlanta, and one column for each term in ds_skills_list. The value in each cell is the number of appearances of the column name in the listing. library(tidyverse) library(tm) atlanta <- "C:/Users/dmosc/OneDrive/Documents/academic/CUNY SPS/DATA 607/Proj3/zachsfr project three/Data-607-Project-Three/atlanta" find <- c("artificial intelligence","amazon web services","[^[[:alnum:]][Cc]\\#","[^[[:alnum:]][Cc]\\+\\+","computer science","computer vision","data analysis","data engineering","data wrangling","deep learning","large datasets","machine learning","natural language processing","neural networks","object oriented","project management","[^[[:alnum:]][Rr][^[[:alnum:]]","scikit-learn","software development","software engineering","time series") repl <- c("ai","aws"," csharp"," cplusplus","computerscience","computervision","dataanalysis","dataengineering","datawrangling","deeplearning","largedatasets","machinelearning","nlp","neuralnetworks","oop","projectmanagement"," rrrr","scikitlearn","softwaredevelopment","softwareengineering","timeseries") ds_skills_list <- c("ai","airflow","analysis","aws","azure","bigquery","c","caffe","caffe2","cassandra","communication","computerscience","computervision","cplusplus","csharp","d3","dataanalysis","dataengineering","datawrangling","databases","deeplearning","docker","excel","fintech","git","hadoop","hbase","hive","java","javascript","keras","kubernetes","largedatasets","linux","machinelearning","mathematics","matlab","mongodb","mysql","neuralnetworks","nlp","nosql","numpy","oop","pandas","perl","pig","projectmanagement","publications","python","pytorch","rrrr","sas","scala","scikitlearn","scipy","sklearn","softwaredevelopment","softwareengineering","spark","spss","sql","statistics","tableau","tensorflow","theano","timeseries","unix","visualization") #Create 
corpus from Atlanta files# atlanta_corpus <- VCorpus(DirSource(atlanta, encoding = "UTF-8"), readerControl = list(language = "en")) #transform corpus# atlanta_corpus <- tm_map(atlanta_corpus, removeWords, stopwords("english")) atlanta_corpus <- tm_map(atlanta_corpus, stripWhitespace) atlanta_corpus <- tm_map(atlanta_corpus, content_transformer(tolower)) #atlanta_corpus <- tm_map(atlanta_corpus, removePunctuation) so I can detect C#, C++ for (i in seq(length(find))) { atlanta_corpus <- tm_map(atlanta_corpus, content_transformer(function(atlanta_corpus) gsub(atlanta_corpus, pattern = find[i], replacement = repl[i]))) } atlanta_corpus <- tm_map(atlanta_corpus, removePunctuation) ########### #build document_term dataframe# document_term <- DocumentTermMatrix(atlanta_corpus) document_term <- document_term %>% as.matrix() %>% as.data.frame() #Find members of ds_skills_list in colnames(document_term)# ##PROBLEM: R is not in colnames(document_term) ds_skills_in_document_term <- cbind(ds_skills_list, ds_skills_list %in% colnames(document_term)) ds_skills_in_document_term <- as.data.frame(ds_skills_in_document_term) ds_skills_in_document_term <- ds_skills_in_document_term %>% filter(V2 == "TRUE") #build ds_skills_df dataframe# ds_skills_df <- document_term %>% select(ds_skills_in_document_term$ds_skills_list) #tidy ds_skills_df# ds_skills_df <- rownames_to_column(ds_skills_df) ds_skills_df <- rename(ds_skills_df, "listing" = "rowname", "r" = "rrrr") ds_skills_df <- ds_skills_df %>% mutate("listing" = substr(listing,0,nchar(listing)-4))
8bfe96da1ef63ec3d112f099c41e56a9fa12c376
d746fef241f9a0e06ae48cc3b1fe72693c43d808
/ark_87287/d74s4s/d74s4s-018/rotated.r
9c2f9abf140f526bce22944a801a2a1f27067859
[ "MIT" ]
permissive
ucd-library/wine-price-extraction
5abed5054a6e7704dcb401d728c1be2f53e05d78
c346e48b5cda8377335b66e4a1f57c013aa06f1f
refs/heads/master
2021-07-06T18:24:48.311848
2020-10-07T01:58:32
2020-10-07T01:58:32
144,317,559
5
0
null
2019-10-11T18:34:32
2018-08-10T18:00:02
JavaScript
UTF-8
R
false
false
195
r
rotated.r
r=0.46 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d74s4s/media/images/d74s4s-018/svc:tesseract/full/full/0.46/default.jpg Accept:application/hocr+xml
f91d7286520d167f58ac7538b510eb6cd726818a
e189d2945876e7b372d3081f4c3b4195cf443982
/man/show_samples.Rd
78f112112fc985a9ee69d63450fb1b8c4f931fcd
[ "Apache-2.0" ]
permissive
Cdk29/fastai
1f7a50662ed6204846975395927fce750ff65198
974677ad9d63fd4fa642a62583a5ae8b1610947b
refs/heads/master
2023-04-14T09:00:08.682659
2021-04-30T12:18:58
2021-04-30T12:18:58
324,944,638
0
1
Apache-2.0
2021-04-21T08:59:47
2020-12-28T07:38:23
null
UTF-8
R
false
true
765
rd
show_samples.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/icevision_utils.R \name{show_samples} \alias{show_samples} \title{Show_samples} \usage{ show_samples( dls, idx, class_map = NULL, denormalize_fn = denormalize_imagenet(), display_label = TRUE, display_bbox = TRUE, display_mask = TRUE, ncols = 1, figsize = NULL, show = FALSE, dpi = 100 ) } \arguments{ \item{dls}{dataloader} \item{idx}{image indices} \item{class_map}{class_map} \item{denormalize_fn}{denormalize_fn} \item{display_label}{display_label} \item{display_bbox}{display_bbox} \item{display_mask}{display_mask} \item{ncols}{ncols} \item{figsize}{figsize} \item{show}{show} \item{dpi}{dots per inch} } \value{ None } \description{ Show_samples }
1f15d8a0b220c92de942a2c6f762ffd945d01c58
b77b91dd5ee0f13a73c6225fabc7e588b953842b
/shared_functions/calculate_nndist_all_lg.R
23cc089ae6f7caa88ecf8f99bebc6acc68b7b0bf
[ "MIT" ]
permissive
ksamuk/gene_flow_linkage
a1264979e28b61f09808f864d5fa6c75568147b0
6182c3d591a362407e624b3ba87403a307315f2d
refs/heads/master
2021-01-18T09:18:02.904770
2017-04-02T16:51:40
2017-04-02T16:51:40
47,041,898
1
0
null
null
null
null
UTF-8
R
false
false
2,471
r
calculate_nndist_all_lg.R
calculate_nndist_all_lg <- function (stats.file, num_permutations, trace = FALSE) { ## first, build null distributions of nndists for each linkage group: nnd.stats <- list() for (j in unique(stats.file$lg)){ if(trace){cat(paste0("LG ", j, "..."))} # subset for lg j stats.file.lg <- stats.file %>% filter(stats.file$lg == j) # the number of outliers on that lg num.outliers <- stats.file.lg %>% filter(!is.na(gen.pos)) %>% select(fst.outlier) %>% unlist %>% sum(na.rm = TRUE) if(trace){cat(paste0(num.outliers, " outliers."))} if (num.outliers > 1){ # draw 10000 samples of num.outliers random loci, take the mean, and return the ecdf and mean null.mean.nnds <- replicate(num_permutations, calculate.null.nnd(stats.file.lg, num.outliers)) # calculate the estimate mean null nndist null.mean <- mean(null.mean.nnds, na.rm = TRUE) null.ecdf <- ecdf(null.mean.nnds) # calculate the empirical nndist for real outliers site.sample <- stats.file.lg %>% filter(!is.na(gen.pos)) %>% filter(fst.outlier == TRUE) %>% select(gen.pos) %>% arrange(gen.pos) %>% mutate(dist.1 = c(NA,diff(gen.pos))) %>% mutate(dist.2 = c(diff(sort(gen.pos)),NA)) nn.dist <- rep(NA, length(site.sample$genpos)) for (k in 1:length(site.sample$gen.pos)){ if(!is.na(site.sample$dist.1[k]) & !is.na(site.sample$dist.2[k])){ nn.dist[k] <- min(c(site.sample$dist.1[k],site.sample$dist.2[k])) }else if(is.na(site.sample$dist.1[k])){ nn.dist[k] <- site.sample$dist.2[k] } else if(is.na(site.sample$dist.2[k])){ nn.dist[k] <- site.sample$dist.1[k] } } empirical.mean.nnd <- mean(nn.dist, na.rm = TRUE) #number of total loci n.sites <- stats.file.lg %>% filter(!is.na(gen.pos)) %>% select(gen.pos) %>% unlist %>% length nnd.stats[[j]] <- data.frame(lg = unique(stats.file.lg$lg), n.sites = n.sites, num.outliers = num.outliers, nnd.mean.null = null.mean, nnd.sd.null = sd(null.mean.nnds, na.rm = TRUE), nnd.mean.emp = empirical.mean.nnd, nnd.emp.percentile = null.ecdf(empirical.mean.nnd), nnd.emp.zscore = (empirical.mean.nnd - 
null.mean)/sd(null.mean.nnds, na.rm = TRUE), nnd.emp.pvalue = two_side_p(null.mean.nnds, empirical.mean.nnd)) } } return(do.call("rbind", nnd.stats)) }
a8aeb00dac6753c6cc2d6dac44d7d48bc07de9d6
318db4587504dba25316efb0f68ea49ec1279914
/DTRfunction_Feb2013.R
af16cca6b202fbee38e9271568ef8b9cc2345d0c
[]
no_license
lbuckley/BuckleyetalFE2015
84b74299dab90145bb2e808dae150077edd54946
3e85de8db094600e980de19ee0f04ca52e354193
refs/heads/master
2016-09-03T00:55:14.116232
2015-10-28T20:04:08
2015-10-28T20:04:08
31,223,798
1
0
null
null
null
null
UTF-8
R
false
false
2,130
r
DTRfunction_Feb2013.R
# library( fields) #library( evd) #library( evdbayes) #library( ismev) library(chron) #convert dates #library(gdata) #library(maptools) #library(spdep) #Function to calculate Parton and Logan 1981 diurnal variation #truncated sine wave to predict daytime temperature changes and an exponential function to predict nighttime temperatures #Parameters for Colorado alpha=1.86 gamma=2.20 beta= -0.17 #Wann 1985 #alpha= 2.59 #time difference between tx and noon #beta= 1.55 #time difference between tx and sunrise #gamma= 2.2 #decay parameter for rate of t change from sunset to tn #PAtterson 1981 function from Wann 1985 Thour=function(Tmx, Tmn, Hr, tr, ts, alpha=1.86, beta=-0.17, gamma=2.20){ #Tmx= max temperature #Tmn= min temperature #Hr= hour of measurement (0-24) l= ts-tr #daylength tx= 0.5*(tr+ts)+alpha #time of maximum temperature tn= tr+ beta #time of minimum temperature #calculate temperature for nighttime hour if( !(Hr>(tr+beta) & Hr<ts) ){ Tsn= Tmn+(Tmx-Tmn)*sin((pi*(ts-tr-beta))/(l+2*(alpha-beta))) if(Hr<=(tr+beta)) Tas=Hr+24-ts if(Hr>=ts) Tas=Hr-ts #time after sunset T=Tmn+(Tsn-Tmn)*exp(-(gamma*Tas)/(24-l+beta)) } #calculate temperature for daytime hour if(Hr>(tr+beta) & Hr<ts){ T= Tmn+(Tmx-Tmn)*sin((pi*(Hr-tr-beta))/(l+2*(alpha-beta))) } return(T) } #--------------------- #PAtterson 1981 function from Wann 1985 #This function combines data together to make it easier to run across many rows Thour.mat=function(Tmat, Hr, alpha=1.86, beta=-0.17, gamma=2.20){ #Tmx= max temperature #Tmn= min temperature #Hr= hour of measurement (0-24) Tmx= Tmat[1] Tmn= Tmat[2] tr= Tmat[3] ts= Tmat[4] l= ts-tr #daylength tx= 0.5*(tr+ts)+alpha #time of maximum temperature tn= tr+ beta #time of minimum temperature #calculate temperature for nighttime hour if( !(Hr>(tr+beta) & Hr<ts) ){ Tsn= Tmn+(Tmx-Tmn)*sin((pi*(ts-tr-beta))/(l+2*(alpha-beta))) if(Hr<=(tr+beta)) Tas=Hr+24-ts if(Hr>=ts) Tas=Hr-ts #time after sunset T=Tmn+(Tsn-Tmn)*exp(-(gamma*Tas)/(24-l+beta)) } #calculate temperature for 
daytime hour if(Hr>(tr+beta) & Hr<ts){ T= Tmn+(Tmx-Tmn)*sin((pi*(Hr-tr-beta))/(l+2*(alpha-beta))) } return(T) }
cd3f4123681d668abe93aa7ddef6af1f38102751
9efa3d105b7323709cf9157226054e4bf91afeaa
/Scripts/Model/3d plotly.R
606e30e735cc98f3926c535fd939a2101bef0429
[]
no_license
jachuR/WiFi-project
065c24b1d0a50cd53addfef7401de5a6449b4e68
6d3828323ec16963a78f94d25513b4d2e61fb052
refs/heads/master
2021-02-07T08:51:53.296147
2020-02-29T17:53:41
2020-02-29T17:53:41
244,005,436
0
0
null
null
null
null
UTF-8
R
false
false
1,067
r
3d plotly.R
map_errors_training <- readRDS(file = "Data/Results/FinnalTable_F_C.RDS") #map_errors_training - można znaleźć w X1_floor_S p <- plot_ly(map_errors_training %>% filter(FLOOR <2), x = ~LONGITUDE, y = ~LATITUDE, z = ~FLOOR, marker = list(color = ~WAP310, colorscale = c('#FFE1A1', '#683531'), showscale = TRUE), mode = 'markers', symbol = ~corect, symbols = c('x','circle','cross-dot'))%>% add_markers() %>% layout(scene = list(xaxis = list(title = 'Weight'), yaxis = list(title = 'Gross horsepower'), zaxis = list(title = '1/4 mile time')) ) p p <- plot_ly(map_errors_training %>% filter(FLOOR <2), x = ~LONGITUDE, y = ~LATITUDE, z = ~FLOOR, marker = list(color = ~WAP108, colorscale = c('#FFE1A1', '#683531'), showscale = TRUE), mode = 'markers')%>% add_markers() %>% layout(scene = list(xaxis = list(title = 'Weight'), yaxis = list(title = 'Gross horsepower'), zaxis = list(title = '1/4 mile time')) ) p
92ac6ca1cbda4216e2801485d557c647f3ed70cb
135f8eed2aa58a1776c5b24a72aa95a50c81d3c4
/man/components.fbl_prophet.Rd
d2d5db8cd78a00dedaa31691c99f820cd5bcf976
[]
no_license
mitchelloharawild/fable.prophet
f1ce81baafc923fa730fb584ffd9edf78b304780
6d3c4ac596dda43b7c457a62c59256b3fda59db8
refs/heads/master
2022-09-15T18:37:15.686104
2022-09-02T02:27:13
2022-09-02T02:27:13
162,971,323
59
9
null
null
null
null
UTF-8
R
false
true
1,267
rd
components.fbl_prophet.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R \name{components.fbl_prophet} \alias{components.fbl_prophet} \title{Extract meaningful components} \usage{ \method{components}{fbl_prophet}(object, ...) } \arguments{ \item{object}{An estimated model.} \item{...}{Unused.} } \value{ A \code{\link[fabletools:dable]{fabletools::dable()}} containing estimated states. } \description{ A prophet model consists of terms which are additively or multiplicatively included in the model. Multiplicative terms are scaled proportionally to the estimated trend, while additive terms are not. } \details{ Extracting a prophet model's components using this function allows you to visualise the components in a similar way to \code{\link[prophet:prophet_plot_components]{prophet::prophet_plot_components()}}. } \examples{ \donttest{ if (requireNamespace("tsibbledata")) { library(tsibble) beer_components <- tsibbledata::aus_production \%>\% model( prophet = prophet(Beer ~ season("year", 4, type = "multiplicative")) ) \%>\% components() beer_components autoplot(beer_components) library(ggplot2) library(lubridate) beer_components \%>\% ggplot(aes(x = quarter(Quarter), y = year, group = year(Quarter))) + geom_line() } } }
0c93f374b6d21e3dd4df3115766a0efa7aa02920
fab4e7ad290309d0028e8de349d262519b705ef0
/gene_constraints/plot/use_density.R
d8c44262c0e1b2433726f7d4f36d4037021091da
[]
no_license
michaelbarton/michael-barton-thesis-figures
f4f3eb5937983b13c0b081cbc1b69d8228f436fd
7e0ad7de773cd5156209458b56ba3e2bc5915100
refs/heads/master
2016-09-11T04:09:10.956144
2009-08-17T15:42:57
2009-08-17T15:42:57
86,110
1
0
null
null
null
null
UTF-8
R
false
false
441
r
use_density.R
rm(list=ls()) library(lattice) source('helpers/find_replace.R') source('helpers/flux_data.R') data <- flux_data() data <- subset(data, value > -16) plot <- densityplot( ~ value | setup, groups = variable, bw=1, xlab="Absolute reaction flux (log.2)", auto.key=TRUE, data=data ) postscript("results/use_density.eps",width=9,height=5,onefile=FALSE,horizontal=FALSE, paper = "special",colormodel="rgb") print(plot) graphics.off()
74f6f7787d01eb936fe90f614918c6b5de9df309
3ed96bb9a7e7e0ed668b2fc05d29311ac4756a5d
/clases/M1_clase2_r-base.R
7e32218e6811583922fb65ff7f9afe2907871236
[]
no_license
JulioCursos/analisis_reproducible_iibp
ccababc0e55f205d0bee2e37e107063f37e36c56
5fb83e1e6e7908fab7873288a483e007da4248d5
refs/heads/main
2023-05-03T06:49:14.143767
2021-05-26T17:59:58
2021-05-26T17:59:58
371,774,381
0
0
null
2021-05-28T17:31:01
2021-05-28T17:31:00
null
UTF-8
R
false
false
7,480
r
M1_clase2_r-base.R
############-----------CLASE 2. R-BASE----------############ # CONTENIDO: # 2.1. Directorio de trabajo/ Espacio de trabajo # 2.2. R como calculadora # 2.3. Objetos # 2.4. Estilos para comentar, nombrar archivos, objetos # 2.5. Tipos de datos (numericos, caracter, factor, logico), # 2.6.Estructura de datos (vectores, matrices, data frames, listas y arrays) # 2.7. indices, filtros, seleccionar #### 2.1. Directorio de trabajo / Espacio de trabajo getwd() # Ver el directorio de trabajo o en ingles "working directory" setwd() # Establecer el directorio de trabajo setwd("..") # sube al directorio que contiene el actual setwd("c:/users/yo/proyecto") # ruta absoluta windows setwd("home/yo/proyecto") dir() # contenido del actual directorio de trabajo list.files() ## volveremos a repasar cuando importemos datos # 2.2. R como calculadora (En consola) # operaciones arimeticas 2 + 2 4 - 2 12 * 3 24 / 3 3^2 sqrt(25) 2*3 + 4 2*(3+4) #### 2.3. Objetos. R es un lenguaje orientado a objetos. Es decir, variables # datos, funciones, resultados se guardan en la memoria activa de la compu en forma # de "objetos" con un nombre especifico. x = 2 y <- 4 x + y # se pueden hacer operaciones con los objetos 2 * y a <- "mi nombre" b <- "A" # Instrucciones de asignacion nombre_objeto <- valor # agrupar expresiones # punto y coma x<- 2; y= 4; z= 6 # parentesis (x <- 3) #equivalente a x <- 3 x # llaves { x <- 3 y <- 2 x + y } #### 2.4. Estilos para comentar, nombrar archivos, objetos NombreObjeto # Joroba de camello nombre.objeto # Punto entre palabras nombre_objeto # guion bajo # No usar acentos # No dejar espacios en blanco # Mostrar ejemplo de Mayuscula-minuscula #### 2.5. Tipos de datos # Numerico (numeric). Con parte decimal o fracionaria mi_altura_cm <- 170 mi_peso_kg <- 77.5 # Entero (integer). Sin una parte decimal o fraccionaria ## para especificar que es un entero hay que agregar una L mi_edad <- 34L class(mi_edad) # Tambien llamados "double" o "float". 
Pero para fines practicos aca son todos numericos # Cadena de caracteres (character, string) mi_nombre <- "Julio" # siempre con comillas dobles o simples class(mi_nombre) nombre <- 'Julio' class(nombre) # Variables categoricas o factores (factor) sexos <- c("M", "H","H", "M", "H") class(sexos) sexo_fac <- factor(sexos)# convertimos el vector a factorial class(sexo_fac) levels(sexo_fac) # vemos las categorias levels(sexos) # logicos. Valores booleanos ## < (menor a) ## > (mayor a) ## & (y) ## | (o) ## ! (no) ## == (es igual a) ## != (es distinto de) a <- 2 b <- 4 a == b # a es igual a b? a > b # a es mayor a b? a != b # a es distinto de b? (a < 3) & (b < 5) # a es a menor que 3 y b menor que 3? (a < 1) | (b < 3) # a es a menor que 1 o b menor que 3? #### 2.5. Estructura de datos (vectores, matrices, data frames, listas y arrays), # Vectores ## Propiedades # Tipo: numeric, character, logical # Dimension: 1,la longitud # atributos: metadatos ## vector numerico c(1,2,3,4,5,6,7,8,9,10) 1:10 # secuencia seq(10)# mismo que el anterior rep(1, 10)# funcion repetir # vector character c("A", "B", "C", "D", "E") c("perro", "gato", "gallina", "perro") # vector logico c(FALSE, TRUE, FALSE, FALSE, FALSE, TRUE) # vector heterogeneo c(2,"A", "B", TRUE, 3, 5, "Z") ## vectorizacion de operaciones mi_vector <- c(1, 2, 3, 4, 5, 6) mi_vector * 2 ; mi_vector + 2 # operadores aritmeticos mi_vector < 4 # operador logico mi_vector_nuevo <- c(mi_vector, "A", FALSE) # agregar un elemeno a un vector class(mi_vector) # lo reconoce como character por que el tipo de datos mas flexible # Funciones para inspeccionar datos class() is.vector() length() unique() levels() # solo para factores # Matrices. Vector de 2 dimensiones ## Solo puede contener un tipo de datos # Argumentos funcion matrix() #data es el vector que contiene los elementos que formaran parte de la matriz. #nrow es el numero de filas. #ncol es el numero de columnas. #byrow es un valor logico. Si es TRUE el vector que pasamos ser?? 
ordenado por filas. #dimnames nombres asignado a filas y columnas. # crear una matriz con la funcion matrix 1:12 matrix(1:12) matrix(data= 1:12, nrow = 3, ncol = 4) matrix(data= 1:12, nrow = 4, ncol = 3) # Arrays. La extension de un vector a mas de 2 dimensiones # No se va a tratar en este curso # funciones para inspeccionar una matriz class() dim() # crear una matriz con cbind() o rbind # cbind() para unir vectores, usando cada uno como una columna. # rbind() para unir vectores, usando cada uno como un renglón. vector_1 <- 1:4 vector_2 <- 5:8 vector_3 <- 9:12 vector_4 <- 13:16 matriz_cbind <- cbind( vector_1, vector_2, vector_3, vector_4) matriz_rbind <- rbind(vector_1, vector_2, vector_3, vector_4) # Dataframes. Estructura en 2 dimensiones rectangulares. ## Puede contener datos de diferentes tipos mi_df <- data.frame( "entero"= 1:5, "factor"= c("a","b","c", "d","e"), "numero"= c(2.3, 22, 23, 6.4, 5), "cadena"= as.character(c("a","b","c", "d","e")) ) mi_df # funciones para inspeccionar un data frame class() dim() ncol() nrow() length() # da el nro de columnas names() # nombres de las variables # Listas. Contiene objetos de cualquier clase(numero, caracteres, matrices, funciones, etc) # una sola dimension, solo tiene largo mi_lista <- list(1:9, "Pepe", pi, matrix(1:12, nrow = 4, ncol = 3)) mi_lista #funciones para inspeccionar una lista class(lista) length(lista) dim(lista) str(lista) #### 2.6. 
indices, filtros, seleccionar ##indexacion: identificacion de los elementos de objeto por medio de numero # vector x <- c(3, 5, 9, 13, "A", F, "C") length(x) x[1] x[3:7] # lista lista <- list("A", c(2,4,5,4), matrix(1:12, ncol = 3, nrow = 4), FALSE) length(lista) lista[[1]] lista[[4]] # data.frame library(MASS) ## inspeccionar primero crabs class(crabs) dim(crabs) nrow(crabs) ncol(crabs) str(crabs) head(crabs)# primeras 6 filas tail(crabs)# ultimas 6 filas colnames(crabs)# nombre de las columnas # seleccionar ## [] o $ # data[x,y] # x filas # y, columnas crabs[,2] # seleciono columna 2 crabs[,"sex"] # lo mismo pero por el nombre crabs$sex #columna 2 crabs[1,] # selecciono fila 1 crabs[4,5] # elemento de la fila 4 columna 5 crabs[1:10, c("FL","CW")] # filas 1 al 10, variables "FL y "CW" ## Filtrar con algunos operadores logicos # Solo los cangrejos azules, todas las columnas crabs$sp == "B" # operacion logica crabs[crabs$sp == "B",] # aplicado a un subconjunto # Solo los cangrejos azules, columnas "RW", "FL" crabs[crabs$sp == "B", c("RW", "FL")] # cangrejos naranjas machos, todas las columnas crabs$sp == "O" & crabs$sex == "M"# operacion logica crabs[crabs$sp == "O" & crabs$sex == "M",]# aplicado a un filtro # cangrejos lobulo frontal mayor a 10mm crabs$FL > 10 # operacion logica crabs[crabs$FL > 10,]# operacion aplicada a un filtro # Ejercicio # Seleccione las hembras de la variedad Azul con CL entre 35 y 40 mm crabs$sp == "B" # sp azul crabs$sex == "F"# sexo hembra crabs$sex == "F" & crabs$sp == "B" # hembras de la variedad azul crabs$CL >= 35 & crabs$CL <=40 # CL entre 35 y 40 crabs[(crabs$sex == "F" & crabs$sp == "B" & crabs$CL >= 35 & crabs$CL), ] # expresion completa # o en dos pasos #Paso 1. hembras de variedad azul azul_hembra <- crabs[crabs$sex == "F" & crabs$sp == "B",] # Paso 2. con el rango CL entre 35 y 40 azul_hembra[(azul_hembra$CL <=40 & azul_hembra$CL >= 35),]
20ad0fb2302eb8da3d95a62159ed069264ced541
888eb6041144ac34c7ed0d17684f856a4e3b95fd
/R/compare_designs.R
eb704e84900d254a884835bddb27b1c687b6c21a
[]
no_license
reuning/DeclareDesign
c5ae645ae7e661469ff4c9a54f252c69619b2e51
b089b97397c6d95f334c129fdc0fb8fccb00d4d6
refs/heads/master
2023-08-19T19:30:45.244887
2021-10-17T13:11:15
2021-10-17T13:11:15
417,950,314
0
0
null
null
null
null
UTF-8
R
false
false
5,914
r
compare_designs.R
compare_partial <- function(FUN, DIFFFUN, is_data = FALSE){ if(is_data){ function(design1, design2, format = "ansi256", mode = "auto", pager = "off", context = -1L, rmd = FALSE) { stopifnot(requireNamespace("diffobj")) DIFFFUN <- get(DIFFFUN, getNamespace("diffobj")) compare_design_internal( FUN, DIFFFUN, design1, design2, format = format, mode = mode, pager = pager, context = context, rmd = rmd ) } } else{ function(design1, design2, format = "ansi256", mode = "sidebyside", pager = "off", context = -1L, rmd = FALSE) { stopifnot(requireNamespace("diffobj")) DIFFFUN <- get(DIFFFUN, getNamespace("diffobj")) compare_design_internal( FUN, DIFFFUN, design1, design2, format = format, mode = mode, pager = pager, context = context, rmd = rmd ) } } } #' Compare two designs #' #' @param design1 A design object, typically created using the + operator #' @param design2 A design object, typically created using the + operator #' @param format Format (in console or HTML) options from \code{diffobj::diffChr} #' @param mode Mode options from \code{diffobj::diffChr} #' @param pager Pager option from \code{diffobj::diffChr} #' @param context Context option from \code{diffobj::diffChr} which sets the number of lines around differences that are printed. By default, all lines of the two objects are shown. To show only the lines that are different, set \code{context = 0}; to get one line around differences for context, set to 1. #' @param rmd Set to \code{TRUE} use in Rmarkdown HTML output. NB: will not work with LaTeX, Word, or other .Rmd outputs. 
#' #' @examples #' #' design1 <- declare_model(N = 100, u = rnorm(N), potential_outcomes(Y ~ Z + u)) + #' declare_inquiry(ATE = mean(Y_Z_1 - Y_Z_0)) + #' declare_sampling(S = complete_rs(N, n = 75)) + #' declare_assignment(Z = complete_ra(N, m = 50)) + #' declare_measurement(Y = reveal_outcomes(Y ~ Z)) + #' declare_estimator(Y ~ Z, inquiry = "ATE") #' #' design2 <- declare_model(N = 200, U = rnorm(N), #' potential_outcomes(Y ~ 0.5*Z + U)) + #' declare_inquiry(ATE = mean(Y_Z_1 - Y_Z_0)) + #' declare_sampling(S = complete_rs(N, n = 100)) + #' declare_assignment(Z = complete_ra(N, m = 25)) + #' declare_measurement(Y = reveal_outcomes(Y ~ Z)) + #' declare_estimator(Y ~ Z, model = lm_robust, inquiry = "ATE") #' #' compare_designs(design1, design2) #' compare_design_code(design1, design2) #' compare_design_summaries(design1, design2) #' compare_design_data(design1, design2) #' compare_design_estimates(design1, design2) #' compare_design_inquiries(design1, design2) #' #' @name compare_functions #' @rdname compare_functions #' @export compare_designs <- function(design1, design2, format = "ansi8", pager = "off", context = -1L, rmd = FALSE) { compare_functions <- list(code_comparison = compare_design_code, data_comparison = compare_design_data, estimands_comparison = compare_design_inquiries, estimates_comparison = compare_design_estimates) vals <- lapply(compare_functions, function(fun) fun( design1, design2, format = format, pager = pager, context = context, rmd = rmd ) ) class(vals) <- "design_comparison" vals } #' @export print.design_comparison <- function(x, ...) 
{ cat("Research design comparison\n\n") labels <- c("code_comparison" = "design code", "data_comparison" = "draw_data(design)", "estimands_comparison" = "draw_estimands(design)", "estimates_comparison" = "draw_estimates(design)") for(n in names(labels)) { print_console_header(paste("Compare", labels[n])) print(x[[n]]) } } #' @rdname compare_functions #' @export compare_design_code <- compare_partial(get_design_code, "diffObj") #' @rdname compare_functions #' @export compare_design_summaries <- compare_partial(function(x) capture.output(summary(x)), "diffChr") #' @rdname compare_functions #' @export compare_design_data <- compare_partial(draw_data, "diffObj") #' @rdname compare_functions #' @export compare_design_estimates <- compare_partial(draw_estimates, "diffObj", is_data = TRUE) #' @rdname compare_functions #' @export compare_design_inquiries <- compare_partial(draw_estimands, "diffObj", is_data = FALSE) compare_design_internal <- function(FUN, DIFFFUN, design1, design2, format = "ansi256", mode = "sidebyside", pager = "off", context = -1L, rmd = FALSE){ check_design_class_single(design1) check_design_class_single(design2) seed <- .Random.seed design1 <- FUN(design1) set.seed(seed) design2 <- FUN(design2) if(rmd == TRUE) { format <- "html" style <- list(html.output = "diff.w.style") } else { style <- "auto" } diff_output <- structure( DIFFFUN( design1, design2, format = format, mode = mode, pager = pager, context = context, style = style ), class = "Diff", package = "diffobj" ) if(rmd == TRUE) { cat(as.character(diff_output)) } else { diff_output } } clean_call <- function(call) { paste(sapply(deparse(call), trimws), collapse = " ") } get_design_code <- function(design){ if (is.null(attributes(design)$code)) { sapply(design, function(x) clean_call(attr(x, "call"))) } else { attributes(design)$code } } print_console_header <- function(text) { width <- options()$width cat("\n\n#", text, paste(rep("-", width - nchar(text) - 2), collapse = ""), "\n\n") }
a9528fc4e434e799cc3cee0e0fc5b993e20ff016
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/PivotalR/examples/null.data.Rd.R
d7197ac8dc7fe5f0cd884fe6bb85b0a5a17d5a30
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,219
r
null.data.Rd.R
library(PivotalR) ### Name: null.data ### Title: A Data Set with lots of 'NA' values ### Aliases: null.data ### Keywords: database data operation ### ** Examples ## Not run: ##D ##D ##D ## set up the database connection ##D ## Assume that .port is port number and .dbname is the database name ##D cid <- db.connect(port = .port, dbname = .dbname, verbose = FALSE) ##D ##D ## create a table from the example data.frame "abalone" ##D delete("null_data", conn.id = cid) ##D x <- as.db.data.frame(null.data, "null_data", conn.id = cid, verbose = FALSE) ##D ##D ## ERROR, because of NULL values ##D fit <- madlib.lm(sf_mrtg_pct_assets ~ ris_asset + lncrcd + lnauto + ##D lnconoth + lnconrp + intmsrfv + lnrenr1a + lnrenr2a + ##D lnrenr3a, data = x) ##D ##D ## select columns ##D y <- x[,c("sf_mrtg_pct_assets","ris_asset", "lncrcd","lnauto", ##D "lnconoth","lnconrp","intmsrfv","lnrenr1a","lnrenr2a", ##D "lnrenr3a")] ##D ##D dim(y) ##D ##D ## remove NULL values ##D for (i in 1:10) y <- y[!is.na(y[i]),] ##D ##D dim(y) ##D ##D fit <- madlib.lm(sf_mrtg_pct_assets ~ ., data = y) ##D ##D fit ##D ##D db.disconnect(cid, verbose = FALSE) ## End(Not run)
e6c2dcb36f1efa257cc05e9d6dafb9803272b1da
0f413fffdd3a6f7d740e64ff125875a0992deebc
/global.R
f38f72ca74ea66fb92614b55a5abaae68dcefd7e
[]
no_license
CEREMA/appli_conso_espace_doubs
461820251a7df15cbc24b7984a7414b6c3d12de9
57fb3044dd08a45da1d364bfd6258f9292ff7d00
refs/heads/master
2021-12-06T11:46:40.058287
2021-06-16T15:15:11
2021-06-16T15:15:11
373,520,341
0
1
null
null
null
null
UTF-8
R
false
false
6,203
r
global.R
# # APPLI DE VISUALISATION DES ENVELOPPES BATIES DU DOUBS # #
# (Visualization app for the built-up envelopes of the Doubs department.)
#
# Global data-preparation script: reads the communes / zones / indicators
# tables from a GeoPackage, patches a few zone labels, derives land-take
# indicators, and reshapes the census series into long format for the app.
library(sf)
library(DBI)
library(rgdal)
library(RSQLite)
library(plotly)
library(leaflet)
library(leaflet.extras)
library(shiny)
library(data.table)
library(shinydashboard)
library(shinyWidgets)
library(RColorBrewer)
library(lwgeom)
library(classInt)

# READ THE TABLES FROM THE GEOPACKAGE -----------------------------------
# A GeoPackage is an SQLite database, so plain RSQLite/DBI access works.
conn <- dbConnect(RSQLite::SQLite(), dbname = "./data/bdd25.gpkg")
dcommunes <- st_as_sf(dbGetQuery(conn, "SELECT * FROM communes"))
dzonages <- dbReadTable(conn, "zonages")
dindic <- dbReadTable(conn, "indicateurs")
# NOTE(review): this *asserts* WGS84 on the geometries, it does not reproject;
# confirm the stored coordinates really are EPSG:4326.
st_crs(dcommunes) = 4326

# A few label corrections in the zones table
dzonages[dzonages$id_zone == "247000714",]$nom_zone <- "CC du Pays de Villersexel (partie Doubs)"
dzonages[dzonages$id_zone == "247000722",]$nom_zone <- "CC du Pays d'Héricourt (partie Doubs)"
dzonages[dzonages$id_zone == "242504496",]$nom_zone <- "CC du Plateau de Frasne et du Val de Drugeon"
dzonages[dzonages$id_zone == "pnrhj",]$nom_zone <- "PNR Haut Jura (partie Doubs)"
dzonages[dzonages$id_zone == "pnrph",]$nom_zone <- "PNR du Doubs Horloger"
dzonages[dzonages$id_zone == "200041887",]$nom_zone <- "CC du Val Marnaysien (partie Doubs)"
dzonages[dzonages$id_zone == "scot7",]$nom_zone <- "SCoT de l'Agglomération bisontine (partie Doubs)"
dzonages[dzonages$id_zone == "scot6",]$nom_zone <- "SCoT du Doubs Central"
# Drop zone "scot8" (reason not stated in the source)
dzonages <- dzonages %>% filter(id_zone != "scot8")

# Drop-down choices for commune selection (values = INSEE codes, labels = names)
choixcom <- dcommunes$insee_com
names(choixcom) <- dcommunes$nom_com
# Drop-down choices for study-zone selection
choixzone <- dzonages$id_zone
names(choixzone) <- dzonages$nom_zone

# Default reference year
annee_t0 <- 1990

# Derive additional indicators.
# senv17 + senvnd = total artificialized (built-envelope) surface;
# occpot* = "potential occupants" = population + jobs + secondary residences.
dcom <- dcommunes %>% select(nom_com, insee_com, surface)
dindic <- dindic %>%
  dplyr::left_join(dcom) %>%
  mutate(sartif = senv17 + senvnd,
         partif = 1000000 * sartif / surface,        # share of the commune area (surface in m2, sartif in km2 -> %) — scale factor per source
         cos = 100*sbati / (senv17 + senvnd),        # building footprint coverage ratio (%)
         sartif_par_hab = 10000 * sartif / p17_pop,  # artificialized surface per inhabitant
         occpot17 = p17_pop + p17_emplt + p17_rsec,
         occpot12 = p12_pop + p12_emplt + p12_rsec,
         occpot07 = p07_pop + p07_emplt + p07_rsec,
         sartif_par_op = 10000 * sartif / occpot17,  # per potential occupant
         sartif_evo_men = menhab1217 )
dindic <- st_as_sf(dindic)

# Reshape the census series to long format.
# Column names encode the year: e.g. "p17_pop"/"d90_pop" -> characters 2-3
# give the 2-digit year; +2000 then -100 when > 2050 maps it to 19xx.
dpop <- as.data.table(dindic) %>%
  select(insee_com, p17_pop, p12_pop, p07_pop, d99_pop, d90_pop, d82_pop, d75_pop, d68_pop) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "population") %>%
  mutate(annee = strtoi(substr(annee, 2, 3)) + 2000) %>%
  mutate(annee = ifelse(annee > 2050, annee - 100, annee))
# Secondary residences, same encoding
drsec <- as.data.table(dindic) %>%
  select(insee_com, p17_rsec, p12_rsec, p07_rsec, d99_rsec, d90_rsec, d82_rsec, d75_rsec, d68_rsec) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "rsec") %>%
  mutate(annee = strtoi(substr(annee, 2, 3)) + 2000) %>%
  mutate(annee = ifelse(annee > 2050, annee - 100, annee))
# Envelope surfaces: add the undated envelope (senvnd) to every year, then
# melt; here the year sits in characters 5-6 of "senvXX".
dsenv <- as.data.table(dindic) %>%
  select(insee_com, senv17, senv12, senv07, senv99, senv90, senv82, senv75, senv68, senvnd) %>%
  mutate(senv17 = senv17 + senvnd) %>%
  mutate(senv12 = senv12 + senvnd) %>%
  mutate(senv07 = senv07 + senvnd) %>%
  mutate(senv99 = senv99 + senvnd) %>%
  mutate(senv90 = senv90 + senvnd) %>%
  mutate(senv82 = senv82 + senvnd) %>%
  mutate(senv75 = senv75 + senvnd) %>%
  mutate(senv68 = senv68 + senvnd) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "stot") %>%
  mutate(annee = strtoi(substr(annee, 5, 6)) + 2000) %>%
  mutate(annee = ifelse(annee > 2050, annee - 100, annee))
# Jobs: only 2007/2012/2017 exist, year in characters 2-3 of "pXX_emplt"
demplt <- as.data.table(dindic) %>%
  select(insee_com, p17_emplt, p12_emplt, p07_emplt) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "emplt") %>%
  mutate(annee = strtoi(substr(annee, 2, 3)) + 2000)
# Potential occupants: year in characters 7-8 of "occpotXX"
docpot <- as.data.table(dindic) %>%
  select(insee_com, occpot17, occpot12, occpot07) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "ocpot") %>%
  mutate(annee = strtoi(substr(annee, 7, 8)) + 2000)
# Households: year in characters 4-5 of "menXX"
dmen <- as.data.table(dindic) %>%
  select(insee_com, men12, men17) %>%
  melt(id = c("insee_com"), variable.name = "annee", value.name = "men") %>%
  mutate(annee = strtoi(substr(annee, 4, 5)) + 2000)
# One long table keyed by (insee_com, annee); joins rely on the shared
# key columns (by = NULL = natural join).
dtempo <- dpop %>%
  dplyr::left_join(drsec, by = NULL, copy = FALSE) %>%
  dplyr::left_join(dsenv, by = NULL, copy = FALSE) %>%
  dplyr::left_join(demplt, by = NULL, copy = FALSE) %>%
  dplyr::left_join(docpot, by = NULL, copy = FALSE) %>%
  dplyr::left_join(dmen, by = NULL, copy = FALSE)

# Chart colours
col1 <- "#F58220" # orange
col2 <- "#268966" # pistachio green
col3 <- "#33475b" # midnight blue

# Placeholder sf object (used when a query returns nothing)
sfnul <- st_sf(surface = 0, datation = 0, geom = dcommunes$geom[1])

# Reference years
anneesref <- c(1968, 1975, 1982, 1990, 1999, 2007, 2012, 2017)

# Indicator labels (French UI strings — do not translate)
ind1 <- "surface artificialisée par le bâti en 2017"
ind2 <- "évolution de la surface artificialisée par le bâti"
ind3 <- "évolution relative de la surface artificialisée par le bâti"
ind4 <- "part de la surface communale artificialisée par le bâti en 2017"
ind5 <- "coefficient d'emprise au sol du bâti en 2017"
ind6 <- "surface artificialisée par habitant en 2017"
ind7 <- "surface artificialisée par occupant potentiel en 2017"
ind8 <- "nombre de nouveaux ménages par ha artificialisé pour l'habitat"
ind <- 1:8
names(ind) <- c(ind1, ind2, ind3, ind4, ind5, ind6, ind7, ind8)
# Column names and display units, index-aligned with `ind`
vars <- c("sartif", "sartif_evo", "partif_evo", "partif", "cos", "sartif_par_hab", "sartif_par_op", "sartif_evo_men")
unites <- c(" ha", " ha", " %", " %", " %", " m2", " m2", " ménages")

# Sample queries kept for manual testing of the app
## com_date <- dbGetQuery(conn, "SELECT code_insee, datation, surface FROM env_date WHERE code_insee = '25001'")
## com_bati <- st_as_sf(dbGetQuery(conn, "SELECT * FROM bati WHERE insee_com = '25001'", crs = 4326))
d4994854947ea808e5ba005522882397f514935c
e0eba6a80fe2b5e346829f0d6f0c90d4748af58d
/data_creation.R
ca067558e38ed4f93244cf7f3a1be957747acb12
[]
no_license
argyelan/MRI
ddcd33f077f6701e41c7a0d9471cfc37288b77aa
fdd3c05bc85f84eb22e28b3ec87a2f68c60dbe06
refs/heads/master
2020-06-20T21:28:23.542187
2019-07-26T16:25:12
2019-07-26T16:25:12
197,256,016
0
0
null
2019-07-26T16:25:13
2019-07-16T19:33:17
R
UTF-8
R
false
false
2,283
r
data_creation.R
#**************************************#
# Script to create the data file to be handled#
#**************************************#
# Builds per-ROI fMRI time-series: shells out to FSL to extract, for every
# intensity label in a node/atlas image, the mean BOLD signal over time, then
# binds the resulting per-node CSVs column-wise into one data.table.
#
# Side effects: changes the working directory, DELETES every file already in
# the output folder, and runs external programs (fslstats, corr_matrix.sh).
#Edit file directories if needed
library(data.table)
library(corrplot)
library(dplyr)
# Atlas/label image whose integer intensities identify the nodes
file <- '/nethome/rkim/hcp_example/example3/Nodes.nii'
#Import data
x <- paste0('fslstats ',file,' -R')
print(x)
# fslstats -R prints "min max"; split on the space to get the label range
range <- as.numeric(strsplit(system(x, intern = TRUE),' ')[[1]]) #Run bash command to find range of intensities
# 4D preprocessed BOLD series the node time-courses are extracted from
img4D <- '/nethome/rkim/hcp_example/example3/GSR_preprocessed_ses-22921_task-rest_acq-AP_run-01_bold_hp2000_clean.nii.gz'
x <- paste0('/nethome/rkim/Script/bin/corr_matrix.sh ',file,' ', img4D,' ', range[1],' ', range[2])
setwd('/nethome/rkim/Script/DataFolder/') #work directory where matrices will be stored
# Wipe any output left over from a previous run
filenames <- list.files(full.names = FALSE)
sapply(filenames, unlink)
system(x) #Run bash script to create correlation matrices
print(x)
filenames <- list.files(full.names = FALSE) #Find all matrices
info = file.info(filenames)
# Zero-byte files correspond to empty/missing nodes and are skipped
good = rownames(info[info$size > 0, ]) #Keep only matrices that have values in them
# One column per node CSV, bound side by side (rows = time points)
dataFile <- do.call("cbind", lapply(good, read.csv, header = FALSE)) #Create the data file from the csv files
# cbind duplicates the "V1" header from every CSV; de-duplicate with suffixes
oldnames <- colnames(dataFile)
oldnames <- make.unique(oldnames, sep = ".")
colnames(dataFile) <- oldnames
dataFile <- data.table(dataFile)
# ---- disabled experiment: sliding-window pairwise correlations ----
#meanData <- dataFile[,lapply(.SD, mean)]
#dataWindow <- 100
#setwd('/nethome/rkim/Script/Pairwise Correlations/')
#sapply(paste('pair_corr', 1:(nrow(dataFile) - dataWindow + 1), '.csv', sep = ''), unlink)
#for (i in c(1:(nrow(dataFile) - dataWindow + 1)))
#{
#pair_corr <- cor(dataFile[c(i:(i + dataWindow - 1))]) #Find the pairwise correlation of data
#setwd('/nethome/rkim/Script/Pairwise Correlations/') #work directory where pairwise correlation vectors will be stored
#newPair <- pair_corr[upper.tri(pair_corr)]
#write.table(newPair, file = paste('pair_corr', sprintf('%03d',i), '.csv', sep = ''), row.names = FALSE, col.names = FALSE)
#}
#corrplot(pair_corr, method = "circle")
#pairNames <- list.files(full.names = FALSE)
#pairFile <- do.call("cbind", lapply(pairNames, read.csv, header = FALSE)) #Create the data file from the csv files
#finalCorr <- cor(pairFile)
#ind <- seq(2,ncol(pairFile),10)
#corrplot(finalCorr[ind,ind], method = "circle")
3d02ebf2f68406e1af2f3b1a2849323ce812fe85
d14bcd4679f0ffa43df5267a82544f098095f1d1
/inst/apps/figure2_5/server.R
31d74e052bea67befcfefd6daa579b36c584879b
[]
no_license
anhnguyendepocen/SMRD
9e52aa72a5abe5274f9a8546475639d11f058c0d
c54fa017afca7f20255291c6363194673bc2435a
refs/heads/master
2022-12-15T12:29:11.165234
2020-09-10T13:23:59
2020-09-10T13:23:59
null
0
0
null
null
null
null
UTF-8
R
false
false
185
r
server.R
# Shiny server for figure 2.5: when the "evalfig5" button is pressed,
# parse and evaluate the R code the user typed into `fig5plot` and render
# the result as the plot output `plotfig5`.
#
# SECURITY NOTE: eval(parse(text = input$fig5plot)) executes arbitrary R
# code supplied by the client. Acceptable only for a local teaching app;
# never expose this server to untrusted users.
server <- function(input, output, session) {
  observeEvent(input$evalfig5, {
    output$plotfig5 <- renderPlot({
      # isolate() so the plot re-renders only on the button press,
      # not on every keystroke in the code input.
      return(isolate(eval(parse(text = input$fig5plot))))
    })
  })
}
# NOTE(review): the captured source was missing the closing brace of
# `server`, which made the file unparseable; it is restored above.
96fd7e69352686539bbdc2bbcbff6453f61e5fe0
375b0780581873c3d5ec6035a5f1d4227f0f1bea
/tests/testthat/test.vig.R
18d6d27d82c4290b6c2431761d7025fad1f57cf5
[]
no_license
cran/odds.converter
e7d5d3498410277e6eee1667324300c8f2e3efd8
335a2c09ec76b5f8b33ff0f8a7da4a9ebff4dbbd
refs/heads/master
2021-01-10T13:14:56.227086
2018-06-01T11:29:53
2018-06-01T11:29:53
36,883,159
2
1
null
null
null
null
UTF-8
R
false
false
1,488
r
test.vig.R
# Tests for odds.vig(): the bookmaker vig implied by a home/away odds pair.
# The same two fixtures (game 1: -110/-110, game 2: -120/-100) are pushed
# through every supported input shape: two scalars, a length-2 vector,
# paired home/away vectors, a named 2-column matrix, and a data frame.
# An NA anywhere in a pair must propagate to an NA vig.
us <- c(-110, -120, -110, -100)      # US (moneyline) odds: games interleaved home/away
probs <- c(11/21, 6/11, 11/21, 1/2)  # the same odds expressed as implied probabilities
vigs <- c(1/22, 1/23)                # expected vig for game 1 and game 2
# US input
expect_equal(odds.vig(us[1], us[3]), vigs[1])
expect_equal(odds.vig(us[c(1, 3)]), vigs[1])
expect_equal(odds.vig(us[c(1, 3, NA)]), NA_real_)
expect_equal(odds.vig(home = us[c(1:2, NA)], away = us[c(3:4, NA)]),
             c(vigs, NA_real_))
expect_equal(odds.vig(matrix(c(us[1:2], NA, us[3:4], NA), ncol = 2,
                             dimnames = list(paste0("gm", 1:3), c("h", "a")))),
             c(gm1 = vigs[1], gm2 = vigs[2], gm3 = NA_real_))
expect_equal(odds.vig(data.frame(home = us[c(1:2, NA)],
                                 away = us[c(3:4, NA)])),
             c(vigs, NA_real_))
# Probability input
expect_equal(odds.vig(probs[1], probs[3], input = "prob"), vigs[1])
expect_equal(odds.vig(probs[c(1, 3)], input = "prob"), vigs[1])
# NOTE(review): unlike its neighbours this call omits input = "prob"; the
# expectation is NA either way, but confirm the omission is intentional.
expect_equal(odds.vig(probs[c(1, 3, NA)]), NA_real_)
expect_equal(odds.vig(home = probs[c(1:2, NA)], away = probs[c(3:4, NA)],
                      input = "prob"),
             c(vigs, NA_real_))
expect_equal(odds.vig(matrix(probs[c(1:2, NA, 3:4, NA)], ncol = 2,
                             dimnames = list(paste0('gm', 1:3), c('h', 'a'))),
                      input = "prob"),
             c(gm1 = vigs[1], gm2 = vigs[2], gm3 = NA_real_))
expect_equal(odds.vig(data.frame(home = probs[c(1:2, NA)],
                                 away = probs[c(3:4, NA)]),
                      input = "prob"),
             c(vigs, NA_real_))
45f3c3e631345cfe5c9e11163c0fba49d9a624d0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/DiagrammeR/examples/select_edges_by_node_id.Rd.R
b9df8ff21be25c0d5eb0091d991db8bfaf6cb7cf
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
848
r
select_edges_by_node_id.Rd.R
# Auto-extracted example code for DiagrammeR::select_edges_by_node_id().
# Demonstrates selecting edges incident to given node IDs and computing
# the fraction of edges currently selected.
library(DiagrammeR)

### Name: select_edges_by_node_id
### Title: Select edges in a graph using node ID values
### Aliases: select_edges_by_node_id

### ** Examples

# Create a graph with 5 nodes (a simple path, so 4 edges)
graph <-
  create_graph() %>%
  add_path(n = 5)

# Create a graph selection by selecting edges
# associated with nodes `1` and `2`
graph <-
  graph %>%
  select_edges_by_node_id(
    nodes = 1:2)

# Get the selection of edges
graph %>% get_selection()

# Perform another selection of edges, with nodes
# `1`, `2`, and `4` (clearing the previous selection first)
graph <-
  graph %>%
  clear_selection() %>%
  select_edges_by_node_id(
    nodes = c(1, 2, 4))

# Get the selection of edges
graph %>% get_selection()

# Get a fraction of the edges selected over all
# the edges in the graph
graph %>%
  {
    l <- get_selection(.) %>%
      length(.)
    e <- count_edges(.)
    l/e
  }
f81b1bd06f179e684783beca62086558e66e435d
9c54073b91052e69fcea27c7c7b685e0d12ae6d6
/CMPT318/Assignment1/Untitled.R
24412a0a058204bc45e7b1b40d108f64a046df39
[]
no_license
yifanliu98/gitSFUBowen
b271b70867fa351717ce5fccda62d7fa974aafde
00a46d36bb78aa602ed278844ddef3409cf7204a
refs/heads/master
2022-02-24T07:57:11.145612
2019-09-19T05:45:48
2019-09-19T05:45:48
null
0
0
null
null
null
null
UTF-8
R
false
false
605
r
Untitled.R
# Extracts Sunday morning (08:00-12:00) and Sunday evening (21:00-24:00)
# observations from a household power-consumption file (Dataset1.txt,
# comma-separated, with Date and Time columns).
dataset <- read.table("Dataset1.txt", header=TRUE, sep=",") #525600 with NA values
require( lubridate )
library("depmixS4")
dataset <- na.omit(dataset) #521860 with na values omitted.
# Replace Date with the day-of-week index (0 = Sunday, per POSIXlt$wday).
# NOTE(review): "%d/%m/%y" expects a TWO-digit year; if the file carries
# four-digit years (e.g. "16/12/2006") the parse is wrong — confirm and
# switch to "%d/%m/%Y" if so. Also, `na.rm` is not a formal argument of
# as.POSIXlt and appears to be silently ignored — verify.
dataset$Date <- as.POSIXlt(dataset$Date, na.rm = TRUE, format ="%d/%m/%y")$wday
# Keep only Sundays
sunday <- dataset[which(dataset$Date == 0),]
# Convert "HH:MM:SS" to a lubridate period so it can be compared in seconds
sunday$Time <- hms(sunday$Time)
sundayEvening <- sunday
sundayMorning <- sunday
# 28800 s = 08:00, 43200 s = 12:00
sundayMorning <- subset(sundayMorning, as.numeric(Time) >= 28800 & as.numeric(Time) <= 43200)
# 75600 s = 21:00, 86400 s = 24:00; "== 0" re-includes the midnight sample
sundayEvening <- subset(sundayEvening, (as.numeric(Time) >= 75600 & as.numeric(Time) < 86400) | as.numeric(Time) == 0)
d7db935686efede3e34c8bc7bb6a31da19199107
8944a932cf6b08d91e2975f4344598ebf15f03b2
/man/KmeansClust.Rd
5fbeab3222d1c3595fd28b874a175bc3750a2df1
[ "MIT" ]
permissive
admurali/self
c830e8b486435f1503c2a3999035105c3dc0bf3d
112a723f6d388ac9323902407c88425d5ea38315
refs/heads/master
2020-05-30T23:24:11.664562
2016-12-15T07:53:04
2016-12-15T07:53:04
59,989,165
0
1
null
null
null
null
UTF-8
R
false
true
1,093
rd
KmeansClust.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmeans_clustering.R
\name{KmeansClust}
\alias{KmeansClust}
\title{K-Means Clustering}
\usage{
KmeansClust(data, x, y, cluster, rstart = 1)
}
\arguments{
\item{data}{is the dataset containing the observations.}

\item{x}{is a reference to a column in the dataset that could be the independent variable.}

\item{y}{is a reference to another column in the dataset that could be the dependent variable.}

\item{cluster}{is the number of clusters to perform k-means operation on.}

\item{rstart}{is how many random starting cluster assignments to try before choosing the one with the lowest within cluster variation}
}
\value{
Returns a list that contains values corresponding to the cluster number, and other details.
}
\description{
K-Means Clustering
}
\details{
Subsets given data using column names or number and performs k-means clustering on the subset data with error handling.
}
\examples{
print(KmeansClust(iris, 'Sepal.Length', "Sepal.Width", 3))
}
\author{
Adithya Murali
}
\seealso{
\code{kmeans}
}
3fe203018d6a8ac74cb870e6acf7bd4cfacb00c6
a7f19a71d2bfb2fc3294d3aaae3be1aef6aafe3b
/Econometria/Trabajo_final.R
c3d24d19fed35337e75058260926684b4f301838
[]
no_license
ArturoGon/Master
d3fe89e50964a51cc43cffe255e853f45e881a8e
322a8d9c36d9bb4ff81591885f219d0e03056b0c
refs/heads/main
2023-06-02T08:06:13.664904
2021-06-14T09:54:30
2021-06-14T09:54:30
376,586,518
0
0
null
null
null
null
UTF-8
R
false
false
25,291
r
Trabajo_final.R
# Econometrics coursework: predicting soda prices (psoda) in the
# wooldridge `discrim` fast-food pricing data.
# This section: environment setup, variable pruning, and the
# correlation screen used to drop collinear regressors.
rm(list=ls())  # NOTE(review): clearing the global env inside a script is discouraged
cat("\014")    # clears the RStudio console
library(wooldridge)
library(ISLR)
library(leaps)
library(stats)
library(ggcorrplot)
library(RColorBrewer)
library(tidyverse)
library(glmnet)
library(pls)
library(caret)
library(MLmetrics)
library(car)
library(selectiveInference)
library(covTest)
library(hdm)
# NOTE(review): attach() is discouraged; the code below accesses discrim
# explicitly anyway, so this is likely redundant.
attach(discrim)
# Response variable: psoda
set.seed(44)
##### a)
discrim <-na.omit(discrim)
# Drop 'chain' and 'state': already present in binary form in other columns.
# Drop hrsopen2, pentree2, wagest2, nregs2, psoda2, pfries2, nmgrs2, emp2:
# measured at a different date (second survey wave).
discrim <- dplyr::select(discrim, -c(state, chain, lpsoda, hrsopen2, pentree2, wagest2, nregs2, psoda2, pfries2, nmgrs2, emp2))
# For the correlation screen, exclude the binary variables compown, NJ, BK,
# KFC, RR plus log-duplicates lpfries, lhseval, lincome, ldensity
datos_cor <- dplyr::select(discrim, -c(compown, county, NJ, BK, KFC, RR, lpfries, lhseval, lincome, ldensity))
ggcorrplot(cor(datos_cor), hc.order = TRUE, type = "lower", lab = TRUE,
           lab_size = 1.5)
# Drop prppov, hseval, prpncar for being highly correlated with other regressors.
# Drop county: it is a label with 310 levels.
discrim <- dplyr::select(discrim, -c(prppov, county, hseval, prpncar,lhseval)) # Seleccionamos muestra de entrenamiento y test muestra <- sample(1:nrow(discrim), round(0.79*nrow(discrim), 0)) entrenamiento <- discrim[muestra, ] test <- discrim[-muestra, ] reg_1 = lm(psoda ~ ., data = entrenamiento) summary(reg_1) pred = predict(reg_1, newdata = test) error_reg_1 <- sqrt(MSE(y_pred = pred,y_true = test$psoda)) error_reg_1 ##### b) #Esta es la funcion para predecir los regsubsets predict.regsubsets=function(object, newdata, id,...){ form=as.formula(object$call[[2]]) mat=model.matrix(form, newdata) coefi=coef(object, id=id) mat[, names(coefi)]%*%coefi } k = 10 folds = sample(1:k,nrow(entrenamiento),replace=TRUE) cv_error_sub_10=matrix(NA,k,(ncol(entrenamiento)-1), dimnames=list(NULL, paste(1:(ncol(entrenamiento)-1)))) for(j in 1:k){ reg_full_10 = regsubsets(psoda ~., data=entrenamiento[folds != j,], nvmax = (ncol(entrenamiento)-1)) for (i in 1:(ncol(entrenamiento)-1)){ pred = predict.regsubsets(reg_full_10, entrenamiento[folds == j, ], id = i) cv_error_sub_10[j, i] = mean((entrenamiento$psoda[folds == j] - pred)^2) } } cv_error_media_10 = apply(cv_error_sub_10 ,2,mean) cv_error_media_10 plot(cv_error_media_10,pch=19,type="b", xlab="Numero de variables", ylab="Error CV") points(which.min(cv_error_media_10),cv_error_media_10[which.min(cv_error_media_10)], col="red",cex=2,pch=18) mejor_reg=regsubsets (psoda~.,data=entrenamiento , nvmax=(ncol(entrenamiento)-1)) coef(mejor_reg ,which.min(cv_error_media_10)) reg_sub_10 =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1)) pred_reg_sub_10 = predict.regsubsets(reg_sub_10, newdata = test, id=which.min(cv_error_media_10)) error_mss_sub_10 <- sqrt(mean((test$psoda - pred_reg_sub_10)^2)) error_mss_sub_10 # Regla codo codo_sub_10 = sd(cv_error_media_10) which.max(cv_error_media_10 - codo_sub_10 <= min(cv_error_media_10)) reg_sub_10_codo =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1)) 
pred_reg_sub_10_codo = predict.regsubsets(reg_sub_10_codo, newdata = test, id=which.max(cv_error_media_10 - codo_sub_10 <= min(cv_error_media_10))) error_mss_sub_10_codo <- sqrt(mean((test$psoda - pred_reg_sub_10_codo)^2)) error_mss_sub_10_codo ##### c) cv_error_sub_for_10=matrix(NA,k,(ncol(entrenamiento)-1), dimnames=list(NULL, paste(1:(ncol(entrenamiento)-1)))) for(j in 1:k){ reg_for_10=regsubsets(psoda~.,data=entrenamiento[folds != j,], nvmax= (ncol(entrenamiento)-1), method= "forward") for (i in 1:(ncol(entrenamiento)-1)){ pred = predict.regsubsets(reg_for_10, entrenamiento[folds == j, ], id = i) cv_error_sub_for_10[j, i] = mean((entrenamiento$psoda[folds == j] - pred)^2) } } cv_error_for_media_10 = apply(cv_error_sub_for_10 ,2,mean) cv_error_for_media_10 plot(cv_error_for_media_10,pch=19,type="b", xlab="Numero de variables", ylab="Error CV") points(which.min(cv_error_for_media_10),cv_error_for_media_10[which.min(cv_error_for_media_10)], col="red",cex=2,pch=18) mejor_reg=regsubsets (psoda~.,data=entrenamiento , nvmax=(ncol(entrenamiento)-1), method= "forward") coef(mejor_reg ,which.min(cv_error_for_media_10)) reg_for_10 =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1), method= "forward") pred_reg_for_10 = predict.regsubsets(reg_for_10, newdata = test, id=which.min(cv_error_for_media_10)) error_mss_for_10 <- sqrt(mean((test$psoda - pred_reg_for_10)^2)) error_mss_for_10 # Regla codo codo_for_10 = sd(cv_error_for_media_10) which.max(cv_error_for_media_10 - codo_for_10 <= min(cv_error_for_media_10)) reg_for_10_codo =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1)) pred_reg_for_10_codo = predict.regsubsets(reg_for_10_codo, newdata = test, id=which.max(cv_error_for_media_10 - codo_for_10 <= min(cv_error_for_media_10))) error_mss_for_10_codo <- sqrt(mean((test$psoda - pred_reg_for_10_codo)^2)) error_mss_for_10_codo ##### d) #Mejor selección de conjuntos k = 5 folds = sample(1:k,nrow(entrenamiento),replace=TRUE) 
cv_error_sub_5=matrix(NA,k,(ncol(entrenamiento)-1), dimnames=list(NULL, paste(1:(ncol(entrenamiento)-1)))) for(j in 1:k){ reg_full_5 = regsubsets(psoda ~., data=entrenamiento[folds != j,], nvmax = (ncol(entrenamiento)-1)) for (i in 1:(ncol(entrenamiento)-1)){ pred = predict.regsubsets(reg_full_5, entrenamiento[folds == j, ], id = i) cv_error_sub_5[j, i] = mean((entrenamiento$psoda[folds == j] - pred)^2) } } cv_error_media_5 = apply(cv_error_sub_5 ,2,mean) cv_error_media_5 plot(cv_error_media_5,pch=19,type="b", xlab="Numero de variables", ylab="Error CV") points(which.min(cv_error_media_5),cv_error_media_5[which.min(cv_error_media_5)], col="red",cex=2,pch=18) mejor_reg=regsubsets (psoda~.,data=entrenamiento , nvmax=(ncol(entrenamiento)-1)) coef(mejor_reg ,which.min(cv_error_media_5)) reg_sub_5 =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1)) pred_reg_sub_5 = predict.regsubsets(reg_sub_5, newdata = test, id=which.min(cv_error_media_5)) error_mss_sub_5 <- sqrt(mean((test$psoda - pred_reg_sub_5)^2)) error_mss_sub_5 # Regla codo codo_sub_5 = sd(cv_error_media_5) which.max(cv_error_media_5 - codo_sub_5 <= min(cv_error_media_5)) #Selección por pasos hacia adelante cv_error_sub_for_5=matrix(NA,k,(ncol(entrenamiento)-1), dimnames=list(NULL, paste(1:(ncol(entrenamiento)-1)))) for(j in 1:k){ reg_for_5=regsubsets(psoda~.,data=entrenamiento[folds != j,], nvmax= (ncol(entrenamiento)-1),method= "forward") for (i in 1:(ncol(entrenamiento)-1)){ pred = predict.regsubsets(reg_for_5, entrenamiento[folds == j, ], id = i) cv_error_sub_for_5[j, i] = mean((entrenamiento$psoda[folds == j] - pred)^2) } } cv_error_for_media_5 = apply(cv_error_sub_for_5 ,2,mean) cv_error_for_media_5 plot(cv_error_for_media_5,pch=19,type="b", xlab="Numero de variables", ylab="Error CV") points(which.min(cv_error_for_media_5),cv_error_for_media_5[which.min(cv_error_for_media_5)], col="red",cex=2,pch=18) mejor_reg=regsubsets (psoda~.,data=entrenamiento , nvmax=(ncol(entrenamiento)-1), 
method= "forward") coef(mejor_reg ,which.min(cv_error_for_media_5)) reg_for_5 =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1), method= "forward") pred_reg_for_5 = predict.regsubsets(reg_for_5, newdata = test, id=which.min(cv_error_for_media_5)) error_mss_for_5 <- sqrt(mean((test$psoda - pred_reg_for_5)^2)) error_mss_for_5 # Regla codo codo_for_5 = sd(cv_error_for_media_5) which.max(cv_error_for_media_5 - codo_for_5 <= min(cv_error_for_media_5)) reg_for_5_codo =regsubsets(psoda~.,data= entrenamiento,nvmax=(ncol(entrenamiento)-1)) pred_reg_for_5_codo = predict.regsubsets(reg_for_5_codo, newdata = test, id=which.max(cv_error_for_media_5 - codo_for_5 <= min(cv_error_for_media_5))) error_mss_for_5_codo <- sqrt(mean((test$psoda - pred_reg_for_5_codo)^2)) error_mss_for_5_codo ##### e) tabla <- data.frame("Regresión" = c("Minimos cuadrados ordinarios", "Seleccion de subconjuntos CV 10", "Seleccion por pasos hacia adelante CV 10", "Seleccion de subconjuntos CV 5", "Seleccion por pasos hacia adelante CV 5" ), "Error_prueba" = c(error_reg_1, error_mss_sub_10, error_mss_for_10, error_mss_sub_5, error_mss_for_5_codo), "Número_variables" = c(" ",which.min(cv_error_media_10), which.min(cv_error_for_media_10), which.min(cv_error_media_5), which.min(cv_error_for_media_5))) tabla ##### f) # Lo miramos por Bonferroni-Holm reg_lineal <- lm(psoda ~pfries + pentree +prpblck +NJ+ BK + RR, data= entrenamiento) summary(reg_lineal) p = c(3.75*10^(-16), 0.000513, 0.017370, 0.000329, 1.11*10^(-8), 6.72*10^(-8)) #p-valores de la regresion # Los que sean TRUE los seleccionamos. 
p <= 0.05/length(p) # Eliminamos la variable prpblack reg_nueva <- lm(psoda ~pfries + pentree +NJ+ BK + RR, data= entrenamiento) pred = predict(reg_nueva, newdata = test) error_reg_nueva <- sqrt(MSE(y_pred = pred,y_true = test$psoda)) error_reg_nueva # El error es mayor que antes ##### g) matriz_entrenamiento = model.matrix(psoda~., data=entrenamiento)[,-1] matriz_test = model.matrix(psoda~., data=test)[,-1] grid = 10^seq(4, -2, length=100) modelo_ridge_10 = cv.glmnet(matriz_entrenamiento, entrenamiento$psoda, alpha=0, lambda=grid, nfolds = 10) mejor_lambda_ridge_10 = modelo_ridge_10$lambda.min mejor_lambda_ridge_10 plot(modelo_ridge_10) modelo_ridge_10_l=glmnet(matriz_entrenamiento,entrenamiento$psoda,alpha=0,lambda=grid, thresh = 1e-12) prediccion_ridge_10 = predict(modelo_ridge_10_l, newx=matriz_test, s=mejor_lambda_ridge_10) a = sqrt(mean((test$psoda - prediccion_ridge_10)^2)) a # Regla del codo lambda_codo_ridge_10 <- modelo_ridge_10$lambda.1se lambda_codo_ridge_10 prediccion_ridge_10_2=predict(modelo_ridge_10_l,s=lambda_codo_ridge_10,newx=matriz_test) error.ridge.2 <- sqrt(mean((prediccion_ridge_10_2-test$psoda )^2)) error.ridge.2 ##### h) modelo_LASSO_10= cv.glmnet(matriz_entrenamiento, entrenamiento$psoda, alpha=1, lambda=grid, nfolds = 10) mejor_lambda_LASSO_10 = modelo_LASSO_10$lambda.min mejor_lambda_LASSO_10 plot(modelo_LASSO_10) modelo_LASSO_10_l=glmnet(matriz_entrenamiento,entrenamiento$psoda,alpha=1,lambda=grid, thresh = 1e-12) prediccion_LASSO_10 = predict(modelo_LASSO_10_l, newx=matriz_test, s=mejor_lambda_LASSO_10) b = sqrt(mean((test$psoda - prediccion_LASSO_10)^2)) b # Regla del codo lambda_codo_LASSO_10 <- modelo_LASSO_10$lambda.1se lambda_codo_LASSO_10 prediccion_LASSO_10_2=predict(modelo_LASSO_10_l,s=lambda_codo_LASSO_10,newx=matriz_test) error.LASSO.2 <- sqrt(mean((prediccion_LASSO_10_2-test$psoda )^2)) error.LASSO.2 ##### i) # Ridge CV-5 modelo_ridge_5 = cv.glmnet(matriz_entrenamiento, entrenamiento$psoda, alpha=0, lambda=grid, nfolds = 5) 
mejor_lambda_ridge_5 = modelo_ridge_5$lambda.min mejor_lambda_ridge_5 modelo_ridge_5_l=glmnet(matriz_entrenamiento,entrenamiento$psoda,alpha=0,lambda=grid, thresh = 1e-12) prediccion_ridge_5 = predict(modelo_ridge_5_l, newx=matriz_test, s=mejor_lambda_ridge_5) c =sqrt(mean((test$psoda - prediccion_ridge_5)^2)) c # Regla del codo lambda_codo_ridge_5 <- modelo_ridge_5$lambda.1se lambda_codo_ridge_5 prediccion_ridge_5_2=predict(modelo_ridge_5_l,s=lambda_codo_ridge_5,newx=matriz_test) error.ridge.2 <- sqrt(mean((prediccion_ridge_5_2-test$psoda )^2)) error.ridge.2 # LASSO CV-5 modelo_LASSO_5= cv.glmnet(matriz_entrenamiento, entrenamiento$psoda, alpha=1, lambda=grid, nfolds = 5) mejor_lambda_LASSO_5 = modelo_LASSO_5$lambda.min mejor_lambda_LASSO_5 modelo_LASSO_5_l=glmnet(matriz_entrenamiento,entrenamiento$psoda,alpha=1,lambda=grid, thresh = 1e-12) prediccion_LASSO_5 = predict(modelo_LASSO_5_l, newx=matriz_test, s=mejor_lambda_LASSO_5) d =sqrt(mean((test$psoda - prediccion_LASSO_5)^2)) d # Regla del codo lambda_codo_LASSO_5 <- modelo_LASSO_5$lambda.1se lambda_codo_LASSO_5 prediccion_LASSO_5_2=predict(modelo_LASSO_5_l,s=lambda_codo_LASSO_5,newx=matriz_test) error.LASSO.2 <- sqrt(mean((prediccion_LASSO_5_2-test$psoda )^2)) error.LASSO.2 ##### j) acp=pcr(psoda~., data=entrenamiento,scale=TRUE, validation="CV") # CV_10 acp_cv_10 <- crossval(acp, segments = 10) summary(acp_cv_10, what = "validation") acp_pred_10_cv=predict(acp,newdata=test,ncomp=13) error_acp_10_cv<- sqrt(mean((acp_pred_10_cv - test$psoda)^2)) error_acp_10_cv # Regla del codo regla_codo_10 <- selectNcomp(acp, method = "onesigma", plot = TRUE, validation = "CV", segments = 10) regla_codo_10 acp_pred_10_codo=predict(acp,newdata=test,ncomp=regla_codo_10) error_acp_10_codo <- sqrt(mean((acp_pred_10_codo - test$psoda)^2)) error_acp_10_codo #CV_5 acp_cv_5 <- crossval(acp, segments = 5) summary(acp_cv_5, what = "validation") acp_pred_5_cv=predict(acp,newdata=test,ncomp=13) error_acp_5_cv<- sqrt(mean((acp_pred_5_cv - 
test$psoda)^2)) error_acp_5_cv # Regla del codo regla_codo_5 <- selectNcomp(acp, method = "onesigma", plot = TRUE, validation = "CV", segments = 5) regla_codo_5 acp_pred_5_codo=predict(acp,newdata=test,ncomp=regla_codo_5) error_acp_5_codo <- sqrt(mean((acp_pred_5_codo - test$psoda)^2)) error_acp_5_codo ##### k) pls=plsr(psoda~., data=entrenamiento ,scale=TRUE, validation="CV") # PLS CV 10 pls_cv_10 <- crossval(pls, segments = 10) summary(pls_cv_10, what = "validation") pls_pred_10_cv=predict(pls,newdata=matriz_test,ncomp=3) error_pls_10 <- sqrt(mean((pls_pred_10_cv - test$psoda)^2)) error_pls_10 codo_pls_10 <- selectNcomp(pls, method = "onesigma", plot = TRUE, validation = "CV", segments = 10) codo_pls_10 pls_pred_10_codo=predict(pls,newdata=matriz_test,ncomp=codo_pls_10) error_pls_10_codo <- sqrt(mean((pls_pred_10_codo - test$psoda)^2)) error_pls_10_codo # PLS CV 5 pls_cv_5 <- crossval(pls, segments = 5) plot(RMSEP(pls_cv_5), legendpos="topright") summary(pls_cv_5, what = "validation") pls_pred_5_cv=predict(pls,newdata=matriz_test,ncomp=2) error_pls_5 <- sqrt(mean((pls_pred_5_cv - test$psoda)^2)) error_pls_5 codo_pls_5 <- selectNcomp(pls, method = "onesigma", plot = TRUE, validation = "CV", segments = 5) codo_pls_5 pls_pred_5_codo=predict(pls,newdata=matriz_test,ncomp=codo_pls_5) error_pls_5_codo <- sqrt(mean((pls_pred_5_codo - test$psoda)^2)) error_pls_5_codo # Randomization codo_pls_random <- selectNcomp(pls, method = "randomization", plot = TRUE) codo_pls_random pls_pred_random <- predict(pls,newdata=matriz_test,ncomp=codo_pls_random) error_pls_random <- sqrt(mean((pls_pred_random - test$psoda)^2)) error_pls_random ##### l) lambda_grid <- 10^seq(2,-2, length = 100) alpha_grid <- seq(0,1, by = 0.05) Control <- trainControl(method = "cv", number = 10) buscar_grid <- expand.grid(alpha = alpha_grid, lambda = lambda_grid) entrenamiento_modelo <- train(psoda~., data = entrenamiento, method = "glmnet", tuneGrid = buscar_grid, trControl = Control, tuneLength = 10, 
standardize = TRUE, maxit = 1000000) best_tune_EN_10 <- entrenamiento_modelo$bestTune entrenamiento_modelo$bestTune plot(entrenamiento_modelo) modelo_glmnet <- entrenamiento_modelo$finalModel coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda) mejor_modelo <- glmnet(matriz_entrenamiento,entrenamiento$psoda, alpha=entrenamiento_modelo$bestTune$alpha, lambda = entrenamiento_modelo$bestTune$lambda, thresh = 1e-12) coef(mejor_modelo, s = entrenamiento_modelo$bestTune$lambda) cbind(coef(mejor_modelo, s = entrenamiento_modelo$bestTune$lambda), coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda)) pred_LASSO_elastic_10 <- predict(mejor_modelo,s=entrenamiento_modelo$bestTune$lambda,newx=matriz_test) error_pred_LASSO_elastic_10 <- sqrt(mean((pred_LASSO_elastic_10 - test$psoda)^2)) error_pred_LASSO_elastic_10 ##### m) Control <- trainControl(method = "cv", number = 5) buscar_grid <- expand.grid(alpha = alpha_grid, lambda = lambda_grid) entrenamiento_modelo <- train(psoda~., data = entrenamiento, method = "glmnet", tuneGrid = buscar_grid, trControl = Control, tuneLength = 10, standardize = TRUE, maxit = 1000000) best_tune_EN_5 <- entrenamiento_modelo$bestTune entrenamiento_modelo$bestTune plot(entrenamiento_modelo) modelo_glmnet <- entrenamiento_modelo$finalModel coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda) mejor_modelo <- glmnet(matriz_entrenamiento,entrenamiento$psoda, alpha=entrenamiento_modelo$bestTune$alpha, lambda = entrenamiento_modelo$bestTune$lambda, thresh = 1e-12) coef(mejor_modelo, s = entrenamiento_modelo$bestTune$lambda) cbind(coef(mejor_modelo, s = entrenamiento_modelo$bestTune$lambda), coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda)) pred_LASSO_elastic_5 <- predict(mejor_modelo,s=entrenamiento_modelo$bestTune$lambda,newx=matriz_test) error_pred_LASSO_elastic_5 <- sqrt(mean((pred_LASSO_elastic_5 - test$psoda)^2)) error_pred_LASSO_elastic_5 ##### n) # Ridge con cv10 n = nrow(matriz_entrenamiento) beta_ridge_10 = 
coef(modelo_ridge_10_l, s=mejor_lambda_ridge_10/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_ridge_10 = fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda ,beta_ridge_10, mejor_lambda_ridge_10/n) out_ridge_10 # Eliminar todas las variables menos la 17,18,20 matriz_entrenamiento_nueva <- matriz_entrenamiento[,c(17,18,20)] matriz_test_nueva <- matriz_test[,c(17,18,20)] grid=10^seq(4,-2, length =100) cv.mod=cv.glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=0,lambda=grid, nfolds = 10) plot(cv.mod) mejorlambda_1=cv.mod$lambda.min mejorlambda_1 mod=glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=0,lambda=grid) pred=predict(mod,s=mejorlambda_1 ,newx=matriz_test_nueva) error_1 <- sqrt(mean((pred-test$psoda )^2)) error_1 # LASSO con cv10 beta_LASSO_10 = coef(modelo_LASSO_10_l, s=mejor_lambda_LASSO_10/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_LASSO_10 <- fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda, beta_LASSO_10 ,mejor_lambda_LASSO_10/n) out_LASSO_10 # Eliminar todas las variables menos la 17,18,20 grid=10^seq(4,-2, length =100) cv.mod=cv.glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=1,lambda=grid, nfolds = 10) plot(cv.mod) mejorlambda_2=cv.mod$lambda.min mejorlambda_2 mod=glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=1,lambda=grid) pred=predict(mod,s=mejorlambda_2 ,newx=matriz_test_nueva) error_2 <- sqrt(mean((pred-test$psoda )^2)) error_2 # Ridge con cv5 beta_ridge_5 = coef(modelo_ridge_5_l, s=mejor_lambda_ridge_5/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_ridge_5 = fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda ,beta_ridge_5, mejor_lambda_ridge_5/n) out_ridge_5 # Eliminar todas las variables menos la 17,18,20 grid=10^seq(4,-2, length =100) cv.mod=cv.glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=0,lambda=grid, nfolds = 5) plot(cv.mod) mejorlambda_3=cv.mod$lambda.min mejorlambda_3 
mod=glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=0,lambda=grid) pred=predict(mod,s=mejorlambda_3 ,newx=matriz_test_nueva) error_3 <- sqrt(mean((pred-test$psoda )^2)) error_3 # LASSO con cv5 beta_LASSO_5 = coef(modelo_LASSO_5_l, s=mejor_lambda_LASSO_5/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_LASSO_5 <- fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda, beta_LASSO_5 ,mejor_lambda_LASSO_5/n) out_LASSO_5 # Eliminar todas las variables menos la 17,18,20 grid=10^seq(4,-2, length =100) cv.mod=cv.glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=1,lambda=grid, nfolds = 5) plot(cv.mod) mejorlambda_4=cv.mod$lambda.min mejorlambda_4 mod=glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda,alpha=1,lambda=grid) pred=predict(mod,s=mejorlambda_4 ,newx=matriz_test_nueva) error_4 <- sqrt(mean((pred-test$psoda )^2)) error_4 # LASSO with Elastic Net con cv10 beta_LASSO_EN_10 = coef(mejor_modelo, s=best_tune_EN_10$lambda/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_LASSO_EN_10 <- fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda, beta_LASSO_EN_10 ,best_tune_EN_10$lambda/n) out_LASSO_EN_10 # Eliminar todas las variables menos la 17,18,20 que son la 18,19 y 21 en el data set entrenamiento entrenamiento_nuevo <- entrenamiento[,c(1,18,19,21)] Control <- trainControl(method = "cv", number = 10) buscar_grid <- expand.grid(alpha = alpha_grid, lambda = lambda_grid) entrenamiento_modelo <- train(psoda~., data = entrenamiento_nuevo, method = "glmnet", tuneGrid = buscar_grid, trControl = Control, tuneLength = 10, standardize = TRUE, maxit = 1000000) entrenamiento_modelo$bestTune modelo_glmnet <- entrenamiento_modelo$finalModel coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda) mejor_modelo <- glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda, alpha=entrenamiento_modelo$bestTune$alpha, lambda = entrenamiento_modelo$bestTune$lambda, thresh = 1e-12) pred_LASSO_elastic_1 <- 
predict(mejor_modelo,s=entrenamiento_modelo$bestTune$lambda,newx=matriz_test_nueva) error_pred_LASSO_elastic_1 <- sqrt(mean((pred_LASSO_elastic_1 - test$psoda)^2)) error_pred_LASSO_elastic_1 # LASSO with Elastic Net con cv 5 beta_LASSO_EN_5 = coef(mejor_modelo, s=best_tune_EN_5$lambda/n, exact=TRUE, x = matriz_entrenamiento, y = entrenamiento$psoda)[-1] out_LASSO_EN_5 <- fixedLassoInf(matriz_entrenamiento,entrenamiento$psoda, beta_LASSO_EN_5 ,best_tune_EN_5$lambda/n) out_LASSO_EN_5 # Eliminar todas las variables menos la 17,18,20 Control <- trainControl(method = "cv", number = 5) buscar_grid <- expand.grid(alpha = alpha_grid, lambda = lambda_grid) entrenamiento_modelo <- train(psoda~., data = entrenamiento_nuevo, method = "glmnet", tuneGrid = buscar_grid, trControl = Control, tuneLength = 10, standardize = TRUE, maxit = 1000000) entrenamiento_modelo$bestTune modelo_glmnet <- entrenamiento_modelo$finalModel coef(modelo_glmnet, s = entrenamiento_modelo$bestTune$lambda) mejor_modelo <- glmnet(matriz_entrenamiento_nueva,entrenamiento$psoda, alpha=entrenamiento_modelo$bestTune$alpha, lambda = entrenamiento_modelo$bestTune$lambda, thresh = 1e-12) pred_LASSO_elastic_2 <- predict(mejor_modelo,s=entrenamiento_modelo$bestTune$lambda,newx=matriz_test_nueva) error_pred_LASSO_elastic_2 <- sqrt(mean((pred_LASSO_elastic_2 - test$psoda)^2)) error_pred_LASSO_elastic_2 ##### o) # Penalización independiente post_lasso_reg_indep = rlasso(entrenamiento$psoda~matriz_entrenamiento,post=TRUE, X.dependent.lambda = FALSE) print(post_lasso_reg_indep, all=FALSE) yhat_postlasso_new_indep = predict(post_lasso_reg_indep, newdata=matriz_test) error_postlasso_indep <- sqrt(mean((yhat_postlasso_new_indep - test$psoda )^2)) error_postlasso_indep # Penalización dependiente post_lasso_reg_dep = rlasso(entrenamiento$psoda~matriz_entrenamiento,post=TRUE, X.dependent.lambda = TRUE) print(post_lasso_reg_dep, all=FALSE) yhat_postlasso_new_dep = predict(post_lasso_reg_dep, newdata=matriz_test) 
error_postlasso_dep <- sqrt(mean((yhat_postlasso_new_dep - test$psoda )^2)) error_postlasso_dep ##### p) # Ambos modelos nos dan las mismas variables y los mismos coeficientes por lo que es igual para ambos. lasso.effect = rlassoEffects(x=matriz_entrenamiento, y=entrenamiento$psoda, index=c("pfries", "NJ"), post = TRUE, ) print(lasso.effect) summary(lasso.effect) confint(lasso.effect, level=0.95, joint=TRUE) plot(lasso.effect, main="Confidence Intervals") ##### q) escrito en el pdf ##### r) pls_cv_5$coefficient
d5209c496715bd41d7897702ef6e4466091e796d
22f93d06424cbeeb1343623c20b1256aae2a08df
/classwork1/R-intro1_classwork.R
cbebf5fc92bc125eba0c38190cf43c7844cebf0e
[]
no_license
normall777/MyDataAccessMethods
fedfc6c7814ec75a8cc7561d32186d1e2209203d
4c78903f2a5962082693f92303c0ddcbfd1079b9
refs/heads/master
2020-03-29T07:13:21.849549
2018-12-17T22:22:46
2018-12-17T22:22:46
149,657,373
0
0
null
null
null
null
UTF-8
R
false
false
1,441
r
R-intro1_classwork.R
# Classwork: first steps in R — arithmetic, sequences, vectors, base plotting,
# and a small Monte-Carlo simulation. Formatting reconstructed from a
# newline-flattened dump; comments translated to English. Russian string
# literals (plot titles, vector names) are runtime data and are kept verbatim.

# Basic arithmetic.
1234+4567
29-45
325/25
56*12
11*11
111*111
1111111*1111111
# Console display settings: 14 significant digits, 40-character output width.
options(digits=14)
options(width=40)
# Integer sequences and vectorized arithmetic on them.
5:32
1:10
(1:10)+3
(1:10)-3
(1:10)*3
(1:10)^2
(1:10)^3
3:4
# Modulo and integer division.
31 %% 7
31 %/% 7
7*4 + 3
# Scatter plots of small vectors with connecting lines.
x <-c(4, 1, 8, 9)
y <-c(6, 2, 4, 3)
plot(x,y); lines(x, y)
x <- 1:10; y <- x^2; plot(x,y)
plot(x,y); lines(x,y)
# Named vector of hours spent learning, shown as pie and bar charts.
learn <-c("stats" = 15, "math"= 10,"programming" = 30, "attempts" = 45)
pie(learn)
barplot(learn)
Z <- rnorm(1000)# 1000 standard normal random variates
# Relative-frequency histogram with the theoretical N(0,1) density overlaid.
# Titles are Russian: "Histogram of relative frequency" / "Density of the distribution".
hist(Z, prob = TRUE, main = "Гистограмма относительной частоты", sub = "Плотность распределения")
curve(dnorm(x), from = -3, to = 3, add = TRUE, col = "blue")
#---------
# Large products. NOTE: the second line below is a deliberate student typo —
# the trailing "?." makes R parse `?` as the help operator, which errors.
11111111*11111111
11111111*1111111?.
#Error in `?`(11111111 * 1111111, .) :
#  (translated) no documentation of type '12345677654321' and topic '.'
#  (or an error while processing help)
#The mistake is the misplaced question mark and period.
# Element-wise products of two vectors, plotted against each factor.
a <- c(3,7,12,15,20)
b <- c(2,5,8,11,15)
S <- a*b
plot(a,S); lines(a,S)
plot(b,S); lines(b,S)
plot(a,b)
# Vasya's study hours per subject (Russian labels: Mathematics, English,
# Physical education, Programming), shown as a pie chart.
vasya <- c("Математика" = 40, "Английский"=40, "Физическая культура"=10, "Программирование"=150)
pie(vasya)
# Simulation: drink volumes ~ N(mean = 450, sd = 4).
# First 5 draws inspected by eye, then 10,000 draws to estimate
# P(volume > 455) empirically.
drinks <- rnorm(5, mean = 450, sd = 4)
drinks
drinks > 455
drinks <- rnorm(10000, mean = 450, sd = 4)
spent <- sum(drinks>455)
spent
#1014/10000 ~ 10%  (one observed run; varies with the RNG seed)
184ac0fbe82633047d548e5f99c961d4d2ca77c1
60f1af254960315177c12f81558260d747582dc1
/codes/R/get_genebody_from_gtf.R
1254961d2fa584da7eca8c93258cccaa3a965ba4
[]
no_license
15101538237ren/tcga
4f1dd60ca524aa770a72380fec4081e0aeba02c6
be81c473c15aa669a704598600f1d21e7df0b1a6
refs/heads/master
2021-09-07T16:20:42.789796
2018-02-26T01:36:11
2018-02-26T01:36:11
113,286,135
0
0
null
null
null
null
UTF-8
R
false
false
858
r
get_genebody_from_gtf.R
# Extract gene-body coordinates for every gene in the Ensembl GRCh38.90 GTF,
# annotate them with HGNC symbols via biomaRt, and write the result as a
# headerless TSV for downstream use.
library(GenomicFeatures)

# Input GTF directory and output directory (user-specific absolute paths).
data_path = "~/PycharmProjects/tcga_raw_data/GRCh38/"
out_path = "~/PycharmProjects/tcga/global_files/"

# Build a transcript database from the GTF and pull one range per gene.
txdb <- makeTxDbFromGFF(paste(data_path, "Homo_sapiens.GRCh38.90.gtf",sep = ""), format="gtf")
genes <- genes(txdb)
gene_df <- as.data.frame(genes)
# Column 6 is taken as the Ensembl gene ID — presumably the gene_id column of
# the coerced GRanges frame (seqnames,start,end,width,strand,gene_id); confirm.
ensembl_ids<-gene_df[,6]

# Query Ensembl (human dataset) for HGNC symbols and gene descriptions.
library(biomaRt)
mart<-useMart("ensembl")
mart<- useDataset("hsapiens_gene_ensembl", mart)
attributs_df<- as.data.frame(listAttributes(mart)) #list all available attributes
genes_table <- getBM(filters= "ensembl_gene_id", attributes= c("ensembl_gene_id", "hgnc_symbol", "description"), values= ensembl_ids, mart= mart)

# Join coordinates with symbols on the Ensembl gene ID, keep columns
# 7, 2, 3, 4, 6 of the merged frame (assumed: symbol, chrom, start, end,
# strand — TODO confirm positions), and write without row/column names.
merged_gene_df<-merge(gene_df, genes_table, by.x="gene_id", by.y="ensembl_gene_id")
df_out <- merged_gene_df[ ,c(7,2,3,4,6)]
write.table(df_out, file= paste(out_path, "human_gene_bodys.tsv",sep = ""), col.names=F, row.names=F, sep="\t")
2b53022345283a6c5526dc6e1c8dee03dea0758d
f9e5ae04eae16761374e5c92f69db4a50f4fb34e
/R/NetworkView.R
7f1f05da0049df6f37dc804a4f4df1ef7fab21f3
[ "Apache-2.0" ]
permissive
PriceLab/TrenaViz
f09ddc1ff6fc0e824890c833b55cced05e67a90a
b88f6785ed6efa398863d697fa83d64201059061
refs/heads/master
2021-07-22T15:40:47.936451
2020-04-29T18:29:42
2020-04-29T18:29:42
159,706,619
0
0
null
null
null
null
UTF-8
R
false
false
19,618
r
NetworkView.R
#' import shiny #' import cyjShiny #' import TrenaProject #' import graph #' @name NetworkView #' @rdname NetworkView #' @aliases NetworkView #------------------------------------------------------------------------------------------------------------------------ # library(TrenaProject) # library(cyjShiny) #------------------------------------------------------------------------------------------------------------------------ .NetworkView <- setClass("NetworkView", representation = representation( quiet="logical", targetGene="character", tss="numeric", tbl.model="data.frame", tbl.regulatoryRegions="data.frame", state="environment") ) #------------------------------------------------------------------------------------------------------------------------ setGeneric('getGraph', signature='obj', function(obj) standardGeneric('getGraph')) #------------------------------------------------------------------------------------------------------------------------ setMethod('getGraph', 'NetworkView', function(obj){ tbl.nodes <- data.frame(id=c("A", "B", "C"), type=c("kinase", "TF", "glycoprotein"), lfc=c(1, 1, 1), count=c(0, 0, 0), stringsAsFactors=FALSE) tbl.edges <- data.frame(source=c("A", "B", "C"), target=c("B", "C", "A"), interaction=c("phosphorylates", "synthetic lethal", "unknown"), stringsAsFactors=FALSE) graph.json <- dataFramesToJSON(tbl.edges, tbl.nodes) targetGene <- obj@targetGene tbl.model <- obj@tbl.model tbl.reg <- obj@tbl.regulatoryRegions tss <- obj@tss g <- .geneRegulatoryModelToGraph(targetGene, tss, tbl.model, tbl.reg) g <- .addGeneModelLayout(g, xPos.span=1500) g }) #------------------------------------------------------------------------------------------------------------------------ #' Create an NetworkView object #' #' @description #' a shiny app #' #' @rdname NetworkView #' #' @param organism A character string, one of the supported species names: hsapiens, mmuscuulus #' @param genome A character string, one of the supported genome builds: hg38, 
mm10 #' @param quiet A logical indicating whether or not the Trena object should print output #' #' @return An object of the NetworkView class #' #' @export #' NetworkView <- function(targetGene, tss, tbl.model, tbl.regulatoryRegions, quiet=TRUE) { state <- new.env(parent=emptyenv()) .NetworkView(targetGene=targetGene, tss=tss, tbl.model=tbl.model, tbl.regulatoryRegions=tbl.regulatoryRegions, state=state, quiet=quiet) } # NetworkView #------------------------------------------------------------------------------------------------------------------------ setMethod("show", "NetworkView", function(object){ cat(paste("a NetworkView object from the TrenaViz package:", "\n")) cat(sprintf(" targetGene: %s\n", obj@targetGene)) cat(sprintf(" tss: %s\n", obj@tss)) cat(sprintf(" tbl.model: %d rows, %d columns\n", nrow(obj@tbl.model), ncol(obj@tbl.model))) cat(sprintf(" tbl.regulatoryRegions: %d rows, %d columns\n", nrow(obj@tbl.regulatoryRegions), ncol(obj@tbl.regulatoryRegions))) }) #------------------------------------------------------------------------------------------------------------------------ #' create and return the control-rich UI #' #' @rdname createPage #' @aliases createPage #' #' @param obj An object of class NetworkView #' #' @export #' setMethod("createPage", "NetworkView", function(obj) { fluidPage(id="networkViewPageContent", fluidRow( actionButton(inputId="fitNetworkButton", label="Fit"), actionButton(inputId="fitSelectedNodesButton", label="Fit Selection"), actionButton(inputId="removeNetworkButton", label="Remove Graph"), actionButton(inputId="genomicLayoutButton", label="GenomicLayout") ), fluidRow(column(width=12, cyjShinyOutput('cyjShiny'))) ) #cyjShinyOutput('cyjShiny', height=400) }) #------------------------------------------------------------------------------------------------------------------------ #' display the page #' #' @rdname displayPage #' @aliases displayPage #' #' @param obj An object of class NetworkView #' @param tf character 
string, the geneSymbol name of the transcription factor #' #' @export #' setMethod("displayPage", "NetworkView", function(obj){ printf("NetworkView displayPage") removeUI(selector="#networkViewPageContent", immediate=TRUE) insertUI(selector="#networkViewPage", where="beforeEnd", createPage(obj), immediate=TRUE) #js$cyjSetupResize(); js$cyjShinySetWidth(); later(function(){fit(session, 300)}, 1000) }) #------------------------------------------------------------------------------------------------------------------------ #' add shiny event handlers #' #' @rdname addEventHandlers #' @aliases addEventHandlers #' #' @param obj An object of class NetworkView #' @param session a Shiny session object #' @param input a Shiny input object #' @param output a Shiny output object #' #' @export #' setMethod("addEventHandlers", "NetworkView", function(obj, session, input, output){ printf("--- NetworkView::addEventHandlers") obj@state$session <- session obj@state$input <- input obj@state$output <- output observeEvent(input$fitNetworkButton, ignoreInit=TRUE, { fit(session, 80) }) observeEvent(input$fitSelectedNodesButton, ignoreInit=TRUE, { fitSelected(session, 80) }) observeEvent(input$removeNetworkButton, ignoreInit=TRUE, { removeGraph(session) }) observeEvent(input$genomicLayoutButton, ignoreInit=TRUE, { setNodePositions(session, obj@state$tbl.pos) }) observeEvent(input$viewNetworkButton, ignoreInit=FALSE, { printf("view network") updateTabItems(session, "sidebarMenu", selected="networkViewTab") # displayPage(obj) xyz <- "observing viewNetworkButton" output$cyjShiny <- renderCyjShiny({ printf("--- renderCyjShiny, triggered by viewNetworkButton") style.file <- system.file(package="TrenaViz", "extdata", "trenaModelStyle.js") g <- getGraph(obj) obj@state$g <- g print(g) graph.json <- graphNELtoJSON(g) xPos <- nodeData(g, attr="xPos") yPos <- nodeData(g, attr="yPos") tbl.pos <- data.frame(id=names(xPos), x=as.numeric(xPos), y=as.numeric(yPos), stringsAsFactors=FALSE) 
obj@state$tbl.pos <- tbl.pos cyjShiny(graph.json, layoutName="cola", styleFile=style.file, width=1000, height=1000) }) }) }) # addEventHandlers #------------------------------------------------------------------------------------------------------------------------ # by example: # # the incoming tbl.model presents these challenges: # # gene betaLasso lassoPValue pearsonCoeff rfScore betaRidge spearmanCoeff bindingSites # 6 E2F3 0 7.124847e-07 0.8683105 2.936714 0.04945335 0.8149973 NA # 45 HOXC13 0 3.987483e-02 -0.8640875 2.457541 -0.01531601 -0.7659080 NA # 97 ZNF263 0 6.236969e-01 0.9003067 2.134046 0.04104303 0.6360153 NA # 70 PRDM4 0 1.000000e+00 0.8984506 1.900193 0.03627523 0.7405583 NA # # and for which we want these results (first 4 rows only) # # tf pearson spearman betaLasso randomForest # 6 E2F3 0.8683105 0.8149973 0 2.936714 # 45 HOXC13 -0.8640875 -0.7659080 0 2.457541 # 97 ZNF263 0.9003067 0.6360153 0 2.134046 # 70 PRDM4 0.8984506 0.7405583 0 1.900193 .standardizeModelTable <- function(tbl.model) { required.colNames <- c("tf", "pearson", "spearman", "betaLasso", "randomForest") colnames.in <- tolower(colnames(tbl.model)) gene.col <- grep("^gene$", colnames.in) if(length(gene.col) > 0) colnames(tbl.model)[gene.col] <- "tf" pearson.col <- grep("pearson", colnames.in) if(length(pearson.col) > 0) colnames(tbl.model)[pearson.col] <- "pearson" spearman.col <- grep("spearman", colnames.in) if(length(spearman.col) > 0) colnames(tbl.model)[spearman.col] <- "spearman" betaLasso.col <- grep("betalasso", colnames.in) if(length(betaLasso.col) > 0) colnames(tbl.model)[betaLasso.col] <- "betaLasso" rf.1.col <- grep("forest", colnames.in) rf.2.col <- grep("rfscore", colnames.in) if(length(rf.1.col) > 0) colnames(tbl.model)[rf.1.col] <- "randomForest" if(length(rf.2.col) > 0) colnames(tbl.model)[rf.2.col] <- "randomForest" tbl.out <- tbl.model[, required.colNames] tbl.out } # .standardizeModelTable 
#------------------------------------------------------------------------------------------------------------------------ # by example: # # one instance of the incoming tbl.reg presents these challenges: # # motifName loc fp_start fp_end type name length strand sample_id method provenance score1 score2 score3 score4 score5 score6 chrom database shortMotif geneSymbol pubmedID organism source # Hsapiens-HOCOMOCOv10-CLOCK_HUMAN.H10MO.D chr3:128077447-128077466 128077441 128077451 motif.in.footprint Hsapiens-HOCOMOCOv10-CLOCK_HUMAN.H10MO.D 20 + ENCSR000EMT HINT lymphoblast_hint_16.minid 12 13.02250 1.81e-05 NA NA NA chr3 lymphoblast_hint_16 CLOCK_HUMAN.H10MO.D CLOCK 26586801 Hsapiens MotifDb # Hsapiens-HOCOMOCOv10-PURA_HUMAN.H10MO.D chr3:128417965-128417981 128417972 128417989 motif.in.footprint Hsapiens-HOCOMOCOv10-PURA_HUMAN.H10MO.D 17 - ENCSR000EJK HINT lymphoblast_hint_20.minid 12 12.04400 3.25e-05 NA NA NA chr3 lymphoblast_hint_20 PURA_HUMAN.H10MO.D PURA 26586801 Hsapiens MotifDb # Hsapiens-jaspar2016-HOXC11-MA0651.1 chr3:128617604-128617614 128617598 128617621 motif.in.footprint Hsapiens-jaspar2016-HOXC11-MA0651.1 11 - ENCSR000DBZ HINT lymphoblast_hint_20.minid 32 11.10000 7.99e-05 NA NA NA chr3 lymphoblast_hint_20 MA0651.1 HOXC11 24194598 Hsapiens MotifDb # Hsapiens-jaspar2016-SP4-MA0685.1 chr3:128487237-128487253 128487253 128487267 motif.in.footprint Hsapiens-jaspar2016-SP4-MA0685.1 17 - ENCSR000EJE HINT lymphoblast_hint_16.minid 24 4.01124 8.85e-05 NA NA NA chr3 lymphoblast_hint_16 MA0685.1 SP4 24194598 Hsapiens MotifDb # Hsapiens-jaspar2016-ZBTB7A-MA0750.1 chr3:128617856-128617867 128617847 128617892 motif.in.footprint Hsapiens-jaspar2016-ZBTB7A-MA0750.1 12 + ENCSR000DCA HINT lymphoblast_hint_16.minid 20 15.76400 2.28e-06 NA NA NA chr3 lymphoblast_hint_16 MA0750.1 ZBTB7A 24194598 Hsapiens MotifDb # # from which we wish to extract: # # chrom start end tf motif # 1 chr3 128483072 128483461 MAZ Hsapiens-HOCOMOCOv10-MAZ_HUMAN.H10MO.A # 2 chr3 128483072 128483461 
SP4 Hsapiens-HOCOMOCOv10-SP4_HUMAN.H10MO.D # 3 chr3 128483072 128483461 SP2 Hsapiens-HOCOMOCOv10-SP2_HUMAN.H10MO.C # 4 chr3 128483072 128483461 SP3 Hsapiens-HOCOMOCOv10-SP3_HUMAN.H10MO.B # 5 chr3 128483072 128483461 SP3 Hsapiens-SwissRegulon-SP3.SwissRegulon # 6 chr3 128483072 128483461 SP1 Hsapiens-HOCOMOCOv10-SP1_HUMAN.H10MO.C # # and we want # chrom start end name distance motifName # chr3 128483072 128483461 MAZ Hsapiens-HOCOMOCOv10-MAZ_HUMAN.H10MO.A # # regRegions.names <- unlist(lapply(1:nrow(tbl.reg), function(i){ # distance.from.tss <- tbl.reg$distance.from.tss[i] # region.size <- nchar(tbl.reg$match[i]) # motif.name <- tbl.reg$motifName[i] # if(distance.from.tss < 0) # sprintf("%s.fp.downstream.%05d.L%d.%s", targetGene, abs(distance.from.tss), region.size, motif.name) # else # sprintf("%s.fp.upstream.%05d.L%d.%s", targetGene, abs(distance.from.tss), region.size, motif.name) # })) # # tbl.reg$regionName <- regRegions.names # all.nodes <- unique(c(targetGene, tfs, regRegions.names)) # g <- addNode(all.nodes, g) # # nodeData(g, targetGene, "type") <- "targetGene" # nodeData(g, tfs, "type") <- "TF" # nodeData(g, regRegions.names, "type") <- "regulatoryRegion" # nodeData(g, all.nodes, "label") <- all.nodes # nodeData(g, regRegions.names, "label") <- tbl.reg$motifName # nodeData(g, regRegions.names, "distance") <- tbl.reg$distance # nodeData(g, regRegions.names, "motif") <- tbl.reg$motifName # .standardizeRegulatoryRegionsTable <- function(tbl.reg, targetGene, tss) { locs <- lapply(tbl.reg$loc, parseChromLocString) tbl.rough <- do.call(rbind, lapply(locs, as.data.frame)) tbl.rough$chrom <- as.character(tbl.rough$chrom) tbl.rough <- cbind(tbl.rough, tbl.reg[, c("motifName", "fp_start", "fp_end", "geneSymbol")]) make.name <- function(tss, start, tf){ distance.from.tss <- tss - start sprintf("%s:%d:%s", targetGene, distance.from.tss, tf) } regulatory.region.names <- unlist(lapply(1:nrow(tbl.reg), function(i) make.name(tss, tbl.rough$fp_start[i], 
tbl.rough$geneSymbol[i]))) tbl.rough$name <- regulatory.region.names tbl.rough$distance <- tss - tbl.rough$fp_start tbl.rough$targetGene <- targetGene tbl.out <- tbl.rough[, c("chrom", "fp_start", "fp_end", "distance", "name", "targetGene", "geneSymbol", "motifName")] colnames(tbl.out) <- c("chrom", "start", "end", "distance", "name", "targetGene", "tf", "motif") rownames(tbl.out) <- NULL tbl.out } # .standardizeRegulatoryRegionsTable #------------------------------------------------------------------------------------------------------------------------ .geneRegulatoryModelToGraph <- function(targetGene, tss, tbl.model, tbl.reg) { xyz <- ".geneRegulatoryModelToGraph" tbl.model <- .standardizeModelTable(tbl.model) tbl.reg <- .standardizeRegulatoryRegionsTable(tbl.reg, targetGene, tss) required.geneModelColumnNames <- c("tf", "pearson", "spearman", "betaLasso", "randomForest") required.regulatoryRegionsColumnNames <- c("chrom", "start", "end", "distance", "name", "targetGene", "tf", "motif") stopifnot(all(required.geneModelColumnNames %in% colnames(tbl.model))) stopifnot(all(required.regulatoryRegionsColumnNames %in% colnames(tbl.reg))) printf("genes: %d, %d occurences of %d motifs", length(tbl.model$tf), length(tbl.reg$motif), length(unique(tbl.reg$motif))) g <- graphNEL(edgemode = "directed") nodeDataDefaults(g, attr = "type") <- "undefined" # targetGene, tf, footprint nodeDataDefaults(g, attr = "label") <- "default node label" nodeDataDefaults(g, attr = "distance") <- 0 nodeDataDefaults(g, attr = "pearson") <- 0 nodeDataDefaults(g, attr = "randomForest") <- 0 nodeDataDefaults(g, attr = "betaLasso") <- 0 nodeDataDefaults(g, attr = "motif") <- "" nodeDataDefaults(g, attr = "xPos") <- 0 nodeDataDefaults(g, attr = "yPos") <- 0 edgeDataDefaults(g, attr = "edgeType") <- "undefined" tfs <- tbl.model$tf all.nodes <- unique(c(targetGene, tfs, tbl.reg$name)) g <- addNode(all.nodes, g) nodeData(g, targetGene, "type") <- "targetGene" nodeData(g, tfs, "type") <- "TF" 
nodeData(g, tbl.reg$name, "type") <- "regulatoryRegion" nodeData(g, all.nodes, "label") <- all.nodes xyz <- "NetworkView assiging graph node data" nodeData(g, tbl.reg$name, "label") <- tbl.reg$motif nodeData(g, tbl.reg$name, "distance") <- tbl.reg$distance nodeData(g, tbl.reg$name, "motif") <- tbl.reg$motifName nodeData(g, tfs, "pearson") <- tbl.model$pearson nodeData(g, tfs, "betaLasso") <- tbl.model$betaLasso nodeData(g, tfs, "randomForest") <- tbl.model$randomForest g <- addEdge(tbl.reg$tf, tbl.reg$name, g) edgeData(g, tbl.reg$tf, tbl.reg$name, "edgeType") <- "bindsTo" g <- graph::addEdge(tbl.reg$name, targetGene, g) edgeData(g, tbl.reg$name, targetGene, "edgeType") <- "regulatorySiteFor" g } # .geneRegulatoryModelToGraph #------------------------------------------------------------------------------------------------------------------------ .addGeneModelLayout <- function(g, xPos.span=1500) { all.distances <- sort(unique(unlist(nodeData(g, attr='distance'), use.names=FALSE))) print(all.distances) fp.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "regulatoryRegion")] tf.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "TF")] targetGene.nodes <- nodes(g)[which(unlist(nodeData(g, attr="type"), use.names=FALSE) == "targetGene")] # add in a zero in case all of the footprints are up or downstream of the 0 coordinate, the TSS span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="distance")))) span <- max(span.endpoints) - min(span.endpoints) footprintLayoutFactor <- 1 printf("initial: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor) footprintLayoutFactor <- xPos.span/span #if(span < 600) # # footprintLayoutFactor <- 600/span #if(span > 1000) # footprintLayoutFactor <- span/1000 printf("corrected: span: %d footprintLayoutFactor: %f", span, footprintLayoutFactor) xPos <- as.numeric(nodeData(g, fp.nodes, attr="distance")) * footprintLayoutFactor yPos <- 0 nodeData(g, fp.nodes, 
"xPos") <- xPos nodeData(g, fp.nodes, "yPos") <- yPos adjusted.span.endpoints <- range(c(0, as.numeric(nodeData(g, fp.nodes, attr="xPos")))) printf("raw span of footprints: %d footprintLayoutFactor: %f new span: %8.0f", span, footprintLayoutFactor, abs(max(adjusted.span.endpoints) - min(adjusted.span.endpoints))) tfs <- names(which(nodeData(g, attr="type") == "TF")) for(tf in tfs){ footprint.neighbors <- edges(g)[[tf]] if(length(footprint.neighbors) > 0){ footprint.positions <- as.integer(nodeData(g, footprint.neighbors, attr="xPos")) new.xPos <- mean(footprint.positions) if(is.na(new.xPos)) browser() if(is.nan(new.xPos)) browser() #printf("%8s: %5d", tf, new.xPos) } else{ new.xPos <- 0 } nodeData(g, tf, "yPos") <- sample(300:1200, 1) nodeData(g, tf, "xPos") <- new.xPos } # for tf nodeData(g, targetGene.nodes, "xPos") <- 0 nodeData(g, targetGene.nodes, "yPos") <- -200 g } # .addGeneModelLayout #------------------------------------------------------------------------------------------------------------------------
f63252f044500e005838ad11d22162144d216d8a
a87349aeb0fe7ec8264f159731c413ab4f9bded5
/project/etl/model.R
f822bf8122ecf566bc3255e81a399104999c3a3f
[]
no_license
ivan-rivera/EndOfMe
08bd5598206624cdf5b0d59eb9caa6e9cdde6f00
fdddd2b4ac8fc9b01a5f5f8497c4e4ed25fa0b57
refs/heads/master
2021-08-28T03:41:12.151848
2021-08-09T18:30:41
2021-08-09T18:30:41
207,624,691
0
0
null
null
null
null
UTF-8
R
false
false
5,013
r
model.R
# ================================
# Modelling
# ================================

#' Build and evaluate models
#'
#' Fits one xgbLinear model (via caret, bootstrap-resampled random search) per
#' response variable, then collects validation performance, non-zero variable
#' importances, and predictions for rows where the response is missing.
#'
#' @param sleep_collection list with processed sleep datasets; this function
#'   reads `sleep_collection[["modelling"]][["data"]]` and
#'   `sleep_collection[["modelling"]][["variables"]]` (`$id`, `$exclusions`)
#' @param response_vars vector of strings that currently supports the default values only
#' @param recent_data_for_validation boolean, if true then the last N days (determined by the global parameter prop_for_model_validation in settings.R) are used for validation, if false, then validation data is picked randomly out of the entire dataset
#'
#' @return a list of datasets: `performance` (actual vs predicted on the
#'   validation set), `variables` (scaled importances > 0), and `predictions`
#'   (predicted values for rows with a missing response)
#' NOTE(review): depends on the global `prop_for_model_validation` and on
#' dplyr/tibble/caret/rlang/magrittr being attached by the caller — confirm.
generate_predictions <- function(
  sleep_collection,
  response_vars = c("sleep_rating_next", "time_asleep_next"),
  recent_data_for_validation = FALSE # if we don't have much data, then recent records might be skewed towards either good or bad nights as there seems to be some autocorrelation in the sleep series
){
  # NOTE: right now this function is optimized specifically for 2 response variables
  # in a sense that it is designed to be used to generate graphics

  # Predictor names = all modelling-data columns except the id column and the
  # declared exclusions (which include the response variables themselves).
  predictor_vars <- sleep_collection[["modelling"]][["data"]] %>%
    select(
      -one_of(
        c(
          sleep_collection[["modelling"]][["variables"]]$id,
          sleep_collection[["modelling"]][["variables"]]$exclusions
        )
      )
    ) %>%
    colnames

  # Accumulators, grown by rbind at the end of each loop iteration.
  model_results <- list(
    "performance" = tibble(),
    "variables" = tibble(),
    "predictions" = tibble()
  )

  # generate a model for each response variable
  for(v in response_vars){
    print(sprintf("processing response variable %s...", v))

    # Rows where the current response is observed; `v` is a column name given
    # as a string, hence the rlang::parse_expr() + !! tidy-eval unquoting.
    model_fitting_data <- sleep_collection[["modelling"]][["data"]] %>%
      filter(!is.na(!!rlang::parse_expr(v)))

    if(recent_data_for_validation){
      # Hold out the most recent N days, N = total distinct days scaled by
      # the global prop_for_model_validation.
      days_for_model_validation <- floor(
        n_distinct(sleep_collection[["modelling"]][["data"]]$sleep_date) * prop_for_model_validation
      )
      fitting_collection <- list(
        "fitting" = model_fitting_data %>%
          filter(sleep_date < max(sleep_date) - days_for_model_validation),
        "validation" = model_fitting_data %>%
          filter(sleep_date >= max(sleep_date) - days_for_model_validation) %>%
          filter(!is.na(!!rlang::parse_expr(v)))
      )
    }
    else {
      # Random holdout: sample a fraction of dates with observed response.
      staging_dates <- model_fitting_data %>%
        filter(!is.na(!!rlang::parse_expr(v))) %>%
        sample_frac(prop_for_model_validation) %>%
        pull(sleep_date)
      fitting_collection <- list(
        "fitting" = model_fitting_data %>% filter(!sleep_date %in% staging_dates),
        "validation" = model_fitting_data %>% filter(sleep_date %in% staging_dates)
      )
    }

    # Predictor/response matrices for fitting and validation, plus predictors
    # for the rows whose response is missing (the prediction targets).
    model_data <- list(
      "fitting" = list(
        "predictors" = fitting_collection[["fitting"]] %>% select(one_of(predictor_vars)),
        "response" = fitting_collection[["fitting"]] %>% pull(v)
      ),
      "validation" = list(
        "predictors" = fitting_collection[["validation"]] %>% select(one_of(predictor_vars)),
        "response" = fitting_collection[["validation"]] %>% pull(v)
      ),
      "prediction" = list(
        "predictors" = sleep_collection[["modelling"]][["data"]] %>%
          filter(is.na(!!rlang::parse_expr(v))) %>%
          select(one_of(predictor_vars))
      )
    )

    # Linear-booster XGBoost tuned by RMSE: 20 random candidates, each scored
    # with 10 bootstrap resamples; predictors centered and scaled.
    target_model <- caret::train(
      x=as.data.frame(model_data[["fitting"]][["predictors"]]),
      y=model_data[["fitting"]][["response"]],
      method="xgbLinear",
      metric="RMSE",
      tuneLength=20,
      preProcOptions=list(method=c("center", "scale")),
      trControl=caret::trainControl(
        method = "boot",
        number = 10,
        search = "random",
        verboseIter = FALSE
      )
    )

    # Actual vs predicted on the held-out set, tagged with the response name.
    validation_results <- tibble(
      response = v,
      actual = model_data[["validation"]][["response"]],
      predicted = predict(
        target_model,
        model_data[["validation"]][["predictors"]]
      )
    )

    # Keep only predictors with importance > 0; rescale caret's 0-100 score
    # to 0-1 and prettify variable names for display.
    var_importance <- varImp(target_model)$importance %>%
      as.data.frame() %>%
      rownames_to_column() %>%
      rename(
        "variable" = rowname,
        "importance" = Overall
      ) %>%
      filter(importance > 0) %>%
      mutate(
        response = v,
        importance = importance / 100,
        variable = gsub("_", " ", variable)
        # variable = ifelse(
        #   grepl("before", variable),
        #   variable,
        #   paste0(variable, " yesterday")
        # )
      )

    # Predictions for the rows with a missing response, keyed by the id column.
    prediction_results <- tibble(
      variable = v,
      prediction_date = sleep_collection[["modelling"]][["data"]] %>%
        filter(is.na(!!rlang::parse_expr(v))) %>%
        pull(sleep_collection[["modelling"]][["variables"]]$id),
      prediction = predict(
        target_model,
        model_data[["prediction"]][["predictors"]]
      )
    )

    # magrittr's %<>% assigns the rbind result back into model_results.
    model_results[["performance"]] %<>% rbind(validation_results)
    model_results[["variables"]] %<>% rbind(var_importance)
    model_results[["predictions"]] %<>% rbind(prediction_results)
  }

  model_results
}
499dca05810e503030cb313c585804b231af3a58
a3b61b2926f9cf93af8fd7f774c2ca1323f56e12
/R_scripts/DE_genes_bar_chart.R
a998774f0723a3081155bb19a89d1f8b750a462f
[]
no_license
kerrimalone/AlvMac
77542b414e655bff2a84fc4e3fc93a753277a0df
dfbb9acffd90c7d3d24615232ba780ab6e0b6bab
refs/heads/master
2021-01-11T13:54:36.028650
2017-06-20T13:58:35
2017-06-20T13:58:35
94,886,756
1
0
null
null
null
null
UTF-8
R
false
false
13,221
r
DE_genes_bar_chart.R
############################### # Load required packages # ############################## library("ggplot2") #http://stackoverflow.com/questions/38268741/geom-bar-ggplot2-stacked-grouped-bar-plot-with-positive-and-negative-values-p ############################### # Read in and manipulate data # ############################## # Set working directory and load any previously saved data setwd("/Users/Kerri/Google Drive/Postdoc UCD /Alv mac work/EdgeR") #Create vectors with desirable variables for graphing Time.vec<-c(rep("02hr",4),rep("06hr",4),rep("24hr",4),rep("48hr",4)) Condition.vec<-c("MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB") Variable.vec<-c(rep("Up",2),rep("Down",2),rep("Up",2),rep("Down",2),rep("Up",2),rep("Down",2), rep("Up",2),rep("Down",2)) Variable.condition.vec<-c("MB_up","TB_up","MB_down","TB_down","MB_up","TB_up","MB_down","TB_down", "MB_up","TB_up","MB_down","TB_down","MB_up","TB_up","MB_down","TB_down") #Going to count how many genes are up and down so, #set up empty vector with blank entries to store the gene counts values.vec<-rep("x",16) #Read in DE gene data for each timepoint and treatment and subset #FDR < 0.05 and log2FC > 1 TB_2hr<-read.csv("FDR_0.05_logFC_DE_TB_2H.txt",sep="\t",header=TRUE) head(TB_2hr) dim(TB_2hr) TB_2hr<-na.omit(TB_2hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. 
dim(TB_2hr) TB_2hr_logFC<-as.vector(TB_2hr["logFC"]) #counting up and down regged genes based on logFC values #save result in a particular entry in the blank vector values.vec values.vec[2] <-sum(TB_2hr_logFC > 1) values.vec[4] <-sum(TB_2hr_logFC < 1) #Repeat for all timepoints and treatments bovis_2hr<-read.csv("FDR_0.05_logFC_DE_MB_2H.txt",sep="\t",header=TRUE) head(bovis_2hr) dim(bovis_2hr) bovis_2hr<-na.omit(bovis_2hr) dim(bovis_2hr) bovis_2hr_logFC<-as.vector(bovis_2hr["logFC"]) values.vec[1] <-sum(bovis_2hr_logFC > 1) values.vec[3] <-sum(bovis_2hr_logFC < 1) bovis_6hr<-read.csv("FDR_0.05_logFC_DE_MB_6H.txt",sep="\t",header=TRUE) head(bovis_6hr) dim(bovis_6hr) bovis_6hr<-na.omit(bovis_6hr) dim(bovis_6hr) bovis_6hr_logFC<-as.vector(bovis_6hr["logFC"]) values.vec[5] <-sum(bovis_6hr_logFC > 1) values.vec[7] <-sum(bovis_6hr_logFC < 1) TB_6hr<-read.csv("FDR_0.05_logFC_DE_TB_6H.txt",sep="\t",header=TRUE) head(TB_6hr) dim(TB_6hr) TB_6hr<-na.omit(TB_6hr) dim(TB_6hr) TB_6hr_logFC<-as.vector(TB_6hr["logFC"]) values.vec[6] <-sum(TB_6hr_logFC > 1) values.vec[8] <-sum(TB_6hr_logFC < 1) bovis_24hr<-read.csv("FDR_0.05_logFC_DE_MB_24H.txt",sep="\t",header=TRUE) head(bovis_24hr) dim(bovis_24hr) bovis_24hr<-na.omit(bovis_24hr) dim(bovis_24hr) bovis_24hr_logFC<-as.vector(bovis_24hr["logFC"]) values.vec[9] <-sum(bovis_24hr_logFC > 1) values.vec[11] <-sum(bovis_24hr_logFC < 1) TB_24hr<-read.csv("FDR_0.05_logFC_DE_TB_24H.txt",sep="\t",header=TRUE) head(TB_24hr) dim(TB_24hr) TB_24hr<-na.omit(TB_24hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. rownames(TB_24hr)<-TB_24hr[,1] dim(TB_24hr) TB_24hr_logFC<-as.vector(TB_24hr["logFC"]) values.vec[10] <-sum(TB_24hr_logFC > 1) values.vec[12] <-sum(TB_24hr_logFC < 1) bovis_48hr<-read.csv("FDR_0.05_logFC_DE_MB_48H.txt",sep="\t",header=TRUE) head(bovis_48hr) dim(bovis_48hr) bovis_48hr<-na.omit(bovis_48hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. 
Need to be removed for venn. dim(bovis_48hr) bovis_48hr_logFC<-as.vector(bovis_48hr["logFC"]) values.vec[13] <-sum(bovis_48hr_logFC > 1) values.vec[15] <-sum(bovis_48hr_logFC < 1) TB_48hr<-read.csv("FDR_0.05_logFC_DE_TB_48H.txt",sep="\t",header=TRUE) head(TB_48hr) dim(TB_48hr) TB_48hr<-na.omit(TB_48hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. dim(TB_48hr) TB_48hr_logFC<-as.vector(TB_48hr["logFC"]) values.vec[14] <-sum(TB_48hr_logFC > 1) values.vec[16] <-sum(TB_48hr_logFC < 1) values.vec #create a new df to store all of the above info with desired variables bar_data.raw<-data.frame(a=character(),b=character(),c=character(),d=numeric(), e=character()) bar_data<-rbind(bar_data.raw, data.frame(a=Time.vec, b=Condition.vec, c=Variable.vec, d=as.numeric(values.vec), e=Variable.condition.vec)) colnames(bar_data)<-c("Time","Condition","Variable","Value","Variable.condition") #Make custom labels for legend of graph to include both italicised and plain text label_1<-expression(paste(italic("M. bovis")," up")) label_2<-expression(paste(italic("M. tuberculosis")," up")) label_3<-expression(paste(italic("M. bovis")," down")) label_4<-expression(paste(italic("M. 
tuberculosis")," down")) ######### # Plot # ######### q<-ggplot(bar_data, aes(Time), ylim(-1300:1300)) + geom_bar(data = subset(bar_data, Variable == "Up"), aes(y = Value, fill = Variable.condition), stat = "identity", position = "dodge",colour="black",size=0.4) + scale_fill_manual(values=c("#75a5e5","#323cd3","#f7c0cb","#bc2944","#b5b1b2","#605e5f"), name=" ", breaks=c("MB_up", "TB_up", "MB_down", "TB_down"), #define the #breaks so that you can relabel labels=c(label_1,label_2,label_3,label_4)) + geom_bar(data = subset(bar_data, Variable == "Down"), #colours are bovis up, tb up, bovis down, tb down aes(y = -Value, fill = Variable.condition), stat = "identity", position = "dodge",colour="black",size=0.4) + geom_hline(yintercept = 0,colour = "black") + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.text.align = 0) #aligning the legend labels to legend boxes q + geom_text(data = subset(bar_data, Variable == "Up"), aes(Time, Value, group=Condition, label=Value), position = position_dodge(width=0.9), vjust = -0.25, size=4) + geom_text(data = subset(bar_data, Variable == "Down"), aes(Time, -Value, group=Condition, label=Value), position = position_dodge(width=0.9), vjust = 1.25, size=4) + coord_cartesian(ylim = c(-1300, 1300)) + scale_x_discrete(name="Time post-infection", breaks=c("02hr","06hr","24hr","48hr"), labels=c("2hr","6hr","24hr","48hr")) + #getting rid of the 0 in 02hr and 06hr scale_y_continuous("Number of differentially expressed genes") + theme(legend.text=element_text(size=9),legend.key.size=unit(0.4,"cm")) + #changing size of legend theme(axis.title.x=element_text(size=11)) + theme(axis.title.y=element_text(size=11)) + theme(legend.position="bottom", legend.box = "horizontal") + #horizontal legend at bottom of graph theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black", size=0.2)) ####################################### # Read in 
and manipulate data for FDR # ####################################### setwd("/Users/Kerri/Google Drive/Postdoc UCD /Alv mac work/EdgeR") #Create vectors with desirable variables for graphing Time.vec<-c(rep("02hr",4),rep("06hr",4),rep("24hr",4),rep("48hr",4)) Condition.vec<-c("MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB","MB","TB") Variable.vec<-c(rep("Up",2),rep("Down",2),rep("Up",2),rep("Down",2),rep("Up",2),rep("Down",2), rep("Up",2),rep("Down",2)) Variable.condition.vec<-c("MB_up","TB_up","MB_down","TB_down","MB_up","TB_up","MB_down","TB_down", "MB_up","TB_up","MB_down","TB_down","MB_up","TB_up","MB_down","TB_down") #Going to count how many genes are up and down so, #set up empty vector with blank entries to store the gene counts values.vec<-rep("x",16) #Read in DE gene data for each timepoint and treatment and subset TB_2hr<-read.csv("FDR_0.05_DE_TB_2H.txt",sep="\t",header=TRUE) head(TB_2hr) dim(TB_2hr) TB_2hr<-na.omit(TB_2hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. 
dim(TB_2hr) TB_2hr<-as.vector(TB_2hr["logFC"]) #counting up and down regged genes based on FDR values #save result in a particular entry in the blank vector values.vec values.vec[2] <-sum(TB_2hr > 0) values.vec[4] <-sum(TB_2hr < 0) #Repeat for all timepoints and treatments bovis_2hr<-read.csv("FDR_0.05_DE_MB_2H.txt",sep="\t",header=TRUE) head(bovis_2hr) dim(bovis_2hr) bovis_2hr<-na.omit(bovis_2hr) dim(bovis_2hr) bovis_2hr<-as.vector(bovis_2hr["logFC"]) values.vec[1] <-sum(bovis_2hr > 0) values.vec[3] <-sum(bovis_2hr < 0) bovis_6hr<-read.csv("FDR_0.05_DE_MB_6H.txt",sep="\t",header=TRUE) head(bovis_6hr) dim(bovis_6hr) bovis_6hr<-na.omit(bovis_6hr) dim(bovis_6hr) bovis_6hr<-as.vector(bovis_6hr["logFC"]) values.vec[5] <-sum(bovis_6hr > 0) values.vec[7] <-sum(bovis_6hr < 0) TB_6hr<-read.csv("FDR_0.05_DE_TB_6H.txt",sep="\t",header=TRUE) head(TB_6hr) dim(TB_6hr) TB_6hr<-na.omit(TB_6hr) dim(TB_6hr) TB_6hr<-as.vector(TB_6hr["logFC"]) values.vec[6] <-sum(TB_6hr > 0) values.vec[8] <-sum(TB_6hr < 0) bovis_24hr<-read.csv("FDR_0.05_DE_MB_24H.txt",sep="\t",header=TRUE) head(bovis_24hr) dim(bovis_24hr) bovis_24hr<-na.omit(bovis_24hr) dim(bovis_24hr) bovis_24hr<-as.vector(bovis_24hr["logFC"]) values.vec[9] <-sum(bovis_24hr > 0) values.vec[11] <-sum(bovis_24hr < 0) TB_24hr<-read.csv("FDR_0.05_DE_TB_24H.txt",sep="\t",header=TRUE) head(TB_24hr) dim(TB_24hr) TB_24hr<-na.omit(TB_24hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. rownames(TB_24hr)<-TB_24hr[,1] dim(TB_24hr) TB_24hr<-as.vector(TB_24hr["logFC"]) values.vec[10] <-sum(TB_24hr > 0) values.vec[12] <-sum(TB_24hr < 0) bovis_48hr<-read.csv("FDR_0.05_DE_MB_48H.txt",sep="\t",header=TRUE) head(bovis_48hr) dim(bovis_48hr) bovis_48hr<-na.omit(bovis_48hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. 
dim(bovis_48hr) bovis_48hr<-as.vector(bovis_48hr["logFC"]) values.vec[13] <-sum(bovis_48hr > 0) values.vec[15] <-sum(bovis_48hr < 0) TB_48hr<-read.csv("FDR_0.05_DE_TB_48H.txt",sep="\t",header=TRUE) head(TB_48hr) dim(TB_48hr) TB_48hr<-na.omit(TB_48hr) #NA rows correspond to ncRNAs and thus do not have gene symbols. Need to be removed for venn. dim(TB_48hr) TB_48hr<-as.vector(TB_48hr["logFC"]) values.vec[14] <-sum(TB_48hr > 0) values.vec[16] <-sum(TB_48hr < 0) values.vec #create a new df to store all of the above info with desired variables bar_data.raw<-data.frame(a=character(),b=character(),c=character(),d=numeric(), e=character()) bar_data<-rbind(bar_data.raw, data.frame(a=Time.vec, b=Condition.vec, c=Variable.vec, d=as.numeric(values.vec), e=Variable.condition.vec)) colnames(bar_data)<-c("Time","Condition","Variable","Value","Variable.condition") ######### # Plot # ######### #Make custom labels for legend of graph to include both italicised and plain text label_1<-expression(paste(italic("M. bovis")," up")) label_2<-expression(paste(italic("M. tuberculosis")," up")) label_3<-expression(paste(italic("M. bovis")," down")) label_4<-expression(paste(italic("M. 
tuberculosis")," down")) q<-ggplot(bar_data, aes(Time), ylim(-4000:4000)) + geom_bar(data = subset(bar_data, Variable == "Up"), aes(y = Value, fill = Variable.condition), stat = "identity", position = "dodge",colour="black",size=0.4) + scale_fill_manual(values=c("#75a5e5","#323cd3","#f7c0cb","#bc2944","#b5b1b2","#605e5f"), name=" ", breaks=c("MB_up", "TB_up", "MB_down", "TB_down"), #define the #breaks so that you can relabel labels=c(label_1,label_2,label_3,label_4)) + geom_bar(data = subset(bar_data, Variable == "Down"), #colours are bovis up, tb up, bovis down, tb down aes(y = -Value, fill = Variable.condition), stat = "identity", position = "dodge",colour="black",size=0.4) + geom_hline(yintercept = 0,colour = "black") + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.text.align = 0) #aligning the legend labels to legend boxes q + geom_text(data = subset(bar_data, Variable == "Up"), aes(Time, Value, group=Condition, label=Value), position = position_dodge(width=0.9), vjust = -0.25, size=4) + geom_text(data = subset(bar_data, Variable == "Down"), aes(Time, -Value, group=Condition, label=Value), position = position_dodge(width=0.9), vjust = 1.25, size=4) + coord_cartesian(ylim = c(-4000, 4000)) + scale_x_discrete(name="Time post-infection", breaks=c("02hr","06hr","24hr","48hr"), labels=c("2hr","6hr","24hr","48hr")) + #getting rid of the 0 in 02hr and 06hr scale_y_continuous("Number of differentially expressed genes") + theme(legend.text=element_text(size=9),legend.key.size=unit(0.4,"cm")) + #changing size of legend theme(axis.title.x=element_text(size=11)) + theme(axis.title.y=element_text(size=11)) + theme(legend.position="bottom", legend.box = "horizontal") + #horizontal legend at bottom of graph theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black", size=0.2))
f9da7f61beb6afc5eaceab48bcbd69d28559b81d
efcf5fd4b6137da8e32d69c73de74d806d5db630
/R/getAuthenticationKey.R
c3e64dbf2a43565a8c8959b8d3a8751824fc4201
[]
no_license
bestdan/telegramr
49c785a3fbb8457b90c253ac15ea7d538b9357f6
dcf617e29f99b87de2b075111daf2a4e06099fb9
refs/heads/master
2021-08-19T17:30:10.508546
2017-11-27T03:03:14
2017-11-27T03:04:19
112,140,698
0
0
null
null
null
null
UTF-8
R
false
false
476
r
getAuthenticationKey.R
#' @name getAuthenticationKey #' @title getAuthenticationKey #' @description Get a local authentication key for interacting with Telegram #' @param bot The yaml block which corresponds to your key. #' @importFrom yaml yaml.load_file #' @examples #' \dontrun{ # res <- getAuthenticationKey(bot="life_tasker") #' } getAuthenticationKey <- function(file_path = "~/src/telegram_credentials.yaml", bot){ token <- yaml::yaml.load_file(file_path)[[bot]]$token return(token) }
3eab1135b9dbeed95f24f7d4fc38fee05fd11f32
29585dff702209dd446c0ab52ceea046c58e384e
/cocorresp/R/coca.formula.R
01ade955390c3dc7b5a6cd01054346a0f969b690
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
2,717
r
coca.formula.R
"coca.formula" <- function(formula, data, method = c("predictive", "symmetric"), reg.method = c("simpls", "eigen"), weights = NULL, n.axes = NULL, symmetric = FALSE, ...) { parseFormula <- function (formula, data) { Terms <- terms(formula, "Condition", data = data) flapart <- fla <- formula <- formula(Terms, width.cutoff = 500) specdata <- formula[[2]] Yresponse <- as.matrix(eval(specdata, data, parent.frame())) formula[[2]] <- NULL Ypredictors <- eval(formula[[2]], data, parent.frame()) #if(class(Ypredictors) == "data.frame") # { # return(list(Yresponse = Yresponse, Ypredictors = Ypredictors)) # } else { if (formula[[2]] == "1" || formula[[2]] == "0") Ypredictors <- NULL else { mf <- model.frame(formula, data, na.action = na.fail) Ypredictors <- model.matrix(formula, mf) if (any(colnames(Ypredictors) == "(Intercept)")) { xint <- which(colnames(Ypredictors) == "(Intercept)") Ypredictors <- Ypredictors[, -xint, drop = FALSE] } } #} list(Yresponse = Yresponse, Ypredictors = Ypredictors) } if (missing(data)) data <- parent.frame() dat <- parseFormula(formula, data) x <- dat$Ypredictors y <- dat$Yresponse nam.dat <- list(namY = deparse(formula[[2]], width.cutoff = 500), namX = deparse(formula[[3]], width.cutoff = 500)) if (nam.dat$namX == ".") nam.dat$namX <- deparse(substitute(data)) if(any(rowSums(y) <= 0 )) stop("all row sums must be >0 in data matrix y") if(any((csum <- colSums(y)) <= 0 )) { y <- y[, csum > 0, drop = FALSE] message("some species contain no data and were removed from data matrix y\n") } if(any(rowSums(x) <= 0 )) stop("all row sums must be >0 in data matrix x") if(any((csum <- colSums(x)) <= 0 )) { x <- x[, csum > 0, drop = FALSE] message("some species contain no data and were removed from data matrix x\n") } method <- match.arg(method) if(method == "predictive") { reg.method <- match.arg(reg.method) retval <- switch(reg.method, simpls = predcoca.simpls(y, x, R0 = weights, n.axes = n.axes, nam.dat), eigen = predcoca.eigen(y, x, R0 = weights, n.axes = 
n.axes, nam.dat)) } else { retval <- symcoca(y, x, n.axes = n.axes, R0 = weights, symmetric = symmetric, nam.dat) } retval }
d7e9097491cb2cd44e26f6a354d1b6b748809cb5
910060d06b6c929bd49be80e2d2044e4e691fc0a
/R/R Scripts/Bioestadistica-R_03_Tratamiento_de_datos_en_R.r
e278d6b584bafa2ec71044f1c346d7de724c9338
[]
no_license
Rodrigo-MP/Masterbioinfo
701b28e7cdbf4f28ce6b2d7d6d3aa6ca7433f8ff
272d9a99b4e7dd4b14241e94c4bee7f9ccdbd985
refs/heads/master
2020-08-31T01:17:21.541780
2019-11-19T19:13:14
2019-11-19T19:13:14
218,543,989
0
0
null
null
null
null
ISO-8859-1
R
false
false
4,784
r
Bioestadistica-R_03_Tratamiento_de_datos_en_R.r
################################################################################ ################################################################################ ## CURSO: Bioestadística con R - Máster de Bioinformática ## ## Autor: Jesús Herranz ## ## Sesión 03: Tratamiento de datos con R ## ################################################################################ ################################################################################ ################################################################################ ## Data frames ################################################################################ df <- data.frame( ID=c(1,3,4,5,8,9,10,11), edad=c(34,46,23,19,23,11,14,34), sexo=c("H","M","M","M","H","M","H","M") ) df df$edad mean(df$edad) df[ , c("edad","sexo")] df[ , 2:3] names(df) df$tratamiento <- c(0,0,0,1,1,0,1,0) df ## Funciones con data frames dim(df) head(df) tail(df) names(df) names(df)[2] ## Orden merge df1 <- data.frame( ID =c(1,2,3,4), edad=c(12,34,44,54) ) df2 <- data.frame( ID2=c(1,2,3,4), hta=c(0,1,0,1) ) df3 <- merge(df1, df2, by.x="ID", by.y="ID2" ) df3 df2 <- data.frame( ID2=c(1,2,4,5), hta=c(0,1,0,1) ) df3 <- merge(df1, df2, by.x="ID", by.y="ID2" ) df3 df3 <- merge(df1, df2, by.x="ID", by.y="ID2", all=T ) df3 df3 <- merge(df1, df2, by.x="ID", by.y="ID2", all.x=T ) df3 df3 <- merge(df1, df2, by.x="ID", by.y="ID2", all.y=T ) df3 ## Uniendo filas con rbind df1 <- data.frame( ID =c(1,2,3,4), edad=c(12,34,44,54) ) df2 <- data.frame( ID =c(5,6), edad=c(42,28) ) df3 <- rbind(df1, df2) df3 ################################################################################ ## Factores ################################################################################ df <- data.frame( ID=c(1,3,4,5,8,9,10,11), edad=c(34,46,23,19,23,11,14,34), sexo=c("H","M","M","M","H","M","H","M") ) df$tratamiento <- c(0,0,0,1,1,0,1,0) is.factor(df$edad) is.factor(df$sexo) is.factor(df$tratamiento) df$sexo df$tratamiento levels(df$tratamiento) 
df$tratamiento <- as.factor(df$tratamiento) df$tratamiento levels(df$tratamiento) ######################### ## Algunos problemas con Factores x <- c(0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2) x<-factor(x) table(x) levels(x) ## Asigna valores 1,2,3 .... a las categorías ordenadas as.integer(x) x.num <- as.integer( as.character(x) ) ## Truco x.num ## Guarda los niveles siempre x [ x == 2 ] <- 1 x table(x) x = as.factor ( as.character( x )) x ################################################################################ ## Lectura de ficheros ################################################################################ f1 <- read.table(file="C://Bioestadistica con R/Ficheros para importar/Ejemplo 1.txt", header=T) dim(f1) head(f1) f2 <- read.csv(file="C://Bioestadistica con R/Datos/Bajo peso al nacer.csv", sep=";") dim(f2) head(f2) ################################################################################ ## Importación de ficheros de otros paquetes estadísticos ################################################################################ library(foreign) library(xlsx) ####################### ## SPSS f3<-read.spss("C://Bioestadistica con R/Ficheros para importar/Ejemplo 3.sav", to.data.frame=TRUE) head(f3) ####################### ## STATA f4<-read.dta("C://Bioestadistica con R/Ficheros para importar/Ejemplo 4.dta") head(f4) ####################### ## EXCEL - 2 hojas f5<-read.xlsx ( "C://Bioestadistica con R/Ficheros para importar/Ejemplo 5.xlsx", sheetIndex=1 ) head(f5) f6<-read.xlsx ( "C://Bioestadistica con R/Ficheros para importar/Ejemplo 5.xlsx", sheetIndex=2 ) head(f6) ################################################################################ ## Ficheros de salida ################################################################################ ## Salvamos a fichero de texto un dataframe write.table ( f4, "C://Bioestadistica con R/Temp/Ejemplo 4.txt", quote=FALSE , sep="\t", col.names=TRUE, row.names=FALSE) ## Se crea un fichero de salida FileOut <- 
file("C://Bioestadistica con R/Temp/Resultados 4.csv", "w") cat ( "Variable;Media;SD;Median", file=FileOut, sep="\n") cat ( paste ("Edad", mean(f4$edad, na.rm=T), sd(f4$edad, na.rm=T), median(f4$edad, na.rm=T), sep=";") , file=FileOut, sep="\n") close(FileOut)
589daf830190a5543fa76a51301ecd6e7805bd09
54b4976030ae6a42e10282c8f41609ef266721c9
/R/ecd-package.R
0f11f539b91b4b3f44e8cfea0b21b10296083ee1
[]
no_license
cran/ecd
b1be437b407e20c34d65bcf7dbee467a9556b4c1
18f3650d6dff442ee46ed7fed108f35c4a4199b9
refs/heads/master
2022-05-18T20:24:56.375378
2022-05-09T20:10:02
2022-05-09T20:10:02
48,670,406
0
0
null
null
null
null
UTF-8
R
false
false
1,516
r
ecd-package.R
#' ecd: A package for the stable lambda distribution family. #' #' The ecd package provides the core classes and functions for the stable lambda distribution family. #' The stable lambda distribution is implemented in \code{\link{dsl}} section. #' The lambda distribution uses the \code{ecld} namespace. SGED is considered part of ecld. #' (See \code{\link{ecld-class}} for definition.) #' The original elliptic lambda distribution uses the generic methods or \code{ecd} namespace. #' (See \code{\link{ecd-class}} for definition.) #' The option pricing API uses the \code{ecop} namespace. #' (See \code{\link{ecop-class}} for definition.) #' Most helper utilities are named under either \code{ecd} or \code{ecld}. #' #' @author Stephen H-T. Lihn #' #' @docType package #' @name ecd-package #' @import xts methods polynom graphics moments stabledist parallel yaml RSQLite #' #' @seealso The two main classes are \code{\link{ecd-class}} and \code{\link{ecld-class}} #' NULL # Some areas of this package require multi-core capability cores <- switch( Sys.info()[['sysname']], Windows = 1, Linux = parallel::detectCores(), Darwin = parallel::detectCores(), parallel::detectCores() ) if (is.null(getOption("mc.cores"))) { options("mc.cores"=cores) } # MPFR default settings if (is.null(getOption("ecd.precBits"))) { options("ecd.precBits"=120L) } # MPFR default Inf conversion, number of sigma as replacement for +/- Inf # for integrateR and imgf .ecd.mpfr.N.sigma <- 300 # end
359bc16c0415e60950691c7072c0b49c4e6841c6
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/provenance/examples/minsorting.Rd.R
b556e77007bc20e588046873e54807f80f492f0c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
351
r
minsorting.Rd.R
library(provenance) ### Name: minsorting ### Title: Assess settling equivalence of detrital components ### Aliases: minsorting ### ** Examples data(endmembers,densities) distribution <- minsorting(endmembers,densities,sname='ophiolite',phi=2, sigmaphi=1,medium="seawater",by=0.05) plot(distribution,cumulative=FALSE)
ac012aed3fd295150526110c53e2f6c3d246c68c
cbf60188ccba4933635055f23c97af3f409e42aa
/Danica_Ex04_01.R
df2c1f2926e6768975f73e96ca6fff72278ec772
[]
no_license
dshipley2/Learning_R
3890d4e09718b8a1ec8b7f168359c413135459b3
b7ed062a7be68242a621f8858b503be5e6905d89
refs/heads/master
2020-04-28T06:53:47.198218
2019-03-12T22:08:34
2019-03-12T22:08:34
175,074,072
0
0
null
null
null
null
UTF-8
R
false
false
1,651
r
Danica_Ex04_01.R
# Up and running with R # Ex04_01 # Recoding Variables # Use dataset "social_network.csv" which records the # gender and age of 202 online survey respondents # along with their preferred social networking sites # and an estimate of how many times they log in per week # Create data frame "sn" from CSV file w/ headers sn <- read.csv("C:/Users/Danica_Shipley/Desktop/social_network.csv", header = T) # Install and load "psch" package install.packages("psych") library("psych") # Original Variable Times hist(sn$Times) describe(sn$Times) # Normal skewness is 0, so above 10 is really high. Normal kurtosis is 0, so 120 is really high # z-scores # Use built-in function "scale" times.z <- scale(sn$Times) hist(times.z) describe(times.z) # log - When you have outliers on the high side, taking the log can help times.ln0 <- log(sn$Times) hist(times.ln0) describe(times.ln0) # produces weird results for this data set because there are 0's in the data set # Add 1 to data set to avoid the undefined logs for 0 tims times.ln1 <- log(sn$Times + 1) hist(times.ln1) describe(times.ln1) # Ranking (forces a nearly uniformed distribution by assigning ordinal variables) times.rank <- rank(sn$Times) hist(times.rank) describe(times.rank) # ties.method = c(average, first, random, max, min) times.rankr <- rank(sn$Times, ties.method = "random") # flatens out the distribution hist(times.rankr) describe(times.rankr) # Dicotomizing (use carefully, because you lose information in the process) times.gt1 <- ifelse(sn$Times > 1, 1, 0) # Dichotimized based on if they logged in more than 1 times a week times.gt1
9852eb9bebcb63bb3a54b337c812fb026605a27e
72d9009d19e92b721d5cc0e8f8045e1145921130
/heuristicsmineR/man/print.dependency_matrix.Rd
157a3f1ba47e052fe087743ed191b1e836198ccf
[]
no_license
akhikolla/TestedPackages-NoIssues
be46c49c0836b3f0cf60e247087089868adf7a62
eb8d498cc132def615c090941bc172e17fdce267
refs/heads/master
2023-03-01T09:10:17.227119
2021-01-25T19:44:44
2021-01-25T19:44:44
332,027,727
1
0
null
null
null
null
UTF-8
R
false
true
436
rd
print.dependency_matrix.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.dependency_matrix.R \name{print.dependency_matrix} \alias{print.dependency_matrix} \title{Generic print function for a dependency matrix} \usage{ \method{print}{dependency_matrix}(x, ...) } \arguments{ \item{x}{dependency matrix object} \item{...}{Additional Arguments} } \description{ Generic print function for a dependency matrix }
69aac8e8f87e603c18b15bfb179080fe958176fc
6a4f552946002eb86443f39bd9887c91f1608c1f
/R/ageAccel.R
59861569e3af40e9cab1345dfe68f03f586caffb
[]
no_license
RichardJActon/DNAmAgeMini
20f01399205d4dc94283583d1eee61a4c21dd610
0a443edece74cdd68a95442b910d2d2222115a43
refs/heads/master
2020-04-21T14:09:45.905735
2019-02-08T16:41:30
2019-02-08T16:41:30
169,625,124
1
2
null
null
null
null
UTF-8
R
false
false
270
r
ageAccel.R
# age acceleration #' ageAccel #' @export ageAccel <- function(pred,chron) { if (!(is.numeric(pred) & is.vector(pred))){ warning("pred is not a numeric vector")} if (!(is.numeric(chron) & is.vector(chron))){ warning("chron is not a numeric vector")} pred - chron }
40ced25eb6aa148466cb0ef40d34b2ab6510677c
aae743a14d850d2eb3daa1bf573ff082af5aa5cc
/man-roxygen/RDclass.R
816f0e433acb2d7c66ba50b6a7d42758cf591b7b
[]
no_license
mdroste/RDHonest
5d8f8890d7195db7044c91d3d5d37742c4b7a084
c471e3cfa1533807dc988e15bbecaa83d85846a4
refs/heads/master
2020-06-15T13:28:13.308470
2019-06-24T17:50:39
2019-06-24T17:50:39
195,312,954
1
0
null
2019-07-05T00:05:11
2019-07-05T00:05:10
null
UTF-8
R
false
false
177
r
RDclass.R
#' @param M Bound on second derivative of the conditional mean function. #' @param sclass Smoothness class, either \code{"T"} for Taylor or #' \code{"H"} for Hölder class.
208d10ab6f11106b18ed87bb28af8501bc6deea1
a5dc2f5e2cb3ecd3ab28fac55d6c11d13064e5f7
/R-package/quantmod/man/refit_quantile_genlasso.Rd
794a845a9aca1090ce440cb6c03c03e864a06f0d
[]
no_license
elray1/quantmod
b1fdb1f8cac954a0e3309aee5d2271b62ade1761
e6d3dff57ae7564e0d8ff98cf0654332d9b6794f
refs/heads/master
2022-10-10T08:28:50.427163
2020-06-11T14:58:33
2020-06-11T14:58:33
null
0
0
null
null
null
null
UTF-8
R
false
true
2,304
rd
refit_quantile_genlasso.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cv_quantile_genlasso.R \name{refit_quantile_genlasso} \alias{refit_quantile_genlasso} \title{Refit function for cv_quantile_genlasso object} \usage{ refit_quantile_genlasso(obj, x, y, d, tau_new = c(0.01, 0.025, seq(0.05, 0.95, by = 0.05), 0.975, 0.99), weights = NULL, no_pen_rows = NULL, intercept = TRUE, standardize = TRUE, noncross = FALSE, x0 = NULL, lp_solver = NULL, time_limit = NULL, warm_starts = NULL, params = NULL, transform = NULL, inv_trans = NULL, jitter = NULL, verbose = FALSE) } \arguments{ \item{obj}{The \code{cv_quantile_genlasso} object to start from.} \item{x}{Matrix of predictors.} \item{y}{Vector of responses.} \item{d}{Matrix defining the generalized lasso penalty.} \item{tau_new}{Vector of new quantile levels at which to fit new solutions. Default is a sequence of 23 quantile levels from 0.01 to 0.99.} \item{noncross}{Should noncrossing constraints be applied? These force the estimated quantiles to be properly ordered across all quantile levels being considered. The default is FALSE. If TRUE, then noncrossing constraints are applied to the estimated quantiles at all points specified by the next argument \code{x0}.} \item{x0}{Matrix of points used to define the noncrossing constraints. Default is NULL, which means that we consider noncrossing constraints at the training points \code{x}.} \item{verbose}{Should progress be printed out to the console? Default is FALSE.} } \value{ A \code{quantile_genlasso} object, with solutions at quantile levels \code{tau_new}. } \description{ Refit generalized lasso solutions at a new set of quantile levels, given an existing \code{cv_quantile_genlasso} object. } \details{ This function simply infers, for each quantile level in \code{tau_new}, a (very) roughly-CV-optimal tuning parameter value, then calls \code{quantile_genlasso} at the new quantile levels and corresponding tuning parameter values. 
If not specified, the arguments \code{weights}, \code{no_pen_rows}, \code{intercept}, \code{standardize}, \code{lp_solver}, \code{time_limit}, \code{warm_starts}, \code{params}, \code{transform}, \code{inv_transorm}, \code{jitter} are all inherited from the given \code{cv_quantile_genlasso} object. }
f670371efc1cdbc5b184c103bb492180c711fde6
92e597e4ffc9b52cfb6b512734fb10c255543d26
/man/hsDelEmptyCols.Rd
09403276a8bf50cf00a92e9557e176122ee03b08
[ "MIT" ]
permissive
KWB-R/kwb.utils
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
0930eaeb9303cd9359892c1403226a73060eed5b
refs/heads/master
2023-05-12T15:26:14.529039
2023-04-21T04:28:29
2023-04-21T04:28:29
60,531,844
9
1
MIT
2023-04-21T04:28:30
2016-06-06T13:52:43
R
UTF-8
R
false
true
883
rd
hsDelEmptyCols.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/column.R \name{hsDelEmptyCols} \alias{hsDelEmptyCols} \title{Delete empty Columns of Data Frame} \usage{ hsDelEmptyCols(dataFrame, FUN = function(x) all(is.na(x)), drop = FALSE) } \arguments{ \item{dataFrame}{data frame of which empty columns (NA in all rows) are to be removed} \item{FUN}{function to be applied to each column to decide whether the column is empty or not. Default: \code{function(x) all(is.na(x))}} \item{drop}{if \code{TRUE} (the default is \code{FALSE}) one dimension is dropped (a vector is returned instead of a data frame) in case that all but one columns are removed.} } \value{ copy of input data frame but with all empty columns removed } \description{ Returns data frame in which all empty columns (NA in all rows) are removed } \seealso{ \code{\link{removeEmptyColumns}} }