blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1e117f645dcf4f9da084b8b8cd497060a7e18f9e
|
bad132f51935944a52a00e20e90395990afd378a
|
/R/ISODistributor.R
|
2f77cf8820e4dc3e929905e4376052d67d33a1d4
|
[] |
no_license
|
cran/geometa
|
9612ad75b72956cfd4225b764ed8f048804deff1
|
b87c8291df8ddd6d526aa27d78211e1b8bd0bb9f
|
refs/heads/master
| 2022-11-10T21:10:25.899335
| 2022-10-27T22:45:13
| 2022-10-27T22:45:13
| 92,486,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,328
|
r
|
ISODistributor.R
|
#' ISODistributor
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO distributor
#' @return Object of \code{\link{R6Class}} for modelling an ISODistributor
#' @format \code{\link{R6Class}} object.
#'
#' @examples
#' md <- ISODistributor$new()
#' rp <- ISOResponsibleParty$new()
#' rp$setIndividualName("someone")
#' rp$setOrganisationName("somewhere")
#' rp$setPositionName("Data manager")
#'
#' contact <- ISOContact$new()
#' phone <- ISOTelephone$new()
#' phone$setVoice("myphonenumber")
#' phone$setFacsimile("myfacsimile")
#' contact$setPhone(phone)
#' address <- ISOAddress$new()
#' address$setDeliveryPoint("theaddress")
#' address$setCity("thecity")
#' address$setPostalCode("111")
#' address$setCountry("France")
#' address$setEmail("someone@@theorg.org")
#' contact$setAddress(address)
#' res <- ISOOnlineResource$new()
#' res$setLinkage("http://www.somewhereovertheweb.org")
#' res$setName("somename")
#' contact$setOnlineResource(res)
#' rp$setContactInfo(contact)
#' rp$setRole("author")
#' md$setContact(rp)
#'
#' format <- ISOFormat$new()
#' format$setName("name")
#' format$setVersion("1.0")
#' format$setAmendmentNumber("2")
#' format$setSpecification("specification")
#' md$addFormat(format)
#'
#' xml <- md$encode()
#'
#' @references
#' ISO 19115:2003 - Geographic information -- Metadata
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
ISODistributor <- R6Class("ISODistributor",
  inherit = ISOAbstractObject,
  private = list(
    xmlElement = "MD_Distributor",
    xmlNamespacePrefix = "GMD"
  ),
  public = list(
    #'@field distributorContact distributorContact : ISOResponsibleParty
    distributorContact = NULL,
    #'@field distributorFormat distributorFormat : list of ISOFormat
    distributorFormat = list(),

    #'@description Initializes object
    #'@param xml object of class \link{XMLInternalNode-class}
    initialize = function(xml = NULL){
      super$initialize(xml = xml)
    },

    #'@description Sets the distributor contact
    #'@param contact object of class \link{ISOResponsibleParty}
    setContact = function(contact){
      if(!is(contact, "ISOResponsibleParty")){
        # Fixed error message: original was missing "be" and the closing quote
        stop("The argument value should be an object of class 'ISOResponsibleParty'")
      }
      self$distributorContact = contact
    },

    #'@description Adds a format
    #'@param format object of class \link{ISOFormat}
    #'@return \code{TRUE} if added, \code{FALSE} otherwise
    addFormat = function(format){
      if(!is(format, "ISOFormat")){
        # Fixed error message: original was missing "be" and the closing quote
        stop("The argument value should be an object of class 'ISOFormat'")
      }
      return(self$addListElement("distributorFormat", format))
    },

    #'@description Deletes a format
    #'@param format object of class \link{ISOFormat}
    #'@return \code{TRUE} if deleted, \code{FALSE} otherwise
    delFormat = function(format){
      if(!is(format, "ISOFormat")){
        # Fixed error message: original was missing "be" and the closing quote
        stop("The argument value should be an object of class 'ISOFormat'")
      }
      return(self$delListElement("distributorFormat", format))
    }
  )
)
|
e3ed66211465e0c56a22142e985bad610b9b4046
|
d53ad1327c7481e52f9cfc4644b836a6754f89a0
|
/man/dist_extract.Rd
|
c6c42f68723e9c9a74a37cd52cac1889119dc2ef
|
[] |
no_license
|
talegari/forager
|
5aa152f65c4596c7d1161694a8aab40225950973
|
f3963444886afac85d252d6c0b5455426361a7f3
|
refs/heads/master
| 2020-03-19T20:40:45.641914
| 2019-03-09T19:25:26
| 2019-03-09T19:25:26
| 136,911,591
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,000
|
rd
|
dist_extract.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dist_extract.R
\name{dist_extract}
\alias{dist_extract}
\title{Extract distances between specified indexes from a 'dist' object}
\usage{
dist_extract(object, i, j, product = "inner")
}
\arguments{
\item{object}{distance object}
\item{i}{index}
\item{j}{index. If missing, this defaults to 1:size of the dist object}
\item{product}{(string) product type. One among: 'inner', 'outer'.}
}
\value{
A vector of distances when 'inner' is used. A matrix of distances
when 'outer' is used.
}
\description{
Extract distances as a vector when product is inner and as a
matrix when the product is outer
}
\details{
When j is missing, it defaults to 1:size of the dist object. When
the product is inner and lengths of i and j mismatch, the smaller one is
extended to the size of the longer one.
}
\examples{
temp <- stats::dist(datasets::mtcars)
dist_extract(temp, 1:2, 3:5, "inner")
dist_extract(temp, 1:2, 3:5, "outer")
}
|
c65cd649129a55ad38e1f8530385457f71f71658
|
7ebb7e41627e33a28534308c24e654cafeb5e787
|
/Generalized Linear Models - 411/Bonus Assignments/Insurance/Insurance.R
|
477b27447580cc4aa9188ad3681c72c21ab56e98
|
[] |
no_license
|
tamtwill/NU_MSDS_Courses
|
9ab150484d8e6eb2b58e4f7a8b9c16b6db3175d6
|
141b34d002e4fde6d15e762a35d4435d997b960a
|
refs/heads/master
| 2021-06-06T07:08:05.950402
| 2020-04-21T22:40:21
| 2020-04-21T22:40:21
| 140,891,149
| 0
| 0
| null | 2018-07-13T21:42:05
| 2018-07-13T20:41:03
|
HTML
|
UTF-8
|
R
| false
| false
| 2,552
|
r
|
Insurance.R
|
# Tamara Williams extra credit, Insurance
# Required packages
#---------------------------
library(readr)
library(pbkrtest)
library(car)
library(leaps)
library(MASS)
library(data.table)
library(ggplot2)
library(reshape2)

#####
# Working directory and data load
#####
setwd("~/NorthwesternU_MSPA/Classes/Generalized Linear Models - 411/Bonus Assignments/Insurance")
df <- read.csv("insurance.csv", header = TRUE, stringsAsFactors = FALSE)
summary(df)

# Impute missing numeric values with the column mean
for (col in c("AGE", "YOJ", "CAR_AGE")) {
  df[[col]][is.na(df[[col]])] <- mean(df[[col]], na.rm = TRUE)
}

# Encode categorical columns as numeric factor codes
cat_cols <- c("SEX", "REVOKED", "RED_CAR", "PARENT1", "EDUCATION", "JOB",
              "CAR_USE", "CAR_TYPE", "MSTATUS", "URBANICITY")
for (col in cat_cols) {
  df[[col]] <- as.numeric(factor(df[[col]]))
}

# Strip currency formatting ($ and ,) and convert to numeric
for (col in c("HOME_VAL", "BLUEBOOK", "OLDCLAIM", "INCOME")) {
  df[[col]] <- as.numeric(gsub('[$,]', '', df[[col]]))
}

# Split the target out of the predictor set (column 2 is the target)
train <- as.data.frame(df)
target <- train$TARGET
train <- train[-2]

# Rounded correlation matrix in long form for plotting
cormat <- round(cor(df), 2)
melted_cormat <- melt(cormat)
# Blank out the upper triangle of a (correlation) matrix, leaving the
# lower triangle and the diagonal intact.
get_lower_tri <- function(cormat) {
  lower <- cormat
  lower[upper.tri(lower)] <- NA
  lower
}
# Blank out the lower triangle of a (correlation) matrix, leaving the
# upper triangle and the diagonal intact.
get_upper_tri <- function(cormat) {
  upper <- cormat
  upper[lower.tri(upper)] <- NA
  upper
}
# Melt only the upper triangle so each correlation pair appears once.
# BUG FIX: the original line referenced `upper_tri`, which was never defined;
# it is now computed from `cormat` with get_upper_tri() (defined above).
upper_tri <- get_upper_tri(cormat)
melted_cormat <- melt(upper_tri, na.rm = TRUE)

# Correlation heatmap
ggplot(data = melted_cormat, aes(Var2, Var1, fill = value)) +
  geom_tile(color = "white") +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-1, 1), space = "Lab",
                       name = "Pearson\nCorrelation") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                   size = 12, hjust = 1)) +
  coord_fixed()

# Full model: all candidate predictors
model1 <- lm(target ~ KIDSDRIV + AGE + HOMEKIDS + YOJ + INCOME + PARENT1 + HOME_VAL + MSTATUS + SEX +
               EDUCATION + JOB + TRAVTIME + CAR_USE + BLUEBOOK + TIF + CAR_TYPE + RED_CAR + OLDCLAIM + CLM_FREQ + REVOKED +
               MVR_PTS + CAR_AGE + URBANICITY, data = train)
summary(model1)

# Reduced model with three predictors
model2 <- lm(target ~ BLUEBOOK + MSTATUS + SEX, data = train)
summary(model2)
|
7a4ae293602ced55950287e732c35a4366a05e8d
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.apigateway/man/flush_stage_authorizers_cache.Rd
|
ab4e1f6e8f6442fa0c95e43fda48024d25f5ef70
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 649
|
rd
|
flush_stage_authorizers_cache.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.apigateway_operations.R
\name{flush_stage_authorizers_cache}
\alias{flush_stage_authorizers_cache}
\title{Flushes all authorizer cache entries on a stage}
\usage{
flush_stage_authorizers_cache(restApiId, stageName)
}
\arguments{
\item{restApiId}{[required] The string identifier of the associated RestApi.}
\item{stageName}{[required] The name of the stage to flush.}
}
\description{
Flushes all authorizer cache entries on a stage.
}
\section{Accepted Parameters}{
\preformatted{flush_stage_authorizers_cache(
restApiId = "string",
stageName = "string"
)
}
}
|
2827d6774aab4a2bce7389c35f937e23860f1eac
|
573e3623cd6d65c46e1771c2869c12d7ace68e0d
|
/R/outlier_detect.R
|
6ef762e775445ab271a6074b021aca99d22f77d6
|
[] |
no_license
|
mellyxx/xmsPANDA
|
14cda7624d99d4433fbced4fcb95222634be1cee
|
006831474e6c9181118a21922c040183a0e49b78
|
refs/heads/master
| 2023-03-12T21:07:50.886053
| 2021-03-02T16:40:24
| 2021-03-02T16:40:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,186
|
r
|
outlier_detect.R
|
# Detect outlier samples in an intensity matrix.
#
# data_matrix: samples in columns; the first two columns are dropped before
#   analysis (assumed annotation columns — TODO confirm with callers).
# ncomp: number of PCA components to fit.
# pthresh: p-value cutoff used by the "pcachisq" method.
# outlier.method: "pcachisq", "pcout", "pcatukey" or "sumtukey" (default).
#
# Returns a character vector of flagged sample (column) names.
outlier_detect <- function(data_matrix, ncomp = 2, pthresh = 0.005, outlier.method = "sumtukey") {
  sample_names <- {}
  pca_fit <- mixOmics::pca(t(data_matrix[, -c(1:2)]), center = TRUE, scale = TRUE, ncomp = ncomp)
  sample_names <- colnames(data_matrix[, -c(1:2)])
  scores <- pca_fit$variates$X
  if (outlier.method == "pcachisq") {
    # Robust Mahalanobis distances on the PCA scores, tested against chi-square
    dist2 <- covRob(pca_fit$variates$X, estim = "pairwiseGK")$dist
    pval <- pchisq(dist2, df = ncomp, lower.tail = FALSE)
    is.out <- (pval < (pthresh))
    col <- rep("blue", length(is.out))
    col[is.out == TRUE] <- "brown"
    plotIndiv(pca_fit, col = col, cex = 2,
              title = paste("Outlier (brown dots) detection using PCA and ChiSq test (p<", pthresh, ")", sep = ""),
              size.title = 8)
    sample_names <- sample_names[which(is.out == TRUE)]
  } else if (outlier.method == "pcout") {
    # Weight-based multivariate outlier detection on the PCA scores
    res <- pcout(scores, makeplot = TRUE)
    sample_names <- names(res$wfinal01[which(res$wfinal01 == 0)])
  } else if (outlier.method == "pcatukey") {
    # Tukey fences (1.5 * IQR) applied separately to PC1 and PC2
    flagged <- {}
    for (comp in 1:2) {
      s <- scores[, comp]
      iqr_val <- quantile(s, 0.75) - quantile(s, 0.25)
      upper_limit <- quantile(s, 0.75) + 1.5 * (iqr_val)
      lower_limit <- quantile(s, 0.25) - 1.5 * (iqr_val)
      flagged <- c(flagged, sample_names[which(s > upper_limit | s < lower_limit)])
    }
    sample_names <- flagged
  } else if (outlier.method == "sumtukey") {
    # Tukey fences on the per-sample (column) intensity sums
    totals <- apply(data_matrix[, -c(1:2)], 2, function(x) { sum(x, na.rm = TRUE) })
    iqr_val <- quantile(totals, 0.75) - quantile(totals, 0.25)
    upper_limit <- quantile(totals, 0.75) + 1.5 * (iqr_val)
    lower_limit <- quantile(totals, 0.25) - 1.5 * (iqr_val)
    sample_names <- sample_names[which(totals > upper_limit | totals < lower_limit)]
  }
  return(sample_names)
}
|
3325c789aa2310e94d6d2374bd7a8957405aaf49
|
c4f6ca47ebe3c7ce07590fb04eb6801f120564a8
|
/man/locate_data.Rd
|
3f53dfa00b4c2d39cfa98adc05c1f6bb754a71cc
|
[
"MIT"
] |
permissive
|
jimsforks/locatr
|
516f843d2a7afd8d380f40bf63f94e24fe0fa3a1
|
dd4bfecbe0b4de75f82ac4b2827737d4d5033267
|
refs/heads/master
| 2022-10-07T21:52:52.834301
| 2020-06-08T08:17:17
| 2020-06-08T08:17:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 887
|
rd
|
locate_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locate_data.R
\name{locate_data}
\alias{locate_data}
\title{Locates data cells}
\usage{
locate_data(sheet = NULL, ...)
}
\arguments{
\item{sheet}{a data frame produced by xlsx_cells}
\item{...}{a filter expression that identifies data cells.}
}
\description{
This function identifies which cells in a tidyxl data frame represent data cells.
It removes the data cells from the data frame and stores them in an attribute of the resulting tidyxl
data frame.
}
\examples{
\dontrun{
library(tidyverse)
# Read in tidyxl data frame
xl_df <- locatr_example("worked-examples.xlsx") \%>\% xlsx_cells_fmt(sheets = "pivot-hierarchy")
# Identify numeric cells as data cells using the data_type column of xl_df
xl_df <- xl_df \%>\% locate_data(data_type == "numeric")
# Visually inspect the result
xl_df \%>\% plot_cells()
}
}
|
331bb8494bcc19b195e9cf1f6501015ebe36454b
|
63ec49bc7d1bbe18efd040ad0f616f91b195dfb4
|
/Relatório/roteiro.R
|
2a68e966f42d359619f6883b73e5347bb4f9e5e2
|
[] |
no_license
|
ddeniel1/EP-IA
|
b29b32b310c459fce713732966cb9ceaeedd4ea0
|
6c475d0f5afb6b533bd838cf892be6f1cb9b8cb9
|
refs/heads/master
| 2020-05-02T02:25:43.249101
| 2019-04-29T01:27:44
| 2019-04-29T01:27:44
| 177,704,385
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,930
|
r
|
roteiro.R
|
library(plyr)
library(tidyverse)
library(readr)

# Automatico ----
# One CSV of results per (population size, mutation rate) configuration.
automatico <- read_csv("~/Documentos/IA/EP-IA/res/automatico/182_pop_4847_mut_20_ger/0.csv")
pop118mut7724 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/118_pop_7724_mut_20_ger/0.csv")
pop18mut5976 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/18_pop_5976_mut_20_ger/0.csv")
pop234mut507 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/234_pop_507_mut_20_ger/0.csv")
pop252mut8947 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/252_pop_8947_mut_20_ger/0.csv")
pop268mut16760 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/268_pop_16760_mut_20_ger/0.csv")
pop396mut17868 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/396_pop_17868_mut_20_ger/0.csv")
pop474mut2983 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/474_pop_2983_mut_20_ger/0.csv")
pop478mut16081 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/478_pop_16081_mut_20_ger/0.csv")
pop567mut10810 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/567_pop_10810_mut_20_ger/0.csv")
pop626mut2654 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/626_pop_2654_mut_20_ger/0.csv")
pop755mut5941 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/755_pop_5941_mut_20_ger/0.csv")
pop760mut14423 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/760_pop_14423_mut_20_ger/0.csv")
# NOTE(review): the variable name says 16472, the path says 16742, and the
# matching `mut` entry below is 1.6752 — confirm which value is correct.
pop863mut16472 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/863_pop_16742_mut_20_ger/0.csv")
pop945mut15021 <- read_csv("~/Documentos/IA/EP-IA/res/automatico/945_pop_15021_mut_20_ger/0.csv")

dfs <- list(automatico, pop118mut7724, pop18mut5976, pop234mut507, pop252mut8947,
            pop268mut16760, pop396mut17868, pop474mut2983, pop478mut16081,
            pop567mut10810, pop626mut2654, pop755mut5941, pop760mut14423, pop863mut16472,
            pop945mut15021)

teste <- data.frame()
pop <- c(182, 118, 18, 234, 252, 268, 396, 474, 478, 567, 626, 755, 760, 863, 945)
mut <- c(0.4847, 0.7724, 0.5976, 0.0507, 0.8947, 1.6760, 1.7868, 0.2983, 1.6081, 1.0810, 0.2654,
         0.5941, 1.4423, 1.6752, 1.5021)

# Annotate each run with its configuration, then stack all runs.
i <- 1
for (v in dfs) {
  n <- nrow(v)
  # rep() replaces the original hand-written 20-element vectors, so runs with
  # any number of generations are handled correctly.
  v["pop"] <- rep(pop[i], n)
  v["mut"] <- rep(mut[i], n)
  tipo <- paste("Pop", pop[i], "Mut", mut[i])
  v["tipo"] <- rep(tipo, n)
  # rbind_list() is deprecated in dplyr; kept for compatibility with the
  # original script's behavior.
  teste <- rbind_list(teste, v)
  i <- i + 1
}

# Average fitness per generation
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("Média FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Best fitness per generation
ggplot(teste, aes(x = ger, y = bestFit, colour = tipo)) + geom_line() +
  ylab("Best FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Average (solid) and best (dashed) fitness together
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("FITNESS") + xlab("Número de Gerações") +
  geom_line(aes(x = ger, y = bestFit, colour = tipo), linetype = "dashed") +
  labs(colour = "População")
# Manual ----
# Each subdirectory of res/manual holds one (pop, mut) configuration; the
# directories are iterated in sorted order, 4 mutation rates per population.
lista <- list.files("~/Documentos/IA/EP-IA/res/manual")
caminho <- "~/Documentos/IA/EP-IA/res/manual"
pop <- c(10, 100, 1000, 5, 50, 500, 5000)
mut <- c(0.002, 0.02, 0.2, 2)
teste <- data.frame()
contpop <- 1
cont <- 1
contmut <- 1
for (v in lista) {
  # Renamed from `c`: a variable named `c` shadows base::c and confuses readers.
  csv_path <- paste(caminho, "/", v, "/0.csv", sep = "")
  aux <- read_csv(csv_path)
  # Advance to the next population size after every 4 mutation rates.
  if ((cont - 1) %% 4 == 0 && cont != 1) {
    print(cont)
    contpop <- contpop + 1
    contmut <- 1
  }
  n <- nrow(aux)
  # rep() replaces hand-written 20-element vectors; works for any run length.
  aux["pop"] <- rep(pop[contpop], n)
  aux["mut"] <- rep(mut[contmut], n)
  tipo <- paste("Pop", pop[contpop], "Mut", mut[contmut])
  aux["tipo"] <- rep(tipo, n)
  teste <- rbind(teste, aux)
  cont <- cont + 1
  contmut <- contmut + 1
}

# Average fitness per generation
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("Média FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Best fitness per generation
ggplot(teste, aes(x = ger, y = bestFit, colour = tipo)) + geom_line() +
  ylab("Best FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Average (solid) and best (dashed) fitness together
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("FITNESS") + xlab("Número de Gerações") +
  geom_line(aes(x = ger, y = bestFit, colour = tipo), linetype = "dashed") +
  labs(colour = "População")
# Mutation only, population of 1 ----
# One subdirectory of res/so_mutacao per mutation rate, iterated in sorted order.
lista <- list.files("~/Documentos/IA/EP-IA/res/so_mutacao")
caminho <- "~/Documentos/IA/EP-IA/res/so_mutacao"
mut <- c(100, 0.002, 0.02, 0.2, 2, 20)
teste <- data.frame()
contpop <- 1
cont <- 1
contmut <- 1
for (v in lista) {
  # Renamed from `c`: a variable named `c` shadows base::c and confuses readers.
  csv_path <- paste(caminho, "/", v, "/0.csv", sep = "")
  aux <- read_csv(csv_path)
  n <- nrow(aux)
  # rep() replaces hand-written 20-element vectors; works for any run length.
  aux["pop"] <- rep(1, n)
  aux["mut"] <- rep(mut[cont], n)
  tipo <- paste("Pop", 1, "Mut", mut[cont])
  aux["tipo"] <- rep(tipo, n)
  teste <- rbind(teste, aux)
  cont <- cont + 1
}

# Average fitness per generation
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("Média FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Best fitness per generation
ggplot(teste, aes(x = ger, y = bestFit, colour = tipo)) + geom_line() +
  ylab("Best FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Average (solid) and best (dashed) fitness together
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("FITNESS") + xlab("Número de Gerações") +
  geom_line(aes(x = ger, y = bestFit, colour = tipo), linetype = "dashed") +
  labs(colour = "População")
# Mutation only, population of 100 ----
# One subdirectory of res/Mutacao per mutation rate, iterated in sorted order.
lista <- list.files("~/Documentos/IA/EP-IA/res/Mutacao")
caminho <- "~/Documentos/IA/EP-IA/res/Mutacao"
mut <- c(0.001, 0.01, 0.1, 1, 10, 100, 0.002, 0.02, 0.2, 2, 20)
teste <- data.frame()
contpop <- 1
cont <- 1
contmut <- 1
for (v in lista) {
  # Renamed from `c`: a variable named `c` shadows base::c and confuses readers.
  csv_path <- paste(caminho, "/", v, "/0.csv", sep = "")
  aux <- read_csv(csv_path)
  n <- nrow(aux)
  # rep() replaces hand-written 20-element vectors; works for any run length.
  aux["pop"] <- rep(100, n)
  aux["mut"] <- rep(mut[cont], n)
  tipo <- paste("Pop", 100, "Mut", mut[cont])
  aux["tipo"] <- rep(tipo, n)
  teste <- rbind(teste, aux)
  cont <- cont + 1
}

# Average fitness per generation
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("Média FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Best fitness per generation
ggplot(teste, aes(x = ger, y = bestFit, colour = tipo)) + geom_line() +
  ylab("Best FITNESS") + xlab("Número de Gerações") + labs(colour = "População")
# Average (solid) and best (dashed) fitness together
ggplot(teste, aes(x = ger, y = avgFit, colour = tipo)) + geom_line() +
  ylab("FITNESS") + xlab("Número de Gerações") +
  geom_line(aes(x = ger, y = bestFit, colour = tipo), linetype = "dashed") +
  labs(colour = "População")
|
ab307d3ef033b53cf659dfadad844ac63e6ace8a
|
1e820fe644a039a60bfbee354e50c775af675f6b
|
/ProbStatsR/E7_2_10.R
|
842f6da0a0df97982edcc13ec0fe5ba712c666dc
|
[] |
no_license
|
PyRPy/stats_r
|
a334a58fca0e335b9b8b30720f91919b7b43d7bc
|
26a3f47977773044d39f6d8ad0ac8dafb01cce3f
|
refs/heads/master
| 2023-08-17T00:07:38.819861
| 2023-08-16T14:27:16
| 2023-08-16T14:27:16
| 171,056,838
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 362
|
r
|
E7_2_10.R
|
# 7.2-10 -----------------------------------------------------------------
# One-sided t-based lower confidence bound for the mean race-time difference.
dat <- read.table("E7_2-10.txt", quote = "\"", comment.char = "")
race <- dat$V1

# Sample mean of the race time differences
mu <- mean(race)  # 0.07875

# 95% lower confidence bound for mu
n <- length(race)
mu + qt(0.05, n - 1) * sd(race) / sqrt(n)  # -0.01043263
# Conclusion: no effect, since the lower bound is below zero.
|
d7d226c0adb66dd5cf53887dfc96d6d1090b3436
|
6e5b5050ff779266b83be384e0372fd507044b4d
|
/utils/to_csv.R
|
5ab2c6b55b30349b65d4ecf151e91255c87aa5f9
|
[] |
no_license
|
cmmid/covidm_reports
|
b3968b096d40a830d775a0fa3eac7afee1e3e56e
|
b87f48cb81c4ef3bbbd221295ea47fad17c27b13
|
refs/heads/master
| 2022-12-05T23:36:42.314743
| 2020-08-10T06:38:15
| 2020-08-10T06:38:15
| 254,046,734
| 9
| 6
| null | 2020-08-05T20:14:54
| 2020-04-08T09:43:35
|
R
|
UTF-8
|
R
| false
| false
| 379
|
r
|
to_csv.R
|
# Convert every .qs result file under the given directory to .csv.
suppressPackageStartupMessages({
  require(data.table)
  require(qs)
})

# Input directory: first CLI argument, or a fixed default when interactive.
.args <- if (interactive()) c(
  "~/Dropbox/covidm_hpc_output"
) else commandArgs(trailingOnly = TRUE)

# All .qs files under the directory (recursive, sorted), excluding any whose
# path contains "old".
all_qs <- sort(list.files(.args[1], "qs$", full.names = TRUE, recursive = TRUE))
fls <- grep("old", all_qs, invert = TRUE, value = TRUE)

# Read each .qs file and write it back next to itself with a .csv extension.
# (Renamed from `ls`, which shadowed base::ls.)
written <- lapply(fls, function(fn) {
  fwrite(qread(fn), gsub("qs$", "csv", fn))
})
|
b3976477d215df7e9b600084d2e5dc2d99f84ffc
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/functions/most.freq.R
|
5c358d2fda2c807f46ed24262ff97618bbea5df8
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
r
|
most.freq.R
|
# Return the most frequent value of `x` as an integer.
# - With na.rm = FALSE, any NA in `x` yields NA_integer_.
# - An all-NA input yields NA_integer_.
# - Ties between equally frequent values are broken uniformly at random.
most.freq <- function(x, na.rm = TRUE){
  if (na.rm == FALSE && any(is.na(x))) {
    return(NA_integer_)
  }
  if (all(is.na(x))) {
    return(NA_integer_)
  }
  distinct_vals <- unique(x)
  if (length(distinct_vals) == 1) {
    return(as.integer(x[1]))
  }
  counts <- table(x)  # table() drops NAs by default
  top_idx <- which(counts == max(counts))
  vals <- as.numeric(dimnames(counts)[[1]])
  if (length(top_idx) == 1) {
    as.integer(vals[top_idx])
  } else {
    # Tie: pick one of the modes at random
    as.integer(base::sample(x = vals[top_idx], size = 1))
  }
}
|
7a86625785fbe2fddc0b7a55e79bd1f2c966a3b3
|
817c6a53e081e008cb771ae2beb8bb7cb438bd9a
|
/man/example2.Rd
|
d046bcde26783a235ba94c0cd23e15b382a0eff8
|
[] |
no_license
|
nfultz/learnSampling
|
4cfcde2d34229dbdffef87e8a5292433eb7ae9c5
|
a7130da3f57d6f43cb32d7a682461a9afe0ea840
|
refs/heads/master
| 2021-08-30T22:53:34.769019
| 2017-09-07T20:35:32
| 2017-09-07T20:35:32
| 114,797,849
| 2
| 1
| null | 2017-12-19T18:23:58
| 2017-12-19T18:23:57
| null |
UTF-8
|
R
| false
| false
| 410
|
rd
|
example2.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data-example2.R
\docType{data}
\name{example2}
\alias{example2}
\title{Example 2 Data (n=75)}
\format{A data frame with 75 rows and 2 variables:
\describe{
\item{SelfEsteem}{Self-Esteem scores }
\item{Height}{Heights of participants}
}}
\usage{
data(example2)
}
\description{
Example 2 Data (n=75)
}
\keyword{datasets}
|
6691b860b5bf5153b2cf7509bf733d2d1f81a7dc
|
7352fd6b28ff208887681eb87cafae0e3177a0da
|
/R/find_resourcesPT.R
|
03fe0a1df2e8a4615e751f6453645bf78f311e40
|
[] |
no_license
|
p1981thompson/HBM_Rmarkdown_template
|
2e9d3634997b9c00fe8164b8e53c93654c4ea521
|
b0cd5fa24ea8b7366e535d3e0c796fa3771898c9
|
refs/heads/master
| 2020-09-23T02:38:01.088500
| 2019-12-02T14:12:49
| 2019-12-02T14:12:49
| 225,381,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 311
|
r
|
find_resourcesPT.R
|
# Locate a resource file shipped with an R Markdown template of the
# WileyHBMtemplate package. Returns the absolute path, or errors if the
# file cannot be found in the installed package.
find_resourcesPT <- function(template, file = "template.tex") {
  path <- system.file("rmarkdown", "templates", template, "resources",
                      file, package = "WileyHBMtemplate")
  if (path == "") {
    stop("Couldn't find template file ", template, "/resources/",
         file, call. = FALSE)
  }
  path
}
|
34d8f2328bffd3fb67f37ccaf76633998ebfcd60
|
da6909e677947ef96408927ad91af4c3a4821637
|
/man/HRV_vars_domain.Rd
|
c627cabf77f63d5fd0bbebfb3a01eae31a565870
|
[
"MIT"
] |
permissive
|
Lightbridge-KS/labChartHRV
|
57bae5e0821c37bace7683843abf0a62bb5c7a31
|
204136c40a64943c87d21d4cbadd40c6d3b78a78
|
refs/heads/main
| 2023-04-19T05:32:07.776771
| 2022-06-08T07:56:54
| 2022-06-08T07:56:54
| 480,112,543
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 682
|
rd
|
HRV_vars_domain.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{HRV_vars_domain}
\alias{HRV_vars_domain}
\title{HRV Time & Frequency Domain Variable}
\format{
A list with 2 elements:
\describe{
\item{time}{contain character vector of time-domain variables}
\item{freq}{contain character vector of frequency-domain variables}
}
}
\usage{
HRV_vars_domain
}
\description{
A list for quickly select HRV variables in the category of time or frequency domain.
It is intended to be used with the resulting tibble of \code{\link[=read_HRV_reports]{read_HRV_reports()}} or \code{\link[=parse_HRV_reports]{parse_HRV_reports()}}.
}
\keyword{datasets}
|
98b779ec099096ab857733e05d6f4757ecb750d7
|
f1a7ab41ba3ce33c01a30e7283339c6432f9745f
|
/man/proteinInteraction.Rd
|
fee67d867a9c4eea478e32a9fd8a76703a7546bf
|
[] |
no_license
|
vishalbelsare/xnet
|
3ccba84442ebbf411fd96dc5c02dfedfc794adbf
|
4093905ae81281b6cf81c6a3425bdaf884e78fb4
|
refs/heads/main
| 2023-05-30T09:50:59.545097
| 2021-06-03T13:04:23
| 2021-06-03T13:04:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,364
|
rd
|
proteinInteraction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_proteinInteraction.R
\docType{data}
\name{proteinInteraction}
\alias{proteinInteraction}
\alias{Kmat_y2h_sc}
\title{Protein interaction for yeast}
\format{\itemize{
\item proteinInteraction: a numeric square matrix with 150 rows/columns
\item Kmat_y2h_sc: a numeric square matrix with 150 rows/columns
}}
\source{
\url{https://doi.org/10.1093/bioinformatics/bth910}
}
\usage{
proteinInteraction
}
\description{
A dataset for examining the interaction between proteins of
yeast. The dataset consists of the following objects:
}
\details{
\itemize{
\item proteinInteraction: the label matrix based on the protein
network taken from the KEGG/PATHWAY database
\item Kmat_y2h_sc: a kernel matrix indicating similarity of proteins.
}
The proteins in the dataset are a subset of the 769 proteins
used in Yamanishi et al (2004). The kernel matrix used is the
combination of 4 kernels: one based on expression data, one
on protein interaction data, one on localization data and one
on phylogenetic profile. These kernels and their combination are
also explained in Yamanishi et al (2004).
}
\references{
\href{https://doi.org/10.1093/bioinformatics/bth910}{Yamanishi et al, 2004}: Protein network inference from multiple genomic data: a supervised approach.
}
\keyword{datasets}
|
17ada2f1909f6a4a498c4118564b3873b0e62100
|
a45a5a8e3e0b85a3fe2ed73cba9997d216267bff
|
/scripts/films2.R
|
5bc55c9d5d6d0b32216efbb2b6fd65b33dcd0a20
|
[] |
no_license
|
sbtr1/tidytuesday
|
66c9c273739c5114b23c63de7a4576639534881f
|
49716d1a9097f9b5af62c14727edd21fd24ee73d
|
refs/heads/master
| 2020-06-05T01:44:19.468384
| 2019-07-09T08:28:38
| 2019-07-09T08:28:38
| 192,269,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,097
|
r
|
films2.R
|
#### Tidy Tuesday week 30 - Movie revenues
## Load libraries
library(tidyverse)
library(lubridate)
library(ggrepel)
## Load data
# Clean the movie table: drop the index column, de-duplicate by
# (release_date, movie, production_budget), parse dates, and compute
# revenue as a multiple of the production budget.
movies = read_csv("../data/movie_profit.csv") %>%
select(-X1) %>% # remove X1 column
distinct(release_date, movie, production_budget, .keep_all=T) %>% # remove duplicate movies
mutate(release_date = mdy(release_date),
genre = factor(genre),
year = year(release_date),
value_proportion = worldwide_gross/production_budget
) %>%
filter(release_date < "2018-10-20") # remove movies that haven't come out yet
## Plot
# Dot plot of revenue/budget ratio by genre; point size encodes worldwide
# gross (in millions), and movies that returned >250x their budget are
# labelled. Genres are ordered by their maximum ratio.
# NOTE(review): the caption credits @veerlevanson and the theme uses the
# "Roboto" font — confirm attribution and font availability.
ggplot(data=movies, aes(x=reorder(genre, value_proportion, max), y=value_proportion)) + geom_point(aes(size=worldwide_gross/1000000, color=genre), alpha=0.5) +
geom_label_repel(data=movies[movies$value_proportion>250,], aes(label=movie), min.segment.length=5) +
ylim(c(0,500)) +
scale_color_brewer(palette="Set1", guide=F) +
scale_size(range = c(1, 20)) +
scale_x_discrete(limits = rev(levels(reorder(movies$genre, movies$value_proportion, max)))) +
labs(title="Which movie genres are most profitable to produce?", subtitle="Movies with a revenue more than 250x their budget are labelled\n", x="", y="Revenue as proportion of budget\n", size="Worldwide gross\nrevenue (millions USD)", caption="Source: the_numbers, plot by @veerlevanson") +
theme_minimal(14) +
theme(legend.position = "right",
text=element_text(family="Roboto"),
plot.title = element_text(size=18, hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
plot.caption = element_text(size = 12, hjust = 1),
axis.text.y = element_text(hjust = 0),
panel.grid = element_line(colour = "#F0F0F0"),
plot.margin = unit(c(1,0.5,0.5,1), "cm")
)
# Save plot
# Writes the last plot to a fixed relative path (directory must exist).
ggsave(filename = "week30_HorrorMovies/Movie_revenue.jpg", width=15, height=10, units="cm", scale=1.6)
# Percentage of movies that make more than their budget
round(sum(movies$value_proportion>1)/nrow(movies)*100,1)
|
39dc060a46337affe303f5a2f3ed08568fe431e3
|
bc0cb9110b38234b17b77110af228f7fadd9503d
|
/_tests/test-error.r
|
f984e881e1a39657e72b6239e88912e083b6aa06
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
klmr/sys
|
e079754d0afa157e1fcaedcc7fe7cb1f242e2bde
|
ea12f5e2f61e7efde7395c7ab7ecfe6cb86de507
|
refs/heads/master
| 2021-06-17T20:04:20.613476
| 2016-08-01T17:36:10
| 2016-08-01T17:36:10
| 38,907,303
| 14
| 1
| null | 2017-09-26T14:37:47
| 2015-07-11T00:09:13
|
R
|
UTF-8
|
R
| false
| false
| 1,314
|
r
|
test-error.r
|
context('Error messages')
# Tests for the error reporting of the command-line parser.
# NOTE(review): xc(), arg(), opt() and the shows_error()/shows_no_error()
# expectations are defined elsewhere in this package; these tests only
# exercise the messages they produce.
test_that('wrong arguments cause errors', {
# Missing required positional argument -> error mentioning its name.
expect_that(xc(character(0), arg('filename', 'the input file')),
shows_error('filename'))
# Required argument supplied -> no error about it.
expect_that(xc('foo', arg('filename', 'the input file')),
shows_no_error('filename'))
# Unknown option -> error mentioning the option.
expect_that(xc('--extra', arg('filename', 'the input file')),
shows_error('--extra'))
# Positional argument given where only an option is defined.
expect_that(xc('some.file', opt('x', 'extra', 'an extra argument')),
shows_error('some.file'))
# Option without a default is still required even when a positional
# argument with a default is present.
expect_that(xc('some.file',
opt('x', 'extra', 'an extra argument'),
arg('filename', 'the input file', '')),
shows_error('--extra'))
# Repeated short flag rejected by the option's validate function.
expect_that(xc('-vvv',
opt('v', '', 'level of verbose logging', '',
validate = function (value) value %in% c('', 'v'))),
shows_error('-v'))
})
test_that('wrong usage causes errors', {
# Two argument names that normalize to the same identifier are rejected.
expect_that(xc(character(0),
arg('test-this', 'first argument', ''),
arg('test_this', 'duplicate argument', '')),
throws_error('Command line definition contains duplicate names'))
# A multi-character "short" option name fails an internal assertion.
expect_that(xc('x', opt('foo', 'f', 'frobnicate')),
throws_error('short == "" || nchar(short) == 1'))
})
|
9ff1fc00be0786e866c9c05cc4fb69359dbc3b1c
|
f908123c6cf4362373810d61bd9b1c1e4a72d0c3
|
/man/render_pmap_file.Rd
|
8c3ecaa87a6b9550ed9f6fac2a1903f2ae29992e
|
[
"MIT"
] |
permissive
|
yingza/pmap
|
eecbd0a39dce192d0b68baa8e094f627e09de2ba
|
9287304bd855aecb74cb2200a9cd4e1289897498
|
refs/heads/master
| 2021-05-11T23:38:34.182677
| 2018-01-15T02:48:14
| 2018-01-15T02:48:14
| 117,515,029
| 0
| 0
| null | 2018-01-15T07:59:30
| 2018-01-15T07:59:30
| null |
UTF-8
|
R
| false
| true
| 1,151
|
rd
|
render_pmap_file.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_pmap_file.R
\name{render_pmap_file}
\alias{render_pmap_file}
\title{Render the process map as a file}
\usage{
render_pmap_file(p, file_name, format = c("png", "pdf", "svg", "ps"),
width = NULL, height = NULL)
}
\arguments{
\item{p}{the process map created by \code{create_pmap()}}
\item{file_name}{the file name to be stored}
\item{format}{the file format, it can be \code{png}(default), \code{pdf}, \code{svg} and \code{ps}.}
\item{width}{width of the image (optional)}
\item{height}{height of the image (optional)}
}
\description{
You can save the process map to a file
}
\details{
The function depends on V8 engine, so please install \code{v8} engine support on your platform before use the function.
\itemize{
\item For Ubuntu/Debian user, please install \code{libv8-dev} package;
\item For Fedora/RHEL user, please install \code{v8-devel} package;
\item For macOS user, please \code{brew install v8@3.15};
}
}
\examples{
library(dplyr)
library(pmap)
generate_eventlog() \%>\% create_pmap() \%>\% render_pmap_file(file_name = "test.svg", format = "svg")
}
|
3ef9f0a955d7264d9fb27b63a7f6cf640384220f
|
5e55d9058c2efca8fff0147404ad105d9cac6bd1
|
/iocovid animation.R
|
b6bcfe4691c55621f8f4eb7e65d18926836e4629
|
[
"CC0-1.0"
] |
permissive
|
higgi13425/rmrwr-book
|
0a67e7beff1a498565b68a0c2e5cb13d34d6e171
|
22a57b162714d5189302ec295c6e66925ef293ec
|
refs/heads/master
| 2023-04-28T05:47:20.129120
| 2023-04-17T02:05:34
| 2023-04-17T02:05:34
| 249,053,179
| 14
| 10
|
CC0-1.0
| 2021-01-10T19:49:02
| 2020-03-21T20:14:33
|
HTML
|
UTF-8
|
R
| false
| false
| 3,409
|
r
|
iocovid animation.R
|
library(tidyverse)
library(shadowtext)
library(gganimate)
library(gifski)
options(scipen = 20)
read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv") %>%
gather(date, cases, 5:ncol(.)) %>%
mutate(date = as.Date(date, "%m/%d/%y")) %>%
group_by(country = `Country/Region`, date) %>%
summarise(cases = sum(cases)) %>%
filter(country != "Others" & country != "Mainland China") %>%
bind_rows(
tibble(country = "Republic of Korea", date = as.Date("2020-03-11"), cases = 7755)
) %>%
group_by(country) %>%
mutate(days_since_100 = as.numeric(date-min(date[cases >= 100]))) %>%
ungroup() %>%
filter(is.finite(days_since_100)) %>%
group_by(country) %>%
mutate(new_cases = cases-cases[days_since_100 == 0]) %>%
filter(sum(cases >= 100) >= 5) %>%
filter(cases >= 100) %>%
bind_rows(
tibble(country = "33% daily rise", days_since_100 = 0:18) %>%
mutate(cases = 100*1.33^days_since_100)
) %>%
ungroup() %>%
mutate(
country = country %>% str_replace_all("( SAR)|( \\(.+)|(Republic of )", "")
) %>%
# filter(days_since_100 <= 10) %>%
ggplot(aes(days_since_100, cases, col = country)) +
geom_hline(yintercept = 100) +
geom_vline(xintercept = 0) +
geom_line(size = 0.8) +
geom_point(pch = 21, size = 1) +
scale_y_log10(expand = expand_scale(add = c(0,0.1)), breaks=c(100, 200, 500, 1000, 2000, 5000, 10000,100000)) +
# scale_y_continuous(expand = expand_scale(add = c(0,100))) +
scale_x_continuous(expand = expand_scale(add = c(0,1))) +
theme_minimal() +
theme(
panel.grid.minor = element_blank(),
legend.position = "none",
plot.margin = margin(3,15,3,3,"mm")
) +
coord_cartesian(clip = "off") +
scale_colour_manual(values = c("United Kingdom" = "#ce3140", "US" = "#EB5E8D", "Italy" = "black",
"France" = "#c2b7af", "Germany" = "#c2b7af", "Hong Kong" = "blue",
"Iran" = "springgreen3", "Japan" = "royalblue3", "Singapore" = "blue",
"Korea, South" = "slateblue3", "Belgium" = "#c2b7af", "Netherlands" = "#c2b7af",
"Norway" = "#c2b7af", "Spain" = "#c2b7af", "Sweden" = "#c2b7af",
"Switzerland" = "#c2b7af", "33% daily rise" = "gray35", "Austria" = "#c2b7af",
"China" = 'red', "Cruise Ship" = 'purple')) +
#geom_shadowtext(aes(label = paste0(" ",country)), hjust=0, vjust = 0, data = . %>%
#group_by(country) %>%
#top_n(1, days_since_100), bg.color = "white") +
labs(x = "Number of days since 100th case", y = "",
title = "Total number of COVID-19 Cases per Country" ) +
geom_segment(aes(xend = 48, yend = cases), linetype = 2, colour = 'grey') +
geom_point(size = 2) +
geom_text(aes(x = 48.1, label = country), hjust = 0) ->
static_plot
plt <- static_plot +
transition_reveal(days_since_100) +
ease_aes('cubic-in-out') +
labs(subtitle = "Starting at Day on which 100th Case Occurred")
#rendering the animation for gif
final_animation <- animate(plt, nframes = 100, fps = 10,
#duration = 30,
width = 600,
height = 400, renderer = gifski_renderer())
#saving the animation
anim_save('covid_animate.gif', animation = final_animation)
|
aec1e57ec94a3b0539e186adef9ae1e02da6a27d
|
16ff96e4318d7a0a9eae27eb8900a28874dc8407
|
/R/ocpu_print.R
|
176f89ff1ac81daa3a5dcaa9c9d47f83ed78afb7
|
[] |
no_license
|
bfatemi/ocputils
|
3f72bab3d687e5af1d51cde60de7d43fc4ffaddb
|
bc811c6cd3fe948f4165e39ebf1cc1ce9d3877b2
|
refs/heads/master
| 2020-05-16T21:41:49.825792
| 2019-05-15T12:09:29
| 2019-05-15T12:09:29
| 183,313,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,247
|
r
|
ocpu_print.R
|
#' OCPU PRINT UTILITY FUNCTIONS
#'
#' @param msg TBD
#' @param sym TBD
#' @param slim TBD
#' @param content TBD
#' @param addtime TBD
#'
#' @importFrom stringr str_split str_c str_length str_count str_trunc
#' @importFrom crayon bgWhite bold magenta bgMagenta white make_style combine_styles bgCyan
#'
#' @name ocpu_print
NULL
#' @describeIn ocpu_print TBD
#' @export
printstamp <- function(msg=""){
sym <- "#"
msglines <- stringr::str_split(msg, "\\n")[[1]]
msgmain <- msglines[which.max(sapply(msglines, stringr::str_length, simplify = FALSE)[[1]])]
# make message length an even number for centering purposes
if(stringr::str_length(msgmain) %% 2 == 1)
msgmain <- stringr::str_c(msgmain, " ")
msg <- stringr::str_c(" ", msglines, " ")
scount <- stringr::str_length(msgmain)
cushion <- ceiling(scount*1.3) - scount
cushion <- cushion + cushion %% 2
topcount <- scount + cushion - 1 + 2
sidecount <- sum(length(msglines), 2)
# hdft <- stringr::str_c(rep(sym, topcount), collapse = "")
spaces <- stringr::str_c(rep(" ", topcount - 1), collapse = "")
sides.left <- rep(paste0(sym, ">"), sidecount)
sides.right <- rep(sym, sidecount)
grid_col <- topcount + 1
grid_row <- sidecount + 2
tmp <- stringr::str_c(stringr::str_c(sides.left, spaces), collapse = "\n")
txt <- stringr::str_split(stringr::str_split(tmp, "\n")[[1]], "")
pad.l <- c(paste0(sym, "> "), rep("", cushion/2-1))
pad.r <- " "#c(rep(" ", cushion/2-1), sym)
txt[2:(1+length(msglines))] <- lapply(stringr::str_split(msglines, ""), function(i) c(pad.l, i, pad.r))
cat("\n\n")
cat(paste0(sapply(txt, function(itxt) paste0(c(itxt, "\n"), collapse = "")), collapse = ""))
cat("\n")
}
#' @describeIn ocpu_print TBD
#' @export
printmsg <- function(msg, sym="+", slim = TRUE, content=NULL, addtime=TRUE){
yellow1 <- crayon::make_style("yellow1")
ivory <- crayon::make_style("ivory")
bgMaroon <- crayon::make_style("maroon", bg = TRUE)
fancy <- crayon::combine_styles(ivory, crayon::bgCyan)
## can redo this code later: first construct the middle, then just repeat sym
## and cutoff at length of middle.. which can vary with slim.
# Will be messing with console width so grab global setting to reset later
globscipen <- options()$width
on.exit(options(width = globscipen))
##
## Get parameters for placement
##
# Calibrate position by requiring length to be closest even integer to true length
numchars <- ceiling(stringr::str_count(msg)/2)*2
# border should by some factor of twice the length of the message for centering aesthetics
lenAdj <- ifelse(slim, 1.25, 2)
blen <- round(numchars*lenAdj)
# construct topbottom first
topbottom <- paste0(c("\n", rep(sym, blen), "\n"), collapse="")
# construct middle
ind <- paste0(rep(" ", ceiling((blen - numchars)/2-1)), collapse="")
middle <- paste0(sym, fancy(ind), fancy(msg), fancy(ind), sym)
# if middle is shorter (likely only by 1), then adjust one side's spacing
# not sure when this would be negative, but too tired to think so will include 'max'
adjby <- max(0, blen - stringr::str_length(middle))
if(adjby > 0)
middle <- paste0(sym, fancy(ind), fancy(msg), fancy(ind), " ", sym)
# final message
trunc_topbot <- stringr::str_trunc(topbottom, stringr::str_length(middle)+2, ellipsis = "\n")
finalmsg <- paste0(trunc_topbot, middle, trunc_topbot, collapse="")
# Display - temporarily set the console width then print
options(width=stringr::str_count(topbottom))
cat(finalmsg)
# add time if applicable
if(addtime){
stamp <- paste0("\n", ind, "Timestamp: ", Sys.time(), "\n")
cat(yellow1(stamp))
}
# if content was provided, display that now
if(!is.null(content)){
if(class(content) %in% c("matrix", "data.frame")){
print(content, print.gap = TRUE, quote = FALSE)
}else{
cat(paste0("\n", content,"\n"))
}
}else{
cat("\n")
}
}
# f <- function(...){
# return(NULL)
# }
#
# args <- list(a = 1,
# letter = letters,
# dt = data.table(d1 = "first", d2 = "second"),
# l = list(1:100))
#
#
# decoded <- list(FUN=f, ARGS=args)
#
# print_ocpu_call(decoded)
|
c5eb0fdce8ea3cdcc741ae93b79125c752890d6c
|
722737ad902763e6687e4f9b5f31a10b9ec044fe
|
/man/getUpperBounds.Rd
|
86428c3ced08049376f1ce3f21f6ad352107e1d9
|
[] |
no_license
|
cran/editrules
|
7e82f60f78173b8476bc9c92ce565f9ca335c540
|
a84ffadd8103298883b59d425b3ccca4eefb8ee3
|
refs/heads/master
| 2021-01-01T15:51:26.200390
| 2018-07-01T20:30:03
| 2018-07-01T20:30:03
| 17,695,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
getUpperBounds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getUpperBounds.R
\name{getUpperBounds}
\alias{getUpperBounds}
\title{Get upperbounds of edits, given the boundaries of all variables}
\usage{
getUpperBounds(E, xlim)
}
\arguments{
\item{E}{\code{editmatrix}}
\item{xlim}{\code{matrix} with columns lower and upper, and rows are variables (in same order as E)}
}
\value{
matrix with upperbounds per edit and a possible value
}
\description{
Get upperbounds of edits, given the boundaries of all variables
}
\keyword{internal}
|
37e87f1dde20cf5d880819dc3643d5e01ec89289
|
5f368369467d46cdc73cabdd04eb439c856caccd
|
/R/p.R
|
e7e367cc4c810554486ee253d25036921b3a4ef6
|
[] |
no_license
|
geogugal/quickmapr
|
a3dfca514377c749b1ff46c64745b793867c1e5a
|
37410d105e1c80f55f81564fd8b2721f471a68c0
|
refs/heads/master
| 2023-08-16T17:09:02.787781
| 2023-08-14T19:04:58
| 2023-08-14T19:04:58
| 23,543,371
| 0
| 1
| null | 2023-09-11T16:21:36
| 2014-09-01T13:26:09
|
R
|
UTF-8
|
R
| false
| false
| 1,286
|
r
|
p.R
|
#' Pan the current plot
#'
#' Interactively reposition the current plot. Works on an existing
#' \code{qmap} object. Simply pass that object to \code{p()}. A single
#' repositioning results and the extent of the \code{qmap} object is changed.
#'
#' @param qmap_obj A qmap object. Optional, but performs better with larger
#' data sets.
#' @param loc A list with an x and y numeric indicating a location. Default is
#' to interactively get loc value until escaped.
#' @return NULL
#' @export
#' @importFrom graphics locator
#'
#' @examples
#' \dontrun{
#' data(lake)
#' x<-qmap(list(lake,buffer,elev))
#' p()
#' ## Or
#' p(x)
#' }
p <- function(qmap_obj = NULL, loc = NULL) {
if (class(qmap_obj) != "qmap") {
stop("Requires a valid qmap_obj.")
} else if (is.null(loc)) {
continue <- 0
obj <- paste(substitute(qmap_obj))
message("Click on plot to pan. Press 'Esc' to exit.")
loc <- locator(1)
while (!is.null(loc)) {
qmap_obj <- zoom_it(qmap_obj, loc, 1, pan = TRUE)
loc <- locator(1)
}
} else {
obj <- paste(substitute(qmap_obj))
qmap_obj <- zoom_it(qmap_obj, loc, 1, pan = TRUE)
}
assign(obj, qmap_obj, envir = parent.frame())
}
|
cad7982db6f054ce298cb3af48937ba430069d9f
|
604687e6abdbc28a02ca74a8e78cebeeef022689
|
/ElectreI/R/ElectreI.R
|
13cf1a4581bedd2be593fb089a0fec8f2c51860b
|
[] |
no_license
|
MrSpejn/MCDA-R-modules
|
3c0577e013572734b0c92e575b72ea7af8dc7e78
|
099505805250310b190baf16fe19efed64639be8
|
refs/heads/master
| 2020-04-09T04:33:54.620340
| 2019-01-18T17:53:53
| 2019-01-18T17:53:53
| 160,028,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 809
|
r
|
ElectreI.R
|
ElectreI <- function(performanceMatrix, criteriaWeights, directions, indifferenceThresholds, preferenceTreshholds, vetoThresholds) {
concorndanceCoefficients <- calculateConcordanceCoefficients(
calculatePartialConcordanceCoefficients(
performanceMatrix,
directions,
indifferenceThresholds,
preferenceTreshholds
),
criteriaWeights,
)
vetoMatrix <- calculateVetoMatrix(
performanceMatrix,
directions,
vetoThresholds
)
preferenceGraph <- concorndanceCoefficients > lambda & !vetoMatrix
acyclicPreferenceGraph <- remove_cycles(preferenceGraph)
kernel <- find_kernel(acyclicPreferenceGraph)
return(list(
kernel=kernel,
graph=acyclicPreferenceGraph,
))
}
|
035f1c5af975f084d3a052b89259de6a6295d3e8
|
7be984ab2843a06b4e0b9fd8a09dce8d2a00cfba
|
/run_analysis.R
|
3f830d53dc9f604b73ad7137c9d9f4cfa819f6d7
|
[] |
no_license
|
opticalcloaking/GetCleanData_CourseProject
|
ecf8ce9fb3f3164646da30f5a009b25ebd7548b5
|
f8f1fb78673fc920bd4199cd015351e8b6bba8b7
|
refs/heads/master
| 2020-05-17T08:27:03.242172
| 2015-07-26T20:02:05
| 2015-07-26T20:02:05
| 39,738,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,390
|
r
|
run_analysis.R
|
# read in features names
features <- read.csv("./features.txt", header=FALSE, sep=" ",
col.names=c("num", "feature"))
features$feature <- as.character(features$feature)
# identify features that are either a mean or a standard deviation
logicalMeanStd <- (grepl("mean()", features$feature, fixed=TRUE) |
grepl("std()", features$feature, fixed=TRUE))
# read in X training data
colsToRead <- (logicalMeanStd * 2 - 1) * 16
colsNames <- features$feature[logicalMeanStd]
X_train <- read.fwf("./train/X_train.txt", colsToRead, header=FALSE,
col.names=colsNames, buffersize=5)
# read in Y training data
Y_train <- read.csv("./train/y_train.txt", header=FALSE, col.names="activity")
# read in subject training data
subj_train <- read.csv("./train/subject_train.txt", header=FALSE,
col.names="subject")
# combine X and Y and subject training data
train <- cbind(Y_train, subj_train, X_train)
# read in X testing data
X_test <- read.fwf("./test/X_test.txt", colsToRead, header=FALSE,
col.names=colsNames, buffersize=5)
# read in Y testing data
Y_test <- read.csv("./test/y_test.txt", header=FALSE, col.names="activity")
# read in subject testing data
subj_test <- read.csv("./test/subject_test.txt", header=FALSE,
col.names="subject")
# combine X and Y and subject testing data
test <- cbind(Y_test, subj_test, X_test)
# combine training and testing data
df.samsung <- rbind(train, test)
# convert integer activity classifications to factor with descriptive levels
df.samsung$activity <- as.character(df.samsung$activity)
activity_labels <- read.csv("./activity_labels.txt", header=FALSE, sep=" ")
for (activity in 1:6) {
df.samsung$activity[which(df.samsung$activity == as.character(activity))] <-
as.character(activity_labels[activity,2])
}
# remove excess ellipses in column names
names(df.samsung) <- gsub("...", ".", names(df.samsung), fixed=TRUE)
names(df.samsung) <- gsub("..", ".", names(df.samsung), fixed=TRUE)
# treat subject as a character rather than an integer
df.samsung$subject <- as.character(df.samsung$subject)
# compute means of the variables by subject and activity
df.means <- aggregate(. ~ activity + subject, data=df.samsung, FUN=mean)
# output the means data frame
write.table(df.means, file="./tidymeans.txt", row.names=FALSE)
|
7d246e22caa57a0ed9cdba3df173c483c18a38eb
|
fd7113534a112f1cbbaf715087638de7b1916e42
|
/religious texts.R
|
739ef49df7bc2d72f6e753393eb6a72b11f374c8
|
[] |
no_license
|
sabreenaabedin/ds4001
|
9827cd546088fff59e08dd8d16a76d18c56eaf94
|
151f1a49ffaec9325c44a849e2bf513f9a0dcb13
|
refs/heads/master
| 2020-03-23T13:25:12.053988
| 2018-07-19T18:35:14
| 2018-07-19T18:35:14
| 141,616,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,845
|
r
|
religious texts.R
|
## load libraries
library(tm)
library(wordcloud)
library(SnowballC)
library(ggplot2)
## Create Corpus
{
dox <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/final"),readerControl = list(language="eng"))
quran <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/quran"),readerControl = list(language="eng"))
bible <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/bible"),readerControl = list(language="eng"))
mormon <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/mormon"),readerControl = list(language="eng"))
buddha <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/buddhism"),readerControl = list(language="eng"))
zorastrian <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/zorastrian"),readerControl = list(language="eng"))
meditation <- Corpus(DirSource("C:/Users/Sabreena/Dropbox/DS/meditation"),readerControl = list(language="eng"))
}
## Cleaning and Preprocessing
{
# all documents
dox <- tm_map(dox,content_transformer(tolower))
dox <- tm_map(dox,stripWhitespace)
dox <- tm_map(dox,removeWords,stopwords('english'))
dox <- tm_map(dox,removePunctuation)
dox <- tm_map(dox,stemDocument)
dox <- tm_map(dox, removeNumbers)
inspect(dox)
#quran
quran <- tm_map(quran,content_transformer(tolower))
quran <- tm_map(quran, PlainTextDocument)
quran <- tm_map(quran,stripWhitespace)
quran <- tm_map(quran,removeWords,stopwords('english'))
quran <- tm_map(quran,removePunctuation)
quran <- tm_map(quran,stemDocument)
quran <- tm_map(quran, removeNumbers)
# bible
bible <- tm_map(bible,content_transformer(tolower))
bible <- tm_map(bible, PlainTextDocument)
bible <- tm_map(bible,stripWhitespace)
bible <- tm_map(bible,removeWords,stopwords('english'))
bible <- tm_map(bible,removePunctuation)
bible <- tm_map(bible,stemDocument)
bible <- tm_map(bible, removeNumbers)
# mormon
mormon <- tm_map(mormon,content_transformer(tolower))
mormon <- tm_map(mormon, PlainTextDocument)
mormon <- tm_map(mormon,stripWhitespace)
mormon <- tm_map(mormon,removeWords,stopwords('english'))
mormon <- tm_map(mormon,removePunctuation)
mormon <- tm_map(mormon,stemDocument)
mormon <- tm_map(mormon, removeNumbers)
# buddha
buddha <- tm_map(buddha,content_transformer(tolower))
buddha <- tm_map(buddha, PlainTextDocument)
buddha <- tm_map(buddha,stripWhitespace)
buddha <- tm_map(buddha,removeWords,stopwords('english'))
buddha <- tm_map(buddha,removePunctuation)
buddha <- tm_map(buddha,stemDocument)
buddha <- tm_map(buddha, removeNumbers)
# zorastrian
zorastrian <- tm_map(zorastrian,content_transformer(tolower))
zorastrian <- tm_map(zorastrian, PlainTextDocument)
zorastrian <- tm_map(zorastrian,stripWhitespace)
zorastrian <- tm_map(zorastrian,removeWords,stopwords('english'))
zorastrian <- tm_map(zorastrian,removePunctuation)
zorastrian <- tm_map(zorastrian,stemDocument)
zorastrian <- tm_map(zorastrian, removeNumbers)
# meditation
meditation <- tm_map(meditation,content_transformer(tolower))
meditation <- tm_map(meditation, PlainTextDocument)
meditation <- tm_map(meditation,stripWhitespace)
meditation <- tm_map(meditation,removeWords,stopwords('english'))
meditation <- tm_map(meditation,removePunctuation)
meditation <- tm_map(meditation,stemDocument)
meditation <- tm_map(meditation, removeNumbers)
}
dtm <- DocumentTermMatrix(dox)
tdm <- TermDocumentMatrix(dox)
inspect(dtm[1:5, 1:5])
tdm.common <- removeSparseTerms(tdm, .1)
dtm.common <-removeSparseTerms(dtm, 0.6)
inspect(dtm.common)
findAssocs(dtm, "god", 0.99)
findAssocs(dtm, "data", corlimit=0.6)
#dtm1 <- DocumentTermMatrix(dox[2:3])
## Word Clouds
{
# all terms
wordcloud(dox,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# quran
wordcloud(quran,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# bible
wordcloud(bible,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# mormon
wordcloud(mormon,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# buddha
wordcloud(buddha,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# zorastrian
wordcloud(zorastrian,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
# meditation
wordcloud(meditation,scale=c(5,0.5),max.words=80, random.order = FALSE, rot.per = .25, colors = RColorBrewer::brewer.pal(8,"Dark2"))
}
## Word Count
{
inspect(dtm[1:3,1:3]) #verify that sum of the rows would give total number of words
rowTotals <- apply(dtm, 1, sum)
View(rowTotals)
barplot(rowTotals, main="Terms per Document", xlab = "Word Count",
ylab="Document", col="darksalmon", horiz = TRUE,
names.arg=c("Mormon", "Bible", "Quran", "Buddh.", "Zend.", "Med."))
}
## Cluster Dendrograms
{
findFreqTerms(dtm,100)
freq <- colSums(as.matrix(dtm))
ord <- order(freq,decreasing=FALSE) # terms in ascending order
fterms <- freq[tail(ord, 20)] # grab last 20 terms
tail(fterms) # verify
my.df <- as.data.frame(fterms)
my.df.scale <- scale(my.df) #normalize
d <- dist(my.df.scale,method="euclidean") #find euclidean distance
fit <- hclust(d, method="ward.D")
plot(fit, col = "indianred4", xlab = "Terms") #plot
## k means clustering
m <- as.matrix(dtm)
d <- dist(m)
groups <- hclust(d, method="ward.D")
plot(groups, hang = -1)
rect.hclust(groups,2)
rect.hclust(groups, 4) # k = 4
# dtmq <- DocumentTermMatrix(quran)
# m <- as.matrix(dtmq)
# d <- dist(m)
# groups <- hclust(d, method="ward.D")
# plot(groups, hang = -1)
# rect.hclust(groups,2)
# rect.hclust(groups, 4) # k = 4
# dtmb <- DocumentTermMatrix(bible)
# m <- as.matrix(dtmb)
# d <- dist(m)
# groups <- hclust(d, method="ward.D")
# plot(groups, hang = -1)
# rect.hclust(groups,2)
# rect.hclust(groups, 4) # k = 4
#
# dtmbud <- DocumentTermMatrix(buddha)
# m <- as.matrix(dtmbud)
# d <- dist(m)
# groups <- hclust(d, method="ward.D")
# plot(groups, hang = -1)
# rect.hclust(groups,2)
# rect.hclust(groups, 4) # k = 4
# dtmzor <- DocumentTermMatrix(zorastrian)
# m <- as.matrix(dtmzor)
# d <- dist(m)
# groups <- hclust(d, method="ward.D")
# plot(groups, hang = -1)
# rect.hclust(groups,2)
# rect.hclust(groups, 4) # k = 4
}
## Sentiment Analysis
{
positives= readLines("C:/Users/Sabreena/Dropbox/DS/text_mining/positive_words.txt")
negatives= readLines("C:/Users/Sabreena/Dropbox/DS/text_mining/negative_words.txt")
score.sentiment = function(sentences, pos.words, neg.words)
{
require(plyr)
require(stringr)
scores = laply(sentences, function(sentence, pos.words, neg.words) {
sentence = gsub('[[:punct:]]', '', sentence)
sentence = gsub('[[:cntrl:]]', '', sentence)
sentence = gsub('\\d+', '', sentence)
sentence = tolower(sentence)
word.list = str_split(sentence, '\\s+')
words = unlist(word.list)
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words)
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
# create data frame to track scores
SentimentScores <- as.data.frame(c("Mormon", "Bible", "Quran", "Buddh.", "Zend.", "Med."))
SentimentScores[c("scores")] <- NA
View(SentimentScores)
# Book of Mormon
Mormon <- readLines("C:/Users/Sabreena/Dropbox/DS/final/1-Book-of-Mormon-Mormonism.txt")
Score <- score.sentiment(Mormon,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Mormon Sentiment",
border="black",col="darkseagreen")
SentimentScores[1,2] <- sum(Score$score)
# Bible
Bible <- readLines("C:/Users/Sabreena/Dropbox/DS/final/2-King-James-Bible-Christianity.txt")
Score <- score.sentiment(Bible,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Bible Sentiment",
border="black",col="darkseagreen")
SentimentScores[2,2] <- sum(Score$score)
# Quran
Quran <- readLines("C:/Users/Sabreena/Dropbox/DS/final/3-Quran-Islam.txt")
Score <- score.sentiment(Quran,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Quran Sentiment",
border="black",col="darkseagreen")
SentimentScores[3,2] <- sum(Score$score)
# Buddhism
Buddh <- readLines("C:/Users/Sabreena/Dropbox/DS/final/4-Gospel-of-Budda-Buddhism.txt")
Score <- score.sentiment(Buddh,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Buddha Sentiment",
border="black",col="darkseagreen")
SentimentScores[4,2] <- sum(Score$score)
# Zend Avesta
Zend <- readLines("C:/Users/Sabreena/Dropbox/DS/final/5-Zend-Avesta-Zorastrianism-NEW.txt")
Score <- score.sentiment(Zend,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Zend Sentiment",
border="black",col="darkseagreen")
SentimentScores[5,2] <- sum(Score$score)
#Meditations
Med <- readLines("C:/Users/Sabreena/Dropbox/DS/final/6-Meditations.txt")
Score <- score.sentiment(Med,positives,negatives)
hist(Score$score,xlab="Sentiment Score ",main="Meditation Sentiment",
border="black",col="darkseagreen")
SentimentScores[6,2] <- sum(Score$score)
View(SentimentScores)
plot(SentimentScores, horiz = TRUE, col = "magenta")
# divide by the word count
SentimentScores[1,2] <- SentimentScores[1,2]/126447
SentimentScores[2,2] <- SentimentScores[2,2]/373701
SentimentScores[3,2] <- SentimentScores[3,2]/93121
SentimentScores[4,2] <- SentimentScores[4,2]/45157
SentimentScores[5,2] <- SentimentScores[5,2]/91421
SentimentScores[6,2] <- SentimentScores[6,2]/35283
View(SentimentScores)
plot(SentimentScores)
}
## frequency histogram - attempted
{
# wf=data.frame(term=names(freq),occurrences=freq)
# View(wf)
# p <- ggplot(subset(wf, freq>3000), aes(term, occurrences))
# p <- p + theme(axis.text.x=element_text(angle=45, hjust=1))
# p
}
## violence in religious texts
# timothyrenner.github.io
{
# violentwords <- data.frame(c("wound","hurt","fight","violate","destroy",
# "slaughter", "murder", "kill", "attack", "break",
# "crush", "provoke", "anger", "hatred"))
# colnames(violentwords) <- "words"
# View(violentwords)
findFrequency = function(text)
{
## attempted iterative approach at first
# violentfrequency <- 0
# for(i in 1:nrow(data.frame)){
# n <- length(grep(text, data.frame[i]))
# violentfrequency <- violentfrequency + n
# }
## ended up hard coding the violent words
violentfrequency <- 0
violentfrequency <- length(grep("wound", text)) + length(grep("hurt", text)) +
length(grep("fight", text)) + length(grep("violate", text)) + length(grep("destroy", text)) +
length(grep("slaughter", text)) + length(grep("murder", text)) + length(grep("kill", text))
+ length(grep("attack", text)) + length(grep("break", text)) + length(grep("crush", text))
+ length(grep("provoke", text)) + length(grep("anger", text)) + length(grep("hatred", text))
return(violentfrequency)
}
findFrequency(Quran)
#check
length(grep("wound", Quran))
length(grep("hurt", Quran))
# create data frame to track scores
violence <- as.data.frame(c("Mormon", "Bible", "Quran", "Buddh.", "Zend.", "Med."))
violence[c("scores")] <- NA
View(violence)
#calculate scores
violence[1,2] <- findFrequency(Mormon)
violence[2,2] <- findFrequency(Bible)
violence[3,2] <- findFrequency(Quran)
violence[4,2] <- findFrequency(Buddh)
violence[5,2] <- findFrequency(Zend)
violence[6,2] <- findFrequency(Med)
View(violence)
plot(violence, xlab = "text")
# divide by the word count
violence[1,2] <- violence[1,2]/126447
violence[2,2] <- violence[2,2]/373701
violence[3,2] <- violence[3,2]/93121
violence[4,2] <- violence[4,2]/45157
violence[5,2] <- violence[5,2]/91421
violence[6,2] <- violence[6,2]/35283
View(violence)
plot(violence, xlab = "text")
}
## graph viz - required RGraphViz which uninstalled all my other libraries
{
# plot(tdm,
# terms = sample(fterms, 10),
# corThreshold = 0.7,
# weighting = FALSE,
# attrs = list(graph = list(rankdir = "BT"),
# node = list(shape = "rectangle",
# fixedsize = FALSE))
# )
}
## log
{
# dtm.dense <- as.matrix(dtm)
# dim(dtm.dense)
# library(reshape2)
# dtm.dense = melt(dtm.dense, value.name = "count")
#
# ggplot(dtm.dense, aes(x = Docs, y = Terms, fill = log10(count))) +
# geom_tile(colour = "white") +
# scale_fill_gradient(high="#FF0000" , low="#FFFFFF")+
# ylab("") +
# theme(panel.background = element_blank()) +
# theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
#
# ggplot(dtm.dense, aes(x = Docs, y = Terms))
}
# library(cluster)
# d <- dist(t(dtm), method="euclidian")
# fit <- hclust(d=d, method="ward")
# fit
# plot(fit,hang = -1)
#
# library(fpc)
# dtms <- removeSparseTerms(dtm, 0.60) # Prepare the data (max 15% empty space)
# d <- dist(t(dtms), method="euclidian")
# kfit <- kmeans(d, 4) # 2 groups
# clusplot(as.matrix(d), kfit$cluster, color=T, shade=T, labels=2, lines=0)
|
50f5f3ab55e180135d8fcae636d381c940adb63b
|
f8ddb60bbd550d0c18104e09ee62d5ac0e295d5b
|
/man/heightToPeak.Rd
|
5434432c2f8de23b083ce46f913717238ad3abbc
|
[] |
no_license
|
OskarHansson/strvalidator
|
1a52ae233b1458b01c020d6089b877a0c978b271
|
734d17dda259271ee180bd876e80152e208085f9
|
refs/heads/master
| 2023-07-19T01:53:57.239456
| 2023-07-16T13:25:45
| 2023-07-16T13:25:45
| 8,653,176
| 5
| 3
| null | 2020-05-04T06:35:10
| 2013-03-08T15:05:30
|
R
|
UTF-8
|
R
| false
| true
| 845
|
rd
|
heightToPeak.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heightToPeak.r
\name{heightToPeak}
\alias{heightToPeak}
\title{Height To Peak.}
\usage{
heightToPeak(data, width = 1, keep.na = TRUE, debug = FALSE)
}
\arguments{
\item{data}{data frame containing at least columns 'Height' and 'Size'.}
\item{width}{numeric specifying the width of the peak in bp.}
\item{keep.na}{logical. TRUE to keep empty markers.}
\item{debug}{logical. TRUE prints debug information.}
}
\value{
data.frame with new values.
}
\description{
Helper function to convert a peak into a plotable polygon.
}
\details{
Converts a single height and size value to a plotable 0-height-0 triangle/peak value.
Makes 3 data points from each peak size for plotting a polygon representing a peak.
Factors in other columns might get converted to factor level.
}
|
3012631bd716cc775c766f80693e640dfbc6c2ff
|
02731a4437feddd63aff72dac76679be1c4c1e6a
|
/vif.R
|
a96e1f2a7149825e4a88ac87cadb8ac3487a4e0d
|
[
"MIT"
] |
permissive
|
rnaimehaom/ToxicityModel
|
6738665974ca7dcefec1db92882a7a13a894089d
|
d194e6c5ddf3c981171296dda4d5972bbd45199f
|
refs/heads/master
| 2023-04-27T20:25:29.194101
| 2021-05-15T10:07:56
| 2021-05-15T10:07:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 202
|
r
|
vif.R
|
vif <-function(x,df) {
y = names(df)
y_new = y[!(y %in% x)]
eqn = paste(y_new, collapse = '+')
form = paste(x,"~", eqn)
vif = 1/(1-summary(lm(as.formula(form), data = df))$r.squared)
vif
}
|
49800e0ac23cbcb94ff366271de372ae36d16b34
|
3e5d8d362b3367e4ff0e152b0242b7a285d8484f
|
/man/resamp_area_Param.Rd
|
c7bc2aae9b70a95f62f99e5727b854d2e0d6df5f
|
[] |
no_license
|
mandymejia/ciftiTools
|
d591a6e8732dd9df17dd62d959a7a808eee16bef
|
7becc99a6301c47541c883739f7fb2f0f3413e60
|
refs/heads/master
| 2023-08-17T06:07:35.229385
| 2023-01-23T20:00:17
| 2023-01-23T20:00:17
| 241,136,369
| 30
| 10
| null | 2023-08-21T21:47:22
| 2020-02-17T15:06:01
|
HTML
|
UTF-8
|
R
| false
| true
| 773
|
rd
|
resamp_area_Param.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rox_args_docs.R
\name{resamp_area_Param}
\alias{resamp_area_Param}
\title{resamp_area_Param}
\arguments{
\item{areaL_original_fname, areaR_original_fname}{File paths to the surfaces
to use for vertex area correction during adaptive resampling. (Only used if
resampling with the adaptive method.) \code{area[L/R]_original_fname} should
match the current resolution of the data.
The Workbench command for adaptive resampling requires the target surfaces
for area correction too, but to make the workflow easier \code{ciftiTools}
will resample \code{area[L/R]_original_fname} with the barycentric method
and use that for the target surface.}
}
\description{
resamp_area_Param
}
\keyword{internal}
|
df852795c6a1fb9adbe6e76066b83198985997cc
|
e6549edacf38351730ca91ead2456d50ba20f1cd
|
/man/InvBasis.wst.rd
|
ccbdfc4de068a89e2aafa9b8652eba5c0b147d6f
|
[] |
no_license
|
cran/wavethresh
|
96f92574f59f62f77b9b5fe5c318e27011de585c
|
433dac8d2b5f3bf806530a29b5fe022fd2fe9087
|
refs/heads/master
| 2022-11-29T22:37:39.292801
| 2022-11-16T14:20:02
| 2022-11-16T14:20:02
| 17,700,852
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,913
|
rd
|
InvBasis.wst.rd
|
\name{InvBasis.wst}
\alias{InvBasis.wst}
\title{Invert a wst library representation with a basis specification}
\usage{
\method{InvBasis}{wst}(wst, nv, \dots)
}
\arguments{
\item{wst}{The wst object that you wish to invert}
\item{nv}{The node vector, basis spec, that you want to pick out}
\item{...}{Other arguments, that don't do anything here}
}
\description{
Inverts a wst basis representation with a given basis specification,
for example an output from the \code{\link{MaNoVe}} function.
}
\details{
Objects arising from a \code{\link{wst.object}} specification
are a representation of a signal with respect to a library
of basis functions. A particular basis specification can be obtained
using the \code{\link{numtonv}} function which can pick an indexed
basis function, or \code{\link{MaNoVe.wst}} which uses the
Coifman-Wickerhauser minimum entropy method to select a basis.
This function takes a \code{\link{wst.object}} and
a particular basis description (in a \code{\link{nv.object}} node vector
object) and inverts the representation with respect to that selected basis.
}
\value{
The inverted reconstruction
}
\seealso{\code{\link{numtonv}},\code{\link{nv.object}},\code{\link{MaNoVe.wst}},\code{\link{threshold.wst}},\code{\link{wst}}}
\examples{
#
# Let's generate a noisy signal
#
x <- example.1()$y + rnorm(512, sd=0.2)
#
# You can plot this if you like
#
\dontrun{ts.plot(x)}
#
# Now take the nondecimated wavelet transform
#
xwst <- wst(x)
#
# Threshold it
#
xwstT <- threshold(xwst)
#
# You can plot this too if you like
#
\dontrun{plot(xwstT)}
#
# Now use Coifman-Wickerhauser to get a "good" basis
#
xwstTNV <- MaNoVe(xwstT)
#
# Now invert the thresholded wst using this basis specification
#
xTwr <- InvBasis(xwstT, xwstTNV)
#
# And plot the result, and superimpose the truth in dotted
#
\dontrun{ts.plot(xTwr)}
\dontrun{lines(example.1()$y, lty=2)}
}
\author{G P Nason}
\keyword{smooth}
|
f26790dfb9fe59e99eee512b66ab3c72a0c96bbc
|
0419c49a00967c2eae4c577a9fac79e7464b675b
|
/commonDEGs_test.R
|
9b791789d7ed70b2950d746b23ed43f8f4b40ab3
|
[] |
no_license
|
zerland/PhD_Code
|
1a6348f89e98da387dffd5fdfd9c2a104d116213
|
51d702adf900117d64a8f250879820e6d89e91de
|
refs/heads/master
| 2023-03-18T18:59:56.892139
| 2019-03-08T11:50:15
| 2019-03-08T11:50:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
commonDEGs_test.R
|
## Identify differentially expressed genes shared across seven TDP-43
## pathology datasets: for each cutoff i, take the top-i genes (ranked
## by significance) from every dataset and intersect the seven lists.
setwd("/users/clairegreen/Documents/PhD/TDP-43/TDP-43_Code/Results/GeneExpression/noMedian/")
## Microarray differential-expression tables, each re-ordered so the
## most significant genes (smallest P.Value) come first.
C9 <- read.csv("C9_unique.csv")
C9 <- C9[order(C9$P.Value),]
CH <- read.csv("CH_unique.csv")
CH <- CH[order(CH$P.Value),]
sals <- read.csv("sals_unique.csv")
sals <- sals[order(sals$P.Value),]
ftld <- read.csv("ftld_unique.csv")
ftld <- ftld[order(ftld$P.Value),]
vcp <- read.csv("vcp_unique.csv")
vcp <- vcp[order(vcp$P.Value),]
## RNA-seq (DESeq2) result tables; de-duplicated by gene symbol.
## NOTE(review): unlike the tables above these are NOT re-sorted here —
## the top-i logic below assumes they arrive ranked by significance;
## confirm against the files.
setwd("/users/clairegreen/Documents/PhD/TDP-43/TDP-43_Code/Results/GeneExpression/TDP-43_DEseq2/")
pet <- read.csv("PET_results_keepfiltering.csv")
pet <- pet[!duplicated(pet$hgnc_symbol),]
rav <- read.csv("RAV_results_keepfiltering.csv")
rav <- rav[!duplicated(rav$hgnc_symbol),]
## extract gene lists
c9_gene <- C9$Gene.Symbol
ch_gene <- CH$Gene.Symbol
sals_gene <- sals$Gene.Symbol
ftld_gene <- ftld$Gene.Symbol
vcp_gene <- vcp$Gene.Symbol
pet_gene <- pet$hgnc_symbol
rav_gene <- rav$hgnc_symbol
# num_overlap <- matrix(data=NA)
## List[[i]] holds the genes common to the top-i entries of all seven
## ranked lists. The 6500 cutoff is hard-coded; the list grows inside
## the loop, which is slow but tolerable at this size.
List <- list()
for (i in 1:6500){
  C9_int <- c9_gene[1:i]
  CH_int <- ch_gene[1:i]
  sals_int <- sals_gene[1:i]
  ftld_int <- ftld_gene[1:i]
  vcp_int <- vcp_gene[1:i]
  pet_int <- pet_gene[1:i]
  rav_int <- rav_gene[1:i]
  List[[i]] <- Reduce(intersect, list(C9_int, CH_int, sals_int, ftld_int, vcp_int, pet_int, rav_int))
}
## Flatten the ragged list into one row per cutoff and export.
## NOTE(review): file name says "6000" but the run goes to 6500.
output_6500 <- plyr::ldply(List, rbind)
write.csv(output_6500, "intersectnomedian_6000.csv")
write.csv(List, "list.csv",quote = FALSE, row.names = FALSE)
## Exploratory inspection of the final/near-final intersections.
List[6500]
# NOTE(review): `turnwhich` is not a defined function — this line errors;
# presumably a stray edit of `which(...)`.
turnwhich(List == "165")
List[5877]
## Write the gene set at the full 6500 cutoff to a plain text file.
x <- as.vector(List[6500])
cat(x, sep = "\n")
write.csv(List, "List.csv")
write.table(x, "6500_list.txt", quote = FALSE, col.names = FALSE, row.names = FALSE)
|
ec70381fc1754ac7eaca2f87bfa62dad0c3f8bd2
|
3d87b099f242e6fb9d8b594537ee7c972691de42
|
/run_analysis.R
|
b9a024a3a866b523df5137f0a8b8da1d83429116
|
[] |
no_license
|
fsmunoz/cleaningdatacoursera
|
53dc21de08af119ef75b293bd3bdcde9a9a41268
|
a507e171121eab4469c53577b86aa98c39aa775d
|
refs/heads/master
| 2021-01-22T22:35:45.173122
| 2014-07-25T07:47:36
| 2014-07-25T07:47:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,247
|
r
|
run_analysis.R
|
### Coursera - Getting and Cleaning Data
### Course Project
###
### 2014, Frederico Munoz <fsmunoz@gmail.com>
###
### Here are the data for the project:
### https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
### You should create one R script called run_analysis.R that does the following.
### - Merges the training and the test sets to create one data set.
### - Extracts only the measurements on the mean and standard deviation for each measurement.
### - Uses descriptive activity names to name the activities in the data set
### - Appropriately labels the data set with descriptive activity names.
### - Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
###
### This file is thoroughly commented (excessively so) given the
### pedagogic nature of it. All output is written as CSV files since
### it is easier to inspect and work with from multiplace
### applications.
library(reshape2) # used for melt/dcast
### Global variables use throughout
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
datasetFile <- "UCI HAR Dataset.zip"
datasetDir <- "UCI HAR Dataset"
### Check for zipped dataset file: if it doesn't exist then download it
if (!file.exists(datasetFile) && !file.exists(datasetDir)) {
download.file(fileUrl, destfile=datasetFile, method="curl")
} else {
print("File already exists, skipping download")
}
### Check for dataset directory, and unzip file if it doesn't exist
if(!file.exists(datasetDir)) {
unzip(zipfile = datasetFile)
} else {
print("Dataset directory already exists, skipping unzip")
}
### Import datasets and merge them
X.traindata <- read.table("./UCI HAR Dataset/train/X_train.txt")
X.testdata <- read.table("./UCI HAR Dataset/test/X_test.txt")
X.data <- rbind(X.testdata, X.traindata) # merge
Y.traindata <- read.table("./UCI HAR Dataset/train/y_train.txt")
Y.testdata <- read.table("./UCI HAR Dataset/test/y_test.txt")
Y.data <- rbind(Y.testdata, Y.traindata) # merge
### Read and merge subjects
sub.train <- read.table("UCI HAR Dataset/train/subject_train.txt")
sub.test <- read.table("UCI HAR Dataset/test/subject_test.txt")
sub.data <- rbind(sub.train, sub.test) # merge
### Import the features and activities
features <- read.table("./UCI HAR Dataset/features.txt")
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
## This transforms WALKING_UPSTAIRS > "WALKING_UPSTAIRS" >
## "walking_upstairs" > "walkingupstairs"
activities[, 2] = gsub("_", "", tolower(as.character(activities[, 2])))
### Add labels; this will change Y.data:
###
### V1 > activity > activity
### 1 5 > 1 5 > 1 standing
### 2 5 > 2 5 > 2 standing
### 3 5 > 3 5 > 3 standing
###
names(Y.data) <- "activity" # changes col name from "V1" to something meaningful
Y.data [,1] = activities[Y.data[,1], 2] # this replaces "1" with "walking", etc.
### Extract mean and SD only; there are plenty of measurements but the
### mean and SD always end in "-mean()" and "-std()", e.g.:
### > features
### [...]
### 503 503 fBodyAccMag-mean() <--- the mean
### 504 504 fBodyAccMag-std() <--- the SD
### 505 505 fBodyAccMag-mad()
### 506 506 fBodyAccMag-max()
###
### NB: there is a decision here: mean and SD are taken from fields
### which end in -mean() and -std(); this is important since there are
### other fields which include words like "Avg" in their name and that
### could be also included. I have opted to keep them out and use only
### the ones that follow the rule above since these are the ones which
### are explicitly marked as means and SDs.
###
### Use grep to match for these fields and then store them into a new
### variable
meanSD.features <- grep("-mean\\(\\)|-std\\(\\)", features[, 2]) # match
X.data.meanSD<-X.data[,meanSD.features] # store
## same approach as before but deleting the "()"
names(X.data.meanSD)<-tolower(gsub("\\(|\\)", "", features[meanSD.features, 2]))
## After this last step we have 66 variable, all of them related to
## mean and SD, in our new variable:
## > str(X.data.meanSD)
## 'data.frame': 10299 obs. of 66 variables:
## $ tbodyacc-mean-x : num 0.257 0.286 0.275 0.27 0.275 ...
## $ tbodyacc-mean-y : num -0.0233 -0.0132 -0.0261 -0.0326 -0.0278 ...
## $ tbodyacc-mean-z : num -0.0147 -0.1191 -0.1182 -0.1175 -0.1295 ...
## $ tbodyacc-std-x : num -0.938 -0.975 -0.994 -0.995 -0.994 ...
## $ tbodyacc-std-y : num -0.92 -0.967 -0.97 -0.973 -0.967 ...
## $ tbodyacc-std-z : num -0.668 -0.945 -0.963 -0.967 -0.978 ...
## $ tgravityacc-mean-x : num 0.936 0.927 0.93 0.929 0.927 ...
## [...]
### Final arragements, merging and saving of dataset
###
### tmp.dataset will be a "long" table, with all the mean values in
### X.data.meanSD prepended with one column indicating the activity
### and another one the subject:
###
### |-------------- from X.data.meanSD -----------|
### subject activity tbodyacc-mean-x tbodyacc-mean-y tbodyacc-mean-z
### 1 1 standing 0.2571778 -0.02328523 -0.01465376
### [...]
###
names(sub.data) <- "subject" # label the column
tmp.dataset <- cbind(sub.data, Y.data, X.data.meanSD) # bind all the columns
write.csv(tmp.dataset, "temp_data.csv", row.names = FALSE) # write it to disk, as CSV file.
### Creates a second, independent tidy data set with the average of
### each variable for each activity and each subject.
###
### This will be done using melt/cast, thus reshaping the data: first
### by melting it (and making it into a "long-format" table) using
### activity and subject as ids, then by recasting it but this time
### calculating the average of each variable by the same ids.
## Melt the data using subject and activity as id:
melted.data <- melt(tmp.dataset, id=c("subject","activity"))
## This produces a long format table:
##
## > head(melted.data)
## subject activity variable value
## 1 1 standing tbodyacc-mean-x 0.2571778
## 2 1 standing tbodyacc-mean-x 0.2860267
## 3 1 standing tbodyacc-mean-x 0.2754848
##
## ... which has less columns but more rows:
## > str(tmp.dataset)
## 'data.frame': 10299 obs. of 68 variables:
## [...]
## > str(melted.data)
## 'data.frame': 679734 obs. of 4 variables:
## [...]
## Cast the melted table, but calculating the mean of the "variable"
## column (which contains all the variable names as factors) and
final.data <- dcast(melted.data, formula = subject + activity ~ variable, mean)
## NOTE(review): this block previously repeated the melt()/dcast() calls
## from just above verbatim; the duplicates recomputed byte-identical
## objects (`melted.data`, `final.data`) and were removed. `final.data`
## is the tidy per-subject/per-activity mean table built earlier.
write.csv(final.data, "final_data.csv", row.names = FALSE) # this is the final output, as a CSV file
## The final, tidy dataset has 180 observations and 68 variables; the
## numering variables are now the mean value for each one of them for
## each combination of subject + activity; all columns are named and
## all factors are explicitly labelled.
##
## str(final.data)
## 'data.frame': 180 obs. of 68 variables:
## $ subject : int 1 1 1 1 1 1 2 2 2 2 ...
## $ activity : chr "laying" "sitting" "standing" "walking" ...
## $ tbodyacc-mean-x : num 0.281 0.276 0.278 0.276 0.278 ...
## $ tbodyacc-mean-y : num -0.0182 -0.0131 -0.0173 -0.0186 -0.0227 ...
## [...]
##
## The dataset is wide (again) and so the excerpt below is clipped horizontally as well:
##
## head(final.data)
## subject activity tbodyacc-mean-x tbodyacc-mean-y tbodyacc-mean-z
## 1 1 laying 0.2813734 -0.01815874 -0.1072456
## 2 1 sitting 0.2759908 -0.01305597 -0.1098725
## 3 1 standing 0.2776850 -0.01732705 -0.1035844
## [...]
## EOF
|
4ca08bc815f6c6557c66cd75f698ad1e78dcb560
|
118bc327b85a3ac1b40649dd4559d5f75b913a43
|
/man/gcDist.Rd
|
4102bbcefbd4e12da66b90da71ada52e9483fa7e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
bobverity/bobFunctions
|
2c613da2db8a3c2eaaffab0a8a5df170240891d4
|
3bce3fd92e4c30710413b02ea338ad1d987e5782
|
refs/heads/master
| 2021-01-17T12:36:57.566104
| 2018-10-05T10:57:10
| 2018-10-05T10:57:10
| 59,672,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
gcDist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{gcDist}
\alias{gcDist}
\title{great circle distance}
\usage{
gcDist(origin_lat, origin_lon, dest_lat, dest_lon)
}
\arguments{
\item{origin_lat}{scalar latitude of origin in degrees}
\item{origin_lon}{scalar longitude of origin in degrees}
\item{dest_lat}{vector latitude of destination in degrees}
\item{dest_lon}{vector longitude of destination in degrees}
}
\description{
Calculates great circle distance (km) between an origin and one or more destination points.
}
|
588f4ea36067e1ae5323c0474b6f318fbc0e13fd
|
c9f3369c749e5a3cfebaa96dc1b484e72a3dd7d0
|
/R/nomle.R
|
bcde1b5033f450711357cc84541ab6ae3c020bdf
|
[] |
no_license
|
amoloudi/R-PKG-Distributions
|
128ff992a10a0da915f27aa941d2f9d476ad9e9a
|
20daa9657d6833cb7aff2ac9194340504c1f0270
|
refs/heads/master
| 2021-09-02T19:18:04.772144
| 2018-01-03T19:13:48
| 2018-01-03T19:13:48
| 115,752,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
nomle.R
|
nomle <- function(x){
  # Moment-style estimator computed from the first column of `x`.
  #
  # Args:
  #   x: a matrix (or data frame) whose first column holds the sample.
  #
  # Returns:
  #   A numeric vector of length 2:
  #     [1] the sample mean of column 1, minus 1
  #     [2] the unbiased sample variance of column 1 (n - 1 denominator)
  #
  # NOTE(review): the "- 1" shift suggests a location-shifted model
  # (e.g. X = 1 + error) — confirm against the accompanying notes.
  #
  # Improvement over the original: the two element-wise accumulation
  # loops are replaced by vectorized sums; results are identical.
  n <- nrow(x)
  col1 <- x[, 1]
  u <- sum(col1) / n                      # sample mean
  v <- sum((col1 - u)^2) / (n - 1)        # unbiased sample variance
  c(u - 1, v)
}
|
03c6efb21e21596f0be8e3f0d8f1608fb4797c83
|
136eb41b20d1869345c052ffea61e43f0b4f4840
|
/man/plot_hazard1.Rd
|
0a2f3e1a52c355b7391359145764813cd78c64b6
|
[] |
no_license
|
andybega/spduration
|
d24964b65492c56242061392405a3872a8467e99
|
6154bfcff9d172a3598d7c73058c1d08f1b65d18
|
refs/heads/master
| 2023-06-21T18:37:24.942725
| 2023-06-21T07:16:28
| 2023-06-21T07:16:28
| 6,479,313
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 426
|
rd
|
plot_hazard1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.spdur.R
\name{plot_hazard1}
\alias{plot_hazard1}
\title{Plot conditional hazard rate}
\usage{
plot_hazard1(x, ...)
}
\arguments{
\item{x}{class "spdur" object}
\item{...}{passed to \code{plot_hazard}}
}
\value{
NULL, plots.
}
\description{
Plot hazard function without simulated confidence intervals. See
\code{\link{plot_hazard}} instead.
}
|
59d2cdf4d96db9a9b39837df613d5278d135f38e
|
f550d59dfeb0e70fe46102e2b09cedaeffe08a90
|
/Advanced-R_exercises/Exercise_page_213.R
|
7e8f62aeb69c8c7d3ea17656291cd753532862b2
|
[] |
no_license
|
cimentadaj/random-stuff
|
f877ed9223583565f0d338850a1d04e79a7a3686
|
2f883a7c5fcb2001d0195a760615049bfb96279a
|
refs/heads/master
| 2021-03-08T16:53:27.168732
| 2018-02-01T10:11:54
| 2018-02-01T10:11:54
| 55,167,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,912
|
r
|
Exercise_page_213.R
|
# 1.
vapply(mtcars, sd, numeric(1))
num <- vapply(iris, is.numeric, logical(1))
vapply(iris[num], sd, numeric(1))
# 2
# Because certain objects might have two classes and sapply returns a vector. In a case
# like this one, you will obtain a list instead of a a vector.
df2 <- data.frame(x = 1:10, y = Sys.time() + 1:10)
sapply(df2, class)
# Within a function, this might be problematic because you will be expecting a vector.
vapply(df2, class, character(1))
# More usefully, you get an error pointing to which element has length > 1
# 3.
trials <- replicate(
100,
t.test(rpois(10, 10), rpois(7, 10)),
simplify = FALSE
)
sapply(trials, function(model) model$p.value)
sapply(trials, `[[`, 3)
# 4.
# replicate() repeats an expression N number of times.
# It does so by applying an sapply function n times over the expression provided
# the arguments naturally vary because it only takes the expression and the number
# of times it is repeated.
# In fact, we can replicate `replicate` doing this:
p <- sapply(1:100, function(i) t.test(rpois(10, 10), rpois(10, 10)), simplify = F)
# 5.
# I'm not sure I understand this question entirely but here's an approach seen from here:
# https://github.com/peterhurford/adv-r-book-solutions/blob/master/09_functionals/02_friends_of_lapply/exercise5.r
lapply2 <- function(X, names, FUN, ...) {
  # A named variant of lapply(): applies FUN to every element of X
  # (forwarding any extra arguments) and labels the result with the
  # supplied `names` vector, which must be as long as X.
  stopifnot(length(X) == length(names))
  result <- lapply(X, FUN, ...)
  names(result) <- names
  result
}
lapply2(list(1:10, 20:20), c("hey", "ho"),mean)
# 6.
map_vapply <- function(..., FUN, FUN.VALUE) {
  # vapply-style parallel map: walks the vectors supplied in `...` in
  # lockstep, calls FUN once per position with one element from each
  # input, and checks every result against the mode and length of the
  # FUN.VALUE template (erroring on any mismatch).
  #
  # Argument names on the `...` inputs are preserved, so named inputs
  # match FUN's formals by name and unnamed ones positionally.
  #
  # Returns a list with one checked FUN result per input position.
  parallel_args <- list(...)

  # Every parallel input must have the same length.
  arg_lengths <- vapply(parallel_args, length, numeric(1))
  stopifnot(all(arg_lengths[1] == arg_lengths))

  # Template describing what each individual FUN call must produce.
  template_mode <- mode(FUN.VALUE)
  template_length <- length(FUN.VALUE)

  arg_names <- names(parallel_args)

  lapply(seq_len(arg_lengths[1]), function(pos) {
    # Slice position `pos` out of every parallel input, carrying over
    # any argument names so do.call dispatches them correctly.
    tuple <- lapply(parallel_args, function(v) v[[pos]])
    if (!is.null(arg_names)) names(tuple) <- arg_names

    result <- do.call(FUN, tuple)
    stopifnot(mode(result) == template_mode)
    stopifnot(length(result) == template_length)
    result
  })
}
# will dispatch arguments in that some order to rnormr
map_vapply(100, 5, 2, FUN = rnorm, FUN.VALUE = numeric(100))
# named args will be matched and non-named will be put in order
# in the arguments of the FUN
map_vapply(100, n = 5, 2, FUN = rnorm, FUN.VALUE = numeric(5))
# for example, here the only remaining argument is mean and both
# first two args are named so the mean is set to 2.
map_vapply(sd = 100, n = 5, 2, FUN = rnorm, FUN.VALUE = numeric(5))
# can use anonymous function
map_vapply(rnorm(1e03), rnorm(1e03), rnorm(1e03), FUN = function(x, y, z) x + y + z, FUN.VALUE = numeric(1))
# 7
mcsapply <- function(X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE) {
  # Parallel drop-in for sapply(): runs FUN over X with
  # parallel::mclapply() and then applies sapply()'s usual naming and
  # simplification conventions.
  #
  # Args mirror sapply(); extra arguments in `...` are forwarded through
  # mclapply() to FUN (named arguments matching mclapply's own mc.*
  # parameters, e.g. mc.cores, are consumed by mclapply instead).
  #
  # NOTE: mclapply() relies on forking, so on Windows it only works
  # serially (mc.cores = 1).
  FUN <- match.fun(FUN)
  result <- parallel::mclapply(X = X, FUN = FUN, ...)
  if (USE.NAMES && is.character(X) && is.null(names(result))) {
    names(result) <- X
  }
  if (!identical(simplify, FALSE) && length(result)) {
    simplify2array(result, higher = (simplify == "array"))
  } else {
    # BUG FIX: this branch previously returned the undefined name
    # `answer`, so simplify = FALSE (and empty input) raised an error.
    result
  }
}
microbenchmark::microbenchmark(
mc = mcsapply(1:1e6, log),
classic = sapply(1:1e6, log)
)
|
4acdc3896dcd2fbcf38989a9948bba0edb6ab4db
|
51553e75514bbcc59310f612a10b0e9c47f5b210
|
/scripts/ldshrink_ld.R
|
7adccfd70f95caba5a3ca9aa3c3c6449c8dd9161
|
[] |
no_license
|
CreRecombinase/ptb_workflowr
|
b5a63ff13037893c627e96233097332f322d50f2
|
d4c05d1b04888f397370a3172c57a15a2f867250
|
refs/heads/master
| 2020-05-18T15:05:04.640004
| 2020-05-08T18:24:46
| 2020-05-08T18:24:46
| 184,487,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,007
|
r
|
ldshrink_ld.R
|
## Build per-LD-block linkage-disequilibrium (correlation) matrices from
## PLINK genotype files and write one matrix per block to an HDF5 file.
## Runs under Snakemake: all paths and the "shrink" switch come from the
## `snakemake` S4 object injected into the session (inputs bimf/famf/bedf,
## output h5f, param shrink).
library(dplyr)
library(ldmap)
library(ldshrink)
library(EigenH5)
## Choose the LD estimator: use ldshrink's shrinkage estimator unless
## the "shrink" param is present and set to something other than
## "shrink". NOTE: an absent param therefore defaults to shrinkage.
shrink <- snakemake@params[["shrink"]]
if(is.null(shrink)){
  doshrink <- TRUE
}else{
  doshrink <- shrink=="shrink"
}
## Read the .bim marker table; record each SNP's row index (needed to
## subset the .bed file), assign each SNP to an ldetect European LD
## region, and convert rsids to integers for compact storage.
bim_df <- read_plink_bim(snakemake@input[["bimf"]]) %>%
  mutate(snp_id = 1:n(),
         ldmr = snp_overlap_region(snp_struct, ldetect_EUR),
         rsid=rsid2int(rsid))
## Sample count N comes from the .fam file (one row per individual).
fam_df <- read_plink_fam(snakemake@input[["famf"]])
N <- nrow(fam_df)
## Process one LD region at a time to bound memory use.
bim_l <- split(bim_df, bim_df$ldmr)
purrr::walk(bim_l, function(df){
  ## Load only this region's SNP columns from the .bed file.
  gl <- read_plink_bed(snakemake@input[["bedf"]], subset = df$snp_id, N = N)
  Xm <- gt2matrix(gl)
  if(!doshrink){
    ## Plain Pearson correlation, dropping incomplete observations.
    R <- stats::cor(Xm, use = "complete.obs")
  }else{
    ## Shrinkage LD estimate; df$map supplies the genetic-map positions.
    ## (assumes the bim table carries a `map` column — TODO confirm)
    R <- ldshrink::ldshrink(Xm, df$map, isGeno = TRUE)
  }
  ## Store the matrix plus SNP bookkeeping under "<region>/..." groups.
  ldmr_id <- as.character(unique(df$ldmr))
  write_matrix_h5(R, snakemake@output[["h5f"]], paste0(ldmr_id, "/R"))
  write_vector_h5(df$snp_id, snakemake@output[["h5f"]], paste0(ldmr_id, "/snp_id"))
  write_vector_h5(df$rsid, snakemake@output[["h5f"]], paste0(ldmr_id, "/rsid"))
})
|
d385a6eb2bfcb1e487d86a1fa0df5c9dc094f67e
|
05e321568f7d4f3b5bee75a23d4c5c2378a72022
|
/semtiment analysis.R
|
d75462c81a7779b11f81d6bb34e1de14ca1e0f6a
|
[] |
no_license
|
Wisienkas/Datascience
|
5c154ae75d7d9e0b7cc074d4b963c37458a8a193
|
2f77a6bfadd746de3270c2e2079af4e31c36d406
|
refs/heads/master
| 2016-08-12T09:42:50.763329
| 2016-01-14T07:38:38
| 2016-01-14T07:38:38
| 49,534,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,256
|
r
|
semtiment analysis.R
|
# Load data
data <- read.csv2("testdata.manual.2009.06.14.csv", sep = ",", quote = '"')
names(data) <- c("fromid", "toid", "date", "userfrom", "userto", "msg")
# Ectract text msg and write to file
text <- data[, "msg"]
write.csv(text, file = "temp/data.txt")
install.packages("tm")
install.packages("wordcloud")
library("tm")
library("wordcloud")
# Corpus need a dir not a vector apparently :O :/ wtf
lords <- Corpus(DirSource("temp/"))
# Add filters
lords <- tm_map(lords, stripWhitespace)
lords <- tm_map(lords, content_transformer(tolower))
lords <- tm_map(lords, removeWords, stopwords("english"))
lords <- tm_map(lords, stemDocument)
# Plot wordcloud
wordcloud(lords, scale=c(5,0.2), max.words=100, random.order=FALSE,
rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
################
# Sentiment analysis
################
library(RCurl)
library(RJSONIO)
library(rjson)
library(stringr)
library(tm)
library(wordcloud)
install.packages('devtools')
require('devtools')
install_github('mananshah99/sentR')
require('sentR')
# Max 1000 requests per day
# So please only do 1 runthrough of the getSentiment loop as it takes 497 requests
# The key is associated to me, but i left it here so you could try to execute it.
# You might also want to execute after the 27th as i did it on this date
apikey = "4a572cc48b46ae91fd40eaa31b878101"
data <- read.csv2("testdata.manual.2009.06.14.csv", sep = ",", quote = '"')
names(data) <- c("fromid", "toid", "date", "userfrom", "userto", "msg")
# Ectract text msg and write to file
tweet_txt <- data[, "msg"]
getSentiment <- function (text, key){
  # Query the Datumbox Twitter sentiment-analysis API for one string.
  #
  # Args:
  #   text: the tweet/string to classify.
  #   key:  Datumbox API key (free tier is limited to ~1000 requests/day,
  #         per the note at the top of this script).
  #
  # Returns:
  #   list(sentiment = <classification string from the API's
  #   output$result field>).
  text <- URLencode(text);
  #save all the spaces, then get rid of the weird characters that break the API, then convert back the URL-encoded spaces.
  text <- str_replace_all(text, "%20", " ");
  text <- str_replace_all(text, "%\\d\\d", "");
  text <- str_replace_all(text, " ", "%20");
  # Truncate long payloads the API would reject.
  if (str_length(text) > 360){
    text <- substr(text, 0, 359);
  }
  # NOTE(review): plain HTTP with string-pasted query parameters — the
  # key and text travel unencrypted; confirm the endpoint is still live.
  data <- getURL(paste("http://api.datumbox.com/1.0/TwitterSentimentAnalysis.json?api_key=", key, "&text=",text, sep=""))
  js <- RJSONIO::fromJSON(data, asText=TRUE);
  # get mood probability
  sentiment = js$output$result
  return(list(sentiment=sentiment))
}
clean.text <- function(some_txt)
{
  # Strip Twitter artefacts from a character vector of tweets: retweet
  # attributions, @mentions, punctuation, digits, link remnants, runs of
  # whitespace, and the "amp" fragment left by escaped ampersands; then
  # lower-case each tweet and drop any that end up empty.
  #
  # Returns an unnamed character vector (possibly shorter than the input).
  patterns <- c(
    "(RT|via)((?:\\b\\W*@\\w+)+)",  # "RT @user" / "via @user" prefixes
    "@\\w+",                        # remaining @mentions
    "[[:punct:]]",                  # punctuation
    "[[:digit:]]",                  # digits
    "http\\w+",                     # link remnants (punct already gone)
    "[ \t]{2,}",                    # runs of spaces/tabs
    "^\\s+|\\s+$",                  # leading/trailing whitespace
    "amp"                           # leftover HTML-escape fragment
  )
  for (pat in patterns) {
    some_txt = gsub(pat, "", some_txt)
  }
  # Lower-case defensively: if tolower() errors (e.g. bad encoding),
  # fall through without converting that element.
  try.tolower = function(x)
  {
    y = NA
    try_error = tryCatch(tolower(x), error=function(e) e)
    if (!inherits(try_error, "error"))
      y = tolower(x)
  }
  cleaned = sapply(some_txt, try.tolower)
  cleaned = cleaned[cleaned != ""]
  names(cleaned) = NULL
  cleaned
}
# clean text
tweet_clean = clean.text(tweet_txt)
tweet_num = length(tweet_clean)
# data frame (text, sentiment)
tweet_df = data.frame(text=tweet_clean, sentiment=rep("", tweet_num),stringsAsFactors=FALSE)
# apply function getSentiment
sentiment = rep(0, tweet_num)
for (i in 1:tweet_num)
{
tmp = getSentiment(tweet_clean[i], apikey)
tweet_df$sentiment[i] = tmp$sentiment
}
# delete rows with no sentiment
tweet_df <- tweet_df[tweet_df$sentiment!="",]
#separate text by sentiment
sents = levels(factor(tweet_df$sentiment))
# get the labels and percents
labels <- lapply(sents, function(x) paste(x,format(round((length((tweet_df[tweet_df$sentiment ==x,])$text)/length(tweet_df$sentiment)*100),2),nsmall=2),"%"))
nemo = length(sents)
emo.docs = rep("", nemo)
for (i in 1:nemo)
{
tmp = tweet_df[tweet_df$sentiment == sents[i],]$text
emo.docs[i] = paste(tmp,collapse=" ")
}
# remove stopwords
emo.docs = removeWords(emo.docs, stopwords("english"))
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = labels
# comparison word cloud
comparison.cloud(tdm, colors = brewer.pal(nemo, "Dark2"),
scale = c(3,.5), random.order = FALSE, title.size = 1.5)
|
9d0689009c8450a397c66fc8440b5a06410aa413
|
bb8dbb7667c71f5c2376b01412becbf2bee4fc6a
|
/cachematrix.R
|
f968ee05c9bf10c69a8947e7e3056ac7b15c243b
|
[] |
no_license
|
BobCDell/ProgrammingAssignment2
|
96d702947cb0dc946cfffdc2dd6911b1fa848dcc
|
ba45b6362cf3c0a9d82693436a5e7f65fa4bd9c9
|
refs/heads/master
| 2021-01-21T08:01:52.682877
| 2016-04-24T19:16:25
| 2016-04-24T19:16:25
| 56,986,782
| 0
| 0
| null | 2016-04-24T18:11:12
| 2016-04-24T18:11:10
| null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
cachematrix.R
|
## The functions makeCacheMatrix and cacheSolve create a special "matrix" object (makeCacheMatrix) that can
## cache its inverse which is then retrieved by cacheSolve.
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(mat=matrix()){
  #Description:
  # Argument: mat - a square, invertible matrix
  # Returns - a NAMED list of accessor functions:
  # a. setmat - replaces the stored matrix (and clears the cache).
  # b. getmat - returns the stored matrix.
  # c. setinvrs - stores a computed inverse in the cache.
  # d. getinvrs - returns the cached inverse (NULL if not yet computed).
  # The returned list is the expected argument of cacheSolve().
  invrs <- NULL
  setmat <- function(newmat) {
    mat <<- newmat
    #assign mat to newmat in the enclosing environment using <<-
    invrs <<- NULL
    #a new matrix invalidates any previously cached inverse
  }
  getmat <- function() mat
  setinvrs <- function(inverse) invrs <<- inverse
  getinvrs <- function() invrs
  # BUG FIX: the original built this list with `<-` instead of `=`
  # (list(setmat <- setmat, ...)), which produces an *unnamed* list, so
  # mat$getmat / mat$getinvrs returned NULL and cacheSolve() failed.
  list(setmat = setmat, getmat = getmat, setinvrs = setinvrs, getinvrs = getinvrs)
}
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed), then
#cachesolve retrieves the inverse from the cache.
cacheSolve <- function(mat,...) {
  #Description:
  # Argument: mat - an object produced by makeCacheMatrix()
  # Return: the inverse of the wrapped matrix, computed at most once.
  # A previously cached inverse is returned directly (with a message);
  # otherwise solve() is called, the result cached via setinvrs(),
  # and then returned. Extra arguments are forwarded to solve().
  cached <- mat$getinvrs()
  if (!is.null(cached)) {
    message("Searching for cached matrix.")
    return(cached)
  }
  inverse <- solve(mat$getmat(), ...)
  mat$setinvrs(inverse)
  inverse
}
|
58dbdd70340186821821d6d631df0f6055ff559c
|
82d5e4e6fdf1969172bafaf4f66c39d8458eba26
|
/man/Deltarho.Rd
|
51a1587a9d8f5d845f1b841a5551907b652f94e3
|
[] |
no_license
|
victormesquita40/DFA
|
b9de8f44354d2efb396586adc97e8bd0253b4816
|
01b2848b3b9be8905dee11795d6ae9b7c81c948d
|
refs/heads/master
| 2023-01-07T11:19:30.393913
| 2020-11-11T18:52:58
| 2020-11-11T18:52:58
| 271,644,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,298
|
rd
|
Deltarho.Rd
|
\name{Deltarho}
\alias{Deltarho}
\title{
Delta Amplitude Detrended Cross-Correlation Coefficient (DeltarhoDCCA)
}
\description{
Applies the Detrended Cross-Correlation Coefficient Difference (Deltarho) to nonstationary time series.
}
\usage{
Deltarho(file,file2,file3,file4,scale = 2^(1/8),box_size = 4,m=1)
}
\arguments{
\item{file}{
Univariate time series (must be a vector or data frame)}
\item{file2}{
Univariate time series (must be a vector or data frame)}
\item{file3}{
Univariate time series (must be a vector or data frame)}
\item{file4}{
Univariate time series (must be a vector or data frame)}
\item{scale}{
Specifies the ratio between successive box sizes (by default \code{scale = 2^(1/8)})}
\item{box_size}{
Vector of box sizes (must be used in conjunction with \code{scale = "F"})
}
\item{m}{
An integer of the polynomial order for the detrending (by default \code{m=1}).}
}
\details{
The Deltarho can be computed in a geometric scale or for different choices of boxes sizes.
}
\value{
\item{boxe}{Size \eqn{n} of the overlapping boxes. }
\item{DFA1}{DFA of the first time series (\code{file}).}
\item{DFA2}{DFA of the second time series (\code{file2}).}
\item{DFA3}{DFA of the third time series (\code{file3}).}
\item{DFA4}{DFA of the fourth time series (\code{file4}).}
\item{DCCA}{Detrended Cross-Correlation function between the first time series (\code{file}) and the second time series (\code{file2}).}
\item{DCCA2}{Detrended Cross-Correlation function between the third time series (\code{file3}) and the fourth time series (\code{file4}).}
\item{rhoDCCA}{Detrended Cross-Correlation Coefficient function, defined as the ratio between the \code{DCCA} and two DFA (\code{DFA1,DFA2}).}
\item{rhoDCCA2}{Detrended Cross-Correlation Coefficient function, defined as the ratio between the \code{DCCA2} and two DFA (\code{DFA3,DFA4}).}
}
\note{
The time series \code{file},\code{file2},\code{file3} and \code{file4} must have the same sample size.
}
\author{
Victor Barreto Mesquita
}
\references{
SILVA, Marcus Fernandes da et al. Quantifying cross-correlation between ibovespa
and brazilian blue-chips: The dcca approach. Physica A: Statistical Mechanics and its
Applications, v. 424,2015.
}
\examples{
#The following examples using the database of financial time series
#collected during the United States bear market of 2007-2009.
\donttest{
library(DFA)
data("NYA2008")
data("IXIC2008")
data("LSE.L2008")
data("SSEC2008")
file = NYA2008
file2= IXIC2008
file3 = LSE.L2008
file4 = SSEC2008
Deltarho(file,file2,file3,file4,scale = 2^(1/8),box_size = c(4,8,16),m=1)
}
\donttest{
# Example with different polynomial fit order.
library(DFA)
data("NYA2008")
data("IXIC2008")
data("LSE.L2008")
data("SSEC2008")
file = NYA2008
file2 = LSE.L2008
file3= IXIC2008
file4 = SSEC2008
Deltarho(file,file2,file3,file4,scale = 2^(1/8),box_size = c(4,8,16),m=2)
}
\donttest{
# Example using different choice of overlapping boxes sizes.
library(DFA)
data("NYA2008")
data("IXIC2008")
data("LSE.L2008")
data("SSEC2008")
file = NYA2008
file2= IXIC2008
file3 = LSE.L2008
file4 = SSEC2008
Deltarho(file,file2,file3,file4,scale = "F",box_size = c(4,8,16),m=1)
}
}
|
d633598a5f44ee25ba82aaeec5db9f95a45378bb
|
bbf2e61a2a1a9932012fbf260883ba517fb0001c
|
/cachematrix.R
|
2bd6da3fe2d46f7f1ce2294b8179b5534f2e9e1f
|
[] |
no_license
|
orfalchechor/ProgrammingAssignment2
|
30340ae80468d1dbb5ef7c47f855eb856cb2050f
|
04c9351c1b401495437666acc160bfcf4fb3cc08
|
refs/heads/master
| 2021-01-18T10:32:23.351439
| 2016-06-04T17:45:04
| 2016-06-04T17:45:04
| 60,422,029
| 0
| 0
| null | 2016-06-04T17:26:17
| 2016-06-04T17:26:16
| null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
cachematrix.R
|
## Description:
## makeCacheMatrix create an object contain a matrix, which cacheSolve
## can consume and calculate its inverse (provided the matrix invertible).
## Made possible by example code provided by course.org from repo rdpeng.
##
## Example:
## z <- makeCacheMatrix()
## z$set( c(1,-1,4,6), 2 )
## cacheSolve(z)
## makeCacheMatrix(x = matrix())
##
## Builds a "cache-aware" matrix object: a list of closures that share a
## matrix `x` and a lazily computed inverse. The inverse slot starts NULL
## and is reset to NULL whenever the matrix is replaced via $set().
##
## Returned methods:
##   $set(y, ...)     replace the stored matrix (extra args go to matrix())
##   $get()           return the stored matrix
##   $setinverse(v)   store a computed inverse (used by cacheSolve)
##   $getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y, ...) {
        # Overwrite the matrix in the enclosing environment and
        # invalidate any previously cached inverse.
        x <<- matrix(y, ...)
        inv <<- NULL
    }
    get <- function() x
    setinverse <- function(solve) inv <<- solve
    getinverse <- function() inv
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## cacheSolve(x, ...)
##
## Returns the inverse of the matrix held by a makeCacheMatrix object `x`.
## On a cache hit it emits "getting cached data" and returns the stored
## inverse; otherwise it computes the inverse with solve(), stores it back
## into `x` via $setinverse(), and returns it. Extra arguments are passed
## through to solve().
cacheSolve <- function(x, ...) {
    cached <- x$getinverse()
    if (is.null(cached)) {
        # Cache miss: compute, memoize, and return the fresh inverse.
        inv <- solve(x$get(), ...)
        x$setinverse(inv)
        inv
    } else {
        message("getting cached data")
        cached
    }
}
|
34703995d63b64493b079540213f3e63da8fb6f1
|
aa13ffbd51383f778dc7fe135af9615ffeae17b0
|
/assignment/HW01/HW01_63130500100.R
|
da8f831263abfbd7536418f58128019ec0d142f0
|
[
"MIT"
] |
permissive
|
jirasin-c/020-Video-Game-Sales
|
d961cedaebe9c61fc50cce1583a068b605a2c1e7
|
5912fecb33cb1d255253d463433a4356be62396d
|
refs/heads/main
| 2023-08-23T13:17:44.087765
| 2021-10-22T11:00:31
| 2021-10-22T11:00:31
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,599
|
r
|
HW01_63130500100.R
|
# HW01 -- R warm-up: scalar arithmetic, descriptive statistics, and a small
# data.frame exercise on Marvel movie release data.

# Example 0: arithmetic on two scalars.
x <- 1
y <- 2
print(x + y) # 3

# Exercise 1: descriptive statistics of a numeric vector.
# NOTE: the original used `sum`, `sd` and `var` as variable names, which
# masks the base functions of the same names; renamed to avoid shadowing.
num <- c(10.4, 5.6, 3.1, 6.4, 21.7)
avr <- mean(num)     # average
avr                  # 9.44
total <- sum(num)    # sum
total                # 47.2
med <- median(num)   # median
med
std_dev <- sd(num)   # standard deviation
std_dev
variance <- var(num) # variance
variance

# Exercise 2
# List of Marvel movies (ordered by Marvel Phase release).
# (`movie_names` instead of `names`, which would mask base::names.)
movie_names <- c("Iron Man","The Incredible Hulk","Iron Man 2","Thor","Captain America: The First Avenger",
           "The Avengers","Iron Man 3","Thor: The Dark World","Captain America: The Winter Soldier",
           "Guardians of the Galaxy","Avengers: Age of Ultron","Ant-Man","Captain America: Civil War",
           "Doctor Strange","Guardians of the Galaxy 2","Spider-Man: Homecoming","Thor: Ragnarok","Black Panther",
           "Avengers: Infinity War","Ant-Man and the Wasp","Captain Marvel","Avengers: Endgame",
           "Spider-Man: Far From Home","WandaVision","Falcon and the Winter Soldier","Loki","Black Widow")
# List of release years of the Marvel movies.
years <- c(2008,2008,2010,2011,2011,2012,2013,2013,2014,2014,2015,2015,2016,2016,
           2017,2017,2017,2017,2018,2018,2019,2019,2019,2021,2021,2021,2021)
# Equivalent construction using rep() to compress the runs of repeated years.
years <- c(2008,2008,2010,2011,2011,2012,rep(2013:2016,each=2),
           rep(2017,4),rep(2018,2),rep(2019,3),rep(2021,4))

# Exercise 2.1: combine names and years into a data.frame to show how the two
# variables relate. (The original comment here was mojibake -- presumably Thai
# text explaining that the data.frame shows the relationship in the data.)
marvel_movies <- data.frame(movie_names, years)
marvel_movies

# Exercise 2.2
# The number of movies
length(movie_names) # 27
# The name of the 19th movie
movie_names[19]
# Which year(s) had the most releases
table(years) # 2017, 2021
|
869af6750fb241483e7addff802e2f5d78a37324
|
c2a3647efa1093e8a8d72b8ec65f2ead24060336
|
/inst/unitTests/test_TarSeqQC.R
|
002cb6b38497567f56f54f5f43706324d560769e
|
[] |
no_license
|
gamerino/TarSeqQC
|
2363d1ffe1dadcc6f14e552840f90fd79b4a1a30
|
ebdf8ca544f5d5073966cf0a16c912ceeac7202b
|
refs/heads/master
| 2021-07-15T03:28:58.735473
| 2020-01-31T11:24:13
| 2020-01-31T11:24:13
| 101,313,918
| 1
| 0
| null | 2017-08-24T16:06:36
| 2017-08-24T16:06:36
| null |
UTF-8
|
R
| false
| false
| 27,574
|
r
|
test_TarSeqQC.R
|
# rm(list=ls())
# library("RUnit")
# library("TarSeqQC")
# library("Rsamtools")
library(BiocParallel)
##-----------------------------------------------------------------------------
##TargetExperiment-class Tests
##-----------------------------------------------------------------------------
##Empty object test: Does TargetExperiment work without any parameter?
test_TargetExperiment<-function(){
    # Smoke test: the zero-argument constructor must yield a valid
    # (empty) TargetExperiment object.
    checkTrue(validObject(TargetExperiment()),
        msg="TargetExperiment works without any parameter: OK.")
}
##Build TargetExperiment object with user data
# End-to-end constructor test: builds a TargetExperiment from the bundled
# bed/bam/fasta example files and then verifies the type and consistency of
# every slot (bedFile, bamFile, fastaFile, scanBamP, pileupP, featurePanel,
# genePanel, feature, attribute).
test_TargetExperimentWithData<-function(){
    # Defining bam file, bed file and fasta file names and paths
    bamFile<-system.file("extdata", "mybam.bam", package="TarSeqQC",
        mustWork=TRUE)
    bedFile<-system.file("extdata", "mybed.bed", package="TarSeqQC",
        mustWork=TRUE)
    fastaFile<-system.file("extdata", "myfasta.fa", package="TarSeqQC",
        mustWork=TRUE)
    attribute<-"coverage"
    feature<-"amplicon"
    object<-TargetExperiment(bedFile=bedFile, bamFile=bamFile,
        fastaFile=fastaFile, feature=feature, attribute=attribute)
    # Checking slots
    #bedFile
    checkEquals(class(getBedFile(object))[1],"GRanges",
        msg="TargetExperiment bedFile slot type: OK.")
    #bamFile
    checkTrue(file.exists(bamFile),
        msg="TargetExperiment bamFile existence: OK.")
    # Fix: compare class(...)[1] instead of the whole class vector, for
    # consistency with the other slot checks in this file.
    checkEquals(class(getBamFile(object))[1],"BamFile",
        msg="TargetExperiment bamFile slot type: OK.")
    #fastaFile
    checkTrue(file.exists(fastaFile),
        msg="TargetExperiment fastaFile existence: OK.")
    checkEquals(class(getFastaFile(object))[1],"FaFile",
        msg="TargetExperiment fastaFile slot type: OK.")
    #scanBamP
    checkEquals(class(getScanBamP(object))[1],"ScanBamParam",
        msg="TargetExperiment scanBamP slot type= OK.")
    # The scanBamP 'which' parameter must cover the bed ranges per chromosome.
    aux<-lapply(levels(seqnames(getBedFile(object))), function(x){
        ranges(getBedFile(object)[seqnames(getBedFile(object)) == x])
    })
    names(aux)<-levels(seqnames(getBedFile(object)))
    checkEquals(as.list(bamWhich(getScanBamP(object))),aux,
        msg="Which parameter setting= OK.")
    #pileupP
    checkEquals(class(getPileupP(object))[1],"PileupParam",
        msg="TargetExperiment pileupP slot type= OK.")
    #featurePanel
    checkEquals(class(getFeaturePanel(object))[1],"GRanges",
        msg="TargetExperiment featurePanel slot type= OK.")
    checkEquals(names(getFeaturePanel(object)),names(getBedFile(object)),
        msg="TargetExperiment featurePanel names= OK.")
    checkTrue(all(c("medianCounts", "IQRCounts", "coverage",
        "sdCoverage") %in% names(mcols(object@featurePanel))),
        msg="TargetExperiment featurePanel metadata= OK.")
    checkEquals(length(getFeaturePanel(object)),length(getBedFile(object)),
        msg="TargetExperiment featurePanel dimension= OK.")
    #genePanel
    checkEquals(class(getGenePanel(object))[1],"GRanges",
        msg="TargetExperiment genePanel slot type= OK.")
    checkTrue(all(names(getGenePanel(object)) %in% unique(mcols(getBedFile(
        object))[,"gene"])), msg="TargetExperiment genePanel names= OK.")
    checkTrue(all(c("medianCounts", "IQRCounts", "coverage",
        "sdCoverage") %in% names(mcols(object@genePanel))),
        msg="TargetExperiment featurePanel metadata= OK.")
    #feature
    checkTrue(is.character(getFeature(object)),
        msg="TargetExperiment feature slot type= OK.")
    #attribute
    checkTrue(is.character(getAttribute(object)),
        msg="TargetExperiment attribute slot type= OK.")
    checkTrue(any(getAttribute(object) %in% c("medianCounts", "coverage")),
        msg="TargetExperiment attribute slot value= OK.")
}
## ---------------------------------------------------------------------------
## Getter/setter unit tests. Each getter test checks the returned slot type;
## each setter test checks (a) that invalid input raises an exception and
## (b) that a valid assignment leaves the object valid.
## ---------------------------------------------------------------------------
##Getters/setters: check getBedFile
test_getBedFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getBedFile(ampliPanel))[1], "GRanges",
        msg="bedFile getter: OK.")
    data(TEList, package="TarSeqQC")
    checkEquals(class(getBedFile(TEList))[1], "GRanges",
        msg="bedFile getter: OK.")
}
##Getters/setters: check setBedFile
test_setBedFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    # Re-point the example object at the files bundled with the package.
    setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    setBedFile(ampliPanel)<-system.file("extdata", "mybed.bed",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    bedFile<-system.file("extdata", "mybed.bed",
        package="TarSeqQC", mustWork=TRUE)
    #bedFile must be setted starting from a character object containing a valid
    #file path and name
    checkException(setBedFile(ampliPanel)<-"",
        msg="bedFile setter: OK.", silent=TRUE)
    #bedFile must be setted starting from a character object
    setBedFile(ampliPanel)<-bedFile
    checkTrue(validObject(ampliPanel),msg="bedFile setter: OK.")
}
##Getters/setters: check getBamFile
test_getBamFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getBamFile(ampliPanel))[1], "BamFile",
        msg="bamFile getter: OK.")
}
##Getters/setters: check setBamFile
test_setBamFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    bamFile<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    #bamFile cannot be defined as a non-existing file
    checkException(setBamFile(ampliPanel)<-"",
        msg="bamFile setter: OK.", silent=TRUE)
    #bamFile must be setted starting from a character object containing a valid
    #file path and name
    setBamFile(ampliPanel)<-bamFile
    checkTrue(validObject(ampliPanel),msg="bamFile setter: OK.")
}
##Getters/setters: check getFastaFile
test_getFastaFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getFastaFile(ampliPanel))[1], "FaFile",
        msg="fastaFile getter: OK.")
}
##Getters/setters: check setFastaFile
test_setFastaFile<-function(){
    data(ampliPanel, package="TarSeqQC")
    #fastaFile cannot be defined as a non-existing file
    checkException(setFastaFile(ampliPanel)<-"",
        msg="fastaFile setter: OK.", silent=TRUE)
    #file path and name
    fastaFile<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-fastaFile
    checkTrue(validObject(ampliPanel),msg="fastaFile setter: OK.")
}
##Getters/setters: check getScanBamP
test_getScanBamP<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getScanBamP(ampliPanel))[1], "ScanBamParam",
        msg="scanBamP getter: OK.")
}
##Getters/setters: check setScanBamP
test_setScanBamP<-function(){
    data(ampliPanel, package="TarSeqQC")
    #scanBamP should be ScanBamParam class
    checkException(setScanBamP(ampliPanel)<-"",
        msg="scanBamP setter: OK.", silent=TRUE)
    #scanBamP must be setted using a ScanBamParam object
    setScanBamP(ampliPanel)<-ScanBamParam()
    checkTrue(validObject(ampliPanel),msg="scanBamP setter: OK.")
}
##Getters/setters: check getPileupP
test_getPileupP<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getPileupP(ampliPanel))[1], "PileupParam",
        msg="pileupP getter: OK.")
}
##Getters/setters: check setPileupP
test_setPileupP<-function(){
    data(ampliPanel, package="TarSeqQC")
    #pileupP must be a PileupParam object
    checkException(setPileupP(ampliPanel)<-"",
        msg="pileupP setter: OK.", silent=TRUE)
    #pileupP must be setted using a PileupParam object
    setPileupP(ampliPanel)<-PileupParam()
    checkTrue(validObject(ampliPanel),msg="pileupP setter: OK.")
}
##Getters/setters: check getFeaturePanel
test_getFeaturePanel<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getFeaturePanel(ampliPanel))[1], "GRanges",
        msg="featurePanel getter: OK.")
}
##Getters/setters: check setFeaturePanel
test_setFeaturePanel<-function(){
    data(ampliPanel, package="TarSeqQC")
    #featurePanel cannot be setted as a data.frame
    checkException(ampliPanel@featurePanel<-data.frame(),
        msg="featurePanel setter: OK.", silent=TRUE)
    #featurePanel must be setted starting from a GRanges object
    setFeaturePanel(ampliPanel)<-GRanges()
    checkTrue(validObject(ampliPanel),msg="featurePanel setter: OK.")
    # An empty GRanges is accepted and yields a zero-length panel.
    checkEquals(length(getFeaturePanel(ampliPanel)), 0,
        msg="featurePanel setter: OK.")
}
##Getters/setters: check getGenePanel
test_getGenePanel<-function(){
    data(ampliPanel, package="TarSeqQC")
    checkEquals(class(getGenePanel(ampliPanel))[1],"GRanges",
        msg="genePanel getter: OK.")
}
##Getters/setters: check setGenePanel
test_setGenePanel<-function(){
    data(ampliPanel, package="TarSeqQC")
    setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    #genePanel cannot be setted as a data.frame
    checkException(ampliPanel@genePanel<-data.frame(),
        msg="genePanel setter: OK.", silent=TRUE)
    #genePanel cannot be setted starting from an empty GRanges object
    checkException(setGenePanel(ampliPanel)<-GRanges(),
        msg="genePanel setter: OK.", silent=TRUE)
    # A valid gene panel is obtained by summarizing the feature panel.
    setGenePanel(ampliPanel)<-summarizePanel(ampliPanel)
    checkTrue(validObject(ampliPanel), msg="genePanel setter: OK.")
}
##Getters/setters: check getFeature
test_getFeature<-function(){
    data(ampliPanel, package="TarSeqQC")
    feature<-"amplicon"
    checkEquals(getFeature(ampliPanel),feature, msg="feature getter: OK.")
    data(TEList, package="TarSeqQC")
    feature<-"amplicon"
    checkEquals(getFeature(TEList),feature, msg="feature getter: OK.")
}
##Getters/setters: check setFeature
test_setFeature<-function(){
    data(ampliPanel, package="TarSeqQC")
    feature<-getFeature(ampliPanel)
    checkEquals(getFeature(ampliPanel), feature, msg="feature setter: OK.")
    # A factor (non-character) feature must be rejected.
    feature2<-factor(feature)
    checkException(setFeature(ampliPanel)<-feature2, msg="feature setter: OK.",
        silent=TRUE)
    data(TEList, package="TarSeqQC")
    feature<-getFeature(TEList)
    checkEquals(getFeature(TEList), feature, msg="feature setter: OK.")
    feature2<-factor(feature)
    checkException(setFeature(TEList)<-feature2, msg="feature setter: OK.",
        silent=TRUE)
}
##Getters/setters: check getAttribute
test_getAttribute<-function(){
    data(ampliPanel, package="TarSeqQC")
    attribute<-"coverage"
    checkEquals(getAttribute(ampliPanel),attribute, msg="feature getter: OK.")
    data(TEList, package="TarSeqQC")
    attribute<-"coverage"
    checkEquals(getAttribute(TEList),attribute, msg="feature getter: OK.")
}
##Getters/setters: check setFeature
test_setAttribute<-function(){
    data(ampliPanel, package="TarSeqQC")
    attribute<-"medianCounts"
    setAttribute(ampliPanel)<-attribute
    checkEquals(getAttribute(ampliPanel), attribute, msg="feature setter: OK.")
    # Only "coverage"/"medianCounts" are valid attributes; "mean" must fail.
    attribute2<-"mean"
    checkException(setAttribute(ampliPanel)<-attribute2,
        msg="feature setter: OK.", silent=TRUE)
}
##Test statistics:summaryFeatureLev
##
test_summaryFeatureLev<-function(){
    # Feature-level summary must be a matrix with the usual summary() columns.
    data(ampliPanel, package="TarSeqQC")
    checkTrue(is.matrix(summaryFeatureLev(ampliPanel)),
        msg="summaryFeatureLev returned type: OK.")
    checkTrue(all(colnames(summaryFeatureLev(ampliPanel)) %in% c("Min.",
        "1st Qu.", "Median","Mean", "3rd Qu.", "Max.")),
        msg="summaryFeatureLev returned matrix colnames: OK.")
}
##Test statistics:summaryGeneLev
test_summaryGeneLev<-function(){
    # Gene-level summary must be a matrix with the usual summary() columns.
    data(ampliPanel, package="TarSeqQC")
    checkTrue(is.matrix(summaryGeneLev(ampliPanel)),
        msg="summaryGeneLev returned type: OK.")
    checkTrue(all(colnames(summaryGeneLev(ampliPanel)) %in% c("Min.",
        "1st Qu.", "Median","Mean", "3rd Qu.", "Max.")),
        msg="summaryGeneLev returned matrix colnames: OK.")
}
##Test statistics:summaryIntervals
test_summaryIntervals<-function(){
    # For a TargetExperiment the interval summary is a 5x5 data.frame whose
    # last cumulative count equals the number of features in the panel.
    data(ampliPanel, package="TarSeqQC")
    checkTrue(is.data.frame(summaryIntervals(ampliPanel)),
        msg="summaryIntervals returned type: OK.")
    checkTrue(all(colnames(summaryIntervals(ampliPanel)) %in% c(paste(
        getFeature(ampliPanel), getAttribute(ampliPanel), "intervals",
        sep="_"), "abs", "cum_abs","rel", "cum_rel")),
        msg="summaryIntervals returned data.frame colnames: OK.")
    checkEquals(dim(summaryIntervals(ampliPanel)), c(5,5),
        msg="summaryIntervals returned data.frame dimension: OK.")
    checkEquals(summaryIntervals(ampliPanel)[nrow(summaryIntervals(
        ampliPanel)), "cum_abs"], length(getFeaturePanel(ampliPanel)),
        msg="summaryIntervals returned data.frame consistency: OK.")
    # For a TargetExperimentList the same checks apply to each list element.
    data(TEList, package="TarSeqQC")
    SITEList<-summaryIntervals(TEList)
    checkTrue(is.list(SITEList),
        msg="summaryIntervals returned type: OK.")
    for(i in 1:length(SITEList)){
        checkTrue(all(colnames(SITEList[[i]]) %in% c(paste(
            getFeature(ampliPanel), getAttribute(ampliPanel), "intervals",
            sep="_"), "abs", "cum_abs","rel", "cum_rel")),
            msg="summaryIntervals returned data.frame colnames: OK.")
        checkEquals(dim(SITEList[[i]]), c(5,5),
            msg="summaryIntervals returned data.frame dimension: OK.")
        checkEquals(SITEList[[i]][nrow(SITEList[[i]]),
            "cum_abs"], length(getPanels(TEList)),
            msg="summaryIntervals returned data.frame consistency: OK.")
    }
}
##Test pileupCounts
# Checks that pileupCounts() returns a per-position data.frame with the
# expected columns, dimensions and chromosome labels for the first two
# amplicons of the example panel.
test_pileupCounts<-function(){
    data(ampliPanel, package="TarSeqQC")
    bamFile<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    bed<-getBedFile(ampliPanel)
    fastaFile<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    myCounts<-pileupCounts(bed=bed[1:2], bamFile=bamFile, fastaFile=fastaFile)
    checkEquals(class(myCounts), "data.frame",
        msg="pileupCounts returned object type: OK.")
    checkTrue(all(c("pos", "seqnames", "which_label", "counts") %in%
        colnames(myCounts)), msg="pileupCounts returned data.frame colnames:
        OK.")
    checkEquals(dim(myCounts), c(140,12),
        msg="pileupCounts returned data.frame dimension: OK.")
    # Each which_label has the form "chr:range"; every chromosome prefix
    # must be chr1. (A stray no-op `strsplit(auxData, ":")[1]` statement
    # was removed here -- its result was discarded.)
    auxData<-unique(as.character(myCounts[,"which_label"]))
    checkTrue(all(unique(sapply(1:length(auxData),function(x){ strsplit(
        auxData[x], ":")[[1]][1]})) %in% c("chr1")),
        msg="pileupCounts returned data.frame seqnames: OK.")
}
##Test buildFeaturePanel
test_buildFeaturePanel<-function(){
    # Use parallel workers where supported; Windows lacks fork(), so fall
    # back to serial evaluation there.
    if (.Platform$OS.type != "windows")
        BPPARAM <- MulticoreParam(2)
    else
        BPPARAM <- SerialParam()
    data(ampliPanel, package="TarSeqQC")
    setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    #read only the first 2 amplicons
    bed<-getBedFile(ampliPanel)[1:2]
    ampliPanel@bedFile<-bed
    # Restrict the scanBam 'which' parameter to the two retained amplicons.
    scanBamP<-getScanBamP(ampliPanel)
    bamWhich(scanBamP)<-bed
    setScanBamP(ampliPanel)<-scanBamP
    myFeaturePanel<-buildFeaturePanel(ampliPanel, BPPARAM=BPPARAM)
    checkEquals(class(myFeaturePanel)[1], "GRanges",
        msg="buildFeaturePanel returned object type: OK.")
    checkTrue(all(c("gene", "medianCounts", "IQRCounts", "coverage",
        "sdCoverage") %in% colnames(mcols(myFeaturePanel)) ),
        msg="buildFeaturePanel returned metadata colnames: OK.")
    checkEquals(length(myFeaturePanel), 2,
        msg="buildFeaturePanels returned GRanges dimension: OK.")
    checkTrue(all(names(myFeaturePanel) %in% names(getFeaturePanel(ampliPanel
        ))), msg="buildFeaturePanel returned GRanges names: OK.")
}
##Test summarizePanel
test_summarizePanel<-function(){
    # Summarizing the example feature panel must yield an 8-gene GRanges
    # carrying the same attribute metadata columns.
    data(ampliPanel, package="TarSeqQC")
    myGenePanel<-summarizePanel(ampliPanel)
    checkEquals(class(myGenePanel)[1], "GRanges",
        msg="summarizePanel returned object type: OK.")
    checkTrue(all(colnames(mcols(myGenePanel)) %in% c("medianCounts",
        "IQRCounts", "coverage", "sdCoverage")),
        msg="summarizePanel returned metadata colnames: OK.")
    checkEquals(length(myGenePanel), 8,
        msg="summarizePanel returned GRanges dimension: OK.")
    checkTrue(all(names(myGenePanel) %in% names(getGenePanel(ampliPanel))),
        msg="summarizePanel returned GRanges names: OK.")
}
##Test readFrequencies
# Checks the column layout of readFrequencies() and that the in/out
# percentages sum to (99.9, 100].
test_readFrequencies<-function(){
    data(ampliPanel, package="TarSeqQC")
    setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    readsInfo<-readFrequencies(ampliPanel)
    checkTrue(all(c("chr", "In", "Out", "InPerc", "OutPerc") %in%
        colnames(readsInfo)),
        msg="readFrequencies returned colnames: OK.")
    # Fix: the original closed the second sum() after `<= 100`, turning the
    # upper-bound check into sum(values <= 100) -- a count of TRUEs, which
    # is almost always non-zero. The parenthesis now closes the sum first.
    checkTrue((sum(readsInfo[, c("InPerc", "OutPerc")]) > 99.9 & sum(
        readsInfo[, c("InPerc", "OutPerc")]) <= 100),
        msg="Percentages calculation: OK.")
}
##Test plotInOutFeatures
# plotInOutFeatures() must accept both a precomputed frequency table and a
# TargetExperiment, returning a ggplot in either case.
test_plotInOutFeatures<-function(){
    data(ampliPanel, package="TarSeqQC")
    setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
        package="TarSeqQC", mustWork=TRUE)
    setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
        package="TarSeqQC", mustWork=TRUE)
    readsInfo<-readFrequencies(ampliPanel)
    g<-plotInOutFeatures(readsInfo)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
    g<-plotInOutFeatures(ampliPanel)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
}
##Test biasExploration
# biasExploration() must return a ggplot for both supported bias sources.
test_biasExploration<-function(){
    data(ampliPanel, package="TarSeqQC")
    source<-"gc"
    g<-biasExploration(ampliPanel, source=source)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
    source<-"length"
    g<-biasExploration(ampliPanel, source=source)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
}
##Test plotMetaDataExpl
# plotMetaDataExpl() must return a ggplot for both metadata columns.
test_plotMetaDataExpl<-function(){
    data(ampliPanel, package="TarSeqQC")
    source<-"gc"
    g<-plotMetaDataExpl(ampliPanel, name=source)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
    source<-"length"
    g<-plotMetaDataExpl(ampliPanel, name=source)
    checkTrue(is.ggplot(g), msg="returned plot type: OK.")
}
#### Test plot
## test_plot<-function(){
## data(ampliPanel, package="TarSeqQC")
## g<-plot(ampliPanel)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(40,11), msg="returned plot data dimension:
## OK.")
## data(TEList, package="TarSeqQC")
## g<-plot(object)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
##
## }
#### Test plotAttrExpl
## test_plotAttrExpl<-function(){
## data(ampliPanel, package="TarSeqQC")
## g<-plotAttrExpl(ampliPanel)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(29,1), msg="returned plot data dimension:
## OK.")
## data(TEList, package="TarSeqQC")
## g<-plotAttrExpl(object)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
##
## }
#### Test plotFeatPerform
##test_plotFeatPerform<-function(){
## data(ampliPanel, package="TarSeqQC")
## g<-plotFeatPerform(ampliPanel)
## checkTrue(class(g)[1] == "gg" , msg="returned plot type: OK.")
##}
#### Test plotAttrPerform
##test_plotAttrPerform<-function(){
## data(ampliPanel, package="TarSeqQC")
## g<-plotAttrPerform(ampliPanel)
## checkTrue(class(g)[1] == "gg" , msg="returned plot type: OK.")
##}
#### Test plotFeature
##test_plotFeature<-function(){
## data(ampliPanel, package="TarSeqQC")
## setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
## package="TarSeqQC", mustWork=TRUE)
## setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
## package="TarSeqQC", mustWork=TRUE)
## checkException(plotFeature(ampliPanel, featureID="nopresent"),
## msg="Testing feature ID: OK.", silent=TRUE)
## featureID<-"AMPL20"
## checkTrue(featureID %in% names(getFeaturePanel(ampliPanel)),
## msg="featureID present in the featurePanel: OK.")
## g<-plotFeature(ampliPanel, featureID)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(63,12), msg="returned plot data dimension:
## OK.")
##}
#### Test plotGeneAttrPerFeat
##test_plotGeneAttrPerFeat<-function(){
## data(ampliPanel, package="TarSeqQC")
## checkException(plotGeneAttrPerFeat(ampliPanel, geneID="nopresent"),
## msg="Testing gene ID: OK.", silent=TRUE)
## geneID<-"gene1"
## checkTrue(geneID %in% names(getGenePanel(ampliPanel)),
## msg="geneID present in the featurePanel: OK.")
## g<-plotGeneAttrPerFeat(ampliPanel, geneID)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(1,11), msg="returned plot data dimension:
## OK.")
##}
#### Test plotNtdPercentage
##test_plotNtdPercentage<-function(){
## data(ampliPanel, package="TarSeqQC")
## setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
## package="TarSeqQC", mustWork=TRUE)
## setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
## package="TarSeqQC", mustWork=TRUE)
## checkException(plotNtdPercentage(ampliPanel, featureID="nopresent"),
## msg="Testing feature ID: OK.", silent=TRUE)
## featureID<-"AMPL20"
## checkTrue(featureID %in% names(getFeaturePanel(ampliPanel)),
## msg="featureID present in the featurePanel: OK.")
## g<-plotNtdPercentage(ampliPanel, featureID)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(252,3), msg="returned plot data dimension:
## OK.")
##}
#### Test plotRegion
##test_plotRegion<-function(){
## data(ampliPanel, package="TarSeqQC")
## setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
## package="TarSeqQC", mustWork=TRUE)
## setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
## package="TarSeqQC", mustWork=TRUE)
## region<-c(4500,6000)
## seqname<-"chr10"
## checkException(plotRegion(ampliPanel),
## msg="Testing function calling: OK.", silent=TRUE)
## checkException(plotRegion(ampliPanel, seqname="chr0"),
## msg="Testing function calling: OK.", silent=TRUE)
## checkException(plotRegion(ampliPanel, seqname="chr10"),
## msg="Testing function calling: OK.", silent=TRUE)
## checkTrue(seqname %in% levels(seqnames(getBedFile(ampliPanel))),
## msg="seqname present in the featurePanel: OK.")
## g<-plotRegion(ampliPanel, region, seqname)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(1501,12),
## msg="returned plot data dimension: OK.")
##}
#### Test xlsx report creation
##test_reportCreation<-function(){
## data(ampliPanel, package="TarSeqQC")
## imageFile<-system.file("extdata", "plot.pdf", package="TarSeqQC",
## mustWork=TRUE)
## buildReport(ampliPanel, imageFile=imageFile, file="test.xlsx")
## checkTrue(file.exists("test.xlsx"), msg="Xlsx file creation: OK.")
##}
##-----------------------------------------------------------------------------
##TargetExperimentList-class Tests
##-----------------------------------------------------------------------------
##Empty object test: Does TargetExperimentList work without any parameter?
test_TargetExperimentList<-function(){
    # Smoke test: the zero-argument constructor must yield a valid object.
    checkTrue(validObject(TargetExperimentList()),
        msg="TargetExperimentList works without any parameter: OK.")
}
##Build TargetExperimentList object with user data
test_TargetExperimentListWithData<-function(){
    # Defining the set of TargetExperiment objects
    data(ampliPanel, package="TarSeqQC")
    data(ampliPanel2, package="TarSeqQC")
    ampliList<-list(ampliPanel, ampliPanel2)
    # Defining feature parameter
    feature<-"amplicon"
    # Defining attribute parameter
    attribute<-"coverage"
    ##Calling the constructor
    object<-TargetExperimentList(TEList=ampliList, attribute=attribute,
        feature=feature)
    # Checking slots
    #bedFile
    checkEquals(class(getBedFile(object))[1],"GRanges",
        msg="TargetExperimentList bedFile slot type: OK.")
    # The combined panels must mirror the bed file's names and length.
    checkEquals(class(getPanels(object))[1],"GRanges",
        msg="TargetExperimentList panels slot type= OK.")
    checkEquals(names(getPanels(object)),names(getBedFile(object)),
        msg="TargetExperimentList panels names= OK.")
    checkEquals(length(getPanels(object)),length(getBedFile(object)),
        msg="TargetExperimentList panels dimension= OK.")
    #feature
    checkTrue(is.character(getFeature(object)),
        msg="TargetExperimentList feature slot type= OK.")
    #attribute
    checkTrue(is.character(getAttribute(object)),
        msg="TargetExperimentList attribute slot type= OK.")
    checkTrue(any(getAttribute(object) %in% c("medianCounts", "coverage")),
        msg="TargetExperimentList attribute slot value= OK.")
}
##Getters/setters: check getPanels
test_getPanels<-function(){
    data(TEList, package="TarSeqQC")
    checkEquals(class(getPanels(TEList))[1], "GRanges",
        msg="featurePanel getter: OK.")
}
#### Test plotGlobalAttrExpl
## test_plotGlobalAttrExpl<-function(){
## data(TEList, package="TarSeqQC")
## g<-plotGlobalAttrExpl(object)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
## checkEquals(dim(g$data), c(58,5), msg="returned plot data dimension:
## OK.")
##}
#### Test plotPoolPerformance
##test_plotPoolPerformance<-function(){
## data(TEList, package="TarSeqQC")
## g<-plotAttrExpl(object)
## checkTrue(is.ggplot(g), msg="returned plot type: OK.")
##}
##-----------------------------------------------------------------------------
##Test functions
##-----------------------------------------------------------------------------
##TargetExperiment class test
# test_TargetExperiment()
# test_TargetExperimentWithData()
# test_getBamFile()
# test_setBamFile()
# test_getBedFile()
# test_setBedFile()
# test_getFastaFile()
# test_setFastaFile()
# test_getScanBamP()
# test_setScanBamP()
# test_getPileupP()
# test_setPileupP()
# test_getFeature()
# test_setFeature()
# test_getAttribute()
# test_setAttribute()
# test_getFeaturePanel()
# test_setFeaturePanel()
# test_getGenePanel()
# test_setGenePanel()
# test_getPanels()
# test_summaryFeatureLev()
# test_summaryGeneLev()
# test_summaryIntervals()
# test_readFrequencies()
# test_pileupCounts()
# test_buildFeaturePanel()
# test_summarizePanel()
# test_plotInOutFeatures()
# test_biasExploration()
# test_plotMetaDataExpl()
# test_plot()
# test_plotAttrExpl()
# test_plotAttrPerform()
# test_plotFeatPerform()
# test_plotFeature()
# test_plotGeneAttrPerFeat()
# test_plotNtdPercentage()
# test_plotRegion()
# test_reportCreation()
# test_TargetExperimentList()
# test_TargetExperimentListWithData()
# test_getPanels()
# test_plotGlobalAttrExpl()
# test_plotPoolPerformance()
|
9002d320e88ebebee319e615486db8c0ee080634
|
1b427294f22048b45ef77636d91152a6012eb95c
|
/assists/01-scrape.R
|
b3fe57b40fe577f65f50933874fcded8e7250a9b
|
[] |
no_license
|
willhua03/d3
|
4fd35815b9ce863e3dfd2498d4005be00f1953b1
|
f0e5b977c82fe7752348e225b4d15f8ffd702649
|
refs/heads/master
| 2020-03-26T14:54:06.311959
| 2017-06-22T18:59:17
| 2017-06-22T18:59:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,705
|
r
|
01-scrape.R
|
library(mysportsfeedsR)
library(rvest)
library(stringr)
# authentication ----------------------------------------------------------
# authenticate_v1_0('username', 'password')
# functions ---------------------------------------------------------------
# Build a "YYYYMMDD-AWAY-HOME" game identifier from a schedule entry.
create_gameid <- function(entry) {
  date_part <- str_replace_all(entry$date, '-', '')
  sprintf('%s-%s-%s', date_part, entry$awayTeam$Abbreviation, entry$homeTeam$Abbreviation)
}
# Flatten all goals scored in a single period into a data frame, one row
# per goal. Returns NULL for a scoreless period.
get_goals_in_period <- function(pd) {
  dat <- sapply(pd$scoring$goalScored, function(g) {
    # 12 slots per goal: period number, time, team, then (ID, last, first)
    # triples for the scorer and up to two assist players.
    vec <- c(pd$`@number`, g$time, g$teamAbbreviation,
             g$goalScorer$ID, g$goalScorer$LastName, g$goalScorer$FirstName,
             g$assist1Player$ID, g$assist1Player$LastName, g$assist1Player$FirstName,
             g$assist2Player$ID, g$assist2Player$LastName, g$assist2Player$FirstName)
    # Unassisted / single-assist goals produce a shorter vector; pad with
    # NA so sapply can bind all goals into a uniform 12-row matrix.
    c(vec, rep(NA, 12 - length(vec)))
  })
  df <- data.frame(t(dat))
  # No goals in this period -> zero-column frame; signal it with NULL so
  # callers can drop the period entirely.
  if(ncol(df) == 0) return(NULL)
  names(df) <- c('pd', 'time', 'team', 'g_id', 'g_last', 'g_first', 'a1_id', 'a1_last', 'a1_first', 'a2_id', 'a2_last', 'a2_first')
  df
}
# Stack the per-period goal tables for one game into a single data frame,
# tagging every row with the game date.
get_goals_in_game <- function(gm) {
  lists <- lapply(gm$gameboxscore$periodSummary$period, get_goals_in_period)
  df <- data.frame(date = gm$gameboxscore$game$date)
  # cbind recycles the single date across all goal rows; NULL entries
  # (scoreless periods) contribute no rows to the rbind.
  df <- cbind(df, do.call(rbind.data.frame, c(lists, stringsAsFactors = FALSE)))
  # Player-ID columns become numeric; everything else stays character.
  numeric_cols <- which(names(df) %in% c('g_id', 'a1_id', 'a2_id'))
  df[numeric_cols] <- apply(df[numeric_cols], 2, as.numeric)
  df[-numeric_cols] <- apply(df[-numeric_cols], 2, as.character)
  df
}
# Fetch the boxscore for every game id from the MySportsFeeds API and
# combine the per-game goal tables into one data frame.
get_all_goals <- function(gameids) {
  games <- lapply(gameids, function(id) {
    boxscore <- msf_get_results(league='nhl', season='2016-2017-regular', feed='game_boxscore', params=list(gameid=id))
    boxscore_content <- content(boxscore$response, 'parsed')
    get_goals_in_game(boxscore_content)
  })
  # (A stray bare `games` expression before this return was removed --
  # its value was evaluated and discarded.)
  do.call(rbind.data.frame, c(games, stringsAsFactors = FALSE))
}
# script ------------------------------------------------------------------
# NOTE(review): bind_rows() and write_csv() below come from dplyr/readr,
# which the original script never attached -- load them explicitly so the
# script runs top to bottom.
library(dplyr)
library(readr)
gamelogs <- msf_get_results(league='nhl',season='2016-2017-regular',feed='full_game_schedule',params=list(team='PIT'))
# Renamed from `content` so the variable does not shadow httr::content().
schedule_content <- content(gamelogs$response, 'parsed')
# Bug fix: `game_entries` was used below but never defined. The parsed
# v1.0 full_game_schedule payload nests the entries under
# fullgameschedule$gameentry -- TODO confirm against a live API response.
game_entries <- schedule_content$fullgameschedule$gameentry
gameids <- sapply(game_entries, create_gameid)
allgoals <- get_all_goals(gameids)
write.csv(allgoals, 'data/allgoals.csv', row.names = FALSE)
# Pull the active-player roster and save id/name/position per player.
players <- msf_get_results(league='nhl', season='2016-2017-regular', feed='active_players')
player_content <- content(players$response, 'parsed')
player_list <- lapply(player_content$activeplayers$playerentry, function(p) {
  list(id = p$player$ID,
       last = p$player$LastName,
       first = p$player$FirstName,
       pos = p$player$Position)
})
player_df <- bind_rows(player_list)
write_csv(player_df, 'data/allplayers.csv')
|
043360b6edf6666423cbb0bf29551f94535b90e3
|
e2b778b98bd747aa48ac247b09b324bd4a7cae00
|
/Definitive Data Generator.R
|
821a769985a1659bd9abea525fd54bce402454cc
|
[] |
no_license
|
Mait22/TQuant2017_G2
|
9f3ed0b779d6f1fcefed8799420083434eb49d36
|
d64ba771ddc0210c29b8efdd65b6867a2bde9c18
|
refs/heads/master
| 2021-01-18T15:55:38.713932
| 2017-03-31T22:35:43
| 2017-03-31T22:35:43
| 86,695,595
| 0
| 3
| null | 2017-03-30T14:26:15
| 2017-03-30T11:36:21
|
R
|
UTF-8
|
R
| false
| false
| 1,120
|
r
|
Definitive Data Generator.R
|
# Black-box response generator no. 4: a quadratic/interaction trend in the
# four inputs plus several structured noise components. The random draws
# are made in the same order (and each product in the same left-to-right
# order) as the original formulation, so results are identical for a given
# RNG state.
gen4 <- function(x1, x2, x3, x4) {
  n <- length(x1)
  trend <- 0 +
    3 * x1 +
    3 * x1^2 +
    1 * x2 +
    1 * x1 * x2 +
    1 * x1 * x3 +
    1 * x2 * x3 +
    0.008 * x4
  # x1-linked heteroscedastic ripple (first rnorm draw).
  ripple1 <- 0.0001 * rnorm(length(x1), 0, 2 * n / 10) * sin(x1) * n^2
  # x2-linked ripple, zeroed for ~4% of points (runif then sample draws).
  ripple2 <- 0.0001 * sin(x2) * runif(n, -n, n) *
    sample(c(0, 1), n, prob = c(0.04, 0.96), replace = TRUE) *
    n^1.5
  trend + ripple1 + ripple2 + rnorm(n = n, mean = 0, sd = 5000)
}
# Simulate an n-row data set from one of four black-box generators.
#
#   n         number of observations
#   generator which gen*() function to use (1-4)
#   plot      if TRUE, plot the simulated response
#
# Returns a data.frame with columns y, x1, x2, x3, x4.
# NOTE(review): gen(), gen2() and gen3() are assumed to be defined
# elsewhere in the project -- only gen4() is visible in this file.
dataGen <- function(n, generator = 1, plot = FALSE) {
  x1 <- -(n/4):(n*3/4-1)
  x2 <- rnorm(n, 5, 30)
  # Bug fix: x3/x4 were only created on the generator == 4 branch, so the
  # data.frame() call below errored for generators 1-3. Default them to NA
  # so every generator returns a well-formed frame. (rep() draws no random
  # numbers, so the RNG stream is unchanged.)
  x3 <- rep(NA_real_, n)
  x4 <- rep(NA_real_, n)
  if (generator == 1) {
    y <- gen(x1, x2)
  } else if (generator == 2) {
    y <- gen2(x1, x2)
  } else if (generator == 3) {
    y <- gen3(x1, x2)
  } else if (generator == 4) {
    # Generator 4 uses its own input distributions.
    x1 <- sort(rnorm(n, 15, 30))
    x2 <- rnorm(n, 20, 8)
    x3 <- rnorm(n, 4, 70)
    x4 <- rep(runif(4, -10, 40), ceiling(n/4))[1:n]
    y <- gen4(x1, x2, x3, x4)
  }
  ds <- data.frame("y" = y, "x1" = x1, "x2" = x2, "x3" = x3, "x4" = x4)
  if (plot) plot(ds$y, main = paste0("Blackbox | n=", n))
  ds
}
# Quick visual sanity check: reset the plotting grid and plot one draw from
# generator 4 (which exercises all four covariates).
par(mfrow=c(1,1))
plot(dataGen(100,plot=T,generator = 4)$y)
|
807237ac11e88bdb5940229bc223800585db3558
|
53beba2f6c2bf43378da27f51bd11f14f06efc24
|
/plot4.R
|
fafb9d33812d178fc14524c36605a7e02014d3ca
|
[] |
no_license
|
ahmedelgendycoursera/ExData_Plotting1
|
92b4e282f5edf7337b4f4ccdeb7ba99013b71226
|
6c296ba5f1185c699f84db34096748c614397df3
|
refs/heads/master
| 2021-01-20T17:23:32.098343
| 2016-01-10T22:06:08
| 2016-01-10T22:06:08
| 49,340,006
| 0
| 0
| null | 2016-01-09T20:24:34
| 2016-01-09T20:24:33
| null |
UTF-8
|
R
| false
| false
| 1,446
|
r
|
plot4.R
|
# Read the full power-consumption file but keep only the rows for the two
# target dates (1-2 Feb 2007); '?' is the file's missing-value marker.
data <- subset(read.table(file="household_power_consumption.txt",header = TRUE,na.strings = "?",sep=';'),(Date=="2/2/2007"|Date=="1/2/2007"))
# Convert the d/m/Y Date column to Date class so it can be merged with Time.
data$x1 <- as.Date(data$Date, format = "%d/%m/%Y")
# Merge Date and Time columns into a single datetime string.
data$x2 <- paste(data$x1,data$Time,sep=" ")
# Coerce the datetime string to POSIXct for time-axis plotting.
data$x2 <- as.POSIXct(data$x2)
library("lubridate")
# Create a 2x2 panel of plots.
par(mfrow = c(2,2))
# Four plots; the third panel overlays the three sub-metering series.
plot(round_date(data$x2,"minute"),data$Global_active_power, type = "l",ylab = "Global Active Power",xlab=NA)
plot(round_date(data$x2,"minute"),data$Voltage, type = "l",ylab = "Voltage",xlab="datetime")
plot(round_date(data$x2,"minute"),y = data$Sub_metering_1, type = "l",ylab = "Energy sub metering")
lines(round_date(data$x2,"minute"),y = data$Sub_metering_2, type = "l",col = "red")
lines(round_date(data$x2,"minute"),y = data$Sub_metering_3, type = "l",col = "blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col = c("black","red", "blue"), pch = "__" , bty = "n")
plot(round_date(data$x2,"minute"),data$Global_reactive_power, type = "l",xlab="datetime",ylab = "Global_reactive_power")
# Copy the on-screen device to a PNG (default 480x480) and close it.
dev.copy(png,file="plot4.png")
dev.off()
|
553363be574a20c2d4720d08731f9ffb09d74cd4
|
0d92a20f2f35dcfcd572c52f5e3b4184279bfa04
|
/seprcp26_posneg.R
|
aca125d221aeaf6da00f0d55419e9c8183856b48
|
[] |
no_license
|
richardcode/cmip5_anal
|
a578158209a67e2cae796a1d12d71d5afc8f1661
|
1d5588f177ac4d1eeb92d2d958a86b819724e1b0
|
refs/heads/master
| 2020-06-16T14:31:59.559643
| 2017-04-12T14:31:03
| 2017-04-12T14:31:03
| 75,091,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
seprcp26_posneg.R
|
source("tcre_var_functs.R")
# Load in the data from the CMIP5 ensembles.
cmip5_data <- read.csv('./Data/ESM_cmip5_tempems.csv')
# Keep RCP2.6 rows and drop the 1850-2004 year columns (columns 6 onward
# up to 2004-1850+6), leaving the 2005-2159 projection period.
rcp26_data <- cmip5_data[cmip5_data$Scenario=='RCP26',-c(6:(2004-1850+6))]
warm_threshs <- c(0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0)
# For each warming threshold, compare carbon-budget distributions computed
# from the full RCP2.6 series vs. from the positive-emissions years only.
for (warm_thresh in warm_threshs) {
  # Each list holds two parallel vectors: [[1]] durations, [[2]] budgets.
  out_rcp26_pos <- list(c(),c())
  out_rcp26 <- list(c(),c())
  # Loop over models (one temperature row per model).
  for (j in c(1:nrow(rcp26_data[rcp26_data$Variable=='Temperature|rel to 1861-80',]))){
    # Indices of years before net emissions first go negative; if emissions
    # never go negative, keep every year.
    if (sum(as.numeric(rcp26_data[rcp26_data$Variable=='Total anthropogenic carbon flux',][j,-c(1:5)]) < 0.0 , na.rm=TRUE)==0){
      log_pos_emms <- c(1:length(as.numeric(rcp26_data[rcp26_data$Variable=='Total anthropogenic carbon flux',][j,-c(1:5)])))
    } else {
      log_pos_emms <- c(1:(which((as.numeric(rcp26_data[rcp26_data$Variable=='Total anthropogenic carbon flux',][j,-c(1:5)]) < 0.0) )[1]-1))
    }
    # Temperature and emissions series for model j: full, and positive-only.
    temps_j <- as.numeric(rcp26_data[rcp26_data$Variable=='Temperature|rel to 1861-80',][j,-c(1:5)])
    temps_j_p <- as.numeric(rcp26_data[rcp26_data$Variable=='Temperature|rel to 1861-80',][j,-c(1:5)])[log_pos_emms]
    emms_j <- as.numeric(rcp26_data[rcp26_data$Variable=='Total anthropogenic carbon flux',][j,-c(1:5)])
    emms_j_p <- as.numeric(rcp26_data[rcp26_data$Variable=='Total anthropogenic carbon flux',][j,-c(1:5)])[log_pos_emms]
    # calc_budget_dist() comes from tcre_var_functs.R — returns duration and
    # budget components for the given threshold (TODO confirm signature).
    out_rcp26_e <- calc_budget_dist(warm_thresh,temps_j,emms_j,c(2005:2159))
    out_rcp26_e_p <- calc_budget_dist(warm_thresh,temps_j_p,emms_j_p,c(2005:2159)[log_pos_emms])
    out_rcp26[[1]]<- c(out_rcp26[[1]],out_rcp26_e[[1]])
    out_rcp26[[2]]<- c(out_rcp26[[2]],out_rcp26_e[[2]])
    out_rcp26_pos[[1]]<- c(out_rcp26_pos[[1]],out_rcp26_e_p[[1]])
    out_rcp26_pos[[2]]<- c(out_rcp26_pos[[2]],out_rcp26_e_p[[2]])
  }
  # Stack both scenarios into one long data frame; budgets are normalised
  # per degree of warming (budget / warm_thresh).
  df_all <- data.frame(matrix(nrow=0,ncol=3))
  df_all <- rbind(df_all,matrix(c(rep('RCP2.6',length(out_rcp26[[2]])),out_rcp26[[1]],out_rcp26[[2]]/warm_thresh),ncol=3))
  df_all <- rbind(df_all,matrix(c(rep('RCP2.6- Positive emissions',length(out_rcp26_pos[[2]])),out_rcp26_pos[[1]],out_rcp26_pos[[2]]/warm_thresh),ncol=3))
  df_all[,2] <- as.numeric(as.vector(df_all[,2]))
  df_all[,3] <- as.numeric(as.vector(df_all[,3]))
  colnames(df_all) <- c('Scenario','Duration','Budget')
  # Density plot of the per-degree budgets, one PNG per threshold.
  p <- ggplot(df_all, aes(Budget, colour = Scenario)) + geom_density(alpha=0.1) + labs(x = "Budget (GtC/°C)") + ggtitle(paste('Warming: ',as.character(warm_thresh),'°C',sep=''))
  ggsave(paste('Figures/rcp26_posneg_',as.character(warm_thresh),'.png',sep=''),plot=p,dpi=300,width=5,height=5)
}
|
1cee29b8622ea5b404a6efa7e00a1bb172a7ebcc
|
f8e2de8eac41b7049c161240c16907ae7f3f6000
|
/linearRegression/linearRegression.R
|
f3f0fe337f13b1bb09fdf522c14c98beca576f88
|
[] |
no_license
|
moriano/machineLearningExamples
|
a3394df27e43b2dc96101916ad0e7b4764bac649
|
7fb5d16bd30cdc122cd01832ee964ed7a23f13a5
|
refs/heads/master
| 2021-01-14T11:35:07.265197
| 2016-08-14T00:02:17
| 2016-08-14T00:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
linearRegression.R
|
# Compare linear, quadratic and cubic regressions of price on (x + y) by
# held-out absolute error.
all_data <- read.csv("sampleData1.csv")
# Quick exploration of the dataset
print(paste("Number of rows", nrow(all_data)))
head(all_data)
# First, let's split the set: 70% for training, 30% for testing.
# NOTE(review): 0:70 selects rows 1-70 because index 0 is silently dropped
# in R — 1:70 would be clearer.
train_data <- all_data[0:70, ]
test_data <- all_data[71:100, ]
# Train a simple model
model <- lm(price ~ x + y, train_data)
# Predict and add two extra columns
predictions_linear <- predict(model, test_data)
results <- data.frame(price = test_data$price,
predictions_linear = predictions_linear,
error_linear = test_data$price - predictions_linear)
# Lets train now a model using a polynomial level of 2
# NOTE(review): poly(x + y, 2) fits a polynomial in the SUM x+y, not separate
# polynomials in x and y (that would be poly(x, 2) + poly(y, 2)) — confirm
# this is intended.
model_square <- lm(price ~ poly(x + y, 2), train_data)
predictions_square <- predict(model_square, test_data)
results$predictions_square <- predictions_square
results$error_square <- test_data$price - predictions_square
# And now lets try polynomial of level 3
model_cube <- lm(price ~ poly(x + y, 3), train_data)
predictions_cube <- predict(model_cube, test_data)
results$predictions_cube <- predictions_cube
results$error_cube <- test_data$price - predictions_cube
# Finally, compute the total error for each model: the sum of the absolute
# values of the residuals. The lowest sum identifies the best approach.
error_linear <- sum(abs(results$error_linear))
error_square <- sum(abs(results$error_square))
error_cube <- sum(abs(results$error_cube))
print(paste("Linear error is ", error_linear))
print(paste("Square error is", error_square))
print(paste("Cube error is ", error_cube))
|
87a34bf1fa368635585cb73d295773cc3e2b8b4c
|
3830551a6c5213a309e9d4aa40fd54131c5fdcbb
|
/tests/testthat/test-datatable.R
|
bea46a341f1bb7c99edf69052799c3b8bfea5c97
|
[
"MIT"
] |
permissive
|
b-rodrigues/paint
|
765df57b266bdc078c9be3da19a44548f566a630
|
163e333d0ce785b797ea57389176e037817fa24f
|
refs/heads/master
| 2023-07-09T22:00:49.768581
| 2021-08-08T10:56:54
| 2021-08-08T10:58:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
test-datatable.R
|
# Snapshot tests for paint() on data.table input, with all paint display
# options and the color depth pinned so the snapshots are stable across
# environments. Covers both an unkeyed and a keyed data.table.
test_that("data.table", {
  rlang::with_options(
    cli.num_colors = 256,
    paint_n_rows = NULL,
    paint_max_width = NULL,
    paint_palette = NULL,
    paint_align_row_head = NULL,
    paint_dark_mode = NULL,
    .expr = {
      pp_dt <- data.table::as.data.table(palmerpenguins::penguins)
      expect_snapshot(paint(pp_dt))
      # setkey() sorts in place and returns the same table; the keyed
      # output should render its key information in the snapshot.
      pp_dt_keyed <- data.table::setkey(pp_dt, body_mass_g, flipper_length_mm)
      expect_snapshot(paint(pp_dt_keyed))
    }
  )
})
|
f59f1cd01e2b3fbd780775810285c86b8a00f257
|
2ab1525d7aaccd52b3d6a205310d7f9a31e9af48
|
/R/compileTimeVaryTransProbs.R
|
f8496ac8ccd2497cf582ccc9c4a221c82d4e147a
|
[
"CC-BY-4.0",
"CC0-1.0"
] |
permissive
|
KevinSee/DABOM
|
bbb3e8f03f3d8179c225fcd35588b8c90c1ba927
|
6c868732b7350be82600f50f9a9ccdb99e047f23
|
refs/heads/main
| 2023-08-27T03:57:23.327826
| 2023-08-25T19:21:54
| 2023-08-25T19:21:54
| 103,584,269
| 1
| 6
|
NOASSERTION
| 2023-08-25T19:21:55
| 2017-09-14T21:38:53
|
R
|
UTF-8
|
R
| false
| false
| 2,010
|
r
|
compileTimeVaryTransProbs.R
|
#' @title Compile Time-Varying Transition Probabilities
#'
#' @description Extracts the MCMC posteriors of time-varying transition probabilities for a DABOM model. The time-varying parameters should be set up so that they are organized as an array with the first dimension corresponding to the fish origin, the second to the model branch, and the third the model time strata.
#'
#' @author Kevin See
#'
#' @param dabom_mod An MCMC.list, or a \code{jagsUI} fit whose \code{$samples}
#'   element will be used.
#' @inheritParams createDABOMcapHist
#'
#' @import dplyr tidyr stringr
#' @export
#' @return A tibble of posterior draws with columns \code{CHAIN}, \code{ITER},
#'   \code{param}, \code{value}, \code{origin}, \code{brnch_num} and
#'   \code{strata_num}, one row per draw of each time-varying parameter.
#' @examples
#' \dontrun{
#' compileTimeVaryTransProbs(dabom_mod, parent_child)
#' }
compileTimeVaryTransProbs = function(dabom_mod = NULL,
                                     parent_child = NULL) {
  stopifnot(!is.null(dabom_mod),
            !is.null(parent_child))
  # If a jagsUI fit was supplied, pull out its mcmc.list of samples.
  # Use inherits() rather than class(x) == "...": class() can return a
  # vector, which makes == / %in% comparisons unreliable.
  if (inherits(dabom_mod, "jagsUI")) dabom_mod = dabom_mod$samples
  stopifnot(inherits(dabom_mod, c("mcmc", "mcmc.list")))
  # determine root node (starting point)
  root_node = parent_child %>%
    PITcleanr::buildNodeOrder() %>%
    filter(node_order == 1) %>%
    pull(node)
  trans_df = as.matrix(dabom_mod,
                       iters = T,
                       chains = T) %>%
    as_tibble() %>%
    # pull out movement parameters from root_node
    select(CHAIN, ITER,
           matches(root_node)) %>%
    tidyr::pivot_longer(cols = -c(CHAIN, ITER),
                        names_to = "param",
                        values_to = "value") %>%
    # parse the array indices out of names like "p[o,b,s]":
    # origin = first index, branch = second, time stratum = third
    mutate(origin = stringr::str_split(param, '\\[', simplify = T)[,2],
           origin = stringr::str_sub(origin, 1, 1)) %>%
    mutate(brnch_num = stringr::str_split(param, '\\,', simplify = T)[,2],
           strata_num = stringr::str_split(param, '\\,', simplify = T)[,3]) %>%
    mutate(across(c(brnch_num, strata_num),
                  ~ stringr::str_remove(., "\\]")),
           across(c(brnch_num, strata_num, origin),
                  as.integer)) %>%
    # drop parameters without a third (time-stratum) index
    filter(!is.na(strata_num))
  return(trans_df)
}
|
939635b2b92465bcbf29e46dd1a3ccfde3723fda
|
e02d833b8e59bc008dda8bb42be31aa89ee7255a
|
/man/Franken.Rd
|
b31d2eee4dad9489819aad2606c2b1c8b0bf710b
|
[] |
no_license
|
brian-j-smith/MRMCaov
|
1d437daf62f2619f72a4ab5bb0c3fa71137f5a18
|
d79e49bcef42ef2a17195aae84ca84ae569f8702
|
refs/heads/master
| 2023-01-29T10:40:59.924662
| 2023-01-18T21:05:53
| 2023-01-18T21:05:53
| 196,109,727
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 864
|
rd
|
Franken.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Franken}
\alias{Franken}
\title{Multi-reader multi-case dataset}
\format{
A data frame with 800 rows and 5 variables:
\describe{
\item{reader}{reader identifier}
\item{treatment}{treatment identifier}
\item{case}{case identifier}
\item{truth}{true case status (1 = abnormal, 0 = normal)}
\item{rating}{ordinal reader ratings of abnormal case status (1 = definitely
normal, 5 = definitely abnormal)}
}
}
\usage{
Franken
}
\description{
Multi-reader multi-case dataset
}
\references{
Franken EA Jr, Berbaum KS, Marley SM, Smith WL, Sato Y, Kao SC, Milam SG
(1992). Evaluation of a digital workstation for interpreting neonatal
examinations: a receiver operating characteristic study. Investigational
Radiology, 27(9): 732-737.
}
\keyword{datasets}
|
872582ab4d598a2517fd27de2effe73b64936f0e
|
5213fc5250aeb751d6dbefcd35aa9b836d0c7c66
|
/run_analysis.R
|
cb1b39f7ac2f433b323b84e43963aad43f24a4f6
|
[] |
no_license
|
skallinen/Coursera-Getting-And-Cleaning-Data
|
62045baa38f2aea103f5b1ed3fd3feded3020352
|
8771a19c3fd538f0bb1c69a455bfc2a27bd5030c
|
refs/heads/master
| 2020-04-06T07:10:06.800402
| 2014-10-27T04:42:06
| 2014-10-27T04:42:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,990
|
r
|
run_analysis.R
|
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#===============================================================================================
# Load packages
#===============================================================================================
# NOTE(review): the install-if-missing idiom below uses require() for the
# check only; library() then loads (and errors loudly if install failed).
if(!require(dplyr)){install.packages("dplyr")}
library(dplyr)
if(!require(reshape2)){install.packages("reshape2")}
library(reshape2)
#===============================================================================================
# Download files, if on *nix system
#===============================================================================================
# On Linux/macOS, fetch and unzip the UCI HAR dataset via curl; on other
# platforms, ask the user to download it manually.
if(!file.exists("UCI HAR Dataset")){
if (Sys.info()["sysname"] == "Linux" | Sys.info()["sysname"] == "Darwin") {
system("curl -sS https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip > dataset.zip && unzip dataset.zip && rm dataset.zip")
} else {
stop("Please download the dataset zip archive and extract it to the working directory... https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip")
}
}
#===============================================================================================
# 1. Reads the files. Merges the training and the test sets to create one data set.
#===============================================================================================
## Read file function
# Read a set of whitespace-delimited text files (no header) and bind their
# columns side by side into one data frame.
#
# Args:
#   files: character vector of file paths. All files must have the same
#          number of rows (cbind requirement, as in the original).
#
# Returns: a data.frame with the columns of every file, in input order;
# an empty data.frame when `files` is empty.
readFiles <- function(files){
  # Read each file once, then bind all columns in a single do.call(cbind, ...)
  # instead of growing the data frame with cbind() inside a loop.
  tables <- lapply(files, function(file) {
    read.table(file.path(file), header = FALSE)
  })
  if (length(tables) == 0) {
    return(data.frame())
  }
  do.call(cbind, tables)
}
## Build the first data frame: column-bind each split's files, then row-bind
## the test and train sets (step 1).
dataset <- c("./UCI HAR Dataset/test", "./UCI HAR Dataset/train")
files <- list.files(path=dataset[1], pattern="*.txt", full.names=T, recursive=F)
data <- readFiles(files)
files <- list.files(path=dataset[2], pattern="*.txt", full.names=T, recursive=F)
data <- rbind(data, readFiles(files))
## Convert to tbl_df and name the first (subject) and last (activity) columns.
tidy_data <- tbl_df(data)
names(tidy_data)[1] <- "subject"
names(tidy_data)[length(tidy_data)] <- "activity"
## Read the feature variable names.
file <- file.path("./UCI HAR Dataset/features.txt")
features <- read.table(file, header=F, stringsAsFactors = F) %>% tbl_df() %>% select(V2)
## Append a zero-padded index suffix since several duplicate names exist.
for (i in 1:length(features$V2)){
features$V2[i] <- paste(features$V2[i], formatC(i, width=3, flag="0"), sep ="_")
}
## Attach the variable names to the 561 feature columns of the dataset.
names(tidy_data)[2:562] <- features$V2
#===============================================================================================
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
#===============================================================================================
tidy_data <- select(tidy_data, subject, activity, contains("mean()"), contains("std()"))
#===============================================================================================
# 3./4. Uses descriptive activity names to name the activities in the data set
#===============================================================================================
tidy_data$activity <- factor(tidy_data$activity,
levels = c(1:6),
labels = c("WALKING","WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING"))
## Tidy the variable names: strip non-alphabetic characters, then restore
## CamelCase for Mean/Std.
for (i in 3:length(names(tidy_data))) {
names(tidy_data)[i] <- gsub("[^[:alpha:] ]", "", names(tidy_data)[i])
names(tidy_data)[i] <- gsub("mean", "Mean", names(tidy_data)[i])
names(tidy_data)[i] <- gsub("std", "Std", names(tidy_data)[i])
}
#===============================================================================================
# 5. From the data set in step 4, creates a second, independent tidy data set with the average
#    of each variable for each activity and each subject.
#===============================================================================================
# Melt to long form on (subject, activity), then cast back taking the mean.
tidyMelt <- melt(tidy_data,id=c("subject","activity"),measure.vars=c(names(tidy_data)[3:length(names(tidy_data))]))
tidy_data <- dcast(tidyMelt, subject + activity ~ variable, mean)
## Save the tidy file to disk.
write.table(tidy_data, file = "./tidy_data.txt", row.name=FALSE)
|
7ae863f67f878d4f5e880411483e34b044a3ea30
|
f3da1980b138389d08e5e5b0c3cb76dacbc0e1b6
|
/workflow.R
|
98128d63e1a29850f473fdd02b37a064b38f24d8
|
[] |
no_license
|
brianlle/PSPG_245B
|
64cdc05281d141c2a3463c68ed23ad336e32a7c0
|
1b25afbc7d17d1f7540091569de4e6e62564807b
|
refs/heads/master
| 2020-04-28T07:17:56.745296
| 2019-03-12T20:36:43
| 2019-03-12T20:36:43
| 175,086,816
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,237
|
r
|
workflow.R
|
#R3.5.2
#Adaped from Dr. Bin Chen's drug repositioning pipeline
#This is a simplified version of a pipeline used to predict drug hits for a given disease
#Depending on your context, you can also compare different phenotypes, responders vs. non-responders, mutation vs. wild type, etc
# NOTE(review): hard-coded setwd() makes this machine-specific; consider a
# project-relative path.
setwd("~/PSPG_245_Test/")
#install packages needed to run the code
# NOTE(review): biocLite() was retired in favour of BiocManager::install()
# (used below for org.Hs.eg.db) — the two install mechanisms are mixed here.
source("http://www.bioconductor.org/biocLite.R")
biocLite("impute")
biocLite("siggenes")
biocLite("RankProd")
biocLite("preprocessCore")
biocLite("GEOquery")
biocLite("qvalue")
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("org.Hs.eg.db", version = "3.8")
install.packages(c("pheatmap", "gplots", "ggplot2", "RColorBrewer", "plyr"))
###############################
#parameters
disease <- "breast_cancer" #no space
#method to compute disease signatures. by default, we use SAM.
method_id <- 3 #2: rankprod, 3: siggenes sam,
q_thresh <- 0.05 #fdr; if there are no differentially expressed genes, can try loosening the threshold
fold_change <- 1 #fold change cutoff
#disease signature file
dz_sig.file <- paste(disease, "/dz_signature_cmap.txt", sep="")
###############################
#create a folder for the disease
if (!dir.exists(disease)) dir.create(disease)
#create disease signatures
#need to identify a dataset used to create disease signatures
#in the real case, you need to find a more robust way to validate disease signatures before making drug predictions.
#update location of the disease dataset
disease_data_filepath <- "data/HiSeqV2"
#This code snippet takes in the disease gene expression data and generates a differential gene expression signature
#Also outputs a heatmap visualization of the separation of cases vs. controls (stored in disease folder)
source("code/create_dz_signature_from_TCGA.R")
#Predict drugs using connectivity map data with generated disease signature
# Runs the prediction step as a separate Rscript subprocess.
cmd <- paste("Rscript code/predict_drugs_cmap.R", disease, "cmap", paste(disease, "/dz_signature_cmap.txt", sep=""))
system(cmd)
#Analyze predictions: apply filtering to drug hits, generate visualization of best hits and worst hits
source("code/drug_repurpose.R")
#all the results will be stored in the disease folder
|
d6becde3e81440a9d32ac864b5d131a5361109be
|
35dd3dd5dec0f7ae8e0fb99adf95f9784294f58c
|
/createMaps/Erdbeben/Tiefe.R
|
7f4f625479a1f342f9d88921fe09f15195955be0
|
[] |
no_license
|
ReneNyffenegger/Hydroplattentheorie
|
cdb76a165fdaf5372643919503c520fa076b1fb7
|
4a52f5e48b15bf31c0b7b792b9fd12c0d4b024c1
|
refs/heads/master
| 2021-01-17T01:14:21.933398
| 2018-01-13T08:07:55
| 2018-01-13T08:07:55
| 59,203,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
Tiefe.R
|
# Load 2015 earthquakes (magnitude >= 5) and look at the depth distribution,
# first overall and then restricted to deep events (> 150 km). Each plot
# waits for a mouse click (locator) before continuing.
eq_2015 <- read.csv("2015_Magnitude-groesser-gleich-5.csv", stringsAsFactors = FALSE)
depth_km <- eq_2015[c('depth')]$depth
x11()  # open an interactive graphics window
hist(depth_km, main='Erdbebentiefen (2015, Mag >= 5)', xlab='Tiefe (km)', ylab='')
z <- locator(1)  # pause until the user clicks
# Keep only deep-focus events and plot again.
depth_km <- depth_km[depth_km > 150]
hist(depth_km, main='Erdbebentiefen (2015, Mag >= 5, Tiefe>150 km)', xlab='Tiefe (km)', ylab='')
z <- locator(1)
|
f02ed6cc1be75518858f1f8a92e9e62df5e78441
|
61e67ee6b59b4649a8b3d1643edc98350a822d17
|
/R/get_recipe_nutrition.R
|
a2b5708be14f206336ead9102e01b33b30296ae6
|
[] |
no_license
|
cynthiawang315/SpoonacularAPI
|
fd7ac3f5b230cf86da398f5f1bd3464acf870475
|
3483dc9628ee5e098e21ea9ca9059095bd18b685
|
refs/heads/master
| 2021-01-06T01:17:02.024747
| 2020-02-17T19:26:19
| 2020-02-17T19:26:19
| 241,187,235
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
get_recipe_nutrition.R
|
#' Get the nutritional information of the recipes.
#'
#' This function gets nutritional information of one recipe from the
#' Spoonacular nutritionWidget endpoint.
#' @param key API key
#' @param recipe_id The recipe ID.
#' @return On success, a list of three data frames: general nutrition info,
#'   "good" nutrients, and "bad" nutrients. On a missing argument, a failed
#'   request, or an empty result, a character message or httr status object
#'   is returned instead (kept for backward compatibility with callers).
#' @examples
#' get_recipe_nutrition(key = Sys.getenv("SPOON_KEY"), recipe_id = "753644")
#' @export
get_recipe_nutrition <- function(key = NULL,recipe_id = NULL) {
  # Scalar condition: use short-circuiting || rather than the vectorized |.
  if (is.null(key) || is.null(recipe_id)) {
    return("API key or recipe_id is missing.")
  }
  querypar_nutr <- list("apiKey" = key)
  # paste0() instead of paste(..., sep = "") to build the endpoint URL.
  nutr_data <- httr::GET(paste0("https://api.spoonacular.com/recipes/", recipe_id, "/nutritionWidget.json"), query = querypar_nutr)
  if (httr::http_status(nutr_data)$category != "Success") {
    return(httr::http_status(nutr_data))
  } else {
    nutr_list <- jsonlite::fromJSON(httr::content(nutr_data,as = "text"), flatten = TRUE)
    if (is.null(nutr_list)) {
      return("No results found. Please enter new parameters.")
    } else {
      # Elements 1:4 are scalar summary fields; 5 and 6 are the "bad" and
      # "good" nutrient tables respectively.
      general_nutrition_info <- as.data.frame(nutr_list[1:4])
      bad_nutrients <- nutr_list[[5]] %>% dplyr::select(nutrients = title, amount, percentOfDailyNeeds)
      good_nutrients <- nutr_list[[6]] %>% dplyr::select(nutrients = title, amount, percentOfDailyNeeds)
      return(list(general_nutrition_info,good_nutrients,bad_nutrients))
    }
  }
}
|
b5306ec97ef34062153536b51c79ea0c830189a4
|
9bd22c31db29eec72a209c9cc2a5c721ce03b20a
|
/admissions/Kelvin/script2 (Autosaved).R
|
0c896a604618905d1192fd1e98eccd2549e292e7
|
[] |
no_license
|
kelvinabrokwa/wm-stats-blog
|
9fdb08939e5529e33781100cb2b95125cd482f89
|
5fbfe494bd916d8e5afa96686af2e618d57c3de4
|
refs/heads/master
| 2021-01-16T18:02:53.775614
| 2014-12-05T14:34:23
| 2014-12-05T14:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 249
|
r
|
script2 (Autosaved).R
|
# NOTE(review): hard-coded absolute setwd() ties this script to one machine —
# consider relative paths or here::here().
setwd('/users/kelvinabrokwa/documents/repositories/wm-stats-blog/kelvin')
library(googleVis)
# Long-format aggregate data: one row per (variable, Year).
data.long <- read.csv('agg_data_long.csv', header=TRUE)
View(data.long)
# Build and display an interactive googleVis motion chart over Year.
motion = gvisMotionChart(data.long, idvar="variable", timevar="Year")
plot(motion)
|
641960003e87013d942d2a065438876331aa3c1b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/comtradr/examples/ct_commodity_lookup.Rd.R
|
199ad2b36e08e7a12bacdf14c6ade4edc6ff13f1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
ct_commodity_lookup.Rd.R
|
library(comtradr)
### Name: ct_commodity_lookup
### Title: UN Comtrade commodities database query
### Aliases: ct_commodity_lookup
### ** Examples
# Look up commodity descriptions related to "halibut"
# (return_code = FALSE returns descriptions rather than codes).
ct_commodity_lookup("halibut",
return_code = FALSE,
return_char = FALSE,
verbose = TRUE)
# Look up commodity codes related to "shrimp"
# (return_code = TRUE returns the numeric commodity codes).
ct_commodity_lookup("shrimp",
return_code = TRUE,
return_char = FALSE,
verbose = TRUE)
|
e1e991052e86d023a51b33096bb88ff7ffab230f
|
fc266ad2e073b99aff20b6fc718c0e9b27d1c617
|
/man/rowMax.units.Rd
|
5a370eef5a08431d3cecea436593613e3920490f
|
[] |
no_license
|
ttriche/oldGridExtra
|
59676ed24529f7fc4ae7ca768cc660e01476eb72
|
c28db2479f9e9d07afbbf51d508a60570eb8d55c
|
refs/heads/master
| 2016-09-06T14:39:49.324675
| 2015-07-16T19:49:42
| 2015-07-16T19:49:42
| 39,217,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
rd
|
rowMax.units.Rd
|
\name{rowMax.units}
\alias{colMax.units}
\alias{rowMax.units}
\title{rowMax.units}
\usage{
rowMax.units(u, nrow)
}
\arguments{
\item{u}{list of units}
\item{nrow}{nrow}
}
\value{
a vector of units
}
\description{
calculates the max of a list of units arranged in a
matrix
}
\seealso{
\code{unit.c}, \code{unit}
}
|
1485b5bf8f4d1f4c05b14de710a23c0201a002ee
|
0414e310bf964d10dfe0ca2c5fe0c487f04693c1
|
/figures/scripts/fig4-tp53-telomerase-panels.R
|
48c974c2a70e61413d2b12709f0fff3a1b3b1342
|
[
"CC-BY-4.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
jharenza/OpenPBTA-analysis
|
90838d0af9fa438a00148d6641fb7d65497ca2fd
|
489d729cde4318654a26b0a9ef0dee6d706352cd
|
refs/heads/master
| 2022-05-04T14:55:22.800848
| 2022-04-17T18:26:40
| 2022-04-17T18:26:40
| 305,783,266
| 0
| 0
|
NOASSERTION
| 2022-04-04T22:28:24
| 2020-10-20T17:25:03
|
HTML
|
UTF-8
|
R
| false
| false
| 14,329
|
r
|
fig4-tp53-telomerase-panels.R
|
# S. Spielman for ALSF CCDL 2022
#
# Makes pdf panels for reporting TP53 and telomerase results in main text
library(tidyverse)
# Establish base dir (repository root, located via the .git directory)
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
# Declare output directory
output_dir <- file.path(root_dir, "figures", "pdfs", "fig4", "panels")
if (!dir.exists(output_dir)) {
  dir.create(output_dir, recursive = TRUE)
}
# Data directory
data_dir <- file.path(root_dir, "data")
# Analysis directories for the two result sets consumed below
analyses_dir <- file.path(root_dir, "analyses")
tp53_dir <- file.path(analyses_dir, "tp53_nf1_score")
telomerase_dir <- file.path(analyses_dir, "telomerase-activity-prediction")
# Palette directory
palette_dir <- file.path(root_dir, "figures", "palettes")
# Read in clinical data and associated palette
histologies_df <- read_tsv(file.path(data_dir, "pbta-histologies.tsv"),
                           guess_max = 10000)
histologies_palette_df <- read_tsv(file.path(palette_dir, "broad_histology_cancer_group_palette.tsv"))
binary_palette_df <- readr::read_tsv(file.path(palette_dir, "binary_color_palette.tsv"))
# Read in tp53 classifier ROC results (real and shuffled-label baselines)
tp53_roc_stranded <- read_tsv(file.path(tp53_dir, "results", "stranded_TP53_roc_threshold_results.tsv"))
tp53_roc_stranded_shuff <- read_tsv(file.path(tp53_dir, "results", "stranded_TP53_roc_threshold_results_shuffled.tsv"))
# Read in tp53 data file for violin plots
tp53_compare <- read_tsv(file.path(tp53_dir, "results", "tp53_altered_status.tsv"))
stranded_expression <- read_rds(file.path(data_dir,"pbta-gene-expression-rsem-fpkm-collapsed.stranded.rds"))
# Read in EXTEND scores. We read Stranded FPKM here.
extend_scores <- read_tsv(file.path(telomerase_dir, "results", "TelomeraseScores_PTBAStranded_FPKM.txt"))
# Read in TMB for highlighting points in boxplots
tmb_coding_df <- read_tsv(file.path(data_dir, "pbta-snv-consensus-mutation-tmb-coding.tsv"))
#### Define output PDF panels ----------------------------------------------------------------
tp53_roc_pdf <- file.path(output_dir, "tp53_stranded_roc_panel.pdf")
tp53_scores_altered_pdf <- file.path(output_dir, "tp53_scores_by_altered_panel.pdf")
tp53_expression_altered_pdf <- file.path(output_dir, "tp53_expression_by_altered_panel.pdf")
tp53_telomerase_scores_boxplot_pdf <- file.path(output_dir, "tp53_telomerase_boxplots_panel.pdf")
tp53_telomerase_scores_boxplot_legend_pdf <- file.path(output_dir, "tp53_telomerase_boxplots_panel_legend.pdf")
#forest_plot_pdf <- file.path(output_dir, "forest_survival_tp53_telomerase_hgg_panel.pdf")
### ROC curve ---------------------------------------------------------------------------------
# Create data frame that will plot ROC
roc_df <- bind_rows(tp53_roc_stranded, tp53_roc_stranded_shuff) %>%
mutate(auroc = round(auroc, 2),
shuffled = ifelse(shuffled, 'TP53 Shuffle', 'TP53'),
Classifier = paste0(shuffled, ' (AUROC = ', auroc,')'))
# prep in binary color palette
binary_scale <- binary_palette_df$hex_codes[binary_palette_df$color_names != "na_color"]
# Make the ROC curve
roc_plot <- ggplot(roc_df) +
aes(
x = fpr,
y = tpr
) +
geom_step(
aes(color = Classifier),
size = 0.75
) +
geom_segment(
aes(x = 0, y = 0, xend = 1, yend = 1),
color = "black"
) +
coord_fixed() +
scale_y_continuous(labels = scales::percent) +
scale_x_continuous(labels = scales::percent) +
scale_color_manual(values = binary_scale) +
labs(
x = "False Positive Rate",
y = "True Positive Rate") +
ggpubr::theme_pubr() +
theme(legend.text = element_text(size = rel(0.7)),
legend.title = element_text(size = rel(0.7)),
axis.text = element_text(size = rel(0.75)),
axis.title = element_text(size = rel(0.75))
)
ggsave(tp53_roc_pdf, roc_plot, width = 5, height = 5)
### TP53 scores and expression violin plots -----------------------------------------------------------
# We do not use color palettes since the color mappings will necessarily change across figures.
# We use ggplot instead of ggpubr because of jitter styling (ggpubr jitter point placement is deterministic)
# Function to plot both TP53 violin plots (score across altered and expression across altered)
# Shared violin-plot builder for the TP53 panels.
#
# df:       data frame with the value to plot in a column named `tp53`
#           (either TP53 score or TP53 expression) plus a three-level
#           `tp53_altered` status column.
# pvalue_y: y-axis position of the Wilcoxon p-value annotation (the x
#           position is determined by the two compared groups).
#
# Returns a ggplot object; axis labels are added by the caller.
plot_tp53 <- function(df, pvalue_y) {
  # fixed seed for reproducible jitter
  set.seed(4)

  # Append group sizes to the x-axis labels
  counted_df <- df %>%
    group_by(tp53_altered) %>%
    mutate(altered_counts = n()) %>%
    ungroup() %>%
    mutate(tp53_altered = glue::glue("{tp53_altered}\n(N = {altered_counts})"))

  # The Wilcoxon test compares only the two non-`other` groups
  two_group_df <- filter(counted_df,
                         !(str_detect(tp53_altered, "other")))

  # Formatted for use with `stat_pvalue_manual()`
  wilcox_df <- ggpubr::compare_means(tp53 ~ tp53_altered,
                                     data = two_group_df,
                                     method = "wilcox.test") %>%
    mutate(y.position = pvalue_y)

  # Median +/- IQR, drawn as a pointrange over each violin
  summary_df <- counted_df %>%
    group_by(tp53_altered) %>%
    summarize(
      y = median(tp53, na.rm = TRUE),
      ymin = quantile(tp53, 0.25, na.rm = TRUE),
      ymax = quantile(tp53, 0.75, na.rm = TRUE)
    )

  ggplot(counted_df) +
    aes(x = tp53_altered,
        y = tp53) +
    geom_violin() +
    # very light alpha to accommodate the large `other` category
    geom_jitter(alpha = 0.25,
                width = 0.1,
                size = 1) +
    geom_pointrange(data = summary_df,
                    aes(
                      x = tp53_altered,
                      y = y,
                      ymin = ymin,
                      ymax = ymax
                    ),
                    color = "firebrick", size = rel(0.6)
    ) +
    # p-value annotation via ggpubr
    ggpubr::stat_pvalue_manual(
      wilcox_df,
      label = "Wilcoxon P-value = {p.adj}"
    ) +
    ggpubr::theme_pubr()
}
# change `loss` --> `lost` for grammatical consistency in plot labels
tp53_compare <- tp53_compare %>%
  mutate(tp53_altered = case_when(
    tp53_altered == "loss" ~ "lost",
    TRUE ~ tp53_altered
  ))

# Prepare data for all plots - we want stranded ONLY
# subset to TP53: transpose so genes become columns, then pull the TP53 column
# (named vector of FPKM values, one per biospecimen)
subset_stranded <- t(stranded_expression)[,"TP53"]

# Join STRANDED expression with tp53 alteration
# Note that because this is stranded only, it has fewer data points.
stranded_tp53 <- as.data.frame(subset_stranded) %>%
  rename(tp53_expression=subset_stranded) %>%
  rownames_to_column(var = "Kids_First_Biospecimen_ID_RNA") %>%
  # easier to work with
  as_tibble() %>%
  # inner_join ensures stranded-only
  inner_join(tp53_compare, by = "Kids_First_Biospecimen_ID_RNA") %>%
  # keep only columns we need
  select(Kids_First_Biospecimen_ID_RNA, tp53_expression, tp53_altered, tp53_score) %>%
  distinct()

# Make the figures. plot_tp53() expects the plotted value in a column named
# `tp53`, so rename before calling.
tp53_scores_plot <- stranded_tp53 %>%
  rename(tp53 = tp53_score) %>%
  ### ggplot
  plot_tp53(pvalue_y = 1.05) +
  # add labels for this plot
  labs(
    x = "TP53 altered status",
    y = "TP53 score"
  )

tp53_expression_plot <- stranded_tp53 %>%
  # log transform; NOTE(review): this is log(FPKM + 1) but the axis label
  # below says log(FPKM) -- consider aligning the label
  rename(tp53 = tp53_expression) %>%
  mutate(tp53 = log(tp53 + 1)) %>%
  ### ggplot
  plot_tp53(pvalue_y = 4.5) +
  # add labels for this plot
  labs(
    x = "TP53 altered status",
    y = "TP53 expression [log(FPKM)]"
  )

# Export figures
ggsave(tp53_scores_altered_pdf, tp53_scores_plot, width = 6, height = 4)
ggsave(tp53_expression_altered_pdf, tp53_expression_plot, width = 6, height = 4)
## TP53 and telomerase scores boxplots across cancer groups with mutators emphasized -------------------------------------------

# Define cancer groups to show in boxplots
cancer_groups_to_plot <- c(
  "Diffuse midline glioma",
  "Low-grade glioma astrocytoma",
  "Craniopharyngioma",
  "High-grade glioma astrocytoma",
  "Ganglioglioma",
  "Medulloblastoma",
  "Meningioma",
  "Ependymoma",
  "Schwannoma",
  "Dysembryoplastic neuroepithelial tumor"
)

# Cancer group wrap number of characters for labeling x-axis in boxplots
cg_wrap <- 20

# Create combined data frame of mutator information, tp53 scores, and telomerase scores (NormEXTENDScores)
# Join tmb_coding_df with tp53_compare first, because both have DNA identifiers.
# Since tp53_compare also has the RNA identifier, then we can join with extend_scores
tp53_telo_mutator_df <- tmb_coding_df %>%
  # columns of interest
  select(Kids_First_Biospecimen_ID_DNA = Tumor_Sample_Barcode, # consistent naming for joining
         tmb) %>%
  # add a column about mutator status, binned on coding TMB
  # (< 10 normal, 10-100 hypermutant, >= 100 ultra-hypermutant)
  mutate(
    mutator = case_when(
      tmb < 10 ~ "Normal",
      tmb >= 10 & tmb < 100 ~ "Hypermutant",
      tmb >= 100 ~ "Ultra-hypermutant")
  ) %>%
  # join with tp53_compare
  inner_join(
    select(tp53_compare,
           Kids_First_Biospecimen_ID_DNA,
           Kids_First_Biospecimen_ID_RNA,
           tp53_score),
    by = "Kids_First_Biospecimen_ID_DNA"
  ) %>%
  # rename RNA column for joining
  rename(Kids_First_Biospecimen_ID = Kids_First_Biospecimen_ID_RNA) %>%
  # join with extend_scores using RNA column
  inner_join(
    select(extend_scores,
           Kids_First_Biospecimen_ID = SampleID, # rename for joining
           telo_score = NormEXTENDScores
    )
  )

# Prepare combined data for visualization
plot_df <- tp53_telo_mutator_df %>%
  # add in histology information
  inner_join(
    select(histologies_df,
           Kids_First_Biospecimen_ID,
           cancer_group)
  ) %>%
  # add in palette information
  inner_join(
    select(histologies_palette_df,
           cancer_group,
           cancer_group_display,
           cancer_group_hex)
  ) %>%
  # filter to cancer groups of interest
  filter(cancer_group %in% cancer_groups_to_plot) %>%
  # duplicate the tp53 scores column so we can eventually order cancer groups by it
  # (the original column is consumed by the gather() below)
  mutate(tp53_forordering = tp53_score) %>%
  # we want a single column for all scores so we can facet by scores
  # first, change naming for facet labels:
  rename(`Telomerase score` = telo_score,
         `TP53 score` = tp53_score) %>%
  gather(contains("score"),
         key = "score_type",
         value = "score") %>%
  # order TP53 on top
  mutate(score_type = fct_relevel(score_type, "TP53 score")) %>%
  # wrap cancer group label
  mutate(cancer_group_display = str_wrap(cancer_group_display, cg_wrap))

# Define colors to use for the three mutator categories
legend_colors <- c(Normal = "grey40",
                   Hypermutant = "orange",
                   `Ultra-hypermutant` = "red")

# Other plot parameters which need to be re-introduced in legend:
normal_alpha <- 0.7
normal_size <- 1.75
mutator_size <- 2.25
normal_pch <- 19
mutator_pch <- 21
jitter_width <- 0.15 # not in legend but often in main plot
# Boxplot with overlayed jitter with colors "mapped" to `mutator`.
# The mapping is done manually via three geom_jitter layers (rather than an
# aes() color mapping) so the mutant points render on top; a legend is built
# separately below.
set.seed(14) # reproducible jitter to ensure we can see all N=6 points in BOTH facets
tp53_telo_tmb_boxplot <- ggplot(plot_df) +
  aes(
    x = fct_reorder(cancer_group_display, tp53_forordering), # order cancer groups by tp53 score
    y = score
  ) +
  geom_boxplot(
    outlier.shape = NA, # no outliers (every point is shown by the jitter layers)
    color = "grey20", # dark grey color
    size = 0.4
  ) +
  # Separate out jitters so that the mutant layers are ON TOP OF normal
  geom_jitter(
    data = plot_df[plot_df$mutator == "Normal",],
    width = jitter_width,
    alpha = normal_alpha,
    size = normal_size,
    pch = normal_pch,
    color = legend_colors["Normal"]
  ) +
  geom_jitter(
    data = plot_df[plot_df$mutator == "Hypermutant",],
    width = jitter_width,
    pch = mutator_pch, # filled circle with outline
    size = mutator_size,
    fill = legend_colors["Hypermutant"]
  ) +
  geom_jitter(
    data = plot_df[plot_df$mutator == "Ultra-hypermutant",],
    width = jitter_width,
    pch = mutator_pch,
    size = mutator_size,
    fill = legend_colors["Ultra-hypermutant"]
  ) +
  labs(x = "Cancer group",
       y = "Score") +
  # one facet row per score type; TP53 is on top (see fct_relevel above)
  facet_wrap(~score_type, nrow = 2) +
  ggpubr::theme_pubr() +
  theme(
    axis.text.x = element_text(angle = 45, hjust=1, size = rel(0.8))
  )

# Export plot
ggsave(tp53_telomerase_scores_boxplot_pdf,
       tp53_telo_tmb_boxplot,
       width = 9, height = 6)

# Make a legend for the grey/orange/red since this was not done with normal mapping
# We have to make a "fake" plot for this to extract the legend from
# Count numbers of each category to show in legend
tp53_plot_legend_df <- plot_df %>%
  # keep 1 category only for counting (each sample appears once per facet)
  filter(score_type == "TP53 score") %>%
  # column to use for ordering mutator levels
  mutate(mutator_order = case_when(
    mutator == "Normal" ~ 1,
    mutator == "Hypermutant" ~ 2,
    mutator == "Ultra-hypermutant" ~ 3
  )) %>%
  # count
  group_by(mutator) %>%
  mutate(mutator_count = n()) %>%
  ungroup() %>%
  # update labeling with count information
  mutate(mutator = glue::glue("{mutator} (N = {mutator_count})"),
         mutator_factor = factor(mutator),
         mutator_factor = fct_reorder(mutator_factor, mutator_order))

legend_name <- "Mutation status"
# Dummy plot whose scales reproduce the manual styling of the main plot;
# used only as a source for cowplot::get_legend()
tp53_plot_for_legend <- ggplot(tp53_plot_legend_df) +
  aes(x = cancer_group, y = tmb, shape = mutator_factor, fill = mutator_factor, color = mutator_factor, size = mutator_factor, alpha = mutator_factor) +
  geom_point(size =3) +
  scale_size_manual(name = legend_name, values = c(normal_size, mutator_size, mutator_size))+
  scale_shape_manual(name = legend_name, values = c(normal_pch, mutator_pch, mutator_pch)) +
  scale_alpha_manual(name = legend_name, values = c(normal_alpha, 1, 1))+
  scale_color_manual(name = legend_name,values = c(unname(legend_colors["Normal"]), "black", "black")) +
  scale_fill_manual(name = legend_name, values = c("black", unname(legend_colors["Hypermutant"]), unname(legend_colors["Ultra-hypermutant"]))) +
  # theme to remove gray background. this strategy works
  theme_classic()

legend <- cowplot::get_legend(tp53_plot_for_legend)

# Export legend as its own small PDF
pdf(tp53_telomerase_scores_boxplot_legend_pdf, width = 6, height = 3)
cowplot::ggdraw(legend)
dev.off()
|
3cb49dca579542fc2cc370256964ee9e39b0ff60
|
84526595f5bb52ad787c5bcd4eac5ee6af937ffb
|
/R/enrichment_clustperturb.R
|
1722b27e1c3a7043cd5c0a0d409be7018e56ff75
|
[] |
no_license
|
GregStacey/ppicluster
|
d2b9cb378603c627436d1d9fd454a3d6b110bd5f
|
912c31dcc74e2bc9f8ed5c5d248b1f9cab54d019
|
refs/heads/master
| 2021-07-03T16:31:37.156136
| 2020-09-01T00:34:12
| 2020-09-01T00:34:12
| 160,438,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,329
|
r
|
enrichment_clustperturb.R
|
source("functions.R")
source("clust-perturb-tool/clust-perturb.R")
source("clust-perturb-tool/functions.R")

# binarize corum: load the CORUM complex table, keep human complexes, and read
# the precomputed pairwise (binary) interaction list derived from them
fn = "../data/allComplexes.txt"
corum = as.data.frame(read_tsv(fn))
corum = corum[corum$Organism=="Human",]
ints.corum = as.data.frame(read_tsv("../data/interactomes/corum_pairwise.txt"))
unqprots = unique(c(ints.corum$protA, ints.corum$protB))

#### cluster with the 8 algorithms listed below
# `alg`, `edge.list.format`, and `cluster.format` are parallel lists, indexed
# by the same position as `alg.names`
noise = 0.1   # fraction of network edges perturbed per iteration
iters = 100   # number of perturbation iterations
alg.names = c("k-Med", "MCL", "walktrap", "CO", "Louvain", "Leiden", "MCODE", "hierarchical")
alg = c(function(x) pam(x, 1500),
        function(x) mymcl(x, infl = 2, iter = 100, verbose = T),
        walktrap.community,
        function(x) clusteroneR(x, pp=500, density_threshold = 0.1, java_path = "../java/cluster_one-1.0.jar"),
        function(x) louvain(x, 15),
        function(x) leiden(x, resolution_parameter = 2),
        function(x) mcode(x, vwp = 0, haircut = FALSE, fluff = FALSE, fdt = 0.1),
        function(x) print("hierarchical"))
# per-algorithm converters from an edge list to the clustering input format
# (NULL means clust.perturb uses its default handling for that algorithm)
edge.list.format = list(pam.edge.list.format,
                        mcl.edge.list.format,
                        function(x) graph_from_edgelist(as.matrix(x), directed = F),
                        NULL,
                        louvain.edge.list.format,
                        leiden.edge.list.format,
                        mcode.edge.list.format,
                        hierarch.edge.list.format)
# per-algorithm converters from raw clustering output to a common format
cluster.format = list(pam.cluster.format,
                      mcl.cluster.format,
                      NULL,
                      NULL,
                      louvain.cluster.format,
                      leiden.cluster.format,
                      mcode.cluster.format,
                      hierarch.cluster.format)
# Either load previously computed clustering results, or run the remaining
# algorithms and checkpoint the accumulated results after each one finishes.
if (0) {
  load("../data/enrichment_clustperturb.Rda")
} else {
  # # co
  # print("co")
  # jj = 4
  # clusters.co = clust.perturb2(ints.corum, clustering.algorithm = alg[[jj]],
  #                              noise = noise, iter = iters,
  #                              edge.list.format = edge.list.format[[jj]],
  #                              cluster.format = cluster.format[[jj]])
  # save(clusters.kmed, clusters.mcl, clusters.walk, clusters.co,
  #      file = "../data/enrichment_clustperturb.Rda")
  #
  # # k-med
  # print("k-med")
  # jj = 1
  # clusters.kmed = clust.perturb2(ints.corum, clustering.algorithm = alg[[jj]],
  #                                noise = noise, iter = iters,
  #                                edge.list.format = edge.list.format[[jj]],
  #                                cluster.format = cluster.format[[jj]])
  # save(clusters.kmed, clusters.mcl, clusters.walk, clusters.co,
  #      file = "../data/enrichment_clustperturb.Rda")
  #
  # # walktrap
  # print("walktrap")
  # jj = 3
  # clusters.walk = clust.perturb2(ints.corum, clustering.algorithm = alg[[jj]],
  #                                noise = noise, iter = iters,
  #                                edge.list.format = edge.list.format[[jj]],
  #                                cluster.format = cluster.format[[jj]])
  # save(clusters.kmed, clusters.mcl, clusters.walk, clusters.co,
  #      file = "../data/enrichment_clustperturb.Rda")
  #
  # # mcl
  # print("mcl")
  # jj = 2
  # clusters.mcl = clust.perturb2(ints.corum, clustering.algorithm = alg[[jj]],
  #                               noise = noise, iter = iters,
  #                               edge.list.format = edge.list.format[[jj]],
  #                               cluster.format = cluster.format[[jj]])
  # save(clusters.mcl,
  #      file = "../data/enrichment_clustperturb_mcl.Rda")

  # Louvain
  jj = 5
  cluster.louvain = clust.perturb(ints.corum, clustering.algorithm = alg[[jj]],
                                  noise = noise, iter = iters,
                                  edge.list.format = edge.list.format[[jj]],
                                  cluster.format = cluster.format[[jj]])
  # FIX: the save() filenames below previously ended in a stray comma INSIDE
  # the string ("...mcpres.Rda,"), so results were written to files whose
  # names end in a comma and a later load("...mcpres.Rda") would not find them.
  save(cluster.louvain, file = "../data/enrichment_clustperturb_mcpres.Rda")

  # Leiden: needs an explicit node-name extractor because its raw output does
  # not carry vertex names
  jj = 6
  leiden.node.names = function(x) {
    adjmat = graph_from_edgelist(as.matrix(x[,1:2]))
    unqprots = names(V(adjmat))
    return(unqprots)
  }
  # NOTE(review): Leiden uses clust.perturb2 while the other algorithms use
  # clust.perturb -- confirm which wrapper is intended here.
  cluster.leiden = clust.perturb2(ints.corum, clustering.algorithm = alg[[jj]],
                                  noise = noise, iter = iters,
                                  edge.list.format = edge.list.format[[jj]],
                                  cluster.format = cluster.format[[jj]],
                                  node.names = leiden.node.names)
  save(cluster.louvain, cluster.leiden,
       file = "../data/enrichment_clustperturb_mcpres.Rda")

  # MCODE
  jj = 7
  cluster.mcode = clust.perturb(ints.corum, clustering.algorithm = alg[[jj]],
                                noise = noise, iter = iters,
                                edge.list.format = edge.list.format[[jj]],
                                cluster.format = cluster.format[[jj]])
  save(cluster.louvain, cluster.leiden, cluster.mcode,
       file = "../data/enrichment_clustperturb_mcpres.Rda")

  # Hierarchical
  jj = 8
  cluster.hierarch = clust.perturb(ints.corum, clustering.algorithm = alg[[jj]],
                                   noise = noise, iter = iters,
                                   edge.list.format = edge.list.format[[jj]],
                                   cluster.format = cluster.format[[jj]])
  save(cluster.louvain, cluster.leiden, cluster.mcode, cluster.hierarch,
       file = "../data/enrichment_clustperturb_mcpres.Rda")
}
#### enrichment

# read ontology (GO basic OBO file)
ontology = get_ontology("../data/go-basic.obo")

# read annotations; the parsed GOA object is cached as .Rda because parsing is slow
if (0) {
  goa = read_gpa("../data/goa_human.gpa",
                 filter.NOT = T, filter.evidence = c("ND", "IPI", "IEA", "NAS"),
                 ontology = ontology, propagate = T)
  save(goa, file = "../data/goa_human.gpa.Rda")
} else {
  load("../data/goa_human.gpa.Rda")
}

# remove roots (the three top-level GO terms are uninformative)
rootNames = c(BP = "GO:0008150", CC = "GO:0005575", MF = "GO:0003674")
goa %<>% dplyr::filter(!GO.ID %in% rootNames)

# process BP, CC, and MF annotations separately; each becomes a named list
# mapping GO term -> annotated protein IDs
bp = filter_roots(goa, ontology, 'BP') %>%
  as_annotation_list("DB_Object_ID", "GO.ID")
cc = filter_roots(goa, ontology, 'CC') %>%
  as_annotation_list("DB_Object_ID", "GO.ID")
mf = filter_roots(goa, ontology, 'MF') %>%
  as_annotation_list("DB_Object_ID", "GO.ID")
anns = list(BP = bp, CC = cc, MF = mf)

# filter anns: keep only proteins present in the CORUM network, then keep only
# terms annotating >5 and <100 of them
unqprots = unique(unlist(ints.corum[,1:2]))
for (ii in 1:length(anns)) {
  print(ii)
  anns[[ii]] = lapply(anns[[ii]], FUN = function(x) { x[x %in% unqprots]
  })
  anns[[ii]] = anns[[ii]][lapply(anns[[ii]],length)>5 & lapply(anns[[ii]],length)<100]
}

# calculate enrichment for every cluster
# NOTE(review): clusters.kmed / clusters.mcl / clusters.walk / clusters.co are
# assigned only in the commented-out runs above (or by the load() branch), so
# this section assumes they already exist in the workspace -- confirm.
clusters = list("clusters.kmed" = clusters.kmed,
                "clusters.mcl" = clusters.mcl,
                "clusters.walk" = clusters.walk,
                "clusters.co" = clusters.co)
all.enr = list() # 4 elements, one for each algorithm
for (aa in 1:length(clusters)) {
  # loop over algorithms
  all.enr[[aa]] = list()
  # one result data frame per ontology branch (BP, CC, MF)
  for (ii in 1:3) {
    all.enr[[aa]][[ii]] = data.frame(
      qenriched.goid = character(nrow(clusters[[aa]])), # which go terms is the clusters[[aa]] enriched for
      np.enriched = numeric(nrow(clusters[[aa]])), # how many enriched go terms (p<0.01)
      nq.enriched = numeric(nrow(clusters[[aa]])) # how many enriched go terms (q<0.1)
      , stringsAsFactors = F)
  }
  for (ii in 1:nrow(clusters[[aa]])) {
    print(ii)
    for (jj in 1:length(anns)) {
      goids = names(anns[[jj]])
      pp = rep(NA, length(anns[[jj]]))
      for (kk in 1:length(anns[[jj]])) {
        # hypergeometric test for enrichment of GO term kk in cluster ii
        M = sum(unqprots %in% anns[[jj]][[kk]]) # how many proteins have this term (white balls in urn)
        this.cluster = unlist(strsplit(clusters[[aa]]$cluster[ii], ";"))
        K = length(this.cluster) # size of sample (number of balls drawn)
        X = sum(this.cluster %in% anns[[jj]][[kk]]) # go id in this cluster (number of white drawn)
        # probability of drawing at least that many white balls (upper tail)
        pp[kk] = phyper(q=X-1, m=M, n=length(unqprots)-M, k=K, lower.tail=FALSE)
      }
      # p.adjust() uses its default (Holm) correction; adjusted threshold 0.1
      all.enr[[aa]][[jj]]$qenriched.goid[ii] = paste(goids[p.adjust(pp)<.1], collapse = "-")
      all.enr[[aa]][[jj]]$np.enriched[ii] = sum(pp<.01)
      all.enr[[aa]][[jj]]$nq.enriched[ii] = sum(p.adjust(pp)<.1)
    }
  }
  # checkpoint after each algorithm (save is intentionally inside the aa loop)
  save(clusters, all.enr, file = "../data/enrichment.Rda")
}
|
7a431d1498831efc108b599bb5de897f6e3aff7e
|
3ccae4dc240fd39478a93f2c8d58d1895b2c35f5
|
/R/Main.R
|
dc93c6cc7f8e67d6d42fcf5a0f0a2fc210d3e6ee
|
[] |
no_license
|
ohdsi-studies/CovariateImbalanceDiagnosticsEvaluation
|
849d5fab37069d2d2e6d6729e0642df3b1b6b787
|
8141a7d36dd11d6c2f117e12f322ea0ade9935da
|
refs/heads/master
| 2023-06-04T08:35:01.602840
| 2021-06-15T05:11:07
| 2021-06-15T05:11:07
| 346,760,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,936
|
r
|
Main.R
|
# Copyright 2021 Observational Health Data Sciences and Informatics
#
# This file is part of CovarBalDiagEval
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the study
#'
#' @details
#' This function executes the study.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#'                          \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#'                          DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#'                          Note that for SQL Server, this should include both the database and
#'                          schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where outcome data can be stored. You will need to have
#'                          write priviliges in this schema. Note that for SQL Server, this should
#'                          include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the \code{cohortDatabaseSchema}.
#'                          This table will hold the exposure and outcome cohorts used in this
#'                          study.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#'                          (/). Do not use a folder on a network drive since this greatly impacts
#'                          performance.
#' @param databaseId A short string for identifying the database (e.g. 'Synpuf').
#' @param packageName The name of the package, added for flexibility in package naming convention with find/replace.
#' @param randomSeed The seed to use for random samples - for greater reproducibility.
#' @param databaseName The full name of the database.
#' @param databaseDescription A short description (several sentences) of the database.
#' @param maxCores How many parallel cores should be used? If more cores are made available
#'                          this can speed up the analyses.
#' @param maxCohortSize The largest cohort size to be used when creating \code{CohortMethod} data objects.
#' @param createCohorts Create the exposure and outcome cohorts?
#' @param synthesizePositiveControls Create synthetic positive controls using \code{MethodEvaluation} package?
#' @param createCohortMethodObjects Create the CohortMethod data objects?
#' @param generateAnalysisObjects Computes the balance objects, fits outcome models, and generates population meta-data.
#' @param synthesizeAndExportResults Synthesize results across analyses?
#' @param verbose Should verbose logging be used?
#' @param oracleTempSchema For Oracle only: the name of a schema where temp tables can be
#'                          created. Ignored on other platforms; defaults to \code{NULL}.
#'
#' @export
execute <- function(connectionDetails,
                    cdmDatabaseSchema,
                    cohortDatabaseSchema,
                    cohortTable,
                    outputFolder,
                    databaseId,
                    packageName,
                    randomSeed = 123,
                    databaseName = databaseId,
                    databaseDescription = databaseId,
                    maxCores = parallel::detectCores() - 1,
                    maxCohortSize = 100000,
                    createCohorts = FALSE,
                    synthesizePositiveControls = FALSE,
                    createCohortMethodObjects = FALSE,
                    generateAnalysisObjects = FALSE,
                    synthesizeAndExportResults = FALSE,
                    verbose = TRUE,
                    # FIX: previously `oracleTempSchema` was referenced in the
                    # body but was not a parameter, causing an "object not
                    # found" error when those steps ran. Appended with a
                    # default so existing positional callers are unaffected.
                    oracleTempSchema = NULL) {
  # Set up file + error-report loggers; unregister them when the function exits
  ParallelLogger::addDefaultFileLogger(file.path(outputFolder, "covBalanceLog.txt"))
  ParallelLogger::addDefaultErrorReportLogger(file.path(outputFolder, "errorReportR.txt"))
  on.exit(ParallelLogger::unregisterLogger("DEFAULT_FILE_LOGGER", silent = TRUE))
  on.exit(ParallelLogger::unregisterLogger("DEFAULT_ERRORREPORT_LOGGER", silent = TRUE), add = TRUE)

  # NOTE(review): databaseName and databaseDescription are accepted and
  # documented but not used in this function body -- confirm downstream use.
  startTime <- Sys.time()
  if (verbose)
    ParallelLogger::logInfo(sprintf("Starting database %s at %s", databaseId, startTime))

  if (!file.exists(outputFolder)) {
    dir.create(outputFolder, recursive = TRUE, showWarnings = FALSE)
  }

  # create cohorts (exposure, outcome, negative controls)
  if (createCohorts) {
    if (verbose)
      ParallelLogger::logInfo("Creating exposure, outcome, and negative control cohorts")
    createCohorts(connectionDetails = connectionDetails,
                  cdmDatabaseSchema = cdmDatabaseSchema,
                  cohortDatabaseSchema = cohortDatabaseSchema,
                  cohortTable = cohortTable,
                  outputFolder = outputFolder,
                  packageName = packageName,
                  verbose = verbose)
  }

  # injection signal in negative controls
  if (synthesizePositiveControls) {
    if(verbose)
      ParallelLogger::logInfo("Synthesizing positive controls")
    synthesizePositiveControls(connectionDetails = connectionDetails,
                               cdmDatabaseSchema = cdmDatabaseSchema,
                               cohortDatabaseSchema = cohortDatabaseSchema,
                               cohortTable = cohortTable,
                               oracleTempSchema = oracleTempSchema,
                               outputFolder = outputFolder,
                               packageName = packageName,
                               maxCores = maxCores,
                               verbose = verbose)
  }

  # create cohortMethod data object and studyPop
  if(createCohortMethodObjects) {
    if(verbose)
      ParallelLogger::logInfo("Generating CohortMethod data")
    createCohortMethodObjects(connectionDetails = connectionDetails,
                              cdmDatabaseSchema = cdmDatabaseSchema,
                              cohortDatabaseSchema = cohortDatabaseSchema,
                              cohortTable = cohortTable,
                              oracleTempSchema = oracleTempSchema,
                              outputFolder = outputFolder,
                              packageName = packageName,
                              maxCores = maxCores,
                              createStudyPops = FALSE,
                              maxCohortSize = maxCohortSize,
                              serializeObjects = TRUE,
                              verbose = verbose)
  }

  # fit outcome models and compute balance
  if(generateAnalysisObjects) {
    if(verbose)
      ParallelLogger::logInfo("Fitting outcome models and computing balance")
    generateAnalysisObjects(cmOutputFolder = getCmFolderPath(outputFolder),
                            packageName = packageName,
                            maxCores = maxCores,
                            randomSeed = randomSeed)
  }

  if(synthesizeAndExportResults) {
    if(verbose)
      ParallelLogger::logInfo("Exporting results")
    synthesizeAndExportResults(resultsFolder = getResultsFolderPath(outputFolder = outputFolder),
                               cmOutputFolder = getCmFolderPath(outputFolder = outputFolder),
                               databaseId = databaseId,
                               maxCores = maxCores,
                               packageName = packageName)
  }

  endTime <- Sys.time()
  delta <- endTime - startTime
  if (verbose)
    ParallelLogger::logInfo(sprintf("Database %s took %f %s", databaseId, signif(delta, 3), attr(delta, "units")))
}
|
e234100119f6cdf31dd5794cb592ca6872e553b1
|
a77b7389641ff5078d1c5bd4bb2404c100e84203
|
/cachematrix.R
|
4403801442eaeb7af485bdce330d3561e5b85b40
|
[] |
no_license
|
jaydoc/ProgrammingAssignment2
|
6e9ae423fc178a2b6646e2c310fa43910854cf61
|
dbe9badf0d37951aaf2002903cf8a4bff4432f1d
|
refs/heads/master
| 2020-12-03T09:19:53.053861
| 2014-04-27T01:20:43
| 2014-04-27T01:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
r
|
cachematrix.R
|
## The following function creates a list (in this context, a matrix) containing a function to
## set and get the values of the matrix and its inverse.
## function that creates a special matrix which can then cache its inverse
## Build a cache-aware matrix wrapper: a list of four closures that share the
## matrix `x` and its (lazily computed) inverse via the enclosing environment.
## The inverse itself is computed and stored by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix; any previously cached inverse is stale,
    # so it is discarded here
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix
    get = function() x,
    # Store a computed inverse in the cache
    setinv = function(inv_) cached_inverse <<- inv_,
    # Return the cached inverse, or NULL if none has been stored yet
    getinv = function() cached_inverse
  )
}
## to compute the inverse of above matrix. If not available it calculates and sets the value in the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed with solve() on the first call and stored via
## x$setinv(); later calls read it back from the cache (with a message).
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # cache miss: compute, store, and return the inverse
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    # cache hit: announce and reuse the stored value
    message("getting cached data")
  }
  cached
}
|
93deb8f088ea8971ed7f6565c5b6816eea9a1b1c
|
dd9f94ac6181401f0ea48e78f9731f1c337b85ca
|
/tests/testthat/test_est_V.R
|
c7badd73fcde6663402981a507d06df44954e670
|
[
"MIT"
] |
permissive
|
unagpal/susieR
|
67de44e321ec21599d56342df409fa2c3dc8eef1
|
46d37a49ccd680b1ff5fa9dfecf8928ca09018cf
|
refs/heads/master
| 2023-07-02T20:50:26.853819
| 2021-07-22T21:13:29
| 2021-07-22T21:13:29
| 276,458,113
| 0
| 0
|
MIT
| 2020-07-01T18:52:45
| 2020-07-01T18:52:44
| null |
UTF-8
|
R
| false
| false
| 405
|
r
|
test_est_V.R
|
context("test_est_V.R")

# Regression test for est_V_uniroot():
# - a saved debug case should produce a non-zero prior variance estimate
# - all-zero effects with unit sampling variances should produce V = 0
# NOTE(review): requires the fixture file 'est_V_debug.rds' in the test
# directory and est_V_uniroot() from the package under test.
test_that("est_V has V nonzero for the debug case", {
  debug = readRDS('est_V_debug.rds')
  betahat = debug$b
  shat2 = debug$s2
  prior_weights = debug$pw
  V_debug = est_V_uniroot(betahat, shat2, prior_weights)
  # Null case: 512 zero effects, unit sampling variance, same prior weights
  betahat_V0 = rep(0, 512)
  shat2_V0 = rep(1, 512)
  V_0 = est_V_uniroot(betahat_V0, shat2_V0, prior_weights)
  expect_false(V_debug==0)
  expect_equal(V_0, 0)
})
|
ce23515471e457a84f501e2aa0cc9c36332fe4f3
|
852b1aae6ad8138dc164eafbf4045e3ea5de87c8
|
/man/add_constant_metadata_resource.Rd
|
996eb4e31a78a914fe0b390f3d0a82e5c447cab0
|
[] |
no_license
|
PinkDiamond1/fin2ddh
|
35bafcb1a3de815d11554d87f85fb45b3d6262d0
|
40ffcf8f79ff3bb9845b742e57ddb1fbf5f82e66
|
refs/heads/master
| 2023-03-17T00:52:19.799023
| 2019-05-08T01:24:10
| 2019-05-08T01:24:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 478
|
rd
|
add_constant_metadata_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_constant_metadata_resource.R
\name{add_constant_metadata_resource}
\alias{add_constant_metadata_resource}
\title{add_constant_metadata_resource}
\usage{
add_constant_metadata_resource(metadata_list)
}
\arguments{
\item{metadata_list}{list: List for machine names and their corresponding values}
}
\value{
list
}
\description{
Add metadata that have constant values across records for resources
}
|
a4f4de784380f6a96b0e061248ec8a56577c82a8
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/stats/man/family.Rd
|
62b7765caa0cdc30b81545d6518f00d8a9f3528b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 9,595
|
rd
|
family.Rd
|
% File src/library/stats/man/family.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2015 R Core Team
% Distributed under GPL 2 or later
\name{family}
\alias{family}
\alias{binomial}
\alias{gaussian}
\alias{Gamma}
\alias{inverse.gaussian}
\alias{poisson}
\alias{quasi}
\alias{quasibinomial}
\alias{quasipoisson}
%\alias{print.family}
\title{Family Objects for Models}
\usage{
family(object, \dots)
binomial(link = "logit")
gaussian(link = "identity")
Gamma(link = "inverse")
inverse.gaussian(link = "1/mu^2")
poisson(link = "log")
quasi(link = "identity", variance = "constant")
quasibinomial(link = "logit")
quasipoisson(link = "log")
}
\arguments{
\item{link}{a specification for the model link function. This can be
a name/expression, a literal character string, a length-one character
vector or an object of class
\code{"\link[=make.link]{link-glm}"} (such as generated by
\code{\link{make.link}}) provided it is not specified
\emph{via} one of the standard names given next.
The \code{gaussian} family accepts the links (as names)
\code{identity}, \code{log} and \code{inverse};
the \code{binomial} family the links \code{logit},
\code{probit}, \code{cauchit}, (corresponding to logistic,
normal and Cauchy CDFs respectively) \code{log} and
\code{cloglog} (complementary log-log);
the \code{Gamma} family the links \code{inverse}, \code{identity}
and \code{log};
the \code{poisson} family the links \code{log}, \code{identity},
and \code{sqrt} and the \code{inverse.gaussian} family the links
\code{1/mu^2}, \code{inverse}, \code{identity}
and \code{log}.
The \code{quasi} family accepts the links \code{logit}, \code{probit},
\code{cloglog}, \code{identity}, \code{inverse},
\code{log}, \code{1/mu^2} and \code{sqrt}, and
the function \code{\link{power}} can be used to create a
power link function.
}
\item{variance}{for all families other than \code{quasi}, the variance
function is determined by the family. The \code{quasi} family will
accept the literal character string (or unquoted as a name/expression)
specifications \code{"constant"}, \code{"mu(1-mu)"}, \code{"mu"},
\code{"mu^2"} and \code{"mu^3"}, a length-one character vector
taking one of those values, or a list containing components
\code{varfun}, \code{validmu}, \code{dev.resids}, \code{initialize}
and \code{name}.
}
\item{object}{the function \code{family} accesses the \code{family}
objects which are stored within objects created by modelling
functions (e.g., \code{glm}).}
\item{\dots}{further arguments passed to methods.}
}
\description{
Family objects provide a convenient way to specify the details of the
models used by functions such as \code{\link{glm}}. See the
documentation for \code{\link{glm}} for the details on how such model
fitting takes place.
}
\details{
\code{family} is a generic function with methods for classes
\code{"glm"} and \code{"lm"} (the latter returning \code{gaussian()}).
For the \code{binomial} and \code{quasibinomial} families the response
can be specified in one of three ways:
\enumerate{
\item As a factor: \sQuote{success} is interpreted as the factor not
having the first level (and hence usually of having the second level).
\item As a numerical vector with values between \code{0} and
\code{1}, interpreted as the proportion of successful cases (with the
total number of cases given by the \code{weights}).
\item As a two-column integer matrix: the first column gives the
number of successes and the second the number of failures.
}
The \code{quasibinomial} and \code{quasipoisson} families differ from
the \code{binomial} and \code{poisson} families only in that the
dispersion parameter is not fixed at one, so they can model
over-dispersion. For the binomial case see McCullagh and Nelder
(1989, pp.\sspace{}124--8). Although they show that there is (under some
restrictions) a model with
variance proportional to mean as in the quasi-binomial model, note
that \code{glm} does not compute maximum-likelihood estimates in that
model. The behaviour of S is closer to the quasi- variants.
}
\note{
The \code{link} and \code{variance} arguments have rather awkward
semantics for back-compatibility. The recommended way is to supply
them is as quoted character strings, but they can also be supplied
unquoted (as names or expressions). In addition, they can also be
supplied as a length-one character vector giving the name of one of
the options, or as a list (for \code{link}, of class
\code{"link-glm"}). The restrictions apply only to links given as
names: when given as a character string all the links known to
\code{\link{make.link}} are accepted.
This is potentially ambiguous: supplying \code{link = logit} could mean
the unquoted name of a link or the value of object \code{logit}. It
is interpreted if possible as the name of an allowed link, then
as an object. (You can force the interpretation to always be the value of
an object via \code{logit[1]}.)
}
\value{
An object of class \code{"family"} (which has a concise print method).
This is a list with elements
\item{family}{character: the family name.}
\item{link}{character: the link name.}
\item{linkfun}{function: the link.}
\item{linkinv}{function: the inverse of the link function.}
\item{variance}{function: the variance as a function of the mean.}
\item{dev.resids}{function giving the deviance residuals as a function
of \code{(y, mu, wt)}.}
\item{aic}{function giving the AIC value if appropriate (but \code{NA}
for the quasi- families). See \code{\link{logLik}} for the assumptions
made about the dispersion parameter.}
\item{mu.eta}{function: derivative \code{function(eta)}
\eqn{d\mu/d\eta}.}
\item{initialize}{expression. This needs to set up whatever data
objects are needed for the family as well as \code{n} (needed for
AIC in the binomial family) and \code{mustart} (see \code{\link{glm}}).}
\item{validmu}{logical function. Returns \code{TRUE} if a mean
vector \code{mu} is within the domain of \code{variance}.}
\item{valideta}{logical function. Returns \code{TRUE} if a linear
predictor \code{eta} is within the domain of \code{linkinv}.}
\item{simulate}{(optional) function \code{simulate(object, nsim)} to be
called by the \code{"lm"} method of \code{\link{simulate}}. It will
normally return a matrix with \code{nsim} columns and one row for
each fitted value, but it can also return a list of length
\code{nsim}. Clearly this will be missing for \sQuote{quasi-} families.}
}
\references{
McCullagh P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models.}
London: Chapman and Hall.
Dobson, A. J. (1983)
\emph{An Introduction to Statistical Modelling.}
London: Chapman and Hall.
Cox, D. R. and Snell, E. J. (1981).
\emph{Applied Statistics; Principles and Examples.}
London: Chapman and Hall.
Hastie, T. J. and Pregibon, D. (1992)
\emph{Generalized linear models.}
Chapter 6 of \emph{Statistical Models in S}
eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
}
\author{
The design was inspired by S functions of the same names described
in Hastie & Pregibon (1992) (except \code{quasibinomial} and
\code{quasipoisson}).
}
\seealso{
\code{\link{glm}}, \code{\link{power}}, \code{\link{make.link}}.
For binomial \emph{coefficients}, \code{\link{choose}};
the binomial and negative binomial \emph{distributions},
\code{\link{Binomial}}, and \code{\link{NegBinomial}}.
}
\examples{
require(utils) # for str
nf <- gaussian() # Normal family
nf
str(nf)
gf <- Gamma()
gf
str(gf)
gf$linkinv
gf$variance(-3:4) #- == (.)^2
## quasipoisson. compare with example(glm)
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3,1,9)
treatment <- gl(3,3)
d.AD <- data.frame(treatment, outcome, counts)
glm.qD93 <- glm(counts ~ outcome + treatment, family = quasipoisson())
\donttest{
glm.qD93
anova(glm.qD93, test = "F")
summary(glm.qD93)
## for Poisson results use
anova(glm.qD93, dispersion = 1, test = "Chisq")
summary(glm.qD93, dispersion = 1)
}
## Example of user-specified link, a logit model for p^days
## See Shaffer, T. 2004. Auk 121(2): 526-540.
logexp <- function(days = 1)
{
linkfun <- function(mu) qlogis(mu^(1/days))
linkinv <- function(eta) plogis(eta)^days
mu.eta <- function(eta) days * plogis(eta)^(days-1) * binomial()$mu.eta(eta)
valideta <- function(eta) TRUE
link <- paste0("logexp(", days, ")")
structure(list(linkfun = linkfun, linkinv = linkinv,
mu.eta = mu.eta, valideta = valideta, name = link),
class = "link-glm")
}
binomial(logexp(3))
## in practice this would be used with a vector of 'days', in
## which case use an offset of 0 in the corresponding formula
## to get the null deviance right.
## Binomial with identity link: often not a good idea.
\dontrun{binomial(link = make.link("identity"))}
## tests of quasi
x <- rnorm(100)
y <- rpois(100, exp(1+x))
glm(y ~ x, family = quasi(variance = "mu", link = "log"))
# which is the same as
glm(y ~ x, family = poisson)
glm(y ~ x, family = quasi(variance = "mu^2", link = "log"))
\dontrun{glm(y ~ x, family = quasi(variance = "mu^3", link = "log")) # fails}
y <- rbinom(100, 1, plogis(x))
# needs to set a starting value for the next fit
glm(y ~ x, family = quasi(variance = "mu(1-mu)", link = "logit"), start = c(0,1))
}
\keyword{models}
|
26d6eef4838ff9ad6e18a38c0edc625794e18347
|
aa734a9629d3e0e942bdf08fb1bed6a5b2d1031b
|
/cachematrix.R
|
8eedaf265b3ccf4f41f6f72b01929fc8c6f74e54
|
[] |
no_license
|
acgadala/ProgrammingAssignment2
|
126fae138a15105da6ad530c36d0aa1b6608b5d3
|
3545cc56cf515c4f2ce91fc99759dfdba1407df6
|
refs/heads/master
| 2021-01-15T23:35:03.538902
| 2014-08-24T17:55:01
| 2014-08-24T17:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 758
|
r
|
cachematrix.R
|
## These functions will calculate and cache the inverse of a matrix.
## Creates a special "matrix" object to cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Build a cache-aware wrapper around a matrix. The returned list exposes
  ## four closures that share this function's environment:
  ##   set(y)              replace the stored matrix (drops any cached inverse)
  ##   get()               return the stored matrix
  ##   setInverse(inverse) store a computed inverse
  ##   getInverse()        return the cached inverse, or NULL if not yet set
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL # a new matrix invalidates the cached inverse
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned without recomputation;
## otherwise it is computed with solve(), stored in the cache, and returned.
##
## x   : cache object (list of get/setInverse/getInverse closures)
## ... : further arguments forwarded to solve(). (The original signature
##       accepted `...` but silently dropped it; it is now passed on.)
cacheSolve <- function(x, ...) {
  i <- x$getInverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setInverse(i)
  i
}
|
9f5d2ba133f754d95d1f322349ebcca658d882be
|
6eb980a9312f50491782a92875a52618dfbcffc6
|
/tests/testthat/setup-vcr.R
|
41522a15773edf52a377250650d1dd01b7e0047c
|
[] |
no_license
|
cran/deepdep
|
d9636bb8dd22b64e86b893adb2c0873ea87068c4
|
74b4aafcb30d8d1bde5e212c6187d052180a7e94
|
refs/heads/master
| 2023-03-05T18:45:35.804669
| 2023-02-20T23:10:05
| 2023-02-20T23:10:05
| 245,601,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
setup-vcr.R
|
# Test setup: configure vcr (HTTP-interaction record/replay) when installed.
# The whole block is skipped gracefully if vcr is unavailable.
if (requireNamespace("vcr", quietly = TRUE)) {
  library("vcr") # *Required* as vcr is set up on loading
  # Read/write cassettes from the package's test fixture directory and show
  # full diagnostic output when a recorded interaction does not match.
  invisible(vcr::vcr_configure(
    dir = vcr::vcr_test_path("fixtures"),
    verbose_errors = TRUE
  ))
  # Fail fast if two test files accidentally reuse the same cassette name.
  vcr::check_cassette_names()
}
|
a2abaef24b9999ebedd1fcc30e14159b9bee0fc5
|
6aec1a0100c3d9f09f3874ed43c72d254c8459f1
|
/Scripts_Booth_et_al_2018/batchReadLengthDistribution.R
|
40d8a52446145e7980c1d15a936f07bef3de4175
|
[] |
no_license
|
summer-yangqin/Pombe_PROseq
|
81050f8a0d5dbca4ebc3217143e511e4e2725dea
|
9d38b91cb2c03f615a78b1f41f1bdc765de11c04
|
refs/heads/master
| 2023-03-19T18:53:09.115451
| 2018-04-24T19:17:49
| 2018-04-24T19:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,064
|
r
|
batchReadLengthDistribution.R
|
## to use this script, just change the logpath to the directory where all of the clipLengthCounts.txt files are for an alignment.
## also change the name of the file to be output (last line of the script, while running plotting function)
## the functions will not be screwed up by other files present as it looks for " clipLengthCounts" in the name of the file
# Plotting libraries used by the functions below.
library(lattice)
library(ggplot2)
library(gplots)

# Input/output locations.
# NOTE(review): absolute, machine-specific paths — adjust for your checkout.
# The original strings contained shell-style "\ " escapes, which R's parser
# rejects ("unrecognized escape"); a literal space is correct in an R string.
logpath <- "/Users/gregorybooth/Google Drive/SEAGATE_EXP/Fisher_collaboration/alignment_11-28-17/FisherPools/logs"
fig_dir <- "/Users/gregorybooth/Google Drive/SEAGATE_EXP/Fisher_collaboration/analysis/figures/readLengthDistributions/"
dir.create(fig_dir)
importReadLengthsDists <- function(logpath) {
  #' Read every "*clipLengthCounts*.txt" file under `logpath` and stack them
  #' into one long table of read-length counts.
  #'
  #' logpath: directory containing the per-sample count files. (The original
  #'   default `logpath = logpath` was self-referential and raised a
  #'   "promise already under evaluation" error when omitted, so the
  #'   argument is now simply required; explicit callers are unaffected.)
  #'
  #' Returns list of two elements:
  #'   [[1]] data.frame with columns V1 (read length), V2 (count) and
  #'         sample (integer id in file-discovery order);
  #'   [[2]] character vector of matching file names (without extension).
  perSample <- list()
  sampleNames <- character(0)
  sample <- 0
  for (readDist in Sys.glob(file.path(logpath, "*.txt"))) {
    # Base name without directory or extension (text before the first dot).
    file.name <- sub("\\..*$", "", basename(readDist))
    # Only files whose name marks them as clip-length count output.
    if (grepl("clipLengthCounts", file.name, fixed = TRUE)) {
      sample <- sample + 1
      cat(file.name, "\n")
      sampleNames <- c(sampleNames, file.name)
      # Read the globbed file directly instead of re-building its path.
      lengthDF <- read.table(file = readDist, sep = "")
      lengthDF$sample <- sample
      # Collect per-file frames and bind once at the end, instead of growing
      # a data.frame with rbind() inside the loop (quadratic copying).
      perSample[[sample]] <- lengthDF
    }
  }
  if (length(perSample) == 0) {
    # Consistent empty result with the same columns the plots expect.
    DF <- data.frame(V1 = numeric(0), V2 = numeric(0), sample = numeric(0))
  } else {
    DF <- do.call(rbind, perSample)
  }
  return(list(DF, sampleNames))
}
test = importReadLengthsDists(logpath = logpath)
batchPlotReadLengths <- function(lengthDat = test, filename = "test.pdf") {
  #' Facet-plot the read-length distribution of every sample and save a PDF.
  #'
  #' lengthDat: list as returned by importReadLengthsDists(): [[1]] long
  #'   data.frame (V1 = length, V2 = count, sample = integer id), [[2]] the
  #'   sample names. NOTE(review): the default refers to the global `test`
  #'   created by this script — kept only for interface compatibility.
  #' filename: output file name, written under the global `fig_dir`.
  lenDat <- lengthDat[[1]]
  sampleNames <- lengthDat[[2]]
  # Replace integer sample ids with file-derived names for the facet labels.
  # seq_along() (not 1:length()) so an empty name vector is a no-op.
  for (i in seq_along(sampleNames)) {
    lenDat$sample[lenDat$sample == i] <- sampleNames[i]
  }
  p <- ggplot(lenDat, aes(x = V1, y = V2)) +
    geom_bar(stat = "identity") + # bar heights come straight from V2 counts
    facet_wrap(~ sample) +
    xlab("Read Length") + ylab("Count")
  # Pass the plot explicitly: the original relied on last_plot(), which only
  # tracks plots that have been *printed*, so it could silently save a stale
  # figure because the ggplot object above was never displayed.
  ggsave(file = paste0(fig_dir, filename), plot = p, width = 15, height = 15)
}
batchPlotReadLengths(lengthDat = test, filename = "alignment_11-28-17_FisherPools_ReadLengthDist.pdf")
|
95d04397990152141e151a5cb34ee4eafb6ee78e
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036452-test.R
|
095431a7c324c0d66c6605f394b093749e04cc7b
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
1610036452-test.R
|
# Auto-generated fuzzer regression case (RcppDeepState/libFuzzer style):
# feeds extreme numeric inputs (Inf, -Inf, NA, subnormal doubles) to the
# internal C routine distr6:::C_EmpiricalMVPdf and dumps the result structure.
testlist <- list(data = structure(c(7.40700664744977e-304, 0, 1.24499602095536e-319, Inf), .Dim = c(4L, 1L)), x = structure(c(1.23802521124132e-308, -Inf, 5.79391781278816e-307, -Inf, 6.14293298952177e-183, NA, 6.14293298947794e-183), .Dim = c(7L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf, testlist)
str(result)
|
48c6b8ca82b704a1420fff5d125250cb0aa7f599
|
9397a453a4b9c4ddd9988235fe4a8ee6720358c1
|
/Paper2/test_droppoints.R
|
fb1089cd755a36b4294c046a0c89bc72266643c1
|
[] |
no_license
|
komazsofi/myPhD_escience_analysis
|
33e61a145a1e1c13c646ecb092081182113dbce3
|
5f0ecdd05e7eaeb7fce30f0c28e0728642164dbc
|
refs/heads/master
| 2021-06-04T21:39:58.115874
| 2020-06-18T12:59:53
| 2020-06-18T12:59:53
| 119,659,750
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,465
|
r
|
test_droppoints.R
|
"
@author: Zsofia Koma, UvA
Aim: Modelling drop out points
"
library("lidR")
library("rgdal")
library("dplyr")
# Set working dirctory
workingdirectory="D:/Sync/_Amsterdam/03_Paper2_bird_lidar_sdm/DataProcess_Paper2_1/"
setwd(workingdirectory)
#Import
las=readLAS("tile_61.laz")
options(digits = 20)
head(las$gpstime)
#Get one flight line
get_flightlines=unique(las@data$PointSourceID)
las_oneline=lasfilter(las,PointSourceID==get_flightlines[1])
writeLAS(las_oneline,"las_oneline.laz")
# Order by GPStime and get the difference - it is slow switch to data.table?
las_oneline_ord=las_oneline@data[order(las_oneline@data$gpstime),]
las_oneline_ord_gpsdiff=transform(las_oneline_ord, diff_gpstime = c(NA, diff(gpstime)))
# Get attributes where the difference is bigger then 0.00001 10^-5
sel_las_oneline=las_oneline_ord_gpsdiff[las_oneline_ord_gpsdiff$diff_gpstime>0.00001,]
write.csv(sel_las_oneline,"test_dropout1.txt")
las_oneline_ord_gpsdiff$isdrop <- 0
las_oneline_ord_gpsdiff$isdrop[las_oneline_ord_gpsdiff$diff_gpstime>0.00001] <- 1
index <- las_oneline_ord_gpsdiff$isdrop == 1
las_oneline_ord_gpsdiff$isdrop[which(las_oneline_ord_gpsdiff$isdrop==TRUE)-1] <- 1
sel_las_oneline_beaft=las_oneline_ord_gpsdiff[las_oneline_ord_gpsdiff$isdrop==1,]
write.csv(sel_las_oneline_beaft,"test_dropout2.txt")
# Put NaN where we would like to interpolate values
#Fill water points
#df2 <- sel_las_oneline_beaft %>%
#mutate_at(vars(Fuel, Dist), na.approx)
|
5be6ea4b83eb1258e5d060f21321dfcdfeb2198b
|
71129b1c03eed2abdd67fc2b52b57874bae49f45
|
/collapsibleTree/R/collapsibleTree.data.frame.R
|
41af5c2381880244df54015dae65b84ca7931a83
|
[] |
no_license
|
Bastiaanspanjaard/LINNAEUS
|
0eb880d8e581f870b58d69cea7060822baf8564a
|
6c86288e8e684d77f5499249023e7157d0c440dc
|
refs/heads/master
| 2022-11-13T10:48:40.584477
| 2020-07-03T14:56:07
| 2020-07-03T14:56:07
| 255,870,978
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,805
|
r
|
collapsibleTree.data.frame.R
|
#' @rdname collapsibleTree
#' @method collapsibleTree data.frame
#' @export
collapsibleTree.data.frame <- function(df, hierarchy, root = deparse(substitute(df)),
                                       inputId = NULL, attribute = "leafCount",
                                       aggFun = sum, fill = "lightsteelblue",
                                       fillByLevel = TRUE, linkLength = NULL, fontSize = 10,
                                       tooltip = FALSE, nodeSize = NULL, collapsed = TRUE,
                                       zoomable = TRUE, width = NULL, height = NULL,
                                       ...) {
  # preserve this name before evaluating df: deparse(substitute(df)) is lazy,
  # so force it now to keep the caller's expression as the default root label
  root <- root
  # acceptable inherent node attributes (computed by data.tree rather than
  # taken from a data frame column)
  nodeAttr <- c("leafCount", "count")
  # reject bad inputs
  if(!is.data.frame(df)) stop("df must be a data frame")
  if(!is.character(hierarchy)) stop("hierarchy must be a character vector")
  if(!is.character(fill)) stop("fill must be a character vector")
  if(length(hierarchy) <= 1) stop("hierarchy vector must be greater than length 1")
  if(!all(hierarchy %in% colnames(df))) stop("hierarchy column names are incorrect")
  if(!(attribute %in% c(colnames(df), nodeAttr))) stop("attribute column name is incorrect")
  if(!is.null(nodeSize)) if(!(nodeSize %in% c(colnames(df), nodeAttr))) stop("nodeSize column name is incorrect")
  if(!(attribute %in% nodeAttr)) {
    if(any(is.na(df[attribute]))) stop("attribute must not have NAs")
  }
  # if df has NAs, coerce them into character columns and replace them with ""
  if(sum(complete.cases(df[hierarchy])) != nrow(df)) {
    df[hierarchy] <- lapply(df[hierarchy], as.character)
    df[is.na(df)] <- ""
  }
  # calculate the right and left margins in pixels from the longest labels
  leftMargin <- nchar(root)
  rightLabelVector <- as.character(df[[hierarchy[length(hierarchy)]]])
  rightMargin <- max(sapply(rightLabelVector, nchar))
  # create a list that contains the options passed through to the JS widget
  options <- list(
    hierarchy = hierarchy,
    input = inputId,
    attribute = attribute,
    linkLength = linkLength,
    fontSize = fontSize,
    tooltip = tooltip,
    collapsed = collapsed,
    zoomable = zoomable,
    margin = list(
      top = 20,
      bottom = 20,
      # approximate label width: half a font size per character, plus padding
      left = (leftMargin * fontSize/2) + 25,
      right = (rightMargin * fontSize/2) + 25
    )
  )
  # these are the fields that will ultimately end up in the json
  jsonFields <- NULL
  # the hierarchy that will be used to create the tree; "//" is the path
  # delimiter so hierarchy values containing "/" do not split into extra nodes
  df$pathString <- paste(
    root,
    apply(df[,hierarchy], 1, paste, collapse = "//"),
    sep="//"
  )
  # convert the data frame into a data.tree node
  node <- data.tree::as.Node(df, pathDelimiter = "//")
  # fill in the node colors, traversing down the tree
  if(length(fill)>1) {
    # a per-node fill vector must cover every node in the tree
    if(length(fill) != node$totalCount) {
      stop(paste("Expected fill vector of length", node$totalCount, "but got", length(fill)))
    }
    node$Set(fill = fill, traversal = ifelse(fillByLevel, "level", "pre-order"))
    jsonFields <- c(jsonFields, "fill")
  } else {
    # a single color is passed once via options instead of per node
    options$fill <- fill
  }
  # only necessary to perform these calculations if there is a tooltip
  if(tooltip) {
    # traverse down the tree and compute the weights of each node for the tooltip
    t <- data.tree::Traverse(node, "pre-order")
    data.tree::Do(t, function(x) {
      x$WeightOfNode <- data.tree::Aggregate(x, attribute, aggFun)
      # make the tooltips look nice
      x$WeightOfNode <- prettyNum(
        x$WeightOfNode, big.mark = ",", digits = 3, scientific = FALSE
      )
    })
    jsonFields <- c(jsonFields, "WeightOfNode")
  }
  # only necessary to perform these calculations if there is a nodeSize specified
  if(!is.null(nodeSize)) {
    # Scale factor to keep the median leaf size around 10
    scaleFactor <- 10/data.tree::Aggregate(node, nodeSize, stats::median)
    # traverse down the tree and compute the size of each node
    t <- data.tree::Traverse(node, "pre-order")
    data.tree::Do(t, function(x) {
      x$SizeOfNode <- data.tree::Aggregate(x, nodeSize, aggFun)
      # scale node growth to area rather than radius and round
      x$SizeOfNode <- round(sqrt(x$SizeOfNode*scaleFactor)*pi, 2)
    })
    # update left margin based on new root size
    options$margin$left <- options$margin$left + node$SizeOfNode - 10
    jsonFields <- c(jsonFields, "SizeOfNode")
  }
  # keep only the JSON fields that are necessary
  if(is.null(jsonFields)) jsonFields <- NA
  data <- data.tree::ToListExplicit(node, unname = TRUE, keepOnly = jsonFields)
  # pass the data and options using 'x'
  x <- list(
    data = data,
    options = options
  )
  # create the widget
  htmlwidgets::createWidget(
    "collapsibleTree", x, width = width, height = height,
    htmlwidgets::sizingPolicy(viewer.padding = 0)
  )
}
|
5df665d82fa00c33987a8349a6ec20f0720b132c
|
d3e67d0e9d5c399e26f8f485fb219799aabe6e93
|
/clasificador_som.r
|
9e377f035e657d3eb80d4c6a8c09988379338c24
|
[] |
no_license
|
igmalta/algoritmo_som
|
ee58c4ff72532fec2d8a9d9a53071b7cdd6afb2d
|
11bc6f5c85fc0ddd2d1940c2894294374e9ee977
|
refs/heads/master
| 2023-04-22T18:11:53.780664
| 2021-05-04T22:46:11
| 2021-05-04T22:46:11
| 364,394,622
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 15,685
|
r
|
clasificador_som.r
|
# Clear the workspace and trigger garbage collection before starting.
# NOTE(review): rm(list = ls()) wipes the interactive session when this
# script is sourced — fine standalone, hostile otherwise.
rm(list=ls())
gc()
# Libraries
# ==============================================================================
# Install-if-missing, then attach. NOTE(review): the trailing `; library(...)`
# runs unconditionally, which is what guarantees attachment either way.
if (!require("data.table")) install.packages("data.table"); library("data.table")
if (!require("ggplot2")) install.packages("ggplot2") ; library("ggplot2")
if (!require("dplyr")) install.packages("dplyr") ; library("dplyr")
if (!require("gridExtra")) install.packages("gridExtra") ; library("gridExtra")
# Read the working data
# ==============================================================================
# Point the working directory at the datasets folder (relative path)
setwd("data/")
# "clouds.csv" dataset: two features plus a class label; features are scaled
clouds <- setDF(fread("clouds.csv"))
names(clouds) <- c("x1", "x2", "y")
clouds[,1:2] <- scale(clouds[,1:2])
# Find the neuron (weight row) closest to an input vector
# ==============================================================================
neuronaCercana <- function(entrada, red){
  #' Return the neuron of the map closest (L1 / Manhattan distance) to `entrada`.
  #'
  #' entrada: numeric feature vector (one observation).
  #' red: matrix (or numeric data.frame) of neuron weights, one neuron per row.
  #'
  #' Returns list(ganadora = winning weight row, indiceGanadora = its row index).
  #'
  #' Vectorised rewrite of the original row-by-row loop. which.min() also
  #' removes the original's hard-coded 1e6 starting distance, which left
  #' `indiceGanadora` undefined whenever every distance exceeded 1e6, while
  #' keeping the same first-wins tie-breaking.
  distancias <- rowSums(abs(sweep(as.matrix(red), 2, as.numeric(entrada))))
  indiceGanadora <- which.min(distancias)
  ganadora <- red[indiceGanadora, ]
  return(list("ganadora" = ganadora, "indiceGanadora" = indiceGanadora))
}
# Plotting helpers
# ==============================================================================
graficarRed <- function(train, red, n, m, epoca) {
  #' Draw the 2-D SOM grid (nodes plus connecting polygons) over the data.
  #'
  #' train: training data (columns x1, x2).
  #' red: neuron weight matrix, one neuron per row.
  #' n: number of neurons along the x axis (width).
  #' m: number of neurons along the y axis (height).
  #' epoca: epoch number, shown in the x-axis label.
  # Convert to data.table
  train <- data.table(train)
  red <- data.table(red)
  # Working row id for each neuron
  red[, ID := .I]
  # Number of polygon (grid-cell) groups
  cntGrupos <- (n-1) * (m-1)
  # Empty frame: one row per candidate polygon, four vertex columns
  dfGrupos <- data.frame(matrix(NA, ncol = 4, nrow = m*(n-1)-1))
  # Candidate indices where a polygon may start
  vertices <- 1:(m*(n-1)-1)
  # Indices that cannot start a polygon (last neuron of each grid column)
  eliminar <- c()
  for (i in 1:(n-2)){
    eliminar <- c(eliminar, i*m)
  }
  # Drop the non-starting vertices
  vertices <- vertices[-eliminar]
  # Each polygon is four vertex ids listed in a fixed sequence for drawing
  for (i in vertices){
    dfGrupos[i,] <- c(i, i + 1, (i + m) + 1, i + m)
  }
  # Remove the unused (all-NA) rows
  dfGrupos <- data.table(na.omit(dfGrupos))
  # One identifier per polygon group
  dfGrupos[, GRUPO := .I]
  # Reshape to long format: one row per (group, vertex)
  reshape <- melt(dfGrupos, id.vars = c("GRUPO"))
  # Drop the melt-generated variable column
  reshape[, variable := NULL]
  # Rename columns
  names(reshape) <- c("GRUPO", "ID")
  # Join the polygon membership onto the neuron coordinates
  red <- reshape[red, on = "ID"]
  names(red) <- c("grupo", "id", "x", "y")
  # Order the rows by polygon group
  setorder(red, grupo)
  # Swap two vertex positions per polygon by adding/subtracting one from id,
  # so geom_polygon receives the vertices in perimeter order
  for (i in 1:cntGrupos){
    red[((i*4)-1), id := id + 1]
    red[(i*4) , id := id - 1]
  }
  # Order the rows by vertex id
  setorder(red, id)
  # Plot: data cloud, grid polygons, neuron nodes
  gg <- ggplot() +
    geom_point(data = train, aes(x = x1, y = x2), color = "#2cb1c9") +
    geom_polygon(data = red, aes(x = x, y = y, group = grupo), color ="grey8",
                 alpha = 0) +
    geom_point(data = red, aes(x = x, y = y), color = "red", size=3) +
    xlab(paste0("Época N°: ", epoca))
  # NOTE(review): theme_minimal() below is a detached statement — its value
  # is discarded and it is NOT added to the plot (a `+` is missing above).
  theme_minimal()
  return(gg)
}
# 2-D classification scatterplot
categorias <- function(xValid, clasePredicciones, title){
  # Scatterplot of the two features x1/x2, coloured on a continuous gradient
  # by the class column whose name is given in `clasePredicciones`.
  ggplot(xValid, aes(x1, x2, color = get(clasePredicciones))) +
    geom_point(shape = 16, size = 3, show.legend = FALSE) +
    theme_minimal() +
    scale_color_gradient(low = "#0091ff", high = "#f0650e") +
    ggtitle(title)
}
# ==============================================================================
SOM <- function(train, clase, n, m, prob, numEpocasGrueso, numEpocasMedio,
                numEpocasFino, tasaAprendizajeGrueso, tasaAprendizajeMedio,
                tasaAprendizajeFino, sleep, saltoGrafico, seed){
  #' Train a SOM on `train` and evaluate it as a classifier on a held-out split.
  #'
  #' train: data with feature columns plus one class column.
  #' clase: name of the class column.
  #' n: number of neurons along the x axis.
  #' m: number of neurons along the y axis.
  #' prob: fraction (0-1) of the data used for training; the rest validates.
  #' numEpocasGrueso: epochs in the topological-ordering (coarse) stage.
  #' numEpocasMedio: epochs in the transition stage.
  #' numEpocasFino: epochs in the fine-tuning stage.
  #' tasaAprendizajeGrueso: learning rate in the coarse stage.
  #' tasaAprendizajeMedio: learning rate in the transition stage.
  #' tasaAprendizajeFino: learning rate in the fine-tuning stage.
  #' sleep: seconds to pause after each plot refresh.
  #' saltoGrafico: plot the map every this-many epochs.
  #' seed: RNG seed for the split and the weight initialisation.
  # Temporary row id used to build the train/validation split
  train <- as.data.table(mutate(train, idtempo = row_number()))
  # Stratified split: `prob` of each class goes to training, the rest to
  # validation
  set.seed(seed)
  dtrain <- as.data.table(train %>%
                            group_by(!!as.name(clase)) %>%
                            sample_frac(prob) %>%
                            ungroup)
  dvalid <- as.data.table(anti_join(train, dtrain, by = "idtempo"))
  # Drop the helper column
  dtrain[, idtempo := NULL]
  dvalid[, idtempo := NULL]
  # Separate the features from the class label
  xTrain <- as.matrix(dtrain[, !clase, with = FALSE])
  yTrain <- as.matrix(dtrain[, get(clase)])
  xValid <- as.matrix(dvalid[, !clase, with = FALSE])
  yValid <- dvalid[, get(clase)]
  # Regular grid of neuron positions (the map) with unit spacing
  red <- data.frame(cbind(rep(1:n, each = m), rep(seq(1:m), n)))
  # Pairwise Manhattan distances between all grid positions
  matrizVecinos <- as.matrix(dist(red, method = "manhattan"))
  # Coarse-stage neighbourhood: roughly half the map radius; precompute, per
  # neuron, which neighbours fall within it
  vecindadGruesa <- floor(( n + m)/4)
  indicesGrueso <- apply(matrizVecinos, 1,
                         function(x) (which(x <= vecindadGruesa)))
  # Scale the grid positions into weight space; +3 offsets the map away from
  # the (scaled) data so the unfolding is visible
  red <- scale(red) + 3
  # A one-dimensional map is aligned vertically on the x = 0 line
  if (n == 1){
    red[,1] <- 0
  }
  # Random matrix in a small range, same shape as the map
  set.seed(seed)
  redRandom <- matrix(runif(n * m * ncol(xTrain), 0.1, 0.25),
                      ncol = ncol(xTrain))
  # Add the random matrix to give the initial weights some jitter
  red <- red + redRandom
  # Total number of epochs across the three stages
  numEpoca <- numEpocasGrueso + numEpocasMedio + numEpocasFino
  # Online training: one randomly drawn observation per epoch
  for (i in 1:numEpoca){
    cat(paste0("Epoca N°: ", i, "...", "\n"))
    # Pick a random observation
    entrada <- xTrain[sample(nrow(xTrain), 1),]
    # Winning neuron = the one closest to the input
    ganadora <- neuronaCercana(entrada, red)
    # Stage 1: topological ordering (large fixed neighbourhood and rate)
    if (i <= numEpocasGrueso){
      # Update the winner's weights
      red[ganadora$indiceGanadora, ] <- red[ganadora$indiceGanadora,] +
        tasaAprendizajeGrueso * (entrada - red[ganadora$indiceGanadora,])
      # Update the winner's neighbours
      for (k in 1:length(indicesGrueso[[ganadora$indiceGanadora]])){
        indice <- indicesGrueso[[ganadora$indiceGanadora]][[k]]
        red[indice, ] <- red[indice, ] + tasaAprendizajeGrueso *
          (entrada - red[indice, ])
      }
    # Stage 2: transition (rate and neighbourhood shrink linearly)
    } else if (i < (numEpocasGrueso + numEpocasMedio)) {
      # Learning rate decreases linearly with the epoch
      tasaAprendizajeMedio <- tasaAprendizajeGrueso -
        ((tasaAprendizajeGrueso - tasaAprendizajeMedio) / numEpocasMedio) *
        (i - numEpocasGrueso)
      # Neighbourhood radius decreases linearly as well
      vecindadMedia <-
        floor(vecindadGruesa - (vecindadGruesa - 1) / numEpocasMedio *
                (i - numEpocasGrueso))
      indicesMedio <-
        apply(matrizVecinos, 1, function(x) (which(x <= vecindadMedia)))
      # Update the winner's weights.
      # NOTE(review): the `1:2` column subset below hard-codes two features,
      # unlike the other two stages — confirm intended for >2-column inputs.
      red[ganadora$indiceGanadora, ] <- red[ganadora$indiceGanadora, 1:2] +
        tasaAprendizajeMedio * (entrada - red[ganadora$indiceGanadora, ])
      # Update the winner's neighbours
      for (k in 1:length(indicesMedio[[ganadora$indiceGanadora]])){
        indice <- indicesMedio[[ganadora$indiceGanadora]][[k]]
        red[indice, ] <- red[indice, ] + tasaAprendizajeMedio *
          (entrada - red[indice, ])
      }
    # Stage 3: fine tuning (winner only, small fixed rate)
    } else {
      # Update the winner's weights
      red[ganadora$indiceGanadora, ] <- red[ganadora$indiceGanadora, ] +
        tasaAprendizajeFino * (entrada - red[ganadora$indiceGanadora, ])
    }
    # Periodic visualisation of the unfolding map
    if (i %% saltoGrafico == 0 | i == 1 ){
      plot(graficarRed(xTrain, red, n, m, i))
      Sys.sleep(sleep)
    }
  }
  # Classify the training set
  # --------------------------------------------------------------------------
  # One row per training observation, one column per neuron: each entry holds
  # the distance from that observation to that neuron
  distanciaAneuronas <- matrix(0, nrow(xTrain), nrow(red))
  neuronaMasCercana <- c()
  # Distance between every observation and every map neuron
  for (i in 1:nrow(xTrain)){
    for (j in 1:nrow(red)){
      # Absolute (Manhattan) distance rather than Euclidean
      distanciaAneuronas[i,j] <- sum(abs(xTrain[i,] - red[j,]))
    }
  }
  # Assign each observation to its nearest neuron
  for (i in 1:nrow(xTrain)){
    neuronaMasCercana[i] <- which.min(distanciaAneuronas[i,])
  }
  # Pair each observation's class with its assigned neuron
  claseRed <- data.frame(cbind(neuronaMasCercana, yTrain))
  names(claseRed) <- c("neurona", "clase")
  # Count how many observations of each class land on each neuron
  claseMayor <- claseRed %>%
    group_by(neurona, clase) %>%
    summarise(cnt = n()) %>%
    as.data.table()
  # Keep, for each neuron, its majority class
  claseNeuronas <- claseMayor %>% group_by(neurona) %>% slice(which.max(cnt))
  # Columns of interest only
  claseNeuronas <- claseNeuronas[, 1:2]
  # Index used below to map validation assignments back to neuron numbers
  claseNeuronas$id <- 1:nrow(claseNeuronas)
  # Predict on the validation split
  # --------------------------------------------------------------------------
  # Neurons with no training observations have no known class; drop them
  redConClases <- red[claseNeuronas$neurona,]
  # Distance matrix and assignment vector for the validation data
  distAneuronaValid <- matrix(0, nrow(xValid), nrow(redConClases))
  nMasCercanaValid <- c()
  # Distance between every validation observation and every labelled neuron
  for (i in 1:nrow(xValid)){
    for (j in 1:nrow(redConClases)){
      # Absolute (Manhattan) distance rather than Euclidean
      distAneuronaValid[i,j] <- sum(abs(xValid[i,] - redConClases[j,]))
    }
  }
  # Assign each validation observation to its nearest labelled neuron
  for (i in 1:nrow(xValid)){
    nMasCercanaValid[i] <- which.min(distAneuronaValid[i,])
  }
  # Map positions within redConClases (ids) back to neuron numbers in `red`:
  # the nearest-neuron value in valid corresponds to `id` in claseNeuronas
  nMasCercanaValidMatch <- claseNeuronas[match(nMasCercanaValid, claseNeuronas$id ),
                                         "neurona"]
  # Attach the real class and the nearest neuron to the validation features
  xValid <- data.frame(cbind(xValid, claseReal = yValid,
                             nc = nMasCercanaValidMatch$neurona))
  # Look up each assigned neuron's majority class as the prediction
  xValid[,5] <- claseNeuronas[match(xValid$nc, claseNeuronas$neurona), "clase"]
  names(xValid) <- c("x1", "x2", "claseReal", "neuronaCercana", "clasePredicha")
  # Fraction of correct predictions on the held-out data
  aciertos <- sum(xValid$claseReal - xValid$clasePredicha == 0) / nrow(xValid)
  return(list("aciertos" = aciertos, "xValid" = xValid))
}
# EVALUATE THE CLASSIFIER
# ==============================================================================
# ==============================================================================
# SOM run over the clouds dataset
# ==============================================================================
# TEST 1: 20 x 20 grid with coarse learning rate 0.3
som <- SOM(train = clouds,
           clase = "y",
           n = 20,
           m = 20,
           prob = 0.5,
           numEpocasGrueso = 1000,
           numEpocasMedio = 1000,
           numEpocasFino = 1000,
           tasaAprendizajeGrueso = 0.3,
           tasaAprendizajeMedio = 0.1,
           tasaAprendizajeFino = 0.03,
           sleep = 1,
           saltoGrafico = 500,
           seed = 123)
# Side-by-side scatterplots: predicted classes (with accuracy in the title)
# versus the real classes
titulo <- paste0("Categorías Predichas - Tasa de aciertos: ", som$aciertos,
                 " - Malla: 20 x 20 ")
g1 <- categorias(xValid = som$xValid, clasePredicciones = "clasePredicha",
                 title = titulo)
g2 <- categorias(xValid = som$xValid, clasePredicciones = "claseReal",
                 title = "Categorías Reales")
grid.arrange(g1, g2, nrow = 1)
|
ccacb4745aa3b912ce4255cce7e0aaabdeb91ddd
|
17fad5a66a5335387480a1767d742e2213085dd1
|
/R/5. Logistic Regression/Case Study 1/Credit Card Prediction.R
|
c251f15ca6a1826c029f8c9a48fc2f1ca2f20a4e
|
[] |
no_license
|
abhijitvp/DS
|
e3c2bd36d5974653d75bb74b525f7b93ede81b87
|
320ba0b9b8aeff6cb91f1579a72acfd2959ad907
|
refs/heads/master
| 2020-04-01T23:13:32.861798
| 2018-12-13T06:26:32
| 2018-12-13T06:26:32
| 153,749,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,285
|
r
|
Credit Card Prediction.R
|
# Logistic Regression
# Problem Statement:
# ==================
# ----------------------------------------------
# Packages needed: dplyr, car
# FIX(review): the bare "----" separator lines were not comments; R parsed
# them as a chain of unary minus operators applied to the next expression,
# which fails at runtime (e.g. `-NULL`). They are now commented out.
# Install the packages if they are not already present.
if (!("dplyr" %in% rownames(installed.packages()))) {
  install.packages("dplyr")
}
if (!("car" %in% rownames(installed.packages()))) {
  install.packages("car")
}
# Load the packages used below.
library(dplyr)
library(car)
# ----------------------------------------------
# Since the problem statement has not been stated, set the working directory
# to the location of this script (requires running inside RStudio).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
# Read the four data sets; keep strings as character, not factors.
ds1 <- read.csv("ds1.csv", stringsAsFactors = FALSE)
ds2 <- read.csv("ds2.csv", stringsAsFactors = FALSE)
ds3 <- read.csv("ds3.csv", stringsAsFactors = FALSE)
ds4 <- read.csv("ds4.csv", stringsAsFactors = FALSE)
# Quick look at the structure of each set.
glimpse(ds1)
glimpse(ds2)
glimpse(ds3)
glimpse(ds4)
# Looking at the data we can guess we need to find out if a card can be
# offered to the customer. Use the first 3 sets as the training pool and
# the 4th for the final output predictions.
data.temp.set <- as.data.frame(rbind(ds1, ds2, ds3))
output.set <- ds4
# Create a backup of the combined set before any transformation.
data.temp.set.bk <- data.temp.set
#-----------------------------------------------------------
# STEP 3
# Try to get most of the columns into numeric form.
# Create dummy variables for demographic_slice (one indicator per level;
# the remaining level is absorbed by the intercept).
data.temp.set <- data.temp.set %>%
  mutate(DS_AX03efs = as.numeric(demographic_slice=="AX03efs"),
         DS_BWEsk45 = as.numeric(demographic_slice=="BWEsk45"),
         DS_CARDIF2 = as.numeric(demographic_slice=="CARDIF2")) %>%
  select(-demographic_slice)
# Create a dummy variable for country_reg (1 = East).
data.temp.set <- data.temp.set %>%
  mutate(CR_East = as.numeric(country_reg=="E")) %>%
  select(-country_reg)
# Create a dummy variable for ad_exp (1 = exposed to ads).
data.temp.set <- data.temp.set %>%
  mutate(AE_Yes = as.numeric(ad_exp=="Y")) %>%
  select(-ad_exp)
# Recode the response card_offer: TRUE -> 1, FALSE -> 0.
data.temp.set$card_offer[which(data.temp.set$card_offer==TRUE)]=1
# Set the seed so the splits are reproducible for review.
set.seed(2)
# Split the data 70/30 into a training pool and a test set.
s=sample(1:nrow(data.temp.set),0.7*nrow(data.temp.set))
train.temp.set=data.temp.set[s,]
test.set=data.temp.set[-s,]
# Split the training pool 70/30 into train and validation sets.
s1=sample(1:nrow(train.temp.set),0.7*nrow(train.temp.set))
train.set=train.temp.set[s1,]
val.set=train.temp.set[-s1,]
# --- Multicollinearity check and model fitting on the training set ---
library(car)
for_vif=lm(card_offer~.-customer_id,data=train.set)
sort(vif(for_vif), decreasing = T)
#Getting the error "there are aliased coefficients in the model"
#alias(lm(card_offer ~. -customer_id, data=train.set))
#Found that the linear variables were DS_CARDIF2 and DS_BWEsk45
#Hence removing DS_CARDIF2
#for_vif=lm(card_offer~. -customer_id -DS_CARDIF2, data=train.set)
#sort(vif(for_vif),decreasing = T)
# Considering VIF > 4 as the threshold for removing collinear variables.
for_vif=lm(card_offer~. -customer_id -imp_cscore, data=train.set)
sort(vif(for_vif),decreasing = T)
fit.set <- train.set %>%
  select(-customer_id, -imp_cscore)
# Fit the logistic regression model.
fit <- glm(card_offer~., family="binomial", data=fit.set)
# Stepwise variable reduction judged by AIC (Akaike Information Criterion).
fit <- step(fit)
# Inspect the reduced formula.
formula(fit)
# Refit with the selected variables to check their significance.
fit1 <- glm(card_offer ~ est_income + hold_bal + pref_cust_prob + RiskScore +
              imp_crediteval + DS_AX03efs + DS_CARDIF2 + CR_East, data=fit.set, family="binomial")
summary(fit1)
# --- Repeat all the steps on the validation set ---
library(car)
for_vif=lm(card_offer~.-customer_id,data=val.set)
sort(vif(for_vif), decreasing = T)
#Getting the error "there are aliased coefficients in the model"
#alias(lm(card_offer ~. -customer_id, data=val.set))
#Found that the linear variables were DS_CARDIF2 and DS_BWEsk45
#Hence removing DS_CARDIF2
#for_vif=lm(card_offer~. -customer_id -DS_CARDIF2, data=val.set)
#sort(vif(for_vif),decreasing = T)
# Considering VIF > 4 as the threshold for removing collinear variables.
for_vif=lm(card_offer~. -customer_id -imp_cscore, data=val.set)
sort(vif(for_vif),decreasing = T)
fit.set <- val.set %>%
  select(-customer_id, -imp_cscore)
# Fit the logistic regression model on the validation data.
fit <- glm(card_offer~., family="binomial", data=fit.set)
# Stepwise variable reduction judged by AIC (Akaike Information Criterion).
fit <- step(fit)
# Inspect the reduced formula.
formula(fit)
# Refit with the selected variables to check their significance.
fit2 <- glm(card_offer ~ est_income + hold_bal + pref_cust_prob + imp_crediteval +
              DS_AX03efs + DS_CARDIF2 + CR_East, data=fit.set, family="binomial")
summary(fit2)
# Compare the two formulae and build fit_final from the variables they share.
formula(fit1)
formula(fit2)
fit_final <- glm(card_offer ~ est_income + hold_bal + pref_cust_prob + imp_crediteval +
                   DS_AX03efs + DS_CARDIF2 + CR_East, family="binomial", data=train.set)
summary(fit_final)
# Score the validation set with predicted probabilities.
val.set$score <- predict(fit_final, newdata = val.set, type = "response")
# Determine the KS cutoff: sweep 100 candidate cutoffs and record the
# confusion-matrix counts at each one.
cutoff_data=data.frame(cutoff=0,TP=0,FP=0,FN=0,TN=0,P=0,N=0)
cutoffs=seq(0,1,length=100)
cutoff = cutoffs[3]
for (cutoff in cutoffs){
  predicted=as.numeric(val.set$score>cutoff)
  TP=sum(predicted==1 & val.set$card_offer==1)
  FP=sum(predicted==1 & val.set$card_offer==0)
  FN=sum(predicted==0 & val.set$card_offer==1)
  TN=sum(predicted==0 & val.set$card_offer==0)
  P=FN+TP
  N=TN+FP
  cutoff_data=rbind(cutoff_data,c(cutoff,TP,FP,FN,TN,P,N))
}
# Remove the dummy top row used to initialise the frame.
cutoff_data=cutoff_data[-1,]
# Derive sensitivity, specificity, KS statistic, accuracy, lift and a
# weighted misclassification cost M at every cutoff.
cutoff_data=cutoff_data %>%
  mutate(Sn=TP/P, Sp=TN/N,dist=sqrt((1-Sn)**2+(1-Sp)**2)) %>%
  mutate(KS=abs((TP/P)-(FP/N))) %>%
  mutate(Accuracy=(TP+TN)/(P+N)) %>%
  mutate(Lift=(TP/P)/((TP+FP)/(P+N))) %>%
  mutate(M=(8*FN+2*FP)/(P+N)) %>%
  select(-P,-N)
# Score the held-out test set and classify at the KS-optimal cutoff.
test.set$score=predict(fit_final,newdata = test.set,type = "response")
KS_cutoff=cutoff_data$cutoff[which(cutoff_data$KS==max(cutoff_data$KS))]
KS_cutoff
table(y = test.set$card_offer,cutoff = as.numeric(test.set$score>KS_cutoff))
# Accuracy of the model = (7184+1302)/(7184+490+24+1302) = 94.28%
# Munge the output (ds4) data the same way as the training data.
# Create dummy variables for demographic_slice.
output.set <- output.set %>%
  mutate(DS_AX03efs = as.numeric(demographic_slice=="AX03efs"),
         DS_BWEsk45 = as.numeric(demographic_slice=="BWEsk45"),
         DS_CARDIF2 = as.numeric(demographic_slice=="CARDIF2")) %>%
  select(-demographic_slice)
# Create a dummy variable for country_reg.
output.set <- output.set %>%
  mutate(CR_East = as.numeric(country_reg=="E")) %>%
  select(-country_reg)
# Create a dummy variable for ad_exp.
output.set <- output.set %>%
  mutate(AE_Yes = as.numeric(ad_exp=="Y")) %>%
  select(-ad_exp)
# Convert the card_offer field to numeric.
output.set$card_offer <- as.numeric(output.set$card_offer)
# Score the output set and classify at the KS cutoff found above.
output.set$score=predict(fit_final,newdata = output.set,type = "response")
output.set$card_offer = as.logical(output.set$score>KS_cutoff)
write.csv(output.set, file = "DS4_output.csv")
|
f7b499a38007d8e9d2cbb1db4ca6d740d22a223c
|
e10648f044122f75e85e49f0d25141ccf218e276
|
/Package_Niels/man/lab2.Rd
|
b3bff4a851482f897a7dc19081e7b32a962dac43
|
[] |
no_license
|
rainbowniels/Advanced-Models-in-R
|
7b2aa089ed475741122f3f4a648c630c40f11079
|
128568a875aeefa38f413e38acb41b3e7161cf28
|
refs/heads/master
| 2021-07-15T13:20:19.465057
| 2017-10-23T02:37:00
| 2017-10-23T02:37:00
| 107,921,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 525
|
rd
|
lab2.Rd
|
\name{lab2}
\alias{lab2}
\title{
Exercise 2 in advanced modelling in R
}
\description{
A function named \code{lab2} which produces log-likelihood scores, written as an exercise for advanced modelling in R.
}
\usage{
lab2(theta,y,X,Z)
}
\arguments{
\item{theta}{
value of theta}
\item{Z}{
value of Z}
\item{X}{
value of X}
\item{y}{
value of y
}
}
\author{
Niels Kruse, \email{niels.kruse@uzh.ch}
}
\keyword{ regression, mls, log }
|
95b3ae68cff52b217517c6c9644dda691c509c2f
|
60dffdc12c12478b469d78cf4cfe5ee148107dd4
|
/R/calculateGLR.R
|
8147045303bf2de6c629ae57eec88dc92006b74c
|
[] |
no_license
|
cran/evian
|
dab851281636703b01c062b148327ceed16b7c28
|
e65295408da03281978691febd50c92657d972ea
|
refs/heads/master
| 2020-03-27T03:58:07.273598
| 2019-05-23T15:30:03
| 2019-05-23T15:30:03
| 145,902,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
r
|
calculateGLR.R
|
# Compute the generalized likelihood ratio (GLR) for a single SNP.
#
# snp           : SNP identifier to analyse
# formula_tofit : model formula; the genotype column is referenced as X
# model         : genetic model ('additive', 'dominant', ..., 'overdominance')
# data, bim     : genotype data and PLINK .bim annotation (chr/snp/dist/pos/ref/alt)
# lolim, hilim  : grid-search bounds for theta (derived from bse when NULL)
# m             : number of grid points; bse: bound in SE units
# family        : glm family; c: half-width of the null region H0: -c<=theta<=c
# plinkCC       : PLINK case/control coding flag passed to subsetData
# Returns a one-row data.frame with GLR, boundary, allele frequency and
# SNP annotation.
calculateGLR=function(snp,formula_tofit,model,data,bim,lolim,hilim,m,bse,family,c,plinkCC){
#same as evian linear/logit in cleaning data, picking models, and calculating the likelihood
# GLR does not have a robust correction
data_subset=subsetData(snp,formula_tofit,data,plinkCC) # subset so that it only contains 1 snp
af=sum(as.numeric(data_subset$X))/(2*nrow(data_subset)) #allele frequency w.r.t. ref at this stage
data_nomiss=adjustModel(data_subset,model) #adjust genotypes based on the genetic model
names(bim)=c('chr','snp','physicalDist','pos','ref','alt') #ref is the allele in effect
ref=bim$ref[bim$snp==snp]; alt=bim$alt[bim$snp==snp]; pos=bim$pos[bim$snp==snp]
if (model=='overdominance'){ # parameter of interest is the deviation from additive
formula_tofit=as.formula(paste(as.character(formula_tofit)[2],'~X1+',as.character(formula_tofit)[3]))
}
robustFactor=1
if (is.null(lolim)| is.null(hilim)){ # if bse is provided, it will ignore the input lower and upper bound.
#running this feature will significantly increase calculation time
bounds=getGridBound(formula=formula_tofit,data=data_nomiss,bse=bse,k=1,m=m,family=family,robust=robustFactor)
lolim=bounds[1]; hilim=bounds[2]
}
# the theoretical derivation of GLR requires a symmetrical region, i.e. H0 -c<=theta<=c
c=abs(c)
# fix the theta estimation region (lolim and hilim) so that it contains (-c,c) and is symmetrical
maxBound=max(abs(lolim),abs(hilim),c)
if (maxBound==c){
stop(paste0('paramater c input (|c|=',c,') exceeds the current grid search range (',lolim,', ',hilim,') for theta estimation. Please provide a suitable search range or c value.'))
}
lolim=-maxBound; hilim=maxBound
rst=profilelike.glm(formula=formula_tofit,profile.theta ='X',data=data_nomiss,lo.theta=lolim,hi.theta = hilim,length=m,family=family)
# GLR = max normalized profile likelihood outside (-c, c) over max inside it
alternative=rst$profile.lik.norm[rst$theta < -c | rst$theta > c]
null=rst$profile.lik.norm[rst$theta >= -c & rst$theta <= c]
glr=max(alternative, na.rm=T)/max(null, na.rm=T)
# summarize all results for output
summaryStats=data.frame(GLR=glr,boundary=c,AF=af,SNP=snp,bp=pos,effect=ref,ref=alt,stringsAsFactors = F)
return(summaryStats)
}
|
f08b410b1b6b4a0fbc9735dbd458be8fdcf4ad6f
|
a82d76818477a635dfa49cd7da7694703e0973c3
|
/app.R
|
b2016a2d8249349bd6f9c456949e8a021cc135fc
|
[] |
no_license
|
WarrenSink/Election2020-shiny
|
b38e0e0732edab01e909f718d632063133a71b51
|
5b71d30a91eedd4224044eb013fca7fa5601132b
|
refs/heads/main
| 2023-01-12T13:22:27.995653
| 2020-11-14T22:36:10
| 2020-11-14T22:36:10
| 312,733,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 726
|
r
|
app.R
|
library(shiny)
library(spData)
library(tmap) # for static and interactive maps
library(leaflet) # for interactive maps
library(tidyverse) # tidyverse data visualization package
library(sf)
get_congress_map <- function(cong=113) {
  # Download the UCLA congressional-district shapefile archive for the given
  # Congress number, unzip it into the session temp directory, and read the
  # shapefile with sf::st_read().
  #
  # cong : Congress number (e.g. 113, 114); zero-padded into the URL/paths.
  # Returns the sf object produced by st_read().
  tmp_file <- tempfile()
  tmp_dir <- tempdir()
  # FIX(review): the downloaded zip was never removed; clean it up on exit,
  # even if download/unzip/read fails.
  on.exit(unlink(tmp_file), add = TRUE)
  zp <- sprintf("http://cdmaps.polisci.ucla.edu/shp/districts%03i.zip", cong)
  download.file(zp, tmp_file)
  unzip(zipfile = tmp_file, exdir = tmp_dir)
  # Build the path to the extracted .shp inside the archive's folder layout.
  fpath <- file.path(tmp_dir, sprintf("districtShapes/districts%03i.shp", cong))
  st_read(fpath)
}
# Fetch the 114th Congress district geometries.
cd114 <- get_congress_map(114)
# NOTE(review): this first assignment is immediately overwritten by the next
# statement, and the tmap object below uses the *untransformed* us_states
# (only the projection argument is set) — confirm which was intended.
us_states_map = st_transform(us_states, 2163)
us_states_map = tm_shape(us_states, projection = 2163) + tm_polygons() +
  tm_layout(frame = FALSE)
|
5aa8d924bd709d02aa5ecd0a2e4bc4d75b5a042a
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/lwgeom/R/split.R
|
4b524dff1b4242dd11ec678cd51855efe383d6e2
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,696
|
r
|
split.R
|
#' Split geometries with a blade, returning the resulting collection
#'
#' @name st_split
#' @param x object whose geometries are to be split
#' @param y the blade object to split with; if \code{y} contains more than one
#'   feature geometry, the geometries are first merged with
#'   \link[sf:geos_combine]{st_combine}
#' @return object of the same class as \code{x}
#' @examples
#' library(sf)
#' l = st_as_sfc('MULTILINESTRING((10 10, 190 190), (15 15, 30 30, 100 90))')
#' pt = st_sfc(st_point(c(30,30)))
#' st_split(l, pt)
#' @export
st_split <- function(x, y) {
  # Dispatch on the class of x (sfg / sfc / sf methods below).
  UseMethod("st_split")
}
#' @export
st_split.sfg <- function(x, y) {
  # Promote both geometries to sfc, split, then unwrap the single result.
  splitted <- st_split(st_geometry(x), st_geometry(y))
  splitted[[1]]
}
#' @export
st_split.sfc <- function(x, y) {
  # A multi-feature blade is merged into one geometry before splitting.
  if (length(y) > 1) {
    y <- sf::st_combine(y)
  }
  # Polygonal geometries may only be split by a (MULTI)LINESTRING blade;
  # otherwise x itself must be a (MULTI)LINESTRING.
  polygonal <- inherits(x, "sfc_POLYGON") || inherits(x, "sfc_MULTIPOLYGON")
  if (polygonal) {
    stopifnot(inherits(y, "sfc_LINESTRING") || inherits(y, "sfc_MULTILINESTRING"))
  } else {
    stopifnot(inherits(x, "sfc_LINESTRING") || inherits(x, "sfc_MULTILINESTRING"))
  }
  # Delegate the actual splitting to the compiled helper, keeping the CRS.
  st_sfc(CPL_split(x, st_geometry(y)), crs = st_crs(x))
}
#' @export
st_split.sf <- function(x, y) {
  # Split the geometry column and put the result back on the sf object.
  new_geom <- st_split(st_geometry(x), y)
  st_set_geometry(x, new_geom)
}
#' Get a substring from a linestring
#'
#' Extracts the part of each linestring between two relative distances
#' along its length.
#'
#' @export
#' @param x object of class \code{sfc}, \code{sf} or \code{sfg}
#' @param from relative distance from origin (in [0,1])
#' @param to relative distance from origin (in [0,1])
#' @param ... ignored
#' @param tolerance tolerance parameter, when to snap to a line node
#' @return object of class \code{sfc}
#' @examples
#' library(sf)
#' lines = st_sfc(st_linestring(rbind(c(0,0), c(1,2), c(2,0))), crs = 4326)
#' spl = st_linesubstring(lines, 0.2, 0.8) # should warn
#' plot(st_geometry(lines), col = 'red', lwd = 3)
#' plot(spl, col = 'black', lwd = 3, add = TRUE)
#' st_linesubstring(lines, 0.49999, 0.8) # three points
#' st_linesubstring(lines, 0.49999, 0.8, 0.001) # two points: snap start to second node
st_linesubstring <- function(x, from, to, tolerance, ...) {
  # Dispatch on the class of x (sfc / sf / sfg methods below).
  UseMethod("st_linesubstring")
}
#' @export
st_linesubstring.sfc <- function(x, from, to, tolerance = 0.0, ...) {
  # Relative distances are Cartesian; warn for geographic coordinates.
  if (isTRUE(st_is_longlat(x))) {
    warning("st_linesubstring does not follow a geodesic; you may want to use st_geod_segmentize first")
  }
  st_sfc(CPL_linesubstring(x, from, to, tolerance), crs = st_crs(x))
}
#' @export
st_linesubstring.sf <- function(x, from, to, tolerance = 0.0, ...) {
  # Relative distances are Cartesian; warn for geographic coordinates.
  if (isTRUE(st_is_longlat(x))) {
    warning("st_linesubstring does not follow a geodesic; you may want to use st_geod_segmentize first")
  }
  clipped <- st_linesubstring(st_geometry(x), from, to, tolerance)
  st_set_geometry(x, clipped)
}
#' @export
st_linesubstring.sfg <- function(x, from, to, tolerance = 0.0, ...) {
  # Wrap the single geometry, clip it, and unwrap the lone result.
  result <- CPL_linesubstring(st_geometry(x), from, to, tolerance)
  result[[1]]
}
|
a2bf41ac9bd2d9dfc6686c402c6c42a94e5d92c4
|
85d70ac0202118ab478c49daead8a3f6c272cdda
|
/cachematrix.R
|
943de2d0cada1a2c64e8734864026da5846341e0
|
[] |
no_license
|
preeti-d/ProgrammingAssignment2
|
5e178dfab1a8623e5e8827eb01959d23da97000c
|
b1618cd5e0d8d22f5dc21b87ea0d54d51ac1ee2b
|
refs/heads/master
| 2021-01-14T13:06:28.777534
| 2015-04-25T23:23:29
| 2015-04-25T23:23:29
| 34,301,116
| 0
| 0
| null | 2015-04-21T03:10:52
| 2015-04-21T03:10:51
| null |
UTF-8
|
R
| false
| false
| 2,014
|
r
|
cachematrix.R
|
## makeCacheMatrix builds a special "matrix" object able to cache its own
## inverse. Matrix inversion can be a costly operation, so the inverse is
## stored once computed (via the `<<-` operator, which assigns into the
## enclosing environment) and reused until the matrix is replaced.
##
## The returned list exposes four functions:
##   set        : store a new matrix and drop any cached inverse
##   get        : return the stored matrix
##   setinverse : cache the supplied inverse
##   getinverse : return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    # Any previously cached inverse is stale once the matrix changes.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" object created by
## makeCacheMatrix. If the inverse is already cached it is returned directly,
## skipping the computation; otherwise it is computed with solve(), cached
## via setinverse(), and returned.
##
## x   : object produced by makeCacheMatrix wrapping the matrix to invert
## ... : further arguments forwarded to solve()
## NOTE: only works on a SQUARE matrix and assumes it is invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # FIX(review): `...` was accepted but silently dropped; forward it to solve().
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
}
## Sample run:
## > x = rbind(c(1, 2), c(2,1))
## > m = makeCacheMatrix(x)
## > m$get()
## [,1] [,2]
## [1,] 1 2
## [2,] 2 1
## > m$getinverse()
## NULL
## > cacheSolve(m)
## [,1] [,2]
## [1,] -0.3333333 0.6666667
## [2,] 0.6666667 -0.3333333
## > ## Retrieving from the cache in the second execution
## > cacheSolve(m)
## getting cached data.
## [,1] [,2]
## [1,] -0.3333333 0.6666667
## [2,] 0.6666667 -0.3333333
|
e1257f54af036fd8df3f6f69e8205126eb28b3e5
|
003b71cd1c3e6468089fd97b9cfe0696f241986d
|
/man/plot.cv.bridge.Rd
|
d3ffb9f4c41bad8b626e298ce5cde265971e9b1b
|
[] |
no_license
|
cran/rbridge
|
80c66c90f0d805376ed9d212785ce85e7bdc38be
|
3c9d0364d3731b7cc5f263eb2f3141c9b45ef28a
|
refs/heads/master
| 2020-08-23T05:18:35.298287
| 2020-02-29T10:40:03
| 2020-02-29T10:40:03
| 216,552,135
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 693
|
rd
|
plot.cv.bridge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{plot.cv.bridge}
\alias{plot.cv.bridge}
\title{Plot a 'cv.bridge' object function}
\usage{
\method{plot}{cv.bridge}(x, sign.lambda = 1, ...)
}
\arguments{
\item{x}{A fitted \code{cv.bridge} object whose cross-validation curve is to be plotted.}
\item{sign.lambda}{Either plot against \code{log(lambda)} (default) or its negative if \code{sign.lambda = -1}.}
\item{...}{Other graphical parameters to plot}
}
\description{
Plots the cross-validation curve, and upper and lower standard deviation curves, as a function of the lambda values used.
}
\author{
Bahadir Yuzbasi, Mohammad Arashi and Fikri Akdeniz \cr Maintainer: Bahadir Yuzbasi \email{b.yzb@hotmail.com}
}
|
ba0ff4671c861d1a3eac9c9a42de1053a282ebd2
|
2cb6a06d171e3d5d4a5f092acdaf1f642d11f944
|
/Questionnaire.R
|
d80664e9dc6d0d7ba8fd23b5af3a2aa5b8a3e831
|
[] |
no_license
|
SNaGLab/Risky-health-choices-and-the-Balloon-Economic-Risk-Protocol
|
9e48d4759d31b545a74f7f2fef5e57e496ba9cf1
|
03717703af778d75cb83b45df09a66c3c6caa4b7
|
refs/heads/master
| 2020-04-10T18:15:14.571146
| 2019-04-08T09:39:36
| 2019-04-08T09:39:36
| 161,198,463
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,669
|
r
|
Questionnaire.R
|
# Questionnaire data analyses
# Read the raw questionnaire export into Q and set up shared helpers.
cd <- "/DataFiles/" ### Put your own current directory here.
q1="Questionnaire.csv" #leave name intact
### Upload file: stored in R as Q.
file1=paste(cd, q1, sep = "")
Q=read.csv(file1)
# Convenience copy of the subject ID used to join all scale frames later.
Q$Participant <- Q$What.is.your.subject.ID.
## Initialize count variable (reused below as a per-row missing-item counter).
count=0
## Code impulsivity and sensation seeking
# Barratt Impulsiveness Scale (BIS): 30 items, scored 1-4.
# Pull each item out of Q under a short BISn name.
BIS1=Q$I.plan.tasks.carefully.
BIS2=Q$I.do.things.without.thinking.
BIS3=Q$I.make.up.my.mind.quickly.
BIS4=Q$I.am.happy.go.lucky.
BIS5=Q$I.don.t.pay.attention.
BIS6=Q$I.have.racing.thoughts.
BIS7=Q$I.plan.trips.well.ahead.of.time.
BIS8=Q$I.am.self.controlled.
BIS9=Q$I.concentrate.easily.
BIS10=Q$I.save.regularly.
BIS11=Q$I..squirm..at.plays.or.lectures.
BIS12=Q$I.am.a.carefull.thinker.
BIS13=Q$I.plan.for.job.security.
BIS14=Q$I.say.things.without.thinking.
BIS15=Q$I.like.to.think.about.complex.problems.
BIS16=Q$I.change.jobs.
BIS17=Q$I.act..on.impulse..
BIS18=Q$I.get.easily.bored.when.solving.thought.problems.
BIS19=Q$I.act.on.the.spur.of.the.moment.
BIS20=Q$I.am.a.steady.thinker.
BIS21=Q$I.change.where.I.live.
BIS22=Q$I.buy.things.on.impulse.
BIS23=Q$I.can.only.think.about.one.problem.at.a.time.
BIS24=Q$I.change.hobbies.
BIS25=Q$I.spend.or.charge.more.than.I.earn.
BIS26=Q$I.have.outside.thoughts.when.thinking.
BIS27=Q$I.am.more.interested.in.the.present.than.the.future.
BIS28=Q$I.am.restless.at.lectures.or.talks.
BIS29=Q$I.like.puzzles.
BIS30=Q$I.plan.for.the.future.
BIS=data.frame(BIS1,BIS2,BIS3,BIS4,BIS5,BIS6,BIS7,BIS8,BIS9,BIS10,BIS11,BIS12,BIS13,BIS14,BIS15,BIS16,BIS17,BIS18,BIS19,BIS20,BIS21,BIS22,BIS23,BIS24,BIS25,BIS26,BIS27,BIS28,BIS29,BIS30) #Make a BIS data frame
# Missing answers are recoded to 0 and counted per row in countna.
BIS[is.na(BIS)] <- 0
for (i in 1:(nrow(Q))) {
count[i]=with(BIS[i,], c(sum(BIS[i,] == 0)))
}
BIS$countna=count
#Reverse coding: 1, 7, 8, 9, 10, 12, 13, 15, 20, 29, 30 (1-4 = 4-1)
RCBIS<-c('BIS1','BIS7','BIS8','BIS9','BIS10','BIS12','BIS13','BIS15','BIS20','BIS29','BIS30')
y <- length(c(RCBIS))
for (k in 1:y) {
z <- RCBIS[k]
x <- length(BIS[,z])
for (i in 1:x) {
if(BIS[i,z]==4) {
BIS[i,z]=1
} else if(BIS[i,z]==3) {
BIS[i,z]=2
} else if(BIS[i,z]==2) {
BIS[i,z]=3
} else if(BIS[i,z]==1) {
BIS[i,z]=4
} else BIS[i,z]=0
}
}
# Sum the items before the zeros (missing) have been replaced with the
# participant's average per-item score.
BIS$BISscore <- rowSums(BIS[,1:30],na.rm = T)
BIS$BISavscore <- round(BIS$BISscore/(30-BIS$countna))
# Impute: replace each missing (0) item with the rounded per-item average.
for (i in 1:(nrow(Q))) {
for (q in 1:30) {
BIS[i,q] <- ifelse(BIS[i,q]==0, BIS$BISavscore[i], BIS[i,q])
}
}
# Now make the composite score (after reverse coding and imputation).
BIS$BISscore <- rowSums(BIS[,1:30])
# Add participant ID so this frame can be combined with others later.
BIS$Participant=Q$Participant
# Brief Sensation Seeking Scale (BSSS): 8 items, two per subscale.
BSS1=Q$I.would.like.to.explore.strange.places.
BSS2=Q$I.would.like.to.take.off.on.a.trip.with.no.pre.planned.routes.or.timetables.
BSS3=Q$I.get.restless.when.I.spend.too.much.time.at.home..
BSS4=Q$I.prefer.friends.who.are.excitingly.unpredictable..
BSS5=Q$I.like.to.do.frightening.things..
BSS6=Q$I.would.like.to.try.bungee.jumping..
BSS7=Q$I.like.wild.parties..
BSS8=Q$I.would.love.to.have.new.and.exciting.experiences..even.if.they.are.illegal..
BSSS=data.frame(BSS1,BSS2,BSS3,BSS4,BSS5,BSS6,BSS7,BSS8)
# Missing answers are recoded to 0, counted, then imputed with the
# participant's rounded per-item average (same approach as BIS above).
BSSS[is.na(BSSS)] <- 0
for (i in 1:(nrow(Q))) {
count[i]=with(BSSS[i,], c(sum(BSSS[i,] == 0)))
}
BSSS$BSSSscore <- rowSums(BSSS,na.rm = T)
BSSS$countna=count
BSSS$avscoreBSSS <- round(BSSS$BSSSscore/(8-BSSS$countna))
for (i in 1:(nrow(Q))) {
for (q in 1:8) {
BSSS[i,q] <- ifelse(BSSS[i,q]==0, BSSS$avscoreBSSS[i], BSSS[i,q])
}
}
# Now make the composite and subscale scores (after imputation).
BSSS$BSSSscore <- rowSums(BSSS[,1:8])
BSSS$experienceseeking <- BSSS$BSS1+BSSS$BSS2
BSSS$boredom <- BSSS$BSS3+BSSS$BSS4
BSSS$thrill <- BSSS$BSS5+BSSS$BSS6
BSSS$disinhibition <- BSSS$BSS7+BSSS$BSS8
# Add participant ID so this frame can be combined with others later.
BSSS$Participant=Q$Participant
# AUDIT (Alcohol Use Disorders Identification Test): relabel the questions
# for ease of coding. The A/S suffix distinguishes drinking Alone vs Social.
AUDIT1 = Q$How.often.do.you.have..a.drink.containing.alcohol..
AUDIT2A = Q$How.many.drinks.containing.alcohol.do.you.have.on.a.typical.day.when.you.are.drinking.by.yourself..
AUDIT2S = Q$How.many.drinks.containing.alcohol.do.you.have.on.a.typical.day.when.you.are.drinking.with.others.
AUDIT3A = Q$How.often.do.you.have.six.or.more.drinks.on.one.occasion.when.you.are.drinking.alone.
AUDIT3S = Q$How.often.do.you.have.six.or.more.drinks.on.one.occasion.when.you.are.drinking.with.others.
AUDIT4A = Q$How.often.during.the.last.year.have.you.found.that.you.were.not.able.to.stop.drinking.once.you.ha...
AUDIT4S = Q$How.often.during.the.last.year.have.you.found.that.you.were.not.able.to.stop.drinking.once.you.ha....1
AUDIT5A = Q$How.often.during.the.last..year.have.you.failed.to.do.what.was.normally.expected.of.you.because.a...
AUDIT5S = Q$How.often.during.the.last..year.have.you.failed.to.do.what.was.normally.expected.of.you.because.a....1
AUDIT6 = Q$How.often.during.the.last.year.have.you.needed.a.first.drink.in.the.morning.to.get.yourself.going...
AUDIT7A = Q$How.often.during.the.last.year.have.you.had.a.feeling.of.guilt.or.remorse.after.drinking.by.yours...
AUDIT7S = Q$How.often.during.the.last.year.have.you.had.a.feeling.of.guilt.or.remorse.after.drinking.with.oth...
AUDIT8 = Q$How.often.during.the.last.year.have.you.been.unable.to.remember.what.happened.the.night.before.be...
AUDIT9 = Q$Have.you.or.someone.else.been.injured.because.of.your.drinking..
AUDIT10 = Q$Has.a.relative..friend..doctor..or.other.health.care.worker.been.concerned.about.your.drinking.or...
# The 3-option questions (9, 10) are rescored to 0/2/4 per the original
# AUDIT scoring; note the recode order (2->4 first, then 1->2) matters.
AUDIT9 <- ifelse(AUDIT9==2, 4, AUDIT9)
AUDIT9<- ifelse(AUDIT9==1, 2, AUDIT9)
AUDIT10 <- ifelse(AUDIT10==2, 4, AUDIT10)
AUDIT10<- ifelse(AUDIT10==1, 2, AUDIT10)
# Build Alone and Social frames and sum each into a total score.
AuditA <- data.frame(AUDIT1,AUDIT2A,AUDIT3A,AUDIT4A,AUDIT5A,AUDIT6,AUDIT7A,AUDIT8,AUDIT9,AUDIT10)
AuditA[is.na(AuditA)] <- 0
AuditA$AAscore <- rowSums(AuditA,na.rm = T)
AuditA$Participant=Q$Participant
AuditS <- data.frame(AUDIT1,AUDIT2S,AUDIT3S,AUDIT4S,AUDIT5S,AUDIT6,AUDIT7S,AUDIT8,AUDIT9,AUDIT10)
AuditS[is.na(AuditS)] <- 0
AuditS$ASscore <- rowSums(AuditS,na.rm = T)
AuditS$Participant=Q$Participant
# Centers for Disease Control Youth Risk Behaviour Surveillance System (CDC, 2001)
# Ten yes/no risk-behaviour items; missing answers coded as 0 (no).
CDCrisk1=Q$Smoked.a.cigarette..even.a.puff.
CDCrisk2=Q$Drank.alcohol..even.one.drink..
CDCrisk3=Q$Used.any.illegal.drug
CDCrisk4=Q$Gambled.for.real.money.
CDCrisk5=Q$Had.sexual.intercourse.without.a.condom.
CDCrisk6=Q$Stolen.anything.from.a.store.
CDCrisk7=Q$Carried.a.weapon.such.as.a.gun..knife..or.club.outside.of.your.home
CDCrisk8=Q$Been.in.a.physical.fight.
CDCrisk9=Q$Ridden.in.a.car.without.wearing.your.seatbelt..even.once.
CDCrisk10=Q$Ridden.a.bicycle.or.motorcycle.without.a.helmet..even.once.
CDCrisk=data.frame(CDCrisk1,CDCrisk2,CDCrisk3,CDCrisk4,CDCrisk5,CDCrisk6,CDCrisk7,CDCrisk8,CDCrisk9,CDCrisk10)
CDCrisk[is.na(CDCrisk)] <- 0
# Total score plus two subscales: violence (items 6-8) and behaviour
# harmful to oneself (items 5, 9, 10).
CDCrisk$CDCscore=rowSums(CDCrisk)
CDCrisk$CDCviolence=rowSums(CDCrisk[6:8])
CDCrisk$CDCharmfuloneself=rowSums(CDCrisk[,c(5,9,10)])
CDCrisk$Participant=Q$Participant
# Drug use: columns referenced by position in Q. The _A suffix is use when
# Alone, _O is use with Others.
# NOTE(review): the column indices (40-59) are hard-coded to this export's
# layout — confirm against the current Questionnaire.csv before reuse.
Marijuana_A=Q[,40]
Stimulants_A=Q[,41]
Cocaine_A=Q[,42]
Hallucinogens_A=Q[,43]
Opiates_A=Q[,44]
Sedatives_A=Q[,45]
Nodrugs_A=Q[,46]
Marijuana_O=Q[,49]
Stimulants_O=Q[,50]
Cocaine_O=Q[,51]
Hallucinogens_O=Q[,52]
Opiates_O=Q[,53]
Sedatives_O=Q[,54]
Nodrugs_O=Q[,55]
Druguse=data.frame(Marijuana_A,Stimulants_A,Cocaine_A,Hallucinogens_A,Opiates_A,Sedatives_A,Nodrugs_A,Marijuana_O,Stimulants_O,Cocaine_O,Hallucinogens_O,Opiates_O,Sedatives_O,Nodrugs_O)
Druguse[is.na(Druguse)] <- 0
# Totals count the six drug categories (excluding the "no drugs" column).
Druguse$Drugstotal_A=rowSums(Druguse[,1:6])
Druguse$Drugstotal_O=rowSums(Druguse[,8:13])
Druguse$hitup_A=Q[,58]
Druguse$hitup_O=Q[,59]
Druguse[is.na(Druguse)] <- 0
# Include heroin use ("hit up" with others) in the with-others drugs score.
Heroine_O <- ifelse(Druguse$hitup_O>0, 1, 0)
Druguse$Drugstotal_O = Druguse$Drugstotal_O + Heroine_O
Druguse$Participant=Q$Participant
# Sexual behaviour: columns referenced by position in Q; the inline comments
# give the coding of each item.
Sexpartners=Q[,62] #0: none, 1=1, 2=2, 3=3, 4=4, 5=5, 6: 5-10, 7: >10 in past year
Regularpartners=Q[,63] #0: none, 1=1, 2=2, 3=3, 4=4, 5=5, 6: 5-10, 7: >10 in past year
Casualpartners=Q[,66]
STDregpartner=Q[,64] #0=no, 1=yes,
STDcaspartner=Q[,67] #0=never, 1=sometimes, 2=often, 3=always
Condomregpartner=Q[,65] #0=never, 1=rarely, 2=sometimes, 3=often, 4=every time
Condomcaspartner=Q[,68] #0=never, 1=rarely, 2=sometimes, 3=often, 4=every time
Sex=data.frame(Sexpartners, Regularpartners, Casualpartners, STDregpartner, STDcaspartner, Condomregpartner, Condomcaspartner)
# Many NAs here are structural (e.g. questions about a regular partner when
# the respondent has none), so they are coded 0. Interpret with care.
Sex[is.na(Sex)] <- 0 #many NA's here, eg question about sex behavior with reg partner, but you don't have them.
# Code carefully.
Sex$Participant=Q$Participant
# Demographics: coding of each item given in the inline comments.
Age=Q[,171]
Gender=Q[,172] #1=female, 2=male, 0=notreported
Ethnicity=Q[,173] #0=unknown/unreported, 1=hispanic/latino, 2=nothispanic/latino
Race=Q[,174] #0=unknown/unreported,1=american indian or alaska native, 2=asian, 3=black, 4=pacific islander, 5=white, 6=more than one race
Student=Q[,175] #0=neither, 1=studentCU, 2=studentnocu, 3=employee
Education=Q[,176] #1=high school, 2=high school equiv, 3=college, 4=ass degree, 5=bachelor, 6=master, 7=PhD, 8=prof degree
Towardsmen=Q[,177] #1-7: 1=very strongly avoid, 7=very strongly prefer
Towardswomen=Q[,178] #1-7: 1=very strongly avoid, 7=very strongly prefer
Demographics=data.frame(Age,Gender,Ethnicity,Race,Student,Education,Towardsmen,Towardswomen)
# Add participant ID so this frame can be combined with others later.
Demographics$Participant=Q$Participant
|
9c3ec8337cd346273b24c00a6e015eeb897bf2fd
|
bf610e1f34c1ab03178d1ad8f15bcff5e72ab522
|
/man/effect_N_continuous.Rd
|
a9a788066a1f5f7b2fd79a64cc8365c3ea00437f
|
[] |
no_license
|
caroliver/mobr
|
bd1d5cd9d5d41be5b62a9801e49a37ae615b001d
|
033569e1a2b571661505c5240d3bb8b65e9f6e21
|
refs/heads/master
| 2021-04-29T22:21:43.653097
| 2019-02-17T17:44:55
| 2019-02-17T17:44:55
| 121,636,007
| 1
| 0
| null | 2019-02-17T19:09:23
| 2018-02-15T13:59:32
|
R
|
UTF-8
|
R
| false
| true
| 474
|
rd
|
effect_N_continuous.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mobr.R
\name{effect_N_continuous}
\alias{effect_N_continuous}
\title{Auxiliary function for get_delta_stats()
Effect of N when type is "continuous"}
\usage{
effect_N_continuous(mob_in, S, group_levels, env_levels, group_data, plot_dens,
plot_abd, ind_sample_size, corr, n_perm)
}
\description{
Auxiliary function for get_delta_stats()
Effect of N when type is "continuous"
}
\keyword{internal}
|
706a2f614c6f158a99e1a52385f2a87bf2d4b857
|
184a5b70c5bf8642501c82610e8ea5562445029b
|
/R/addmod2.R
|
3f4e92e8bd7657f2bc08f4fbfbf7a8678b1be3fd
|
[] |
no_license
|
cran/QuantumOps
|
4910e53dda44803981801cbe1545e4ab63154538
|
35e2a8be5a6bbefbdc53a732eb6145a04dcd9e8e
|
refs/heads/master
| 2020-04-07T09:57:13.065211
| 2020-02-03T08:20:18
| 2020-02-03T08:20:18
| 158,270,510
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
addmod2.R
|
#' Bit-wise modulo-2 addition (XOR) of two non-negative integers
#'
#' Adds \code{x} and \code{a} bit by bit without carry, i.e. computes their
#' bitwise exclusive-or. Replaces the original \code{intToBits()} loop with
#' base \code{bitwXor()}, which performs the same bit comparison and is
#' additionally vectorized over both arguments.
#'
#' @param x non-negative integer (or vector of them)
#' @param a non-negative integer (or vector of them)
#' @return numeric XOR value(s), matching the numeric return of the loop version
#' @export
addmod2 <- function(x,a){
	# as.integer() mirrors the coercion intToBits() applied implicitly.
	as.numeric(bitwXor(as.integer(x), as.integer(a)))
}
|
bdec900a1c8b649301e45f2651002ac424002cf9
|
755af7368270bc41b4025f49a094e48d421edcc3
|
/run_analysis.R
|
6d5c99aac2259eea98bfa7739102fad4e7d9ee2e
|
[] |
no_license
|
clarcombe/DSassignments
|
096c4e592210aea610efde72f63bcef988a4428d
|
9056b481a0404873c9801812327156825c8bb5a9
|
refs/heads/master
| 2021-01-21T12:06:31.275456
| 2015-08-19T10:07:32
| 2015-08-19T10:07:32
| 41,026,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
run_analysis.R
|
# Tidy the UCI HAR smartphone dataset: read train/test files, keep only the
# mean()/std() features, label activities, and write per-activity averages.
#Load relevant libraries
library(tidyr)
library(dplyr)
library(rapport)
# Initialise all of the variables for the filenames
xtrainfile = file.path("train","X_train.txt")
ytrainfile = file.path("train","y_train.txt")
xtestfile = file.path("test","X_test.txt")
ytestfile = file.path("test","y_test.txt")
activitiesfile = file.path("activity_labels.txt")
featuresfile = file.path("features.txt")
# Read the features file (column position of X data and X data label )
features=read.table(file=featuresfile,colClasses = c("integer","character"))
# Only import mean and standard deviation columns: mean() and std()
xcols=data.frame(features[grep("mean\\(\\)|std\\(\\)",features$V2),])
names(xcols)<-c("column","head")
# Make the column headings clear
# Convert to CamelCase (tocamel comes from the rapport package)
xcols$head<-tocamel(xcols$head)
# And make them more meaningful
xcols$head<-gsub("Acc","Accelerometer",xcols$head)
xcols$head<-gsub("Gyro","Gyroscope",xcols$head)
xcols$head<-gsub("Mag","Magnitude",xcols$head)
xcols$head<-gsub("BodyBody","Body",xcols$head)
#Transpose rows to make a column-names vector
names.xcols=t(xcols$head)
#Read train files and test files
xtrain<-read.table(file=xtrainfile)
xtest<-read.table(file=xtestfile)
ytrain<-read.table(file=ytrainfile)
ytest<-read.table(file=ytestfile)
#Assign column names for Y files
names(ytrain)<-"ActivityId"
names(ytest)<-"ActivityId"
#From imported X files, select only the mean()/std() columns found above
xtrain<-select(xtrain,num_range("V",xcols$column))
xtest<-select(xtest,num_range("V",xcols$column))
# Add the enhanced column names to the x datasets
names(xtrain)<-names.xcols
names(xtest)<-names.xcols
# Merge Activity Columns (Y) with X
train.ds<-bind_cols(ytrain,xtrain)
test.ds<-bind_cols(ytest,xtest)
# Build one final dataset of train and test data
one.ds<-bind_rows(train.ds,test.ds)
# Read the activities file
activities<-read.table(file=activitiesfile,col.names = c("ActivityId","Activity"))
# And join this to the final dataset via the ActivityId column
one.ds<-inner_join(activities,one.ds,by="ActivityId")
# Then remove the superfluous column ActivityId
one.ds$ActivityId<-NULL
# Now create the tidy dataset, by grouping by the Activity
two.ds<-group_by(one.ds,Activity)
# Then average every column within each Activity group.
# NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
# the replacement would be summarise(across(everything(), mean)).
three.ds<-summarise_each(two.ds,funs(mean))
#Finally write out the file
tidy.ds.file<-file.path("colins.tidydataset.txt")
write.table(three.ds,tidy.ds.file,row.names=FALSE,sep=";")
|
5b357ec2fb086c2876a31c304440a18bc785e8f6
|
4ba27be09d1e637028ebd20fb1a648a7b98ad515
|
/shiny/ui.R
|
743ee7a1c190d361ae2bf6510de97aca66425bfb
|
[
"MIT"
] |
permissive
|
nezakrzan/Analiza-bruto-mesecnih-plac-v-Sloveniji
|
8afe9d87a91edd4c66198ca8f168e027e0c8271d
|
729ca1a394cfac5767f52b289b461674965334c6
|
refs/heads/master
| 2023-02-25T00:39:32.559880
| 2021-01-28T21:17:06
| 2021-01-28T21:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 324
|
r
|
ui.R
|
library(shiny)
# ui.R: the value of this top-level fluidPage() call is the app's UI
# (ui.R returns its last expression).
# NOTE(review): `gospodarskadejavnost` is not defined in this file; it must
# be loaded elsewhere (e.g. global.R) before the UI is built -- confirm.
fluidPage(
titlePanel(""),
tabPanel("Graf",
# Year selector ("Izberi leto" = "choose year"): distinct years in the data.
sidebarPanel(
selectInput("Leto", label = "Izberi leto",
choices = unique(gospodarskadejavnost$leto))),
# Main area: activity plot plus a legend table, both rendered server-side.
mainPanel(plotOutput("graf_dejavnosti"),
tableOutput("legenda")))
)
|
892ec82e526039ea73aefd9232618017ca581725
|
2acf0eaf8524b8a3a3a7d56f48d32f89d4529dd6
|
/plot1.R
|
1328e5bd1a22582b39c16031d06078db62c2cf7e
|
[] |
no_license
|
kishmk/ExData_Plotting1
|
c9d2f3f6e0f59eca950b687584da5a572563574c
|
933d90735cc301160001aa737fc77ec482f9c14c
|
refs/heads/master
| 2021-01-17T10:27:38.655905
| 2015-09-13T20:45:19
| 2015-09-13T20:45:19
| 42,238,873
| 0
| 0
| null | 2015-09-10T10:48:50
| 2015-09-10T10:48:50
| null |
UTF-8
|
R
| false
| false
| 432
|
r
|
plot1.R
|
# Draw a histogram of Global Active Power for 1-2 Feb 2007 and save it to
# plot1.png. Expects ../household_power_consumption.txt to exist and grep
# to be available on the system (the dates are filtered before parsing).
plot1 <- function() {
  # Pre-filter the raw file to the two target dates with grep.
  power <- read.table(
    pipe('grep "^[1-2]/2/2007" "../household_power_consumption.txt"'),
    sep = ";", colClasses = "character", header = TRUE
  )
  # Read only the header line so the filtered rows get proper column names.
  header_row <- read.table("../household_power_consumption.txt",
                           sep = ";", nrows = 1, header = TRUE)
  names(power) <- names(header_row)
  png(file = "plot1.png")
  hist(as.numeric(power$Global_active_power),
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)",
       col = "RED")
  dev.off()
}
|
41fa46cdc39177bc4da84cfaabf0d2877497581f
|
9b7d1879ea46e49138469b275dc5fa8afc30c00e
|
/scripts/TCL.R
|
91f5ed02da87104ec3c7084da25dd65945085d84
|
[] |
no_license
|
lbelzile/MATH60604-diapos
|
84f9c182e56ba0ae97d7b2392ecc0897d7edf4d1
|
31947d266e6c5589e0f98a381094d498b02dbbf2
|
refs/heads/master
| 2023-04-23T07:45:51.660337
| 2021-05-04T23:23:43
| 2021-05-04T23:23:43
| 285,688,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,530
|
r
|
TCL.R
|
# Figure-generation setup for the CLT animations: move to the script's own
# directory, then into ../img where every output file is written.
# NOTE(review): setwd() + rstudioapi only works interactively inside RStudio;
# consider here::here() or relative paths for non-interactive runs.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
setwd("../img")
library(gganimate)
#> Loading required package:
library(ggplot2)
library(viridisLite)
# Fixed seed so the sampled animations are reproducible.
set.seed(1234)
# Mixture weight of the first (near-zero, truncated-normal) component.
pmix <- 0.6
# Draw n values from a two-component truncated-normal mixture: with
# probability pmix (a global set above) a N(2, 3^2) truncated to [0, Inf),
# otherwise a N(20, 5^2) truncated to [2, 40].
sampmixt <- function(n) {
  pick_first <- rbinom(n = n, size = 1, pmix)
  ifelse(pick_first,
         TruncatedNormal::rtnorm(n = n, mu = 2, sd = 3, lb = 0, ub = Inf),
         TruncatedNormal::rtnorm(n = n, mu = 20, sd = 5, lb = 2, ub = 40))
}
# Density of the mixture matching sampmixt(): the pmix-weighted sum of the
# two truncated-normal component densities, evaluated at x.
densmixt <- function(x) {
  comp_low <- TruncatedNormal::dtmvnorm(matrix(x),
                                        mu = 2, sigma = matrix(9),
                                        lb = 0, ub = Inf)
  comp_high <- TruncatedNormal::dtmvnorm(x = matrix(x),
                                         mu = 20, sigma = matrix(25),
                                         lb = 2, ub = 40)
  pmix * comp_low + (1 - pmix) * comp_high
}
# Mean of a N(mu, sigma^2) distribution truncated to the interval [a, b],
# via the standard closed form:
#   mu + sigma * (phi(alpha) - phi(beta)) / (Phi(beta) - Phi(alpha)).
tnormmean <- function(mu, sigma, a, b) {
  z_lo <- (a - mu) / sigma
  z_hi <- (b - mu) / sigma
  trunc_mass <- pnorm(z_hi) - pnorm(z_lo)
  mu + sigma * (dnorm(z_lo) - dnorm(z_hi)) / trunc_mass
}
# Variance of a N(mu, sigma^2) distribution truncated to [a, b], using the
# standard closed forms. The four cases cover two-sided truncation,
# lower-only, upper-only, and no truncation at all.
tnormvar <- function(mu, sigma, a, b) {
  stopifnot(a < b)
  z_lo <- (a - mu) / sigma
  z_hi <- (b - mu) / sigma
  if (is.infinite(a) && is.infinite(b)) {
    # No truncation: plain normal variance.
    return(sigma^2)
  }
  if (is.finite(a) && is.finite(b)) {
    # Two-sided truncation on [a, b].
    mass <- pnorm(z_hi) - pnorm(z_lo)
    ratio <- (dnorm(z_lo) - dnorm(z_hi)) / mass
    return(sigma^2 * (1 + (z_lo * dnorm(z_lo) - z_hi * dnorm(z_hi)) / mass - ratio^2))
  }
  if (is.finite(a)) {
    # Truncated below only: [a, Inf).
    upper_tail <- pnorm(z_lo, lower.tail = FALSE)
    return(sigma^2 * (1 + (z_lo * dnorm(z_lo)) / upper_tail -
                        (dnorm(z_lo) / upper_tail)^2))
  }
  # Truncated above only: (-Inf, b].
  lower_mass <- pnorm(z_hi)
  sigma^2 * (1 - z_hi * dnorm(z_hi) / lower_mass -
               (dnorm(z_hi) / lower_mass)^2)
}
#' Variance of a mixture distribution.
#'
#' For components with means `mui`, standard deviations `sigmai` and weights
#' `w` (summing to 1), computes sum(w * (sigmai^2 + mui^2 - mu^2)) where
#' mu = sum(w * mui) is the mixture mean.
#'
#' @param w Numeric vector of mixture weights (should sum to 1).
#' @param mui Numeric vector of component means.
#' @param sigmai Numeric vector of component standard deviations.
#' @param mu Ignored; kept only for backward compatibility. In the original
#'   code this argument was dead: it was overwritten before any use, and the
#'   caller above omits it entirely, so it now defaults to NULL.
#' @return The mixture variance (scalar).
varmixt <- function(w, mui, sigmai, mu = NULL) {
  # The mixture mean is always recomputed from the components.
  mu <- sum(w * mui)
  sum(w * (sigmai^2 + mui^2 - mu^2))
}
# ---- Mixture moments ----
# Component means/SDs from the truncated-normal helpers, combined into the
# mixture mean (mug) and mixture SD (sigmag) used by every plot below.
mu1 <- tnormmean(2, 3, 0, Inf)
mu2 <- tnormmean(20, 5, 2, 40)
sigma1 <- sqrt(tnormvar(2, 3, 0, Inf))
sigma2 <- sqrt(tnormvar(20, 5, 2, 40))
mug <- sum(c(pmix, 1-pmix)*c(mu1, mu2))
sigmag <- sqrt(varmixt(w = c(pmix, 1-pmix), c(mu1, mu2), c(sigma1, sigma2)))
# ---- Static density plot of the mixture, with its mean marked in red ----
ggplot() +
geom_function(fun = "densmixt", xlim = c(0, 45), n = 1001) +
geom_vline(xintercept = mug, col = "red") +
theme_minimal() +
ylab("densité") + xlab("x")
ggsave(filename = "densite.pdf", width = 8, height = 5)
# ---- Animated dot plots: nsamp samples of size n, sample mean in red ----
nsamp <- 20
n <- 10
B <- n*nsamp
dat <- data.frame(x = sampmixt(B), n = factor(rep(1:nsamp, each = n)))
datmean <- data.frame(mean = colMeans(matrix(dat$x, nrow = n)),
n = factor(1:nsamp))
# Build a static plot; transition_states() then animates over the samples.
p <- ggplot(dat, aes(x = x)) +
geom_dotplot(binwidth = 1, method = "histodot") +
geom_vline(data = datmean, aes(xintercept = mean), col = "red", size = 1) +
theme_minimal() +
theme(legend.position = "none",
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
scale_y_continuous(name = "", breaks = NULL) +
xlab("") +
transition_states(n, wrap = FALSE,
transition_length = 1,
state_length = 9) +
ease_aes('cubic-in-out') +
enter_fade() +
ggtitle('Échantillon {closest_state}', subtitle = "n = 10")
anim_save(p, filename = "clt_mean_10.gif")
# Same animation, samples of size n = 100.
n <- 100
B <- n*nsamp
dat <- data.frame(x = sampmixt(B),
n = factor(rep(1:nsamp, each = n))
)
datmean <- data.frame(mean = colMeans(matrix(dat$x, nrow = n)), n = factor(1:nsamp))
# Static plot, animated over samples as above.
p <- ggplot(dat, aes(x = x)) +
geom_dotplot(binwidth = 0.5, method = "histodot") +
geom_vline(data = datmean, aes(xintercept = mean), col = "red", size = 1) +
theme_minimal() +
theme(legend.position = "none",
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
scale_y_continuous(name = "", breaks = NULL) +
xlab("") +
transition_states(n, wrap = FALSE,
transition_length = 1,
state_length = 9) +
ease_aes('cubic-in-out') +
enter_fade() +
ggtitle('Échantillon {closest_state}', subtitle = "n=100")
anim_save(p, filename = "clt_mean_100.gif")
# Same animation, samples of size n = 1000.
n <- 1000
B <- n*nsamp
dat <- data.frame(x = sampmixt(B),
n = factor(rep(1:nsamp, each = n))
)
datmean <- data.frame(mean = colMeans(matrix(dat$x, nrow = n)), n = factor(1:nsamp))
# Static plot, animated over samples as above.
p <- ggplot(dat, aes(x = x)) +
geom_dotplot(binwidth = 0.5, method = "histodot") +
geom_vline(data = datmean, aes(xintercept = mean), col = "red", size = 1) +
theme_minimal() +
theme(legend.position = "none",
axis.text.y = element_blank(),
axis.ticks.y = element_blank()) +
scale_y_continuous(name = "", breaks = NULL) +
xlab("") +
transition_states(n, wrap = FALSE,
transition_length = 1,
state_length = 9) +
ease_aes('cubic-in-out') +
enter_fade() +
ggtitle('Échantillon {closest_state}', subtitle = "n=1000")
anim_save(p, filename = "clt_mean_1000.gif")
# ---- Sampling distribution of the mean vs the CLT normal approximation ----
# Histogram of 10000 sample means overlaid with N(mug, sigmag^2/n).
ggplot(data = data.frame(x = replicate(n = 10000, mean(sampmixt(10))))) +
geom_histogram(aes(x, y = ..density..), binwidth = 4/10) +
geom_function(fun = "dnorm", args = list(mean = mug, sd = sigmag/sqrt(10)),
n = 1001, col = viridis(n = 3)[1]) +
theme_minimal() +
ylab("densité") + xlab("x")
ggsave(filename = "densmean10.pdf", width = 8, height = 5)
ggplot(data = data.frame(x = replicate(n = 10000, mean(sampmixt(100))))) +
geom_histogram(aes(x, y = ..density..), binwidth = 6/50) +
geom_function(fun = "dnorm", args = list(mean = mug, sd = sigmag/10),
n = 1001, col = viridis(n = 3)[2]) +
theme_minimal() +
ylab("densité") + xlab("x")
ggsave(filename = "densmean100.pdf", width = 8, height = 5)
# NOTE(review): this n = 1000 histogram is never saved -- the ggsave() call
# named "densmean1000.pdf" below comes after the comparison line plot, so
# that file ends up holding the comparison plot instead. Confirm intent.
ggplot(data = data.frame(x = replicate(n = 10000, mean(sampmixt(1000))))) +
geom_histogram(aes(x, y = ..density..), binwidth = 1/50, alpha = 0.4) +
geom_function(fun = "dnorm", args = list(mean = mug, sd = sigmag/sqrt(1000)),
n = 1001, col = viridis(n = 3)[3]) +
theme_minimal() +
ylab("densité") + xlab("x")
# ---- CLT normal approximations for n = 10, 100, 1000 on one plot ----
x0 <- seq(0, 26, length.out = 1001)
# NOTE(review): "dat =" only works through partial matching of ggplot's
# "data" argument; spell it out.
ggplot(dat = data.frame(x = rep(x0, 3),
n = factor(rep(c(10,100,1000), each = 1001)),
y = c(dnorm(x0, mean = mug, sd = sigmag/sqrt(10)),
dnorm(x0, mean = mug, sd = sigmag/sqrt(100)),
dnorm(x0, mean = mug, sd = sigmag/sqrt(1000)))),
aes(x = x, y = y, col = n)) +
geom_line() +
theme_minimal() +
theme(legend.position = "bottom") +
scale_colour_viridis_d() +
xlim(c(0,22)) +
ylim(c(0,1.5)) +
ylab("densité") + xlab("x")
ggsave(filename = "densmean1000.pdf", width = 8, height = 5)
|
1c2762581de61e88bd1a96b3ff23db64a25213e7
|
ed0738a868e64399ff12b23b830be0582eae509a
|
/hw2.5.R
|
d4e075a690a6e4dcbdbccddbb62619c036d4131a
|
[] |
no_license
|
xkuang/internationalEducationData
|
4fadba2a53ceea2960482a0f3ce226e0eeb9a10f
|
9515a36cdb30d7cf99125d170cd9a71b7067ab1c
|
refs/heads/master
| 2021-01-11T21:33:35.171266
| 2017-02-13T13:30:00
| 2017-02-13T13:30:00
| 78,805,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,394
|
r
|
hw2.5.R
|
# hw2.5.R -- classical (Torgerson) multidimensional scaling worked example:
# symmetrise a proximity matrix, check the triangle inequality, double-centre
# the squared distances, eigendecompose, and plot a 2-D configuration.
# NOTE(review): as written this script does not run end-to-end -- see the
# inline notes about D, c4/c5 and the 4-vs-5 size mismatch below.
library(cluster)
library(Matrix)
P <- matrix(c(97,4,4,7,2,
9,87,8,37,9,
8,16,93,12,12,
11,59,17,96,12,
9,15,26,12,86),nrow=5,byrow=TRUE)
# The following code mirrors the upper half of P into the lower half
# to symmetrise the matrix.
# method 1
S <- forceSymmetric(P)
# method 2
P[lower.tri(P)] = t(P)[lower.tri(P)]
DS <- as.matrix(daisy(P, metric = "euclidean", stand = FALSE))
# Check the triangle inequality (TI) for all triples of points, then fix
# any violations of TI by adding a constant c to all entries.
# m = size of matrix (# of stimuli).
# NOTE(review): P is 5x5 but m is set to 4, so the last stimulus is ignored.
m<-4; minc<-0 # note you can put two R statements on one line - separate them with semicolon
# NOTE(review): D is never defined before this loop (only P, S and DS exist);
# DS was presumably intended. Likewise c4 and c5 below are undefined, so
# min(c1,...,c5) errors -- min(c1,c2,c3) was probably meant. Both need
# fixing before this script can run.
for (k in 3:m)
{ for (j in 2:(k-1))
{ for (i in 1:(j-1))
{ i;j;k
c1<-D[i,j]+D[j,k]-D[i,k]
c2<-D[j,k]+D[i,k]-D[i,j]
c3<-D[i,j]+D[i,k]-D[j,k]
c <- min(c1,c2,c3,c4,c5)
if (c<0) minc<-min(c,minc) }}}
# if minc<0, then the TI is violated, by abs(c)
C<-matrix(numeric(16),4,4)
C<-C+abs(minc) # (matrix + scalar) adds the scalar elementwise to the matrix
D
C
Delta<-D+C
for (i in 1:m) Delta[i,i]=0 # put 0s on diagonal of Delta
Delta
DeltaSq<-Delta^2
DeltaSq
# now compute the row / col means and the grand mean
aveDsq<-c(1:4)
for (i in 1:m) aveDsq[i]<-mean(DeltaSq[i,])
m<-4
aveDsq
grmean<-mean(aveDsq[])
grmean
# now we can define matrix B*, the quasi-scalar products (double-centred) matrix
B<-matrix(numeric(16),4,4)
for (i in 1:m)
{ for (j in 1:m)
{ B[i,j] <- -0.5*(DeltaSq[i,j]-aveDsq[i]-aveDsq[j]+grmean)
}}
B
# now factor matrix B*; start with eigendecomposition
# Function "eigen" puts eigenvalues into object "values", eigenvectors into "vectors"
Bcomp<-eigen(B)
Bcomp
# define principal components (use first two only)
wts<-matrix(numeric(4),2,2)
for (i in 1:2) wts[i,i]<-sqrt(Bcomp$values[i])
evec<-Bcomp$vectors[,1:2]
wts
evec
# NOTE(review): this overwrites the original proximity matrix P with the
# 2-D configuration.
P<-evec%*%wts
P
# plot the final 2-dim configuration
plot(P)
points<-c("a","b","c","d") # prepare to label points
text(P,points)
# note that the obtained configuration matrix (X) is close to the one we used
# to generate the proximity "data".
# however, the large negative eigenvalue (#4) is perhaps problematic. Using a larger additive
# constant will generally increase all the eigenvalues, so it might help. But figuring out the
# best constant to use is the "additive constant problem". There has been recent work on this.
|
5dc650c5bdd9bf717e1c9a913f3f22eee340b368
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Pan/k_d4_n/k_d4_n-6/k_d4_n-6.R
|
eff98737262e7eec0501af6e0d2e7f61d6bfef42
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
k_d4_n-6.R
|
0e42fc16fc805cd9bf64377ae76476bd k_d4_n-6.qdimacs 567 1950
|
c7bde0d0ff6a1ecdb14a094593f295e6082c55ba
|
13cb16a2eff4bae3ddd5f4826edfd879a03001a0
|
/man/createDistMat.Rd
|
4e7093ce2de67914809aad11027ea53dcc9906ac
|
[] |
no_license
|
nkurzaw/Rtpca
|
2a3471ff14756699883f57a6be7590bdd05cd940
|
c1922eb6c195263cae2fe941584f32b8035b3df1
|
refs/heads/master
| 2023-05-29T19:50:58.078565
| 2023-04-25T15:20:19
| 2023-04-25T15:20:19
| 227,799,428
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,553
|
rd
|
createDistMat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{createDistMat}
\alias{createDistMat}
\title{Create distance matrix of all vs all protein
melting profiles}
\usage{
createDistMat(
objList,
rownameCol = NULL,
summaryMethodStr = "median",
distMethodStr = "euclidean"
)
}
\arguments{
\item{objList}{list of objects suitable for the analysis,
currently allowed classes of objects are: matrices,
data.frames, tibbles and ExpressionSets}
\item{rownameCol}{in case the input objects are tibbles
this parameter takes in the name (character) of the column
specifying protein names or ids}
\item{summaryMethodStr}{character string indicating a method
to use to summarize measurements across replicates,
default is "median", other options are c("mean", "rbind")}
\item{distMethodStr}{method to use within dist function,
default is 'euclidean'}
}
\value{
a distance matrix of all pairwise protein
melting profiles
}
\description{
Create distance matrix of all vs all protein
melting profiles
}
\examples{
library(Biobase)
m1 <- matrix(1:12, ncol = 4)
m2 <- matrix(2:13, ncol = 4)
m3 <- matrix(c(2:10, 1:7), ncol = 4)
rownames(m1) <- 1:3
rownames(m2) <- 2:4
rownames(m3) <- 2:5
colnames(m1) <- paste0("X", 1:4)
colnames(m2) <- paste0("X", 1:4)
colnames(m3) <- paste0("X", 1:4)
mat_list <- list(
m1, m2, m3
)
createDistMat(mat_list)
expr1 <- ExpressionSet(m1)
expr2 <- ExpressionSet(m2)
expr3 <- ExpressionSet(m3)
exprSet_list <- list(
expr1, expr2, expr3
)
createDistMat(exprSet_list)
}
|
5aa31780c8843116686fd5a0f970ff0950591a5c
|
8d319d56425bae6aace33374d41280ded36e00c6
|
/man/simeans.binormal.Rd
|
451d07ce4b0fa2391470503827df2f02cfa31ad1
|
[] |
no_license
|
cran/asd
|
6a8733a815ca87deb186809ac26a55f0b23dd31b
|
64a2e65f0c0d460810f02494eb4ca22115434dac
|
refs/heads/master
| 2021-01-21T13:48:45.488898
| 2016-05-23T10:14:13
| 2016-05-23T10:14:13
| 17,694,497
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
rd
|
simeans.binormal.Rd
|
\name{simeans.binormal}
\alias{simeans.binormal}
\title{
Simulate Bivariate Normal Means
}
\description{
Simulates bivariate normal means; for use with \code{asd.sim} and \code{gasd.sim} in ASD.
}
\usage{
simeans.binormal(n = n, means = means, vars = vars, corr = corr)
}
\arguments{
\item{n}{
Number of records used to calculate means
}
\item{means}{
Vector of expected means for two samples
}
\item{vars}{
Vector of expected variances for two samples
}
\item{corr}{
Correlation between two samples
}
}
\details{
Uses function \code{rmvnorm} from package \code{mvtnorm} to generate means from correlated normal variates.
}
\value{
\item{samp1}{Mean of sample 1}
\item{samp2}{Mean of sample 2}
}
\author{
Nick Parsons (\email{nick.parsons@warwick.ac.uk})
}
\seealso{
\code{\link{treatsel.sim}}, \code{\link{dunnett.test}}, \code{\link{hyp.test}}, \code{\link{select.rule}}, \code{\link{combn.test}}
}
\examples{
# need to load mvtnorm
library(mvtnorm)
# generate data
set.seed(1234)
simeans.binormal(n=10,means=c(2,3),vars=c(1,5),corr=0.5)
}
\keyword{design}
|
d41a7b5acf6690be9dea1c8e08278c3dd8afd641
|
e6e8e6339083a42d8fd6ce6ea22d17dd6fa92ea8
|
/cachematrix.R
|
2d41c5c503c56b5ef0e34ebf40de493fc658527c
|
[] |
no_license
|
chris451/ProgrammingAssignment2
|
7e1183f6f99b3c2311a8dca04a763a02aea470f2
|
86fc6a74e9912998366431b2f3817a0a90af3e5a
|
refs/heads/master
| 2020-05-29T11:56:37.499258
| 2014-04-27T11:50:09
| 2014-04-27T11:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
cachematrix.R
|
# This code contains 2 functions:
# - makeCacheMatrix: create an advanced ("special") matrix which also contains its inverse
# - cacheSolve: compute the inverse of a matrix if the inverse has not been calculated already
#
# usage:
# source("cachematrix.R")
# m <- makeCacheMatrix(matrix(5:8,2))
# m$get()
# cacheSolve(m) # compute the inverse of the matrix
# cacheSolve(m) # retrieve the inverse from cache
## function which creates a special "matrix" object that can cache its inverse:
#
## Create a cache-aware matrix wrapper: a list of four accessor closures
## (set/get for the matrix itself, setInverse/getInverse for its cached
## inverse). Assigning a new matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new data invalidates the cache
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Function which computes the inverse of the special "matrix" returned by makeCacheMatrix
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
#
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## The inverse is computed with solve() on the first call and stored via the
## wrapper's setInverse(); later calls serve the cached copy and emit a
## "getting cached data" message. Extra arguments in ... are accepted for
## interface compatibility but not forwarded.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get())
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
63dbe3c045b69e5c9574e0f35bd6a0d9c798a6ed
|
43be21b1b2063184e3f6877fa08233bce701cc2e
|
/Dashboard2/test_graph_general.R
|
e555c33ddd679d181122f1112fd7b2c8c29deed7
|
[] |
no_license
|
escandethomas/Dashboard_V_Dem
|
0a4a94ac42e653fa589049a0554f999acf254f1c
|
261644146e762800a9b10ead9e2c75ac75a05af3
|
refs/heads/main
| 2023-06-19T05:43:57.465606
| 2021-07-19T13:14:18
| 2021-07-19T13:14:18
| 326,505,915
| 0
| 0
| null | 2021-03-17T21:56:28
| 2021-01-03T21:41:12
|
R
|
UTF-8
|
R
| false
| false
| 815
|
r
|
test_graph_general.R
|
library(ggplot2)
democracies <-readRDS("data/data_dashboard.rds")
# ui.R: dropdown to pick a V-Dem index for the y axis, plot in the main panel.
# NOTE(review): the title and choice labels ("Seedling Traits", "Height (cm)",
# "Number of leaves") look like leftovers from a template, while the choice
# values are V-Dem indices (v2x_polyarchy, v2x_libdem) -- confirm intended
# labels.
ui <- fluidPage(
titlePanel("Investigating Seedling Traits across Elevation"),
sidebarLayout(
sidebarPanel(
# input$var_y holds the selected column name (the choice *value*).
selectInput(inputId = "var_y",
label = 'Trait',
choices = c("Height (cm)" = "v2x_polyarchy",
"Number of leaves" = "v2x_libdem"),
selected = NULL)),
mainPanel(plotOutput("traitplot"))
)
)
# Server logic: scatterplot of the selected V-Dem index against year.
server <- function(input, output) {
  output$traitplot <- renderPlot(
    # Bug fix: aes_string() expects column names as *strings*. The original
    # passed the bare symbol `year`, which is undefined and errors at render
    # time; the y aesthetic already correctly used input$var_y.
    ggplot(data = democracies,
           aes_string(x = "year", y = input$var_y)) +
      geom_point() + theme_classic()
  )
}
# Run the application
shinyApp(ui = ui, server = server)
|
77c37c10a253df66474915a9ba84758514bf1991
|
a9cd617502cc40c385cb138b14b476511f12de45
|
/2 Getting and Cleaning Data/Assignments/run_analysis.R
|
cb941c5b7c2e97dee14ddb69371e4ac50c61c0ea
|
[] |
no_license
|
ssmolenski/CourseraDataScience
|
3a3a26225428889c4a508501cba3c08e7cfd9e57
|
aa785ae237aa6eae376911ee81e40f365327f7d3
|
refs/heads/master
| 2020-04-01T02:32:22.908081
| 2019-05-03T18:09:26
| 2019-05-03T18:09:26
| 152,783,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,056
|
r
|
run_analysis.R
|
library(dplyr)
library(tidyr)
library(devtools)
library(stringr)
# Build one combined train+test HAR dataset with readable column names.
# NOTE(review): the absolute setwd() path ties the script to one machine;
# prefer a relative path or a command-line argument.
setwd("C:/Users/Sarah/Downloads/Dataset/UCI HAR Dataset")
# Read in the features and split into a vector, then keep only mean/std values
readLines("features.txt") %>% strsplit(" ") -> features
features<-features[grepl("(mean|std)",features)]
features<-features[!grepl("Freq",features)]
# Read in activity labels and clean them up (strip ids, underscores, case)
activity_labels<-readLines("activity_labels.txt")
activity_labels<-gsub("\\d\\s","",activity_labels)
gsub("\\_"," ",activity_labels) %>% tolower() -> activity_labels
# Read in participant and activity data, turning activity numbers into labels
readLines("test/subject_test.txt") %>% as.numeric -> subjects_test
readLines("train/subject_train.txt") %>% as.numeric -> subjects_train
participants<-c(subjects_test,subjects_train)
# rm(subjects_test,subjects_train)
readLines("test/y_test.txt") %>% as.numeric -> activity_test
readLines("train/y_train.txt") %>% as.numeric -> activity_train
# NOTE(review): these index-growing loops are just activity_labels[activity_test]
# etc.; vectorised indexing would avoid the O(n^2) growth.
activity1<-character()
activity2<-character()
for(i in 1:length(activity_test)){
activity1[i]<-activity_labels[activity_test[i]]
}
for(i in 1:length(activity_train)){
activity2[i]<-activity_labels[activity_train[i]]
}
activities<-c(activity1,activity2)
# rm(activity_labels,activity_test,activity_train,activity1,activity2)
# Read in the measurement data and remove empty (whitespace-only) elements
readLines("test/X_test.txt") %>% strsplit(" ") -> data_test
for (i in 1:length(data_test)){
data_test[[i]]<-data_test[[i]][grepl("\\d",data_test[[i]])]
}
readLines("train/X_train.txt") %>% strsplit(" ") -> data_train
for (i in 1:length(data_train)){
data_train[[i]]<-data_train[[i]][grepl("\\d",data_train[[i]])]
}
# Extract the column indices and names for the relevant mean/std features.
# NOTE(review): `subset` and `names` shadow base R functions of the same
# name; later code still works but renaming them would be safer.
subset<-numeric()
names<-character()
for(i in 1:length(features)) {
subset<-c(subset,as.numeric(features[[i]][1]))
names<-c(names,features[[i]][2])
}
# Clean up the feature names into readable descriptions
gsub("\\-"," ", names) %>%
gsub("\\Q()\\E","", .) %>%
gsub("^t","",.) %>%
gsub("^f","FFT ",.) %>%
gsub("Mag"," magnitude",.) %>%
gsub("BodyAcc[^J]","raw accel signal ",.)%>%
gsub("BodyGyro[^J]","raw gyro signal ",.)%>%
gsub("GravityAcc[^J]","gravity accel ",.)%>%
gsub("Jerk","",.) %>%
gsub("Acc","accel",.) %>%
gsub("Gyro","angular velocity",.) %>%
gsub("Gravity","gravity ",.) %>%
gsub("Body","",.)->names
# rm(features)
# Subset each row down to only the mean/std columns selected above
for(i in 1:length(data_test)){
data_test[[i]]<-data_test[[i]][subset]
}
for(i in 1:length(data_train)){
data_train[[i]]<-data_train[[i]][subset]
}
# Bind lists of vectors into data frames; add participant/activity columns
as.data.frame(do.call(rbind, data_test)) %>% tbl_df -> test_df
as.data.frame(do.call(rbind, data_train)) %>% tbl_df -> train_df
data<-rbind(test_df,train_df)
names(data)<-names
data<-mutate(data,participant=participants,activity=activities)
# Move activity and participant to the front
last<-ncol(data)
data<-select(data,last,(last-1),1:(last-2))
# Convert the measurement columns (everything after activity/participant)
# from character to numeric.
# Bug fix: the original hand-rolled scientific-notation parser extracted the
# mantissa with the regex "\\d\\.\\d+", which never matches a leading minus
# sign, so every negative measurement silently lost its sign; it also only
# handled exponents e+000 .. e-008 (anything else produced an error or NA).
# as.numeric() parses the full signed notation ("-1.234e-005") correctly.
data %>% mutate_if(is.factor, as.character) -> data
for (i in 3:ncol(data)) {
  data[[i]] <- as.numeric(data[[i]])
}
# Write the full tidy dataset, then a second dataset of per-(activity,
# participant) variable means.
# NOTE(review): "row.name" relies on partial matching of write.table's
# row.names argument; spell it out to be safe.
#write.csv(data,file="tidydata.csv")
write.table(data,"data.txt",row.name=FALSE)
data<-group_by(data,activity,participant)
meandata<-summarize_all(data,mean)
#write.csv(meandata,file="meandata.csv")
write.table(meandata,"summary.txt",row.name=FALSE)
|
5c0ea1298319357f6b640ca75bf7670828b40543
|
7b838911abcf1d892c507c3b97c285b76525cf8f
|
/man/grapes-IfNull-grapes.Rd
|
6b48d7ba25d164f9a28cf0a2a066e06e00898bc6
|
[
"MIT"
] |
permissive
|
Mehranmzn/LMMELSM
|
d6412ef299df70ae906fc75ac636f6b2b9bdc8eb
|
b4c05f9d3e4be0fb3585217a02821f218da61473
|
refs/heads/master
| 2023-03-17T23:44:43.930882
| 2021-03-18T00:14:17
| 2021-03-18T00:14:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
grapes-IfNull-grapes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{\%IfNull\%}
\alias{\%IfNull\%}
\title{Operator for testing NULL and returning expr if NULL}
\usage{
object \%IfNull\% expr
}
\arguments{
\item{object}{Object to test for NULL}
\item{expr}{Expression to evaluate and return if object is NULL}
}
\value{
object if object is non-NULL, expression output if object is NULL.
}
\description{
Operator for testing NULL and returning expr if NULL
}
\author{
Stephen R. Martin
}
\keyword{internal}
|
6db40089d12c97d75b2a610cd20f4d8d68b9719a
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/MGMM/man/tr.Rd
|
fe815bfcdd9f71cee41f8ed3d544640cbd90fb49
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 266
|
rd
|
tr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{tr}
\alias{tr}
\title{Matrix Trace}
\usage{
tr(A)
}
\arguments{
\item{A}{Numeric matrix.}
}
\value{
Scalar.
}
\description{
Calculates the trace of a matrix \eqn{A}.
}
|
25e96ad2c7223d8fa4da833420bbdde885ccfef7
|
9d016ff8a3482452bd280a0a73797eb65f3ab83b
|
/R/get_bootstrap_matrix.r
|
ad52d2d0858754107c0742b3658dd0b23f27da74
|
[] |
no_license
|
NathanSkene/ALS_Human_EWCE
|
dd199b78530224b118ea51514d598b6df922d785
|
551c4478cbf0b97d938632522de88946428c679a
|
refs/heads/master
| 2022-01-13T03:55:42.947202
| 2019-06-25T10:31:30
| 2019-06-25T10:31:30
| 176,797,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
get_bootstrap_matrix.r
|
# For every cell type, build an nReps x length(mouse.hits) matrix of
# bootstrap replicates: each row holds the *sorted* specificity scores of a
# random gene set (same size as mouse.hits) drawn without replacement from
# the combined hit + background gene pool.
# NOTE(review): the row assignment assumes every sampled gene appears in the
# specificity matrix so that length(ValidGenes) == length(mouse.hits); if
# some genes are missing, the assignment will recycle or error -- confirm
# upstream filtering guarantees this.
get_bootstrap_matrix <- function(nReps, mouse.hits, mouse.bg, cell_types, sct_data, annotLevel) {
  gene_pool <- unique(c(mouse.hits, mouse.bg))
  spec <- sct_data[[annotLevel]]$specificity
  n_hits <- length(mouse.hits)
  # Pre-allocate one replicate matrix per cell type, rows named Rep1..RepN.
  exp_mats <- list()
  for (cc in cell_types) {
    exp_mats[[cc]] <- matrix(0, nrow = nReps, ncol = n_hits)
    rownames(exp_mats[[cc]]) <- sprintf("Rep%s", 1:nReps)
  }
  for (s in 1:nReps) {
    boot_genes <- sample(gene_pool, n_hits)
    # Keep matrix row order by filtering rownames rather than indexing
    # directly with the sampled set.
    kept_genes <- rownames(spec)[rownames(spec) %in% boot_genes]
    boot_spec <- spec[kept_genes, ]
    for (cc in cell_types) {
      exp_mats[[cc]][s, ] <- sort(boot_spec[, cc])
    }
  }
  return(exp_mats)
}
|
f58462954085696d42a9a37a3685ac900f4ab17c
|
b0255d4e54415b6fb1519b8fc0e4d1ca6717b080
|
/R/completeVecs.R
|
ded679da0961a308006eaf67d58c2d1f1d1912ec
|
[] |
no_license
|
mrdwab/SOfun
|
a94b37d9c052ed32f1f53372a164d854537fcb4a
|
e41fa6220871b68be928dfe57866992181dc4e1d
|
refs/heads/master
| 2021-01-17T10:22:12.384534
| 2020-06-19T22:10:29
| 2020-06-19T22:10:29
| 16,669,874
| 30
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 990
|
r
|
completeVecs.R
|
#' Extract the `complete.cases` for a Set of Vectors
#'
#' Takes vectors as input and outputs a matrix of the complete cases across
#' these vectors.
#'
#'
#' @param \dots The vectors that need to be combined.
#' @return A matrix with the same number of columns as there are input vectors.
#' @note Short vectors are recycled without warnings. If your vectors are
#' different lengths, you should decide whether this is a desirable behavior or
#' not before using this function.
#' @author Ananda Mahto
#' @references <http://stackoverflow.com/a/20146003/1270695>
#' @examples
#'
#' A <- c(12, 8, 11, 9, NA, NA, NA)
#' B <- c(NA, 7, NA, 10, NA, 11, 9)
#'
#' completeVecs(A, B)
#'
#' C <- c(1, 2, NA)
#'
#' completeVecs(A, B, C)
#'
#' @export completeVecs
completeVecs <- function(...) {
  myList <- list(...)
  # Column names come from the deparsed calling expressions.
  Names <- sapply(substitute(list(...)), deparse)[-1]
  # Rows are complete where Reduce("+") over the vectors is non-NA (NA
  # propagates through +). drop = FALSE is the bug fix: without it, a result
  # with exactly one complete row collapsed to a bare vector and the
  # colnames<- assignment below failed.
  out <- suppressWarnings(
    do.call(cbind, myList)[!is.na(Reduce("+", myList)), , drop = FALSE])
  colnames(out) <- Names
  out
}
|
b3ac58d802779c5190af25d2398b6c8a407bfd26
|
ae0bb1ca6a4cad154695d5145e14496c336b131a
|
/pctcurves/pctcurves/man/pct_vs_t_fun_logit_quad.Rd
|
78ec772538f068e31fe08fced16f5fa3f0a445ef
|
[] |
no_license
|
Sempa/assaypctcurves
|
fee70b13c3ebe1eb113a5d31a9b2324d7c4fa98b
|
5f74d48be3e405a210fb4fa2ac86690b42865047
|
refs/heads/main
| 2023-07-26T22:08:27.231553
| 2021-09-10T14:43:15
| 2021-09-10T14:43:15
| 404,289,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,799
|
rd
|
pct_vs_t_fun_logit_quad.Rd
|
\name{pct_vs_t_fun_logit_quad}
\alias{pct_vs_t_fun_logit_quad}
\title{
Percentiles generated from P_r(t), using a glm with a logit link function
%% ~~function to do ... ~~
}
\description{
A function to create percentile curves using a glm with a logit link function, with time since infection entering as a quadratic term
}
\usage{
func_logit_quad(data_set, ODnTh, t_since_inf, percentile)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data_set}{
The dataset to be evaluated. It should include the following variables: eddi (time since infection), viral_load (viral load) and ODn (recency test result)
%% ~~Describe \code{x} here~~
}
\item{ODnTh}{
The ODn resolution you need to evaluate the probability of being recent at time t (P_r(t))
%% ~~Describe \code{x} here~~
}
\item{t_since_inf}{
Time points within the inter-test interval at which the function should be evaluated. In this case we use daily time steps from 1 to 1000. These time steps are preferred because the aim is to create a dataset of discrete time steps for use in the likelihood function.
%% ~~Describe \code{x} here~~
}
\item{percentile}{
Sequence of percentiles from 0.1 to 0.9 in steps of 0.1
%% ~~Describe \code{x} here~~
}
}
\details{
The function uses uniroot, which estimates the roots (time points) at which each percentile holds for the func_logit_quad fit
%% ~~ If necessary, more details than the description above ~~
}
\value{
The function returns a data frame that includes the time points at which each percentile holds for the func_logit_quad fit.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Joseph B. Sempa
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{uniroot}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
%##---- Should be DIRECTLY executable !! ----
%##-- ==> Define data, use random,
%##-- or do help(data=index) for the standard data sets.
ODnth <- seq(0.01, 5, 0.01)#c(.5, 1, 1.5, 2, 2.5, 3, 3.5, 4)#
time_var <- seq(1, 1000, 1)
x <- data.frame(pct_vs_t = pct_vs_t_fun_logit_quad(
data_set = data_generate_pr_t,
ODnTh = ODnth,
t_since_inf = time_var,
percentile = seq(0.1, 0.9, 0.1)
))
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
4b3d9dfd5af1baaafeb1c87932a065feb471cac9
|
9eac9f8e7495d916f7596c4444461521b1a39086
|
/scripts/plot_Gviz.R
|
ad8e9a293f9beeb21b93b36421d556ecd2dca112
|
[
"Apache-2.0"
] |
permissive
|
uniqueg/scripts
|
bbb42d455196f8e047df2681661a02d38e4a762f
|
9fdcb93f740c0d353b8f9c0fe3ceab6a941af87d
|
refs/heads/master
| 2023-04-08T17:08:00.911197
| 2023-03-16T08:46:40
| 2023-03-16T08:46:40
| 211,389,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,070
|
r
|
plot_Gviz.R
|
## PLOT YH2AX CLUSTERS AND ASISI SITES
## Reads induced yH2AX cluster calls and AsiSI restriction-site coordinates
## as GRanges objects, then draws per-chromosome Gviz annotation tracks.
# Import .bed files as GRanges objects.
# NOTE(review): the asRangedData argument was removed from recent
# rtracklayer releases; this call only works with older Bioconductor
# versions -- confirm the pinned environment.
library(rtracklayer)
yH2Ax <- import("clusters_100000_z_cutoff_induced.bed", genome="hg19", asRangedData=FALSE)
AsiSI <- import("AsiSI_orig.bed", genome="hg19", asRangedData=FALSE)
# Extract genome and chromosome information
gen <- as.character(unique(genome(c(yH2Ax,AsiSI))))
chr <- names(genome(yH2Ax))
chr <- chr[!grepl("_", chr)] # Remove incomplete/unknown chromosomes
chr <- chr[!grepl("chrM", chr)] # Remove mitochondrial chromosome
# Build the shared Gviz tracks once, then plot every chromosome.
library(Gviz)
gTrack <- GenomeAxisTrack()
iTrack <- IdeogramTrack(genome=gen, chromosome=chr, showId=FALSE, fill=FALSE, col=FALSE)
yH2AxTrack <- AnnotationTrack(yH2Ax, name="yH2Ax clusters", stacking="dense")
AsiSITrack <- AnnotationTrack(AsiSI, name="AsiSI recognition sites", stacking="dense", fill="orange")
# Uncomment the pdf()/dev.off() lines to save one file per chromosome.
lapply(chr, function(x) {
#pdf(paste(x,".pdf", sep=""), width=12, height=3)
plotTracks(list(iTrack, gTrack, yH2AxTrack, AsiSITrack), from=NULL, to=NULL, chromosome=x)
#dev.off()
})
## PLOT YH2AX CLUSTERS AND ASISI SITES
# Import .bed files as GRanges objects.
# NOTE(review): the `asRangedData` argument was removed from
# rtracklayer::import() (GRanges is the default return type); passing it
# errors on current Bioconductor releases, so the calls below drop it.
library(rtracklayer)

# Helper: import one hg19 BED file as a GRanges object (removes the
# repetition of identical import() calls).
import_hg19_bed <- function(path) import(path, genome = "hg19")

yH2Ax_i <- import_hg19_bed("clusters_100000_z_cutoff_induced.bed")
yH2Ax_u <- import_hg19_bed("clusters_100000_z_cutoff_uninduced.bed")
AsiSI <- import_hg19_bed("AsiSI_orig.bed")
yH2Ax_AsiSI_i <- import_hg19_bed("overlap_clusters_100000_induced_AsiSI.bed")
yH2Ax_AsiSI_u <- import_hg19_bed("overlap_clusters_100000_uninduced_AsiSI.bed")
Overlap_iu <- import_hg19_bed("overlap_clusters_100000_induced_vs_uninduced.bed")
Iacovoni_i <- import_hg19_bed("yH2Ax_domains_Iacovoni_EMBO_hg19_induced.bed")
Iacovoni_u <- import_hg19_bed("yH2Ax_domains_Iacovoni_EMBO_hg19_uninduced.bed")
Overlap_Iacovoni_i <- import_hg19_bed("overlap_ChIP_with_Iacovoni_EMBO_induced.bed")
Overlap_Iacovoni_u <- import_hg19_bed("overlap_ChIP_with_Iacovoni_EMBO_uninduced.bed")

# Extract genome and chromosome information.
gen <- as.character(unique(genome(AsiSI)))
chr <- names(genome(AsiSI))
chr <- chr[!grepl("_", chr)]     # remove incomplete/unknown chromosomes
chr <- chr[!grepl("chrM", chr)]  # remove the mitochondrial chromosome

# Build Gviz annotation tracks. Fill convention: induced = default,
# uninduced = orange, AsiSI sites = green, overlaps = red.
library(Gviz)
gTrack <- GenomeAxisTrack()
iTrack <- IdeogramTrack(genome = gen, chromosome = chr, showId = FALSE, fill = FALSE, col = FALSE)
yH2Ax_i_Track <- AnnotationTrack(yH2Ax_i, name = "yH2Ax clusters, induced", stacking = "dense")
yH2Ax_u_Track <- AnnotationTrack(yH2Ax_u, name = "yH2Ax clusters, uninduced", stacking = "dense", fill = "orange")
AsiSI_Track <- AnnotationTrack(AsiSI, name = "AsiSI recognition sites", stacking = "dense", fill = "green")
yH2Ax_AsiSI_i_Track <- AnnotationTrack(yH2Ax_AsiSI_i, name = "yH2Ax clusters with AsiSI sites, induced", stacking = "dense")
yH2Ax_AsiSI_u_Track <- AnnotationTrack(yH2Ax_AsiSI_u, name = "yH2Ax clusters with AsiSI sites, uninduced", stacking = "dense", fill = "orange")
Overlap_iu_Track <- AnnotationTrack(Overlap_iu, name = "Overlap induced vs. uninduced", stacking = "dense", fill = "red")
Iacovoni_i_Track <- AnnotationTrack(Iacovoni_i, name = "Iacovoni et al. clusters, induced", stacking = "dense")
Iacovoni_u_Track <- AnnotationTrack(Iacovoni_u, name = "Iacovoni et al. clusters, uninduced", stacking = "dense", fill = "orange")
Overlap_Iacovoni_i_Track <- AnnotationTrack(Overlap_Iacovoni_i, name = "Overlap with Iacovoni et al., induced", stacking = "dense", fill = "red")
Overlap_Iacovoni_u_Track <- AnnotationTrack(Overlap_Iacovoni_u, name = "Overlap with Iacovoni et al., uninduced", stacking = "dense", fill = "red")

# One multi-track PDF per chromosome. dev.off() is registered with on.exit()
# so a plotting error cannot leave the PDF device open.
all_tracks <- list(iTrack, gTrack, yH2Ax_i_Track, yH2Ax_u_Track, AsiSI_Track,
                   yH2Ax_AsiSI_i_Track, yH2Ax_AsiSI_u_Track, Overlap_iu_Track,
                   Iacovoni_i_Track, Iacovoni_u_Track,
                   Overlap_Iacovoni_i_Track, Overlap_Iacovoni_u_Track)
invisible(lapply(chr, function(x) {
  pdf(paste0(x, ".pdf"), width = 12, height = 6)
  on.exit(dev.off(), add = TRUE)
  plotTracks(all_tracks, from = NULL, to = NULL, chromosome = x)
}))
|
1e8eeed3d057f147e70dcd4acb45ab408f500c68
|
10ddc648602995325c6a467d0b17595cb8766fdb
|
/R/pipe.R
|
f591f1876359017573e396469555a40a42b1c120
|
[
"MIT"
] |
permissive
|
rich-iannone/hyper
|
cfd4479ee232aadee89477872e3e814061ce3b34
|
ef5f6e4672a4fe6b8d5d9d4875b04d299159c97a
|
refs/heads/master
| 2021-04-29T17:20:31.200263
| 2018-05-14T04:14:13
| 2018-05-14T04:14:13
| 121,667,337
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
pipe.R
|
#' The magrittr pipe
#'
#' hyper re-exports the magrittr pipe operator, `%>%`, which turns nested
#' function composition into a left-to-right series of imperative
#' statements: `x %>% f() %>% g()` instead of `g(f(x))`.
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
NULL
|
04d653c835f480269c1d4fe24398b2ce5413be69
|
248182be042384a92a64acbe7052e4c0bae54c84
|
/plot2.R
|
4a13cf159d25cb33ffca3fb7c6f23d3f2954054f
|
[] |
no_license
|
ptconroy/ExData_Plotting1
|
ef8b5e574d8f6ace5398c5f127192bb4234d6612
|
9b6cb52023084f2775717e605621b93114ec3afd
|
refs/heads/master
| 2021-01-16T18:17:45.859636
| 2016-07-06T05:30:05
| 2016-07-06T05:30:05
| 62,682,009
| 0
| 0
| null | 2016-07-06T01:40:08
| 2016-07-06T01:40:06
| null |
UTF-8
|
R
| false
| false
| 610
|
r
|
plot2.R
|
library(lubridate)
library(dplyr)

# Plot global active power for 2007-02-01 and 2007-02-02 from the UCI
# household power consumption dataset, writing the result to plot2.png.
# Expects "household_power_consumption.txt" in the working directory
# ("?" marks missing values, fields are ";"-separated).
plot2 <- function() {
  din <- read.table("household_power_consumption.txt", header = TRUE,
                    na.strings = "?", sep = ";")
  # Restrict to the two target days; parse each Date value only once
  # (the original evaluated dmy(Date) twice in the filter condition).
  target_days <- ymd(c("2007-02-01", "2007-02-02"))
  our_days <- filter(din, dmy(Date) %in% target_days)
  # Combine the Date and Time strings into one POSIXct column for the x axis.
  final <- mutate(our_days,
                  datetime = parse_date_time(paste(Date, Time), "dmYHMS"))
  # Open the PNG device; on.exit() guarantees it is closed even if the
  # plot call errors (the original leaked the device in that case).
  png("plot2.png")
  on.exit(dev.off(), add = TRUE)
  with(final, plot(Global_active_power ~ datetime, xlab = "",
                   type = "l", ylab = "Global Active Power (kilowatts)"))
}
|
8a5b2603d6b9ed7a7301c117726d8214717eb7a2
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125465-test.R
|
beb14046fb3b187ab5a44770b6a583b59bdf33ed
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
1610125465-test.R
|
# libFuzzer/valgrind regression input for grattan::IncomeTax: empty rate and
# threshold schedules with an extreme/Inf income vector.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(8.22753060281186e+62, Inf))
# Invoke the target with the list elements as named arguments and print the
# structure of whatever it returns (the test only checks it runs cleanly).
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.