blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70b7175d031b8a6fbd57fb8434b0ad5002c75a48 | d380feade427b7b16b0d4e6b5d953c858e0d2c4c | /R/tiers.r | 2e8c9b90c1ff9a69dba1e984f20a8f9d5dc6b9c3 | [] | no_license | amcox/star | 3d5588ec7170cf690cba2c2e44bf632fc0ddcf5e | f1eaf596f4175924829cd16bad6eed3502a6ca1c | refs/heads/master | 2020-06-04T07:37:33.777105 | 2015-06-28T16:36:17 | 2015-06-28T16:36:17 | 24,352,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 982 | r | tiers.r | library(dplyr)
library(tidyr)
library(stringr)
library(lubridate)
# Source every file found in the "functions" sub-directory, then restore
# the working directory.  NOTE(review): if one source() call fails, the
# working directory is not restored -- consider on.exit().
update_functions <- function() {
old.wd <- getwd()
setwd("functions")
sapply(list.files(), source)
setwd(old.wd)
}
update_functions()
# students_with_tiers() comes from the sourced "functions" directory;
# it returns rows with small.school, subject, student_number and tier.num
# columns, where tier.num == NA means tier 1 (see perc.t1 and the NA -> 1
# recode below) -- structure otherwise not visible here.
db <- students_with_tiers()
# Find the number of students at each tier for each small school
dc <- db %>% group_by(small.school, subject) %>%
summarize(total.students=n(),
total.tiered.students=sum(!is.na(tier.num)),
t2.students=sum(tier.num == 2, na.rm=T),
t3.students=sum(tier.num == 3, na.rm=T),
perc.t1=round((sum(is.na(tier.num)) / length(tier.num)), digits=2),
perc.t2=round((sum(tier.num == 2, na.rm=T) / length(tier.num)), digits=2),
perc.t3=round((sum(tier.num == 3, na.rm=T) / length(tier.num)), digits=2)
)
# save_df_as_csv() is also defined in the sourced "functions" directory.
save_df_as_csv(dc, 'RTI Tier Percentages')
# Save file for tier lookup
d <- db %>% select(student_number, subject, tier.num)
# Untiered students (NA) are treated as tier 1.
d$tier.num[is.na(d$tier.num)] <- 1
# Wide lookup table: one column per subject, values are tier numbers.
dw <- d %>% spread(subject, tier.num)
save_df_as_csv(dw, 'tiers')
|
56ad2872511e680f892efb8f4301b4005cbc2aee | fd63587c1cf67cce9bd2aef18508fa368dd9aa5a | /step1_getDEG.R | 49daf3d2f85453cd03ac2d3ff76997539c631172 | [] | no_license | uui23/RNAseq-note | 54a64e3f608e43e0c7d55725f622f99e143b459c | f929d0a2fca39c05aa326e806abddc139833e8fc | refs/heads/master | 2020-09-29T09:38:50.040562 | 2020-03-27T05:07:37 | 2020-03-27T05:07:37 | 227,011,489 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,234 | r | step1_getDEG.R | # step1 getDEG
# input expSet
# output deseq2/edger (data.frame)
# rpkm>1 expSet
# means_expSet
# step1
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this
# script is sourced -- generally discouraged in scripts.
rm(list = ls())
options(stringsAsFactors = F)
# Gene lengths: column 1 = gene id, column 2 = length (used for RPKM).
genelength <- read.table("./src/mm10.length", stringsAsFactors = F)
# Raw htseq-count matrix: rows = genes, columns = the 4 samples.
exprSet <- read.delim("./src/expSet_htSeq.txt", row.names=1, stringsAsFactors = F)
#manual
# Keep only the first 37315 rows (presumably drops the htseq summary
# lines at the bottom of the file -- TODO confirm the cut-off).
exprSet.sub<-exprSet[1:37315,]
# Column 5 holds each gene's length (0 when no length is known).
exprSet.sub[,5] <- 0
exprSet.sub[genelength[,1],5] <- genelength[,2]
# Library size (total counts) per sample.
depth<-colSums(exprSet.sub[,-5])
# RPKM = count / (gene length * library size) * 1e9.
RPKM<-t(apply(exprSet.sub, 1, function(x){x[1:4]/(x[5]*depth)*10^9}))
# Keep genes with RPKM > 1 in all 4 samples.
RPKM.selected <- RPKM[apply(RPKM, 1, function(x){sum(x>1)})==4,] #matrix
exprSet.rpkm <- RPKM
a <- as.data.frame(RPKM.selected)
# Per-condition means: ko4/ko5 are the knockout samples, ch/c7 controls.
a$mean_ko= (a$ko4 + a$ko5) / 2
a$mean_ctrl = (a$ch + a$c7) / 2
expset.mean <- a
save(expset.mean,exprSet.rpkm,RPKM.selected,file='res1_getDEG.R')
load(file='res1_getDEG.R')
# NOTE(review): the bare symbol 'check' below is not a function call --
# running this script raises "object 'check' not found" here.
check
load(file='res1_getDEG.R')
exprSet=exprSet.rpkm
group_list=colnames(exprSet)
table(group_list)
if(T){
colnames(exprSet)
pheatmap::pheatmap(cor(exprSet))
group_list
tmp=data.frame(g=group_list)
rownames(tmp)=colnames(exprSet)
# 组内的样本的相似性理论上应该是要高于组间的
# 但是如果使用全部的基因的表达矩阵来计算样本之间的相关性
# 是不能看到组内样本很好的聚集在一起。
pheatmap::pheatmap(cor(exprSet),annotation_col = tmp)
dim(exprSet)
# 所以我这里初步过滤低表达量基因。
RPKM.selected <- RPKM[apply(RPKM, 1, function(x){sum(x>1)})==4,]
exprSet.test=exprSet[apply(exprSet,1, function(x) sum(x>1) > 4),]
exprSet.test <- exprSet[apply(exprSet, 1, function(x){sum(x>1)})==4,]
dim(exprSet)
exprSet=log(edgeR::cpm(exprSet)+1)
dim(exprSet)
# 再挑选top500的MAD值基因
exprSet=exprSet[names(sort(apply(exprSet, 1,mad),decreasing = T)[1:500]),]
dim(exprSet)
M=cor(log2(exprSet+1))
tmp=data.frame(g=group_list)
rownames(tmp)=colnames(M)
pheatmap::pheatmap(M,annotation_col = tmp)
# 现在就可以看到,组内样本很好的聚集在一起
# 组内的样本的相似性是要高于组间
pheatmap::pheatmap(M,annotation_col = tmp,filename = 'cor.png')
library(pheatmap)
pheatmap(scale(cor(log2(exprSet+1))))
} |
5efa0b62598ff896a1a17329be0601cb99ab0471 | 6008969de07750cd1aac39df809a4d802071d3a4 | /man/WGCNA_dissTOM.Rd | 0173dbe2582d77f628cc614baa417780ef0f0d43 | [] | no_license | Heng19/OmicPath | 3d3d07f8e0e6debcf7399b2dfd0f75abf224e3f5 | 851c0b6cc3eaff972e26165947c00fd7efcd0cb2 | refs/heads/master | 2023-02-02T21:28:46.961459 | 2020-12-19T17:37:18 | 2020-12-19T17:37:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 455 | rd | WGCNA_dissTOM.Rd | \name{WGCNA_dissTOM}
\alias{WGCNA_dissTOM}
\title{
get dissTOM matrix
}
\description{
Processes a topological-overlap dissimilarity (dissTOM) matrix for the given modules, reading input from \code{indir} and writing results to \code{outdir}.
}
\usage{
WGCNA_dissTOM(tom=tom, modules=modules, indir=indir, outdir=outdir);
}
\arguments{
\item{tom}{dissTOM matrix}
\item{modules}{module file vector}
\item{indir}{input directory}
\item{outdir}{output directory}
}
\author{
Ying Hu <yhu@mail.nih.gov>
Chunhua Yan <yanch@mail.nih.gov>
}
\references{
##
}
\examples{
##
}
|
f5a2d3c6083616de1d7f120386e2fa58fdf813ac | 55315799e4b5bcb382167eb4d31d8bd4ad0690fc | /trend_resistance.R | 706cdd80331d38894845be18439959b7a0f0810a | [] | no_license | julianflowers/fes2014 | 218d034cd11d666bdc2f3d82089073b80f505d47 | 4bc7c117a4f3fbee1fa3a995b88ef1c3f202c00f | refs/heads/master | 2021-01-18T10:57:27.193399 | 2014-02-28T17:28:58 | 2014-02-28T17:28:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,209 | r | trend_resistance.R | # Figure 2: trend in resistances by organism
# Aggregate tested/resistant counts per year and organism, reshape to one
# row per year/organism/antibiotic, attach exact binomial percentages with
# 95% CIs, and plot one faceted panel per organism.
# (ddply/melt/dcast come from plyr/reshape2; binom.confint from the binom
# package; simpleCap is defined elsewhere in the project -- not visible here.)
yr.trend <- ddply(df, .(year, organism.name), summarise,
ampamox.tested = sum(ampamox.tested, na.rm = TRUE),
ampamox.resistant = sum(ampamox.resistant, na.rm = TRUE),
cla.tested = sum(cla.tested, na.rm = TRUE), cla.resistant = sum(cla.resistant, na.rm = TRUE),
dox.tested = sum(dox.tested, na.rm = TRUE), dox.resistant = sum(dox.resistant, na.rm = TRUE)
# rec_cef.tested = sum(rec.cef.tested, na.rm = TRUE),
# rec_cef.resistant = sum(rec.cef.resistant, na.rm = TRUE)
)
m.yr.trend <- melt(yr.trend, id.vars = c("year", "organism.name"))
# Antibiotic code = variable name without the ".tested"/".resistant" suffix.
m.yr.trend$abx <- sub("\\.\\w+$","", m.yr.trend$variable)
# Comparing the logical columns against "TRUE" coerces them to character,
# so the levels used by dcast below are "tested"/"FALSE" and
# "resistant"/"FALSE".
m.yr.trend$tested <- grepl("tested", m.yr.trend$variable)
m.yr.trend$tested[m.yr.trend$tested == "TRUE"] <- "tested"
m.yr.trend$resistant <- grepl("resistant", m.yr.trend$variable)
m.yr.trend$resistant[m.yr.trend$resistant == "TRUE"] <- "resistant"
m.yr.trend$variable <- NULL
yr.trend.2 <- dcast(m.yr.trend, year + organism.name + abx ~ tested + resistant, value.var = "value")
names(yr.trend.2) <- c("Year", "Organism", "Antibiotic", "Resistant", "Tested")
yr.trend.2$Organism <- simpleCap(yr.trend.2$Organism)
# Exact binomial point estimate and 95% CI, expressed as percentages.
yr.trend.2$pc.resistant <- binom.confint(yr.trend.2$Resistant, yr.trend.2$Tested, methods = "exact")$mean*100
yr.trend.2$lci <- binom.confint(yr.trend.2$Resistant, yr.trend.2$Tested, methods = "exact")$lower*100
yr.trend.2$uci <- binom.confint(yr.trend.2$Resistant, yr.trend.2$Tested, methods = "exact")$upper*100
# Very low numbers of S. aureus tested against cephalosporins anyway, no point including.
yr.trend.2$drop <- 0
yr.trend.2$drop[yr.trend.2$Organism == "Staphylococcus aureus" & yr.trend.2$Antibiotic == "rec_cef"] <- 1
yr.trend.2 <- yr.trend.2[yr.trend.2$drop == 0,]
yr.trend.2$drop <- NULL
head(yr.trend.2)
# Human-readable antibiotic labels (with line breaks for the legend).
yr.trend.2$Antibiotic[yr.trend.2$Antibiotic == "ampamox"] <- "Ampicillin/\namoxicillin"
yr.trend.2$Antibiotic[yr.trend.2$Antibiotic == "cla"] <- "Clarithromycin"
yr.trend.2$Antibiotic[yr.trend.2$Antibiotic == "dox"] <- "Doxycycline"
yr.trend.2$Antibiotic[yr.trend.2$Antibiotic == "rec_cef"] <- "Any recommended \n cephalosporin"
#yr.trend.2$Antibiotic[yr.trend.2$Antibiotic == "Any recommended \\n cephalosporin"] <- "Any recommended \n cephalosporin" #oops
yr.trend.2 <- subset(yr.trend.2, Antibiotic != "Any recommended \n cephalosporin")
f2 <- ggplot(yr.trend.2, aes(x = Year, y = pc.resistant, group = Antibiotic)) + facet_wrap(~Organism) +
geom_line(aes(colour = Antibiotic)) +
geom_errorbar(aes(ymax = uci, ymin = lci, colour = Antibiotic), width = 0.25)
# 300 dpi PNG for the presentation (absolute Windows path).
png("F:\\antimicrobial resistance\\Simon\\thorax2\\FES2014\\presentation\\trend_resistance.png",
width = 3060, height = 2295, res = 300)
f2 + theme_grey(base_size = 10) +
theme(strip.text.x = element_text(face = 'italic'), legend.position = c(0.84,0.86),
legend.background = element_rect(fill = "#ffffffaa", colour = NA),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
scale_y_continuous("Per cent resistant", limits = c(0,100))
dev.off()
# Free the intermediates.
rm(m.yr.trend, yr.trend, yr.trend.2, f2)
gc()
|
6676e4a6d42f6ad65eaa4d0b51a7c7287b288f50 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/plsRglm/examples/confints.bootpls.Rd.R | d97ee22732f882a1c6a8aa6487bf4ba75e32cf26 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 429 | r | confints.bootpls.Rd.R | library(plsRglm)
### Name: confints.bootpls
### Title: Bootstrap confidence intervals
### Aliases: confints.bootpls
### Keywords: regression models
### ** Examples
## No test:
data(Cornell)
#Lazraq-Cleroux PLS (Y,X) bootstrap
set.seed(250)
modpls <- plsR(Y~.,data=Cornell,3)
Cornell.bootYX <- bootpls(modpls, R=250)
confints.bootpls(Cornell.bootYX,2:8)
confints.bootpls(Cornell.bootYX,2:8,typeBCa=FALSE)
## End(No test)
|
3396a9dfd5c6523bf80252e23ef45dfa68c172fb | b89af8da51f82685f908354211e17308ba77695c | /man/plotTraceback.Rd | b786bf139d8802a26aa606f680219f8a3256b2e1 | [] | no_license | walter-ca/Scale4C | 3ab8706c587e0f92ab5abdc49e035fab0c50cbfe | 380401c92715ae1bc3a6510094c346d0be287c4e | refs/heads/master | 2021-01-23T00:48:47.575850 | 2017-06-06T17:04:22 | 2017-06-06T17:04:22 | 92,850,120 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,723 | rd | plotTraceback.Rd | \name{plotTraceback}
\alias{plotTraceback}
\alias{plotTraceback,Scale4C-method}
\title{Draw the traceback results for a list of singular points on a fingerprint
map}
\description{
This method plots the traceback results together with fingerprint data, allowing
to check for possible errors during tracking. Problems during tracking can occur
if contours are very close, have holes, or if the singularity in question is not
recognized at all due to holes at the meeting point of both contours that form
a singular point. Each singular point is marked with a grey triangle, and the
traced left and right end of the corresponding feature are connected with grey
lines. If a contour's end doesn't match the traceback line, manual correction is
possible in the singularity list.
}
\usage{plotTraceback(data, maxSQSigma = -1, fileName = "tracebackPlot.pdf",
width = 15, height = 15, useIndex = TRUE)}
\arguments{
\item{data}{\code{Scale4C} object with singularity data}
\item{maxSQSigma}{Maximum square sigma (i.e. maximum y value) to be drawn;
if -1 then all available rows in the fingerprint map are used}
\item{fileName}{Optional name for export file (pdf)}
\item{width}{Width of the plot}
\item{height}{Height of the plot}
\item{useIndex}{If TRUE, use fragment index for x-axis}
}
\value{A traceback plot, showing the traced singular points with their points of
origin throughout different smoothing layers)}
\note{
PDF export is supported. If no plot file name is provided, the result is
plotted on screen.
}
\examples{
if(interactive()) {
data(liverData)
plotTraceback(liverData)
}
}
\author{Carolin Walter}
\keyword{plotTraceback}
|
1095cd4ee3d74a6ba1d0703ade7ce3b9c736dd9f | f06e2420351b99cbd18cbbc0d07a7ab1114415c3 | /SimpleInterestRate.R | 032629fa0158e3f60befb9757cf803058825f7cf | [
"MIT"
] | permissive | ChrisJones687/StockAnalysis | 1e1ea50b0c4a00a136899900e9fa2406dfe7bd0c | d2ed323da798428c81428a50cd6834329aac525c | refs/heads/master | 2020-03-23T00:03:47.741898 | 2018-07-23T18:33:13 | 2018-07-23T18:33:13 | 140,841,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | SimpleInterestRate.R | rate <- c(.03,0.03,0.027,0.0255,0.031,0.035, 0.033,0.019)
# Principal invested at each corresponding interest rate ('rate' is
# defined above this block).
amount <- c(3000, 5000, 5000, 5000, 2000, 5000, 1000, 21000)
# Per-investment simple returns.
yearly_return <- rate * amount
monthly_return <- yearly_return / 12
# Totals (auto-printed when run interactively).
sum(yearly_return)
sum(monthly_return)
|
8e734caa0b7535577bbcf95f74257a731e7cf891 | 66279694b5b098eb05bbf369056389634110f411 | /plot1.R | e14bba60ea1582bfd4eaaa77b351ebe11e56260a | [] | no_license | kriskrauss/ExData_Plotting1 | 6b960d55d80518d951cb8bbdb4fd392b07a153a1 | badaf04d2ff9993a23e48f496c6d0f725bb96cb9 | refs/heads/master | 2020-12-30T19:11:20.569320 | 2014-07-14T01:09:05 | 2014-07-14T01:09:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 739 | r | plot1.R | library(data.table)
# Histogram of Global Active Power for 1-2 Feb 2007 from the household
# power consumption data, saved as a 480x480 PNG (requires data.table).
## Read data using data.table; much faster
# "?" marks missing values in the source file.
DT<-fread('household_power_consumption.txt',header=TRUE,sep=";",na.strings="?")
## subset to only what we want
DT2<-data.table(DT[(DT$Date=="1/2/2007" | DT$Date=="2/2/2007")])
## Create a datetime column combining date and time
DT2[,DateTime:=as.POSIXct(strptime(paste(Date,Time,sep=","),"%d/%m/%Y,%H:%M:%S"))]
## Convert Global_active_power to be numeric
DT2[,Global_active_power:=as.numeric(Global_active_power)]
## create a png file
png("plot1.png", width=480, height=480)
## plot histogram for Global_active_power
hist(DT2$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
## remember to close the device
dev.off()
|
cfb566baedd528e4209a7572dfab161d968a5f18 | 560fc7acf20ba46c777357fda77a0785bac13f4f | /RPEXE Version 2.0_yz/R version/pava_dfr.R | e2eb1d0ba95b15a1ca9ca49c0a01d5b370208ad6 | [] | no_license | zhangyuwinnie/SurvivalAnalysis_Matlab2R | a83dcafd5d9bb2370b92c0c71790b8a3f6c37c1f | e06eac7b601af0ad6565373b44d8ec4bb087bd6a | refs/heads/master | 2021-01-12T09:13:07.245783 | 2016-12-18T19:41:21 | 2016-12-18T19:41:21 | 76,799,674 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,346 | r | pava_dfr.R | #merge certain entries to make the sequence of ttot to be non decreasing
# Merge adjacent entries (pool-adjacent-violators) so that the sequence of
# average times on test, ttot/deaths, becomes non-decreasing.
#
# Input:
#   time_die == a sequence of times where deaths happened.
#   ttot     == the total time on test at each time point.
#   deaths   == the number of deaths at each time point.
# Output: a matrix with columns
#   time2   == the merged time_die
#   ttot2   == the merged ttot
#   deaths2 == the merged deaths
pava_dfr <- function(time_die, ttot, deaths) {
  len <- length(ttot)
  # A single entry is already monotone; the original code had a dedicated
  # len == 1 branch whose assignments were dead (immediately overwritten).
  if (len > 1) {
    # At most 'len' passes: each pass merges every adjacent violating pair
    # (cascading left to right) and then drops the merged-out slots.
    for (pass in 1:len) {
      n <- length(ttot)
      if (n > 1) {
        for (i in 1:(n - 1)) {
          # Violation: the average time on test decreases from i to i+1.
          if ((ttot[i] / deaths[i]) > (ttot[i + 1] / deaths[i + 1])) {
            # Pool the pair into slot i+1 and zero out slot i for removal.
            deaths[i + 1] <- deaths[i] + deaths[i + 1]
            deaths[i] <- 0
            ttot[i + 1] <- ttot[i] + ttot[i + 1]
            ttot[i] <- 0
          }
        }
        # Drop the zeroed slots in one vectorised step instead of growing
        # an index vector element by element as the original did.
        merged <- ttot == 0
        if (any(merged)) {
          ttot <- ttot[!merged]
          deaths <- deaths[!merged]
          time_die <- time_die[!merged]
        }
      }
    }
  }
  cbind(time2 = time_die, ttot2 = ttot, deaths2 = deaths)
}
|
20ba0e147c210fd7d7af5f702d9b865c804b812a | 072abdc10047d070030f0892815196d28e300b6a | /Scripts/GeneEnrichment.R | ebb459600ad68afc7571f61b785f757d32527e97 | [] | no_license | elswob/rna-seq-pipe | 1cdadab2c7cc35469939d73c92884cbb757a122a | 45c3d7c6a483ac9d5b1ac3c83401f550fd538361 | refs/heads/master | 2020-12-31T06:09:15.266545 | 2017-02-01T11:48:24 | 2017-02-01T11:48:24 | 80,614,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,695 | r | GeneEnrichment.R | library(org.Mm.eg.db)
library(org.Hs.eg.db)
library(AnnotationDbi)
library(GOstats)
library(GSEABase)
library(GO.db)
library(DBI)
# Positional command-line interface: args[6] = pipeline directory,
# args[7] = species (the value "Mouse" selects the org.Mm annotation in
# the branch below).
args <- commandArgs();
pipeDir<-as.character(args[6])
species<-as.character(args[7])
# Helper functions used below (e.g. module.enrichment.analysis) are
# defined in these sourced scripts.
source(paste(pipeDir,"/Scripts/GOanalysis.R",sep=""))
source(paste(pipeDir,"/Scripts/GeneSetCollection-methods.R",sep=""))
source(paste(pipeDir,"/Scripts/import_superclusters.R",sep=""))
# All non-hidden files in the working directory are taken as DEG lists and
# grouped by comparison after stripping the EdgeR naming components.
fname=dir(pattern="^[^.].*$")
filetype=gsub("EdgeR_|Downreg_|Upreg_|_FDR_0.05.tsv"," ",fname)
ffname<-split(fname,filetype)
titles <- ffname
dir.create("GO_Analysis")
# Prefix each grouped file name with "<group> - ".
for(i in 1:length(titles)) {
titles[[i]] <- sub(names(titles)[i], paste0(names(titles)[i], " - "), titles[[i]])
}
#titles2<-gsub(names("-enrichment.tsv|-|_"titles)[i])
# NOTE(review): the line below repeats the loop body outside the loop,
# relying on the leftover value of i (the last index), so the substitution
# is applied a second time to the last element only -- TODO confirm intended.
titles[[i]] <- sub(names(titles)[i], paste0(names(titles)[i], " - "), titles[[i]])
if(species=="Mouse"){
print("Running mouse")
for (i in 1:length(ffname)){
gsm1<-readLines(ffname[[i]][1])
gsmm1<-sub("[.0-9].$","",gsm1)
gsmmm1<-sub("\\.", "", gsmm1)
geneIdType <- ENSEMBLIdentifier("org.Mm.eg.db")
title<-gsub("_EdgeR_|_FDR0.05.tsv|_|-","_",titles[[i]][1])
genesetlist<-GeneSet(gsmmm1, geneIdType=geneIdType, setName=title)
geneIdType(genesetlist) <- EntrezIdentifier("org.Mm.eg.db")
res <- GeneSetCollection(genesetlist)
title1<-gsub("Downreg_|EdgeR_|_FDR_0.05.tsv","",titles[[i]][1])
outdir<-paste0("GO_Analysis/",title1)
module.enrichment.analysis(res, test.type="BP",outdir=outdir)
module.enrichment.analysis(res, test.type="CC",outdir=outdir)
module.enrichment.analysis(res, test.type="MF",outdir=outdir)
gsm1<-readLines(ffname[[i]][2])
gsmm1<-sub("[.0-9].$","",gsm1)
gsmmm1<-sub("\\.", "", gsmm1)
geneIdType <- ENSEMBLIdentifier("org.Mm.eg.db")
title<-gsub("_EdgeR_|_FDR0.05.tsv|_|-","_",titles[[i]][2])
genesetlist<-GeneSet(gsmmm1, geneIdType=geneIdType, setName=title)
geneIdType(genesetlist) <- EntrezIdentifier("org.Mm.eg.db")
res <- GeneSetCollection(genesetlist)
title1<-gsub("Downreg_|EdgeR_|_FDR_0.05.tsv","",titles[[i]][1])
outdir<-paste0("GO_Analysis/",title1)
module.enrichment.analysis(res, test.type="BP",outdir=outdir)
module.enrichment.analysis(res, test.type="CC",outdir=outdir)
module.enrichment.analysis(res, test.type="MF",outdir=outdir)
}
}else{
print("Running human")
for (i in 1:length(ffname)){
gsm1<-readLines(ffname[[i]][1])
gsmm1<-sub("[.0-9].$","",gsm1)
gsmmm1<-sub("\\.", "", gsmm1)
geneIdType <- ENSEMBLIdentifier("org.Hs.eg.db")
title<-gsub("_EdgeR_|_FDR0.05.tsv|_|-","_",titles[[i]][1])
genesetlist<-GeneSet(gsmmm1, geneIdType=geneIdType, setName=title)
geneIdType(genesetlist) <- EntrezIdentifier("org.Hs.eg.db")
res <- GeneSetCollection(genesetlist)
title1<-gsub("Downreg_|EdgeR_|_FDR_0.05.tsv","",titles[[i]][1])
outdir<-paste0("GO_Analysis/",title1)
module.enrichment.analysis(res, test.type="BP",outdir=outdir)
module.enrichment.analysis(res, test.type="CC",outdir=outdir)
module.enrichment.analysis(res, test.type="MF",outdir=outdir)
gsm1<-readLines(ffname[[i]][2])
gsmm1<-sub("[.0-9].$","",gsm1)
gsmmm1<-sub("\\.", "", gsmm1)
geneIdType <- ENSEMBLIdentifier("org.Hs.eg.db")
title<-gsub("_EdgeR_|_FDR0.05.tsv|_|-","_",titles[[i]][2])
genesetlist<-GeneSet(gsmmm1, geneIdType=geneIdType, setName=title)
geneIdType(genesetlist) <- EntrezIdentifier("org.Hs.eg.db")
res <- GeneSetCollection(genesetlist)
title1<-gsub("Downreg_|EdgeR_|_FDR_0.05.tsv","",titles[[i]][1])
outdir<-paste0("GO_Analysis/",title1)
module.enrichment.analysis(res, test.type="BP",outdir=outdir)
module.enrichment.analysis(res, test.type="CC",outdir=outdir)
module.enrichment.analysis(res, test.type="MF",outdir=outdir)
}
} |
9dea36020b19b9344dcb3adb2bb6500687548c99 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/extraDistr/examples/Kumaraswamy.Rd.R | 63e725d931e2f7790a137a1770471bd400bab429 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 390 | r | Kumaraswamy.Rd.R | library(extraDistr)
### Name: Kumaraswamy
### Title: Kumaraswamy distribution
### Aliases: Kumaraswamy dkumar pkumar qkumar rkumar
### Keywords: distribution
### ** Examples
x <- rkumar(1e5, 5, 16)
hist(x, 100, freq = FALSE)
curve(dkumar(x, 5, 16), 0, 1, col = "red", add = TRUE)
hist(pkumar(x, 5, 16))
plot(ecdf(x))
curve(pkumar(x, 5, 16), 0, 1, col = "red", lwd = 2, add = TRUE)
|
635f698786f2592a38d83c291294f426c46b4c73 | cf5bd6c2929748d30fbe29fb3ea25f7c7827cd5a | /MAE5870/lista1/3c_sim.R | 407e76afa5b5e058099360a98248312f6d2e37dc | [] | no_license | kayaman/tsa | edfc01fbbb2243f2196b6663807eb0bbea62ee7f | 55dbe3ee7ae0fb3eef51fedbef24c0c85d2cc67f | refs/heads/master | 2020-05-03T22:48:56.243484 | 2019-04-10T01:25:01 | 2019-04-10T01:25:01 | 178,850,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 340 | r | 3c_sim.R | # Lista 1 - Exercicio 3 - item c
# Model: Zt = 0.4*Zt-1 + at - 0.3*at-1 - 0.8*at-2  ==>  ARMA(p, q) with
# p = 1, q = 2  ==>  ARMA(1,2)
# Fixed seed so the simulated series is reproducible.
set.seed(666)
Z_t <- arima.sim(model = list(ar = c(0.4), ma = c(-0.3, -0.8)), n = 1000)
# Three stacked panels: the series, its ACF and its PACF.
par(mfrow = c(3, 1))
ts.plot(Z_t)
# TRUE instead of T: T/F are ordinary variables and can be reassigned.
ar.acf <- acf(Z_t, type = "correlation", plot = TRUE)
ar.pacf <- acf(Z_t, type = "partial", plot = TRUE)
|
f0889f9fa412e1f254e24ca6aaf2b6161eb662a1 | 006800321cf6394099ae929bfe75626b2b22c7b6 | /scripts/script5b.R | 9da142814514945e189be28c3c4647088a60c4a2 | [] | no_license | ChuckBV/ms-now-ppo-kairomone-2017-2yr | ffcc8fe5c5933707a8771ad2e353126fdaa7cd89 | c401449a11e534e80322b72e440e7f1bf69b33d4 | refs/heads/master | 2022-04-10T09:52:54.636896 | 2020-03-14T22:56:23 | 2020-03-14T22:56:23 | 193,812,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,372 | r | script5b.R | #============================================================================
# script5b.R
#
# Examine proportion of empty traps in the summer 2019 trap comparisons
#
# Parts
# 1. Import data set to data frame (line 32)
# 2. Characterize number of NAs, sampling dates, and weekly phenology
# (line 65)
# 3. Pool data across dates, output for SAS, and obtain a table of
# mean and SE
#
#============================================================================
# load libaries
library(tidyverse)
library(lubridate)
library(DescTools)
library(viridis)
library(multcompView)
library(userfriendlyscience)
# load functions
se <- function(number){
sd(number, na.rm = TRUE)/sqrt(sum(!is.na(number), na.rm = TRUE))
}
#== 1. Count data, 2019 trap test ======================================
delta <- read_csv("./data/Y19_delta.csv")
delta <- delta[ ,3:8] # Drop Experiment and Location contants
# Make Treatment an ordered factor with the desired order
delta <- delta %>%
mutate(Treatment = factor(Treatment,
levels = c("WingPhero","WingPpo","WingCombo","DeltaPpo","DeltaCombo","ModPpo","ModCombo")))
levels(delta$Treatment)
delta$StartDate <- as.Date(delta$StartDate)
delta$EndDate <- as.Date(delta$EndDate)
strt_dats <- unique(delta$StartDate)
end_dats <- unique(delta$EndDate)
nmbr_wks <- as.numeric(sum(end_dats - strt_dats)/7)
delta
#== 2. Determine proportion 0 counts ========================================
delta %>%
group_by(Treatment) %>%
summarise(nZero = sum(Count == 0, na.rm = TRUE),
nObs = sum(!is.na(Count)),
pct_zero = 100*nZero/nObs)
### Answer Brad Higbee's query about 40%
dltppo <- delta %>%
filter(Treatment == "DeltaPpo") %>%
arrange(Count)
# Traps with 0 counts in 18 out of 47 rows
dltppo
# Compare this with delta2, the sums data set used in script5.$ to generate
# Fig. 3
dltppo2 <- delta2 %>%
filter(Treatment == "DeltaPpo") %>%
arrange(Count)
dltppo2
# A tibble: 8 x 4
# Groups: Treatment [1]
# Treatment Replicate Count perwk
# <fct> <dbl> <dbl> <dbl>
# 1 DeltaPpo 2 4 0.519
# 2 DeltaPpo 7 4 0.519
# 3 DeltaPpo 8 4 0.519
# 4 DeltaPpo 6 8 1.04
# 5 DeltaPpo 3 9 1.17
# 6 DeltaPpo 4 12 1.56
# 7 DeltaPpo 5 13 1.69
# 8 DeltaPpo 1 16 2.07 |
16190e6750bbe87f59128d788e2fe79746b405ce | b16d24e80f34c2799def9bea2edb35b78c24b070 | /Marie/progR_readHOP.R | 65662042f734e533adfae7016d1c0f84cbbc5a41 | [] | no_license | kevinwolz/hisafe_R_code | af2763b24041e87aaaa44d4797a5a9a7fe21a9ee | eb6a29da6b70ae52068f7f4b31bb886e23d64a33 | refs/heads/master | 2021-09-13T12:02:30.453605 | 2018-04-29T18:20:07 | 2018-04-29T18:20:07 | 110,239,361 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,933 | r | progR_readHOP.R | empile<-function(...){
N<-unique(unlist(lapply(list(...),names)))
result<-NULL
for(DF in list(...)){
x<-as.data.frame(lapply(N,function(n)if(n %in% colnames(DF)) DF[,n] else NA))
names(x)<-N
result<-rbind(result,x)
}
result
}
# Print a short summary of a data.frame and return, for each variable, its
# name, position, class and number of NAs.
# df: a data.frame (anything else triggers a warning and is coerced).
# Returns a data.frame with columns nom, num, class, nb_NA (one row per
# variable of df; row names are the variable names).
infos <- function(df) {
  # Warning message kept verbatim (in French) to preserve behaviour.
  if (! "data.frame" %in% class(df)) print(paste("attention, c est pas un data.frame mais un", paste(class(df), collapse=" ")))
  df <- as.data.frame(df)
  print(paste(dim(df)[1]," lignes"))
  # seq_along() instead of 1:length(df): avoids the c(1, 0) sequence on a
  # zero-column data.frame.  vapply() instead of sapply(): return type is
  # guaranteed regardless of input.
  data.frame(nom = colnames(df),
             num = seq_along(df),
             class = vapply(df, function(x) class(x)[1], character(1)),
             nb_NA = vapply(df, function(x) sum(is.na(x)), integer(1)))
}
lectureHisAFe<-function(file, readata=TRUE){
#reads one output file
#returns a list with two elements: the information on the variables (top part of the file) and the data (empty data frame if readata=FALSE)
#file: path to one Hi-sAFe export file
#readata: when FALSE, only the variable-definition table is read
toto<-readLines(file)
# Index of the first blank line after line 1 (toto[-1] drops line 1, so
# this index is the file line number minus 1); the data table starts
# right after it.
debutdonnees<-which(toto[-1]=="")[1]
# Variable definitions are assumed to start at file line 8 (skip=7) and
# run up to the blank line -- TODO confirm against the export format.
variables<-read.table(file, skip=7, nrows=debutdonnees-8, header=TRUE, sep="\t", encoding="latin1")
if (readata) {
donnees<-read.table(file, skip=debutdonnees, header=TRUE, sep="\t", encoding="latin1")
# Dates apparently carry a literal "a." prefix (fixed=TRUE: no regex);
# strip it before parsing as dd/mm/yyyy.
donnees$Date<-as.Date(gsub(pattern="a.", replacement="", x=donnees$Date, fixed=TRUE), format="%d/%m/%Y")
# Drop rows whose Date could not be parsed.
donnees<-donnees[!is.na(donnees$Date),]
} else donnees<-data.frame()
return(list(data=donnees, variables=variables))
}
readVirtualExperiment<-function(experimentalplan, folder, profiles=c("trees", "plot", "monthCells")) {
  #reads all the simulations given by the rownames of experimentalplan, in folder folder.
  #profiles: names of the export profiles to read
  #returns an object of class "hop" (Hisafe OutPuts), with one element per
  #output profile, and attributes "experimentalplan" (which was passed as
  #argument) and "definitions" (definitions of the variables)
  names(profiles) <- profiles
  # Path of one profile's export file for one simulation.
  output_file <- function(simu, profile) {
    paste0(folder, "/", simu, "/output-", simu, ".sim", "/", simu, "_", profile, ".txt")
  }
  # Read the definitions of variables only from the first simulation
  # (assumed identical for the whole experiment).
  first_simu <- rownames(experimentalplan)[1]
  variables <- lapply(profiles, function(x) lectureHisAFe(output_file(first_simu, x), readata=FALSE)$variables)
  # Accumulate each simulation's data in a list and rbind once per profile
  # at the end: the original grew the data.frames with rbind inside the
  # loop (O(n^2) copying) via mapply() whose default SIMPLIFY=TRUE can
  # also silently simplify the list of data.frames.
  per_simulation <- vector("list", nrow(experimentalplan))
  for (i in seq_len(nrow(experimentalplan))) {
    simu_name <- rownames(experimentalplan)[i]
    # message() (stderr) instead of print(paste(...)) for status output.
    message("reading simulation ", simu_name)
    per_simulation[[i]] <- lapply(profiles, function(x) lectureHisAFe(output_file(simu_name, x))$data)
  }
  lectures <- lapply(profiles, function(p) do.call(rbind, lapply(per_simulation, function(s) s[[p]])))
  class(lectures) <- c("hop", class(lectures))
  attr(lectures, "definitions") <- variables
  attr(lectures, "experimentalplan") <- experimentalplan
  lectures
}
summary.hop<-function(hop) {
  # Print, for each export profile of a "hop" (Hisafe OutPuts) object, the
  # per-variable summary from infos() merged with the unit and description
  # stored in the object's "definitions" attribute.
  # Invisibly-printed side effects aside, returns the list of profile names.
  definitions <- attr(hop, "definitions", exact = TRUE)
  lapply(names(hop), function(profile) {
    print(profile)
    merged <- merge(infos(hop[[profile]]),
                    definitions[[profile]][, c("NomVariable", "Unité", "Description")],
                    all = TRUE, by.x = "nom", by.y = "NomVariable")
    print(merged[order(merged$num), ])
    profile
  })
}
createdfcoordtime<-function(hop, yaxis=c(dbh="trees"), simulationNames=NULL, DayofYear=NULL, selectid=NULL) {
#prepares the dataframe for plotting, when xaxis is time: collects the variables in the different elements of hop, and reshapes when there are several individuals to get several columns
#returns a data.frame with columns "SimulationName", "xcoord", and 1 or several columns (ycoord and/or ycoord.1, ycoord.2)
#hop: object of class "hop" (Hisafe OutPuts)
#yaxis= named vector, with the names equal to the variables you want to plot and the values equal to the export profile where these variables can be found
#simulationNames (optional): keep only these simulations
#DayofYear (optional): only if xaxis="Time": vector of Julian days, only the data for these days is plotted
#selectid (optional): only for trees or cells: numeric vector to select specific ids of trees or cells to plot (default: all individuals are plotted)
dfcoord<-data.frame()
for (i in 1:length(yaxis)) {
scale<-unname(yaxis[i])
variable<-names(yaxis)[i]
if (scale %in% c("trees", "monthCells")) { #create one column for each tree/cell
# Optional filter on individual (tree/cell) ids, warning on unknown ids.
if (!is.null(selectid)) {
existepas<-setdiff(selectid, hop[[scale]]$id)
if (length(existepas)>0) print(paste("warning: the following ids do not exist ", paste(existepas, collapse=" ")))
selected<-hop[[scale]][hop[[scale]]$id %in% selectid, ]
} else {selected<-hop[[scale]]}
#selected<-selected[!is.na(selected$Date),]
if (!is.null(simulationNames)) selected<-selected[selected$SimulationName %in% simulationNames,]
# Long -> wide: one "ycoord.<id>" column per individual.
titi<-data.frame(xcoord=selected$Date, ycoord=selected[[variable]], id=selected$id, SimulationName=selected$SimulationName)
toto<-reshape(titi, direction="wide", v.names="ycoord", timevar="id", idvar= c("xcoord", "SimulationName"))
} else {
# Plot-level profile: a single "ycoord" column.
toto<-data.frame(xcoord=hop[[scale]]$Date, ycoord=hop[[scale]][[variable]], SimulationName=hop[[scale]][["SimulationName"]])
if (!is.null(simulationNames)) toto<-toto[toto$SimulationName %in% simulationNames,]
}
# Keep only the requested days of year (POSIXlt$yday is 0-based).
if (!is.null(DayofYear)) {toto<-toto[as.POSIXlt(toto$xcoord)$yday %in% DayofYear,]}
dfcoord<-empile(dfcoord, toto)
# NOTE(review): empile() prepends one all-NA row when dfcoord is empty
# (first iteration only), which the line below drops; on later iterations
# (length(yaxis) > 1) it drops a *real* data row -- TODO confirm intended.
dfcoord<-dfcoord[-1,]
# Restore Date class when it was degraded to numeric by the row binding.
if (class(dfcoord$xcoord)=="numeric") dfcoord$xcoord<-as.Date(dfcoord$xcoord, origin="1970-01-01")
}
return(dfcoord)
}
addsimu<-function(formating, dfcoord, col=NULL, lty=NULL, lwd=NULL, pch=NULL, bg=NULL, cex=NULL){
  # Overlay one line and/or point series per simulation on the current plot.
  # formating: data.frame with rownames = simulation names; lines are drawn
  #   only when it has a 'lty' column, points only when it has a 'pch' column.
  # dfcoord: data.frame with columns SimulationName, xcoord and one or more
  #   value columns (ycoord, ycoord.<id>, ...), as built by createdfcoordtime().
  # col/lty/lwd/pch/bg/cex: optional overrides applied to every simulation.
  overrides <- list(col = col, lty = lty, lwd = lwd, pch = pch, bg = bg, cex = cex)
  for (nm in names(overrides)) {
    if (!is.null(overrides[[nm]])) formating[[nm]] <- overrides[[nm]]
  }
  value_cols <- setdiff(names(dfcoord), c("xcoord", "SimulationName"))
  for (simu_name in rownames(formating)) {
    fmt <- formating[simu_name, ]
    print(simu_name)
    keep <- dfcoord$SimulationName == simu_name
    xs <- dfcoord$xcoord[keep]
    for (v in value_cols) {
      ys <- dfcoord[keep, v]
      if (!is.null(fmt$lty)) lines(xs, ys, col=fmt$col, lty=fmt$lty, lwd=fmt$lwd)
      if (!is.null(fmt$pch)) points(xs, ys, col=fmt$col, pch=fmt$pch, bg=fmt$bg, cex=fmt$cex)
    }
  }
}
addid<-function(dfcoord, freq=0.1) {
  # Label a random subset (proportion 'freq') of the plotted points with
  # the individual's id, taken from the "ycoord.<id>" column names.
  value_cols <- setdiff(names(dfcoord), c("SimulationName", "xcoord"))
  for (column in value_cols) {
    label <- gsub(pattern="ycoord.", replacement="", x=column, fixed=TRUE)
    # One Bernoulli(freq) draw per row decides which points get a label.
    chosen <- which(rbinom(nrow(dfcoord), 1, freq) == 1)
    text(dfcoord$xcoord[chosen], dfcoord[chosen, column], label)
  }
}
addlegende<-function(formating, addlegend=FALSE) {
  # Draw the legend(s) for a plot produced with 'formating'.
  # formating: data.frame with rownames = simulation names; a line legend
  #   is drawn only when a 'lty' column exists, a point legend only when a
  #   'pch' column exists.
  # addlegend: either a logical (draw the legends, lines top-left and
  #   points top-right, only when TRUE), or a length-2 character vector
  #   giving the positions of the line legend and then the point legend.
  if (is.logical(addlegend)) {
    # is.logical()/isTRUE() instead of class()=="logical" and a bare if():
    # robust to NA and to inputs of length != 1.
    if (isTRUE(addlegend[1])) {
      if (!is.null(formating$lty)) legend("topleft", legend=rownames(formating), col=formating$col,lty=formating$lty,lwd=formating$lwd, inset=0.01)
      if (!is.null(formating$pch)) legend("topright", legend=rownames(formating), col=formating$col, pch=formating$pch, pt.bg=formating$bg, pt.cex=formating$cex, inset=0.01)
    }
  } else {
    if (!is.null(formating$lty)) legend(addlegend[1], legend=rownames(formating), col=formating$col,lty=formating$lty,lwd=formating$lwd, inset=0.01)
    if (!is.null(formating$pch)) legend(addlegend[2], legend=rownames(formating), col=formating$col,pch=formating$pch,pt.bg=formating$bg, pt.cex=formating$cex, inset=0.01)
  }
}
plot.hop <- function(x, formating=NULL, xaxis="Time", yaxis=c(dbh="trees"), addlegend=FALSE, DayofYear=NULL, rangedate=NULL, selectid=NULL, addid=NULL, ...) {
	# S3 plot method for "hop" objects (as returned by readVirtualExperiment).
	#
	# x: a hop object
	# formating: data.frame, rownames = simulation names to plot; columns give
	#   the formatting (pch, col, bg, cex, lty, lwd) of points or lines
	# xaxis: "Time" or a named character (name = x variable, value = export
	#   profile where it can be found) -- only "Time" is implemented
	# yaxis: named vector, names = variables to plot, values = export profiles
	# addlegend: logical (draw legend?) or length-2 character vector of legend
	#   positions (lines first, then points); see addlegende()
	# DayofYear (optional, xaxis=="Time" only): vector of Julian days to keep
	# rangedate (optional, xaxis=="Time" only): x-range to plot (useful when
	#   overlaying other simulations with a different date range)
	# selectid (optional, trees/cells only): numeric vector of ids to plot
	# addid (optional, with selectid): frequency of added id labels (]0,1])
	#
	# Short-circuit || so nrow()/ncol() are never evaluated on a NULL
	# 'formating' (the former elementwise '|' yielded a zero-length condition
	# and an "argument is of length zero" error instead of the message).
	if (is.null(formating) || nrow(formating) == 0 || ncol(formating) == 0) {
		print("formating must be provided, contain at least one row and have at least column pch or lty")
		return()
	}
	if (xaxis=="Time") {
		dfcoord<-createdfcoordtime(x, yaxis, simulationNames=rownames(formating), DayofYear, selectid)
		if (is.null(rangedate)) rangedate<-range(dfcoord$xcoord, na.rm=TRUE)
		# empty frame covering the full x/y range, then one overlay per simulation
		plot(rangedate, range(dfcoord[,setdiff(names(dfcoord), c("xcoord", "SimulationName"))], na.rm=TRUE), type="n", xlab="Time", ylab=paste(unique(names(yaxis)), collapse=" or "), ...)
		addsimu(formating, dfcoord, ...)
		# note: the numeric argument 'addid' shadows the addid() function name,
		# but R still resolves the function binding in a call position
		if (!is.null(addid)) addid(dfcoord, addid)
	} else {
		print("not done yet")
	}
	addlegende(formating, addlegend)
}
points.hop <- function(x, formating=NULL, xaxis="Time", yaxis=c(dbh="trees"), addlegend=FALSE, DayofYear=NULL, rangedate=NULL, selectid=NULL, addid=NULL, ...) {
	# S3 points method for "hop" objects: overlay simulations on an existing
	# plot. Arguments as in plot.hop (rangedate is accepted for signature
	# compatibility but unused here since no new frame is drawn).
	# Same NULL/empty guard as plot.hop, for consistency.
	if (is.null(formating) || nrow(formating) == 0 || ncol(formating) == 0) {
		print("formating must be provided, contain at least one row and have at least column pch or lty")
		return()
	}
	if (xaxis=="Time") {
		dfcoord<-createdfcoordtime(x, yaxis, simulationNames=rownames(formating), DayofYear, selectid)
		addsimu(formating, dfcoord, ...)
		# pass 'addid' as the labelling frequency, consistent with plot.hop
		# (previously the argument was dropped and the 0.1 default always used)
		if (!is.null(addid)) addid(dfcoord, addid)
	} else {
		print("not done yet")
	}
	addlegende(formating, addlegend)
}
# lines.hop shares the implementation of points.hop: whether lines or points
# are actually drawn is decided by the lty/pch columns of 'formating'.
lines.hop<-points.hop
################## map of one variable in cells
map <- function(x, zaxis=c(monthCells="LAI"), selectsimulations=NULL, selectdates=NULL, rows=NULL) {
	# Draw one image() panel of a cell-level variable per simulation x date.
	#
	# x: a hop object
	# zaxis: named character; name = export profile (element of x), value = an
	#   expression over that profile's columns (e.g. "LAI" or "LAI/2").
	#   Default now quoted: the former bare symbol LAI raised
	#   "object 'LAI' not found" whenever the default was used.
	# selectsimulations / selectdates: simulations and dates to plot
	# rows: NULL = near-square layout; "date" = dates as rows; anything else =
	#   simulations as rows
	toto<-x[[names(zaxis)]]
	toto<-toto[as.character(toto$SimulationName) %in% selectsimulations & as.character(toto$Date) %in% selectdates,]
	nbgraphes<-length(selectsimulations)*length(selectdates)
	if (nbgraphes>1) {
		if (is.null(rows)) {
			# ^ binds tighter than :, so 1:racine^2 is 1:(racine^2)
			racine<-ceiling(sqrt(nbgraphes)) ; layout(matrix(1:racine^2, ncol=racine, byrow=TRUE))
		} else if (rows=="date") {
			layout(matrix(1:nbgraphes, nrow=length(selectdates), byrow=FALSE))
		} else {
			layout(matrix(1:nbgraphes, nrow=length(selectsimulations), byrow=TRUE))
		}
	}
	# evaluate the zaxis expression with toto's columns in scope
	toto$zaxisaplotter<-eval(parse(text=zaxis), toto)
	# shared colour scale across all panels
	zlim<-range(toto$zaxisaplotter)
	for (simu in selectsimulations) for (date in selectdates) {
		dat<-toto[toto$SimulationName==simu & toto$Date==date, ]
		# was nrow(toto): the empty-panel test never fired for a single missing
		# simulation/date combination and image() then failed on an empty matrix
		if (nrow(dat)==0) {
			print(paste("no data at date", date, "in simulation", simu))
		} else {
			# one column per y value; cells ordered by (y, x) to fill the matrix
			matrice<-matrix(dat[order(dat$y, dat$x),"zaxisaplotter"], ncol=length(unique(dat$y)))
			#filled.contour(z=matrice, main= paste(zaxis, simu, date, sep="\n"))
			image(matrice, main=paste(zaxis, simu, date, sep="\n"), asp = ncol(matrice)/nrow(matrice), useRaster=TRUE, ylim=c(0,1), zlim=zlim, xaxt="n", yaxt="n")
			abline(v=0.5, lty=2)
		}
	}
}
################# map of nitrogen fluxes
NitrogenMap<-function(df,stocksScale=1, fluxesScale=1, titre="Nitrogen Stocks and fluxes") {
	# Draw a nitrogen stock-and-flux diagram for each row of 'df', one new
	# graphics device per row.
	#
	# df: data.frame with stock columns (residues, microorg, humus, NO3) and
	#   flux columns (PlanttoResiduals, ResiduestoMicroorg, MicroorgtoHumus,
	#   MicroorgtoNO3, HumustoNO3, NO3toAtm, AtmtoNO3, NO3toPlant, NO3toTree)
	#   -- see the commented example data below this function.
	# stocksScale: scales the box sizes; fluxesScale: scales the arrow widths.
	# titre: title; a single value is recycled to one title per row.
	#if (nrow(df)>1) layout(matrix(1:nrow(df), byrow=TRUE))
	if (length(titre)==1) titre<-rep(titre, nrow(df)) # hoisted out of the loop
	for (i in seq_len(nrow(df))){
		x<-df[i,]
		# exactly one new device per diagram (previously dev.new() was called
		# twice per row whenever nrow(df) > 1, opening empty windows)
		dev.new()
		plot(c(0,100), c(0,100), type="n", xaxt="n", yaxt="n", xlab="", ylab="", bty="n", main=titre[i])
		# stocks: rectangle size proportional to the (scaled) stock
		rect(xleft=30-(x$residues*stocksScale/10), ybottom=50, xright=30, ytop=60, col = "lightgreen", border = "lightgreen")
		text(30-(x$residues*stocksScale/10)/2, 55, "Residuals")
		rect(xleft=30-sqrt(x$microorg*stocksScale), ybottom=40-sqrt(x$microorg*stocksScale), xright=30, ytop=40, col = "orange", border = "orange")
		text(30-sqrt(x$microorg*stocksScale)/2, 40-sqrt(x$microorg*stocksScale)/2, "Microorg")
		rect(xleft=55, ybottom=40, xright=55+sqrt(x$humus*stocksScale), ytop=40+sqrt(x$humus*stocksScale), col = "brown", border = "brown")
		text(55+sqrt(x$humus*stocksScale)/2, 40+sqrt(x$humus*stocksScale)/2, "Humus")
		rect(xleft=70, ybottom=30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), xright=70+sqrt(x$NO3*stocksScale), ytop=30-sqrt(x$microorg*stocksScale), col = "red", border = "red")
		text(70+sqrt(x$NO3*stocksScale)/2, 30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale)/2, "NO3")
		# fluxes: arrow width proportional to the (scaled) flux; each bent path
		# is a horizontal segment (length=0) followed by an arrowed segment
		arrows(x0=70, y0=30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), x1 = 0, y1 = 30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), length = 0, lwd=fluxesScale*x$NO3toPlant) # horizontal segment
		arrows(x0=0, y0=30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), x1 = 0, y1 = 60, length = 0.25, lwd=fluxesScale*x$NO3toPlant)
		# column renamed from 'PlanttoResidues' to 'PlanttoResiduals', the name
		# actually present in the documented/example data (the old name gave a
		# NULL column and a zero-length lwd)
		arrows(x0=0, y0=80, x1 = 30, y1 = 80, length = 0, lwd=fluxesScale*x$PlanttoResiduals) # horizontal segment
		arrows(x0=30, y0=80, x1 = 30, y1 = 60, length = 0.25, lwd=fluxesScale*x$PlanttoResiduals)
		arrows(x0=30, y0=50, x1 = 30, y1 = 40, length = 0.25, lwd=fluxesScale*x$ResiduestoMicroorg)
		arrows(x0=30, y0=40, x1 = 55, y1 = 40, length = 0.25, lwd=fluxesScale*x$MicroorgtoHumus)
		arrows(x0=30, y0=40-sqrt(x$microorg*stocksScale), x1 = 70, y1 = 30-sqrt(x$microorg*stocksScale), length = 0.25, lwd=fluxesScale*x$MicroorgtoNO3)
		arrows(x0=55+sqrt(x$humus*stocksScale), y0=40, x1 = 70, y1 = 30-sqrt(x$microorg*stocksScale), length = 0.25, lwd=fluxesScale*x$HumustoNO3)
		arrows(x0=70+sqrt(x$NO3*stocksScale), y0=30-sqrt(x$microorg*stocksScale), x1 = 70+sqrt(x$NO3*stocksScale), y1 = 90, length = 0.25, lwd=fluxesScale*x$NO3toAtm)
		# downward (atmosphere -> NO3) arrow: width now uses AtmtoNO3, which the
		# documented columns define but the original code never used (it reused
		# NO3toAtm) -- NOTE(review): confirm against the intended figure
		arrows(x0=68+sqrt(x$NO3*stocksScale), y1=30-sqrt(x$microorg*stocksScale), x1 = 68+sqrt(x$NO3*stocksScale), y0 = 90, length = 0.25, lwd=fluxesScale*x$AtmtoNO3)
		# NO3 -> tree path: both segments now use NO3toTree, mirroring the
		# NO3toPlant pair above (the horizontal one reused PlanttoResiduals)
		arrows(x0=70+sqrt(x$NO3*stocksScale), y0=30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), x1 = 100, y1 = 30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), length = 0, lwd=fluxesScale*x$NO3toTree) # horizontal segment
		arrows(x0=100, y0=30-sqrt(x$microorg*stocksScale)-sqrt(x$NO3*stocksScale), x1 = 100, y1 = 60, length = 0.25, lwd=fluxesScale*x$NO3toTree)
	}
}
# x1<-data.frame(residues=10, microorg=50, humus=20, NO3=10,
# PlanttoResiduals=4, ResiduestoMicroorg=0.5,
# MicroorgtoHumus=6, MicroorgtoNO3=10, HumustoNO3=3,
# NO3toAtm=0.1, AtmtoNO3=5, NO3toPlant=1, NO3toTree=3)
# NitrogenMap(x1, stocksScale=10, titre="jour 1")
# simuChristian<-lectureHisAFe("/Users/user/Documents/a_System/modelisation/Hi-SAFE/simulationsChristian/CD21_AF_A3_13x8_2_arbres_plot.txt")
# reformate<-simuChristian$data[, c("Date", "nitrogenResidus", "microorgBiomass", "activeNitrogenHumusStock", "mineralNitrogenStock",
# "nLeafLitter", "nitrogenImmobilisation",
# "nitrogenHumification", "nitrogenRestitution", "nitrogenHumusMineralisation",
# "nitrogenVolatilisation", "nitrogenFixation", "nitrogenExtractedByCrops", "nitrogenExtractedByTrees"
# )]
# names(reformate)<-c("date", "residues","microorg","humus","NO3","PlanttoResiduals","ResiduestoMicroorg", "MicroorgtoHumus",
# "MicroorgtoNO3","HumustoNO3","NO3toAtm","AtmtoNO3","NO3toPlant","NO3toTree" )
# NitrogenMap(reformate[13000,], stocksScale=0.01, fluxesScale=0.1, titre="jour 13000")
# jours<-c(10500, 11500, 12500, 13500)
# NitrogenMap(reformate[jours,], stocksScale=0.1, fluxesScale=0.1, titre=paste("jour", jours))
|
adf164d40c1e9905ec9cb5b3e2cef14acadb47a7 | 3c7b06bb5767d78f2bac2726f84740aeb1f06a46 | /man/locate_warehouse.Rd | 39bfbfb938d4978ee206910627f22c120de0e458 | [] | no_license | will-jac/GSC | efd4b65822305f0c300cbb6cc873a72389336f93 | f9700cfc09fcac6f6f3b5a4ec5d6d8705ec08a93 | refs/heads/master | 2020-09-13T12:38:24.045545 | 2020-06-13T01:17:28 | 2020-06-13T01:17:28 | 222,782,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 439 | rd | locate_warehouse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{locate_warehouse}
\alias{locate_warehouse}
\title{Centrally locates a warehouse across a region defined by the data frame x}
\usage{
locate_warehouse(x)
}
\arguments{
\item{x}{the data frame containing locations to locate the warehouse across}
}
\description{
Centrally locates a warehouse across a region defined by the data frame x
}
|
c0655948214393fdbfa6b55fdedd82b6f87d57c7 | 3946ee744fdcfdcb58f585602682d088d312a572 | /01_scripts/read_counts_to_annotations.R | 3c77a8db45818fa842ddecaf5eec918e74d84d97 | [] | no_license | bensutherland/eRNA_taxo | c7334331303ba354a5fcffc7554b25a9d2a51d68 | 6bcb56bf7760eb9aebc89fb15437732fd5b7162f | refs/heads/master | 2023-06-07T05:07:08.637205 | 2021-06-15T21:33:36 | 2021-06-15T21:33:36 | 96,823,636 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,079 | r | read_counts_to_annotations.R | # Connect read counts to the annotations and plot
# Input: RData from normalization step (post-filter, CPM) and MEGAN (taxonomy ID per amplicon)
# rm(list=ls())
# Choose dataset
datatype <- "eRNA_taxo"
#### 0. Setup (no changes reqd) ####
# Install Packages
#install.packages("RColorBrewer")
library("RColorBrewer")
# Set working directory depending on the dataset
working.dir <- paste("~/Documents/02_eRNA_taxo/", datatype, sep = "")
setwd(working.dir)
## Create a filenames list that contains file names for each dataset (1st obitab output; 2nd MEGAN output)
filenames.list <- list()
# # Root taxonomy level
# filenames.list[["eRNA_taxo"]] <- setNames(object = c(
# "08_gx_levels/normalized_output_linear.csv" # counts file
# , "16_libs_contig_expr_only_taxonomy_hits-ex.txt") # annot file (TO BE MOVED)
# , nm = c("count", "annot"))
# Family-level
# filenames.list[["eRNA_taxo"]] <- setNames(object = c(
# "08_gx_levels/normalized_output_linear.csv" # counts file
# , "16_libs_contig_expr_only_taxonomy_hits-ex_Family.txt") # annot file (TO BE MOVED)
# , nm = c("count", "annot"))
# Family-level (and all)
filenames.list[["eRNA_taxo"]] <- setNames(object = c(
"08_gx_levels/normalized_output_linear.csv" # counts file
, "16_libs_contig_expr_only_taxonomy_hits-ex_Family_and_all.txt") # annot file (TO BE MOVED)
, nm = c("count", "annot"))
filenames.list[["eRNA_taxo"]][2]
#### 1.0 Import input data and merge #####
paste("You are analyzing ", datatype, sep = "")
# counts <- read.delim2(file = paste(filenames.list[[datatype]][1], sep = ""), sep = ",")
# loads 'normalized.output.linear' (and, presumably, 'my.counts' used further
# down -- TODO confirm the RData contents)
load("08_gx_levels/normalized.RData")
str(normalized.output.linear) # this is the counts data, we need to change it though
normalized.output.linear.df <- as.data.frame(normalized.output.linear) # make it a data frame so that we can incorporate the contig ID as a character column
str(normalized.output.linear.df)
normalized.output.linear.df$id <- rownames(normalized.output.linear.df)
str(normalized.output.linear.df)
counts <- normalized.output.linear.df
# now it is ready to continue
# annot <- read.delim2(paste(filenames.list[[datatype]][2], sep = ""), header = F
#                      , col.names = c("id","taxon"))
# MEGAN annotation: one contig id + taxon per row, read as character
annot <- read.delim2(filenames.list[["eRNA_taxo"]][2], header = F, col.names = c("id", "taxon")
                     , colClasses = c("character", "character"))
str(annot)
# ### FRUSTRATING IF NEEDED ###
# # Save the
# rownames(counts) <- counts$X
# counts <- counts[, -1]
#
# head(counts)
# counts.df <- as.data.frame(counts)
# id <- rownames(counts.df)
# library(dplyr)
# counts <- mutate_all(counts.df, function(x) as.numeric(as.character(x)))
# id <- rownames(counts)
# str(id)
#
# test <- cbind(counts)
#
# str(test)
#### END FRUSTRATING IF NEEDED ###
# interactive sanity checks on both tables
str(counts)
str(annot)
dim(counts)
dim(annot)
head(counts)
head(annot)
names(counts)
names(annot)
### not needed ? ##
# # This assumes the first column of the counts file is the contig name
# colnames(counts)[1] <- "id"
### end not needed ? ##
# Sort data
counts.sorted <- counts[order(counts$id),]
annot.sorted <- annot[order(annot$id),]
# Merge annotation and counts on the contig id (inner join: unannotated
# contigs are dropped here)
data <- merge(x = annot.sorted, y = counts.sorted, by = "id")
names(data)
head(data)
str(data)
# total number of reads dealing with
# Note the difference between normalized data size and non-normalized data size
sum(colSums(data[,3:ncol(data)]))
# per sample
colSums(data[,3:ncol(data)]) # confirm, because this seems greater than the lib sizes earlier..
# NOTE(review): 'my.counts' is not created in this script; presumably it comes
# from normalized.RData -- verify
sum(my.counts$counts[,1]) # see, this is the number of actual reads for first sample, which matches the 'lib.size' within the DGElist. So the normalization must increase this value
sum(my.counts$samples$lib.size) # this is all of the lib sizes
data.df <- data
#### REMOVE UNKNOWNS #####
# Set taxa to drop from every dataset (MEGAN placeholders, not real taxa)
remove.from.all <- c("Not assigned", "No hits")
species.remove <- list()
species.remove[["eRNA_taxo"]] <- c(remove.from.all)
species.remove <- species.remove[[datatype]] # Use datatype for removing species
species.remove
# Remove those taxa from data.df
dim(data.df)
data.df <- data.df[ ! data.df$taxon %in% species.remove, ]
dim(data.df) # see how the number of taxa is reduced
### for eRNA_taxo, a ton of data is unassigned or unknown
# goes from 32866 rows (contigs) to 2490!
str(data.df)
sum(data.df[,3:ncol(data.df)]) # still contains 1,891,674 CPM though..
#### 2. Get proportional and count data by taxon per site ####
# Set nulls (count.list becomes a list on first [[<- assignment)
sample.tot <- NULL ; sample <- NULL; result <- NULL; result.prop <- NULL ; count.list <- NULL
prop.list <- list(); agg.counts.list <- list()
# Loop to get count by species by site (count.list) and proportion of species by site (prop.list)
# Columns 1-2 of data.df are id/taxon; samples start at column 3.
for(col in 3:ncol(data.df)) {
  # name of the sample this iteration
  sample <- names(data.df[col])
  # total number reads in this sample
  sample.tot <- sum(data.df[,col])
  # Use a tiny pseudo-total (0.00001) to avoid dividing by zero when the
  # sample is empty (e.g. for controls)
  if(sample.tot==0){
    sample.tot <- 0.00001
  }
  # Per sample, aggregate counts by taxon
  result <- aggregate(x = data.df[,col], by = list(data.df$taxon), FUN = sum, na.rm = T)
  # Make result proportional (percent) by dividing by the sample total
  result.prop <- (result[,2]/sample.tot)*100
  # Save count values into a list
  count.list[[sample]] <- setNames(result[,2], result[,1]) # pull the value into count.list with names as the first column of result
  # Save proportions into a list
  prop.list[[sample]] <- setNames(result.prop, result[,1])
}
str(prop.list)
str(count.list)
#### 3. Pull out relevant information from proportional and count data #####
# Bind the per-sample vectors column-wise into taxon x sample matrices.
# NOTE(review): growing with cbind in a loop works here but
# do.call(cbind, prop.list) would be the idiomatic one-liner.
# Proportion data
prop.df <- NULL
for(i in 1:length(prop.list)){
  prop.df <- cbind(prop.df, prop.list[[i]])}
head(prop.df)
# Count data
counts.df <- NULL
for(i in 1:length(count.list)){
  counts.df <- cbind(counts.df, count.list[[i]])}
head(counts.df)
##### ADDING SAMPLE NAMES #####
# would be a good place to rename certain columns if necessary
# Incorporate location names
# instead of the following ### NOT WORKING ###
# site.names <- sample.locations[1:length(prop.df[1,])] # Assumes is in same order for the two major types individually (SOG or C3)
site.names <- colnames(data.df[3:ncol(data.df)])
colnames(prop.df) <- site.names # name prop.df
colnames(counts.df) <- site.names # name counts.df
head(prop.df)
head(counts.df)
# Set filenames for saving out
count.output.csv.filename <- paste("09_results/", datatype, "_count_by_taxa.csv", sep = "")
prop.output.csv.filename <- paste("09_results/", datatype, "_prop_by_taxa.csv", sep = "")
# write.csv(x = counts.df, file = count.output.csv.filename)
# write.csv(x = prop.df, file = prop.output.csv.filename)
# Find total numbers of reads mapping (###CPM HERE##)
colnames(counts.df)
sample.reads <- colSums(x = counts.df[, c(1:ncol(counts.df))])
# Filter counts table: zero out entries below min.count
min.count <- 10
counts.filtered.df <- counts.df
head(counts.filtered.df)
counts.filtered.df[which(counts.filtered.df < min.count)]
counts.filtered.df[which(counts.filtered.df < min.count)] <- 0
head(counts.filtered.df)
counts.filtered.filename <- paste("09_results/", datatype, "_count_by_taxa_filt_at_", min.count, ".csv", sep = "")
# write.csv(x = counts.filtered.df, file = counts.filtered.filename)
##### 4.0 Prepare plotting (colors) ####
# Prepare a large palette by concatenating many RColorBrewer palettes so
# every taxon can get a distinct colour.
#display.brewer.all() # see color options
cols <- brewer.pal(n = 9, name = "Set1")
cols2 <- brewer.pal(n = 8, name = "Set2")
cols3 <- brewer.pal(n = 10, name = "Set3")
cols4 <- brewer.pal(n = 8, name = "Pastel2")
cols5 <- brewer.pal(n = 9, name = "Pastel1")
cols6 <- brewer.pal(n = 11, name = "BrBG")
cols7 <- brewer.pal(n = 10, name = "Paired")
cols8 <- brewer.pal(n = 11, name = "Spectral")
cols9 <- brewer.pal(n = 9, name = "YlOrRd")
cols10 <- brewer.pal(n = 9, name = "YlGnBu")
cols11 <- brewer.pal(n = 9, name = "YlGn")
cols12 <- brewer.pal(n = 9, name = "RdPu")
cols13 <- brewer.pal(n = 9, name = "Purples")
cols14 <- brewer.pal(n = 9, name = "PuRd")
cols15 <- brewer.pal(n = 9, name = "Greys")
cols16 <- brewer.pal(n = 11, name = "RdGy")
# NOTE(review): 'palette' shadows the base graphics function palette()
palette <- c(cols,cols2,cols3,cols4,cols5,cols6,cols7,cols8,cols9,cols10,cols11,cols12,cols13,cols14,cols15,cols16)
length(palette)
# Randomly shuffle colour assignment (seeded for reproducibility)
set.seed(100)
index <- sample(1:nrow(counts.df))
index
# In case need numerous sets
palette.numerous<- rep(x = palette, times = 4)
# Use the recycled palette when there are more taxa than colours
if(length(index) > length(palette)){
  this.palette <- palette.numerous[index]
} else {
  this.palette <- palette[index]
}
# ##### 4.2 Create Legend ####
# # Prepare legend size
# legend.cex <- c(1, 1, 1, 1, 1, 1) ; names(legend.cex) <- c("C3_16s","C3_COI", "SOG_16s", "C3_val", "SOG_val", "SOG_COI")
# Create dataframe pairing each taxon with its assigned colour
color.index <- cbind(rownames(prop.df), this.palette)
colnames(color.index) <- c("taxon","color")
head(color.index)
color.index.df <- as.data.frame(color.index)
# note this will not work until you move the color codes to character
# Identify taxa reaching at least min.proport percent in any one sample;
# only those appear in the legend
min.proport <- 5
# Set null
high.presence.taxa <- NULL
for(i in 1:nrow(prop.df)){
  # print() both echoes and returns the taxon name; NULL rows are dropped by c()
  high.presence.taxa.add <- if(max(prop.df[i,]) > min.proport) { print(rownames(prop.df)[i])}
  high.presence.taxa <- c(high.presence.taxa, high.presence.taxa.add)
}
high.presence.taxa
# Select the rows of the color index for only those high.presence taxa
legend.info <- color.index.df[color.index.df$taxon %in% high.presence.taxa, ]
#### 5. Plot ####
# Three stacked panels: raw counts, proportions, and the legend.
# filename <- paste("06_output_figures/", datatype, "_read_count_and_prop_by_loc.pdf", sep = "")
#
# # if want to work interactively, comment out the following line
# pdf(file = filename, width = 10, height = 8)
par(mfrow=c(3,1), mar= c(2.5,4.5,2,1) + 0.2, mgp = c(3.75, 0.75, 0))
# Plot count data (panel 1)
position.info <- barplot(as.matrix(counts.df)
        , col = this.palette, las = 2, xaxt = "n"
        , xlim = c(0, ncol(prop.df)+4)
        , ylab = "Reads")
# axis(side = 1, at = position.info, labels = sample.locations, las = 3, cex.axis = 0.9)
# Plot proportion data (panel 2)
position.info <- barplot(as.matrix(prop.df), col = this.palette
        , xlim = c(0, ncol(prop.df)+4)
        , las = 1
        , ylab = "Proportion (%)"
        , xaxt = "n")
axis(side = 1, at = position.info,
     labels = site.names, las = 3
     )
# Add information about read counts per sample
# mtext(x = position.info, text = sample.reads
#       , side=3, at = position.info, cex = 0.7)
# Plot Legend (panel 3): blank plot, then a centred legend
plot(1, type = "n", axes = F, xlab = "", ylab = "")
# colours coerced to character because color.index.df stored them as factor
legend(x = "center", y = "center", legend = legend.info$taxon
       , fill = as.character(legend.info$color)
       #, cex = legend.cex[datatype]
       , ncol = 4)
#dev.off()
#
# Save out as 10 x 8 in portrait
99d547512a64ce1c010565658a8b0a2cf9529ac8 | 2c834e1586b5902391350877d76138ceec31fa42 | /man/polbirth.Rd | 5df958b5ec20a720cfeed983ebf3d902f04f7159 | [] | no_license | cran/hmmm | 9b34c9a5c46d43e2135e361ea7bf70cfa65aab6b | 7689db4c402ac3dac0b15b90cbd9ed3198886fef | refs/heads/master | 2021-05-16T02:49:30.840122 | 2018-03-14T10:26:58 | 2018-03-14T10:26:58 | 17,696,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,065 | rd | polbirth.Rd | \name{polbirth}
\alias{polbirth}
\docType{data}
\title{political orientation and teenage birth control data}
\description{Data on political orientation and opinion on teenage birth control of a sample of 911 U.S. citizens.}
\usage{data(polbirth)}
\format{A data frame whose columns contain:
\describe{\item{\code{Politics}}{A factor with levels: \code{Extremely liberal}, \code{Liberal}, \code{Slightly liberal}, \code{Moderate},
\code{Slightly conservative}, \code{Conservative}, \code{Extremely conservative}}
\item{\code{Birthcontrol}}{A factor with levels: \code{Strongly agree}, \code{Agree}, \code{Disagree}, \code{Strongly disagree}}
\item{\code{Freq}}{A numeric vector of frequencies}}}
\details{This is a sub-data frame obtained by marginalizing the data frame `relpolbirth'
with respect to the variable religion.}
\source{General Social Survey, 1993.}
\references{Bergsma W, Croon M, Hagenaars JA (2009) Marginal Models for Dependent, Clustered, and Longitudinal Categorical Data. Springer.}
\examples{data(polbirth)}
\keyword{datasets}
|
9665305a23816aa6156846420fe7707064e6f2e3 | db25cd49f5c2d5c34bcb4852a3a1de86397698d9 | /Test_module/Irrigation_modules/Compute_irrigatioN_losses.R | 23a7c3737c082edead3598e65b57f9851b24cddf | [
"Apache-2.0"
] | permissive | shekharsg/MITERRA-PORTUGAL | 292d210b89572f207d0af5ff44f4415d52fd553b | 4785a2d5a0fe49f27fe0c80cd0936e457142311c | refs/heads/master | 2023-03-19T17:16:15.757153 | 2020-03-25T14:10:26 | 2020-03-25T14:10:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | r | Compute_irrigatioN_losses.R | source('./Runoff_module/Function/Compute_irrigation_runoff.R')
source('./Runoff_module/Function/Compute_runoff_application.R')
print('Loading Irrigation runoff submodule')
year <- c(1999, 2009)
for (i in year)
{
#compute_irrigation_sys_inefficiency(i, TRUE) #computes irrigation N losses through inefficiencies
compute_runoff_irrig(i, T) #computes irrigation N losses through runoff || Writes data to irrigation N inefficiency
} |
99e1a55776ea9dc4260a2d3c086ea0ee3846b1d4 | f1d92e4a0a155ea83a46798e2130286b9492b5df | /man/ContentView.Rd | 37005a646329133c46667d34d8328c02db51cc64 | [
"MIT"
] | permissive | tynesjo/lookr | f7a1f7d5d3e19ea7f6358518972a37179fe59a45 | aeb899fb33465641ebaac8779ec3348fb9f72900 | refs/heads/master | 2020-08-06T13:29:12.761916 | 2019-10-05T12:09:42 | 2019-10-05T12:09:42 | 212,991,893 | 0 | 1 | MIT | 2019-10-05T12:01:01 | 2019-10-05T12:01:01 | null | UTF-8 | R | false | true | 658 | rd | ContentView.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ContentView.r
\docType{data}
\name{ContentView}
\alias{ContentView}
\title{ContentView Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
ContentView
}
\description{
ContentView Class
}
\section{Fields}{
\describe{
\item{\code{id}}{}
\item{\code{look_id}}{}
\item{\code{dashboard_id}}{}
\item{\code{content_metadata_id}}{}
\item{\code{user_id}}{}
\item{\code{group_id}}{}
\item{\code{view_count}}{}
\item{\code{favorite_count}}{}
\item{\code{last_viewed_at}}{}
\item{\code{start_of_week_date}}{}
\item{\code{can}}{}
}}
\keyword{datasets}
|
6fd980b887051950eb3c6c7bb8d4dc76893b85e9 | 673adb50ef81fbd3f604e398dfe70f4c2170126d | /dataPrep/dataPrep.R | 9c849f1b81d655fec47c9c4d5c1144bc185feb7f | [] | no_license | leeleavitt/cellTypePredictor | d6f7fc2307bf7c767aa2362ed9f95c974be00f31 | 3b5b364596fffdf9bc927163bf0976120a138251 | refs/heads/master | 2023-01-28T03:15:47.130683 | 2020-12-04T18:37:04 | 2020-12-04T18:37:04 | 258,028,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,307 | r | dataPrep.R | ###############################################################
###############################################################
###############################################################
###############################################################
# find all files
fileLocation <- './rawData/multiClassData/'
# Only load from microscope 3
# NOTE(review): in '[mM]3' the match is anywhere in the name; fine as long as
# "m3"/"M3" only appears in microscope-3 files
files <- list.files(fileLocation, '[mM]3')
# no-op kept from an earlier subsetting step
files <- files
# Load all RD. experiment files into the workspace, but only if none are
# loaded yet (pattern 'RD.' -- the dot is a regex wildcard here)
if(length(ls(pattern = 'RD.')) < 1){
    for(i in files){
        print(i)
        load( paste0(fileLocation,i) )
    }
}
# Find all experiments in the workspace
rdExps <- ls(pattern ='RD[.]')
# Now search through each experiment to see if they have the required cell types
cellTypes <- c('L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'G7', 'G8', 'G9', 'G10', 'R11', 'R12', 'R13', 'N14', 'N15', 'N16')
goodRdExps <- c()
for(i in 1:length(rdExps) ){
    # get the data
    tmpRD <- get(rdExps[i])
    # Some experiments have spelled cell.types as cell_types; grep returns the
    # index of whichever spelling is present
    cellTypeName <- grep('^cell[._]types$', names(tmpRD))
    # Are the cell types listed above in this experiment?
    # first ask the logic
    correctCtLogic <- names(tmpRD[[ cellTypeName ]]) %in% cellTypes
    # Does the cell type match the correct length?
    correctCt <- names(tmpRD[[ cellTypeName ]])[correctCtLogic]
    # keep only experiments that contain every one of the 16 cell types
    if(length(correctCt) != 0 & length(correctCt) == length(cellTypes)){
        print(paste('good: ', length(correctCt)))
        goodRdExps <- c(goodRdExps, rdExps[i])
    }else{print(paste('bad: ', length(correctCt)))}
}
# drop one experiment excluded by hand
rdExps <- setdiff(goodRdExps, "RD.190612.38.f.M3.W1.ConROI_Y5yM13L")
# Report the dimensions of each experiment's image channels (interactive check)
for( i in 1:length(rdExps)){
    rdName <- rdExps[i]
    cat('\n', rdName)
    tmpRD <- get( rdExps[i] )
    cat('\nimage2 dim: ', dim(tmpRD$img2) )
    cat('\nimage3 dim: ', dim(tmpRD$img3) )
    cat('\nimage4 dim: ', dim(tmpRD$img4) )
    cat('\nimage8 dim: ', dim(tmpRD$img8) )
}
# Now we need to add these cell types to the binary column.
# We will make our lives easier and just call it cell_types
for( i in 1:length(rdExps)){
    # Get the experiment
    tmpRD <- get(rdExps[i])
    # Some experiments have spelled cell.types as cell_types, get it right
    cellTypeName <- grep('^cell[._]types$', names(tmpRD), value = TRUE)
    # Grab only the cell types we are interested in
    rdCellType <- tmpRD[[ cellTypeName ]][ cellTypes ]
    # Add cell_types to the bin dataframe
    tmpRD$bin[,'cell_types'] <- NA
    # Iteratively update the binary data frame: label j is the position of the
    # cell type within cellTypes
    for(j in 1:length(rdCellType)){
        tmpRD$bin[rdCellType[[j]], 'cell_types'] <- j
    }
    # write the modified experiment back into the workspace
    assign(rdExps[i], tmpRD)
}
source("./R/imageExtractor.R")
# Extract 20px crops of channel 2 of img3 (ib4/red label), labelled by cell_types
imageExtractor(rdExps, 20, 'cell_types', 'img3', c(2) )
# # Image 1 is a total bright feild overlay
# imageExtractor(rdExps, 20, 'cell_types', 'img1', c(1,2,3) )
# # Image 2 is the gfp cy5 and sometime dapi
# imageExtractor(rdExps, 20, 'cell_types', 'img2', c(1,2,3) )
# # Image 3 is ib4 only/red label
# imageExtractor(rdExps, 20, 'cell_types', 'img3', c(2) )
# # Image 5 is the gfp only/green label
# imageExtractor(rdExps, 20, 'cell_types', 'img4', c(2) )
# Image 8 is the roi and the dapi
imageExtractor(rdExps, 20, 'drop', 'img8', c(1, 2, 3) )
# Traces are more complicated so just use this script
source("./R/traceExtractor.R")
|
30d1b9b643994ea05b1a491a530dd5dff41cdc05 | 2472ef56324f7f69b12a4a74eae53b9f3aa342ed | /run_analysis.R | b9d8986dacdd910aaefc77db8d3f7864539186ac | [] | no_license | dinataruni/CleaningData | 494295177d68e5bfbb17f34fb96e22313db7c412 | 1e2d30fdabb4276052c1afb61b9495865d34eae3 | refs/heads/master | 2021-01-16T01:09:45.991405 | 2014-11-23T21:53:32 | 2014-11-23T21:53:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,526 | r | run_analysis.R | ## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(dplyr)
library(reshape2)
print("Reading datasets")
# Read Training datasets
x_train <- read.table("./train/X_train.txt")
y_train <- read.table("./train/y_train.txt")
subject_train <- read.table("./train/subject_train.txt")
# Read Test datasets
x_test <- read.table("./test/X_test.txt")
y_test <- read.table("./test/y_test.txt")
subject_test <- read.table("./test/subject_test.txt")
# Read Activities
activities <- read.table("activity_labels.txt")
# Merging
x_merged <- rbind(x_train, x_test)
y_merged <- rbind(y_train, y_test)
sub_merged <- rbind(subject_train, subject_test)
# 3. Uses descriptive activity names to name the activities in the data set
print("Assigning activity labels")
merge_act <- inner_join(activities, y_merged)
# Read Features
features <- read.table("features.txt")
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
print("Extracting only variables regarding mean or standard deviation")
extract_features <- features[grep('mean|std|Mean', features$V2), ]
# Subset merged data to only show measurements on mean and standard deviation
print("Subsetting dataset based on extracted variables")
subset_meanstd <- x_merged[ ,c(extract_features$V1)]
# Assign useful colnames
colnames(subset_meanstd) <- extract_features$V2
# Merge Subjet & Activity and Merges the training and the test sets to create one data set.
print("Merging data")
data <- cbind(sub_merged, merge_act, subset_meanstd)
# Renaming columns
names(data)[1] <- "Subject"
names(data)[2] <- "Activity_ID"
names(data)[3] <- "Activity"
# Summarizing by Subject & Activity
print("Summarizing data")
data.long <- melt(data, id = c("Subject", "Activity"))
data.wide <- dcast(data.long, Subject + Activity ~ variable, mean)
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
print("Creating tidy data file")
tidy.data <- data.wide
write.table(tidy.data, "tidydata.txt", row.names = FALSE, quote = FALSE) |
9a11f541e5d988f8b67e841bf7e99032e371954c | 5c112972e0f41308d1c780cef8f7453e37af16c5 | /graphomax/R/baromax.R | 17146858e1b71776e6c29ffb9f43f5215f34ebc3 | [] | no_license | wswade2/Stat547 | f0a00e59206b81855340cf3ff51c483429a841b2 | 064a2054591f0028933b69ceff623e5cfe35b0ed | refs/heads/master | 2021-03-27T09:41:53.908705 | 2017-12-08T08:04:00 | 2017-12-08T08:04:00 | 110,039,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 875 | r | baromax.R | #' Welcome to the baromax function!
#'
#' This function is designed to plot 2x2 barplots with standard error bars using only
#' one line of code.
#'
#' To use it, simply specify the dataset, x, y, and fill
#' These types of plots are designed for cases where y is numeric and
#' both x and fill are categorical variables.
#'
#' Change the width of the error bars by specifying error_width = ___
baromax <- function(data, x, y, fill, error_width = .2) {
  # Barplot of group means with normal-theory error bars.
  # data: data frame; x and fill: names (strings) of categorical variables;
  # y: name (string) of the numeric response; error_width: error-bar cap width.
  p <- ggplot(data)
  p <- p + aes_string(x, y, fill = fill)
  # bars show the group means
  p <- p + stat_summary(fun.y = mean, geom = "bar")
  # error bars from mean_cl_normal, dodged to sit over grouped bars
  p <- p + stat_summary(fun.data = mean_cl_normal,
                        geom = "errorbar",
                        width = error_width,
                        position = position_dodge(width = .9))
  p <- p + theme_bw()
  p + theme(panel.grid.major = element_blank(),
            panel.grid.minor = element_blank())
}
# Example:
#baromax(data = iris,x = "Species", y = "Sepal.Width", fill = "Species")
|
83789c1dd5154f54c888f5c02485af57ea726a2e | 248addf19dad4feba67bea460e48546d1858909c | /plot1.R | bc310862e33700f59d215906b4bc456b9b998ac8 | [] | no_license | uzma35/ExData_Plotting1 | 591c7ce8525ca2e5a3227091c7b28cd03834dda7 | b888506410a00122e7d993ec803b45fa65efd9ed | refs/heads/master | 2022-11-17T23:49:12.171005 | 2020-07-06T17:29:14 | 2020-07-06T17:29:14 | 277,532,080 | 0 | 0 | null | 2020-07-06T12:10:58 | 2020-07-06T12:10:57 | null | UTF-8 | R | false | false | 816 | r | plot1.R | ##loading the electricity consumption dataset
power_con <- read.table("household_power_consumption.txt",skip=1,sep=";")
##naming the dataset
names(power_con) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
##subsetting the dataset for desired dates
subpower_con <- subset(power_con,power_con$Date=="1/2/2007" | power_con$Date =="2/2/2007")
##basic histogram plot function
hist(as.numeric(as.character(subpower_con$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power(kilowatts)")
##annotating graph
title(main="Global Active Power")
##saving output
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("saving Plot1.png in", getwd()) |
1a447b8d82c5268fe0e7306da342d4646fda3f0d | 99d4b4863a3e855df7bd974cf01e021191c9acda | /R/02_benthic_terrain_modeling.R | b30b72e1223cab0e675b11c58d0d7540b45de49d | [
"MIT"
] | permissive | dmarch/ub-rworkshop2021 | 04e406e343fe7341ab7aaca6913c4c06f8a51d94 | 6c6fd86c072804ca6e6768346f6d6a6b697367ab | refs/heads/main | 2023-04-21T13:25:08.508135 | 2021-04-30T07:44:35 | 2021-04-30T07:44:35 | 361,194,429 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,925 | r | 02_benthic_terrain_modeling.R | ################################################################################
#
# Title: Benthic Terrain Modeling
# Course: Using R to work with marine spatial data
#
# Author: David March
# Email: dmarch@ub.edu
# Last revision: 2021/04/29
#
# Keywords: R, marine, data, GIS, map, raster, bathymetry, terrain
#
################################################################################
# load libraries
library(raster)
library(leaflet)
library(rgdal)
library(RColorBrewer)
library(rasterVis)
library(rgl)
library(ggplot2)
library(tidyr)
#----------------------------------------------
# Part 1: Import raster data
#----------------------------------------------
# Import bathymetry
# see download-emodnet.R for details on how to download this data
bat <- raster("data/emodnet-mean-westmed.tif")
# Inspect raster data
class(bat)
bat
hist(bat)
# Plot raster data
# base
plot(bat)
# rasterVis
rasterVis::levelplot(
bat,
margin = TRUE, contour = T, main = "Bathymetry (m) - EMODnet",
par.settings = rasterVis::rasterTheme(region = brewer.pal("Blues", n = 9))
)
# 3d plot
myPal <- colorRampPalette(brewer.pal(9, 'Blues'), alpha=TRUE) # palette
plot3D(bat*(-1), col=myPal, rev=TRUE, specular="black") # plot 3d with rgl
#----------------------------------------------
# Part 2: Bathymetric Terrain Modeling
#----------------------------------------------
# Calculate terrain characteristic from bathymetry
slope <- terrain(bat, opt=c("slope"), unit='degrees')
# Exercise:
# 1. Inspect derived data
# 2. Explore other metrics that can be derived from terrain()
#----------------------------------------------
# Part 3: Manipulate raster data
#----------------------------------------------
# transform 0 to NA
bat[bat == 0] <- NA
# resample to a coarser resolution (0.042 x 0.042 degrees)
bathy_ag <- aggregate(bat, fact = 20, fun = mean)
# prepare raster to calculate distance
bathy_d <- bathy_ag
bathy_d[is.na(bathy_d[])] <- 10000
bathy_d[bathy_d < 10000] <- NA
# distance
dist2coast <- distance(bathy_d) # calculate distance
dist2coast <- dist2coast / 1000 # convert to km
dist2coast[dist2coast == 0] <- NA # set 0 values to NA
plot(dist2coast)
#---------------------------------------------------------------
# Part 4: Distance metrics: distance to colony
#---------------------------------------------------------------
library(gdistance)
# Colony location
CalaMorell <- c(3.86877, 40.055872)
# create ocean mask using the bathymetry
mask <- bat/bat
# change to a coarser resolution
mask_ag <- aggregate(mask, fact = 10)
# create surface
tr1 <- transition(mask_ag, transitionFunction=mean, directions=16)
tr1C <- geoCorrection(tr1)
# calculate distance to colony
dist2col <- accCost(tr1C, CalaMorell)
dist2col[is.infinite(dist2col)] <- NA
plot(dist2col)
#----------------------------------------------
# Part 5: Export raster data
#----------------------------------------------
# create output directory
out_dir <- "output"
if (!dir.exists(out_dir)) dir.create(out_dir, recursive = TRUE)
# export your data in multiple formats
writeRaster(slope, filename="output/slope.grd", overwrite=TRUE) # save binary file for slope
writeRaster(slope, filename="output/slope.tif", overwrite=TRUE) # save binary file for slope
writeRaster(slope, filename="output/slope.nc", overwrite=TRUE) # save binary file for slope
KML(bat, "output/bat.kml", col = myPal(100), overwrite = TRUE) # save KML file for bathymetry
#----------------------------------------------
# Part 6: Extract raster data for animal track
#----------------------------------------------
# import tracking data
data <- read.csv("data/L1_locations_6002105.csv")
head(data)
# Use extract function
data$depth <- raster::extract(bat, cbind(data$lon, data$lat))
head(data$depth)
summary(data$depth)
hist(data$depth)
# Exercise:
# - Extract slope and other metrics
# - Plot time series of extracted variables
|
98ed92b68f6f1caa0dc5190b78a4b12720778d3a | b1576dfa164bc357c2aea44a625d2a88327cc209 | /cachematrix.R | 0d91a742963e0ae94603d5e45e3e2abf65f066bb | [] | no_license | jyork13/ProgrammingAssignment2 | c88f0421847c270a0c05620164d267e0dda5034a | 22c61bd551e40f5f71fa7b0354b6dba8164d3ad1 | refs/heads/master | 2020-12-11T09:30:07.209219 | 2015-07-25T11:47:34 | 2015-07-25T11:47:34 | 39,348,401 | 0 | 0 | null | 2015-07-19T20:47:03 | 2015-07-19T20:47:02 | null | UTF-8 | R | false | false | 2,683 | r | cachematrix.R | # Programming Assignment 2: Lexical Scoping
# by J. York
#
# R Programming tought by Johns Hopkins on Coursera
#
#
# The purpose of these functions is to create a special matrix object to return
# a matrix inverse either by calulating the inverse or returning a previously
# calculated cached matrix inverse.
#
# Usage example:
#
# # Create a square matrix
# B = matrix(c(1,2,3,4),2,2)
#
# # Create the special "matrix"
# b = makeCacheMatrix(B)
#
# # Invoke the get function to return the matrix
# b$get()
#
# # Try to get the cached inverse (Just to show it doesn't exist yet)
# b$getInv()
#
# # Calculate the inverse matrix (first time run no cache so its calculated)
# cacheSolve(b)
#
# # Calculate the inverse matrix (second time run it's retrieved from cache)
# cacheSolve(b)
#
# # set b to a new matrix with the set function (clears cache)
# b$set(matrix(c(4,5,6,7),2,2))
#
# makeCacheMatrix: This function creates a special "matrix" object that can
# cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
# clear any previous cache
inverse_matrix <- NULL
# creates a new matrix copy and clears cached inverse matrix
set <- function(y) {
x <<- y
inverse_matrix <<- NULL
}
# returns the original matrix passed in
get <- function() x
# calculates and caches the inverse of matrix x
setInv <- function(solve) inverse_matrix <<- solve
# gets the inverse
getInv <- function() inverse_matrix
# returns a list of the 4 functions
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
# cacheSolve: This function computes the inverse of the
# special "matrix" returned by makeCacheMatrix above. If the inverse has already
# been calculated (and the matrix has not changed), then the cachesolve should
# retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
# Get the cached inverse matrix
inverse_matrix <- x$getInv()
cache_exists <- !is.null(inverse_matrix)
if(cache_exists) {
message("getting cached inverse matrix")
return(inverse_matrix)
}
# Cache does not exists, so calculate inverse
data <- x$get()
inverse_matrix <- solve(data, ...)
# Use the setInv to store the new inverse in cache and return inverse
x$setInv(inverse_matrix)
inverse_matrix
}
|
1b624c50983b852d9ebe8dbced749c67f3c6d62b | 5a10a60157e344bcdea3b2a5af1ef150cd5864e3 | /R/cv_method_single_MC.R | 68cd7935f086caad498cb160822c9be135db260d | [] | no_license | cran/HDDesign | 7d7044a1d3ae5f2e87bb2d24cbf2811be147cc29 | 5ea3d93332514d337fcbf151be677e19072e6b05 | refs/heads/master | 2020-04-02T04:11:11.704074 | 2016-06-11T09:37:25 | 2016-06-11T09:37:25 | 60,896,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,793 | r | cv_method_single_MC.R | cv_method_single_MC <-
function(mu0, p, m, n, alpha_list, p1, ss, ntest, sampling.p=0.5)
{
data=gen_data(mu0, p, m, n, p1,sampling.p)
d1=subset(data$x, data$y==1)
d2=subset(data$x, data$y==-1)
n1=nrow(d1)
n2=nrow(d2)
cv_data=data
# 10 fold CV
K=10 #10 fold CV unless one group has less than 10, then smaller fold:
if(min(n1,n2)<10) K=min(n1,n2)
cv_ids1= sample(rep(1:K, ceiling(n1/K))[1:n1], n1, replace=F)
cv_ids2= sample(rep(1:K, ceiling(n2/K))[1:n2], n2, replace=F)
cv_data$cv_idx=c( cv_ids1, cv_ids2)
cv_pcc=sapply(alpha_list, get_cv_pcc, cv_data, p1)
pvalue_threshold=alpha_list[which.max(cv_pcc)]
mean1=apply(d1,2,mean)
mean2=apply(d2,2,mean)
xsq1=apply(d1^2,2,sum)
xsq2=apply(d2^2,2,sum)
s1= xsq1-n1*mean1^2
s2= xsq2-n2*mean2^2
pool.sd= sqrt((s1+s2)/(n-2))
pool.var= (s1+s2)/(n-2)
zscore= (mean1-mean2)/( pool.sd*sqrt(1/n1+1/n2))
pvalue=2*pt(abs(zscore),df=n-2, lower.tail=F)
weight=get_weight_zp(zscore, pvalue, pvalue_threshold)
u=sum(weight[1:m])*mu0
v=sum(abs(weight))
testdata=gen_data(mu0, p, m, ntest, p1, sampling.p=0.5)
if (all(weight==0))
{
pred=rbinom(length(testdata$y), size=1, prob=p1)
}
else
{
mu0hat= dot( abs(weight), abs((mean1-mean2))/2) / (dot(weight, weight))
av.pool.var=mean( pool.var)
cl_cutoff= (1/2) * log((1-p1)/p1) * (av.pool.var/mu0hat)
pred=as.numeric((testdata$x %*% weight)>cl_cutoff)
}
# convert (0,1) to (-1, 1)
pred=pred*2-1
if (ss==F)
c(mean(pred==testdata$y) )
else
n1test=ntest*sampling.p
c(mean(pred==testdata$y), mean(pred[1:n1test]==testdata$y[1:n1test]), mean(pred[(n1test+1):ntest]==testdata$y[(n1test+1):ntest]) )
}
|
fba643144b888538b63869a97056d417618586f1 | 9ff617cb1f08e7f6df663c1c8e95bc857d92c1b9 | /boxPlotMaker.R | fb67792c6665e5c18a9d943692cf05e82c8837cd | [] | no_license | InchanKim/GroupProject | f44781a9ef0bcc3930dfcef621b64394f9aa4ac2 | 1c543ce0a13a0b10c268088f0fc1d9ba97245380 | refs/heads/master | 2021-04-26T22:45:44.162229 | 2018-04-27T19:34:09 | 2018-04-27T19:34:09 | 124,142,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,428 | r | boxPlotMaker.R | source("salaryBrackets.R")
mx <- 150000
college <- tbl(conStudent, "college")
graduate <- tbl(conStudent, 'graduation')
big <- left_join(graduationTitle, title, by = "idTitle") %>%
right_join(graduate, by = "graduationId") %>%
left_join(college, by = "idCollege") %>%
select(graduationId, majorName, collegeName) %>%
collect() %>%
right_join(yy, by = 'graduationId') %>%
filter(between(salary, mn, mx)) %>%
filter(!is.na(majorName)) %>%
mutate(majorName = case_when(
majorName == "NA" ~ "Interdisciplinary Studies",
TRUE ~ majorName
)) %>%
mutate(collegeName = case_when(
majorName == "Biological Sciences" ~ "Science",
majorName == "Spanish" ~ "Humanities & Social Sciences",
majorName == "Mass Communication" ~ "Mass Communication",
majorName == "Mathematics" ~ "Science",
majorName == "Economics" ~ "Business",
TRUE ~ collegeName
))
sub <- split(big, big$collegeName)
subPlots <- sub %>% map(~ ggplot(.) + geom_boxplot(aes(x = majorName, y = salary)) +
ggtitle(label = "Salaries by Major") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_x_discrete(name = "Major") +
scale_y_continuous(name = "Salary", labels = scales::dollar))
subFiles <- paste0("plots/box", names(sub) %>% str_remove_all("[ &]"), ".png")
#map2(subFiles, subPlots, ggsave)
|
7cb15c341f73a1f6e697c33f087d340aaeb1585c | 2ff89b6657b355de575885208d7d6b37b697b7bf | /examples/ex_optparse.R | 88f7b1fc3a216e2dd1811ab7640f7e61bb026ce3 | [] | no_license | dmattek/training-MIC20200319 | e2abe7db4fd61635dd0b5650331011bd1253abf7 | a0259c724faef9e5e43633983744d1d4816703f6 | refs/heads/master | 2021-04-08T16:12:32.975347 | 2020-11-05T08:35:45 | 2020-11-05T08:35:45 | 248,788,840 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,113 | r | ex_optparse.R | #!/usr/bin/env Rscript
## Load libraries ----
library(optparse)
## Process command-line parameters ----
option_list = list(
optparse::make_option(c("-r", "--rootdir"),
type="character",
default=NULL,
help="full path to root directory",
metavar="character"),
optparse::make_option(c("-d", "--debug"),
action="store_true",
default = FALSE,
dest = 'debug',
help="Print extra output")
);
opt_parser = optparse::OptionParser(option_list=option_list);
opt = optparse::parse_args(opt_parser)
if (is.null(opt$rootdir)){
optparse::print_help(opt_parser)
stop("Please supply a full path to root directory with data to analyse.\n", call.=FALSE)
}
## Assign global params ----
lPar = list()
lPar$dir.root = opt$rootdir
cat(sprintf('Working in:\n%s\n\n', lPar$dir.root))
if (opt$debug)
cat('\nEntering debug mode. More output...\n\n')
## Analyse data ----
if (opt$debug)
cat('\nAnalysis finished...\n\n') |
be218945a0989751dbe37652d0e4d324a5b92885 | 006698f9ad3934b41425cdff0b5da377b6adbbb0 | /R/convert.R | 30822b791e8e8ea8e935a7323dc827e583138172 | [] | no_license | nagyistge/imageanalysisBrain | 4878d7c3663cb4bd51ce1f754ac02b96280215f3 | 182a1ce3e2ab6e5312b69ee2528ca4a29aaca5d8 | refs/heads/master | 2021-01-21T06:27:05.669999 | 2017-02-02T18:19:29 | 2017-02-02T18:19:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,178 | r | convert.R | #' @title Convert ENC to DEC
#'
#' @description
#' Converts encoded coordinates to x,y,z values
#'
#' @param values imaging values
#' @param enc value, containing x,y,z, values
#'
#' @return data.frame with the decoded coordiantes
#'
#' @export
#'
#' @examples
#' enc<-c(51512331,51512254,51513355)
#' val<-c(342,221,123)
#' options("BITSIZE"=10)
#' enc2dec(val,enc)
enc2dec <- function(values, enc) {
df <- data.frame(x=decX(enc),
y=decY(enc),
z=decZ(enc),
val=values,
coord=enc)
return(df)
}
#' @title Convert DEC to ENC
#'
#' @description
#' Convert (x,y,z) coordinates to encoded coordinated
#'
#' @param data data.frame for volume (x,y,z, val)
#' @param index colum with values
#'
#' @return data.frame containing values and encoded coordinates
#'
#' @export
#'
#' @examples
#' data <- data.frame(
#' x=c(60,61,62),
#' y=c(40,40,40),
#' z=c(1,1,1),
#' val=c(234,112,45)
#' )
#' options("BITSIZE"=10)
#' dec2enc(data)
dec2enc <- function(data, index=4) {
df <- data.frame(val=data[,index],
COORD=enc(data$x, data$y, data$z))
return(df)
}
|
162fb56ef9b7a071aeaa3ce509e99310d07cd826 | 92c12cc1813db469bd25cb165ea7516344697ba5 | /scripts/classifiers/kmeans/KMeans.r | 62f0d1ae2979fd9927291d9d047eceb95b908337 | [
"MIT"
] | permissive | amanparmar17/Advance-Sentiment-Analysis | 1b6716622bf56fb9b7cd6140a7d699eae18d8ae1 | 7ce463644c1fd5b76e00299840ded49502702291 | refs/heads/master | 2020-05-07T14:05:35.925528 | 2019-04-29T06:46:17 | 2019-04-29T06:46:17 | 180,577,769 | 0 | 0 | MIT | 2019-04-29T06:46:18 | 2019-04-10T12:33:13 | R | UTF-8 | R | false | false | 2,045 | r | KMeans.r | #importing the dataset
setwd("git_repos/Advance-Sentiment-Analysis/scripts/classifiers/kmeans/")
dataset <- read.csv("sentiment.csv")
#selecting the columns of concern from the dataset in another variable
# %>% is called the pipe symbol
library(magrittr) # need to run every time you start R and want to use %>%
library(dplyr) # alternative, this also loads %>%
preprocessed_data <- dataset %>%
select(AS = airline_sentiment, ASC = airline_sentiment_confidence, NRC = negativereason_confidence)
#substituting null values with the mean values of the column
MeanNRC<-mean(preprocessed_data$NRC, na.rm=TRUE)
preprocessed_data$NRC[which(is.na(preprocessed_data$NRC))] <- MeanNRC
#creating a CSV file to store the columns of concern
write.csv(preprocessed_data,"preprocessed_dataset.csv",row.names = F)
#importing the refined dataset
KMeans<-read.csv("preprocessed_dataset.csv")
KMeans_2<-KMeans[-1]
KMeans_3<-as.data.frame(scale(KMeans_2))
KMeans_3
#finding the mean and standard deviation
sapply(KMeans_2,mean)
sapply(KMeans_2,sd)
sapply(KMeans_3,mean)
sapply(KMeans_3,sd)
#applying KMeans to the datasets
# NbClust package provides 30 indices for determining the number of clusters and proposes to user
# the best clustering scheme from the different results obtained by varying all combinations of number of
# clusters, distance measures, and clustering methods.
library(NbClust)
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
# nc <- NbClust(KMeans_3, min.nc=2, max.nc=15, method="kmeans")
# print(table(nc$Best.n[1,]))
#plotting the graph with the clusters
wssplot(KMeans_3,nc=30,seed=1234)
#visualising the results
# nc=6 as using the elbow method the appropriate number of clusters should be 6
base_kmeans<-kmeans(KMeans_3,6)
base_kmeans
base_kmeans$centers
base_kmeans$size |
c0f0f4ec7dad29271524a3c8a579797edb1ab989 | 3c452b0b285add7e8d2bc6627258cd410ecb53fa | /man/julia_eval.Rd | 9b2baed900f39ca51c492f5d4d8169dec9ba9451 | [
"MIT"
] | permissive | Non-Contradiction/JuliaCall | 295ff96fb5e6f1dbf9e79ad10871572a7ff8e1d1 | c05473bea78a0197c639f7e82ab1c6f2e943e1cc | refs/heads/master | 2023-07-22T02:56:15.201964 | 2023-07-13T16:17:34 | 2023-07-13T16:17:34 | 99,278,989 | 267 | 40 | NOASSERTION | 2023-06-14T18:43:16 | 2017-08-03T22:08:52 | R | UTF-8 | R | false | true | 1,198 | rd | julia_eval.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{julia_eval}
\alias{julia_eval}
\title{Evaluate string commands in julia and get the result back in R.}
\usage{
julia_eval(cmd, need_return = c("R", "Julia"))
}
\arguments{
\item{cmd}{the command string you want to evaluate in julia.}
\item{need_return}{whether you want julia to return value as an R object or
a wrapper for julia object.}
}
\value{
the R object automatically converted from julia object.
}
\description{
\code{julia_eval} evaluates string commands in julia and
returns the result to R.
The returning julia object will be automatically converted
to an R object or a JuliaObject wrapper,
see the documentation of the argument `need_return` for more details.
`julia_eval` will not invoke julia display system.
If you don't need the returning result in R or
you want to invoke the julia display system, you can
use \code{julia_command}.
}
\examples{
if (identical(Sys.getenv("AUTO_JULIA_INSTALL"), "true")) { ## julia_setup is quite time consuming
## doing initialization and automatic installation of Julia if necessary
julia_setup(installJulia = TRUE)
julia_eval("sqrt(2)")
}
}
|
2cc5db8202d7dcb245cdeac197d28525a2084f40 | 3c127b6565e3a6621c30e0a633246f227c587d57 | /Naive Bayes.R | ff335bd41a67e83ec550e3819db2a721b5e0f058 | [] | no_license | vikramjaisingh01/Student-Grade-Prediction-1 | 6576f7bf1a9d8636935d106258ad9d334326a2e7 | 2c95d2cda43d38df1098016820dc842460620cd0 | refs/heads/master | 2020-04-04T19:03:00.394023 | 2018-11-05T09:11:48 | 2018-11-05T09:11:48 | 156,189,726 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,488 | r | Naive Bayes.R | # Data preprocessing
dataset = read.csv('student-mat-logit.csv')
# no null variables present in the dataset
# Encoding categorical data
dataset$school = factor(dataset$school,
levels = c('GP', 'MS'),
labels = c(1,2) )
dataset$sex = factor(dataset$sex,
levels= c('M', 'F'),
labels= c('1', '2') )
dataset$address = factor(dataset$address,
levels = c('U', 'R'),
labels = c(1,2) )
dataset$famsize = factor(dataset$famsize,
levels= c('LE3', 'GT3'),
labels= c('1', '2') )
dataset$Pstatus = factor(dataset$Pstatus,
levels= c('A', 'T'),
labels= c('1', '2') )
dataset$Mjob = factor(dataset$Mjob,
levels= c('at_home', 'health', 'other', 'services', 'teacher'),
labels= c('1', '2', '3', '4', '5') )
dataset$Fjob = factor(dataset$Fjob,
levels= c('at_home', 'health', 'other', 'services', 'teacher'),
labels= c('1', '2', '3', '4', '5') )
dataset$reason = factor(dataset$reason,
levels= c('course', 'home','other','reputation'),
labels= c('1', '2','3','4') )
dataset$guardian = factor(dataset$guardian,
levels= c('father', 'mother','other'),
labels= c('1', '2','3') )
dataset$schoolsup = factor(dataset$schoolsup,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$famsup = factor(dataset$famsup,
levels = c('yes', 'no'),
labels = c('1', '0') )
dataset$paid = factor(dataset$paid,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$activities = factor(dataset$activities,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$nursery = factor(dataset$nursery,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$higher = factor(dataset$higher,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$internet = factor(dataset$internet,
levels= c('yes', 'no'),
labels= c('1', '0') )
dataset$romantic = factor(dataset$romantic,
levels= c('yes', 'no'),
labels= c('1', '0') )
#Encoding the target feature as factor
dataset$Pass.Fail = factor(dataset$Pass.Fail, levels = c(0,1))
#Splitting into training set and test set
#install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Pass.Fail, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#Fitting Naive Bayes to the training set
library(e1071)
classifier = naiveBayes(x = training_set[,c(2, 3, 15, 16, 25, 26, 29, 30)],
y = training_set$Pass.Fail)
y_pred = predict(classifier, newdata = test_set[,c(2, 3, 15, 16, 25, 26, 29, 30)])
y_pred_nb = performance(y_pred)
cm = table(test_set[,31], y_pred)
install.packages("pROC")
plot(roc(y_pred))
install.packages("ROCR")
|
b0d2607694848db87b3af77965062e743d2ce9dd | 6ef7ef07957ac0416dad1ec5e46994e25f47b283 | /R/meltMyTS.R | af68b277ca34551fe94c44d2e34c79ea065b8c05 | [] | no_license | jgabry/QMSS_package | a12cee9913f948df14ec898fab81c85e6980cbef | 58e49183a6066581c2dceacb9eebeced15c20292 | refs/heads/master | 2021-01-19T01:51:35.113047 | 2016-06-06T20:30:33 | 2016-06-06T20:30:33 | 22,050,285 | 7 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,696 | r | meltMyTS.R | #' Use \pkg{reshape2}'s \code{melt} on multivariate time series object
#'
#' \code{meltMyTS} is primarily designed to prepare data to be used with
#' \code{\link[QMSS]{ggMyTS}} (\pkg{QMSS}) to make plots of time series trends
#' with \code{\link[ggplot2]{ggplot}}.
#'
#' @param mv.ts.object A multivariate time series object created with \code{\link[stats]{ts}}.
#' @param time.var A character string naming the time variable.
#' @param keep.vars An optional character vector with names of variables to keep.
#' If \code{keep.vars} is not specified then all variables in \code{mv.ts.object}
#' will be kept. However, if any variables are named then all other variables will
#' be dropped, except \code{time.var}, which is always kept.
#' @return A molten data frame.
#' @author Jonah Gabry <jsg2201@@columbia.edu>. See \code{\link[reshape2]{melt}}
#' in (\pkg{reshape2}) for the author of the original \code{melt} function.
#' @seealso \code{\link[QMSS]{ggMyTS}}, \code{\link[reshape2]{melt.data.frame}}
#' @export
#' @examples
#' See examples in documentation for ggMyTS.
meltMyTS <- function(mv.ts.object, time.var, keep.vars){
# mv.ts.object = a multivariate ts object
# keep.vars = character vector with names of variables to keep
# time.var = character string naming the time variable
require(reshape2)
if(missing(keep.vars)) {
melt.dat <- data.frame(mv.ts.object)
}
else {
if (!(time.var %in% keep.vars)){
keep.vars <- c(keep.vars, time.var)
}
melt.dat <- data.frame(mv.ts.object)[, keep.vars]
}
melt.dat <- melt(melt.dat, id.vars = time.var)
colnames(melt.dat)[which(colnames(melt.dat) == time.var)] <- "time"
return(melt.dat)
}
|
94798f49098cb54388a1ac563d4d3a53a21350e1 | 0cfe694476706c53390e4a80bf8429aaaed5683c | /accessory_code/process_new_CEs/1_process_new_CEs_extend_rasters.r | c29c7d5b6739ef4ea7cfd466fdf63eb7d9161560 | [] | no_license | brasilbrasil/arcpyVAtool | 3a306a925a57b1de12b51037816d76e7122eb52c | 777bf97855e24b109e2afea7571d0c9f444d5433 | refs/heads/master | 2023-05-15T10:08:15.513913 | 2016-04-22T23:57:57 | 2016-04-22T23:57:57 | 56,891,554 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,237 | r | 1_process_new_CEs_extend_rasters.r | rm(list = ls()) #remove all past worksheet variables
library(raster)
source_dir="Y:/PICCC_data/VA data/DD A1B HRCM v2 CEs/range maps bin/" #DD A1B HRCM v2 CEs // SD RCP45 CEs // SD RCP85 CEs
wd="Y:/PICCC_data/VA data/DD A1B HRCM v2 CEs/range maps bin extended/"
dir.create(wd, showWarnings = F)
setwd(wd)
cpucores=20
tif_file_nms=list.files(source_dir, pattern="\\.tif$", recursive=T)
#extend rasters
extend_fx=function(tif_file_nm, source_dir.=source_dir, wd.=wd){
tif_file=raster(paste0(source_dir., tif_file_nm))
e=extent(-159.8003, -154.8053, 18.88725, 22.25325)
tif_file=extend(tif_file, e, value=NA)
dir.create(dirname(tif_file_nm), showWarnings = F)
writeRaster(tif_file, paste0(wd., tif_file_nm), format="GTiff", overwrite=TRUE, datatype='INT1U')
}
require(snowfall)
if (is.null(cpucores)){
cpucores=as.integer(Sys.getenv('NUMBER_OF_PROCESSORS'))
}else{
cpucores=min(cpucores, as.integer(Sys.getenv('NUMBER_OF_PROCESSORS')))
}
sfInit( parallel=T, cpus=cpucores) #
sfExportAll()
sfLibrary(raster)
sfLapply(tif_file_nms,fun=extend_fx)
#system.time(sfClusterApplyLB(iter_strings,fun=sp_parallel_run)) #why not alway us LB? Reisensburg2009_TutParallelComputing_Knaus_Porzelius.pdf
sfRemoveAll()
sfStop()
|
1ed2e23121bf1c964a4a1d51c68718a4b5a71da5 | e0548caf7bd8153f8d991b7d7c1bed487402f0bc | /semestr-4/SWD/kol1/zad2.R | 339abace6e62faa9a55faf2e4c9a41e8311767d6 | [] | no_license | Ch3shireDev/WIT-Zajecia | 58d9ca03617ba07bd25ce439aeeca79533f0bcb6 | 3cd4f7dea6abdf7126c44a1d856ca5b6002813ca | refs/heads/master | 2023-09-01T11:32:12.636305 | 2023-08-28T16:48:03 | 2023-08-28T16:48:03 | 224,985,239 | 19 | 24 | null | 2023-07-02T20:54:18 | 2019-11-30T08:57:27 | C | UTF-8 | R | false | false | 487 | r | zad2.R |
data = c(43, 56, 49, 41, 32, 42, 45, 56, 46, 38, 37, 32, 41, 53, 38, 44, 51, 45, 42, 40, 42, 44, 47, 35, 41, 36, 26, 36, 56, 36)
sigma = 7
# wiemy ze czas instalacji ma rozkład normalny, znamy również odchylenie standardowe, zatem możemy skorzystać z Modelu 1 z estymacji parametrycznej
# tj. z funkcji z.test. Domyślny parametr dla z.test to właśnie 95% przedziału ufności
z.test(data, stdev=sigma)
#95 percent confidence interval:
#39.82846 44.83821
# Wynik: 12 punktów |
84de8a95e5ab1b7ae2c4c9353f8c61f78d5b22f1 | acf7af720e1acb217bd4da13e1007ee94db0bfab | /scripts/plot_correlations.R | 43f6f36ecd463d467e50f5d0dff0e950e38c8831 | [] | no_license | venkoyoung/Network | 4184d2ec8f12fa8536b9b2fa2ba2ac0e5a2863eb | 6952d0aec7094a1539c4f7d77cef406ec4dbf09a | refs/heads/master | 2020-05-15T02:22:23.743714 | 2019-03-29T11:34:44 | 2019-03-29T11:34:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,382 | r | plot_correlations.R | #finalMatrixCOmparison
library(data.table)
library(stringr)
forPlot<-dfOrigEvents
#row.names(forPlot)<-rownames(dfOrigEvents)
row.names(forPlot)<-1:31
colnames(forPlot)<-colnames(dfOrigEvents)
length(which(is.na(forPlot)))#681
forPlot[is.na(forPlot)] <-4
forPlot[1:5,1:9]
#SD controles
forPlotMelted<-melt(forPlot)
head(forPlotMelted)
table(forPlotMelted$value)
#xlabels
eventName<-row.names(dfOrigEvents)
head(dfOrigEvents)
head(forPlotMelted)
table(forPlotMelted$value)
####################################
#pdf('withNs_hor_large.pdf', width = 80, height = 10)
pdf("withNs_hor_A4.pdf", width = 11.69, height = 8.27)
ggplot(forPlotMelted,
aes(x = Var2, y = Var1, fill = factor(value))) +
theme_minimal() +
geom_tile(position = "identity",
color = "grey") +
scale_y_continuous(breaks=c(1:31), labels=eventName,
limits=c(0,32) , expand = c(0,-0.5)) +
scale_x_discrete(position = "top") +
scale_fill_manual(values=c("white","blue","green","pink","red"))+
theme(
axis.text.x = element_text(angle = 90, hjust=0, vjust=-5,size=3),
axis.ticks.length=unit(0,"cm"),
plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border=element_blank(),
legend.position="none",
axis.line=element_blank()
)
dev.off()
|
6aabaedd0d794dc483d7b4df2eff8cd9535e89d2 | 7038a3930711d5dcc106df3ccd934824ab3a28c1 | /미래신호/미래신호_보건복지정책 미래신호 예측.R | 48ada381b222a8114766645f8caa01942045d38d | [] | no_license | jaehyunHa/hello | 1c4d6ca65c1d42c3d8426956603219fcce6b2fee | 9c4e458ded4eee14eb9e03cdbaedf043b635549c | refs/heads/master | 2020-05-16T04:17:51.765758 | 2019-04-22T12:36:30 | 2019-04-22T12:36:30 | 182,770,818 | 0 | 0 | null | null | null | null | UHC | R | false | false | 10,835 | r | 미래신호_보건복지정책 미래신호 예측.R | ## 5.보건복지정책 감정 변화 그래프(본 내용 사용)
#(col(1-6) 검정, 빨강, 초록, 파랑, 연파랑, 보라, 노랑, 회색)
rm(list=ls())
policy=read.csv("정책감정_그래프.csv",sep=",",stringsAsFactors=F)
a=policy$반대
b=policy$찬성
plot(a,xlab="",ylab="",ylim=c(0,120),type="o",axes=FALSE,ann=F,col=1) #반대 그래프
title(main="2016년 보건복지 정책 감정(1-9월)",col.main=1,font.main=2)
#title(xlab="정책",col.lab=1)
title(ylab="버즈(%)",col.lab=1)
axis(1,at=1:18,lab=c(policy$정책),las=2)
axis(2,ylim=c(0,120),las=2)
lines(b,col=2,type="o", lty=2) #찬성 그래프
colors=c(1,2,4)
ITY=cbind(lty=1, lty=2,lty=3)
legend(13,120,c("반대","찬성","평균"),cex=0.9,col=colors,lty=ITY,lwd=2)
abline(h=12.7,lty=3, col=4, lwd=0.5)
abline(h=87.3,lty=3, col=4, lwd=0.5)
savePlot("정책감정그래프.png",type="png")
rm(list=ls())
policy=read.csv("정책감정_그래프_13.csv",sep=",",stringsAsFactors=F)
a=policy$반대
b=policy$찬성
plot(a,xlab="",ylab="",ylim=c(0,120),type="o",axes=FALSE,ann=F,col=1)
title(main="2016년 보건복지 정책 감정(1-3월)",col.main=1,font.main=2)
#title(xlab="정책",col.lab=1)
title(ylab="버즈(%)",col.lab=1)
axis(1,at=1:18,lab=c(policy$정책),las=2)
axis(2,ylim=c(0,120),las=2)
lines(b,col=2,type="o")
colors=c(2,1)
legend(13,120,c("반대","찬성"),cex=0.9,col=colors,lty=1,lwd=2)
abline(h=12.8,lty=1, col=4, lwd=0.5)
abline(h=87.2,lty=1, col=4, lwd=0.5)
savePlot("정책감정그래프_13.png",type="png")
rm(list=ls())
policy=read.csv("정책감정_그래프_46.csv",sep=",",stringsAsFactors=F)
a=policy$반대
b=policy$찬성
plot(a,xlab="",ylab="",ylim=c(0,120),type="o",axes=FALSE,ann=F,col=1)
title(main="2016년 보건복지 정책 감정(4-6월)",col.main=1,font.main=2)
#title(xlab="정책",col.lab=1)
title(ylab="버즈(%)",col.lab=1)
axis(1,at=1:18,lab=c(policy$정책),las=2)
axis(2,ylim=c(0,120),las=2)
lines(b,col=2,type="o")
colors=c(2,1)
legend(13,120,c("반대","찬성"),cex=0.9,col=colors,lty=1,lwd=2)
abline(h=13.5,lty=1, col=4, lwd=0.5)
abline(h=86.5,lty=1, col=4, lwd=0.5)
savePlot("정책감정그래프_46.png",type="png")
rm(list=ls())
policy=read.csv("정책감정_그래프_79.csv",sep=",",stringsAsFactors=F)
a=policy$반대
b=policy$찬성
plot(a,xlab="",ylab="",ylim=c(0,120),type="o",axes=FALSE,ann=F,col=1)
title(main="2016년 보건복지 정책 감정(7-9월)",col.main=1,font.main=2)
#title(xlab="정책",col.lab=1)
title(ylab="버즈(%)",col.lab=1)
axis(1,at=1:18,lab=c(policy$정책),las=2)
axis(2,ylim=c(0,120),las=2)
lines(b,col=2,type="o")
colors=c(2,1)
legend(13,120,c("반대","찬성"),cex=0.9,col=colors,lty=1,lwd=2)
abline(h=12.0,lty=1, col=4, lwd=0.5)
abline(h=88.0,lty=1, col=4, lwd=0.5)
savePlot("정책감정그래프_79.png",type="png")
### Visualization
# 6. Monthly keyword changes for health & welfare policies and issues
#install.packages("wordcloud")
library(wordcloud)
# Sentiment-analysis results: "approve" (찬성) keywords and their frequencies.
# NOTE(review): `key` appears to have 22 entries but `freq` only 21 -- one
# frequency seems to be missing, which would misalign words and counts; verify.
key=c('관심','마련','최고','진행','참여','다양','운영','실현','행복','노력','소중','지원','가능',
'계획','확대','시행','최우선','발표','증가','필요','도움','추진')
freq=c(12777,8471,6570,16180,10116,11445,14545,3757,9318,242,22696,18572,5632,12821,
9798,726,6439,4443,21242,9930,10306)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(5,0.5),rot.per=.12,min.freq=5,
random.order=F,random.color=T,colors=palete)
savePlot("2016정책찬성.png",type="png")
# "Oppose" (반대) keywords.
key=c('부족','무시','반대','지적','부담','억울','비판','논란','문제','어려움','규제')
freq=c(3011,2451,7633,4065,6055,236,3535,4180,14794,4078,1029)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(5,0.5),rot.per=.12,min.freq=5,
random.order=F,random.color=T,colors=palete)
savePlot("2016정책반대.png",type="png")
# TF wordcloud
#
# Policies and issues: one shared `key` vector (27 policy/issue keywords) reused
# with four frequency vectors -- overall, Jan-Mar, Apr-Jun and Jul-Sep.
key=c('행복한노후','국민연금','기초연금','보육','결혼출산','가족친화',
'미래세대육성','의료민영화','무상정책','건강보험','원격의료','중증질환',
'환자안전','보건산업','복지급여','건강증진','일자리','의료비','자살',
'등록금','세금','개인정보','부동산','양극화','치료','담배','증세')
freq=c(964,2892,2433,5550,15720,1436,2095,1943,2869,5458,1808,696,
891,4286,20006,14691,27845,2545,2003,1445,13050,1947,1893,1005,
11114,1788,20736)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(3,0.5),rot.per=.12,min.freq=5,random.order=F,
random.color=T,colors=palete)
savePlot("2016정책이슈_TF_전체.png",type="png")
# Jan-Mar frequencies.
# NOTE(review): this vector appears to hold 26 values against 27 keywords --
# one frequency looks missing, which would misalign words and counts; verify.
freq=c(333,1100,1205,1919,4330,468,1214,680,1396,554,372,309,1266,6986,3976,10390,1122,
701,525,6543,894,724,314,3769,488,10049)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(3,0.5),rot.per=.12,min.freq=5,random.order=F,random.color=T,colors=palete)
savePlot("2016정책이슈_TF_13월.png",type="png")
# Apr-Jun frequencies.
freq=c(276,1214,599,2069,4399,512,395,978,814,1725,367,131,205,1503,5563,3973,8202,693,
639,378,3369,695,577,295,3734,528,4001)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(3,0.5),rot.per=.12,min.freq=5,random.order=F,random.color=T,colors=palete)
savePlot("2016정책이슈_TF_46월.png",type="png")
# Jul-Sep frequencies.
freq=c(355,578,629,1562,6991,456,486,285,659,1747,887,193,377,1517,7457,6742,9253,730,
663,542,3138,358,592,396,3611,772,6686)
library(RColorBrewer)
palete=brewer.pal(9,"Set1")
wordcloud(key,freq,scale=c(3,0.5),rot.per=.12,min.freq=5,random.order=F,random.color=T,colors=palete)
savePlot("2016정책이슈_TF_79월.png",type="png")
# 7. Health & welfare-related keywords
# Scatter maps of keyword frequency (x, column `tf`) versus time-weighted
# increasing rate (y, column `df`), with quadrant reference lines. Each map is
# drawn twice: once with English axis labels, once with Korean labels.
## KEM (keyword emergence map) -- x = average term frequency (DoV input file)
rm(list=ls())
data_spss=read.table(file="미래정책_DoV.txt",header=T)
windows(height=5.5, width=5)
plot(data_spss$tf,data_spss$df,xlim=c(0,10000), ylim=c(-0.3,1.0), pch=18 ,
col=8,xlab='average_term_frequency', ylab='time_weighted_increasing_rate',
main='Keyword Emergence Map')
text(data_spss$tf,data_spss$df,label=data_spss$정책,cex=0.8, col='red') # label points with policy names
abline(h=0.093, v=811, lty=1, col=4, lwd=0.5) # quadrant split (medians/means of the two axes, presumably; confirm)
savePlot('미래정책_DoV',type='png')
## KIM (keyword issue map) -- x = average document frequency (DoD input file)
rm(list=ls())
data_spss=read.table(file="미래정책_DoD.txt",header=T)
windows(height=5.5, width=5)
plot(data_spss$tf,data_spss$df,xlim=c(0,6000), ylim=c(-0.4,1.5), pch=18 ,
col=8,xlab='average_document_frequency', ylab='time_weighted_increasing_rate',
main='Keyword Issue Map')
text(data_spss$tf,data_spss$df,label=data_spss$정책,cex=0.8, col='red')
abline(h=0.037, v=517, lty=1, col=4, lwd=0.5)
savePlot('미래정책_DoD',type='png')
## KEM, Korean axis labels
rm(list=ls())
data_spss=read.table(file="미래정책_DoV.txt",header=T)
windows(height=5.5, width=5)
plot(data_spss$tf,data_spss$df,xlim=c(0,10000), ylim=c(-0.3,1.0), pch=18 ,
col=8,xlab='평균단어빈도', ylab='평균증가율',
main='Keyword Emergence Map')
text(data_spss$tf,data_spss$df,label=data_spss$정책,cex=0.8, col='red')
abline(h=0.093, v=811, lty=1, col=4, lwd=0.5)
savePlot('미래정책국문_DoV',type='png')
## KIM, Korean axis labels
rm(list=ls())
data_spss=read.table(file="미래정책_DoD.txt",header=T)
windows(height=5.5, width=5)
plot(data_spss$tf,data_spss$df,xlim=c(0,6000), ylim=c(-0.4,1.5), pch=18 ,
col=8,xlab='평균문서빈도', ylab='평균증가율',
main='Keyword Issue Map')
text(data_spss$tf,data_spss$df,label=data_spss$정책,cex=0.8, col='red')
abline(h=0.037, v=517, lty=1, col=4, lwd=0.5)
savePlot('미래정책국문_DoD',type='png')
# 4-4. Predicting future signals of health & welfare policy
# 1) Random forest
## 8. Variable importance of the major health & welfare policies in the
## random-forest model (response: Attitude; predictors: all other columns).
## random forest
#install.packages("randomForest")
library(randomForest)
tdata = read.table('보건복지_random_정책.txt',header=T)
# forest=FALSE: do not keep the forest object; importance=TRUE: compute importances.
tdata.rf = randomForest(Attitude~., data=tdata,forest=FALSE,importance=TRUE)
varImpPlot(tdata.rf, main='Random forest importance plot')
# Repeat for the issue-level dataset.
tdata = read.table('보건복지_random_이슈.txt',header=T)
tdata.rf = randomForest(Attitude~., data=tdata,forest=FALSE,importance=TRUE)
varImpPlot(tdata.rf, main='Random forest importance plot')
# 2) Association rules
## association rule
# 11. Association rules for the major health & welfare policies: mine rules whose
# right-hand side is the sentiment label 찬성 (approve) or 반대 (oppose).
asso=read.table('보건복지_association_정책.txt',header=T)
library(arules)
trans=as.matrix(asso,"Transaction")
rules1=apriori(trans,parameter=list(supp=0.01,conf=0.5), appearance=list(rhs=c("찬성", "반대"),
default="lhs"),control=list(verbose=F))
inspect(sort(rules1))
summary(rules1)
rules.sorted=sort(rules1, by="confidence")
inspect(rules.sorted)
rules.sorted=sort(rules1, by="lift")
inspect(rules.sorted)
# Same mining for the issue-level dataset (overwrites rules1).
asso=read.table('보건복지_association_이슈.txt',header=T)
trans=as.matrix(asso,"Transaction")
rules1=apriori(trans,parameter=list(supp=0.01,conf=0.5), appearance=list(rhs=c("찬성", "반대"),
default="lhs"),control=list(verbose=F))
inspect(sort(rules1))
summary(rules1)
rules.sorted=sort(rules1, by="confidence")
inspect(rules.sorted)
rules.sorted=sort(rules1, by="lift")
inspect(rules.sorted)
## Extract only the constrained rules (confidence >= 0.6)
rule_sub=subset(rules1,subset=confidence>=0.6)
inspect(sort(rule_sub,by="lift"))
## Extract only the constrained rules
# NOTE(review): rhs %pin% 'Failure' -- the rules mined above have rhs 찬성/반대,
# not 'Failure'; this filter looks like a leftover from another analysis. Verify.
rule_sub=subset(rules1,subset=rhs%pin% 'Failure' & confidence>=0.31)
inspect(sort(rule_sub,by="lift"))
# 12. Association rules among health & welfare policy keywords, visualized as a
# network (igraph) and with arulesViz rule plots.
asso=read.table('보건복지_association_정책이슈.txt',header=T)
library(arules)
trans=as.matrix(asso,"Transaction")
rules1=apriori(trans,parameter=list(supp=0.002,conf=0.45,target="rules"))
inspect(sort(rules1))
summary(rules1)
rules.sorted=sort(rules1, by="confidence")
inspect(rules.sorted)
rules.sorted=sort(rules1, by="lift")
inspect(rules.sorted)
#install.packages("dplyr")
library(dplyr)
#install.packages("igraph")
library(igraph)
# Turn each rule label "lhs/rhs" into an edge (pair of keywords); rules with an
# empty side are dropped before building the edge matrix.
rules = labels(rules1, ruleSep="/", setStart="", setEnd="")
rules = sapply(rules, strsplit, "/", USE.NAMES=F)
rules = Filter(function(x){!any(x == "")},rules)
rulemat = do.call("rbind", rules)
rulequality = quality(rules1) # NOTE(review): computed but not used below
ruleg = graph.edgelist(rulemat,directed=F)
#plot for important pairs
# NOTE(review): the first 16 edges are dropped here (rulemat[-c(1:16),]) and the
# full graph built just above is discarded -- confirm the cutoff is intended.
ruleg = graph.edgelist(rulemat[-c(1:16),],directed=F)
plot.igraph(ruleg, vertex.label=V(ruleg)$name, vertex.label.cex=1, vertex.size=20,
layout=layout.fruchterman.reingold.grid)
savePlot("보건복지정책_키워드연관.png", type="png")
## graph rule
#install.packages('MASS')
library(MASS)
#install.packages("arulesViz")
library(arulesViz)
## graph-based visualization
plot(rules1, method='graph',control=list(type='items'))
## parallel coordinates plot
plot(rules1, method='paracoord',control=list(reorder=T))
## grouped matrix-based visualization
plot(rules1, method='grouped')
|
e0ef01766c491c439416bb0fea4523025e0e3b73 | b08c25c032524882c6e3e5dae632c7ffc00b7bf1 | /Fit_IndModel_3scr_A1_noI.R | b7c6e4ad838992f75339d283a2e69fa73dbe11cc | [] | no_license | haejung017/BiostatsR | 959e0351b7e356fcefcd5078a95a7711ca962915 | 4937e6978cba002317ae0c4b80ae2d2730f5f531 | refs/heads/master | 2020-04-21T11:15:57.645621 | 2019-02-07T04:45:11 | 2019-02-07T04:48:00 | 169,517,945 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,131 | r | Fit_IndModel_3scr_A1_noI.R |
# Fisher's z-transformation and its inverse
alpha_2_eta<-function(alpha) log( (1+alpha)/(1-alpha))/2
eta_2_alpha<-function(eta) (exp(2*eta)-1)/(exp(2*eta)+1)
# alpha_2_eta(0)
# eta_2_alpha(0)
#### fit Copula model
# Fit the independence (no-frailty) model by minimizing the corrected negative
# log-likelihood loglik.Ind() with optim().
#
# Arguments:
#   pars     starting values: log-baseline parameters (positions 1-2, optimized
#            on the log scale) followed by regression coefficients for gene,
#            oophorectomy and the three screening covariates (positions 3-7;
#            see the row names "Lambda","Rho","gene","Ooph","scr1"-"scr3").
#   dataset  family data; must contain the columns used in loglik.Ind().
#   TDf_or, TDf_sc  form of the time-dependent effect for oophorectomy and for
#            screening: "PE", "ED" or "CO" (presumably permanent exposure /
#            exponential decay / Cox-Oakes -- TODO confirm naming).
#   agemin   minimum age; loglik.Ind() shifts all ages by this amount.
#   phi0, phi  starting values for the extra decay parameters; they are
#            log-transformed below and optimized on the log scale.
#   wgt      if TRUE, observations are weighted by 1/weight in the likelihood.
#
# Returns a 23 x 3 matrix (rows "Lambda" ... "LogL", columns Est/SE/Pvalue);
# slots that do not apply to the chosen model stay NA.
fit.Ind<-function(pars, dataset, TDf_or, TDf_sc, agemin, phi0=1e-5, phi=c(3,2,1e-5), wgt=FALSE){
# Log-transform the extra (phi) starting values: optimization is on the log scale.
ee0=phi0
ee1=phi[1]
ee2=phi[2]
ee3=phi[3]
phi_init0=log(ee0)
phi_init1=log(ee1)
phi_init2=log(ee2)
phi_init3=log(ee3)
phi_sc=c(phi_init1, phi_init2, phi_init3)
# Starting values for the "CO" slope coefficients (a00 for oophorectomy,
# a01-a03 for the three screens).
a00=0.001
a01=0.001
a02=0.001
a03=0.001
a0=c(0.001, 0.001, 0.001) # NOTE(review): defined but never used
# Append the case-specific extra parameters to the starting vector; the order
# here must match how loglik.Ind() unpacks pars from the tail.
pars1=pars
if(TDf_or=="PE" && TDf_sc=="ED")
pars1=c(pars, phi_sc)
if(TDf_or=="PE" && TDf_sc=="CO")
pars1=c(pars, phi_init1, a01, phi_init2, a02, phi_init3, a03 )
if(TDf_or=="ED" && TDf_sc=="PE")
pars1=c(pars, phi_init0)
if(TDf_or=="ED" && TDf_sc=="ED")
pars1=c(pars,phi_init0, phi_sc)
if(TDf_or=="ED" && TDf_sc=="CO")
pars1=c(pars,phi_init0, phi_init1, a01, phi_init2, a02, phi_init3, a03 )
if(TDf_or=="CO" && TDf_sc=="PE")
pars1=c(pars, phi_init0, a00)
if(TDf_or=="CO" && TDf_sc=="ED")
pars1=c(pars,phi_init0, a00, phi_sc)
if(TDf_or=="CO" && TDf_sc=="CO")
pars1=c(pars,phi_init0, a00, phi_init1, a01, phi_init2, a02, phi_init3, a03 )
# Optimize; on failure retry with method="Nelder-Mead" explicitly and compute
# the Hessian separately with hessian() (presumably numDeriv::hessian -- confirm).
out <- try( optim(pars1, loglik.Ind, data=dataset, agemin=agemin, TDf_or=TDf_or, TDf_sc=TDf_sc, wgt=wgt,
hessian=TRUE, control = list(trace = 1, maxit = 10000) ) , TRUE)
if(inherits(out ,'try-error')){
warning(as.matrix(out))
out <- optim(pars1, loglik.Ind, data=dataset, agemin=agemin, TDf_or=TDf_or, TDf_sc=TDf_sc, wgt=wgt, method="Nelder-Mead",
control = list(trace = 1, maxit = 10000) )
out$hessian <- hessian(loglik.Ind, out$par, data=dataset, agemin=agemin, TDf_or=TDf_or, TDf_sc=TDf_sc, wgt=wgt)
}
print(out$convergence)
# print(out$hessian)
#out$hessian[out$hessian==0]=1e-7
# Variance matrix: try the Moore-Penrose generalized inverse (MASS::ginv) of
# the Hessian first, then plain solve(), and finally give up with NA.
# Var1 <- try(solve(out$hessian), TRUE)
Var1 <- try(ginv(out$hessian), TRUE)
if(inherits(Var1 ,'try-error')){
warning(as.matrix(Var1))
Var1 <- try(solve(out$hessian), TRUE)
if(inherits(Var1 ,'try-error')){
warning(as.matrix(Var1))
Var1 <-NA_real_
}
}
#print(Var1)
print(diag(Var1))
Var<-Var1
# Results: back-transform the first two estimates (optimized on the log scale)
# and build delta-method SEs (se.exp = exp(estimate) * se for exp-transformed
# entries, raw se otherwise).
pars.est <- out$par
pars.est[1:2]<-exp(pars.est[1:2])
dg<-abs(diag(Var))
se <- sqrt(dg)
se.exp <- exp(out$par)*se
# se.cg <- eta_2_alpha(out$par)*se
SE <- c(se.exp[1:2], se[3:7])
# Full-length result vectors: 7 core parameters plus labelled NA slots that are
# filled per model case below.
pars.est1<-c(pars.est[1:7], phi0=NA, a00=NA, phi1=NA, a01=NA, phi2=NA, a02=NA, phi3=NA, a03=NA, LRTphi=NA, alpha=NA, tau=NA, df=NA, LRT=NA, AIC=NA, BIC=NA, LogL=NA)
SE1<-c(SE[1:7], phi0=NA, a00=NA, phi1=NA, a01=NA, phi2=NA, a02=NA, phi3=NA, a03=NA, LRTphi=NA, alpha=NA, tau=NA, df=NA, LRT=NA, AIC=NA, BIC=NA, LogL=NA)
LogL<- -out$value
AIC <- 2*(length(out$par)-LogL)
# dd = number of events; used as the effective sample size in the BIC penalty.
dd=sum(dataset$status, na.rm=T)
BIC=log(dd)*length(out$par) -2*LogL
# WALD & Pvalue (two-sided for coefficients; the exp-transformed phi slots get
# one-sided p-values in the case-specific blocks below).
wald <- pars.est1/SE1
pval<-ifelse(wald>0, 2*(1-pnorm(wald)), 2*(pnorm(wald)) )
# --- Per-case unpacking of the extra parameters into the labelled slots.
# Slot map (offset from k=7): +1 phi0, +2 a00, +3 phi1, +4 a01, +5 phi2,
# +6 a02, +7 phi3, +8 a03.
if(TDf_or=="PE" && TDf_sc=="ED"){ # two terms of the likelihood under frailty model
k=7
pars.est1[k+3]<-exp(pars.est[k+1])
SE1[k+3]<-se.exp[k+1]
wald[k+3]=pars.est1[k+3]/SE1[k+3]
pval[k+3]<- 1-pnorm(wald[k+3])
pars.est1[k+5]<-exp(pars.est[k+2])
SE1[k+5]<-se.exp[k+2]
wald[k+5]=pars.est1[k+5]/SE1[k+5]
pval[k+5]<- 1-pnorm(wald[k+5])
pars.est1[k+7]<-exp(pars.est[k+3])
SE1[k+7]<-se.exp[k+3]
wald[k+7]=pars.est1[k+7]/SE1[k+7]
pval[k+7]<- 1-pnorm(wald[k+7])
}
if(TDf_or=="PE" && TDf_sc=="CO"){ # two terms of the likelihood under frailty model
k=7
pars.est1[k+3]<-exp(pars.est[k+1])
pars.est1[k+4]<-pars.est[k+2]
pars.est1[k+5]<-exp(pars.est[k+3])
pars.est1[k+6]<-pars.est[k+4]
pars.est1[k+7]<-exp(pars.est[k+5])
pars.est1[k+8]<-pars.est[k+6]
SE1[(k+3)]<-se.exp[(k+1)]
SE1[(k+4)]<-se[(k+2)]
SE1[(k+5)]<-se.exp[(k+3)]
SE1[(k+6)]<-se[(k+4)]
SE1[(k+7)]<-se.exp[(k+5)]
SE1[(k+8)]<-se[(k+6)]
# Positions 10/12/14 are the phi slots (one-sided), 11/13/15 the a slots (two-sided).
wald[c(10,12,14)] <- pars.est1[c(10,12,14)]/ SE1[c(10,12,14)]
pval[c(10,12,14)]<- 1-pnorm(wald[c(10,12,14)])
wald[c(11,13,15)] <- pars.est1[c(11,13,15)]/ SE1[c(11,13,15)]
pval[c(11,13,15)]<- ifelse(wald[c(11,13,15)]>0, 2*(1-pnorm(wald[c(11,13,15)])), 2*(pnorm(wald[c(11,13,15)])) )
}
if(TDf_or=="ED" && TDf_sc=="PE"){ # two terms of the likelihood under frailty model
pars.est1[8]<-exp(pars.est[length(pars.est)])
SE1[8]<-se.exp[length(pars.est)]
wald[8]=pars.est1[8]/SE1[8]
pval[8]<- 1-pnorm(wald[8])
}
if(TDf_or=="ED" && TDf_sc=="ED"){ # two terms of the likelihood under frailty model
k=7
pars.est1[k+1]<-exp(pars.est[k+1])
SE1[k+1]<-se.exp[k+1]
wald[k+1]=pars.est1[k+1]/SE1[k+1]
pval[k+1]<- 1-pnorm(wald[k+1])
pars.est1[k+3]<-exp(pars.est[k+2])
SE1[k+3]<-se.exp[k+2]
wald[k+3]=pars.est1[k+3]/SE1[k+3]
pval[k+3]<- 1-pnorm(wald[k+3])
pars.est1[k+5]<-exp(pars.est[k+3])
SE1[k+5]<-se.exp[k+3]
wald[k+5]=pars.est1[k+5]/SE1[k+5]
pval[k+5]<- 1-pnorm(wald[k+5])
pars.est1[k+7]<-exp(pars.est[k+4])
SE1[k+7]<-se.exp[k+4]
wald[k+7]=pars.est1[k+7]/SE1[k+7]
pval[k+7]<- 1-pnorm(wald[k+7])
}
if(TDf_or=="ED" && TDf_sc=="CO"){ # two terms of the likelihood under frailty model
k=7
pars.est1[k+1]<-exp(pars.est[k+1])
SE1[k+1]<-se.exp[k+1]
wald[k+1]=pars.est1[k+1]/SE1[k+1]
pval[k+1]<- 1-pnorm(wald[k+1])
pars.est1[k+3]<-exp(pars.est[k+2])
SE1[k+3]<-se.exp[k+2]
wald[k+3]=pars.est1[k+3]/SE1[k+3]
pval[k+3]<- 1-pnorm(wald[k+3])
pars.est1[k+4]<-pars.est[k+3]
SE1[k+4]<-se[k+3]
wald[k+4]=pars.est1[k+4]/SE1[k+4]
pval[k+4]<- ifelse(wald[k+4]>0, 2*(1-pnorm(wald[k+4])), 2*(pnorm(wald[k+4])) )
pars.est1[k+5]<-exp(pars.est[k+4])
SE1[k+5]<-se.exp[k+4]
wald[k+5]=pars.est1[k+5]/SE1[k+5]
pval[k+5]<- 1-pnorm(wald[k+5])
pars.est1[k+6]<-pars.est[k+5]
SE1[k+6]<-se[k+5]
wald[k+6]=pars.est1[k+6]/SE1[k+6]
pval[k+6]<-ifelse(wald[k+6]>0, 2*(1-pnorm(wald[k+6])), 2*(pnorm(wald[k+6])) )
pars.est1[k+7]<-exp(pars.est[k+6])
SE1[k+7]<-se.exp[k+6]
wald[k+7]=pars.est1[k+7]/SE1[k+7]
pval[k+7]<- 1-pnorm(wald[k+7])
pars.est1[k+8]<-pars.est[k+7]
SE1[k+8]<-se[k+7]
wald[k+8]=pars.est1[k+8]/SE1[k+8]
pval[k+8]<-ifelse(wald[k+8]>0, 2*(1-pnorm(wald[k+8])), 2*(pnorm(wald[k+8])) )
}
if(TDf_or=="CO" && TDf_sc=="PE"){ # two terms of the likelihood under frailty model
pars.est1[8]<-exp(pars.est[length(pars.est)-1])
SE1[8]<-se.exp[length(pars.est)-1]
wald[8]=pars.est1[8]/SE1[8]
pval[8]<- 1-pnorm(wald[8])
pars.est1[9]<-pars.est[length(pars.est)]
SE1[9]<-se[length(pars.est)]
wald[9]=pars.est1[9]/SE1[9]
pval[9]<- ifelse(wald[9]>0, 2*(1-pnorm(wald[9])), 2*(pnorm(wald[9])) )
}
if(TDf_or=="CO" && TDf_sc=="ED"){ # two terms of the likelihood under frailty model
k=7
pars.est1[8]<-exp(pars.est[k+1])
SE1[8]<-se.exp[k+1]
wald[8]=pars.est1[8]/SE1[8]
pval[8]<- 1-pnorm(wald[8])
pars.est1[9]<-pars.est[k+2]
SE1[9]<-se[k+2]
wald[9]=pars.est1[9]/SE1[9]
pval[9]<- ifelse(wald[9]>0, 2*(1-pnorm(wald[9])), 2*(pnorm(wald[9])) )
pars.est1[k+3]<-exp(pars.est[k+3])
SE1[k+3]<-se.exp[k+3]
wald[k+3]=pars.est1[k+3]/SE1[k+3]
pval[k+3]<- 1-pnorm(wald[k+3])
pars.est1[k+5]<-exp(pars.est[k+4])
SE1[k+5]<-se.exp[k+4]
wald[k+5]=pars.est1[k+5]/SE1[k+5]
pval[k+5]<- 1-pnorm(wald[k+5])
pars.est1[k+7]<-exp(pars.est[k+5])
SE1[k+7]<-se.exp[k+5]
wald[k+7]=pars.est1[k+7]/SE1[k+7]
pval[k+7]<- 1-pnorm(wald[k+7])
}
if(TDf_or=="CO" && TDf_sc=="CO"){ # two terms of the likelihood under frailty model
k=7
pars.est1[8]<-exp(pars.est[k+1])
SE1[8]<-se.exp[k+1]
wald[8]=pars.est1[8]/SE1[8]
pval[8]<- 1-pnorm(wald[8])
pars.est1[9]<-pars.est[k+2]
SE1[9]<-se[k+2]
wald[9]=pars.est1[9]/SE1[9]
pval[9]<- ifelse(wald[9]>0, 2*(1-pnorm(wald[9])), 2*(pnorm(wald[9])) )
pars.est1[k+3]<-exp(pars.est[k+3])
SE1[k+3]<-se.exp[k+3]
wald[k+3]=pars.est1[k+3]/SE1[k+3]
pval[k+3]<- 1-pnorm(wald[k+3])
# NOTE(review): the estimate below is exponentiated but SE1 uses the raw se[]
# (other exp-transformed slots use se.exp[]); the same applies at k+7 further
# down. Possible inconsistency -- confirm which delta-method SE is intended.
pars.est1[k+4]<-exp(pars.est[k+4])
SE1[k+4]<-se[k+4]
wald[k+4]=pars.est1[k+4]/SE1[k+4]
pval[k+4]<- ifelse(wald[k+4]>0, 2*(1-pnorm(wald[k+4])), 2*(pnorm(wald[k+4])) )
pars.est1[k+5]<-exp(pars.est[k+5])
SE1[k+5]<-se.exp[k+5]
wald[k+5]=pars.est1[k+5]/SE1[k+5]
pval[k+5]<- 1-pnorm(wald[k+5])
pars.est1[k+6]<-pars.est[k+6]
SE1[k+6]<-se[k+6]
wald[k+6]=pars.est1[k+6]/SE1[k+6]
pval[k+6]<-ifelse(wald[k+6]>0, 2*(1-pnorm(wald[k+6])), 2*(pnorm(wald[k+6])) )
pars.est1[k+7]<-exp(pars.est[k+7])
SE1[k+7]<-se[k+7]
wald[k+7]=pars.est1[k+7]/SE1[k+7]
pval[k+7]<-2*(1-pnorm(abs(wald[k+7])))
pars.est1[k+8]<-pars.est[k+8]
SE1[k+8]<-se[k+8]
wald[k+8]=pars.est1[k+8]/SE1[k+8]
pval[k+8]<-ifelse(wald[k+8]>0, 2*(1-pnorm(wald[k+8])), 2*(pnorm(wald[k+8])) )
}
# Fill the information-criterion slots and assemble the labelled result matrix.
pars.est1[length(pars.est1)-2]<-AIC
pars.est1[length(pars.est1)-1]<-BIC
pars.est1[length(pars.est1)]<-LogL
mle<-matrix(NA,ncol=3,nrow=length(pars.est1) )
colnames(mle)<-c("Est","SE","Pvalue")
mle[,1]=pars.est1
mle[,2]=SE1
mle[,3]=pval
rownames(mle)=c("Lambda","Rho","gene","Ooph", "scr1","scr2","scr3",
"phi0", "a00", "phi1", "a01", "phi2", "a02", "phi3", "a03","LRTphi", "alpha","tau","df","LRT", "AIC","BIC","LogL")
return( mle )
}
#### TD Log-likelihood function under copula model
# Negative corrected log-likelihood of the independence model, minimized by
# fit.Ind() via optim().
#
# Arguments:
#   pars     parameter vector as assembled by fit.Ind(): log(lambda), log(rho),
#            beta.g (gene), beta.or (oophorectomy), beta.sc (3 screening
#            coefficients), followed by the case-specific extra parameters
#            (phi's on the log scale, a's untransformed) in the tail.
#   data     family data; rows with currentage < agemin are dropped.
#   agemin   minimum age; all age/time columns are shifted by this amount.
#   TDf_or, TDf_sc  time-dependent effect form for oophorectomy / screening
#            ("PE", "ED" or "CO"); controls how the tail of pars is unpacked.
#   wgt      if TRUE, each observation is weighted by 1/data$weight.
#
# The likelihood has two parts: the naive log-likelihood over all subjects
# (log-hazard for events plus cumulative-hazard survival term from marg_fun(),
# defined elsewhere) minus an ascertainment correction computed from the
# probands' status at study entry. Returns -(corrected log-likelihood).
loglik.Ind=function(pars, data, agemin, TDf_or="PE", TDf_sc="PE", wgt=FALSE){
# the data: keep subjects at or above the minimum age, shift all times by agemin
data = data[data$currentage>=agemin,]
Y=data$time - agemin
#G=data$mgene
Ybr=data$br.censortime-agemin
Yor=data$ov.censortime-agemin
Yst1=data$st1-agemin
Yst2=data$st2-agemin
Yst3=data$st3-agemin
cp = data$carrp.geno # carrier probability
status=data$status
wt<-rep(1,length(Y))
if(wgt==TRUE)
wt<-1/data$weight
# BRI <- ifelse(Y-Ybr> 0,1,0)
# Y[BRI==1]<-Ybr[BRI==1]
# status[BRI==1]=0
# parameters: baseline parameters enter on the log scale
lamb<-exp(pars[1])
rho<-exp(pars[2])
beta.g<-pars[3]
beta.or<-pars[4]
#beta.gor<-pars[5]
beta.sc<-pars[5:7]
#beta.gsc<-c(0,0,0)
# beta.orsc<-pars[12:14]
# Unpack the case-specific tail of pars (mirror image of the packing done in
# fit.Ind()): phi's were optimized on the log scale, a's untransformed.
if(TDf_or=="PE" && TDf_sc=="PE" ){
mpars=c(lamb, rho,beta.g, beta.or, beta.sc)
} else if(TDf_or=="PE" && TDf_sc=="ED" ){
phi1=exp(pars[length(pars)-2])
phi2=exp(pars[length(pars)-1])
phi3=exp(pars[length(pars)])
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi1, phi2, phi3)
} else if(TDf_or=="PE" & TDf_sc=="CO" ){ # NOTE(review): single & (scalar here, so equivalent, but && is used everywhere else)
phi1=exp(pars[length(pars)-5])
a01=pars[length(pars)-4]
phi2=exp(pars[length(pars)-3])
a02=pars[length(pars)-2]
phi3=exp(pars[length(pars)-1])
a03=pars[length(pars)]
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi1, a01, phi2, a02, phi3, a03)
} else if(TDf_or=="ED" && TDf_sc=="PE" ){
phi0=exp(pars[length(pars)])
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0)
# if(phi0<e1 || phi0>e2) return( Inf )
} else if(TDf_or=="ED" && TDf_sc=="ED" ){
phi0=exp(pars[length(pars)-3])
phi1=exp(pars[length(pars)-2])
phi2=exp(pars[length(pars)-1])
phi3=exp(pars[length(pars)])
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0, phi1, phi2, phi3)
} else if(TDf_or=="ED" && TDf_sc=="CO" ){
phi0=exp(pars[length(pars)-6])
phi1=exp(pars[length(pars)-5])
a01=pars[length(pars)-4]
phi2=exp(pars[length(pars)-3])
a02=pars[length(pars)-2]
phi3=exp(pars[length(pars)-1])
a03=pars[length(pars)]
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0, phi1, a01, phi2, a02, phi3, a03)
} else if(TDf_or=="CO" && TDf_sc=="PE" ){
phi0=exp(pars[length(pars)-1])
a00=pars[length(pars)]
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0, a00)
} else if(TDf_or=="CO" && TDf_sc=="ED" ){
phi0=exp(pars[length(pars)-4])
a00=pars[length(pars)-3]
phi1=exp(pars[length(pars)-2])
phi2=exp(pars[length(pars)-1])
phi3=exp(pars[length(pars)])
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0, a00, phi1, phi2, phi3)
} else if(TDf_or=="CO" && TDf_sc=="CO" ){
phi0=exp(pars[length(pars)-7])
a00=pars[length(pars)-6]
phi1=exp(pars[length(pars)-5])
a01=pars[length(pars)-4]
phi2=exp(pars[length(pars)-3])
a02=pars[length(pars)-2]
phi3=exp(pars[length(pars)-1])
a03=pars[length(pars)]
mpars=c(lamb, rho,beta.g, beta.or, beta.sc, phi0, a00, phi1, a01, phi2, a02, phi3, a03)
}
# marginal distributions: marg_fun() (defined elsewhere) returns the log-hazard
# and the cumulative hazard H for each subject.
mdist=marg_fun(mpars=mpars, Y=Y, Yor=Yor, Yst1=Yst1, Yst2=Yst2, Yst3=Yst3, Ybr=Ybr, TDf_or=TDf_or, TDf_sc=TDf_sc, cp=cp)
loghaz=mdist$logh
H=mdist$H
print( mpars ) # progress trace: parameter values at each likelihood evaluation
term1= wt*loghaz
term2=-wt*H
# the naive log-likelihood function: events contribute the log-hazard,
# everyone contributes the survival term -H.
loglik<- sum(status*term1, na.rm=T) + sum(term2, na.rm=T)
## corrected term based on affected and unaffected probands (ascertainment)
ip<- which(data$proband==1)
cage<- data$currentage[ip]-agemin
timep<- data$time[ip]-agemin
Gp=data$mgene[ip]
Yst1p=data$st1p[ip]-agemin
Yst2p=data$st2p[ip]-agemin
Yst3p=data$st3p[ip]-agemin
Ybrp=data$br.censortimep[ip]-agemin
Yorp=data$ov.censortimep[ip]-agemin
nbSCp=data$screen_number_p # NOTE(review): not subset by [ip] like the others, but unused below
statusp <- data$affect[ip] # proband disease status at the study entry
wtp=wt[ip]
cpp=cp[ip]
# BRIp=ifelse(cage<=Ybrp,0,1)
# cage[BRIp==1]<-Ybrp[BRIp==1]
# statusp[BRIp==1]=0
### For unaffected probands: contribute log-survival up to study entry (-H).
ip0<-which(statusp==0)
cage0<- cage[ip0]
Gp0=Gp[ip0]
Yst1p0=Yst1p[ip0]
Yst2p0=Yst2p[ip0]
Yst3p0=Yst3p[ip0]
Ybrp0=Ybrp[ip0]
Yorp0=Yorp[ip0]
wtp0=wtp[ip0]
cpp0=cpp[ip0]
Hp0<-marg_fun(mpars=mpars, Y=cage0, Yor=Yorp0, Yst1=Yst1p0, Yst2=Yst2p0, Yst3=Yst3p0, Ybr=Ybrp0, TDf_or=TDf_or, TDf_sc=TDf_sc, cp=cpp0)$H
#Hp0[Hp0<=0]=1e-10
logasc0 <- - Hp0 # log survival until study entry for unaffected probands
logasc0 <- sum(wtp0*logasc0,na.rm=TRUE)
### For affected probands: contribute log-penetrance log(1 - exp(-H)).
ip1<-which(statusp==1 | statusp==2)
cage1<- cage[ip1]
Gp1=Gp[ip1]
#Yscp1=Yscp[statusp==1 |statusp==2]
Ybrp1=Ybrp[ip1]
Yorp1=Yorp[ip1]
Yst1p1=Yst1p[ip1]
Yst2p1=Yst2p[ip1]
Yst3p1=Yst3p[ip1]
wtp1=wtp[ip1]
cpp1=cpp[ip1]
#Hp1<-marg_funp1(mpars=mpars, Y=cage1, Yor=Yorp1, TDfunc=TDfunc, cp=cpp1)$H
Hp1<-marg_fun(mpars=mpars, Y=cage1, Yor=Yorp1, Yst1=Yst1p1, Yst2=Yst2p1, Yst3=Yst3p1, Ybr=Ybrp1, TDf_or=TDf_or, TDf_sc=TDf_sc, cp=cpp1)$H
#print(Hp1)
#Hp1[Hp1<=0]=1e-10
logasc1 <- log(1-exp(-Hp1) ) # log penetrance at study entry for affected probands
logasc1 <- sum(wtp1*logasc1,na.rm=TRUE)
slogasc = logasc0 + logasc1
# corrected log-likelihood: subtract the ascertainment term
cloglik<- loglik - slogasc
# cloglik<- loglik
return(-cloglik)
}
# # test between ED and PE
# mpars=c(log(I.PE.PE[1:2,1]),I.PE.PE[3:10,1])
# loglik.Ind(pars=mpars, data, agemin, TDf_or="PE", TDf_sc="PE", wgt=FALSE)
# loglik.Ind(pars=c(mpars, log(1e-7)), data, agemin, TDf_or="ED", TDf_sc="PE", wgt=FALSE)
# loglik.Ind(pars=c(mpars,log(1e-7), log(1e-7), log(1e-7)), data, agemin, TDf_or="PE", TDf_sc="ED", wgt=FALSE)
# loglik.Ind(pars=c(mpars,log(1e-7), 0, log(1e-7), 0, log(1e-7), 0), data, agemin, TDf_or="PE", TDf_sc="CO", wgt=FALSE)
# loglik.Ind(pars=c(mpars, log(1e-7), log(1e-7), log(1e-7), log(1e-7) ), data, agemin, TDf_or="ED", TDf_sc="ED", wgt=FALSE)
# loglik.Ind(pars=c(mpars, log(1e-7)), data, agemin, TDf_or="ED", TDf_sc="PE", wgt=FALSE)
# loglik.Ind(pars=c(mpars,log(1e-7), log(1e-7), log(1e-7), log(1e-7)), data, agemin, TDf_or="ED", TDf_sc="ED", wgt=FALSE)
# loglik.Ind(pars=c(mpars,log(1e-7),log(1e-7), 0, log(1e-7), 0, log(1e-7), 0), data, agemin, TDf_or="ED", TDf_sc="CO", wgt=FALSE)
# loglik.Ind(pars=c(mpars, log(1e-7),0), data, agemin, TDf_or="CO", TDf_sc="PE", wgt=FALSE)
|
044283e9cf10e79d841bac5b43f7ea776eadf0f2 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610031203-test.R | f1925c2d3d09a7184bd9fd538523016b5df819d6 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 313 | r | 1610031203-test.R | testlist <- list(id = integer(0), x = c(6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947794e-183, 2.5909984685287e-312, 0, 0), y = numeric(0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
5e3a00c05dfa5ce4e5a665ad2083b38a013f6360 | 2c4e38480f522f3762b6967d7cfbf8aae0a948aa | /man/H5Sset_extent_simple.Rd | 21b3928ccd0f4857fb882d6a3129f18ce979157f | [] | no_license | grimbough/rhdf5 | 097f8c48f322172415308491b603ff5b6cb4521a | 4d430edf2a95cc9a2e4dfbf857ff6ff41cdef61d | refs/heads/devel | 2023-07-27T14:14:36.645790 | 2023-07-12T09:09:12 | 2023-07-12T09:09:12 | 101,306,379 | 51 | 24 | null | 2023-04-05T08:13:17 | 2017-08-24T14:51:27 | R | UTF-8 | R | false | true | 785 | rd | H5Sset_extent_simple.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/H5S.R
\name{H5Sset_extent_simple}
\alias{H5Sset_extent_simple}
\title{Set the size of a dataspace}
\usage{
H5Sset_extent_simple(h5space, dims, maxdims)
}
\arguments{
\item{h5space}{\linkS4class{H5IdComponent} object representing a dataspace.}
\item{dims}{Dimension of the dataspace. This argument is similar to the dim
attribute of an array. When viewing the HDF5 dataset with an C-program
(e.g. HDFView), the dimensions appear in inverted order, because the
fastest changing dimension in R is the first one, and in C its the last
one.}
\item{maxdims}{Maximum extension of the dimension of the dataset in the
file. If not provided, it is set to \code{dims}.}
}
\description{
Set the size of a dataspace
}
|
63939052289d88ddb833e2eeb13f9aa28de6224e | 1f3470378a71d5c6efa5bccd845e0e8ab04089ef | /buildData.R | e28ea95d8e853e056d58f09d2638643304191638 | [] | no_license | rfgordonjr/sampleGBM | 18a335c59a7322b4e632836b6a2cd4ccf53e033e | 7f218363990aace7a2f6848cc10c76a061186e8b | refs/heads/master | 2020-04-08T06:34:14.084054 | 2018-11-26T22:45:49 | 2018-11-26T22:45:49 | 159,101,398 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,532 | r | buildData.R | ## packages used ####
## buildData.R -- prepare the Kaggle Telco customer-churn data for modelling:
## read the raw CSV, recode the categorical columns as factors (reference level
## chosen as the most frequent level), derive a fee-change indicator and the
## churn target, then write a ~70/30 train/test split to RDS files.
library(dplyr)
library(Hmisc)
library(ggplot2)
## Pull kaggle data ####
## https://www.kaggle.com/blastchar/telco-customer-churn/version/1 ####
rawdata <- read.csv(file = "/Users/robertgordon/Documents/equifaxCaseStudy/sampleAttritionModel/data/WA_Fn-UseC_-Telco-Customer-Churn.csv",stringsAsFactors = FALSE)
str(rawdata)
describe(rawdata)
## Form data into factor so that reference level is the level occurring most frequently ####
data <- rawdata %>%
  mutate(# NOTE(review): in the published Kaggle file TotalCharges holds blank
         # strings for zero-tenure customers, so read.csv() may import it as
         # character and the numeric comparisons below would be lexicographic.
         # The coercion is a no-op if the column is already numeric; blanks
         # become NA (with a warning).
         TotalCharges = as.numeric(TotalCharges),
         gender_factor = factor(gender, levels = c("Male", "Female")),
         partner_factor = factor(Partner, levels = c("No", "Yes")),
         dependents_factor = factor(Dependents, levels = c("No", "Yes")),
         phoneService_factor = factor(PhoneService, levels = c("Yes", "No")),
         multipleLines_factor = factor(MultipleLines, levels = c("No", "Yes", "No phone service")),
         internetService_factor = factor(InternetService, levels = c("Fiber optic", "DSL", "No")),
         onlineSecurity_factor = factor(OnlineSecurity, levels = c("No", "Yes", "No internet service")),
         onlineBackup_factor = factor(OnlineBackup, levels = c("No", "Yes", "No internet service")),
         deviceProtection_factor = factor(DeviceProtection, levels = c("No", "Yes", "No internet service")),
         techSupport_factor = factor(TechSupport, levels = c("No", "Yes", "No internet service")),
         streamingTV_factor = factor(StreamingTV, levels = c("No", "Yes", "No internet service")),
         streamingMovies_factor = factor(StreamingMovies, levels = c("No", "Yes", "No internet service")),
         contract_factor = factor(Contract, levels = c("Month-to-month", "Two year", "One year")),
         paperlessBilling_factor = factor(PaperlessBilling, levels = c("Yes", "No")),
         # BUG FIX: the last level previously read "Credit card (automatic"
         # (missing the closing parenthesis); factor() maps unmatched values to
         # NA, so every credit-card customer was silently recoded NA.
         paymentMethod_factor = factor(PaymentMethod, levels = c("Electronic check", "Mailed check", "Bank transfer (automatic)", "Credit card (automatic)")),
         # Expected lifetime charges if the current monthly fee had always applied.
         tenureTimesMonthly = tenure*MonthlyCharges,
         # Compare actual lifetime charges against the expected total to infer
         # whether the customer's fee changed in the past.
         feeChange = case_when(TotalCharges < tenureTimesMonthly ~ "Prior Increase",
                               TotalCharges > tenureTimesMonthly ~ "Prior Decrease",
                               TotalCharges == tenureTimesMonthly ~ "No Change",
                               TRUE ~ "NA"),
         feeChange = na_if(feeChange, "NA"),
         feeChange_factor = factor(feeChange, levels = c("Prior Increase", "Prior Decrease", "No Change")),
         churn_target = if_else(Churn=="Yes", 1, 0))
## How close is total charges to tenure x monthly charges? ####
describe(data$tenureTimesMonthly)
## If total charges less than current monthly bill x tenure, an increase took place
## If total charges greater than current monthly bill x tenure, a decrease took place
data %>%
  ggplot(., aes(tenureTimesMonthly, TotalCharges)) +
  geom_point() +
  geom_abline(slope = 1, intercept = 0, col = "red")
## How many are exactly equal? ####
data %>% filter(tenureTimesMonthly == TotalCharges) %>% nrow()
describe(data$feeChange)
## Build a model ####
set.seed(12345)
## Create train/test data (approx. 70/30 uniform split)
data_split <- data %>%
  mutate(unif = runif(n = nrow(.)),
         dat = if_else(unif <= 0.7, "Train", "Test"))
train <- data_split %>% filter(dat == "Train")
test <- data_split %>% filter(dat == "Test")
## Save train/test to separate rds files ####
saveRDS(train, "/Users/robertgordon/Documents/equifaxCaseStudy/sampleAttritionModel/data/train.rds")
saveRDS(test, "/Users/robertgordon/Documents/equifaxCaseStudy/sampleAttritionModel/data/test.rds")
|
574fc4a36642edcfbebd14e2080ff53d70509b3e | 69ed15a883dfbc2d67023d436dbb4cb9742b3970 | /R/getESS.R | 90948b7f1c33a08da6f1fbec699f2b77b8ac27b2 | [] | no_license | joh4n/JGTools | 57f163463b107028509243260e80f7f05f847dd5 | 7418f924665c03791e758da7bc3112cd6b2022d9 | refs/heads/master | 2021-06-20T08:26:36.857149 | 2017-06-17T13:35:56 | 2017-06-17T13:35:56 | 77,140,153 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 729 | r | getESS.R | #' A function to calculate the ESS of a STAN_fit
#'
#' This function returns the ESS values of a STAN_fit in ascending order
#' @param min_ess the lowest ESS to not be returned
#' @param STAN_fit a stan fit df
#' @return A data frame with the ESS below the min_ess limit in ascending order
#' @examples
#' getESS( min_ess = 900, STAN_fit = sfitdf)
#' @export
getESS <- function(min_ess = 400, STAN_fit = NULL) {
  # Fall back to the globally defined `sfitdf` when no fit is supplied,
  # matching the original behaviour (sfitdf must exist on the search path).
  fit_df <- if (is.null(STAN_fit)) sfitdf else STAN_fit
  # ess_warn() reports the parameters whose ESS is below the threshold;
  # sort() orders them ascending by ESS.
  low_ess <- sort(ess_warn(STAN_fit = fit_df, min_ess = min_ess))
  out <- data.frame(Parameter = names(low_ess), ESS = low_ess)
  rownames(out) <- NULL
  return(out)
}
|
d3bfb4cba0b83bb5d500c736b8866b4222f66736 | bce7204fa229f7c9966ef7b8fe8102e881d30e38 | /man/plot_list.Rd | 0d2b66f835bb9fb42ee42b832eec8bbf005d1d48 | [] | no_license | king8w/aplot | 4aa865df3f0e1668341820d894b97d2b6ffb180a | b7c1a643ed3f224e12ff1c2c1f9ea87c76cf5c39 | refs/heads/master | 2023-07-23T23:47:23.610808 | 2021-08-27T08:24:28 | 2021-08-27T08:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 650 | rd | plot_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-list.R
\name{plot_list}
\alias{plot_list}
\title{plot a list of ggplot objects}
\usage{
plot_list(gglist, ncol = NULL, nrow = NULL, widths = NULL, heights = NULL, ...)
}
\arguments{
\item{gglist}{list of ggplot objects}
\item{ncol}{number of columns}
\item{nrow}{number of rows}
\item{widths}{relative widths}
\item{heights}{relative heights}
\item{...}{additional parameters that passed to plot_layout}
}
\value{
composite plot
}
\description{
plot a list of ggplot objects using patchwork, similar to `cowplot::plot_grid(plotlist)`
}
\author{
Guangchuang Yu
}
|
7ca19f30382b5700c228d0f69c38b4df1dae38f8 | 0229705f89eb090f7bc5b7899dc07e7609e2cc80 | /R/compare.graphs.R | 5e56643c5740ea1cd79c562a3a3b6973c2927445 | [] | no_license | cran/causaleffect | d77a060d14f6d985b355c6b743c947bd8093a751 | 687d5177aa178cb89782fd496523c0e1f3ceb9f5 | refs/heads/master | 2022-07-24T03:48:40.739576 | 2022-07-14T08:10:05 | 2022-07-14T08:10:05 | 24,474,611 | 5 | 6 | null | null | null | null | UTF-8 | R | false | false | 560 | r | compare.graphs.R | compare.graphs <- function(G1, G2) {
  # Edge list of G1 as a data frame (one row per edge: from/to vertex ids).
  e1 <- as.data.frame(igraph::get.edges(G1, igraph::E(G1)))
  # Attach the edge attributes as a third column.
  e1[ ,3] <- igraph::edge.attributes(G1)
  e2 <- as.data.frame(igraph::get.edges(G2, igraph::E(G2)))
  e2[ ,3] <- igraph::edge.attributes(G2)
  n1 <- nrow(e1)
  n2 <- nrow(e2)
  # Different edge counts: the graphs cannot be equal.
  if (n1 != n2) return(FALSE)
  # If a graph carried no edge attributes (still 2 columns), pad with the
  # default mark "O"; missing attribute values are treated as "O" as well.
  if (ncol(e1) == 2) e1$description <- "O"
  if (ncol(e2) == 2) e2$description <- "O"
  e1[which(is.na(e1[,3])), 3] <- "O"
  e2[which(is.na(e2[,3])), 3] <- "O"
  # Equal iff every row of e2 duplicates an earlier row in the stacked frame,
  # i.e. each (from, to, attribute) triple of G2 also occurs in G1.
  # NOTE(review): this assumes edge rows are unique within each graph;
  # duplicated parallel edges could make the comparison misbehave -- confirm.
  if (all(duplicated(rbind(e1, e2))[(n1+1):(2*n1)])) return(TRUE)
  return(FALSE)
} |
9980f3366b6672036329bfd2217179db38e29b6f | 5e17745a3ef7aec4e3a2cf72bea6421d38074e43 | /man/fit2dt.Rd | 9e05fe09d609b056d4b3c862b0785f4de91ffc8a | [] | no_license | SRenan/PSUmisc | a81be8ba6c8c1568955f276499eae678220f987b | a892f134c4c2f0b43dfa70d4097165053f27381f | refs/heads/master | 2021-06-03T14:08:11.104044 | 2021-04-15T14:32:53 | 2021-04-15T14:32:53 | 136,512,680 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 353 | rd | fit2dt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{fit2dt}
\alias{fit2dt}
\title{Transform lm/glm output to data.table}
\usage{
fit2dt(fit)
}
\arguments{
\item{fit}{A \code{lm} object. The output of \code{lm} or \code{glm}}
}
\description{
Transform the output of \code{lm} or \code{glm} into a data.table.
}
|
8cfb6aae08f10e9496b4c64c65746a2eb9d5dc2e | 4a83ad27beef869488fe4234399c2c9aa995d79b | /man/extracted.Rd | 23fb0c63de65a83949fb7a121936fa52a2d67732 | [] | no_license | NateByers/transformR | 4076c3ed8128071b2c1ff82d84947c8a5ec8d70f | 0ad03d33eb7da13d6bfde5903eaa9cbbc1d567bc | refs/heads/master | 2020-06-08T21:42:50.488584 | 2015-06-11T19:40:47 | 2015-06-11T19:40:47 | 37,281,714 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 419 | rd | extracted.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/transform.R
\name{extracted}
\alias{extracted}
\title{Constructor function for \code{extracted} class}
\usage{
extracted(data)
}
\arguments{
\item{data}{A \code{data.frame} extracted from a database}
}
\value{
A \code{data.frame} with primary class \code{extracted}
}
\description{
Constructor function for \code{extracted} class
}
|
e9adf83f4488b1de8815e6814c6ef4a456d4fdd0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/soobench/examples/branin_function.Rd.R | 9f5187a465ecc0069f2fa416509fcc601f88ad78 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 190 | r | branin_function.Rd.R | library(soobench)
### Name: branin_function
### Title: Generator for the Branin test function.
### Aliases: branin_function
### ** Examples
# Build the Branin benchmark function object provided by soobench.
f <- branin_function()
# rank = TRUE: presumably plots rank-transformed function values -- see the
# plot method documentation in soobench to confirm.
plot(f, rank=TRUE)
|
71200bffacde31791c936edd46cf9f7350d52bc3 | 83abd69780507ed850f1b35df6781b34aec059dd | /sources/framework/visioneval/man/initDatastoreGeography.Rd | 3b2c21d4a69e13ebb5903a08463daa8340b89cff | [
"Apache-2.0"
] | permissive | goreaditya/OregonDOT-VisionEval | b6677fccf279d9d8cb8ba42d5a1c0eca3bcb7722 | bf036adf6e9cca2aae813b4aa16a752b16be2e3b | refs/heads/master | 2020-09-04T16:34:14.378987 | 2019-10-29T19:21:08 | 2019-10-29T19:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,076 | rd | initDatastoreGeography.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initialization.R
\name{initDatastoreGeography}
\alias{initDatastoreGeography}
\title{Initialize datastore geography.}
\usage{
initDatastoreGeography()
}
\value{
The function returns TRUE if the geographic tables and datasets are
successfully written to the datastore.
}
\description{
\code{initDatastoreGeography} a visioneval framework control function that
initializes tables and writes datasets to the datastore which describe
geographic relationships of the model.
}
\details{
This function writes tables to the datastore for each of the geographic
levels. These tables are then used during a model run to store values that
are either specified in scenario inputs or that are calculated during a model
run. The function populates the tables with cross-references between
geographic levels. The function reads the model geography (Geo_df) from the
model state file. Upon successful completion, the function calls the
listDatastore function to update the datastore listing in the global list.
}
|
af81e4e20a865955fae77bfbc71dfe8bee3e2021 | 1f84c6855a1e0f547b97f67956a0c848893afe23 | /runr/200512/r/1589319085627-runr-3513979055283432-script_Aula1_Introdu__o_ao_R.r | 33041d180eb7df241ac1bfc013559126e5f84714 | [] | no_license | variskindo/files | 90508bebf67a75594a25a293be7f3260fef25adc | c6109f0ab6586ef9b3120aaa12d0aeed09a4d137 | refs/heads/master | 2023-08-24T14:52:35.023447 | 2023-08-23T13:26:19 | 2023-08-23T13:26:19 | 189,692,180 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,863 | r | 1589319085627-runr-3513979055283432-script_Aula1_Introdu__o_ao_R.r | ######################################################################
############### Aula 1 - Introdução ao software R ####################
######################################################################
# Instalar o interprete do R
# https://www.r-project.org/
# Instalar o IDE, RStudio
# https://rstudio.com/products/rstudio/
# Exploração e ajustes da interface
# Preparação do diretório de trabalho
getwd() # diretório atual
setwd("D:/AULAS/20200501_GLM/Aula_1_Software_R/") # seleciona o novo diretório
# Calculadora ########################################################
10 + 3
10 - 3
10 / 3
10 * 3
10 ^ 3
10 ** 3
(10 + 3) * 2
# Linguagem orientada por objetos ####################################
# Veja o uso do operador "<-"
numeroA <- 10
numeroB <- 3
numeroC <- 2
# Operações podem ser realidas ao 'chamar' objetos pelos seus nomes
numeroA + numeroB
numeroA - numeroB
numeroA / numeroB
numeroA * numeroB
numeroA ^ numeroB
numeroA ** numeroB
(numeroA + numeroB) * numeroC
# Obs.: A linguagem é 'case sensitive'
dfa <- 2.1
dfA <- 1.2
# Funções ############################################################
# São comandos aplicados a objetos, que são informados como argumentos
# entre '()'
sum(1, 1)
sqrt(9)
# Tipos de objetos ###################################################
# Depósito de diversos tipos de conteúdo em novos objetos
objetoA <- 35 # numeric
objetoB <- "String de caracteres" # character
objetoC <- 35L # integer
objetoD <- TRUE # logical (boolean)
# Checando o tipo
class(objetoA) # em alguns contextos pode ser chamado de 'double'
class(objetoB)
class(objetoC)
class(objetoD)
# Checando a estrutura
str(objetoA)
str(objetoB)
str(objetoC)
str(objetoD)
# Vetores ############################################################
c(1, 2, 3, 4) # concantenação de um cojunto específico de valores
c(1:4) # concatenação de um intervalo
1:4 # intervalo dispensa a função de concatenação
str(1:4) # diagnostico: estrutura
length(1:4) # diagnostico: comprimento
plot(c(1:10)) # diagnostico: visual
# exemplo: variação fictícia de temperatura diária durante um ano
{plot(rnorm(365, mean = 20, sd = 0.2) +
c(seq(1.5, -1, length.out = 160), seq(-1, 1.5, length.out = 205)),
type = "l", xlab = "Dia", ylab = "Temperatura (ºC)")
}
# um vetor pode ser construido de formas diferentes.
# Essa forma...
c(c(1:3), c(3:1))
# ...produz o mesmo vetor que essa:
vetorA <- 1:3
vetorB <- 3:1
vetorC <- c(vetorA, vetorB)
vetorC
# mas retém nomes de objetos, úteis para uso posterior,
# Se estes objetos extras não são necessários
ls() # listando os objetos no ambiente do R
rm(vetorA, vetorB) # removendo alguns deles
ls() # veja como não estão mais disponíveis no ambiente
# É importante compreender como o comportamento de c() muda segundo a
# natureza do conteúdo.
# Vetor numérico
str(c(1, 2, 3, 4))
# Vetor de caracteres com um valor não-disponível ('NA')
str(c("um", "dois", "tres", NA))
# Vetor de valores lógicos ou booleanos
str(c(TRUE, FALSE, F, T, NA))
# Se um dos valores não é numérico, o restante é lido como caractere
str(c(1, "2", 3, NA))
# Vetor de valores inteiros
str(c(1L, 2L, 3L, NA))
# Se não são todos inteiros, são lidos como numéricos
str(c(1L, 2L, 3, NA))
# Quando são combinados vários, character é o default por preservar
# melhor o conteúdo
str(c(1L, 2.1, "tres", NA, TRUE))
# Outros valores especiais
1/0 # Infinito
0/0 # Idefinido
# Outras formas de criar vetores
# Repetindo o valor uma determinada quantidade de vezes
?rep
rep(x = "TRUE", times = 4)
rep(FALSE, 4)
rep(1:3, 2)
rep(1:3, each = 2)
# ... por sequencias em intervalos definidos
seq(from = 0, to = 4, by = 0.5)
# ... por números gerados aleatoriamente (com distribução normal)
hist(rnorm(n = 100, mean = 2, sd = 0.5))
# As classes de vetores podem ser modificadas
# valores numéricos como caracteres
str(as.character(c(1, 2, 3)))
# valores numéricos como valores inteiros
str(as.integer(c(1, 2, 3)))
# caracteres como valores numéricos
str(as.numeric(c("1B", "2", "3")))
fatores <- as.factor(c('A', 'B', 'C', 'A', 'B'))
fatores
length(fatores)
levels(fatores) # output é um vetor de strings
str(fatores)
as.character(fatores) # pode ser convertido em um vetor de strings
as.numeric(fatores) # ou em um vetor numerico
as.integer(fatores) # ou em um vetor de inteiros
# Mudando os nomes dos níveis e repetindo os diagnósticos
levels(fatores) <- c("um", "dois", "tres")
fatores
length(fatores)
levels(fatores)
str(fatores)
as.character(fatores)
as.numeric(fatores)
as.integer(fatores)
# Operações com vetores ##############################################
# Veja como a soma direta de vetores difere da função 'sum()'
vetor.x <- c(1, 1, 1, 1)
vetor.y <- c(1, 2, 3)
vetor.z <- c(10, 20)
vetor.i <- c(1, 1, 1, NA)
length(vetor.x)
length(vetor.y)
vetor.x + vetor.x #
vetor.x + vetor.y # comprimento parcialmente compativel gera alerta
vetor.x + vetor.z # compativel, mas menor
vetor.x + vetor.i # compativel, mas com NA
sum(vetor.x, vetor.i) # NA gera comportamento indesejado
sum(vetor.x, vetor.i, na.rm = TRUE) # NA removido
# Pipelines ##########################################################
# Processos mais complexos como
# 'rnorm()' gera numeros aleatorios de distribuição gaussiana,
# 'round()' ajusta a quantidade de casas depois da virgula e
# 'sort()' coloca os valores em ordem decrescente
# Podem ser computados eusando uma sequencia de objetos no R base
A <- rnorm(n = 8)
A
B <- round(A, digits = 3)
B
C <- sort(B, decreasing = TRUE)
C
# Na forma de uma pipeline em R-base
sort(c(100, round(A, digits = 3)), decreasing = T)
# Na forma de uma pipeline no estilo do pacote 'dplyr'
library(dplyr) # carregando um pacote externo
# Baixar o pacote, caso não esteja instalado
install.packages("dplyr") # executar e aguardar a instalação
library(dplyr) # segunda tentativa de carregar
A %>% round(3) %>% c(100, .) %>% sort(decreasing = T)
# Condições e Indexação ##############################################
# Condições dão valores lógicos como output
5 > 2 # 'maior que'
2 > 5 # 'maior que'
c(1, 3, 6) > 2 # 'maior que' em vetor
2 >= 2 # 'maior ou igual a'
2 >= 1 # 'maior ou igual a'
2 >= 5 # 'maior ou igual a'
2 <= 5 # 'menor ou igual a'
2 == 2 # 'igual a'
2 == 3 # 'igual a'
2 != 3 # 'diferente de'
2 != 2 # 'diferente de'
2 == 2 & 2 <= 3 # 'igual a' e 'menor ou igual a'
2 == 2 & 2 <= 1 # 'igual a' e 'menor ou igual a'
2 == 2 | 2 <= 1 # 'igual a' ou 'menor ou igual a'
3 != 4 | 3 >= 4 # 'diferente de' ou 'maior ou igual a'
3 != 3 | 3 >= 4 # 'diferente de' ou 'maior ou igual a'
3 %in% c(1, 2, 3, 4) # 'está contido em'
c(1, 3, 6) %in% c(1, 2, 3, 4) # Está contido em
x <- c(1, 2, 3, 4, NA)
x <- c(1, 2, 3, 4)
if (anyNA(x)) {
print("Tem NA")
} else {
print("Não tem NA")
}
# Indexação em vetores ###############################################
# Criando o vetor usado nos exemplos a seguir (sequência de 1 a 20,
# repetida 3x, com a concatenação de um NA no final)
x <- c(rep(seq(0, 20, 1), 3), NA)
# Indexação para obter subconjuntos ou subsets do vetor original
x[4] # 'somente o 4'
x[-4] # 'todos menos o 4'
x[2:4] # 'do 2 ao 4'
x[-(2:4)] # 'menos a seq de 2 a 4'
x[c(1, 5)] # 'somente 1 e 5'
x[x == 12] # 'igual a 12'
x[x != 12] # 'todos diferentes de 12'
x[x < 10] # 'todos maiores que 10'
x[x <= 10] # 'todos maiores ou iguais a 10'
x[x > 10 & x == 3]# 'maiores que 10' e 'iguais a 3'
x[x < 10 | x == 3]# 'menores que 10' ou 'iguais a 3'
x[x %in% c(2, 12)] # todos os contidos no vetor
x[is.na(x)] # valores NA
# Indexação para localizar as posições dos elementos que satisfazem as
# condições
which(x == 12) # iguais a 12
which(x < 10) # menores que 10
which(x <= 10) # menores ou iguais a 10
which(x > 10 & x == 3)# maiores que 10 ou iguais a 3
which(x < 10 | x != 3)# menores que 10 ou diferentes de 3
which(x %in% c(2, 12)) # contidos no vetor
# Valor máximo no vetor
max(x) # note que o NA produz comportamento indesejado
max(x, na.rm = T) # mas pode ser ignorado
# Qual é a posição, ou índice, do valor máximo
which.max(x) # se existem multiplos, retorna somente do primeiro
# Para as posições de todos o que satisfazem essa condição
which(x == max(x, na.rm = T))
# Existem valores não-disponíveis no vetor?
anyNA(c(1, 2, NA, 4)) # o resultado é um valor lógico
# Quais valores satisfazem essa condição?
is.na(c(1, 2, NA, 4)) # o resultado é um vetor de valores lógicos
# Qual é a posição dos que satisfaem essa condição no vetor?
which(is.na(c(1, 2, NA, 4))) # o resultado é um índice
which(!is.na(c(1, 2, NA, 4))) # o resultado é um índice
# Matrizes ###########################################################
# Matrizes são vetores organizados em linhas e colunas, quando
# Primeiro, criamos um vetor
vetor.a <- c(1, 2, 3, 4, 5, 6, 7, 8, 9)
# Convertendo ele para o formato de uma matriz de 3 x 3
m <- matrix(vetor.a, nrow = 3, ncol = 3)
m # preenchimento por coluna
m <- matrix(vetor.a, nrow = 3, ncol = 3, byrow = T)
m # preenchimento por linha
# Alguns diagnósticos úteis
str(m) # checar a estrutura
dim(m) # checar as dimensões
length(m) # checar o comprimento do vetor implícito
attributes(vetor.a)
attributes(m)
# Exemplo: Matriz topografica com valores de altitude de um vulcão
m %>% glimpse() %>% image(col = viridis::viridis(100))
volcano %>% glimpse() %>% image(col = viridis::magma(100))
# Idexação e funções úteis
m[3, ] # somente a linha 3
m[, 3] # somente a coluna 3
m[3, 3] # somente o valor na linha 3 e na coluna 3
m[3] # sem coordenadas, o R le a matriz como vetor
m / 10 # operações matemáticas simples são recursivas
m + m # operações matriciais
m + c(0.1, 0.1, 0.1) # operações matriciais
m %*% m # multiplicação pela matriz transposta
m * t(m) # multiplicação pela matriz transposta
sum(m) # somatório
log(m) # log
# Outra forma de criar uma matriz:
# Criar uma matriz vazia e introduzir o conteudo por indexação
m <- matrix(nrow = 3, ncol = 3)
m[, 1] <- c(1, 2, 3)
m[, 2] <- c(4, 5, 6)
m[, 3] <- c(7, 8, 9)
# Reunir matrizes pre-existentes
cbind(m, m) # 'colando' em novas colunas
rbind(m, m) # 'colando' em nobas linhas
#
m <- matrix(vetor.a, nrow = 3, ncol = 3)
str(m)
x <- c(1, 2, 3, 4)
y <- c(4, 3, 2, 1)
m <- cbind(x, y)
str(m)
rownames(m) <- c("a", "b", "c", "d")
colnames(m) <- c("coluna 1", "coluna 2")
str(m)
m
attributes(m)
m
vetor.nomeado <- m[, "coluna 2"]
str(vetor.nomeado)
attributes(vetor.nomeado)
# Data frames ########################################################
# Compondo matriz para o exemplo
x <- c(1, 2, 3, 4)
y <- c(4, 3, 2, 1)
m <- cbind(x, y)
rownames(m) <- c("a", "b", "c", "d")
# Convertendo o tipo de objeto
dfA <- data.frame(m)
dfA <- as.data.frame(m)
m
dfA
# As diferenças são mais óbvias na estrutura dos objetos
str(m)
str(dfA)
# A indexação pode ser feita da mesma forma que em matrizes
dfA["a", "y"]
dfA[1, 2]
dfA[1, ]
# Com algumas praticidades extras, como a indexação por nome
dfA$x
dfA$y
# ... como a capacidade de reunir vetores de diferentes classes
dfA$z <- as.factor(c(2, 2, 1, 2))
dfA$log.de.X <- log(dfA$x)
dfA$log.de.X <- NULL
dfA$i <- as.character(c("um", "dois", "tres", "quatro"))
str(dfA)
dfA$especie <- c("Columba livia", "Passer domesticus",
"Columba livia", "Passer domesticus")
dfA$morreu <- as.factor(c("morreu", "morreu","não morreu","morreu"))
# ... o input manual dos dados é mais simples
dfB <- data.frame(
amostra = as.character(c("A", "B","C", "D", "E", "F")), # Importante para GLM
massa = c(152, 171.5, 165, 152, NA, 165),
morreu = as.factor(c("sim", "não", "sim", "sim", "não", "sim")),
pH = c(6.1, 7.3, 6, 6.1, 7.3, 6),
tratamento = c(NA, "Pesticida", "Controle", "Pesticida",
"Controle", "Pesticida"),
stringsAsFactors = FALSE)
dfB$tratamento <- as.factor(dfB$tratamento)
dfB
rownames(dfB) <- dfB$amostra
dfB
str(dfB)
nrow(dfB)
dim(dfB)
rownames(dfB)
colnames(dfB)
summary(dfB)
length(dfB)
ncol(dfB)
nrow(dfB)
dfB %>% as.matrix %>% length
# Exemplo de indexação orientada por uma condição
dfB[dfB$massa > 165, ]
dfB[dfB$massa > 165, 1:2]
dfB[dfB$massa > 165, c(1, 4)]
dfB[dfB$massa > 165, c("pH", "tratamento")]
# Existem valores não-disponíveis no vetor?
anyNA(dfB) # o resultado é um valor lógico
# Quais valores satisfazem essa condição?
is.na(dfB) # o resultado é um vetor de valores lógicos
# Qual é a posição dos que satisfaem essa condição no vetor?
which(is.na(dfB), arr.ind = TRUE) # o resultado é um índice
# Importando dados ###################################################
"D:\AULAS\20200501_GLM\Aula_1_Software_R"
getwd()
list.files()
# setwd("/media/grosa/SSDext_GRos/AULAS/20200501_GLM/Aula_1_Software_R/")
setwd("D:/AULAS/20200501_GLM/Aula_1_Software_R/")
getwd()
list.files()
df.irisA <- read.csv(file = "iris.data.csv",
row.names = 1) %>% glimpse()
df.irisB <- read.csv(file = "iris.dataV2.csv", header = F) %>%
glimpse()
df.irisC <- read.csv(file = "iris.dataV3.csv", sep = ";",
row.names = 1) %>% glimpse()
head(df.irisA, 10)
tail(df.irisA, 10)
head(df.irisB, 10)
head(df.iris, 10)
df.irisB$V6 %>% levels
read.csv(file = "iris.dataV2.csv", header = F,
na.strings = "") %>% glimpse()
df.irisC <- read.csv("iris.dataV2.csv", header = F,
na.strings = "", row.names = 1,
stringsAsFactors = T) %>% glimpse()
str(df.irisC)
head(df.irisC, 10)
anyNA(df.irisC)
length(which(is.na(df.irisC)))
which(is.na(df.irisC), arr.ind = T)
df.irisC[-which(is.na(df.irisC), arr.ind = T)[, 1], ] %>% str
na.omit(df.irisC) %>% glimpse()
# Salvando o ambiente ################################################
save.image("")
load("")
# Exportando conjuntos de dados ######################################
df.irisC %>% str
# Identificar o diretório atual
getwd()
# Procurar o diretório de trabalho desejado
# setwd('')
# Lista arquivos
list.files(include.dirs = T)
# Criar um diretório chamado 'backup'
dir.create("backup")
# Verificar se ele consta no diretório de trabalho
list.files(include.dirs = T)
# Escrever um arquivo .csv com o conteúdo do objeto 'dfC'
write.csv2(x = df.irisC, file = "backup/df_backup2.csv")
# Ou um arquivo de texto
write.table(df.irisC, "backup/df_backup.txt", sep = ";")
saveRDS(df.irisC, "backup/dfC.objeto.bruto.rds")
rm(df.irisC)
ls()
df.irisC <- readRDS("backup/dfC.objeto.bruto.rds")
ls()
# Listas #############################################################
# Criando objetos dos próximos exemplos
x <- c(1, 2, 3, 4)
y <- c(4, 3, 2, 1)
z <- rnorm(6)
a <- c("um", "dois", "tres")
df <- dfB
# Criando listas:
list(NULL) # Lista vazia
list(x)
list(x, y)
list(x, y, z, a, df, sum)
# A indexação é um pouco diferente do que em matrizes e dataframes
# Veja a partir deste exemplo:
lista1 <- list(x, y, z, a, df, sum)
# Elementos podem ser acessado como em um vetor
lista1[3]
lista1[3, ]
lista1[[3]]
# A indexação pode ser feita de forma recursiva no interior da lista
lista1[[3]][1]
lista1[[3]][2:3]
lista1[[5]]$altura
lista1[[5]][1, 3]
lista1[[5]]$peso[1]
# Veja o elemento da lista e o vetor a seguir
lista1[1]
c(2, 4, 6, 8)
# Tentativa de substituir o conteúdo pelo do vetor
lista1[1] <- c(2, 4, 6, 8) # não dá certo
lista1[1] # e destroi o elemento da lista
# Restaurando a lista ao estado original
lista1 <- list(x, y, z, a, df, sum)
lista1[[1]] # Assim a indexação considera o elemento como vetor
lista1[[1]] <- c(2, 4, 6, 8) # substitui corretamente e
lista1[[1]] # voilà
# Listas também suportam nomeação dos elementos
lista1 <- list(x, y, z, a, df, sum)
lista1
str(lista1) # Lista sem nomes
names(lista1) <- c(
"vetor.num1", "vetor.num2", "vetor.rand1",
"vetor.char", "conjunto de dados", "soma")
str(lista1) # lista com nomes
# O que abre novas possibilidades de indexação
lista1[1:2]
lista1$vetor.rand1
lista1$vetor.rand1[2:3]
lista1$`conjunto de dados`$idade
# É possível desfazer a estrutura de uma lista
unlist(lista1[1:2])
do.call(cbind, lista1[1:2])
do.call(rbind, lista1[1:2])
lista2 <- lista1
lista2$inception <- lista1
str(lista2)
# lista2[3]$
lista2[[7]][[1]]
lista2[[7]]$vetor.num1
lista2$inception$`conjunto de dados`$idade
length(lista2)
|
88b1db9f38441eb7b5faf4f9d6078d8b0cb537f2 | 784a27735568199bf6d067f2b3650484a50d181c | /Code - Visualization.R | 33d59a497e75c68fa974d676dc16224a23064cec | [] | no_license | kshitijsankesara/Visualizing-USA-Crime-Rates | c3a612ddbd37a46ca23d62563938b3b7945f3ca4 | 4f7c0e484dfa7589ccc91ce757512115f691a7d7 | refs/heads/master | 2022-10-12T14:09:26.917732 | 2020-06-12T16:17:46 | 2020-06-12T16:17:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,330 | r | Code - Visualization.R | #Step A: Load and Merge datasets
#1) Read in the census dataset
#Using the function created in HW3
# Read the census CSV and tidy it into a four-column state-population table.
#
# path: location of the census export (defaults to "states.csv", so existing
#       zero-argument callers keep working).
# Returns a data.frame with columns stateName, population, popOver18 and
# percentOver18.
cleanstate <- function(path = "states.csv")
{
  Statesnew <- read.csv(path)
  # Positional clean-up matching the layout of the census export
  # (TODO: confirm against states.csv): drop the first row and first column,
  # then the 52nd remaining row, then the next three leading columns.
  Statesnew <- Statesnew[-1,-1]
  Statesnew <- Statesnew[-52,]
  Statesnew <- Statesnew[,-1:-3]
  colnames(Statesnew) <- c("stateName", "population", "popOver18", "percentOver18")
  return(Statesnew)
}
cleanstate()
StatePop <- cleanstate()
View(StatePop)
#2) Copy the USArrests dataset into a local variable
Arrests <- USArrests
Arrests$stateName <- row.names(Arrests)
View(Arrests)
#3) Create a merged dataframe -- with the attributes from both dataframes
Merged <- merge(StatePop, Arrests, By = "stateName") #Merging two datasets by stateName
View(Merged)
#Step B: Explore the Data ??? Understanding distributions
#Installing Package ggplot2
install.packages("ggplot2")
library("ggplot2")
#4) Create a histogram using GGPLOT for the population and a different histogram for the murder rate
#Histogram for population
histpop <- ggplot(Merged, aes(x = population)) #Plotting histogram using ggplot package
histpop <- histpop + geom_histogram(color = "white", fill = "darkgrey", bins = 50) #Dividing x axis into 50 bins and displaying counts with individual bars
histpop <- histpop + ggtitle("Histogram of Population") #Assigning title to the histogram
histpop #To view the histogram
#Histogram for murder rate
histmurder <- ggplot(Merged, aes(x = Murder))
histmurder <- histmurder + geom_histogram(color = "white", fill = "darkgrey", bins = 50)
histmurder <- histmurder + ggtitle("Histogram of Murder Rate")
histmurder
#Histogram for Assault
histassault <- ggplot(Merged, aes(x = Assault))
histassault <- histassault + geom_histogram(color = "white", fill = "darkgrey", bins = 50)
histassault <- histassault + ggtitle("Histogram of Assault")
histassault
#Histogram for UrbanPop
histurbanpop <- ggplot(Merged, aes(x = UrbanPop))
histurbanpop <- histurbanpop + geom_histogram(color = "white", fill = "darkgrey", bins = 50)
histurbanpop <- histurbanpop + ggtitle("Histogram of UrbanPop")
histurbanpop
#Histogram for Rape
histrape <- ggplot(Merged, aes(x = Rape))
histrape <- histrape + geom_histogram(color = "white", fill = "darkgrey", bins = 50)
histrape <- histrape + ggtitle("Histogram of Rape")
histrape
#I have kept bins equal to 50 so that my histogram looks proper
#5) Create a boxplot for the population, and a different boxplot for the murder rate
#Boxplot for population
boxpop <- ggplot(Merged, aes(x=factor(0), y=population))
boxpop <- boxpop + geom_boxplot(color = "black", fill = "white")
boxpop <- boxpop + ggtitle("Boxplot of Population")
boxpop
#Boxplot for Murder Rate
boxmurder <- ggplot(Merged, aes(x=factor(0), y=Murder))
boxmurder <- boxmurder + geom_boxplot(color = "black", fill = "white")
boxmurder <- boxmurder + ggtitle("Boxplot of Murder Rate")
boxmurder
#6) Create a block comment explaining which visualization (boxplot or histogram) you thought was more helpful (explain why)
#Histogram is a better option for visualization, as its more spread out.
#Its easier to understand than boxplot.
#Step C: Which State had the Most Murders ??? bar charts
#7) Calculate the number of murders per state
#Creating a new column murderstate
Merged$murderstate <- Merged$population * Merged$Murder/100
View(Merged)
#8) Generate a bar chart, with the number of murders per state
#Bar Chart of murder per state
barmurderstate <- ggplot(Merged, aes(x=stateName, y=murderstate))
barmurderstate <- barmurderstate + geom_col()
barmurderstate <- barmurderstate + ggtitle("Bar Chart of Murder per State")
barmurderstate
#9) Generate a bar chart, with the number of murders per state. Rotate text (on the X axis), so we can see x labels
#Rotating state names
barmurderstate2 <- barmurderstate + theme(axis.text.x = element_text(angle = 90, hjust = 1))
barmurderstate2
#10) Generate a new bar chart, the same as in the previous step, but also sort the x-axis by the murder rate
#Reordering states by their murder rate
barmurderstate3 <- ggplot(Merged, aes(x=reorder(stateName, Murder), y=murderstate))
barmurderstate3 <- barmurderstate3 + geom_col()
barmurderstate3 <- barmurderstate3 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
barmurderstate3
#11) Generate a third bar chart, the same as the previous step, but also showing percentOver18 as the color of the bar
barmurderstate4 <- ggplot(Merged, aes(x=reorder(stateName, Murder), y=murderstate, fill=percentOver18))
barmurderstate4 <- barmurderstate4 + geom_col()
barmurderstate4 <- barmurderstate4 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
barmurderstate4 <- barmurderstate4 + ggtitle("Bar Chart of Murder per State and Population per State")
barmurderstate4
#Step D: Explore Murders ??? scatter chart
#12) Generate a scatter plot ??? have population on the X axis, the percent over 18 on the y axis, and the size & color represent the Murder rate
scatter <- ggplot(Merged, aes(x = population, y = percentOver18))
scatter <- scatter + geom_point(aes(size = Murder, color = Murder))
scatter |
561f19f86d3a7d2d277709c6e56e7839f4927876 | 439b420e36709c0ed2a001e9a0036a100489e29d | /tests/manual.query.test/query.tests.R | 28b7a25b59523b2c02f8ef847511bdb789a3bd47 | [
"Apache-2.0"
] | permissive | reactome/ReactomeGraph4R | da26ef5c48b76554daec943c2ee34d11b20c0fbf | 8b863e9111802a28034ae4f17da7b1a2a67b1924 | refs/heads/master | 2023-05-08T00:41:09.511805 | 2023-04-30T01:21:57 | 2023-04-30T01:21:57 | 279,608,406 | 8 | 2 | null | 2021-02-21T17:50:12 | 2020-07-14T14:29:32 | null | UTF-8 | R | false | false | 1,285 | r | query.tests.R | # manually run the following tests and check the results
# Manual smoke tests for ReactomeGraph4R: run interactively against a live
# Reactome Neo4j instance and inspect each printed result by hand.
library(ReactomeGraph4R)
login() # already built a connection to the reactome neo4j database
# fetch instance by class
url <- matchObject(schemaClass = "URL")
head(url[["databaseObject"]])
# check error: an unknown schema class should fail loudly
matchObject(schemaClass = "haha")
# fetch instance by name, restricting which attributes come back
matchObject(displayName = "RCOR1 [nucleoplasm]",
            returnedAttributes=c("stId", "speciesName"))
# fetch instance by id
## Reactome id
matchObject(id = "R-HSA-9626034")
# fetch multiple external (UniProt) ids at once
res <- multiObjects(ids = c("P02741", "Q9GZQ8"),
                    databaseName="UniProt", speedUp=TRUE)
res[, 1:5]
# get the preceding/following Events
matchPrecedingAndFollowingEvents("R-HSA-983150", depth=2, type="row")
# get hierarchy
matchHierarchy(id="R-HSA-1369062", type="graph")
# get interactors
matchInteractors(996766)
# get reactions associated with a pathway
matchReactionsInPathway("R-HSA-5682285", type="row")
# get referrals
matchReferrals("113454", type="row")
# get PhysicalEntity roles
matchPEroles(pe.id = "R-HSA-8944354", type = "graph")
# get diseases
matchDiseases(displayName="neuropathy", species="M. musculus", type="row")
# fetch Reactome instances using a pubmed id
matchPaperObjects(pubmed.id="20797626", type="graph")
|
6f877a51812c66f9779dc0743d12453f8c33424a | 1e5aaad5bb774e0084745cb06bb4b33ff96c944a | /R/lso.R | 19a07a2982bb61ddf20d865c24dab5ac4a4bb076 | [
"MIT"
] | permissive | Rkabacoff/qacr | cd66c7e1c6b188bdbd2e6cab618c1d77a5ed2a0c | d30d4cd9cbadbfba30f8b7c19b00eff6e9053ab2 | refs/heads/main | 2021-06-25T14:49:23.176096 | 2021-03-11T14:45:39 | 2021-03-11T14:45:39 | 218,363,729 | 0 | 6 | null | 2019-12-02T15:43:19 | 2019-10-29T19:09:40 | R | UTF-8 | R | false | false | 2,412 | r | lso.R | #' @title List object sizes and types
#'
#' @description
#' \code{lso} lists object sizes and types.
#'
#' @details
#' This function list the sizes and types of all objects in an environment.
#' By default, the list describes the objects in the current environment, presented in descending order
#' by object size and reported in megabytes (Mb).
#'
#' @param pos a number specifying the environment as a position in the search list.
#' @param pattern an optional \link{regular expression}. Only names matching pattern are returned. \link{glob2rx} can be used to convert wildcard patterns to regular expressions.
#' @param order.by column to sort the list by. Values are \code{"Type"}, \code{"Size"}, \code{"Rows"}, and \code{"Columns"}.
#' @param decreasing logical. If \code{FALSE}, the list is sorted in ascending order.
#' @param head logical. Should output be limited to \code{n} lines?
#' @param n if \code{head=TRUE}, number of rows should be displayed?
#' @export
#' @importFrom utils object.size
#' @return a data.frame with four columns (Type, Size, Rows, Columns) and object names as row names.
#' @author
#' Based on based on postings by Petr Pikal and David Hinds to the r-help list in 2004 and
#' modified Dirk Eddelbuettel, Patrick McCann, and Rob Kabacoff.
#' @references \url{http://stackoverflow.com/questions/1189759/expert-r-users-whats-in-your-rprofile}.
#' @examples
#' data(cardata)
#' data(cars74)
#' lso()
lso <- function (pos = 1, pattern, order.by = "Size",
decreasing=TRUE, head=TRUE, n = 10) {
napply <- function(names, fn) sapply(names, function(x)
fn(get(x, pos = pos)))
names <- ls(pos = pos, pattern = pattern)
if (length(names) > 0) {
obj.class <- napply(names, function(x) as.character(class(x))[1])
obj.mode <- napply(names, mode)
obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
obj.size <- napply(names, object.size) / 10^6 # megabytes
obj.dim <- t(napply(names, function(x)
as.numeric(dim(x))[1:2]))
vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
obj.dim[vec, 1] <- napply(names, length)[vec]
out <- data.frame(obj.type, obj.size, obj.dim)
names(out) <- c("Type", "Size", "Rows", "Columns")
out <- out[order(out[[order.by]], decreasing=decreasing), ]
names(out)[2] <- "Size_Mb"
if (head)
out <- head(out, n)
out
} else {
cat("No objects found.\n")
}
}
|
ccacd8f45756b9a5f6aa088ac06d26a427790ff0 | 871f932fccc7d5b6726964eadc30477782f36c0e | /man/align_englishCorpus.Rd | b6c487908805a2a7abdbb9dbafbe42b052d0703e | [
"MIT"
] | permissive | hrvg/wateReview | ed76a4b410799d90483b2f7dd2cfeb6c96733b51 | 1f77a33ed99e3f1be16d15189b81da92efd5495c | refs/heads/master | 2023-03-04T04:53:04.200965 | 2020-08-14T19:18:11 | 2020-08-14T19:18:11 | 134,775,109 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 709 | rd | align_englishCorpus.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webscrapping.R
\name{align_englishCorpus}
\alias{align_englishCorpus}
\title{Align englishCorpus with matching records in both databases and assign webscrapped abstracts}
\usage{
align_englishCorpus(englishCorpus, EndNoteIdLDA, EndNoteIdcorpus, in_corpus)
}
\arguments{
\item{englishCorpus}{corpus of documents}
\item{EndNoteIdLDA}{document id in the topic model (LDA)}
\item{EndNoteIdcorpus}{document id in the corpus from the query}
\item{in_corpus}{flag marking which records are present in the corpus (was undocumented; confirm against the function definition)}
}
\value{
englishCorpus with matching records in both databases and webscrapped abstracts
}
\description{
Align englishCorpus with matching records in both databases and assign webscrapped abstracts
}
|
e576cbae71f4ab1282d28ec1fd8bad8056b2b2cb | 11c5077aace42fa9c3b8bb6a63bc7b60ee04c8ef | /man/BaetenEtAl2013.Rd | 81591a3b4db3e4cfe8f6e7fab80b0d093b9c6dce | [] | no_license | cran/bayesmeta | 10318094d5a25ebd01393a7a5e0dcfd5171ed61e | ec196f04626460d2947e829f9f0507cd68d1f7a7 | refs/heads/master | 2023-07-07T22:28:01.653515 | 2023-06-27T08:40:02 | 2023-06-27T08:40:02 | 48,076,965 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,490 | rd | BaetenEtAl2013.Rd | \name{BaetenEtAl2013}
\docType{data}
\alias{BaetenEtAl2013}
\title{Ankylosing spondylitis example data}
\description{Numbers of cases (patients) and events (responders)
in the placebo control groups of eight studies.}
\usage{data("BaetenEtAl2013")}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{study} \tab \code{character} \tab study label \cr
\bold{year} \tab \code{numeric} \tab publication year \cr
\bold{events} \tab \code{numeric} \tab number of responders \cr
\bold{total} \tab \code{numeric} \tab total number of patients
}
}
\details{A study was conducted in order to investigate a novel treatment
in ankylosing spondylitis (Baeten et al., 2013). The primary endpoint
related to \emph{treatment response}.
In order to formulate an informative prior distribution for the
response rate to be expected in the control group of the new study, a
systematic review of previous studies was consulted (McLeod et al.,
2007), and, after a meta-analysis of the estimated response
probabilities, the predictive distribution for the new study's
response probability was derived. The predictive distribution here
constitutes the \emph{meta-analytic-predictive (MAP) prior}
distribution (Schmidli et al., 2014). The data set contains the
relevant data from the eight \dQuote{historical} studies' placebo
groups.
Note that the original analysis (Baeten et al., 2013) involved a
binomial model, and the resulting posterior predictive distribution
was eventually approximated by a mixture of beta distributions.
}
\source{
D. Baeten et al.
Anti-interleukin-17A monoclonal antibody secukinumab in treatment of
ankylosing spondylitis: a randomised, double-blind, placebo-controlled
trial.
\emph{The Lancet}, \bold{382}(9906):1705-1713, 2013.
\doi{10.1016/S0140-6736(13)61134-4}.
}
\seealso{
\code{\link{uisd}}, \code{\link{ess}}.
}
\references{
C. McLeod et al.
Adalimumab, etanercept, and infliximab for the treatment of ankylosing
spondylitis: a systematic review and economic evaluation.
\emph{Health Technology Assessment}, \bold{11}(28), 2007.
\doi{10.3310/hta11280}.
H. Schmidli, S. Gsteiger, S. Roychoudhury, A. O'Hagan,
D. Spiegelhalter, B. Neuenschwander.
Robust meta-analytic-predictive priors in clinical trials with
historical control information.
\emph{Biometrics}, \bold{70}(4):1023-1032, 2014.
\doi{10.1111/biom.12242}.
}
\examples{
# load data:
data("BaetenEtAl2013")
# show data:
BaetenEtAl2013
\dontrun{
# compute effect sizes (logarithmic odds) from the count data:
as <- escalc(xi=events, ni=total, slab=study,
measure="PLO", data=BaetenEtAl2013)
# compute the unit information standard deviation (UISD):
uisd(as)
# perform meta-analysis
# (using uniform priors for effect and heterogeneity):
bm <- bayesmeta(as)
# show results (log-odds):
forestplot(bm, xlab="log-odds", zero=NA)
# show results (odds):
forestplot(bm, exponentiate=TRUE, xlab="odds", zero=NA)
# show posterior predictive distribution --
# in terms of log-odds:
bm$summary[,"theta"]
logodds <- bm$summary[c(2,5,6), "theta"]
logodds
# in terms of odds:
exp(logodds)
# in terms of probabilities:
(exp(logodds) / (exp(logodds) + 1))
# illustrate MAP prior density:
x <- seq(-3, 1, by=0.01)
plot(x, bm$dposterior(theta=x, predict=TRUE), type="l",
xlab="log-odds (response)", ylab="posterior predictive density")
abline(h=0, v=0, col="grey")
}
}
\keyword{datasets}
|
7321b0b66e793a6558c770314b013614ef4cf7ac | 48aea1547fb612b127d5b5def716d48398236159 | /analysis/CIMseqFPtesting/testthat/test_All-classes.R | 26e9c704536d643dd30af80bb1eb82878a2dbcf0 | [] | no_license | jasonserviss/CIMseq.testing | 6a1951a5d1cd53a22704df631138050bc4e057c6 | 7039f9b52fb9280bb811662aa19d4fe7f7bf8398 | refs/heads/master | 2021-03-30T17:46:24.443721 | 2020-01-27T09:55:25 | 2020-01-27T09:55:25 | 76,064,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,210 | r | test_All-classes.R | context("All-classes")
test_that("check that accessors output the expected result", {
  # Map each slot name to the class its accessor should return, then check
  # every slot in a single pass per object. Expectations are identical (same
  # calls, same order) to spelling each one out individually.
  singlet_slots <- c(
    counts = "matrix", counts.log = "matrix", counts.cpm = "matrix",
    counts.ercc = "matrix", dim.red = "matrix", classification = "character"
  )
  for (slot_name in names(singlet_slots)) {
    expect_is(getData(CIMseqSinglets_test, slot_name), singlet_slots[[slot_name]])
  }
  multiplet_slots <- c(
    counts = "matrix", counts.log = "matrix", counts.cpm = "matrix",
    counts.ercc = "matrix", features = "integer"
  )
  for (slot_name in names(multiplet_slots)) {
    expect_is(getData(CIMseqMultiplets_test, slot_name), multiplet_slots[[slot_name]])
  }
  swarm_slots <- c(
    fractions = "matrix", costs = "numeric", convergence = "character",
    stats = "tbl_df", singletIdx = "list", arguments = "tbl_df"
  )
  for (slot_name in names(swarm_slots)) {
    expect_is(getData(CIMseqSwarm_test, slot_name), swarm_slots[[slot_name]])
  }
})
test_that("check that replacement outputs the expected result", {
  # Invalid replacement values (wrong type for the slot) must be rejected by
  # the class validity checks.
  expect_error(getData(CIMseqSinglets_test, "counts") <- "a")
  expect_error(getData(CIMseqSinglets_test, "counts.log") <- "a")
  expect_error(getData(CIMseqSinglets_test, "counts.cpm") <- "a")
  expect_error(getData(CIMseqSinglets_test, "counts.ercc") <- "a")
  expect_error(getData(CIMseqSinglets_test, "dim.red") <- "a")
  expect_error(getData(CIMseqSinglets_test, "classification") <- 1)
  expect_error(getData(CIMseqMultiplets_test, "counts") <- "a")
  expect_error(getData(CIMseqMultiplets_test, "counts.log") <- "a")
  expect_error(getData(CIMseqMultiplets_test, "counts.cpm") <- "a")
  expect_error(getData(CIMseqMultiplets_test, "counts.ercc") <- "a")
  expect_error(getData(CIMseqMultiplets_test, "features") <- "a")
  # Valid replacements must leave the object a well-formed CIMseqSinglets.
  # Note that counts.log / counts.cpm accept a function value here — the
  # slots apparently hold (possibly lazy) functions rather than matrices.
  tmp <- CIMseqSinglets_test
  getData(tmp, "counts") <- getData(CIMseqSinglets_test, "counts")
  expect_is(tmp, "CIMseqSinglets")
  tmp <- CIMseqSinglets_test
  getData(tmp, "counts.log") <- function(){}
  expect_is(tmp, "CIMseqSinglets")
  tmp <- CIMseqSinglets_test
  getData(tmp, "counts.cpm") <- function(){}
  expect_is(tmp, "CIMseqSinglets")
  tmp <- CIMseqSinglets_test
  getData(tmp, "counts.ercc") <- getData(CIMseqSinglets_test, "counts.ercc")
  expect_is(tmp, "CIMseqSinglets")
  tmp <- CIMseqSinglets_test
  getData(tmp, "dim.red") <- getData(CIMseqSinglets_test, "dim.red")
  expect_is(tmp, "CIMseqSinglets")
  tmp <- CIMseqSinglets_test
  getData(tmp, "classification") <- getData(CIMseqSinglets_test, "classification")
  expect_is(tmp, "CIMseqSinglets")
  # Same round-trip checks for the multiplet class.
  tmp <- CIMseqMultiplets_test
  getData(tmp, "counts") <- getData(CIMseqMultiplets_test, "counts")
  expect_is(tmp, "CIMseqMultiplets")
  tmp <- CIMseqMultiplets_test
  getData(tmp, "counts.log") <- function(){}
  expect_is(tmp, "CIMseqMultiplets")
  tmp <- CIMseqMultiplets_test
  getData(tmp, "counts.cpm") <- function(){}
  expect_is(tmp, "CIMseqMultiplets")
  tmp <- CIMseqMultiplets_test
  getData(tmp, "counts.ercc") <- getData(CIMseqMultiplets_test, "counts.ercc")
  expect_is(tmp, "CIMseqMultiplets")
  tmp <- CIMseqMultiplets_test
  getData(tmp, "features") <- getData(CIMseqMultiplets_test, "features")
  expect_is(tmp, "CIMseqMultiplets")
})
test_that("check that CIMseqSinglets concatenation outputs the expected result", {
  # Setup expected data: concatenating an object with itself should be
  # equivalent to constructing a new object from the column-bound counts,
  # row-bound dim.red, and doubled classification vector.
  counts <- cbind(
    getData(CIMseqSinglets_test, "counts"),
    getData(CIMseqSinglets_test, "counts")
  )
  counts.ercc <- cbind(
    getData(CIMseqSinglets_test, "counts.ercc"),
    getData(CIMseqSinglets_test, "counts.ercc")
  )
  dim.red <- rbind(
    getData(CIMseqSinglets_test, "dim.red"),
    getData(CIMseqSinglets_test, "dim.red")
  )
  classification <- c(
    getData(CIMseqSinglets_test, "classification"),
    getData(CIMseqSinglets_test, "classification")
  )
  expected <- CIMseqSinglets(counts, counts.ercc, dim.red, classification)
  # Generate output via the c() method under test.
  output <- c(CIMseqSinglets_test, CIMseqSinglets_test)
  # Every slot of the concatenated object must match the freshly
  # constructed equivalent.
  expect_identical(getData(expected, "counts"), getData(output, "counts"))
  expect_identical(getData(expected, "counts.log"), getData(output, "counts.log"))
  expect_identical(getData(expected, "counts.cpm"), getData(output, "counts.cpm"))
  expect_identical(getData(expected, "counts.ercc"), getData(output, "counts.ercc"))
  expect_identical(getData(expected, "dim.red"), getData(output, "dim.red"))
  expect_identical(getData(expected, "classification"), getData(output, "classification"))
  # TEST 2: concatenation must fail if the gene sets (count rows) differ.
  CIMseqSinglets_test2 <- CIMseqSinglets(
    getData(CIMseqSinglets_test, "counts")[1:100, ],
    getData(CIMseqSinglets_test, "counts.ercc"),
    getData(CIMseqSinglets_test, "dim.red"),
    getData(CIMseqSinglets_test, "classification")
  )
  expect_error(c(CIMseqSinglets_test, CIMseqSinglets_test2))
  # TEST 3: concatenation must fail if the ERCC spike-in sets differ.
  CIMseqSinglets_test3 <- CIMseqSinglets(
    getData(CIMseqSinglets_test, "counts"),
    getData(CIMseqSinglets_test, "counts.ercc")[1:2, ],
    getData(CIMseqSinglets_test, "dim.red"),
    getData(CIMseqSinglets_test, "classification")
  )
  expect_error(c(CIMseqSinglets_test, CIMseqSinglets_test3))
  # TEST 4: concatenation must fail if the dim.red column names differ.
  dr <- getData(CIMseqSinglets_test, "dim.red")
  colnames(dr) <- c("dim1", "dim2")
  CIMseqSinglets_test4 <- CIMseqSinglets(
    getData(CIMseqSinglets_test, "counts"),
    getData(CIMseqSinglets_test, "counts.ercc"),
    dr,
    getData(CIMseqSinglets_test, "classification")
  )
  expect_error(c(CIMseqSinglets_test, CIMseqSinglets_test4))
})
test_that("check that CIMseqMultiplets concatenation outputs the expected result", {
  # Setup expected data: concatenation should equal a fresh object built
  # from column-bound counts and the (shared) feature index vector.
  counts <- cbind(
    getData(CIMseqMultiplets_test, "counts"),
    getData(CIMseqMultiplets_test, "counts")
  )
  counts.ercc <- cbind(
    getData(CIMseqMultiplets_test, "counts.ercc"),
    getData(CIMseqMultiplets_test, "counts.ercc")
  )
  features <- getData(CIMseqMultiplets_test, "features")
  expected <- CIMseqMultiplets(counts, counts.ercc, features)
  # Generate output via the c() method under test.
  output <- c(CIMseqMultiplets_test, CIMseqMultiplets_test)
  # Every slot must match the freshly constructed equivalent.
  expect_identical(getData(expected, "counts"), getData(output, "counts"))
  expect_identical(getData(expected, "counts.log"), getData(output, "counts.log"))
  expect_identical(getData(expected, "counts.cpm"), getData(output, "counts.cpm"))
  expect_identical(getData(expected, "counts.ercc"), getData(output, "counts.ercc"))
  expect_identical(getData(expected, "features"), getData(output, "features"))
  # TEST 2: concatenation must fail if the gene sets (count rows) differ.
  CIMseqMultiplets_test2 <- CIMseqMultiplets(
    getData(CIMseqMultiplets_test, "counts")[1:100, ],
    getData(CIMseqMultiplets_test, "counts.ercc"),
    getData(CIMseqMultiplets_test, "features")
  )
  expect_error(c(CIMseqMultiplets_test, CIMseqMultiplets_test2))
  # TEST 3: concatenation must fail if the ERCC spike-in sets differ.
  CIMseqMultiplets_test3 <- CIMseqMultiplets(
    getData(CIMseqMultiplets_test, "counts"),
    getData(CIMseqMultiplets_test, "counts.ercc")[1:2, ],
    getData(CIMseqMultiplets_test, "features")
  )
  expect_error(c(CIMseqMultiplets_test, CIMseqMultiplets_test3))
  # TEST 4: differing feature selections only warn (do not error).
  CIMseqMultiplets_test4 <- CIMseqMultiplets(
    getData(CIMseqMultiplets_test, "counts"),
    getData(CIMseqMultiplets_test, "counts.ercc"),
    1:10
  )
  expect_warning(c(CIMseqMultiplets_test, CIMseqMultiplets_test4))
})
test_that("check that CIMseqSwarm concatenation outputs the expected result", {
  # Setup expected data: swarm results combine row-wise (fractions, stats,
  # arguments) or by simple vector/list concatenation (costs, convergence).
  # singletIdx is shared, hence taken once.
  fractions <- rbind(
    getData(CIMseqSwarm_test, "fractions"),
    getData(CIMseqSwarm_test, "fractions")
  )
  costs <- c(
    getData(CIMseqSwarm_test, "costs"),
    getData(CIMseqSwarm_test, "costs")
  )
  convergence <- c(
    getData(CIMseqSwarm_test, "convergence"),
    getData(CIMseqSwarm_test, "convergence")
  )
  stats <- rbind(
    getData(CIMseqSwarm_test, "stats"),
    getData(CIMseqSwarm_test, "stats")
  )
  singletIdx <- c(getData(CIMseqSwarm_test, "singletIdx"))
  arguments <- rbind(
    getData(CIMseqSwarm_test, "arguments"),
    getData(CIMseqSwarm_test, "arguments")
  )
  expected <- new("CIMseqSwarm",
    fractions = fractions, costs = costs, convergence = convergence,
    stats = stats, singletIdx = singletIdx, arguments = arguments
  )
  # Generate output via the c() method under test.
  output <- c(CIMseqSwarm_test, CIMseqSwarm_test)
  # Every slot must match the manually assembled equivalent.
  expect_identical(getData(expected, "fractions"), getData(output, "fractions"))
  expect_identical(getData(expected, "costs"), getData(output, "costs"))
  expect_identical(getData(expected, "convergence"), getData(output, "convergence"))
  expect_identical(getData(expected, "stats"), getData(output, "stats"))
  expect_identical(getData(expected, "singletIdx"), getData(output, "singletIdx"))
  expect_identical(getData(expected, "arguments"), getData(output, "arguments"))
  # TEST 2: concatenation must fail when the fraction column names
  # (cell-type classes) do not match between the two objects.
  f <- getData(CIMseqSwarm_test, "fractions")
  colnames(f) <- paste0("c", 1:ncol(f))
  CIMseqSwarm_test2 <- new(
    "CIMseqSwarm",
    fractions = f,
    costs = getData(CIMseqSwarm_test, "costs"),
    convergence = getData(CIMseqSwarm_test, "convergence"),
    stats = getData(CIMseqSwarm_test, "stats"),
    singletIdx = getData(CIMseqSwarm_test, "singletIdx"),
    arguments = getData(CIMseqSwarm_test, "arguments")
  )
  expect_error(c(CIMseqSwarm_test, CIMseqSwarm_test2))
  # TEST 3: a mismatching singletIdx only warns (does not error).
  CIMseqSwarm_test3 <- new(
    "CIMseqSwarm",
    fractions = getData(CIMseqSwarm_test, "fractions"),
    costs = getData(CIMseqSwarm_test, "costs"),
    convergence = getData(CIMseqSwarm_test, "convergence"),
    stats = getData(CIMseqSwarm_test, "stats"),
    singletIdx = getData(CIMseqSwarm_test, "singletIdx")[1:200],
    arguments = getData(CIMseqSwarm_test, "arguments")
  )
  expect_warning(c(CIMseqSwarm_test, CIMseqSwarm_test3))
})
test_that("check that CIMseqSwarm subsetting outputs the expected result", {
  # Setup expected data: subsetting to two samples should equal manually
  # slicing each per-sample slot; singletIdx and arguments are kept whole.
  s <- c("m.NJB00204.G04", "m.NJB00204.D07")
  fractions <- getData(CIMseqSwarm_test, "fractions")[s, ]
  costs <- getData(CIMseqSwarm_test, "costs")[s]
  convergence <- getData(CIMseqSwarm_test, "convergence")[s]
  stats <- filter(getData(CIMseqSwarm_test, "stats"), sample %in% s)
  singletIdx <- getData(CIMseqSwarm_test, "singletIdx")
  arguments <- getData(CIMseqSwarm_test, "arguments")
  expected <- new(
    "CIMseqSwarm",
    fractions = fractions, costs = costs, convergence = convergence,
    stats = stats, singletIdx = singletIdx, arguments = arguments
  )
  # Generate output via the filterSwarm() function under test.
  output <- filterSwarm(CIMseqSwarm_test, s)
  # Every slot must match the manually sliced equivalent.
  expect_identical(getData(expected, "fractions"), getData(output, "fractions"))
  expect_identical(getData(expected, "costs"), getData(output, "costs"))
  expect_identical(getData(expected, "convergence"), getData(output, "convergence"))
  expect_identical(getData(expected, "stats"), getData(output, "stats"))
  expect_identical(getData(expected, "singletIdx"), getData(output, "singletIdx"))
  expect_identical(getData(expected, "arguments"), getData(output, "arguments"))
})
|
23e4fa64b2ce3f9d01638619040993e7e05a3946 | 92e04fc53259efde079d353d9ff00ef21443eccb | /R/cel2kel.R | dcc93d384949be1fd88e9004b8d92de7132f0a1e | [] | no_license | arni-magnusson/biometry2 | 5cfd25e4598d17c31f03cd12a0f9a606917c8e91 | 7b5e5b0330446bf48d4dccd2049ce8f317cec846 | refs/heads/master | 2020-04-22T21:12:42.460484 | 2019-03-06T10:47:24 | 2019-03-06T10:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 456 | r | cel2kel.R | #' Celsius to Kelvin
#'
#' Converts temperatures from Celsius degrees to Kelvin scale.
#'
#' @param x Temperature in Celsius degrees.
#'
#' @return
#' Kelvin scale value, 273.15 added to Celsius degrees.
#'
#' @author
#' Vigdís Freyja Helmutsdóttir, MSc. student at the University of Iceland.
#'
#' @seealso
#' \link{Arithmetic}
#'
#' @examples
#' cel2kel(70)
#' cel2kel(-4)
#'
#' @export
cel2kel <- function(x) {
  # Vectorized conversion: adding the offset operates element-wise, so x may
  # be a single value or a vector. The original stored the result in a
  # redundant intermediate variable (with redundant parentheses); returning
  # the expression directly is the idiomatic form and behaves identically.
  x + 273.15
}
|
ac79beddbb802df94b0b8ac7468c86d8f674197e | 575923255a038d3739bfb851a9976464538808b2 | /functioning_program.R | 72e95fc0e0ad38f9153095ac1acedb0faf7374c6 | [] | no_license | tincorpdata/AdvanceedR | 443ffe454400f13693e3c429ef3bfb29c6d23bb9 | bfdfcd4fcf75dc0005c6062b54408d6220eccbee | refs/heads/master | 2020-04-20T10:30:33.007571 | 2019-02-02T12:40:24 | 2019-02-02T12:40:24 | 168,791,518 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 109 | r | functioning_program.R | #create a vector
v <- c(100, 222, 3333, 412, 235)
#Get the square root of each element
lapply(v, length(v))
|
9119894ee85f3f8107f70ad86829d50ccd5100b8 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u3-v0.pddl_planlen=33/dungeon_i25-m12-u3-v0.pddl_planlen=33.R | 036402e351e279d57774536a0b3e606dc71254f4 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | dungeon_i25-m12-u3-v0.pddl_planlen=33.R | d992bbc93928b3ecc718c304527f4078 dungeon_i25-m12-u3-v0.pddl_planlen=33.qdimacs 24019 223421 |
aff5f1d1a5061e283d3538f1311f19b6aa42f90d | 1cf864651a3cad23eb3c7f25aecda77b9d51c7e5 | /R/seqcheck.R | 220b44a7c14fbfe46d7d56cc85f2c7a59af6a5e7 | [] | no_license | gobbios/EloRating | 98eec32ae178db6bca95d55691c5d66b525bce9a | ebb4957676b3ff5638e5eb9ca34464a480138902 | refs/heads/master | 2023-06-08T00:58:34.065438 | 2023-06-02T10:12:35 | 2023-06-02T10:12:35 | 79,722,236 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,438 | r | seqcheck.R | # seqcheck 14_11_19
#' runs raw data diagnostics for Elo rating
#'
#' runs some diagnostics on the data supplied to \link{elo.seq}, to check whether \link{elo.seq} will run without errors
#'
#' @param winner either a factor or character vector with winner IDs of dyadic dominance interactions
#' @param loser either a factor or character vector with loser IDs of dyadic dominance interactions
#' @param Date character vector of form "YYYY-MM-DD" with the date of the respective interaction
#' @param draw logical, which interactions ended undecided (i.e. drawn or tied)? By default all \code{FALSE}, i.e. no undecided interactions occurred. Note that in this case, \code{winner}/\code{loser} values can be interchanged
#' @param presence optional data.frame, to supply data about presence and absence of individuals for part of the time the data collection covered. see details
#'
#' @details calender dates (for the sequence as well as in the first column of \code{presence}, if supplied) need to be in "YYYY-MM-DD" format!
#'
#' \code{seqcheck} will return two types of messages: warnings and errors. Errors will result in the data NOT working when supplied to \code{elo.seq}, and need to be fixed. Warning message do not necessarily lead to failure of executing \code{elo.seq}. Note that by default \code{seqcheck} is part of \code{elo.seq}. If any error or warning is produced by \code{seqcheck}, these data will not work in \code{\link{elo.seq}}. Some warning (but not error) messages can be ignored (see below) and if the \code{runcheck} argument in \code{elo.seq} is set to \code{FALSE} Elo-ratings will be calculated properly in such cases.
#'
#' The actual checks (and corresponding messages) that are performed are described in more detail here:
#'
#' Most likely (i.e. in our experience), problems are caused by mismatches between the interaction data and the corresponding presence data.
#'
#' Errors:\cr
#' \code{Presence starts AFTER data}: indicates that during interactions at the beginning of the sequence, no corresponding information was found in the presence data. Solution: augment presence data, or remove interactions until the date on which presence data starts
#'
#' \code{Presence stops BEFORE data}: refers to the corresponding problem towards the end of interaction and presence data
#'
#' \code{During the following interactions, IDs were absent...}: indicates that according to the presence data, IDs were absent (i.e. "0"), but interactions with them occured on the very date(s) according to the interaction data
#'
#' \code{The following IDs occur in the data sequence but NOT...}: there is/are no columns corresponding to the listed IDs in the presence data
#'
#' \code{There appear to be gaps in your presence (days missing?)...}: check whether your presence data includes a line for \emph{each date} starting from the date of the first interaction through to the date of the last interaction
#'
#' Warnings:
#'
#' \code{Presence continues beyond data}: indicates that presence and interaction data do not end on the same date.
#'
#' \code{Presence starts earlier than data}: indicates that presence and interaction data do not start on the same date.
#'
#' \code{The following IDs occur in the presence data but NOT...}: there are more ID columns in the presence data than IDs occuring in the interaction data
#'
#' \code{Date column is not ordered}: The dates are not supplied in ascending order. \code{\link{elo.seq}} will still work but the results won't be reliable because the interactions were not in the correct sequence.
#'
#' Other warnings/errors can result from inconsistencies in either the presence or sequence data, or be of a more general nature:
#'
#' Errors:
#'
#' \code{No 'Date' column found}: in the presence data, no column exists with the name/header "Date". Please rename (or add) the necessary column named "Date" to your presence data.
#'
#' \code{At least one presence entry is not 1 or 0}: presence data must come in binary form, i.e. an ID was either present ("1") or absent ("0") on a given date. No \code{NA}s or other values are allowed.
#'
#' \code{Your data vectors do not match in length}: at least one of the three mandatory arguments (winner, loser, Date) differs from one other in length. Consider handling your data in a data.frame, which avoids this error.
#'
#' Warnings:
#'
#' \code{IDs occur in the data with inconsistent capitalization}: because \code{R} is case-sensitive, "A" and "a" are considered different individuals. If such labelling of IDs is on purpose, ignore the warning and set \code{runcheck=FALSE} when calling \code{elo.seq()}
#'
#' \code{There is (are) X case(s) in which loser ID equals winner ID}: winner and loser represent the same ID
#'
#' \code{The following individuals were observed only on one day}: while not per se a problem for the calculation of Elo ratings, individuals that were observed only on one day (irrespective of the number of interactions on that day) cannot be plotted. \code{\link{eloplot}} will give a warning in such cases, too.
#'
#' @return returns textual information about possible issues with the supplied data set, or states that data are fine for running with \code{\link{elo.seq}}
#'
#' @author Christof Neumann
#' @export
#'
#' @examples
#' data(adv)
#' seqcheck(winner = adv$winner, loser = adv$loser, Date = adv$Date)
#' data(advpres)
#' seqcheck(winner = adv$winner, loser = adv$loser, Date = adv$Date,
#' presence = advpres)
#'
#' # create faulty presence data
#' # remove one line from presence data
#' faultypres <- advpres[-1, ]
#' # make all individuals absent on one day
#' faultypres[5, 2:8] <- 0
#' # run check
#' seqcheck(winner = adv$winner, loser = adv$loser, Date = adv$Date,
#' presence = faultypres)
#'
#' # fix first error
#' faultypres <- rbind(faultypres[1, ], faultypres)
#' faultypres$Date[1] <- "2010-01-01"
#'
#' # run check again
#' seqcheck(winner = adv$winner, loser = adv$loser, Date = adv$Date,
#' presence = faultypres)
#'
#' # fix presence on date for interaction number 6
#' faultypres[6, 2:8] <- 1
#'
#' # run check again
#' seqcheck(winner = adv$winner, loser = adv$loser, Date = adv$Date,
#' presence = faultypres)
#' # all good now
seqcheck <- function(winner, loser, Date, draw = NULL, presence = NULL) {
  # checksum holds one flag per diagnostic: 0 = check passed, 1 = problem
  # detected. "IA_presencematch" is set to 2 when that check had to be
  # skipped (presence supplied but without a usable 'Date' column).
  # Note the intentional spelling of the "continouspres" flag name.
  checksum <- rep(0, 15)
  names(checksum) <- c("IDcheck", "selfinteractions", "presence", "startpresence1", "startpresence2", "endpresence1", "endpresence2", "IDmatch", "IA_presencematch", "presenceentries", "datecol", "length", "singledayobs", "continouspres", "seqdateorder")
  Date <- as.Date(as.character(Date))
  # check whether dates for the interactions are in increasing order
  # NOTE(review): diff.Date() does not document a 'unit' argument (it is
  # absorbed via ...); also, with a single unique date diff() is empty and
  # min() returns Inf with a warning — confirm intended.
  checkval <- min(as.numeric(diff(Date, unit = "day")))
  if (checkval < 0) checksum["seqdateorder"] <- 1
  # check whether all vectors contain same number of entries
  if (length(winner) != length(loser) | length(winner) != length(Date)) checksum["length"] <- 1
  # check draw/tie vector; default: no undecided interactions
  if (is.null(draw)) draw <- rep(FALSE, length(winner))
  if (length(winner) != length(draw)) checksum["length"] <- 1
  # the remaining checks are conditional on the fact that length of vectors match...
  if (checksum["length"] == 0) {
    datasequence <- data.frame(Winner = winner, Loser = loser, Date = Date)
    # check for integrity of IDs: flag if two IDs differ only in case
    # (e.g. "A" vs "a"), which likely indicates a data-entry problem
    winners <- as.character(datasequence[, "Winner"])
    losers <- as.character(datasequence[, "Loser"])
    allIDs <- sort(unique(c(winners, losers)))
    if (length(allIDs) == length(unique(tolower(allIDs)))) {
      IDcheck <- "Everything seems alright with capitalization of IDs"
    } else {
      IDcheck <- "There seems to be a problem with capitalization of IDs?"
      checksum["IDcheck"] <- 1
    }
    # check whether IDs had interactions with themselves...
    selfinteractions <- paste("There is (are)",
                              length(which(winners == losers)),
                              "case(s) in which loser ID equals winner ID")
    if (length(which(winners == losers)) > 0) checksum["selfinteractions"] <- 1
    # check whether presence data is given
    if (!is.null(presence)) {
      presenceD <- "Presence data is supplied"
    } else {
      presenceD <- "Presence data is not supplied"
      checksum["presence"] <- 1
    }
    # check whether there is a column named Date in the presence matrix
    if (!is.null(presence)) {
      if ("Date" %in% colnames(presence)) {
        datecol <- "Date column found"
      } else {
        datecol <- "No 'Date' column found in supplied presence data"
        checksum["datecol"] <- 1
      }
    }
    # check whether there are gaps in the presence data: the number of rows
    # must equal the number of calendar days the presence dates span
    # NOTE(review): 'continouspres' is only assigned when a gap is found or
    # in the else branch; when presence is supplied and gap-free it remains
    # undefined, but it is also never returned in that case.
    if (!is.null(presence) & checksum["datecol"] == 0) {
      if (nrow(presence) < as.numeric(diff(range(presence$Date))) + 1) {
        checksum["continouspres"] <- 1
        continouspres <- "There appear to be gaps in your presence data (missing days?)"
      }
    } else {
      continouspres <- "not checked"
    }
    # check whether date range in presence is the same as in sequence data;
    # presence starting earlier / ending later is only a warning, presence
    # starting later / ending earlier is an error
    START <- NA
    END <- NA
    if (!is.null(presence) & checksum["datecol"] == 0) {
      DATESpres <- as.Date(as.character(presence[, which(colnames(presence) %in% c("Date", "date"))]))
      DATESdata <- unique(as.Date(as.character(datasequence[, which(colnames(datasequence) %in% c("Date", "date"))])))
      if (min(DATESpres) < min(DATESdata)) {
        START <- "Presence starts earlier than data"
        checksum["startpresence1"] <- 1
      } # actually, not a problem per se
      if (min(DATESpres) > min(DATESdata)) {
        START <- "Presence starts AFTER data -> PROBLEM!"
        checksum["startpresence2"] <- 1
      }
      if (min(DATESpres) == min(DATESdata)) {
        START <- "Presence starts at the same date than data -> GOOD!"
      }
      if (max(DATESpres) < max(DATESdata)) {
        END <- "Presence stops BEFORE data -> PROBLEM!"
        checksum["endpresence1"] <- 1
      }
      if (max(DATESpres) > max(DATESdata)) {
        END <- "Presence continues beyond data"
        checksum["endpresence2"] <- 1
      } # actually, not a problem per se
      if (max(DATESpres) == max(DATESdata)) {
        END <- "Presence stops at the same date than data -> GOOD!"
      }
    }
    # check whether IDs match in data and presence (mismatches can exist in
    # both directions; presence-only IDs are a warning, data-only an error)
    if (!is.null(presence)) {
      IDdata <- sort(allIDs)
      IDpres <- sort(names(presence[, 2:ncol(presence)]))
      IDmatch <- "IDs in presence and data match -> GOOD!"
      # IDs present in the presence table but never interacting:
      if (length(which(!IDpres %in% IDdata)) > 0) {
        IDmatch1 <- IDpres[which(!IDpres %in% IDdata)]
      } else {
        IDmatch1 <- "none"
      }
      # IDs interacting but missing from the presence table:
      if (length(which(!IDdata %in% IDpres)) > 0) {
        IDmatch2 <- IDdata[which(!IDdata %in% IDpres)]
      } else {
        IDmatch2 <- "none"
      }
      if (IDmatch1[1] != "none" | IDmatch2[1] != "none") {
        IDmatch <- c(paste("The following IDs occur in the presence data but NOT in the data sequence:", IDmatch1), paste("the following IDs occur in the data sequence but NOT in the presence data:", IDmatch2))
        checksum[8] <- 1  # index 8 corresponds to "IDmatch"
      }
    }
    # check whether IDs were actually present on the dates of their interactions
    # note that IDs that occur in the data sequence BUT NOT in the presence are ignored here!
    IA_presencematch <- NA
    IA_presencematchN <- NA
    # number of calendar days spanned by the interaction data
    nmatrows <- length(seq(from = min(as.Date(Date)), to = max(as.Date(Date)), by = "day"))
    # flag value 2 marks this check as skipped (no usable Date column)
    if (!is.null(presence) & checksum["datecol"] == 1) checksum["IA_presencematch"] <- 2
    if (!is.null(presence) & checksum["datecol"] == 0) {
      # only run when presence covers exactly the spanned days
      if (nmatrows == nrow(presence)) {
        IA_presencematch <- c()
        for (i in 1:nrow(datasequence)) {
          # only evaluated when all IDs are covered by the presence data
          # (IDmatch2 == "none")
          if (sum(!(IDmatch2 %in% c(winners[i], losers[i]))) > 0 & IDmatch2[1] == "none") {
            # both winner and loser must be marked present (sum == 2)
            if (sum(presence[which(datasequence[i, "Date"] == presence[, "Date"]), c(winners[i], losers[i])], na.rm = TRUE) != 2) {
              IA_presencematch <- c(IA_presencematch, i)
            }
          }
        }
        if (is.null(IA_presencematch)) {
          IA_presencematch <- "All IDs were present on their interaction dates"
          IA_presencematchN <- 0
        } else {
          IA_presencematchN <- IA_presencematch
          IA_presencematch <- paste("During", length(IA_presencematchN), "interactions, IDs were absent according to the presence data:", sep = " ")
          checksum["IA_presencematch"] <- 1
        }
      }
    }
    # check whether there are entries other than 0's and 1's in the presence
    if (!is.null(presence)) {
      # per column: count of entries that are exactly 0 or 1; any column
      # whose count falls short of nrow(presence) contains something else
      temp <- as.numeric(apply(presence[, 2:ncol(presence)], 2, function(xx) length(which(xx == 0 | xx == 1))))
      presenceentries <- "All presence entries are either 1 or 0 --> GOOD"
      if (length(which(temp != nrow(presence))) > 0) {
        presenceentries <- "At least one presence entry is not 1 or 0 --> PROBLEM"
        checksum["presenceentries"] <- 1
      }
    }
    # check for cases in which IDs were observed only on a single day (even though multiple times is possible, but that doesnt make a difference...)
    # rows: distinct interaction days as winner / as loser, per ID
    temp <- rbind(rowSums(table(winner, Date) > 0)[allIDs], rowSums(table(loser, Date) > 0)[allIDs])
    colnames(temp) <- allIDs
    sIDs <- "none"
    if (1 %in% colSums(temp, na.rm = TRUE)) {
      checksum["singledayobs"] <- 1
      sIDs <- colnames(temp)[colSums(temp, na.rm = TRUE) == 1]
    }
    # assemble the result object; its class determines the print method used
    if (!is.null(presence)) {
      res <- list(checksum = checksum, IDcheck = IDcheck, selfinteractions = selfinteractions, presence = presenceD, startpresence = START, endpresence = END, IDmatch = IDmatch, IA_presencematch = IA_presencematch, IA_presencematchN = IA_presencematchN, presenceentries = presenceentries, IDmatch1 = IDmatch1, IDmatch2 = IDmatch2, datecol = datecol, singledaycases = sIDs)
      class(res) <- "sequencecheck"
    }
    if (is.null(presence)) {
      res <- list(checksum = checksum, IDcheck = IDcheck, selfinteractions = selfinteractions, presence = presenceD, singledaycases = sIDs, continouspres = continouspres)
      class(res) <- "seqchecknopres"
    }
  } # end part (conditional on vector length match)
  # mismatching vector lengths short-circuit everything else
  if (checksum["length"] == 1) {
    res <- list(checksum = checksum, IDcheck = NA, selfinteractions = NA, presence = NA)
    class(res) <- "seqchecknopres"
  }
  return(res)
}
|
fd2da016e8515914db69f3e3f29f882341123027 | 4592565db17d3d5a4bfa8fc820d7516beb4fa115 | /demo/seminr-pls-interaction.R | dcebfd7f143eabb77d77dff5e8d39e061550641f | [] | no_license | sem-in-r/seminr | 1b370286c58f4e658a02fb5df21fabe585fcfb4a | ae2524aae5f4f0bda3eb87faf80378af5baccea1 | refs/heads/master | 2023-04-04T00:29:48.969724 | 2022-06-30T17:02:07 | 2022-06-30T17:02:07 | 70,557,585 | 49 | 16 | null | 2022-10-13T15:22:28 | 2016-10-11T04:58:23 | R | UTF-8 | R | false | false | 3,947 | r | seminr-pls-interaction.R | # Simple Style: Separate declaration of measurement,interactions and structural model.
library(seminr)
# First, using the orthogonal method as per Henseler & Chin (2010). ----
# Creating our measurement model
mobi_mm <- constructs(
composite("Image", multi_items("IMAG", 1:5)),
composite("Expectation", multi_items("CUEX", 1:3)),
composite("Value", multi_items("PERV", 1:2)),
composite("Satisfaction", multi_items("CUSA", 1:3)),
interaction_term(iv = "Image", moderator = c("Expectation"), method = orthogonal),
interaction_term(iv = "Image", moderator = c("Value"), method = orthogonal)
)
# Structural model
# note: interactions should be the names of its main constructs joined by a '*' in between.
mobi_sm <- relationships(
paths(to = "Satisfaction",
from = c("Image", "Expectation", "Value",
"Image*Expectation", "Image*Value"))
)
# Load data, assemble model, and estimate
mobi_pls <- estimate_pls(data = mobi,
measurement_model = mobi_mm,
structural_model = mobi_sm)
summary(mobi_pls)
# Plot the model
plot(mobi_pls)
# Bootstrap the model
boot_mobi_pls <- bootstrap_model(seminr_model = mobi_pls,
nboot = 500)
summary(boot_mobi_pls)
# Plot the bootstrapped model
plot(boot_mobi_pls)
# Second, using the standardized product indicator method as per Henseler & Chin (2010). ----
# seminr syntax for creating measurement model
mobi_mm <- constructs(
composite("Image", multi_items("IMAG", 1:5)),
composite("Expectation", multi_items("CUEX", 1:3)),
composite("Value", multi_items("PERV", 1:2)),
composite("Satisfaction", multi_items("CUSA", 1:3)),
interaction_term(iv = "Image", moderator = c("Expectation"), method = product_indicator, weights = mode_A),
interaction_term(iv = "Image", moderator = c("Value"), method = product_indicator, weights = mode_A)
)
# structural model: note that name of the interactions construct should be
# the names of its two main constructs joined by a '*' in between.
mobi_sm <- relationships(
paths(to = "Satisfaction",
from = c("Image", "Expectation", "Value",
"Image*Expectation", "Image*Value"))
)
# Load data, assemble model, and estimate
mobi_pls <- estimate_pls(data = mobi,
measurement_model = mobi_mm,
structural_model = mobi_sm)
summary(mobi_pls)
# Plot the model
plot(mobi_pls)
# Bootstrap the model
boot_mobi_pls <- bootstrap_model(seminr_model = mobi_pls,
nboot = 500)
summary(boot_mobi_pls)
# Plot the bootstrapped model
plot(boot_mobi_pls)
# Third, using the two_stage method as per Henseler & Chin (2010). ----
# Creating our measurement model
mobi_mm <- constructs(
composite("Image", multi_items("IMAG", 1:5)),
composite("Expectation", multi_items("CUEX", 1:3)),
composite("Value", multi_items("PERV", 1:2)),
composite("Satisfaction", multi_items("CUSA", 1:3)),
interaction_term(iv = "Image", moderator = c("Expectation"), method = two_stage, weights = mode_A),
interaction_term(iv = "Image", moderator = c("Value"), method = two_stage, weights = mode_A)
)
# Structural model
# note: interactions should be the names of its main constructs joined by a '*' in between.
mobi_sm <- relationships(
paths(to = "Satisfaction",
from = c("Image", "Expectation", "Value",
"Image*Expectation", "Image*Value"))
)
# Load data, assemble model, and estimate
mobi_pls <- estimate_pls(data = mobi,
measurement_model = mobi_mm,
structural_model = mobi_sm)
summary(mobi_pls)
# Plot the model
plot(mobi_pls)
# Bootstrap the model
boot_mobi_pls <- bootstrap_model(seminr_model = mobi_pls,
nboot = 500)
summary(boot_mobi_pls)
# Plot the bootstrapped model
plot(boot_mobi_pls)
|
e7e7b863beb8377a2a5cc984602907c1f74f8740 | ff78fce4864f37a3c98ce1b518014760e8e3b6ad | /man/normalScores.Rd | aedf471f89fb5c585eabd89d6323e0db15c390f3 | [
"MIT"
] | permissive | rmnppt/simdr | 062c92381be86e22ebf29aaa876ffb1992ea2ca8 | 990c786b4ea911b86839e473a7c993dc4e3b5c92 | refs/heads/master | 2020-03-30T09:52:52.502042 | 2018-10-10T10:18:39 | 2018-10-10T10:18:39 | 151,096,622 | 1 | 0 | MIT | 2018-10-01T13:51:56 | 2018-10-01T13:51:56 | null | UTF-8 | R | false | true | 1,719 | rd | normalScores.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalScores.R
\name{normalScores}
\alias{normalScores}
\title{normalScores}
\usage{
normalScores(v, ties = "average", forwards = TRUE)
}
\arguments{
\item{v}{a numeric vector as the input variable}
\item{ties}{how to deal with ties, passed to ties.method argument in \code{\link[base]{rank}}}
\item{forwards}{lowest numerical value on left of the distribution?
i.e. the lowest number in input data is also the lowest number in the output data
default is TRUE}
}
\description{
Convert the raw indicators into normalised indicators using the inverse cumulative normal (probit) function.
The resulting variables should appear normally distributed.
In SIMD this function is used to transform indicators before combining them.
The function will take a numeric vector as input and return a numeric vector of equal length with the transformation applied.
}
\details{
This function calculates the normal scores for each indicator. The normal score is defined as follows:
\deqn{yi = inverse_probit ( ri / (n + 1) ) }
where: inverse_probit is the inverse cumulative normal (probit) function,
ri is the rank of the i'th observation and n is the number of non-missing observations for the ranking variable.
This is the inverse cumulative normal probability density of proportional ranks.
The resulting variable should appear normally distributed regardless of the input data.
We translated this approach using the \href{http://support.sas.com/documentation/cdl/en/proc/61895/HTML/default/viewer.htm#a000146840.htm}{SAS documentation}
as a guide.
}
\examples{
# (not run)
# PUT SOME CODE HERE
}
\keyword{SIMD,}
\keyword{openSIMD,}
\keyword{simdr}
|
f8e1e725ac26124b3ca2fdd4ebdf128fe35cc844 | d837f4be77881476003225c10e672acc1784e92f | /man/calHTqPCR.Rd | 93c964851e6fa3d527b2cd8ed896027e75fef827 | [] | no_license | zgzhao/Xtools | 3dd5368fddeb3451505c281ba0968a6c17fddcdf | adb039a28ecd6a999d5e5b90d6568fb2098b1319 | refs/heads/master | 2021-07-05T13:37:49.490087 | 2021-05-16T13:18:00 | 2021-05-16T13:18:00 | 48,232,055 | 1 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,053 | rd | calHTqPCR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qPCR.R
\name{calHTqPCR}
\alias{calHTqPCR}
\title{Compare gene expression of sample pairs by limma method: FC and stats.}
\usage{
calHTqPCR(
dt,
ref.gene,
comp.strings = NULL,
comp.type = 1,
norm.method = "deltaCt",
verbose = FALSE
)
}
\arguments{
\item{dt}{Data of qPCRset class, read with readDataHTqPCR function. Please refer to readDataHTqPCR function info.}
\item{ref.gene}{String. Names of reference gene(s).}
\item{comp.strings}{String of length 1. Compare stirng related to sample name(s).}
\item{comp.type}{Integer. 1: single control; 2: paired compares.}
\item{norm.method}{String. Default: del}
\item{verbose}{pass to \code{\link{HTqPCR::normalizeCtData}}}
}
\value{
result: results of limmaCtData (list); contrast: contranst sample labels (named strings); d.norm: result of normalizeCtData.
}
\description{
Calculate and return major results of HTqPCR.
}
\details{
Return results includes d.norm, fold change and stats table.
}
\author{
ZG Zhao
}
|
9794c6708c97a979219805c1f4dc58da7341f3c4 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/man/cognitoidentityprovider_get_identity_provider_by_identifier.Rd | 3fc0329f5e407dc29bddaed4aa65d2b022eb403b | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 687 | rd | cognitoidentityprovider_get_identity_provider_by_identifier.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_get_identity_provider_by_identifier}
\alias{cognitoidentityprovider_get_identity_provider_by_identifier}
\title{Gets the specified IdP}
\usage{
cognitoidentityprovider_get_identity_provider_by_identifier(
UserPoolId,
IdpIdentifier
)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{IdpIdentifier}{[required] The IdP identifier.}
}
\description{
Gets the specified IdP.
See \url{https://www.paws-r-sdk.com/docs/cognitoidentityprovider_get_identity_provider_by_identifier/} for full documentation.
}
\keyword{internal}
|
d3e79ee6a48e2c2a1a427a5c4b7896d63fda2386 | 6e5efc0b6b6b37c735c1c773531c41b51675eb10 | /man/GetSigTable.Anova.Rd | 6c4bc18d6928bc0859032946903b691a0da28112 | [
"GPL-2.0-or-later"
] | permissive | xia-lab/MetaboAnalystR | 09aa09c9e57d7da7d73679f5a515eb68c4158e89 | 9edbbd1e2edda3e0796b65adf440ad827abb7beb | refs/heads/master | 2023-08-10T06:08:56.194564 | 2023-08-01T15:13:15 | 2023-08-01T15:13:15 | 109,994,826 | 268 | 165 | MIT | 2023-03-02T16:33:42 | 2017-11-08T15:38:12 | R | UTF-8 | R | false | true | 345 | rd | GetSigTable.Anova.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats_univariates.R
\name{GetSigTable.Anova}
\alias{GetSigTable.Anova}
\title{Sig Table for Anova}
\usage{
GetSigTable.Anova(mSetObj = NA)
}
\arguments{
\item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)}
}
\description{
Sig Table for Anova
}
|
af7725b69dcba137e120f1c7bd79c3f1ff2bceb5 | f8e90f1d7e1765986d1e54518e36be3421340abf | /man/MiRKAT.R.Rd | 9af55b95b9c42ad427bbbcc1da2882f2060971f3 | [] | no_license | cran/MiRKAT | dc223b705b1e95a30ecf32be9187dfe1b2ce7895 | 7458a2346404eb5e6cde863bf2de58a8b9675837 | refs/heads/master | 2023-03-04T06:52:51.698101 | 2023-02-17T13:50:02 | 2023-02-17T13:50:02 | 98,353,574 | 3 | 2 | null | null | null | null | UTF-8 | R | false | true | 3,131 | rd | MiRKAT.R.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiRKAT_R.R
\name{MiRKAT.R}
\alias{MiRKAT.R}
\title{Robust MiRKAT (robust regression)}
\usage{
MiRKAT.R(y, X, Ks, omnibus = "kernel_om", returnKRV = FALSE, returnR2 = FALSE)
}
\arguments{
\item{y}{A numeric vector of the a continuous or dichotomous outcome variable.}
\item{X}{A numerical matrix or data frame, containing additional covariates that you want to adjust for Mustn't be NULL}
\item{Ks}{list of n by n kernel matrices (or a single n by n kernel matrix), where n is the sample size. It can be constructed from
microbiome data through distance metric or other approaches, such as linear kernels or Gaussian kernels.}
\item{omnibus}{A string equal to either "Cauchy" or "kernel_om" (or nonambiguous abbreviations thereof), specifying whether
to use the Cauchy combination test or an omnibus kernel to generate the omnibus p-value.}
\item{returnKRV}{A logical indicating whether to return the KRV statistic. Defaults to FALSE.}
\item{returnR2}{A logical indicating whether to return the R-squared coefficient. Defaults to FALSE.}
}
\value{
Returns p-values for each individual kernel matrix, an omnibus p-value if multiple kernels were provided, and measures of effect size KRV and R2.
\item{p_values}{labeled individual p-values for each kernel}
\item{omnibus_p}{omnibus p_value, calculated as for the KRV test}
\item{KRV}{A vector of kernel RV statistics (a measure of effect size), one for each candidate kernel matrix. Only returned if returnKRV = TRUE}
\item{R2}{A vector of R-squared statistics, one for each candidate kernel matrix. Only returned if returnR2 = TRUE}
}
\description{
A more robust version of MiRKAT utilizing a linear model by robust regression using an M estimator.
}
\details{
MiRKAT.R creates a kernel matrix using the linear model created with the function rlm, a robust regression function, then does
the KRV analysis on Ks and the newly formed kernel matrix representing the outome traits.
y and X should all be numerical matrices or vectors with the same number of rows, and mustn't be NULL.
Ks should be a list of n by n matrices or a single matrix. If you have distance metric from metagenomic data, each kernel can be
constructed through function D2K. Each kernel may also be constructed through other mathematical approaches.
Missing data is not permitted. Please remove all individuals with missing y, X, Ks prior to analysis
}
\examples{
# Generate data
library(GUniFrac)
data(throat.tree)
data(throat.otu.tab)
data(throat.meta)
unifracs = GUniFrac(throat.otu.tab, throat.tree, alpha = c(1))$unifracs
if (requireNamespace("vegan")) {
library(vegan)
BC= as.matrix(vegdist(throat.otu.tab, method="bray"))
Ds = list(w = unifracs[,,"d_1"], uw = unifracs[,,"d_UW"], BC = BC)
} else {
Ds = list(w = unifracs[,,"d_1"], uw = unifracs[,,"d_UW"])
}
Ks = lapply(Ds, FUN = function(d) D2K(d))
covar = cbind(throat.meta$Age, as.numeric(throat.meta$Sex == "Male"))
# Continuous phenotype
n = nrow(throat.meta)
y = rchisq(n, 2)
MiRKAT.R(y, X = covar, Ks = Ks)
}
\author{
Weijia Fu
}
|
30e9ae74fb8608c0c30b245195765058c2e025e6 | 4b219a116ff605fee45da5539dc133e4030b6ba9 | /man/pairwise_plot.Rd | accb7fbfe08112106b3405da7761f820527e6b57 | [] | no_license | vermouthmjl/rrr | a002910b1798d8ae96815dbcabc4ab4b92651450 | 28b1b15c7cf4a92aa1fe5f67229259302f5daa2f | refs/heads/master | 2021-01-25T08:13:37.288262 | 2017-06-19T14:45:32 | 2017-06-19T14:45:32 | 93,736,834 | 0 | 0 | null | 2017-06-08T10:26:32 | 2017-06-08T10:26:31 | null | UTF-8 | R | false | true | 2,159 | rd | pairwise_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrr.R
\name{pairwise_plot}
\alias{pairwise_plot}
\title{Pairwise Plots}
\usage{
pairwise_plot(x, y, type = "pca", pair_x = 1, pair_y = 2, rank = "full",
k = 0, interactive = FALSE, point_size = 2.5)
}
\arguments{
\item{x}{data frame or matrix of predictor variables}
\item{y}{data frame or matrix of response variables}
\item{type}{type of reduced-rank regression model to fit. \code{type = "identity"}, the default, uses \eqn{\mathbf{\Gamma} = \mathbf{I}} to fit a reduced-rank regression. \code{type = "pca"} fits a principal component analysis model as a special case of reduced-rank regression. \code{type = "cva"} fits a canonical variate analysis model as a special case of reduced-rank regression. \code{type = "lda"} fits a linear discriminant analysis model as a special case of reduced-rank regression.}
\item{pair_x}{variable to be plotted on the \eqn{X}-axis}
\item{pair_y}{variable to be plotted on the \eqn{Y}-axis}
\item{rank}{rank of coefficient matrix.}
\item{k}{small constant added to diagonal of covariance matrices to make inversion easier.}
\item{interactive}{logical. If \code{interactive = FALSE}, the default, plots a static pairwise plot. If \code{interactive = FALSE} plots an interactive pairwise plot.}
\item{point_size}{size of points in scatter plot.}
}
\value{
ggplot2 object if \code{interactive = FALSE}; plotly object if \code{interactive = TRUE}.
}
\description{
Pairwise Plots
}
\examples{
data(pendigits)
digits_features <- pendigits[,1:34]
digits_class <- pendigits[,35]
pairwise_plot(digits_features, digits_class, type = "pca", pair_x = 1, pair_y = 3)
library(dplyr)
data(COMBO17)
galaxy <- as_data_frame(COMBO17)
galaxy <- select(galaxy, -starts_with("e."), -Nr, -UFS:-IFD)
galaxy <- na.omit(galaxy)
galaxy_x <- select(galaxy, -Rmag:-chi2red)
galaxy_y <- select(galaxy, Rmag:chi2red)
pairwise_plot(galaxy_x, galaxy_y, type = "cva")
data(iris)
iris_x <- iris[,1:4]
iris_y <- iris[5]
pairwise_plot(iris_x, iris_y, type = "lda")
}
\references{
Izenman, A.J. (2008) \emph{Modern Multivariate Statistical Techniques}. Springer.
}
|
e19d2a106ecce9f1eb2f6dc0bbeb949b72aa4698 | 0ac5531423fc5ce450a336fa6c3da21e2300ce89 | /exercise-3/exercise.R | ef1216d225c961ef8fc93ab3737926e48149c69a | [
"MIT"
] | permissive | 4nierr/ch08-lists | 830e00f1ec5ef711338b671857ef7b16d1f102df | 15f7c2daccbd98144cabfa5e944d62bd41087ac4 | refs/heads/master | 2023-04-09T17:39:39.090668 | 2021-04-21T00:07:00 | 2021-04-21T00:07:00 | 359,978,060 | 0 | 0 | MIT | 2021-04-20T23:34:43 | 2021-04-20T23:34:43 | null | UTF-8 | R | false | false | 1,272 | r | exercise.R | # Exercise 2: `*apply()`
# Create a *list* of 10 random numbers. Use the `runif` function to make a vector of random numbers,
# then use `as.list()` to convert that to a list.
# Use `lapply()` to apply the `round()` function to each number, rounding it to the nearest .1
## Bonus
## Create a variable 'sentence' that contains a sentence of text (go for something longish).
## Make it lowercase
# Use the `strsplit()` function to split the sentence into vector of letters.
# Hint: split on `""` to split on everything
# Note: this will return a _list_ with 1 element (which is the vector of letters)
# Extract the vector of letters from the resulting list
# Use the `unique()` function to get a vector unique letters
## Define a function 'countOccurences' that takes in two parameters: a letter and a vector of letters.
## The function should return how many times that letter occurs in the vector
## Hint: use a logical vector as filter.
# Call your CountOccurances() function to see how many times the letter 'e' is in your sentence.
# Use `sapply()` to apply your CountOccurances() function to each unique letter in the vector to determine its frequency!
# Convert the result into a list (using `as.list`).
# Print the resulting list of frequencies
|
84982ba7f35f5bd014859fd757c5716829c43c25 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /quantregRanger/R/RcppExports.R | 7f6feb993da51c8ec7a7c2d77c8f5c19d99f154a | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,455 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
Findweights <- function(ONv, NNv, WEv, nobs, nnew, ntree, thres, counti, normalise) {
.Call('_quantregRanger_Findweights', PACKAGE = 'quantregRanger', ONv, NNv, WEv, nobs, nnew, ntree, thres, counti, normalise)
}
Findweightsfast <- function(OrdNv, NNv, filterednodes, index, newindex, WEv, nobs, nnew, ntree, thres, l) {
.Call('_quantregRanger_Findweightsfast', PACKAGE = 'quantregRanger', OrdNv, NNv, filterednodes, index, newindex, WEv, nobs, nnew, ntree, thres, l)
}
Findweightsinbag <- function(ONv, inbag, WEv, nobs, nnew, ntree, thres, counti, normalise) {
.Call('_quantregRanger_Findweightsinbag', PACKAGE = 'quantregRanger', ONv, inbag, WEv, nobs, nnew, ntree, thres, counti, normalise)
}
Findweightsinbagfast <- function(ONv, OrdNv, filterednodes, index, newindex, inbag, WEv, nobs, ntree, thres, l) {
.Call('_quantregRanger_Findweightsinbagfast', PACKAGE = 'quantregRanger', ONv, OrdNv, filterednodes, index, newindex, inbag, WEv, nobs, ntree, thres, l)
}
Findweightsinbagfastimp <- function(ONv, ONvp, OrdNv, filterednodes, index, newindex, inbag, WEv, WEvp, npred, nobs, ntree, thres, l, countbreak) {
.Call('_quantregRanger_Findweightsinbagfastimp', PACKAGE = 'quantregRanger', ONv, ONvp, OrdNv, filterednodes, index, newindex, inbag, WEv, WEvp, npred, nobs, ntree, thres, l, countbreak)
}
|
20a80d4b033285b3e0a9fe43abfb3f60ad906b11 | 8e80765a6b757263315ffb8e42718fbb1359c924 | /ch11/myCalculator/.Rproj.user/F2E7AEE4/sources/per/t/C2979E9D-contents | 491e29f36f0337aa076491c2d46c64fc797af301 | [] | no_license | sycho0311/R-Basic | 5c0c5499535b454f5399b99c8558356acfb56eb4 | 39ba67a34a0812faef7ef7a2e9b9ff15b9bd7cb9 | refs/heads/master | 2020-04-01T17:35:17.387095 | 2018-10-17T10:24:12 | 2018-10-17T10:24:12 | 147,784,310 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,279 | C2979E9D-contents | makeScoreData <- function() {
setwd("C:/Users/sycho/Desktop/R/ch11")
data <- read.table("student.txt", header = TRUE, stringsAsFactors = FALSE)
score1 <<- data.frame(학번=data$id, 이름=data$name, 국어=data$kor, 영어=data$eng, 수학=data$mat)
cat("성적 데이터 :\n")
print(score1)
}
# dataFrame에 있는 특정 행 또는 열에 대한 데이터에 어떠한 함수를 사용할지 결정
# 데이터의 범위 3~5열, 행 방향으로 mean함수를 사용하여 각 행에 대한 결과
calcSumMean <- function() {
totalValue <- apply(score1[, c(3:5)], 1, sum)
avgValue <<- apply(score1[, c(3:5)], 1, mean)
print(avgValue)
}
makeGrade <- function() {
# finalGrade 초기화
finalGrade <- c()
for (n in avgValue) {
if (n >= 90) {
Grade <- "A"
} else if (n >= 80) {
Grade <- "B"
} else if (n >= 70) {
Grade <- "C"
} else if (n >= 60) {
Grade <- "D"
} else {
Grade <- "F"
}
# 기존 finalGrade에 Grade 값 추가
finalGrade <- c(finalGrade, Grade)
}
print(finalGrade)
}
writeScore <- function() {
score2 <- cbind(score1, 평균=avgValue, 등급=finalGrade)
print(score2)
write.csv(score2, "student_add.csv")
}
makeScoreData()
calcSumMean()
makeGrade()
writeScore()
| |
914943a27548529974848c82b6b6ea06a9969a6d | ad632407b9728d158510388e341f022d0c4fbdab | /lib/plotChoropleth2.R | 09b7731921c060c93769f7ab670fcaa6c91aad60 | [] | no_license | ramnathv/Chicago-Mayoral-Elections | e06d2bc6b4b3b9eb3f4f0f6b48d327c48e47a236 | 85de84cfe2d256bdb665ae6c553adb64e7937bcc | refs/heads/master | 2021-01-20T05:08:13.694225 | 2012-03-21T15:59:33 | 2012-03-21T15:59:33 | 1,433,813 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,794 | r | plotChoropleth2.R | # FUNCTION TO PLOT A CHOROPLETH MAP
# TODO. Split the function into pieces
# 1. merge_poly_df: merge shapefile and
# 2.
# HELPER FUNCTIONS
merge_poly_df <- function(.poly, id1, .df, id2, field, type){
require(ggplot2);
# create id for shapefile
id1 = llply(id1, as.name);
.poly@data$id = with(.poly@data, do.call('paste', c(id1, sep = "-")));
# if data file exists, create id
if (all(!is.na(.df))){
.df = .df[, c(unlist(id2), field)]
id2 = llply(id2, as.name);
.df$id = with(.df, do.call('paste', c(id2, sep = "-")));
}
if (type == 'ggplot'){
# fortify shape file and merge with data
if(all(is.na(.df))) {.df = .poly@data};
map = fortify(.poly, region = 'id');
map.data = merge(map, .df, by = 'id');
map.data = map.data[order(map.data$order),];
} else {
# merge data with shape file
if(all(!is.na(.df))) {
.data = .df[match(.poly@data$id, .df$id),];
.poly@data = data.frame(.poly@data, .data);
.poly@data$id.1 = NULL;
}
map.data = .poly@data;
}
return(map.data);
}
# fortify_poly_df <- function(.poly, id){
#
# map = fortify(.poly, region = 'id');
# map.data = merge(map, .poly@data, by = 'id');
# # map.data = na.omit(map.data);
# map.data = map.data[order(map.data$order),];
# return(map.data);
#
# }
find_fill_colors <- function(map.data, field, colpal, ...){
require(classInt)
x = list(...); x$var = map.data[,field];
intervals = do.call("classIntervals", x);
colors = brewer.pal(length(intervals$brks) - 1, colpal);
map.data$fill = findColours(intervals, colors, digits = 4);
attr(map.data$fill, 'at') = intervals$brks;
# attr(map.data$fill, 'intvl') = with(intervals, cut(var, brks));
# map.data$intvl = with(intervals, cut(var, brks))
return(map.data);
}
## FUNCTION TO SORT COLORS
# Source: http://goo.gl/3lg2
sort.colours <- function(col) {
require(colorspace)
c.rgb = col2rgb(col)
c.RGB = RGB(t(c.rgb) %*% diag(rep(1/255, 3)))
c.HSV = as(c.RGB, "HSV")@coords
order(c.HSV[, 1], c.HSV[, 2], c.HSV[, 3])
}
plotChoroplethB <- function(.poly, id1, field,
.df = NA, id2 = id1, fill = NA, title = "", legtitle = "Values",
colpal = 'PuRd', type = 'ggplot', ...){
# load required libraries
library(maptools); library(spatial); library(RColorBrewer);
library(classInt); library(ggplot2); gpclibPermit();
# extract relevant columns of .df if fill is unspecified
# if(all(is.na(fill))) {.df = .df[, c(unlist(id2), field)]};
map.data = merge_poly_df(.poly, id1, .df, id2, field, type);
if (all(is.na(fill))){
map.data = melt(map.data, measure.vars = field);
map.data = find_fill_colors(map.data, 'value', colpal, ...);
breaks = with(map.data, attr(fill, 'palette'));
labels = names(with(map.data, attr(fill, 'table')));
facet = facet_wrap(~ variable);
} else {
breaks = unique(as.character(map.data$fill));
breaks = breaks[sort.colours(colors)];
labels = breaks;
facet = geom_blank();
}
if (type == 'ggplot'){
map.data = map.data[order(map.data$order),];
p1 = ggplot(map.data, aes(long, lat, group = group)) +
geom_polygon(aes(fill = fill)) +
scale_fill_identity(name = legtitle, breaks = breaks, labels = labels) +
facet + theme_map() + opts(title = title);
} else {
.at = attr(map.data$fill, 'at');
map.data = merge_poly_df(.poly, id1, .df, id2, field, type);
.poly@data = map.data;
p1 = spplot(.poly, field, at = .at, col.regions = breaks, col = 'transparent');
}
return(p1);
} |
82c32610c066e0e936fea2dd0ec5afa67bcd1696 | c74d0ab31e84404f48be9e8db063d2ce7f2ae3e3 | /man/cgrcusum.Rd | f59e9ce70f702384f8d7ff84cbda8d397e26cfc1 | [] | no_license | cran/cgrcusum | a443fa0770610d91d3569f1a7de591eee939a5d2 | 30ef81221d07df54320b9089957be30c3872938b | refs/heads/master | 2023-09-05T11:29:53.223648 | 2021-11-22T07:00:09 | 2021-11-22T07:00:09 | 430,765,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,686 | rd | cgrcusum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cgrcusum.R
\name{cgrcusum}
\alias{cgrcusum}
\title{Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)}
\usage{
cgrcusum(
data,
coxphmod,
cbaseh,
ctimes,
h,
stoptime,
C,
pb = FALSE,
cmethod = "memory2"
)
}
\arguments{
\item{data}{\code{data.frame} containing the following named columns:
\itemize{
\item \code{entrytime} numeric - time of entry into study,
\item \code{survtime} numeric - time from entry until event,
\item \code{censorid} integer - (optional) censoring indicator (0 = right censored, 1 = observed),
} and optionally additional covariates used for risk-adjustment.}
\item{coxphmod}{(optional) a cox proportional hazards regression model as produced by
the function \code{\link[survival:coxph]{coxph()}}. Standard practice: \cr
\code{coxph(Surv(survtime, censorid) ~ covariates, data = data)}. \cr
Alternatively, a list with:
\itemize{
\item $formula (~ covariates)
\item $coefficients (named vector specifying risk adjustment coefficients
for covariates - names must be the same as in $formula and colnames of \code{data}).
}}
\item{cbaseh}{a function which returns the non risk-adjusted cumulative
baseline hazard \eqn{H_0(t)}{H_0(t)}. If \code{cbaseh} is missing but
\code{coxphmod} has been
specified as a survival object, this baseline hazard rate will be determined
using the provided \code{coxphmod}.}
\item{ctimes}{(optional) vector of construction times at which the value of the chart should be
determined. When not specified, the chart is constructed at all failure times.}
\item{h}{(optional) value of the control limit. The chart will only be
constructed until the value of the control limit has been reached or
surpassed.}
\item{stoptime}{(optional) time after which the value of the chart should no
longer be determined. Default = max(failure time). Useful when ctimes
has not been specified.}
\item{C}{(optional) a numeric value indicating how long after entering the study
patients should no longer influence the value of the chart. This is
equivalent to right-censoring every observation at time \code{entrytime} + C.}
\item{pb}{(optional) boolean indicating whether a progress bar should be shown.
Default = FALSE}
\item{cmethod}{One of the following:
\itemize{
\item "memory2" (default) Matrix formulation of the problem (faster for high volume/long time construction - less RAM than "memory")
\item "CPU" Calculates the value of the CGR-CUSUM for every time point from scratch. Recommended for small data volume (lower initialization time).
\item "memory" (outdated) Matrix formulation of the problem (faster for high volume/long time construction - may require much RAM)
}}
}
\value{
An object of class "cgrcusum" containing:
\itemize{
\item \code{CGR}: a data.frame with named columns:
\itemize{
\item $time (time of construction),
\item $value (value of the chart at $time),
\item $exp_theta_t (value of MLE \eqn{e^{\theta_t}}{e^(\theta_t)}),
\item $S_nu (time from which patients are considered for constructing the chart)
}
\item \code{call}: Contains the \code{call} used to obtain output;
\item \code{stopind}: (only if h specified) Boolean indicating whether the chart was stopped by the provided value of h;
\item \code{h}: Specified value for the control limit;
}There are \code{\link[cgrcusum:plot.cgrcusum]{plot}} and
\code{\link[cgrcusum:runlength.cgrcusum]{runlength}} methods for "cgrcusum" objects.
}
\description{
This function performs the CGR-CUSUM procedure
described in ARTICLE UNDER REVIEW FOR PUBLICATION. For detection purposes, it is sufficient
to only determine the value of the chart at the times of failure. This can be
achieved by leaving \code{ctimes} empty.
}
\details{
The CGR-CUSUM can be used to test for a change of unknown positive fixed size \eqn{\theta}{\theta}
in the subject-specific hazard rate from \eqn{h_i(t)}{h_i(t)} to \eqn{h_i(t) e^\theta}{h_i(t) exp(\theta)}
starting from some unknown patient \eqn{\nu}{\nu}. The starting time of the first patient
which had an increase in failure rate as well as the estimated increase in the
hazard rate are also given in the output.
The CGR-CUSUM is determined as:
\deqn{\max_{1 \leq \nu \leq n} \left( \hat{\theta}_{\geq \nu}(t) N_{\geq \nu}(t) - \left( \exp\left( \hat{\theta}_{\geq \nu}(t) \right) - 1 \right) \Lambda_{\geq \nu}(t)\right)}{max{1<=\nu<=n} (\theta_{>=\nu}(t)N_{>=\nu}(t)) - (exp(\theta_{>=\nu}(t))-1) \Lambda_{>=\nu}(t))}
with \deqn{N(\geq \nu)(t) = \sum_{i \geq \nu} N_i(t)}{N_{>=\nu}(t) = \sum_{i>=\nu} N_i(t)}
with \eqn{N_i(t)}{N_i(t)} the counting process for the failure at time t of subject i
and \deqn{\Lambda_{\geq \nu}(t) = \sum_{i \geq \nu} \Lambda_i(t)}{\Lambda_{>=\nu}(t) = \sum_{i>=\nu}\Lambda_i(t)} the
with \eqn{\Lambda_i(t)}{\Lambda_i(t)} the cumulative intensity of subject i at time t.
}
\examples{
require(survival)
tdat <- subset(surgerydat, Hosp_num == 1)
tcbaseh <- function(t) chaz_exp(t, lambda = 0.01)
varsanalysis <- c("age", "sex", "BMI")
exprfit <- as.formula(paste("Surv(survtime, censorid) ~" ,paste(varsanalysis, collapse='+')))
tcoxmod <- coxph(exprfit, data= surgerydat)
#Alternatively, cbaseh can be left empty when specifying coxphmod through coxph()
cgr <- cgrcusum(data = tdat, coxphmod = tcoxmod, cbaseh = tcbaseh, pb = TRUE)
plot(cgr)
}
\seealso{
\code{\link[cgrcusum]{plot.cgrcusum}}, \code{\link[cgrcusum]{runlength.cgrcusum}}
Other qcchart:
\code{\link{bercusum}()},
\code{\link{bkcusum}()},
\code{\link{funnelplot}()}
}
\author{
Daniel Gomon
}
\concept{qcchart}
|
fe6997767bd543671ac9dc0c60f60a579d7c143a | d696a2f40ea1dd93760dafb53e1de49c69f7e733 | /inst/event_detector/app.R | 576bba719f44e2802e035248262725c67ff59a06 | [] | no_license | ucvm/event_detector | a5f7c73e0b817a825209200aebd0227f50832cca | c8174c1970675384aaa0f3e8397fdb96d7fbeea9 | refs/heads/master | 2021-03-24T20:37:31.614165 | 2019-10-30T16:16:30 | 2019-10-30T16:16:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,643 | r | app.R | library(tidyr)
library(readr)
library(RColorBrewer)
library(magrittr)
library(tibble)
library(baseline)
library(pracma)
library(shiny)
library(shinythemes)
library(stringr)
library(dplyr)
library(ggplot2)
library(DT)
# library(multidplyr)
library(eventDetector)
# set env variable for zip to work
#Sys.setenv(R_ZIPCMD="/usr/bin/zip")
# setup multidplyr
# cluster = create_cluster(8)
# cluster %>% cluster_eval({
# library(eventDetector)
# library(dplyr)
# library(tidyr)
# })
ui = shinyUI(fluidPage(theme = shinytheme("spacelab"),
titlePanel("Event detector"),
sidebarLayout(
sidebarPanel(
h3("Data Load"),
fileInput("datafile", "Choose the file to upload (csv only)",
accept = c('application/csv', '.csv', 'application/text'),
multiple = TRUE),
uiOutput("channelui"),
uiOutput("sectionui"),
actionButton("loadbutton", "Load Data"),
hr(),
h3("Pre-filtering"),
numericInput("track_size", "Select the minimum track size to analyze", value = 30, step = 1),
h3("Peak detection parameters"),
numericInput("minpeakheight", "Select the minimum peak intensity", value = 100, min = 0),
numericInput("minpeakdistance", "Select the minimum distance between peaks (min)", value = 1, min = 1),
h3("Plotting parameters"),
checkboxInput("filterPeaks", "Show only tracks with peaks", FALSE),
hr(),
downloadButton("downloadData", "Download data")
),
mainPanel(
tabsetPanel(
tabPanel("Summary",
h3("Table"),
DT::dataTableOutput("summaryTable"),
br(),
h3("Track Plots"),
plotOutput("singlePlot", height = "600px")),
tabPanel("Peak Plots",
h3("Number of peaks"),
plotOutput("peak_num_plot"),
h3("Width of peaks"),
plotOutput("peak_width_plot")
)
)
)
)
)
)
server = shinyServer(function(input, output, session) {
options(shiny.maxRequestSize=70*1024^2)
# scan file and create ui for file load parameters
scan = reactive({
datafile = input$datafile
if (is.null(datafile)) {
return(NULL)
}
path = datafile$datapath
return(scan_file(path))
})
output$channelui = renderUI({
selectInput("channel", "Intensity Channel", choices = scan()$channels)
})
output$sectionui = renderUI({
selectInput("section", "Section name", choices = scan()$sections)
})
raw = eventReactive(input$loadbutton, {
datafile = input$datafile
if (is.null(datafile)) {
return(NULL)
}
path = datafile$datapath
return(read_data(path, channel = input$channel, section = input$section) %>% filter_by_tracks(10))
})
# event finder --------------------------------------------------------------------------------
# --- baseline correct (do this once when data loads)
baseline_corrected = reactive({
if (is.null(raw())) {
return(NULL)
}
# group the data
spectra = raw() %>%
group_by(Well, Label) %>%
nest()
# set up our progess indicator
progress = shiny::Progress$new(max = nrow(spectra))
progress$set(message = "Baseline correcting...", value = 0)
on.exit(progress$close())
updateProgress = function() {
value = progress$getValue()
value = value + 1
progress$set(value = value)
}
baseline_data = spectra %>%
mutate(spectra = purrr::map(data, ~baseline_correct(.x, updateProgress))) %>%
unnest(spectra) %>%
ungroup() %>%
dplyr::select(-data)
return(baseline_data)
})
# --- filter tracks
filtered = reactive({
if (is.null(baseline_corrected())) {
return(NULL)
}
baseline_corrected() %>% filter_by_tracks(input$track_size)
})
# --- find peaks
peaks = reactive({
if (is.null(filtered())) {
return(NULL)
}
peak_data = filtered() %>%
group_by(Well, Label) %>%
nest()
# set up our progess indicator
progress = shiny::Progress$new(max = nrow(peak_data))
progress$set(message = "Locating peaks...", value = 0)
on.exit(progress$close())
updateProgress = function() {
value = progress$getValue()
value = value + 1
progress$set(value = value)
}
peak_data %>%
mutate(peaks = purrr::map(data,
~detect_peaks(.x, minpeakheight = input$minpeakheight,
minpeakdistance = input$minpeakdistance,
updateProgress = updateProgress))) %>%
unnest(peaks) %>%
ungroup() %>%
dplyr::select(-data)
})
# Single plot ---------------------------------------------------------------------------------
# --- track plot
output$singlePlot = renderPlot({
rows = input$summaryTable_rows_selected
validate(
need(length(rows) > 0, "Please select wells to plot")
)
rows = as.integer(rows)
wells = well_stats()$Well[rows]
plot_tracks(filtered(), peak_data = peaks(), filter_peaks = input$filterPeaks,
show_peaks = TRUE, well = wells )
})
# Summary --------------------------------------------------------------------------
# --- get well stats
well_stats = reactive({
get_stats(filtered(), peaks())
})
# --- render the summary table
output$summaryTable = DT::renderDataTable({
validate(
need(peaks(), "Peaks not calculated")
)
well_stats() %>% datatable() %>% formatRound(c(2,5,9, 12))
})
# --- plot for the nubmer of peaks per well
output$peak_num_plot = renderPlot({
validate(
need(peaks(), "Peaks not calculated")
)
plot_number_of_peaks(well_stats(), peaks())
})
# --- plot of the width of peaks per well
output$peak_width_plot = renderPlot({
validate(
need(peaks(), "Peaks not calculated")
)
well_stats() %>% arrange(`Median peak width`) %>% plot_width_of_peaks(peaks())
})
# Download handlers ---------------------------------------------------------------------------
# Bundle the summary, processed, and peak tables into a single zip download.
output$downloadData <- downloadHandler(
  filename = function() "results.zip",
  content = function(fname) {
    # Write the CSVs in the session temp dir, and restore the working
    # directory afterwards -- the original bare setwd() leaked the change
    # into the rest of the R session.
    old_wd <- setwd(tempdir())
    on.exit(setwd(old_wd), add = TRUE)
    files <- c("summary_data.csv", "processed_data.csv", "peak_data.csv")
    write_csv(well_stats(), path = "summary_data.csv")
    write_csv(filtered(), path = "processed_data.csv")
    write_csv(peaks(), path = "peak_data.csv")
    zip(zipfile = fname, files = files)
  },
  contentType = "application/zip"
)
})
# Run the application
shinyApp(ui = ui, server = server)
|
bd8bc26a154001845f0ee69772f785da4aa5a117 | e5cc3ca8b8c2525beb0231bcb9457b249182bfac | /R/os_line_ending.R | 78a7c12622d3892497be22b0e7f552378d59b479 | [
"MIT"
] | permissive | dpastoor/devutils | 06ce7aea5c2a7e125063ce6a6c39e18022488942 | 7bef165db81904d58939ae8bd2d8064956329474 | refs/heads/master | 2021-04-29T10:33:31.890824 | 2018-10-22T11:57:47 | 2018-10-22T11:57:47 | 77,639,744 | 3 | 0 | NOASSERTION | 2019-06-21T01:41:56 | 2016-12-29T21:02:27 | R | UTF-8 | R | false | false | 366 | r | os_line_ending.R | #' os specific line ending
#' @param force_value force a specific return value, to override the platform type
#' @details
#' returns a carriage return `\\r\\n` for windows and `\\n` for unix
#' @export
os_line_break <- function(force_value = NULL) {
  # An explicit override short-circuits platform detection.
  if (!is.null(force_value)) {
    return(force_value)
  }
  # Scalar `if`/`else` instead of the original `ifelse()`: the condition is
  # always length one, and vectorised ifelse() adds overhead and can mangle
  # attributes on its inputs.
  if (.Platform$OS.type == "windows") "\r\n" else "\n"
}
|
6f763edbab8c1f1f3d3b209800581beed0ba246e | 5c9f9e93106a21bb1c1937c9dd972c2ad21ae4cc | /man/coef.countgmifs.Rd | c8edf1bad6014d071e7c41a891fea09941189264 | [] | no_license | cran/countgmifs | f308da0c8baf395cfb6a2f671eab24f7b6113826 | f0b61cd6fc04b5845d0f84fae318cdf89fe34644 | refs/heads/master | 2020-03-27T03:55:49.565073 | 2020-01-08T13:20:02 | 2020-01-08T13:20:02 | 145,899,296 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 965 | rd | coef.countgmifs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.countgmifs.R
\name{coef.countgmifs}
\alias{coef.countgmifs}
\title{Extract Model Coefficients.}
\usage{
\method{coef}{countgmifs}(object, model.select = "BIC", ...)
}
\arguments{
\item{object}{an \code{countgmifs} fitted object.}
\item{model.select}{when \code{x} is specified any model along the solution path can be selected. The default is \code{model.select="BIC"} which calculates the predicted values using the coefficients from the model having the lowest BIC. Other options are \code{model.select="AIC"} or any numeric value from the solution path.}
\item{...}{other arguments.}
}
\description{
A generic function which extracts the model coefficients from a fitted model object fit using \code{countgmifs}
}
\seealso{
See Also \code{\link{countgmifs}}, \code{\link{predict.countgmifs}}, \code{\link{summary.countgmifs}}, \code{\link{plot.countgmifs}}
}
\keyword{methods}
|
6c60cdb62a8fc460103dbca3e62bd50290fb14e3 | caef4bb4ee1b367fbbcad10b478c832f514f1605 | /MAPA SC.R | f0f92b538e1bff7c13e8938f928cb97abd1a21c7 | [] | no_license | marcusfreire0504/Ciencia_de_Dados | 606939084e75ba6a885404a713cb8605bcf803d0 | c908a091391de0fdab2b007e152b231243e47b27 | refs/heads/master | 2022-11-04T22:32:56.251258 | 2020-06-13T18:19:21 | 2020-06-13T18:19:21 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,619 | r | MAPA SC.R | ###############################################################################
# Practice: FDSC     Source: http://metrico.statanything.com/               #
# Instructor: jkarlos/Raniere                                               #
# Date: 14/12/2016                                                          #
###############################################################################
# One-time setup (run manually if the packages are missing).
# Fix: the original also called install.packages("readOGR"), which can never
# succeed -- readOGR is a *function* exported by rgdal, not a package.
install.packages("rgdal")
install.packages("leaflet")
library(rgdal)
library(leaflet)
# In RStudio the current working directory of R points to the script location.
# Shapefile from ftp://geoftp.ibge.gov.br/organizacao_do_territorio/malhas_territoriais/malhas_municipais/municipio_2015/UFs/SC/
sc <- readOGR("C:/Fernando/Estudo R/Municipios SC/42MUE250GC_SIR.shp", verbose = FALSE)
dir()
getwd()
# Where are the data?
print(sc@data)
# Which columns do we have there?
print(colnames(sc@data))
# A random value between 0 and 1000 for each municipality.
sc@data$valor <- as.numeric(round(runif(nrow(sc@data), 0, 1000), 0))
# Partition the values into 10 bands.
sc@data$bin <- cut(sc@data$valor, seq(0, 1000, by = 100),
                   include.lowest = TRUE, dig.lab = 4)
# Build a colour palette running from red to white.
rbPal <- colorRampPalette(c('red', 'white'))
# Assign a colour per municipality.
# NOTE(review): the breaks run to 3000 although `valor` only reaches 1000, so
# only the reddest third of the 30-colour ramp is ever used -- confirm whether
# seq(0, 1000, by = 100) with rbPal(10) was intended.
sc@data$col <- rbPal(30)[as.numeric(cut(sc@data$valor, seq(0, 3000, by = 300)))]
# Plot the map with those colours.
plot(sc, col = sc@data$col)
demo(colorRampPalette)
|
c304e2471cbd10e8cdc5d9b617025b9204b2d8cc | b9e07cf265f508aabb564293ea4f6ce7b339ae72 | /man/dist_wlatlng_cpp.Rd | 3e278974a97aab72468953379552922afb3ee1b7 | [] | no_license | cran/skm | c8b11ba8cd20858bdbb8cfe35f0943f45008e095 | 7c6a5e7e3c6f37ae3db362c0fbee77f466d31b7b | refs/heads/master | 2021-01-13T05:33:27.730603 | 2017-01-23T07:22:33 | 2017-01-23T07:22:33 | 80,006,272 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,393 | rd | dist_wlatlng_cpp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{dist_wlatlng_cpp}
\alias{distRpl_wlatlng_cpp}
\alias{distSgl_wlatlng_cpp}
\alias{dist_wlatlng_cpp}
\alias{dist_wlatlng_km_cpp}
\alias{dist_wlatlng_mi_cpp}
\title{dist_wlatlng_cpp}
\usage{
dist_wlatlng_mi_cpp(lat1, lng1, lat2, lng2)
dist_wlatlng_km_cpp(lat1, lng1, lat2, lng2)
distSgl_wlatlng_cpp(lat1, lng1, lat2, lng2, measure = "mi")
distRpl_wlatlng_cpp(lat1, lng1, lat2, lng2, measure = "mi",
distRpl_GS = 100L)
}
\arguments{
\item{lat1}{latitude of coordinate1}
\item{lng1}{longitude of coordinate1}
\item{lat2}{latitude of coordinate2}
\item{lng2}{longitude of coordinate2}
\item{measure}{"mi" (mile) or "km" (kilometer)}
\item{distRpl_GS}{The grain size of a parallel algorithm sets a minimum chunk size for parallelization.
In other words, at what point to stop processing input on separate threads (as sometimes creating more
threads can degrade the performance of an algorithm by introducing excessive synchronization overhead).
Default is 100.}
}
\description{
calculate distance between coordinate1<lat1, lng1> and coordinate2<lat2, lng2>
}
\details{
calculate the great circle distance between 2 points with Haversine formula,
which deliberately ignores elevation differences.
Haversine formula (from R.W. Sinnott, "Virtues of the Haversine",
Sky and Telescope, vol. 68, no. 2, 1984, p. 159):
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
c = 2 * arcsin(min(1,sqrt(a)))
d = R * c
dist_wlatlng_mi_cpp:
calculate distance between coordinate1<lat1, lng1> and coordinate2<lat2, lng2> in mile
dist_wlatlng_km_cpp:
calculate distance between coordinate1<lat1, lng1> and coordinate2<lat2, lng2> in kilometer
distSgl_wlatlng_cpp:
calculate distance between coordinate1<lat1, lng1> and coordinate2<lat2, lng2> in
mile (measure = "mi") or kilometer (measure = "km"), default is mile.
implement as serial computing over vector of lat1, lng1, lat2, lng2
distRpl_wlatlng_cpp:
calculate distance between coordinate1<lat1, lng1> and coordinate2<lat2, lng2> in
mile (measure = "mi") or kilometer (measure = "km"), default is mile.
implement as parallel computing over vector of lat1, lng1, lat2, lng2 via RcppParallel
}
|
df91aae673fe2540496c5d1466ede6bd11322883 | 52817a5a47ac90b694a6ca80e628470ae742b1e4 | /R/plot.all.R | 2c3b2bf25b1af113dbbd9fceb7f94df2eb4f0c34 | [] | no_license | dbgstat/ames | ff06f53f40f3b42324df6b71ee28bb09e7b8b692 | 56e8cb1669b0efd3c9d3f3e7bfbed05a1fc97fab | refs/heads/master | 2021-01-19T08:04:01.002032 | 2015-10-06T00:25:38 | 2015-10-06T00:25:38 | 41,557,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 875 | r | plot.all.R | #' (internal)
#'
#' Runs the complete set of diagnostic plots for a fitted model in one call.
#'
#' @param obj Fitted model object; `obj$fit.method` determines whether the
#'   deviance-based residual plots are produced ("gnlm" fits only).
#' @param nsim Number of simulations used for the envelope (default 1000).
#' @export
plot.all <- function(obj, nsim = 1000) {
  # The two original branches duplicated every argument and differed only in
  # the two deviance flags; compute those once and call plot.fitames a single
  # time. (T/F literals replaced with TRUE/FALSE, which cannot be reassigned.)
  use_deviance <- obj$fit.method == "gnlm"
  plot.fitames(obj,
               smooth = TRUE,
               deviance.plots = use_deviance,
               std.deviance.plots = use_deviance,
               pearson.plots = TRUE,
               std.pearson.plots = TRUE,
               qqplot = TRUE,
               lag.plot = TRUE, lag = 1,
               diag.measures = TRUE,
               envelope = TRUE, nsim = nsim, probs = c(0.005, 0.995),
               quiet = TRUE)
}
|
4e132f7c20135479dd39bbfd0d0ca96d1933cab9 | 877c925b7a16807108258c3c36198db85febe943 | /server.R | c769e21f34c87c255f01ec660507209c82b52b2d | [] | no_license | manojprasad581/ShinyAppsAssignment | d556ffbb22de7fdf3b7cd5528cbe443a830ff0f9 | fee32a0b858b0db53c5682019ad3dca5161eae1b | refs/heads/master | 2022-04-09T08:28:34.014834 | 2020-03-28T23:11:30 | 2020-03-28T23:11:30 | 250,400,926 | 0 | 0 | null | 2020-03-28T23:11:31 | 2020-03-27T00:09:15 | HTML | UTF-8 | R | false | false | 1,660 | r | server.R | library(shiny)
library(leaflet)
library(dplyr)
data("quakes")
# Prepare the quakes data for mapping:
#   * classify each event into a colour band by magnitude
#   * keep only the 15 strongest events per band (at most 45 rows)
# Magnitudes outside 4.0-6.9 would receive an NA colour; the stock
# datasets::quakes magnitudes appear to lie inside that range -- confirm if
# the input ever changes.
process_quakes_data <- function() {
  # Based on magnitudes, add a color variable
  quakes <- quakes %>% mutate (
    color = case_when(
      mag >= 4.0 & mag <= 4.9 ~ "blue",
      mag >= 5.0 & mag <= 5.9 ~ "orange",
      mag >= 6.0 & mag <= 6.9 ~ "red"
    )
  )
  # Keep the top 15 observations of each colour group.
  # slice_head() replaces the superseded do(head(...)) idiom: after the
  # global arrange(desc(mag)), the first 15 rows of every group are its
  # strongest events, exactly as before.
  quakes <- quakes %>%
    group_by(color) %>%
    arrange(desc(mag)) %>%
    slice_head(n = 15)
  quakes
}
# Build the plotting data once at app start-up; note this `quakes` shadows
# the raw datasets::quakes loaded above (at most 15 rows per colour band).
quakes <- process_quakes_data()
# Map centre: mean coordinates of the retained events.
mean_lat <- mean(quakes[["lat"]])
mean_long <- mean(quakes[["long"]])
# Define server logic: render the interactive earthquake map
# (the original comment said "histogram"; no histogram is drawn here).
shinyServer(function(input, output) {
    output$quakesPlot <- renderLeaflet({
        if(length(input$intensity) == 0) {
            # No magnitude band selected: plot an empty interactive map
            # centred on the data, using Leaflet
            quakes %>%
                leaflet() %>%
                addTiles() %>%
                setView(lng = mean_long, lat = mean_lat, zoom = 04)
        } else {
            # Keep only events whose colour band matches input$intensity
            quakes <- quakes %>% filter(color == input$intensity)
            # Plot an interactive map using Leaflet; marker radius scales
            # with mag^2 so stronger quakes stand out
            quakes %>%
                leaflet() %>%
                addTiles() %>%
                addCircleMarkers(color = quakes$color, popup = paste("Magnitude = ", quakes$mag), radius = ~ mag ^ 2,
                                 stroke = FALSE, fillOpacity = 0.5) %>%
                addLegend(labels = c("Mag = (6.0 ~ 6.9)", "Mag = (5.0 ~ 5.9)", "Mag = (4.0 ~ 4.9)"), colors = c("red", "orange", "blue"))
        }
    })
})
|
f64f5bc3dc0f2eaf807033d48813d9d9e27e7d8c | 1416a941d19862c1b6eef5745f1103c3a30ee2a0 | /tests/testthat/test-verb-arrange.R | 6a593032d8880231e370c069e9af712d1187f748 | [
"MIT"
] | permissive | shosaco/dbplyr | 2042cd755a99553b2e9c4d840ec1adbb5e44a837 | 2fbdddb3c11f9684d6fd9ced8f9b76b594c15b5d | refs/heads/master | 2021-07-21T05:19:36.759028 | 2020-09-21T13:47:24 | 2020-09-21T13:47:24 | 211,905,628 | 0 | 0 | NOASSERTION | 2019-09-30T16:34:27 | 2019-09-30T16:34:27 | null | UTF-8 | R | false | false | 1,981 | r | test-verb-arrange.R | context("arrange")
test_that("two arranges equivalent to one", {
  mf <- memdb_frame(x = c(2, 2, 1), y = c(1, -1, 1))
  # Consecutive arranges must collapse: the later arrange(x) becomes the
  # primary sort key over the earlier arrange(y).
  mf1 <- mf %>% arrange(x, y)
  mf2 <- mf %>% arrange(y) %>% arrange(x)
  expect_equal_tbl(mf1, mf2)
})

# sql_render --------------------------------------------------------------

test_that("quoting for rendering ordered grouped table", {
  # Grouping must not leak into the rendered SQL: only ORDER BY `y` appears,
  # and collecting still returns the original data.
  out <- memdb_frame(x = 1, y = 2) %>% group_by(x) %>% arrange(y) %>% ungroup()
  expect_match(out %>% sql_render, "^SELECT [*]\nFROM `[^`]*`\nORDER BY `y`$")
  expect_equal(out %>% collect, tibble(x = 1, y = 2))
})
# sql_build ---------------------------------------------------------------
test_that("arrange generates order_by", {
  # A plain arrange(x) becomes an ORDER BY on the quoted column.
  out <- lazy_frame(x = 1, y = 1) %>%
    arrange(x) %>%
    sql_build()
  expect_equal(out$order_by, sql('`x`'))
})

test_that("arrange converts desc", {
  # desc(x) must render as `x` DESC.
  out <- lazy_frame(x = 1, y = 1) %>%
    arrange(desc(x)) %>%
    sql_build()
  expect_equal(out$order_by, sql('`x` DESC'))
})

test_that("grouped arrange doesn't order by groups", {
  # By default, grouping variables are NOT prepended to the sort keys.
  out <- lazy_frame(x = 1, y = 1) %>%
    group_by(x) %>%
    arrange(y) %>%
    sql_build()
  expect_equal(out$order_by, sql('`y`'))
})

test_that("grouped arrange order by groups when .by_group is set to TRUE", {
  # With .by_group = TRUE the group key `x` leads the ORDER BY clause.
  lf <- lazy_frame(x = 1, y = 1, con = simulate_dbi())
  out <- lf %>%
    group_by(x) %>%
    arrange(y, .by_group = TRUE) %>%
    sql_build()
  expect_equal(out$order_by, sql(c('`x`','`y`')))
})
# ops ---------------------------------------------------------------------
test_that("arranges captures DESC", {
  # desc() must survive into the lazy op's stored sort expressions.
  out <- lazy_frame(x = 1:3, y = 3:1) %>% arrange(desc(x))
  sort <- lapply(op_sort(out), get_expr)
  expect_equal(sort, list(quote(desc(x))))
})
test_that("multiple arranges combine", {
  # Successive arranges accumulate their sort expressions in order.
  # Fix: the original built `out` twice (once piped, once nested) with the
  # first assignment immediately overwritten; the dead line is removed.
  out <- arrange(arrange(lazy_frame(x = 1:3, y = 3:1), x), y)
  sort <- lapply(op_sort(out), get_expr)
  expect_equal(sort, list(quote(x), quote(y)))
})
|
7b8fe374f11aed2b911f0781f30a0e0f5351c655 | 08b2fbbcae2905aa361001743e78784727369046 | /R/select_helpers.R | 963fcc93c24acaabc01766710d648e1a3a325781 | [
"MIT"
] | permissive | shijianasdf/gtsummary | d0bd29d40b31d64ba77c4f19ff4aa74e37d92b1e | a6b4bf41553a336223c0d2656135349a2bf1de98 | refs/heads/master | 2022-12-11T16:06:26.278085 | 2020-09-14T18:11:35 | 2020-09-14T18:11:35 | 295,585,427 | 2 | 0 | NOASSERTION | 2020-09-15T01:50:55 | 2020-09-15T01:50:54 | null | UTF-8 | R | false | false | 2,608 | r | select_helpers.R | #' Select helper functions
#'
#' Set of functions to supplement the {tidyselect} set of functions for selecting
#' columns of data frames. `all_continuous()`, `all_continuous2()`, `all_categorical()`, and
#' `all_dichotomous()` may only be used with `tbl_summary()`, where each variable
#' has been classified into one of these three groups. All other helpers
#' are available throughout the package.
#' @name select_helpers
#' @rdname select_helpers
#' @param dichotomous Logical indicating whether to include dichotomous variables.
#' Default is `TRUE`
#' @param continuous2 Logical indicating whether to include continuous2 variables.
#' Default is `TRUE`
#' @export
#' @return A character vector of column names selected
#' @examples
#' select_ex1 <-
#' trial %>%
#' select(age, response, grade) %>%
#' tbl_summary(
#' statistic = all_continuous() ~ "{mean} ({sd})",
#' type = all_dichotomous() ~ "categorical"
#' )
# THE ENVIRONMENTS ARE CREATED IN `utils-gtsummary_core.R`
all_continuous <- function(continuous2 = TRUE) {
  # Summary types that count as "continuous" for this call.
  wanted_types <- if (continuous2) c("continuous", "continuous2") else "continuous"
  # Return the names of the variables whose summary type matches.
  summary_types <- meta_data_env$summary_type
  names(summary_types[summary_types %in% wanted_types])
}
#' @rdname select_helpers
#' @export
all_continuous2 <- function() {
  # Names of the variables whose summary type is exactly "continuous2".
  summary_types <- meta_data_env$summary_type
  names(summary_types[summary_types %in% "continuous2"])
}
#' @rdname select_helpers
#' @export
all_categorical <- function(dichotomous = TRUE) {
  # Optionally fold dichotomous variables into the categorical set.
  wanted_types <- if (dichotomous) c("categorical", "dichotomous") else "categorical"
  # Return the names of the variables whose summary type matches.
  summary_types <- meta_data_env$summary_type
  names(summary_types[summary_types %in% wanted_types])
}
#' @rdname select_helpers
#' @export
all_dichotomous <- function() {
  # Names of the variables whose summary type is "dichotomous".
  summary_types <- meta_data_env$summary_type
  names(summary_types[summary_types %in% "dichotomous"])
}
# The helpers below return the (named) column positions flagged with the
# corresponding base type in data_env by the calling gtsummary function.
#' @rdname select_helpers
#' @export
all_numeric <- function() {
  which(data_env$numeric)
}
#' @rdname select_helpers
#' @export
all_character <- function() {
  which(data_env$character)
}
#' @rdname select_helpers
#' @export
all_integer <- function() {
  which(data_env$integer)
}
#' @rdname select_helpers
#' @export
all_double <- function() {
  which(data_env$double)
}
#' @rdname select_helpers
#' @export
all_logical <- function() {
  which(data_env$logical)
}
#' @rdname select_helpers
#' @export
all_factor <- function() {
  which(data_env$factor)
}
|
2c1ab2ed9f95fc3f4011226b5da7347ee21703df | f6dcb066042632979fc5ccdd6aa7d796d3191003 | /Problem Sets/Student Submissions/Problem Set 2/1952890/q1_template.R | 49812de959671d944cc7951cc0bef7c812757d97 | [] | no_license | NikoStein/ADS19 | 45301bcd68d851053399621dd8a0be784e1cc899 | 90f2439c6de8569f8a69983e0a605fd94e2e9f0a | refs/heads/master | 2020-05-19T19:10:12.411585 | 2020-03-12T00:02:14 | 2020-03-12T00:02:14 | 185,165,255 | 0 | 4 | null | 2019-08-06T06:16:30 | 2019-05-06T09:26:01 | HTML | UTF-8 | R | false | false | 5,196 | r | q1_template.R | # Problem Set 2
# Question 1
require(tidyverse)
require(rvest)
library(robotstxt)
# paths_allowed(c(url))
# robots.txt nicht bei jeder HTML-Seite hinterlegt, dann kommt HTTP-Status: 404
#######################################################################################################################################
# Mögliche Methoden, um unerwünschte \n und Leerzeichen zu entfernen:
# tags %>%
# str_remove('\n')
# quotes_df %>%
# mutate(tags = str_remove_all(tags, '\n'))
# str_remove_all('\n') %>%
# str_replace_all(' +',' ') -> tags # ' +' entfernt alle Spaces, die >= 2 mal vorkommen und ersetzt diese mit einem Leerzeichen
# authorDetails$month <- gsub("([A-Za-z]+).*", "\\1", authorDetails$author_borndate)
# str_split_fixed(authorDetails$author_borndate, " ")
# am einfachsten: str_trim() oder str_squish()
# str_trim() removes whitespace from start and end of string
# str_squish() also reduces repeated whitespace inside a string.
#######################################################################################################################################
# a) Use rvest to scrape the first 10 quotes. Return a data frame with 3 columns (author, tags,quotes).
# b) Use your function to collect all 100 quotes from the website and store them in a data frame (quotes).
# Base URL of the site we scrape.
url <- 'http://quotes.toscrape.com/'

# Scrape one listing page and return a data frame with columns
# quotes, author, tags (tags collapsed to one whitespace-squished string).
# (`<-` replaces the non-idiomatic `->` / `=` assignments throughout.)
getDataFromURL <- function(url) {
  rawData <- read_html(url)
  # quotes
  quotes <- rawData %>%
    html_nodes('.text') %>%
    html_text()
  # author
  author <- rawData %>%
    html_nodes('.author') %>%
    html_text()
  # tags
  tags <- rawData %>%
    html_nodes('.tags') %>%
    html_text() %>%
    str_squish()
  data.frame(quotes, author, tags)
}

# Append "page/<n>" to the base URL for pages 1-10.
# (paste() would insert an extra space; paste0() does not.)
urls <- paste0(url, 'page/', 1:10)
# Show the first 10 quotes (base url, without paste0).
quotesTest <- getDataFromURL(url)
# map_df() applies getDataFromURL to every element of urls and row-binds the results.
Hundret_quotes <- map_df(urls, getDataFromURL)
# c) Additionally, we want to collect more information on the authors.
# Therefore, your next task is to scrape the URLs of each author’s about-page.
# Base URL without the trailing slash (the scraped hrefs begin with "/").
url2 <- 'http://quotes.toscrape.com'

# Scrape one listing page and return a one-column data frame (author_url)
# holding the absolute URL of every author's about-page on that page.
# NOTE(review): relies on the global `url2` defined above.
getDataFromURLWithLinks <- function(url) {
  rawData <- read_html(url)
  # Author links are identified via the href attribute.
  links <- rawData %>%
    html_nodes('.quote span a') %>%
    html_attr("href")
  # Build the full link.
  author_url <- paste0(url2, links)
  data.frame(author_url)
}

getDataFromURLWithLinks(url)
# Walk all 10 listing pages.
urls <- paste0(url, 'page/', 1:10)
AlleLinks <- map_df(urls, getDataFromURLWithLinks)
# d) Write a function to scrape the content of an about-page returning a data frame (authorDetails)
# with 3 columns (author, description, bornDate).
# Apply the function to scrape all about-pages.
# Scrape a single about-page, returning a one-row data frame with the
# author's title, description, and born date.
getAuthorsWithDescription <- function(author_url) {
  AuthorData <- read_html(author_url)
  author_title <- AuthorData %>%
    html_nodes('.author-title') %>%
    html_text()
  author_description <- AuthorData %>%
    html_nodes('.author-description') %>%
    html_text()
  author_borndate <- AuthorData %>%
    html_nodes('.author-born-date') %>%
    html_text()
  data.frame(author_title, author_description, author_borndate)
}

# Scrape every about-page.
# Fix: the original `for (i in AlleLinks)` iterated over the *columns* of the
# data frame and overwrote authorDetails on each pass; mapping over the URL
# column directly is equivalent for this one-column frame and explicit.
authorDetails <- map_df(AlleLinks$author_url, getAuthorsWithDescription)
# Drop duplicate authors into a new data frame (authorDetailsUnique).
authorDetailsUnique <- authorDetails[!duplicated(authorDetails$author_title), ]
# e1) The authorDetails data frame stores the information on the birth data in one column (bornDate).
# Transform the data frame to store the information on the day, month and year in distinct columns.
authorDetailsUniqueWithDate <- authorDetailsUnique %>%
  separate(author_borndate, c("Month", "Day", "Year"))
# How many authors were born in the 19th century (1800-1899)?
# NOTE(review): `Year` is character after separate(), so the comparison below
# is lexicographic; it happens to work for four-digit years -- confirm.
author_count <- sum(authorDetailsUniqueWithDate$Year >= 1800 & authorDetailsUniqueWithDate$Year <= 1899, na.rm = TRUE)
sprintf("%s Autoren sind im 19. Jahrhuntert geboren", author_count)
# e2) Transform and summarize the quotes data set to answer the following questions:
# 1. Which author has the most quotes on the website?
quoteSummary <- as.data.frame(plyr::count(Hundret_quotes, "author"))
quoteSummary
# 2. How many quotes does an author have on average?
avg <- quoteSummary %>%
  summarise(avg = mean(freq))
sprintf("An author has %s quotes on average", avg)
# 3. Find all quotes that use the tag "life"
Hundret_quotes[grep("life", Hundret_quotes$tags), ]
# e3) Join both data frames (you may need to transform keys first)
# Hundret_quotes and authorDetailsWithDate
authorDetailsWithDate <- authorDetails %>%
  separate(author_borndate, c("Month", "Day", "Year"))
# Positional key: row i of each frame pairs quote i with its author row
# (authorDetails has one row per quote, so the positions line up).
authorDetailsWithDate$ID <- seq.int(nrow(authorDetailsWithDate))
Hundret_quotes$ID <- seq.int(nrow(Hundret_quotes))
MergeQuotes <- merge(Hundret_quotes, authorDetailsWithDate, by = 'ID')
|
61609ad1889bd3e0a15aa9ef6b623ee8fcd47603 | 8458a1896759bf1af507aec560a50be2c4f75d8a | /week_two/week_two_practice_problems_solutions.R | 409d5cf87d7be265e592e22503d3c17622c987cd | [] | no_license | seharjan/r-programming-and-rnaseq-workshop | 6bb5ee2cdfc1bc70e8d2e78fb915c7ce8e470b5b | 5461443190782a68420764813643178c3785bb4a | refs/heads/master | 2023-04-08T16:09:52.418883 | 2021-04-04T18:04:20 | 2021-04-04T18:04:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,771 | r | week_two_practice_problems_solutions.R | ### Problem One ###
# Create the vector fruit
fruit <- c('Apple', 'Orange', 'Passion fruit', 'Banana')
# prt 1
# Create the for statement: iterate over the elements directly
for ( i in fruit) {
  print(i)
}
# prt 2
# Same output, but looping over positions.
# NOTE(review): 1:length(x) misfires on empty vectors (it yields c(1, 0));
# seq_along(fruit) is the safe idiom.
for(var in 1:length(fruit)) {
  print(fruit[var])
}
# prt 3
# skip over the value "Orange" using `next`
for(var in 1:length(fruit)) {
  if (fruit[var] == 'Orange'){
    next
  }
  print(fruit[var])
}
# prt 4
# Cube_It(): raise x to the third power and print a readable sentence.
# The printed string (returned invisibly by print()) is the function's value.
Cube_It <- function(x) {
  cubed <- x^3
  sentence <- paste0("The Cube of ", x, " is ", cubed)
  print(sentence)
}

# Demonstrate the function on the number 42.
Cube_It(42)
### Problem Two ###
# You have a class with several students -- They just took an exam!
# Here are the results (run the code):
students <- c("Johnny", "Frank", "Timmy", "Alice", "Susan")
num_grades <- c(88, 90, 76, 98, 85)
names(num_grades) <- students
num_grades
# Problem: Convert the numeric grades (e.g., 85) to letter grades (e.g., "B")
# Assume standard conversions... 90+ is "A", 80-89 is "B", etc
# Loop through the vector of numeric grades. Convert to letter grades.
# Save the results in a new vector, "letter_grades"
# Finally, make a dataframe, "student_grades", with columns made of "letter_grades", "num_grades", and "students"
# Save "student_grades" as a csv file.
# Solution #
letter_grades <- c()
for (i in 1:length(num_grades)) {
grade <- num_grades[i]
if (grade >= 90) {
letter_grades[i] <- "A"
} else if (grade >= 80) {
letter_grades[i] <- "B"
} else if (grade >= 70) {
letter_grades[i] <- "C"
} else if (grade >= 60) {
letter_grades[i] <- "D"
} else {
letter_grades[i] <- "F"
}
}
student_grades <- data.frame(letter_grades, num_grades, students)
write.csv(student_grades, file = "student_grades.csv")
############
### Problem Two ###
.....
### Challenge Problem ###
# You just discovered some genes which are associated with cancer progression. Cool!
# However... you only know the Ensembl gene IDs (e.g., ENSG00000141510) of these genes...
# These IDs are hard to read, so you need to convert them to gene symbols (e.g., TP53)
# Ensembl IDs of the newly discovered genes of interest.
cancer_genes <- c("ENSG00000139618", "ENSG00000106462", "ENSG00000116288")
cancer_genes
# Problem: Make a function to convert Ensembl gene IDs to gene symbols.
# You have also been provided with a data.frame has the mapping of ids to symbols
# Lookup table: Ensembl ID -> gene symbol, one row per gene.
id2symbol <- data.frame(
  "Ensembl" = c("ENSG00000141510", "ENSG00000139618", "ENSG00000106462", "ENSG00000116288"),
  "gene_symbol" = c("TP53", "BRCA2", "EZH2", "PARK7")
)
id2symbol
# Requirements:
# 1. The function, "gene_id_converter", takes one argument, "gene_id"
# 2. The function must return the corresponding gene symbol
# Solution #
# Convert a single Ensembl gene ID to its gene symbol using a hard-coded
# four-gene lookup; an unknown ID yields character(0).
gene_id_converter <- function(gene_id) {
  ensembl_ids <- c("ENSG00000141510", "ENSG00000139618", "ENSG00000106462", "ENSG00000116288")
  symbols <- c("TP53", "BRCA2", "EZH2", "PARK7")
  # Keep the symbol(s) whose Ensembl ID matches the query.
  symbols[ensembl_ids == gene_id]
}
############
# Test -- this should output "TP53"
gene_id_converter(gene_id="ENSG00000141510")
### SUPER Challenge Problem ###
# Your gene_id_converter is working splendidly! But now you've got a new problem:
# Your colleague just sent you a ton of new genes to convert! None of these new
# genes are in your id2symbol data frame...
# Problem: Extend you gene_id_converter function to convert ANY ensembl gene id to a gene symbol
# To assist you, your advisor sent you a csv file that contains the mapping of all ids to symbols.
# This the mapping file and it's on Box
"gene_id_to_symbol.csv"
# Solution #
# Convert an Ensembl gene ID to a gene symbol using the full mapping file.
# Reads "gene_id_to_symbol.csv" from the working directory on every call;
# an unknown ID returns character(0).
gene_id_converter <- function(gene_id) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Conversion code
  result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene_id)]
  # Return result
  return(result)
}
############
# Test -- this should output "BRCA1"
gene_id_converter(gene_id="ENSG00000012048")
### ULTRA Challenge Problem ###
# Good work! Your converter is working perfectly! The only problem:
# You just got a new list of genes IDs from a collaborator.. and they're in Entrez format, not Ensembl!
# Fortunately, the csv file your advisor gave you also contains Entrez IDs!
entrez_genes_from_collaborator <- c(8243, 23657, 472)
entrez_genes_from_collaborator
# Problem: Extend you gene_id_converter function to convert Entrez gene ids to gene symbols.
# Requirement: Add a new argument, "type", which is used to specify whether the gene ID is "Entrez" or "Ensembl"
# Solution #
# Convert a gene ID to a symbol; `type` selects which ID column of the
# mapping file is matched ("Entrez" or "Ensembl").
# NOTE(review): any other `type` leaves `result` unassigned, so return()
# fails with the opaque "object 'result' not found" -- consider stop().
gene_id_converter <- function(gene_id, type) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Conversion code
  if (type == "Entrez") {
    result <- id2symbol$gene_symbol[which(id2symbol$Entrez == gene_id)]
  } else if (type == "Ensembl") {
    result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene_id)]
  }
  # Return result
  return(result)
}
############
# Test 1 -- this should output "BRCA1"
gene_id_converter(gene_id="ENSG00000012048", type = "Ensembl")
# Test 2 -- this should output "ATM"
gene_id_converter(gene_id=472, type = "Entrez")
### EXTREME Challenge Problem ###
# Perfecto! Only one problem: now we need to convert symbols to ids!
# Problem: extend the gene_id_converter function so it can convert symbols to gene IDs!
# Requirement: instead of "gene_id", it should now accept the argument "gene" as this
# argument can be either a gene id or a symbol.
# The "type" argument should now accept the option "gene_symbol"
# Solution #
# Convert in either direction: `type` = "Entrez"/"Ensembl" returns the gene
# symbol; `type` = "gene_symbol" returns the matching ID columns instead.
# NOTE(review): an unrecognized `type` leaves `result` unassigned (same
# failure mode as the two-type version above).
gene_id_converter <- function(gene, type) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Conversion code
  if (type == "Entrez") {
    result <- id2symbol$gene_symbol[which(id2symbol$Entrez == gene)]
  } else if (type == "Ensembl") {
    result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene)]
  } else if (type == "gene_symbol") {
    # For a symbol query, return the Entrez and Ensembl IDs (columns 1 and 2).
    result <- id2symbol[which(id2symbol$gene_symbol == gene), c(1, 2)]
  }
  # Return result
  return(result)
}
############
# Test 1 -- this should output "ENSG00000012048" and "672"
gene_id_converter(gene = "BRCA1", type = "gene_symbol")
# Test 2 -- this should output "BRCA1"
gene_id_converter(gene="ENSG00000012048", type = "Ensembl")
# Test 3 -- this should output "ATM"
gene_id_converter(gene=472, type = "Entrez")
### IMPOSSIBLE Challenge Problem ###
# Problem: extend the gene_id_converter function that it only needs one argument: "gene"
# Solution #
# Convert a single gene identifier of any supported kind (Entrez, Ensembl,
# or gene symbol) by auto-detecting which mapping column it appears in.
gene_id_converter <- function(gene) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Determine the type
  if (gene %in% id2symbol$Entrez) {
    type <- "Entrez"
  } else if (gene %in% id2symbol$Ensembl) {
    type <- "Ensembl"
  } else if (gene %in% id2symbol$gene_symbol) {
    type <- "gene_symbol"
  } else {
    # Fix: an unrecognized gene previously left `type` undefined and the
    # branch below died with the opaque "object 'type' not found".
    stop("gene '", gene, "' not found in gene_id_to_symbol.csv", call. = FALSE)
  }
  # Conversion code
  if (type == "Entrez") {
    result <- id2symbol$gene_symbol[which(id2symbol$Entrez == gene)]
  } else if (type == "Ensembl") {
    result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene)]
  } else if (type == "gene_symbol") {
    # For a symbol query, return the Entrez and Ensembl IDs (columns 1 and 2).
    result <- id2symbol[which(id2symbol$gene_symbol == gene), c(1, 2)]
  }
  # Return result
  return(result)
}
############
# Test 1 -- this should output "CDKN2A"
gene_id_converter(gene = "ENSG00000147889")
# Test 2 -- this should output "CDKN2A"
gene_id_converter(gene = 1029)
# Test 3 -- this should output ENSG00000147889 and 1029
gene_id_converter(gene = "CDKN2A")
### GIVE UP ###
# Problem: Extend the function to convert individual genes and a vector of genes!
# Requirement: to reflect this change, the function argument should now be "genes" instead of "gene"
# Requirement: the function must return results as a list with the names as the original "genes"
# Solution #
# Vectorised variant: convert each element of `genes` (any mix of Entrez,
# Ensembl, and symbol identifiers) and return a named list of results.
gene_id_converter <- function(genes) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Pre-size the result list: one slot per input gene.
  result_list <- vector("list", length(genes))
  # seq_along() replaces 1:length(genes), which would loop over c(1, 0)
  # for an empty input instead of not looping at all.
  for (i in seq_along(genes)) {
    # Get the current gene
    gene <- genes[i]
    # Determine the type
    if (gene %in% id2symbol$Entrez) {
      type <- "Entrez"
    } else if (gene %in% id2symbol$Ensembl) {
      type <- "Ensembl"
    } else if (gene %in% id2symbol$gene_symbol) {
      type <- "gene_symbol"
    } else {
      # Fix: an unrecognized gene previously left `type` undefined and the
      # conversion below failed with "object 'type' not found".
      stop("gene '", gene, "' not found in gene_id_to_symbol.csv", call. = FALSE)
    }
    # Conversion code
    if (type == "Entrez") {
      result <- id2symbol$gene_symbol[which(id2symbol$Entrez == gene)]
    } else if (type == "Ensembl") {
      result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene)]
    } else if (type == "gene_symbol") {
      result <- id2symbol[which(id2symbol$gene_symbol == gene), c(1, 2)]
    }
    # Add result to result_list
    result_list[[i]] <- result
  }
  # Add back in the original genes as the names
  names(result_list) <- genes
  # Return result
  return(result_list)
}
############
# Test -- this should output a list:
#
# $ENSG00000147889
# [1] "CDKN2A"
#
# $`8243`
# [1] "SMC1A"
#
# $TP53
# Entrez Ensembl
# 5973 7157 ENSG00000141510
#
gene_id_converter(genes = c("ENSG00000147889", 8243, "TP53"))
### GIVE UP challenge problem ###
# Problem: Same thing. But use lapply.
# Solution #
# lapply() variant of the vectorised converter; returns a named list with
# one element per input gene.
# NOTE(review): as in the loop version, a gene found in none of the three
# mapping columns leaves `type` undefined and errors opaquely.
gene_id_converter <- function(genes) {
  # Get mapping between IDs and symbols
  id2symbol <- read.csv(file = "gene_id_to_symbol.csv")
  # Use lapply instead of a for-loop
  result_list <- lapply(genes, function(gene) {
    # Determine the type
    if (gene %in% id2symbol$Entrez) {
      type <- "Entrez"
    } else if (gene %in% id2symbol$Ensembl) {
      type <- "Ensembl"
    } else if (gene %in% id2symbol$gene_symbol) {
      type <- "gene_symbol"
    }
    # Conversion code: the value of this if-chain (the assigned `result`)
    # is the anonymous function's return value.
    if (type == "Entrez") {
      result <- id2symbol$gene_symbol[which(id2symbol$Entrez == gene)]
    } else if (type == "Ensembl") {
      result <- id2symbol$gene_symbol[which(id2symbol$Ensembl == gene)]
    } else if (type == "gene_symbol") {
      result <- id2symbol[which(id2symbol$gene_symbol == gene), c(1, 2)]
    }
  })
  # Add back in the original genes as the names
  names(result_list) <- genes
  # Return result
  return(result_list)
}
############
# Test -- this should output a list:
#
# $ENSG00000147889
# [1] "CDKN2A"
#
# $`8243`
# [1] "SMC1A"
#
# $TP53
# Entrez Ensembl
# 5973 7157 ENSG00000141510
#
gene_id_converter(genes = c("ENSG00000147889", 8243, "TP53"))
|
f6414e4af54bc14653e5da6f4f16922c2b1c7719 | 5870f3e6fb5898c3ee1a1b92d051a8baca53d887 | /Lab_Working_Files/Lab_1/Lab_1_Finished/SSC442_Lab_1.R | 1f14d708a0f3af9f0e7e6b9fe7d118f18c1bdd17 | [] | no_license | lathamri/SSC442-Baseball_Regression | 963b536bc97a6f74ddac8ec4dca979d3fe1d9b01 | 0e98285f79582632a0ae741f7d32f81b959b0739 | refs/heads/master | 2020-12-08T03:05:56.569895 | 2020-04-28T20:24:50 | 2020-04-28T20:24:50 | 232,866,377 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,716 | r | SSC442_Lab_1.R | library(tidyverse)
library(ggplot2)
library(gapminder)
library(scales)
ggplot2::mpg
# typing geom_ will show all possible options of plots from ggplot2
# generates gapminder data set
head(gapminder)
# binds our variables of interest, gpdpercap and life exp
p = ggplot(data=gapminder,
mapping=aes(x=gdpPercap, y=lifeExp))
# generates a scatter plot for p
p + geom_point()
# binds our variables of interest, engine size and highway milage
q = ggplot(data=mpg,
mapping=aes(x=displ, y=hwy, color=class))
# generates a scatter plot for q
q + geom_point()
# It generates what you'd expect, as engine size increases you would
# likely see highway milages decrease as larger engines usually belong
# to larger vehicles which have lower highway milages.
# binds our variables of interest, class and drive type
r = ggplot(data=mpg,
mapping=aes(x=class, y=drv))
# generates a scatter plot of r
r + geom_point()
# This isn't a helpful plot because of the data bindings. It simply shows
# cars that have front, rear, or 4-wheel drive by class which doesn't tell
# us anyting intersting about our data. Perhaps a count of cars fitting into
# these categories would be more insightful.
p + geom_point() + geom_smooth()
?geom_smooth()
# Running different methods and aes for smooth
p + geom_point() + geom_smooth(method='gam')
p + geom_point() + geom_smooth(method='auto')
# this is the best for small data sets but is computationally inefficient
p + geom_point() + geom_smooth(method='loess', colour='red')
# log scaling to spread the data better
p + geom_point() +
geom_smooth(method='lm') +
scale_x_log10(labels = scales::dollar)
# scale_x_log10 description:
# The data appears to be logarithmic because of the grouping so we apply a log
# scale to the data to better visualize it. What ends up happening is instead
# of using a linear scale we change the x axis to use a logarithmic scale
# so that our data appears more linear without loss of generality.
?dollar
?scales
# This call changes our labels on the x-axis to dollars instead of the current
# scientific notation that comes with scale_x_log10.
# Other label changes can be applied according to the scales documentation.
# Some examples include byte size, dates, percents and more.
# Redefines p variables
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp, color = 'yellow'))
p + geom_point() + scale_x_log10()
# Our mapping aes changes the color value for the data p but this is
# then looked as a data descriptor (obviously it's not). Rhe color
# is determined in the geom calls so we have to set the color value there.
# Interestingly the color call in mapping defines our data label as whatever you set
# the value to be, in this case yellow but any string works.
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp))
p + geom_point(colour='yellow') + scale_x_log10()
# The above changes fix the issue and craft yellow data points.
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp))
p + geom_point() +
geom_smooth(color = "orange", se = FALSE, size = 8, method = "lm") +
scale_x_log10()
# Color sets our approx. line to orange, se=False turns on conf. intervals,
# size sets the thickness of the line to 8 and method creates
# a linear model of the data.
p + geom_point(alpha = 0.3) +
geom_smooth(method = "gam") +
scale_x_log10(labels = scales::dollar) +
labs(x = "GDP Per Capita", y = "Life Expectancy in Years",
title = "Economic Growth and Life Expectancy",
subtitle = "Data Points are country-years",
caption = "Source: Gapminder")
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp, color = continent, fill = continent))
p + geom_point()
p + geom_point() + scale_x_log10(labels = dollar)
p + geom_point() + scale_x_log10(labels = dollar) + geom_smooth()
# fill=continent is sorting the data by continent and applying unique colors to
# each point in the data and the smoothing lines as well.
# Personally I think it's a little messy and maybe silly to have the data share
# exact colors and slightly more translucent confidence intervals. I would prefer
# to change the alpha values of our dots to better allow us to see the trend lines.
# I've plotted this below but it still needs tweaks to capture the best
# interpretation for the dataset.
p + geom_point(alpha=0.3) + scale_x_log10(labels = dollar) + geom_smooth()
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp))
p + geom_point(mapping = aes(color = continent)) + geom_smooth() + scale_x_log10()
# This code is giving us one regression line because of the mapping to
# color continent being assigned in the geom_point call. Coloring by
# continenet is a direct effect of the data and must be applied in the data
# section when we assign data to our p object.
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp))
p + geom_point(mapping = aes(color = continent)) +
geom_smooth(mapping = aes(color = continent, fill = continent)) +
scale_x_log10() +
geom_smooth(mapping = aes(color = continent), method = "gam")
# Instead of assiging aes mappings for each geom we're better off assigning
# them all at once in our first point so they stick to the data rather
# than the geom. The following function demonstrates the difference.
# It's a much more readable function with the same graph.
p <- ggplot(data = gapminder,
mapping = aes(x = gdpPercap, y = lifeExp, color=continent, fill=continent))
p + geom_point() +
geom_smooth() +
scale_x_log10() +
geom_smooth(method = "gam")
|
21f114baa0bce0d30bc473f45d1fd1ada2cf90fa | 1a0f32c9ceab460326da48b823669ea2f1a64f72 | /man/remove_open_brackets.Rd | 4facaceadb8585b27a9f88ad8bac0f617959b730 | [] | no_license | kieranjmartin/getfunctionargs | b141fee17fa254b3a87bcfbc1e559c6cee73570b | 8a4efba283dbacaa8484fe522cb10194fe67b8d2 | refs/heads/master | 2020-03-22T20:12:53.495424 | 2018-07-13T13:58:33 | 2018-07-13T13:58:33 | 140,581,867 | 2 | 1 | null | 2018-07-13T13:58:34 | 2018-07-11T13:50:41 | R | UTF-8 | R | false | true | 339 | rd | remove_open_brackets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_open_close.R
\name{remove_open_brackets}
\alias{remove_open_brackets}
\title{remove_open_brackets}
\usage{
remove_open_brackets(stringin)
}
\arguments{
\item{stringin}{string to remove brackets from}
}
\description{
Function to remove opening brackets
}
|
a770342d5746153c610edd78435fa3cec3e0a84d | 68c08f993b6db072e64a9618f14aa0eef24025f3 | /unittests.R | 60f983a9e3e8f8d83d8a0932bc968ef5f11ae409 | [] | no_license | operdeck/polyhedra | e475d7cbc506907a3d3ee8384f25f0a6e2a800f8 | f4dd69c1dea48baa12075d680396f8140497837d | refs/heads/master | 2020-06-18T03:15:14.160366 | 2020-02-11T15:49:06 | 2020-02-11T15:49:06 | 196,147,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 328 | r | unittests.R | library(testthat)
source("utils.R")
source("geometry.R")
source("polyhedra.R")
source("layout.R")
source("tests/test_utils.R")
source("tests/test_geometry.R")
source("tests/test_polyhedra.R")
source("tests/test_draw.R")
source("tests/test_layout.R")
test_results <- test_dir("tests", reporter="summary")
#print(test_results) |
48516986e6ab173bd3bfeaa0b46ebc14eb4f5623 | e7600a22232d22511f3a77ce1da5f89d6a5a6440 | /src/recount/pca/irlba_noProj_noScaled_over100_noSingle.R | df876a344f3207b046764df795838eb51abdbae3 | [] | no_license | SuLab/expression_map | aba55c8d21e65fa4b000144126352e8df373e83a | 72e266417f114c4f2a1094419622aabd4f40765e | refs/heads/master | 2021-10-11T21:19:24.791487 | 2019-01-29T23:23:34 | 2019-01-29T23:23:34 | 110,739,279 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,495 | r | irlba_noProj_noScaled_over100_noSingle.R | library(irlba)
library(bigmemory)
library(bigalgebra)
library(Rcpp)
matmul <- function(A,B,transpose=FALSE){
if(is.null(dim(B))) B <- cbind(B)
if(is.null(dim(A))) A <- rbind(A)
if(transpose)
return(cbind((t(B) %*% A)[]))
## print(typeof(A))
## print(typeof(B))
## print(dim(A))
## print(dim(B))
cbind((A %*% B)[])
}
setwd('/gpfs/group/su/lhgioia/map')
tcga.tpm <- as.matrix(readRDS('data/recount/tcga/tpm_tcga.RDS'))
gtex.tpm <- as.matrix(readRDS('data/recount/gtex/tpm_gtex.RDS'))
## tpm.mat <- rbind(tcga.tpm,gtex.tpm)
tpm.mat <- matrix(0,nrow=70603,ncol=58037)
cnt <- 1
tpm.mat[cnt:nrow(tcga.tpm),] <- tcga.tpm
cnt <- cnt + nrow(tcga.tpm)
tpm.rownames <- rownames(tcga.tpm)
tpm.mat[cnt:(cnt + nrow(gtex.tpm)-1),] <- gtex.tpm
cnt <- cnt + nrow(gtex.tpm)
tpm.rownames <- c(tpm.rownames,rownames(gtex.tpm))
over50.projs <- readRDS('data/recount/recount_entries_over100_noSingle.RDS')
## for(file.id in list.files('data/recount/project_cnts')){
for(file.id in over50.projs){
file.tpm <- as.matrix(readRDS(sprintf('data/recount/project_cnts/%s/gene_counts_tpm.RDS',file.id)))
tpm.mat[cnt:(cnt + nrow(file.tpm) - 1),] <- file.tpm
cnt <- cnt + nrow(file.tpm)
tpm.rownames <- c(tpm.rownames,rownames(file.tpm))
## tpm.mat <- rbind(tpm.mat,file.tpm)
}
print(cnt)
print(dim(tpm.mat))
tpm.mat <- tpm.mat[1:length(tpm.rownames),]
print(dim(tpm.mat))
rownames(tpm.mat) <- tpm.rownames
## print('making big matrix')
## big.data <- as.big.matrix(tpm.mat,backingfile='all_tpm_noProj.bin',descriptorfile='all_tpm_noProj.desc',backingpath='/gpfs/group/su/lhgioia/map/results/recount/pca/tmp')
## for(i in 1:ncol(big.data)){
## big.data[,i] - big.data[,i] - mean(big.data[,i])
## }
for(i in 1:ncol(tpm.mat)){
tpm.mat[,i] - tpm.mat[,i] - mean(tpm.mat[,i])
}
## print('attaching big matrix')
## big.data <- attach.big.matrix('/gpfs/group/su/lhgioia/map/results/recount/pca/tmp/all_tpm_noProj.desc')
print ('beginning pca')
gene.pca <- irlba(tpm.mat,nv=250,nu=0,mult=matmul)
saveRDS(gene.pca,'/gpfs/group/su/lhgioia/map/results/recount/pca/recount_all_big_irlba_250_over100_noSingle.RDS')
## gene.pca <- readRDS('/gpfs/group/su/lhgioia/map/results/recount/pca/recount_all_big_irlba.RDS')
rot.gene.dat <- as.matrix(tpm.mat %*% gene.pca$v)
rownames(rot.gene.dat) <- rownames(tpm.mat)
saveRDS(rot.gene.dat,'/gpfs/group/su/lhgioia/map/results/recount/pca/recount_250_dim_noScaled_noProj_over100_noSingle.RDS')
|
4810ecf792ad49363696471b7f0603773e81b997 | 1f2429c4ee1ae623374f6d3fbecbc056b4ac5526 | /man/validate_canvas_size.Rd | 4997edca9ad4ea72d52cbc21a5c65af2705cf31a | [
"BSD-2-Clause"
] | permissive | nsh87/receptormarker | 1c47d9a6c461b41afd0a9d30fe7cddffd20e7ffd | 4f2c1e9e45b2c94da2c4a56f639b9ef1c1fec6dd | refs/heads/master | 2022-08-31T04:25:05.025729 | 2022-08-21T19:34:06 | 2022-08-21T19:34:06 | 35,985,771 | 4 | 9 | BSD-2-Clause | 2021-11-30T21:08:12 | 2015-05-21T02:18:12 | JavaScript | UTF-8 | R | false | true | 458 | rd | validate_canvas_size.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/radial_phylo.R
\name{validate_canvas_size}
\alias{validate_canvas_size}
\title{Validate a radial phylogram's canvas size}
\usage{
validate_canvas_size(canvas_size)
}
\arguments{
\item{canvas_size}{A \code{canvas_size} from the \code{\link{radial_phylo}}
function.}
}
\description{
An internal function that creates an error if \code{canvas_size}
is invalid.
}
\keyword{internal}
|
07b716483cc2e4d5c687e370ad13f027086e1a3b | f821f8d86e036b4bd2efdb4558410b147f46b8cd | /R/dm_plotPrecision.R | b96c7344c9fe5394dd7bd778e4f203f7f7c4c059 | [] | no_license | gosianow/DRIMSeq_before_BioC3.6 | 7e7d88c7b7e6dd795f73f3a02467780127937de6 | 5c55e9136c304e65f731f6d3fb1620f99fd23636 | refs/heads/master | 2021-06-16T06:27:25.714680 | 2017-05-24T15:15:29 | 2017-05-24T15:15:29 | 26,443,709 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,258 | r | dm_plotPrecision.R | #' @importFrom ggplot2 ggplot aes_string theme_bw xlab ylab theme element_text
#' guides guide_colorbar scale_colour_gradient geom_point geom_hline
#' @importFrom stats quantile na.omit
dm_plotPrecision <- function(genewise_precision, mean_expression,
nr_features = NULL, common_precision = NULL, low_color = "royalblue2",
high_color = "red2", na_value_color = "red2"){
if(!is.null(nr_features)){
df <- data.frame(mean_expression = log10(mean_expression + 1),
precision = log10(genewise_precision), nr_features = nr_features)
df_quant <- min(quantile(na.omit(df$nr_features), probs = 0.95), 30)
breaks <- seq(2, df_quant, ceiling(df_quant/10))
ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision",
colour = "nr_features" )) +
theme_bw() +
xlab("Log10 of mean expression") +
ylab("Log10 of precision") +
geom_point(alpha = 0.7, na.rm = TRUE) +
theme(axis.text = element_text(size=16),
axis.title = element_text(size=18, face="bold"),
legend.title = element_text(size=16, face="bold"),
legend.text = element_text(size = 14),
legend.position = "top") +
guides(colour = guide_colorbar(barwidth = 20, barheight = 0.5)) +
scale_colour_gradient(limits = c(2, max(breaks)),
breaks = breaks, low = low_color, high = high_color,
name = "Number of features", na.value = na_value_color)
}else{
df <- data.frame(mean_expression = log10(mean_expression + 1),
precision = log10(genewise_precision))
ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision")) +
theme_bw() +
xlab("Log10 of mean expression") +
ylab("Log10 of precision") +
geom_point(size = 1, alpha = 0.4, na.rm = TRUE) +
theme(axis.text = element_text(size=16),
axis.title = element_text(size=18, face="bold"),
legend.title = element_text(size=16, face="bold"),
legend.text = element_text(size = 14),
legend.position = "top")
}
if(!is.null(common_precision)){
ggp <- ggp + geom_hline(yintercept = log10(common_precision),
colour = "black", linetype = "dashed")
}
return(ggp)
}
|
15d9b7589124018b8da773b701e9eb5589c7e1ec | a5be19737a57491c0dfbe41d068558542b2d5e10 | /inst/doc/jordan.R | 9337e2d53dff8acca20aa34eee1cbce0edc56f05 | [] | no_license | cran/jordan | 02ed2dfc77ae1c2d23b7b37016a24d019ce6ee87 | 576f96a2d484e0f60d1a451a465ea6a7984c4380 | refs/heads/master | 2023-03-29T00:43:24.948503 | 2021-04-08T10:00:02 | 2021-04-08T10:00:02 | 355,965,712 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,740 | r | jordan.R | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
library("jordan")
## -----------------------------------------------------------------------------
x <- rrsm() # "Random Real Symmetric Matrix"
y <- rrsm()
z <- rrsm()
x
## -----------------------------------------------------------------------------
x*100
x + y*3
x + 100
## -----------------------------------------------------------------------------
x*y
## -----------------------------------------------------------------------------
x*(y+z) - (x*y + x*z)
## -----------------------------------------------------------------------------
LHS <- x*(y*z)
RHS <- (x*y)*z
LHS-RHS
## -----------------------------------------------------------------------------
LHS <- (x*y)*(x*x)
RHS <- x*(y*(x*x))
LHS-RHS
## -----------------------------------------------------------------------------
M1 <- as.1matrix(x[1])
(M2 <- as.1matrix(x[2]))
## -----------------------------------------------------------------------------
(M1 %*% M2 + M2 %*% M1)/2 - as.1matrix(x[1]*x[2])
## -----------------------------------------------------------------------------
jj <- as.1matrix(x[1]*x[2])
jj-t(jj)
## -----------------------------------------------------------------------------
as.1matrix(rqhm(n=1,d=2))
## -----------------------------------------------------------------------------
x <- rqhm()
y <- rqhm()
(x*y)*(x*x) - x*(y*(x*x))
## -----------------------------------------------------------------------------
I <- rspin()
J <- rspin()
K <- rspin()
I
I*J - J*I # commutative:
I*(J+K) - (I*J + I*K) # distributive:
I*(J*K) - (I*J)*K # not associative:
(I*J)*(I*I) - I*(J*(I*I)) # Obeys the Jordan identity
## -----------------------------------------------------------------------------
x <- ralbert()
y <- ralbert()
x
(x*y)*(x*x)-x*(y*(x*x)) # Jordan identity:
## ----label=define_U_and_diff--------------------------------------------------
U <- function(x){function(y){2*x*(x*y)-(x*x)*y}}
diff <- function(x,y,z){
LHS <- U(x)(U(y)(U(x)(z)))
RHS <- U(U(x)(y))(z)
return(LHS-RHS) # zero if Jacobson holds
}
## ----label=jacobsonverification,cache=TRUE------------------------------------
diff(ralbert(),ralbert(),ralbert()) # Albert algebra obeys Jacobson:
diff(rqhm(),rqhm(),rqhm()) # Quaternion Jordan algebra obeys Jacobson:
diff(rspin(),rspin(),rspin()) # spin factors obey Jacobson:
## ----label=defBH8G8-----------------------------------------------------------
B <- function(x,y,z){2*(x*(y*z) + (x*y)*z - (x*z)*y)} # bracket function
H8 <- function(x,y,z){B(U(x)(U(y)(z)),z,x*y) - U(x)(U(y)(U(z)(x*y)))}
G8 <- function(x,y,z){H8(x,y,z)-H8(y,x,z)}
## ----label=verifyG8special,cache=TRUE-----------------------------------------
G8(rqhm(1),rqhm(1),rqhm(1)) # Quaternion Jordan algebra obeys G8:
G8(rspin(1),rspin(1),rspin(1)) # Spin factors obey G8:
## ----cache=TRUE---------------------------------------------------------------
G8(ralbert(1),ralbert(1),ralbert(1)) # Albert algebra does not obey G8:
## ----label=defineH9G9---------------------------------------------------------
L <- function(x){function(y){x*y}}
U <- function(x){function(y){2*x*(x*y)-(x*x)*y}}
U2 <- function(x,y){function(z){L(x)(L(y)(z)) + L(y)(L(x)(z)) - L(x*y)(z)}}
H9 <- function(x,y,z){2*U(x)(z)*U2(y,x)(U(z)(y*y)) - U(x)(U(z)(U2(x,y)(U(y)(z))))}
G9 <- function(x,y,z){H9(x,y,z)-H9(y,x,z)}
## ----verifyG9identity,cache=TRUE----------------------------------------------
G9(rqhm(1),rqhm(1),rqhm(1)) # Quaternion Jordan algebra obeys G9:
## ----albertH9G9,cache=TRUE----------------------------------------------------
G9(ralbert(1),ralbert(1),ralbert(1)) # Albert algebra does not obey G9:
|
c44ea3b19e779c6e3aecebc37def65a386252cf6 | 37fcfce951487d3ba45d2ba3dbc22b6360939b77 | /R/xis_ecmo.R | eabf9ee01b4ec4195cd3c66a99b84ad93f8d5100 | [] | no_license | abresler/nbastatR | 576155fb58378c03010590c81806515161c03eb5 | aba9179ef644f263387c1536d6ddd26104d79cf4 | refs/heads/master | 2023-08-08T08:52:05.149224 | 2023-07-19T13:09:21 | 2023-07-19T13:09:21 | 43,844,510 | 307 | 94 | null | 2023-02-01T16:59:22 | 2015-10-07T21:00:59 | R | UTF-8 | R | false | false | 2,190 | r | xis_ecmo.R | .curl_chinazi <-
function(url = "https://stats.nba.com/stats/leaguegamelog?Counter=1000&Season=2019-20&Direction=DESC&LeagueID=00&PlayerOrTeam=P&SeasonType=Regular%20Season&Sorter=DATE") {
headers = c(
`Connection` = 'close',
`Accept` = 'application/json, text/plain, */*',
`x-nba-stats-token` = 'true',
`User-Agent` = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
`x-nba-stats-origin` = 'stats',
`Sec-Fetch-Site` = 'same-origin',
`Sec-Fetch-Mode` = 'cors',
`Referer` = 'https://downwiththechinazis.com',
`Accept-Encoding` = 'gzip, deflate, br',
`Accept-Language` = 'en-US,en;q=0.9'
)
headers <- c(
`Host` = 'stats.nba.com',
`User-Agent` = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv =72.0) Gecko/20100101 Firefox/72.0',
`Accept` = 'application/json, text/plain, */*',
`Accept-Language` = 'en-US,en;q=0.5',
`Accept-Encoding` = 'gzip, deflate, br',
`x-nba-stats-origin` = 'stats',
`x-nba-stats-token` = 'true',
`Connection` = 'keep-alive',
`Referer` = 'https =//stats.nba.com/',
`Pragma` = 'no-cache',
`Cache-Control` = 'no-cache'
)
# headers = c(
# `Connection` = 'close',
# `Pragma` = 'no-cache',
# `Cache-Control` = 'no-cache',
# `DNT` = '1',
# `Upgrade-Insecure-Requests` = '1',
# `User-Agent` = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.29 Safari/537.36',
# `Sec-Fetch-User` = '?1',
# `Accept` = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
# `Sec-Fetch-Site` = 'cross-site',
# `Sec-Fetch-Mode` = 'navigate',
# `Referer` = 'https://downwiththechinazis.com',
# `Accept-Encoding` = 'gzip, deflate, br',
# `Accept-Language` = 'en-US,en;q=0.9'
# )
res <-
GET(url,
add_headers(.headers = headers))
json <-
res$content %>%
rawToChar() %>%
fromJSON(simplifyVector = T)
json
}
|
fe6bc5afb1a7677be4b81d55ba456031184d5813 | 7f6b92db5250b7939431fbd073cb9e40cbc7c5ad | /tests/testthat/test_constructor.R | f0f6ef70c809c447b50ee02df94cfd19314a3a39 | [] | no_license | kevinbenac/clusterExperiment | 30af008e8680907d7bd6095f4a8b535cc3ca1668 | fb70816bfea3f0eda13508c38d4a3caedfb993e9 | refs/heads/master | 2020-03-11T16:51:29.042619 | 2018-04-17T15:09:08 | 2018-04-17T15:09:08 | 130,130,171 | 0 | 0 | null | 2018-04-18T22:45:40 | 2018-04-18T22:45:40 | null | UTF-8 | R | false | false | 14,082 | r | test_constructor.R | context("Constructor")
test_that("saved rsecFluidigm is still valid object", {
data(rsecFluidigm)
expect_true(validObject(rsecFluidigm))
})
test_that("`ClusterExperiment` constructor works with matrix and SummarizedExperiments and SingleCellExperiment", {
expect_error(ClusterExperiment(mat), "is missing, with no default")
expect_error(ClusterExperiment(mat,as.numeric(numLabels), transformation=log), info="Error checking transFun")
expect_error(ClusterExperiment(mat, numLabels[1:2]),
"must be a matrix of rows equal")
expect_error(ClusterExperiment(as.data.frame(mat), numLabels),
"unable to find an inherited method for function")
#test character input
expect_silent(ccChar<-ClusterExperiment(mat, chLabels))
expect_is(primaryCluster(ccChar),"numeric")
expect_is(primaryClusterNamed(ccChar),"character")
expect_equal(sort(unique(primaryClusterNamed(ccChar))),sort(unique(chLabels)))
#test factor input
expect_silent(ClusterExperiment(mat, numLabels))
expect_is(cc, "ClusterExperiment")
expect_is(cc, "SummarizedExperiment")
expect_equal(nSamples(cc),ncol(mat))
expect_equal(nFeatures(cc),nrow(mat))
expect_equal(nClusterings(cc),2)
expect_equal(NCOL(clusterMatrix(ccSE)), NCOL(labMat))
expect_equal(colData(ccSE),colData(se))
expect_equal(rownames(ccSE),rownames(se))
expect_equal(colnames(ccSE),colnames(se))
expect_equal(metadata(ccSE),metadata(se))
expect_equal(rowData(ccSE),rowData(se))
expect_output(show(ccSE))
expect_silent(ccSCE<-ClusterExperiment(sce,as.numeric(numLabels)))
###Need to add things specific to sce, but first need to build a sce object with new things.
})
test_that("whichClusters works with clusterMatrix",{
expect_silent(x<-dim(clusterMatrix(ceSim)))
expect_equal(dim(clusterMatrix(ceSim,whichClusters="all")),x)
expect_equal(ncol(clusterMatrix(ceSim,whichClusters="workflow")),12)
expect_equal(ncol(clusterMatrix(ceSim,whichClusters=1:3)),3)
expect_equal(ncol(clusterMatrix(ceSim,whichClusters="dendro")),0)
})
test_that("adding clusters work as promised",{
##########
#addClusterings
expect_silent(c1 <- addClusterings(ccSE, rep(c(-1, 1, 2), each=5),clusterTypes="newUser"))
###Check retain SE info
expect_equal(colData(c1),colData(se))
expect_equal(rownames(c1),rownames(se))
expect_equal(colnames(c1),colnames(se))
expect_equal(metadata(c1),metadata(se))
expect_equal(rowData(c1),rowData(se))
#Other checks
expect_equal(NCOL(clusterMatrix(c1)), nClusterings(ccSE)+1)
expect_equal(unname(clusterTypes(c1)), unname(c(clusterTypes(ccSE),"newUser")))
expect_equal(length(clusteringInfo(c1)), nClusterings(ccSE)+1)
expect_equal(primaryCluster(c1), primaryCluster(ccSE))
primaryClusterIndex(c1) <- 3
expect_false(all(primaryCluster(c1)==primaryCluster(ccSE)))
####check adding a ClusterExperiment to existing CE
expect_error(addClusterings(ccSE,smSimCE),"Cannot merge clusters from different data") #assays don't match
expect_silent(c3<-addClusterings(ccSE,ccSE))
expect_equal(NCOL(clusterMatrix(c3)), nClusterings(ccSE)*2)
expect_equal(length(clusterTypes(c3)), nClusterings(ccSE)*2)
expect_equal(length(clusteringInfo(c3)), nClusterings(ccSE)*2)
expect_equal(primaryCluster(c3), primaryCluster(ccSE))
###Check retain SE info
expect_equal(colData(c3),colData(se))
expect_equal(rownames(c3),rownames(se))
expect_equal(colnames(c3),colnames(se))
expect_equal(metadata(c3),metadata(se))
expect_equal(rowData(c3),rowData(se))
#Other checks after adding CE object
expect_error(clusterLabels(c3)[1:2]<-c("User","User"),"cannot have duplicated clusterLabels")
clusterLabels(c3)[1:2]<-c("User1","User2")
clusterLabels(c3)[1]<-"User4"
expect_error(clusterLabels(c3)[1]<-"User2","cannot have duplicated clusterLabels")
expect_equal(length(clusterLabels(c3)),nClusterings(ccSE)*2)
###check adding matrix of clusters
expect_silent(c4<-addClusterings(ccSE,clusterMatrix(ccSE),clusterTypes="New"))
expect_silent(newLeng<-2*nClusterings(ccSE))
expect_equal(NCOL(clusterMatrix(c4)), newLeng)
expect_equal(length(clusterTypes(c4)), newLeng)
expect_equal(length(clusteringInfo(c4)), newLeng)
expect_equal(primaryCluster(c4), primaryCluster(ccSE))
###Check retain SE info
expect_equal(colData(c4),colData(se))
expect_equal(rownames(c4),rownames(se))
expect_equal(colnames(c4),colnames(se))
expect_equal(metadata(c4),metadata(se))
expect_equal(rowData(c4),rowData(se))
})
test_that("removing clusters work as promised",{
##########
#check removeClusterings
#single cluster
expect_silent(c4<-addClusterings(ccSE,clusterMatrix(ccSE),clusterTypes="New"))
expect_silent(c5<-removeClusterings(c4,1))
expect_equal(NCOL(clusterMatrix(c5)), nClusterings(c4)-1)
expect_equal(length(clusterTypes(c5)), nClusterings(c4)-1)
expect_equal(length(clusteringInfo(c5)), nClusterings(c4)-1)
expect_equal(primaryCluster(c4), primaryCluster(removeClusterings(c4,2)))
###Check retain SE info
expect_equal(colData(c5),colData(se))
expect_equal(rownames(c5),rownames(se))
expect_equal(colnames(c5),colnames(se))
expect_equal(metadata(c5),metadata(se))
expect_equal(rowData(c5),rowData(se))
#vector clusters
expect_silent(c6<-removeClusterings(c4,c(1,3)))
expect_equal(NCOL(clusterMatrix(c6)), nClusterings(c4)-2)
expect_equal(length(clusterTypes(c6)), nClusterings(c4)-2)
expect_equal(length(clusteringInfo(c6)), nClusterings(c4)-2)
###Check retain SE info
expect_equal(colData(c6),colData(se))
expect_equal(rownames(c6),rownames(se))
expect_equal(colnames(c6),colnames(se))
expect_equal(metadata(c6),metadata(se))
expect_equal(rowData(c6),rowData(se))
expect_error(removeClusterings(c4,c(1,nClusterings(c4)+1)),"invalid indices")
expect_silent(c7<-removeClusterings(c4,"User")) #two have "user" label
expect_equal(NCOL(clusterMatrix(c7)), nClusterings(c4)-2)
expect_equal(length(clusterTypes(c7)), nClusterings(c4)-2)
expect_equal(length(clusteringInfo(c7)), nClusterings(c4)-2)
##########
###Check removing a cluster assignment
##########
c1<-ccSE
cL<-clusterLegend(c1)
ind<-primaryClusterIndex(c1)
cL[[ind]][,"name"]<-letters[1:nrow(cL[[ind]])]
clusterLegend(c1)<-cL
rmCl<-c(2,3)
rmNms<-cL[[ind]][cL[[ind]][,"clusterIds"] %in% as.character(rmCl),"name"]
x<-removeClusters(c1,whichCluster="primary",clustersToRemove=rmCl)
expect_equal(sum(primaryCluster(x)==-1), sum(primaryCluster(c1) %in% c(-1,2,3)))
newnms<-clusterLegend(x)[[primaryClusterIndex(x)]][,"name"]
old<-clusterLegend(x)[[ind]]
oldnms<-old[!old[,"name"] %in% rmNms,"name"]
expect_equal(oldnms,newnms)
x<-removeClusters(c1,whichCluster="primary",clustersToRemove=rmNms)
expect_equal(sum(primaryCluster(x)==-1), sum(primaryCluster(c1) %in% c(-1,2,3)))
newnms<-clusterLegend(x)[[primaryClusterIndex(x)]][,"name"]
old<-clusterLegend(x)[[ind]]
oldnms<-old[!old[,"name"] %in% rmNms,"name"]
expect_equal(oldnms,newnms)
##########
#check removing unclustered samples
##########
#no -1 in primary cluster
matTemp<-abs(labMat)
expect_silent(ccTemp<-ClusterExperiment(mat,matTemp,transformation=function(x){x}))
expect_equal(ccTemp, removeUnclustered(ccTemp))
###This is giving me error with new SCE class, but once I put in browser to check it out, works!!! Some kind of unloadNamespace problem?
#-1 in primary cluster
whUn<-which(primaryCluster(ccSE) <0)
expect_silent(ccR<-removeUnclustered(ccSE))
expect_equal(NCOL(ccR), NCOL(ccSE)-length(whUn))
###Check retain SE info
expect_equal(colData(ccR),colData(se[,-whUn]) )
expect_equal(rownames(ccR),rownames(se))
expect_equal(colnames(ccR),colnames(se[,-whUn]))
expect_equal(metadata(ccR),metadata(se))
expect_equal(rowData(ccR),rowData(se))
})
test_that("subsetting works as promised",{
expect_equal(clusterMatrix(cc[1:2,1]),clusterMatrix(cc)[1,,drop=FALSE])
expect_equal(clusterMatrix(cc[1:2,-c(1, 2)]),clusterMatrix(cc)[-c(1, 2),])
#test subsetting of genes
expect_equal(clusterMatrix(cc[1:2,c(1, 2)]),clusterMatrix(cc)[c(1, 2),])
expect_equal(dim(cc[1:2,c(1, 2)]),c(2,2))
#test subsetting of samples
expect_equal(clusterMatrix(cc[,c(1, 2)]),clusterMatrix(cc)[c(1, 2),])
logVec<-rep(FALSE,length=nSamples(cc))
logVec[1:2]<-TRUE
expect_equal(clusterMatrix(cc[,logVec]),clusterMatrix(cc)[logVec,])
expect_equal(clusterMatrix(cc[,c("Sample 1" , "Sample 2")]),clusterMatrix(cc)[c(1, 2),])
##########
#checks SE info (which isn't biggest place needs unit tests since uses callNextMethod() so should work)
#therefore currently just really checks no errors.
expect_silent(x1<-ccSE[,5:8])
###Check retain SE info
expect_equal(colData(x1),colData(se[,5:8]) )
expect_equal(rownames(x1),rownames(se[,5:8]))
expect_equal(colnames(x1),colnames(se[,5:8]))
expect_equal(metadata(x1),metadata(se[,5:8]))
expect_equal(rowData(x1),rowData(se[,5:8]))
expect_silent(x2<-ccSE[1:2,])
###Check retain SE info
expect_equal(colData(x2),colData(se[1:2,]) )
expect_equal(rownames(x2),rownames(se[1:2,]))
expect_equal(colnames(x2),colnames(se[1:2,]))
expect_equal(metadata(x2),metadata(se[1:2,]))
expect_equal(rowData(x2),rowData(se[1:2,]))
expect_silent(x3<-ccSE[,3])
###Check retain SE info
expect_equal(colData(x3),colData(se[,3]) )
expect_equal(rownames(x3),rownames(se[,3]))
expect_equal(colnames(x3),colnames(se[,3]))
expect_equal(metadata(x3),metadata(se[,3]))
expect_equal(rowData(x3),rowData(se[,3]))
expect_silent(x4<-ccSE[3,])
###Check retain SE info
expect_equal(colData(x4),colData(se[3,]) )
expect_equal(rownames(x4),rownames(se[3,]))
expect_equal(colnames(x4),colnames(se[3,]))
expect_equal(metadata(x4),metadata(se[3,]))
expect_equal(rowData(x4),rowData(se[3,]))
expect_equal(clusterExperiment:::filterStats(sceSimData[1:10,]),head(clusterExperiment:::filterStats(sceSimData),10))
})
test_that("check clusterLegend manipulations work as promised", {
  # Round-tripping the legend through its replacement method is a no-op.
  legendOrig <- clusterLegend(cc)
  expect_silent(clusterLegend(cc) <- legendOrig)

  # Object with two extra clusterings so that legend subsets can be replaced.
  expect_silent(
    ceExtra <- addClusterings(ccSE, clusterMatrix(ccSE),
      clusterTypes = "New",
      clusterLabels = c("new1", "new2"),
      clusterLegend = clusterLegend(ccSE)
    )
  )
  expect_silent(clusterLegend(ceExtra)[1:2] <- legendOrig[1:2])
  expect_silent(clusterLegend(ceExtra)[[1]] <- legendOrig[[1]])

  # Assigning a legend matrix with the wrong number of rows must error.
  expect_error(
    clusterLegend(ceExtra)[3] <- list(legendOrig[[1]][1:2, ]),
    "each element of `clusterLegend` must be matrix with"
  )
  expect_error(
    clusterLegend(ceExtra)[[3]] <- legendOrig[[1]][1:2, ],
    "must be matrix with"
  )

  ##########
  # Passing clusterLegend directly to the constructor reproduces `cc`.
  ##########
  rebuilt <- ClusterExperiment(as(cc, "SingleCellExperiment"),
    clusters = clusterMatrix(cc),
    clusterLegend = clusterLegend(cc)
  )
  expect_equal(cc, rebuilt)

  # Rename the clusters, feed the resulting name matrix back in together with
  # a legend carrying fresh names, and check the constructor keeps the
  # supplied name/color columns.
  editedLegend <- clusterLegend(cc)
  editedLegend[[1]][, "name"] <- letters[1:nrow(editedLegend[[1]])]
  ccRenamed <- cc
  clusterLegend(ccRenamed) <- editedLegend
  nameMatrix <- convertClusterLegend(ccRenamed, output = "matrixNames")
  editedLegend[[1]][, "clusterIds"] <- editedLegend[[1]][, "name"]
  editedLegend[[1]][, "name"] <- LETTERS[1:nrow(editedLegend[[1]])]
  rebuilt <- ClusterExperiment(as(cc, "SingleCellExperiment"),
    clusters = nameMatrix,
    clusterLegend = editedLegend
  )
  expect_equal(
    clusterLegend(rebuilt)[[1]][, c("name", "color")],
    editedLegend[[1]][, c("name", "color")]
  )
})
test_that("accessing transformed data works as promised", {
  # The stored transformation must come back as a callable function.
  expect_is(transformation(ccSE), "function")
  # transformData should agree with manually applying that function
  # to the raw assay matrix.
  expect_equal(transformData(cc), transformation(cc)(assay(cc)))
})
test_that("workflow functions work",
{
##########
#check workflow stuff
##########
# workflowClusters() should only return clusterings belonging to the current
# workflow iteration; older iterations (suffixed ".1", ".2", ...) are hidden
# unless iteration=NA is given.
expect_silent(ppC<-addClusterings(cc,cbind(rep(c(-1, 1,2), each=5),rep(c(2, 1,3), each=5)),clusterTypes=c("clusterMany","mergeClusters")))
expect_equal(dim(workflowClusters(ppC)),c(nSamples(cc),2))
expect_silent(ppC<-addClusterings(cc,cbind(rep(c(-1, 1,2), each=5)),clusterTypes=c("clusterMany")))
expect_equal(dim(workflowClusters(ppC)),c(nSamples(cc),1))
# "mergeClusters.1" is an old-iteration type, so only one current clustering
# is visible; iteration=NA exposes both.
expect_silent(ppC<-addClusterings(cc,cbind(rep(c(-1, 1,2), each=5),rep(c(2, 3,1), each=5)),clusterTypes=c("clusterMany","mergeClusters.1")))
expect_equal(dim(workflowClusters(ppC)),c(nSamples(cc),1))
expect_equal(dim(workflowClusters(ppC,iteration=NA)),c(nSamples(cc),2))
# `cc` has no workflow clusterings at all, so NULL is expected.
expect_null(workflowClusters(cc,iteration=NA))
# Running combineMany twice: the first result should be demoted to
# "combineMany.1" while the second keeps the current type/label.
expect_message(ceNew<-combineMany(ceSim,proportion=0.7))
expect_message(ceNew<-combineMany(ceNew,proportion=0.3,clusterLabel="combineMany,v2"))
expect_equal(clusterLabels(ceNew)[1:2],c("combineMany,v2","combineMany.1"))
expect_equal(clusterTypes(ceNew)[1:2],c("combineMany","combineMany.1"))
# setToCurrent promotes an old iteration back to current, demoting the
# previously-current clustering (incrementing its iteration suffix).
expect_silent(ceNew2<-setToCurrent(ceNew,whichCluster="combineMany.1"))
expect_equal(clusterLabels(ceNew2)[1:2],c("combineMany,v2","combineMany"))
expect_equal(clusterTypes(ceNew2)[1:2],c("combineMany.2","combineMany"))
expect_silent(ceNew3<-setToCurrent(ceNew2,whichCluster="combineMany.2"))
expect_equal(clusterLabels(ceNew3)[1:2],c("combineMany,v2","combineMany.3"))
expect_equal(clusterTypes(ceNew3)[1:2],c("combineMany","combineMany.3"))
# setToFinal marks a clustering as type "final", relabels it, and makes it
# the primary clustering (index points at the chosen column).
expect_silent(ceNew4<-setToFinal(ceNew,whichCluster="combineMany,v2",clusterLabel="Final Version"))
expect_equal(primaryClusterIndex(ceNew4),1)
expect_equal(clusterLabels(ceNew4)[primaryClusterIndex(ceNew4)],"Final Version")
expect_equal(clusterTypes(ceNew4)[primaryClusterIndex(ceNew4)],"final")
# Same, but selecting the old-iteration clustering in column 2.
expect_silent(ceNew5<-setToFinal(ceNew,whichCluster="combineMany.1",clusterLabel="Final Version"))
expect_equal(primaryClusterIndex(ceNew5),2)
expect_equal(clusterLabels(ceNew5)[primaryClusterIndex(ceNew5)],"Final Version")
expect_equal(clusterTypes(ceNew5)[primaryClusterIndex(ceNew5)],"final")
})
34ae402f05a3fbb8e662f12d7e2fd0787313957e | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125770-test.R | 34b9971537edf43e4a5185a115aa9b096aafac3e | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | 1610125770-test.R | testlist <- list(rates = numeric(0), thresholds = c(7.0962053319034e-304, 3.87659826574302e-311, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = numeric(0))
# Replay the fuzzer-generated argument list (`testlist`, built above) against
# the target function and dump the structure of whatever it returns, so a
# crash or type change is visible in the harness output.
result <- do.call(grattan::IncomeTax,testlist)
str(result)
013c1fdee12c6dd7bc18cf0166ea7a305092fede | 2ad722dc0d6bb83593360ab88f0be65681e3aff7 | /R/range.R | f6811e673941776cfedb57ab7f4034533f1c87d2 | [] | no_license | glsnow/TeachingDemos | 30d3932285fa3f5666d703f6d71d56b6cd126c4c | c0474cc2825a7ff34211769a393025ce41eee485 | refs/heads/master | 2023-01-07T14:56:19.475878 | 2020-06-30T22:00:39 | 2020-06-30T22:00:39 | 198,704,527 | 0 | 1 | null | 2023-01-03T10:36:28 | 2019-07-24T20:20:48 | R | UTF-8 | R | false | false | 1,041 | r | range.R | `%<%` <- function(x,y) {
# Body of the chained "less than" operator `%<%` (definition opens on the
# preceding line). Each operand is either a plain vector, or the logical
# result of an earlier comparison in the chain carrying the underlying
# values in the attributes 'orig.x' (left values) / 'orig.y' (right values).
xx <- attr(x,'orig.y')
yy <- attr(y,'orig.x')
if(is.null(xx)) {
# Plain left operand: compare its own values; its mask is all-TRUE.
xx <- x
x <- rep(TRUE, length(x))
}
if(is.null(yy)) {
# Plain right operand: same treatment.
yy <- y
y <- rep(TRUE, length(y))
}
# AND the accumulated masks with this link's comparison (recycling applies
# when a scalar bound meets a vector).
out <- x & y & (xx < yy)
# Stash both operands' values so further chain links can pick them up.
attr(out, 'orig.x') <- xx
attr(out, 'orig.y') <- yy
out
}
`%<=%` <- function(x, y) {
  # Chained "less than or equal": an operand is either a plain vector, or the
  # logical result of an earlier chain link carrying the underlying values in
  # the attributes 'orig.x' (left values) / 'orig.y' (right values).
  left_vals <- attr(x, 'orig.y')
  right_vals <- attr(y, 'orig.x')
  if (is.null(left_vals)) {
    # Plain left operand: compare its own values, mask is all-TRUE.
    left_vals <- x
    left_mask <- rep(TRUE, length(x))
  } else {
    left_mask <- x
  }
  if (is.null(right_vals)) {
    # Plain right operand: same treatment.
    right_vals <- y
    right_mask <- rep(TRUE, length(y))
  } else {
    right_mask <- y
  }
  # Combine the accumulated masks with this link's comparison
  # (recycling applies when a scalar bound meets a vector).
  result <- left_mask & right_mask & (left_vals <= right_vals)
  # Stash both operands' values for any further links in the chain.
  attr(result, 'orig.x') <- left_vals
  attr(result, 'orig.y') <- right_vals
  result
}
# --- Demonstration / informal tests of the chained comparison operators ---
x <- -3:3
-2 %<% x %<% 2
# c() drops the helper attributes, leaving just the logical mask.
c( -2 %<% x %<% 2 )
x[ -2 %<% x %<% 2 ]
x[ -2 %<=% x %<=% 2 ]
# Random data: a vector can also sit in the middle of the chain.
x <- rnorm(100)
y <- rnorm(100)
x[ -1 %<% x %<% 1 ]
range( x[ -1 %<% x %<% 1 ] )
cbind(x,y)[ -1 %<% x %<% y %<% 1, ]
# These explicit groupings reproduce the flat chain above...
cbind(x,y)[ (-1 %<% x) %<% (y %<% 1), ]
cbind(x,y)[ ((-1 %<% x) %<% y) %<% 1, ]
cbind(x,y)[ -1 %<% (x %<% (y %<% 1)), ]
# ...but this grouping compares against the wrong stored values — a known
# limitation of the attribute-passing scheme.
cbind(x,y)[ -1 %<% (x %<% y) %<% 1, ] # oops
c086d3e87d5b987293266dea5f86367186bba6dd | 64fb59f5a4fd596edb756efb966c8ce4607f930b | /render_map/ui.R | 26b811354823f552c6ff18ad321a7500031fe967 | [] | no_license | misrori/szechenyi_map | cc801fde45e14528d95a231a7416b35b0bef8b06 | 6b2346b7521bbc9137a01ab781c23358388505e2 | refs/heads/master | 2021-01-20T17:12:52.762750 | 2017-05-12T11:27:07 | 2017-05-12T11:27:07 | 90,867,808 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 460 | r | ui.R | library(rgdal)
library(data.table)
library(geojsonio)
library(leaflet)
library(shiny)
# UI definition: a dynamic page title, a leaflet map of counties ("megye"),
# two plotly summary charts side by side in one row, and a text output
# ('ezclik') fed by the server in response to map clicks.
shinyUI(fluidPage(
div(h1(textOutput('ezenvagyok'), align = "center")),
hr(),
leafletOutput("megye"),
hr(),
fluidRow(
# NOTE(review): plotlyOutput() comes from the plotly package, which is not
# loaded in this file — confirm it is attached in server.R or global.R.
column(6, plotlyOutput('summary_plot', height = 250), plotlyOutput('summary_plot2', height = 250))
),
textOutput('ezclik')
# end of fluidPage content
))
|
dbf9d62d281517c5140e4d9734dcc13414e83be6 | 64df258d5c7cff7bdf5ee76b7e169c49bbd397d3 | /sig_vis_mat.R | a8ef282040ed9310e9c7bef57333a96ccc0c0d2e | [] | no_license | jpalowitch/configModel | 5663652f12dd57f15630a3513f55cce122bd3657 | d13d4712eddb4d2f7cd876490a1d24c71eec4ee1 | refs/heads/master | 2021-01-20T03:11:56.860290 | 2017-04-26T17:10:55 | 2017-04-26T17:10:55 | 89,507,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | sig_vis_mat.R | sig_vis_mat <- function (G, membership) {
# Body of sig_vis_mat(G, membership) (definition opens on the preceding
# line): builds a K x K matrix of community-to-community connectivity
# statistics, where entry [i, j] is the mean (over nodes in community j) of
# the degree-normalized excess of edges from community i.
# NOTE(review): degree()/get.adjacency() look like igraph functions — confirm
# igraph is attached by the caller.
degs <- degree(G)
# Communities are assumed to be labeled 1..K — TODO confirm membership coding.
K <- max(membership)
comms <- lapply(1:K, function (i) which(membership == i))
sigmat <- matrix(0, ncol = K, nrow = K)
Gadj <- as.matrix(get.adjacency(G))
# dT: total degree over the whole graph, used for the null-model expectation.
dT <- sum(degs)
for (i in 1:K) {
# duB: per-node edge counts into community i.
# NOTE(review): if a community has exactly one node, Gadj[comms[[i]], ]
# drops to a vector and colSums() would error — confirm callers never
# produce singleton communities.
duB <- colSums(Gadj[comms[[i]], ])
# Expected edges into community i under a degree-based null model.
means <- degs * sum(degs[comms[[i]]]) / dT
# Observed-minus-expected, scaled by community size.
stats <- (duB - means) / length(comms[[i]])
# Average the per-node statistic within each community -> row i.
sigmat[i, ] <- tapply(stats, membership, mean)
}
return(sigmat)
}
fe21d66922fe23790170815165432b061505d620 | 53da081a6f1e43dbe889d4f89c55f908f64aac24 | /man/select_group.Rd | cfbbd15d42f35bf10d9aae261d922770d884f07a | [
"MIT"
] | permissive | potterzot/kgRainPredictR | c02b92928a072cb4447d7e4a7f9cb30a2c609b8e | 66c71bbfd6632093e93a7d3f63424db9f4fa9fba | refs/heads/master | 2021-01-10T04:39:29.968767 | 2015-10-07T20:37:05 | 2015-10-07T20:37:05 | 43,226,530 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 674 | rd | select_group.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{select_group}
\alias{select_group}
\title{Select a group of ids from a large data file.}
\usage{
select_group(dat, group_var, s, columns, num_chunks, chunk_size = 10000)
}
\arguments{
\item{dat}{the data.}
\item{group_var}{the group variable.}
\item{s}{vector of group identifiers to be selected.}
\item{columns}{the columns to return, or NULL for all.}
\item{num_chunks}{number of chunks to use.}
\item{chunk_size}{number of rows in each chunk. Overridden by \code{num_chunks}}
}
\value{
selected data.
}
\description{
Select a group of ids from a large data file.
}
|
b7fc6d155cf004ceaa0c28a299cab92a76a6cc8e | 530eea15263a914e4bed5e14291fda1b07939232 | /packages/fsProjAlzOptimalDiet/man/prepareModelConfMatrix.Rd | 69cd9456b5a0005855868f3da572acc142c91d94 | [] | no_license | seventm/fsproj_alzheimeroptimaldiet | cc3dddfcf79f00df5ae62bbdb7c558d6061b2c9e | ba0b3643e13999735bae57f5f2754db08399ff7f | refs/heads/master | 2020-11-23T23:21:05.141878 | 2019-05-24T10:32:20 | 2019-05-24T10:32:20 | 227,861,901 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 478 | rd | prepareModelConfMatrix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{prepareModelConfMatrix}
\alias{prepareModelConfMatrix}
\title{Prepare model configuration matrix}
\usage{
prepareModelConfMatrix(model.conf, params.matrix)
}
\arguments{
\item{model.conf}{list model configuration}
\item{params.matrix}{matrix with params}
}
\value{
matrix with model configuration
}
\description{
Prepare model configuration matrix
}
\author{
Konrad J. Debski
}
|
9f44f187743ecf853abca3e8ff2eac1cba907b35 | c93321125b04eb0f89cc536d0e19676916726d03 | /Shiny_App/Two_Means/bayesEst.R | 20586a83cb2cbb1c0bec0c10b853c042dc009e84 | [
"MIT"
] | permissive | melissa-wong/Stats_Examples | 32ddf1cb1e51e299e283a05d22d2d0cc61f08017 | 5ff94899deb307634059bd5967bc9c7c41320787 | refs/heads/master | 2023-06-02T12:53:36.374478 | 2021-06-21T12:58:49 | 2021-06-21T12:58:49 | 186,298,099 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,515 | r | bayesEst.R | library(BEST)
library(tidyverse)
library(gridExtra)
# Bayesian estimation plots for a two-group comparison (BEST model).
# Runs BESTmcmc on the two samples twice (prior-only and full posterior),
# reshapes the MCMC draws for the group means and standard deviations into
# long format, and draws prior-vs-posterior density overlays faceted by
# population, stacked as two panels (means on top, SDs below).
# Args:
#   samplesX, samplesY: numeric vectors of observations for the two groups
#     (passed straight to BEST::BESTmcmc).
# Side effects: draws on the current graphics device via grid.arrange.
bayesPlots <- function(samplesX, samplesY)
{
# Prior-only draws (doPriorsOnly = TRUE) vs. full posterior draws.
priors <- BESTmcmc(samplesX, samplesY, doPriorsOnly = TRUE)
posteriors <- BESTmcmc(samplesX, samplesY)
# Long-format draws of the two group means, tagged by distribution.
priorMeans <- priors %>%
select(c(mu1, mu2)) %>%
rename(X=mu1, Y=mu2) %>%
pivot_longer(cols=c(X,Y), names_to = "population", values_to = "val") %>%
mutate(param = "Mean", dist = "prior")
postMeans <- posteriors %>%
select(c(mu1, mu2)) %>%
rename(X=mu1, Y=mu2) %>%
pivot_longer(cols=c(X,Y), names_to = "population", values_to = "val") %>%
mutate(param = "Mean", dist="posterior")
# Overlaid prior/posterior densities of the means, one facet per population.
plotMeans <- rbind(priorMeans, postMeans) %>%
ggplot(mapping=aes(val, colour=dist, fill=dist)) +
geom_density(alpha=0.1) +
facet_grid( ~ population) +
labs(title="Mean")
# Same reshaping for the group standard deviations.
priorSd <- priors %>%
select(c(sigma1, sigma2)) %>%
rename(X=sigma1, Y=sigma2) %>%
pivot_longer(cols=c(X,Y), names_to = "population", values_to = "val") %>%
mutate(param = "Std. Dev.", dist="prior")
postSd <- posteriors %>%
select(c(sigma1, sigma2)) %>%
rename(X=sigma1, Y=sigma2) %>%
pivot_longer(cols=c(X,Y), names_to = "population", values_to = "val") %>%
mutate(param = "Std. Dev.", dist="posterior")
plotSDs <- rbind(priorSd, postSd) %>%
ggplot(mapping=aes(val, colour=dist, fill=dist)) +
geom_density(alpha=0.1) +
facet_grid( ~ population) +
labs(title="Standard Deviation")
# Stack the two panels vertically on the current device.
grid.arrange(plotMeans, plotSDs, ncol=1)
}
|
d90acb0ccca8678fdd8fcd1ee0e90d2e8a7977de | 079d0060b881b0efad6e02d0e3b089eb409f7063 | /pseudotime_analysis/tradeseq_slingshot.r | 671eb65d067979d4427d923dbd39438139b48178 | [
"MIT"
] | permissive | howchihlee/covid_brain_sc | dcfc0f923cc4265b3175364d0281301d5b5c6e7e | 7b548d810426290a3815bcd2ce6edaaeafc024cb | refs/heads/master | 2023-07-03T23:38:42.455680 | 2021-08-17T15:52:43 | 2021-08-17T15:52:43 | 382,200,749 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,873 | r | tradeseq_slingshot.r | library(BiocParallel)
library(tradeSeq)
library(mgcv)
library(clusterExperiment)
library(slingshot)
# --- Load data -----------------------------------------------------------
# Raw gene-expression counts (genes in columns after transposition below),
# per-cell metadata with a `leiden` cluster column, and a PCA embedding.
exprs = read.csv('./data/mic_gene_expression_raw.csv', row.names = 1)
counts = as.matrix(exprs)
meta = read.csv('./data/mic_diff_meta.csv')
rd = read.csv('./mic_diff_pca.csv')
# Keep genes whose total count exceeds half the number of cells.
ind_gene = colSums(counts) > (0.5 * nrow(counts) )
counts = counts[,ind_gene]
# --- Trajectory inference ------------------------------------------------
# Slingshot on the first two PCs, seeded with the leiden clusters; use the
# pseudotime of the first lineage.
sds <- slingshot(rd[,1:2], meta$leiden)
pseudotime = slingPseudotime(sds)[,1]
#aicK = evaluateK(t(counts), sds = sds, BPPARAM = BiocParallel::bpparam())
# Fit per-gene GAMs along pseudotime (all cells weighted equally).
sce <- fitGAM(counts = t(counts), pseudotime = pseudotime,
cellWeights = rep(1, nrow(counts)), verbose = TRUE,
parallel=TRUE, BPPARAM = BiocParallel::bpparam())
# --- Association with pseudotime -----------------------------------------
assoRes <- associationTest(sce)
assoRes$fdr = p.adjust(assoRes$pvalue, 'fdr')
head(assoRes)
saveRDS(assoRes, 'mic_sling_output/pseudotime_association.rds')
# Significant genes (FDR < 0.05), ordered by raw p-value.
sigRes = assoRes[assoRes$fdr < 0.05,]
degs = rownames(sigRes[order(sigRes$pvalue),])
# --- Cluster smoothed expression patterns --------------------------------
nPointsClus = 20
clusPat = clusterExpressionPatterns(sce, nPoints = nPointsClus,
genes = degs, clusterFunction = "hierarchical01",
ncores = 20,
)
write.csv(clusPat$yhatScaled,'mic_sling_output/yhat_scaled.csv')
# Split genes into monotone-decreasing, monotone-increasing, and the rest,
# based on the sign of successive differences of the smoothed curves.
dn_curves = (which(colSums(diff(t(clusPat$yhatScaled), ) <= 0, ) == (nPointsClus - 1) ))
up_curves = (which(colSums(diff(t(clusPat$yhatScaled), ) >= 0, ) == (nPointsClus - 1) ))
not_monotone = setdiff(1:length(degs), dn_curves)
not_monotone = setdiff(not_monotone, up_curves)
not_monotone_genes = rownames(clusPat$yhatScaled)[not_monotone]
# Re-cluster only the non-monotone genes.
clusPat_nm = clusterExpressionPatterns(sce, nPoints = nPointsClus,
genes = not_monotone_genes,
clusterFunction = "hierarchical01",
ncores = 20,
)
clusterLabels_nm <- primaryCluster(clusPat_nm$rsec)
# NOTE(review): self-assignment below is a no-op — presumably leftover from
# an edit; confirm no transformation was intended here.
clusterLabels_nm = clusterLabels_nm
# Save 0-based cluster labels for the non-monotone genes.
out = data.frame(cluster = clusterLabels_nm - 1 )
rownames(out) = rownames(clusPat_nm$yhatScaled)
write.csv(out, './mic_sling_output/clusterLabel_nm.csv')
write.csv(clusPat_nm$yhatScaled, './mic_sling_output/yhat_scaled_nm.csv')
#merged_label = seq(length(degs))
# Merge labels: non-monotone genes keep their cluster id; monotone-decreasing
# and monotone-increasing genes get the two highest ids.
merged_label = rep(-1, length(degs))
merged_label[not_monotone] = clusterLabels_nm - 1
merged_label[dn_curves] = max(clusterLabels_nm)
merged_label[up_curves] = max(clusterLabels_nm) + 1
out = data.frame(cluster = merged_label )
rownames(out) = rownames(clusPat$yhatScaled)
write.csv(out, './mic_sling_output/clusterLabel.csv')
# NOTE(review): this rewrites the same yhat_scaled.csv already written above
# with identical content — confirm the duplicate write is intentional.
write.csv(clusPat$yhatScaled, './mic_sling_output/yhat_scaled.csv')
# --- Smoothed-trajectory plots for selected genes ------------------------
pdf('fig/decreasing_genes_smooth_traj.pdf')
genes = c('MT.CO1', 'MT.ND4', 'CSMD1')
for (g in genes){
#print(plotSmoothers(sce, t(counts), gene = g))
print(plotSmoothers(sce, t(counts), gene = g))
}
dev.off()
|
750128d9374ca5d2d652451b0c26b227e04d4897 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/DQBF-TRACK-2018/E1+A1/Experiments/scholl_C499.blif_0.20_1.00_9_2_henkin/scholl_C499.blif_0.20_1.00_9_2_henkin.R | 4454c8b991ada54ea7968cfb25979de7d8acb0a2 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | r | scholl_C499.blif_0.20_1.00_9_2_henkin.R | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2507
c
c Performing A1-Autarky iteration.
c Running Lingeling ...
c
c Remaining clauses count after A-Reduction: 2507
c
c Input Parameter (command line, file):
c input filename dqbf18//scholl_C499.blif_0.20_1.00_9_2_henkin.dqdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 879
c no.of clauses 2507
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2507
c
c dqbf18//scholl_C499.blif_0.20_1.00_9_2_henkin.dqdimacs 879 2507 E1+A1 [] 0 68 811 NONE
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.