blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8014d33fe017a6263b844b775338ff9720f8a703 | 0748e639c10a783c8791fb9a8e768fc57c549286 | /cachematrix.R | 0e8aeeccbe1e9763b91268e009afa0b35f88a773 | [] | no_license | ikpunk/ProgrammingAssignment2 | 72e97d47191896157e35b0769e4f6fb7aa175b1e | 80d75e4e98f8add96c6da2d953b890454ebdbd75 | refs/heads/master | 2022-12-22T01:06:51.472538 | 2020-09-26T13:15:46 | 2020-09-26T13:15:46 | 298,346,756 | 0 | 0 | null | 2020-09-24T17:20:39 | 2020-09-24T17:20:38 | null | UTF-8 | R | false | false | 1,330 | r | cachematrix.R | ## this one is really close to the "mean" exapmle in the instructions but it works
## this function makes a list which contains 4 elements which are functions those set or get the matrix and its inverse
## Build a cache-enabled matrix wrapper: a list of four accessor
## closures (set/get the matrix, set/get its cached inverse). The
## matrix `x` and the cached inverse live in this function's
## environment, so the cache persists between accessor calls.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get <- function() x

  # Store a freshly computed inverse in the cache.
  setinverse <- function(inverse) cached_inverse <<- inverse

  # Return the cached inverse (NULL until one has been cached).
  getinverse <- function() cached_inverse

  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function gets the getinverse inside of the list if it is not NULL
## If it is NULL "solve" function calculates the inverse of the matrix
## Return the inverse of the cache-enabled "matrix" produced by
## makeCacheMatrix(). A previously cached inverse is reused (with a
## status message); otherwise the inverse is computed via solve(),
## stored back through setinverse(), and returned. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute, store, then fall through to the return value.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
2b3456ad7178b639a61bc645d33646a1bde52de6 | 54d3a2509f7406c12be67877bf91a11d92628917 | /Econ591/data/membership.r | d5d43a1797d4132520d0c3c4927e5aae1d939d36 | [] | no_license | ochampo/Business-Plan | f07821a2b5a595937af23f87ba4e5d8553962ec2 | a2fa670563d686859dd10a6be745af643bb9fa26 | refs/heads/master | 2021-09-01T18:39:04.665430 | 2017-12-28T08:34:58 | 2017-12-28T08:34:58 | 114,740,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,472 | r | membership.r | install.packages("dslabs")
install.packages("dplyr")
install.packages("tidyverse")
install.packages("psych")
install.packages("ggplot2")
library(dplyr)
library(tidyverse)
library(ggplot2)
library(psych)
#rm(gapminder)
#CSUS2011 <- read.clipboard.tab()
#CSUS2011 <-food
GrowingMarket <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/GrowingMarket.csv", header=TRUE, sep= ",")
View(GrowingMarket)
GrowingMarket[1,]
ggplot(data = GrowingMarket) +
geom_bar(mapping = aes(x = Healthclubs.Gym, y = Membership), stat = "identity")
#ggsave("/home/o/ocampod/fall2017/cs390-ochampoo/Data-Analytics/lab7/quest5.png")
#grow Gym membership
GrowingMarket <- mutate( GrowingMarket, percentage = Membership/Population*100)
######## Growing
######## Start Up Cost #### 4 Month to setup cost
startup <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/4month.csv", header=TRUE, sep= ",")
starupcost4 <- mutate( startup, RandomCost = 10000)
starupcost4 <- mutate(starupcost4 , startupcost4 =CTO + COO + CMO + CFO + Rent + RandomCost+SBA.Loan)
starupcost4
sum(starupcost4$startupcost4)
View(startupcost4)
#### according to my data we should start off with 201,772 in intial cost.
#### Rev is 90,000k we open up Jan
### 201,772 +
################ 4 Month Startup Cost ###########
###### Calculating Total monthly Cost ################
View(totalCost)
totalCost
totalCost <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/monthlcost.csv", header=TRUE, sep= ",")
totalCost <- mutate( totalCost, totalCostPart = SBA.Loan + Mortgage +CFO +COO +CMO+CEO +CTO+Metal.Hanger.100+ Perc.55.Gallons+ Gas+ Electric + Water+Wipes +bussinessInsurance +Cleaning.supplies +oil.Machines+ Wear.and.Tear+ Yoga.Instructor +Personal.Trainers +Saleperson +Market.Manger+ Receptionists +Dry.Clean.Staff)
totalCost
write.csv(totalCost,"/home/o/ocampod/fall2017/Econ591/data/totalCost.csv")
View(totalCost)
fixedCost
fixedCost <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/data/fixedCost.csv", header=TRUE, sep= ",")
ggplot(data = fixedCost ) +
geom_bar(mapping = aes( x = reorder(Total.Cost,Month), y = Month ), stat = "identity")+
coord_flip()
ggsave("/home/o/ocampod/fall2017/Econ591/data/fixCost.png")
select(fixedCost,Items,Cost)
sum(fixedCost$Cost)### First month Cost
View(fixedCost)
####### Calculating Machine Cost/intialCost ##########
totalMachineCost
totalMachineCost <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/intialCost.csv", header=TRUE, sep= ",")
View(totalMachineCost)
sapply(totalMachineCost, class)
totalMachineCost <- mutate( totalMachineCost, totalCost = Cost * Qty )
totalMachineCost[1,]
View(totalMachineCost)
write.csv(totalMachineCost,"/home/o/ocampod/fall2017/Econ591/data/TotalCost.csv")
sum(starupcost4$startupcost4)
sum(totalMachineCost$totalCost)
View(starupcost4)
View(tota)
sum(starupcost4$startupcost4)
sum(starupcost4$startupcost4)
sum(totalMachineCost$totalCost) + sum(starupcost4$startupcost4) + sum(fixedCost$totaCost)
sum(fixedCost$Cost)
#### Total Machine Cost and startup Cost 201,772
write.csv(intialCost,"/home/o/ocampod/fall2017/Econ591/data/TotalCost.csv")
View(intialCost)
### so my first month shoud be about 800,592 + 100369 with monthy cost of fixed month cost would be 900,961 to starup.
#### This is total revenue
totalRev <- read.csv(file = "/home/ochampo/fall2017/Business-Plan/Econ591/data/Calculation.csv", header=TRUE, sep= ",")
View(totalRev)
totalRev
totalRev <- mutate( totalRev, MonthlyMemebers= Week * NumMembers )### number of new
Intial.Memember
totalRev <- mutate( totalRev, Intial.Memember = MonthlyMemebers * 50)
totalRev <- mutate( totalRev, TotalrevDryCleaners = Week * costumers * MoneySpentDry )
dryCleaningRev <- select(totalRev,Month,costumers,MoneySpentDry,TotalrevDryCleaners)
View(dryCleaningRev)#### image for drycleaning Rev
totalRev <- mutate( totalRev, Gym.Total.Rev = Intial.Memember + IncomeByMonth )
gymrev <- select(totalRev,Month,MonthlyMemebers,Intial.Memember,IncomeByMonth,Gym.Total.Rev)### gym revenue fixed
RealRev <- select(totalRev,Month,TotalrevDryCleaners,Gym.Total.Rev,Totalrev)
View(RealRev)
View(gymrev)
totalRev <- mutate( totalRev, Totalrev = TotalrevDryCleaners+ IncomeByMonth + Intial.Memember)
View(totalRev)
totalRev[1,]
View(totalRev)
write.csv(totalRev,"/home/o/ocampod/fall2017/Econ591/data/Calculation.csv")
totalRev
ggplot(data = totalRev) +
geom_point(mapping = aes(x = reorder(Month,Totalrev) , y = Totalrev))
ggsave("/home/o/ocampod/fall2017/Econ591/data/TotalRevenue.png")
########## Total rev ##########
totalRevenueCombine <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/data/Year1Profit.csv", header=TRUE, sep= ",")
View(totalRevenueCombine)
totalRevenueCombine <- mutate( totalRevenueCombine, budget = Real.Rev + SBA.Loan - MonthlyCost)
totalRevenueCombine <- mutate( totalRevenueCombine, Total.Gym.Rev = Total.Gym.Rev + Member.Fee)
postive <- select(totalRevenueCombine,Month,Total.Gym.Rev,Total.Dry.Cleaner.Rev,Real.Rev,MonthlyCost,SBA.Loan,budget)
View(postive)
totalRevenueCombine
write.csv(totalRevenueCombine,"/home/o/ocampod/fall2017/Econ591/data/Year1Profit.csv")
sum(totalRevenueCombine$budget)
totalRevenueCombine <- read.csv(file = "/home/o/ocampod/fall2017/Econ591/TotalRev.csv", header=TRUE, sep= ",")
totalRevenueCombine
####### Burn Rate ########
BurnRate <- read.csv(file = "/home/ochampo/fall2017/Business-Plan/Econ591/Business-Plan/BurnRate - Sheet1.csv", header=TRUE, sep= ",")
|
5f1cf0864599342ef4d1337e57061363f52c09a9 | 37c45fe9fcdfc7ffeba837f552b0c9f9cf1a08ee | /1b1vectors.R | 2e08973c2c6f92ba7a78abdb8129b0bdfad3b787 | [] | no_license | HarshitSG/analytics1 | 2f59ec8dff3200b4fa7645f16108200701eb4779 | 7f1ecac2dcc22c021e42a553b83353fc0441d85c | refs/heads/master | 2020-04-02T11:02:26.331392 | 2018-10-24T18:51:44 | 2018-10-24T18:51:44 | 154,367,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,224 | r | 1b1vectors.R | # Vectors and Basic Statistics - Univariate Analysis
#packages required - e1071, modeest
#single dimension of same data type
#x = c(3, 'Data', TRUE) # cannotr
#create numbers in sequence----
x1 <- 10:19
x1
(x1 = 10:19)
(x1 = sample(x=10:19))#pick in random order
x1
print(x1) #see the position numbers start from 1
(y = 10:100)
y[20]
y[1:10]
#subset/ filter values on condition, position----
x1[x1 > 15]
print(x1[x1 > 15])
x1[x1 < 13]
x1[(x1 < 14) | (x1 > 17)]
x1[(x1 > 14) & (x1 < 17)]
#Subset values on position
x1
length(x1)
x1[1] ; x1[10] ; x1[length(x1)-1]
x1[3:6]
x1[c(3,6,7)]
x1[seq(2,10,2)] #even number 2-10 with differnece of 2
x1[seq(1,10,2)] #odd number 1-10 with differnece of 2
x1
x1[-5]
x1[-c(1,3,7)]
x1[-c(1,length(x1))]
#sort
(x1=sample(x=10:200))
sort(x1)
sort(x1, decreasing=T) #descending order
sort(x1, decreasing=TRUE) #descending order
x1
rev(x1) #rev is different from decreasing order
#generate numbers different way----
(x2 = c(4,7,3,9,11))
#c - combine values in a vector variable
(x3 = seq(from=0, to=100, by=5))
?rep
rep(1,5); rep(c(1,3),5)
(x4 = c(rep(4,times=3), rep(7, times=4)))
(x5 = c(rep(c(1,2,3), times=c(5,4,3))))
ls() ; y ; x1 ; x2 ; x3
rep(1,5) ; rep(1,times=5) ; rep(times=5,1)
rep(c(1,3),5)
?c
rep(c(1,3),times=5)
rep(c(1,3),each=5)
(x4 = c(rep(4, times=3), rep(7, times(times=4))))
(x5 = c(rep(c(1,2,3,4),times=c(5,4,3,10))))
#Using probability Distributions
set.seed(1234) #help in generating smae pattern
(x6 = rnorm(n=1000000, mean=5, sd=1))
plot(density(x6))
abline(v=5, h=0.3, col=1:2) #h - horizontal line, v - vertical line, col=colour can be specified via names also
mean(x6)
sd(x6)
(x7 = runif(n=10, min=2, max=10))
plot(x7)
?distributions
#removing decimal places/ round----
floor(10.35)
(x6 = rnorm(n=10, mean=5, sd=1))
floor(x6) #lower integer value
ceiling(x6) #higher integer value
trunc(x6)
round(x6,digits=2) #round to decimal places
signif(x6,digits=3) #round to specified no of digits not rounding decimal
x6
x6b = c(-10.5, 4.56, 5.3)
x6b
trunc(x6b)
floor(x6b)
x6c = c(05.24,5.24,5.2453)
signif(x6c,3)
# Basic operations on vector----
x1=1:10
sum(x1)
cumsum(x1) # cumulative sum
cumprod(x1)
x1 * 2 # multiple by 2
x1 * c(2,4) #multiple 2 & 4 alternatively
x1
#similarly other operators can be used
x1/2
x1 ^ 2 #to power
x1 ** 2 #power of 2
x1 %% 2 #finding whether odd or even, #modulo/ remainder
sqrt(x1) #squear root
sin(x1)
#concatenate vectors
x1; x2
x1
x2
(x8 = c(x1, x2)) #combining vector
x8
#min, max, compare ----
min(x1)
max(x1)
mean(x1)
median(x1)
mode(x1) #this mode is not stats mode
length(x1) #no of values
x1==x2 #1|=4, 2|=7, and goon..... x2 will repeat after it finishes, that is from start
x1; x2
x1; c(x2,x2)
5 < 6
5 > 6
5 == 6
5 <= 6
#attributes----
str(x1) #structure of vector
class(x1)
typeof(x1)
summary(x1)
quantile(x1)
quantile(x1, c(.1,.3,.6,.7))
#decile 10%, percentile 1%
seq(0,1,.10)
quantile(x1, seq(0,1,.1))
quantile(x1, seq(0,1,.01))
x1
head(x1)
head(x1,n=3)
tail(x1)
tail(x1,n=3)
x1=sample(1:100)
(x1=sample(1:100))
head(x1,n=3)
tail(x1,n=3)
#missing values in vector (NA)----
(x9 = c(1,5,14,NA,20,17, NA,9)) #missing values are indicated by NA # importing filesfrom excel the blank, will be replaced by NA or if importing from other software, blank will show 99
(x9b = c(1,5,14,20,17,9))
length(x9)
sum(x9) #error
sum(x9,na.rm=T) # finding sum after removing NA
is.na(x9) #T & F #is there any values which are missing, show true false in all positions
#how many missing values
sum(c(TRUE,F,T,T,T,F,T,F)) # sum of trues #T=1 and F=0 and sum them depending on hte number of T's and F's
sum(is.na(x9)) # how many missing values
y1=sample(1:100)
y1[c(30,50,70,81)]=NA
y1
anyNA(y1) #is there any missing values?
sum(is.na(y1))
sum(y1, na.rm=T)
length(x9)
sum(x9, na.rm=T)
na.omit(x9)
?NA
na.exclude(x9)
length(x9)
x9b=na.omit(x9)
x9 ; length(x9b)
#impute
mean(x9, na.rm=T) #mean of x9 non missing values
is.na(x9) #where NA is there in x9
x9c=x9
x9[is.na(x9)] = mean(x9, na.rm=T) # where NA is there in x9 = mean of x9 non missing values
x9
#Other Vectors----
class(x1)
(x11 = c(10.4, 12.4, 15, 20)) #numeric - will have decimals
class(x11)
(x12 = c(3L,6L,9L, 15L)) #integer - no decimals (L means integer)
class(x12)
(x11b = c(10.4, 12.4, 15, 20))
class(x11b)
x11c=as.integer(x11b) #convert numeric to integer
class(x11c)
#character----
(x13 = c('henry', 'harvin', 'education'))
class(x13)
toupper(x13)
casefold(x13,upper=T)
(x14 = c("BUSINESS", "MARKETING", 'FINANCIAL'))
tolower(x14)
casefold(x14,upper=F)
#library(stringr)
chartr("BMF","bmF",x14) #replace BMF with bmF
?strsplit(x14, "E") #split at point E is found
#Logical Vectors----
(x20 = c(TRUE, FALSE, T, F, TRUE))
class(x20)
sum(x20) #how many T
x20[x20 ==T ] # T=1
table(x20) #T & F count
#names to vector elements----
x14
names(x14) = c('CBAP','CMAP',"CFAP") #headings
x14
x14["CMAP"] #value of label CMAP
#paste ----
paste("Product",1,sep="-")
paste("Coy",1,sep="$")
paste("Product",1:10,sep="-")
#Generate Sample values----
(x25 = ceiling(rnorm(10, mean=60, sd=10)))
(x26a = sample(c("Male","Female")))
(x26b = sample(c("Male","Female"), size=10))#error
(x26c = sample(c("Male","Female"), size=10, replace=T))
table(x26c)
(x26d = sample(c("Male","Female"), size=10, replace=T, prob=c(.6, .4)))
table(x26d) #will not be exactly as per prob as sample size is less
(x26e = sample(c("Male","Female"), size=10000, replace=T, prob=c(.6, .4))) #increase sample size
table(x26e) # approx as per prob
prop.table(table(x26e))
#sampling----
#using sampling with set.seed for repeating pattern
(x27a = sample(c("Phd","MBA", "BBA"), size=10, replace=T))
table(x27a)
(x27b = sample(c("Phd","MBA", "BBA"), size=10, replace=T))
table(x27b)
x27a==x27b #not same
#how to regenerate the sample pattern using seed value
set.seed(123) #123 can be any number
(x28a = sample(c("Phd","MBA", "BBA"), size=10, replace=T))
table(x28a)
set.seed(123) #repeat this
(x28b = sample(c("Phd","MBA", "BBA"), size=10, replace=T))
table(x28b)
x28a==x28b #all are same
#Univariate Analysis----
#Basic Statistics on Single Variable
#continuous values
(x30 = rnorm(100, mean=60, sd=5))
#discrete category values- unordered
(x31a = sample(c('Phd','MBA','BBA'), size=100, replace=T, prob=c(.1,.3, .6)))
(x31 = factor(x31a))
#ordered category
(x32a = sample(c('Excellent','Good','Satisfactory','Poor'), size=100, replace=T, prob=c(.3,.4,.2,.1)))
(x32 = factor(x32a, ordered=T, levels=c('Poor','Satisfactory','Good','Excellent')))
#summary
summary(x30)
summary(x31)
summary(x32)
table(x31)
prop.table(table(x31))
length(x30)
#plots-----
hist(x30)
hist(x30, breaks=10, col=1:10)
plot(x30)
boxplot(x30)
abline(h=summary(x30))
#categorical variable
table(x31)
barplot(table(x31), horiz=T)
barplot(table(x32), horiz=F, col=1:4)
pie(table(x32))
#Statistics----
mean(x30)
var(x30)
sd(x30)
sqrt(var(x30))
min(x30)
max(x30)
range(x30)
plot(density(x30))
e1071::skewness(x30)
e1071::kurtosis(x30)
Hmisc::describe(x30)
quantile(x30)
quantile(x30, prob=c(seq(0,1,by=.1)))
quantile(x30, prob=c(seq(0,1,by=.01)))
range(x30)
stem(x30)
#mode----
#mode of continous
modeest::mlv(x30,method='shorth')
#mode of categorical variable
x31
(courses = table(x31))
names(courses)[courses == max(courses)]
#Frequency Table----
#generate sample data using uniform distribution from 0 to 100
(x33 = runif(100, min=0, max=100))
range(x33)
length(x33)
# Divide range into step of 20 ie 5 levels
(breaks = seq(0,100,by=20))
length(breaks)
(x33.cut = cut(x33, breaks))
head(cbind(x33, x33.cut))
table(x33.cut)
(freqtable = cbind(table(x33.cut))) #see it vertically
barplot(freqtable, beside = T, names.arg = row.names(freqtable))
#give intervals a Label values A, B..
LETTERS[1:5]
(x33b.cut = cut(x33, breaks, labels=LETTERS[1:length(breaks)-1]))
x33b.cut
(table(x33b.cut))
(freqtable2 = cbind(table(x33b.cut)))
row.names(freqtable2)
barplot(freqtable2, beside = T, col=1:5, names.arg = row.names(freqtable2), ylim=c(0, max(freqtable2)+5))
title(main="Distribution of Values", sub='Frequency Table')
text(x=(1:nrow(freqtable2))+.5, y=freqtable2+3, freqtable2, col=1:5)
#Undersand other Data Structures and various analysis on them
#matrix, data.frame |
76f50ca54d007e94be1b84d73f517e902af3e2d2 | 74d23aff9362c2ecf3e6a5883d98b4db65546740 | /cachematrix.R | 7108b66b0a10cbafe61d55de77ef360c2602b2ff | [] | no_license | kimberleyanne/ProgrammingAssignment2 | 6806c6211e7af69342e6d4f43bd595548caae24d | eb97864d0cb639044118497b6186abfdb6bbfc3d | refs/heads/master | 2020-04-13T16:56:23.942533 | 2018-12-27T22:28:25 | 2018-12-27T22:28:25 | 163,334,309 | 0 | 0 | null | 2018-12-27T20:41:47 | 2018-12-27T20:41:46 | null | UTF-8 | R | false | false | 1,699 | r | cachematrix.R | ## The following functions make use of R's lexical scoping to cache the results of a time-intensive operation:
## calculating the inverse of a matrix.
## The makeCacheMatrix function creates a series of setter and getter functions and returns those functions in a named list.
## x and i are created in the makeCacheMatrix parent environment along with the set function (allows user to enter matrix
## for use in subsequent functions), get function (retrieves the matrix for which the inverse will be calculated),
## setinverse function (calculates the inverse of x matrix and saves this as i in the parent environment), and getinverse
## function (retrieves the inverse matrix i).
## Create a cache-enabled matrix object: a list of four closures that
## set/get the underlying matrix and set/get its cached inverse `i`.
## The cached inverse is reset to NULL whenever a new matrix is stored,
## so cacheSolve() never returns the inverse of a stale matrix.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    # Bug fix: the original did `x <<- matrix(y)`, which flattens an
    # existing matrix into a single-column matrix and destroys its
    # dimensions (so solve() on it later fails). Keep matrices as-is;
    # coerce only non-matrix input, preserving the old vector behavior.
    x <<- if (is.matrix(y)) y else matrix(y)
    i <<- NULL  # invalidate any cached inverse of the old matrix
  }
  get <- function() x
  # NOTE(review): the parameter is named `solve` in the original
  # interface; it shadows base::solve inside this closure but is kept
  # unchanged for backward compatibility with named-argument callers.
  setinverse <- function(solve) i <<- solve
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The cacheSolve function will retrieve the inverse matrix stored in makeCacheMatrix's i using getinverse().
## If the inverse is not null (i.e., i was alread calculated and is available) cacheSolve returns i (the inverse).
## If the inverse is null (i.e., i hasn't been calculated), cacheSolve will apply the get() function from
## makeCacheMatrix to create the data object, determine the matrix inverse using solve(), save
## the inverse using setinverse(), and return the inverse i.
## Compute (or retrieve) the inverse of the special "matrix" built by
## makeCacheMatrix(). When an inverse is already cached it is returned
## immediately with a status message; otherwise solve() computes it,
## the result is cached via setinverse(), and the fresh inverse is
## returned. Additional arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
8ee7672b1cde3cd9d0936deaac688ce76acf251a | 85a4fea32070f0cd1f5d20a15f6d95e843bf978d | /Capastone/R script.r | 847232c6a4336316875cbca251201ef4b4989506 | [] | no_license | ningyuliu114/Final-Capstone-Project-Submission---Ning | a6470a24cc56d88b66a0d057f770f6fbb77554ca | 0c1a72b41a013297903060add8a1e1797e02f1d5 | refs/heads/master | 2020-03-19T22:21:39.159391 | 2018-06-11T18:50:34 | 2018-06-11T18:50:34 | 136,967,503 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,647 | r | R script.r | #rm(list=ls())
#install.packages('car')
#Single Detached Housing
#graph Data vs Index
attach(Single_Detached_Houseing)
View(Single_Detached_Houseing)
#Analyse of Index
summary(Index)
#Analyse of Growth (%Y-0-Y)
summary(`Growth (%Y-O-Y)`)
#graph DatE vs Index
xS<- c(Single_Detached_Houseing$Date)
SingleyS<- c(Single_Detached_Houseing$Index)
plot(xS, SingleyS,type = "o", col = "red", xlab = "date", ylab = "Index", main = "Single Detached Housing")
#Graph Date vs Growth (%Y-O-Y)
singley1<- c(`Growth (%Y-O-Y)`)
plot(x, singley1,type = "o", col = "blue", xlab = "date", ylab = "Growth (%Y-O-Y) ", main = "Single Detached Housing")
#variance and std for Single Detached Housing
var(Index)
sd(Index)
var(`Growth (%Y-O-Y)`)
sd(`Growth (%Y-O-Y)`)
#Creating A Table for Index
SummarySI<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueSI<-c('100.4','108.3','121.7','119.6','129.2','138.1','123.5621','11.11585')
TableSI<- data.frame(SummarySI, ValueSI)
View(TableSI)
#Creating A Table for Growth
SummarySG<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueSG<-c('-3.200','1.100','3.600','3.568','5.800','10.600','9.455322','3.074951')
TableSG<- data.frame(SummarySG, ValueSG)
View(TableSG)
#Addng regression line for Single_Detached_Houseing
plot(Single_Detached_Houseing$Index,Single_Detached_Houseing$`Growth (%Y-O-Y)`, main = 'Regression for Single_Detached_Houseing', xlab = 'index',ylab = 'growth')
abline(lm(`Growth (%Y-O-Y)`~ Index,data = land), col = 'red')
#Town House Data
attach(Town_House_including_land_)
View(Town_House_including_land_)
#Analyse of Index
summary(Index)
#Analyse of Growth (%Y-0-Y)
summary(`Growth (%Y-O-Y)`)
#Town House
#graph Data vs Index
x<- c(Date)
Towny<- c(Index)
plot(x, Towny,type = "o", col = "red", xlab = "date", ylab = "Index", main = "Town House (including land)")
#Graph Data vs Growth (%Y-O-Y)
Towny1<- c(`Growth (%Y-O-Y)`)
plot(x, Towny1,type = "o", col = "blue", xlab = "date", ylab = "Growth (%Y-O-Y) ", main = "Town House (including land)")
#variance and std
var(Index)
sd(Index)
var(`Growth (%Y-O-Y)`)
sd(`Growth (%Y-O-Y)`)
#Creating A Table for Index
SummaryTI<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueTI<-c('97.7','105.8','124.9','121.9','129.2','135.9','239.5359','15.47695')
TableTI<- data.frame(SummaryTI, ValueTI)
View(TableTI)
#Creating A Table for Growth
SummaryTG<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueTG<-c('-1.400 ','2.300','3.800','4.659','7.200','14.000','13.43828','3.665826')
TableTG<- data.frame(SummaryTG, ValueTG)
View(TableTG)
#Addng regression line for Town_House_including_land_
plot(Town_House_including_land_$Index,Town_House_including_land_$`Growth (%Y-O-Y)`, main = 'Regression for Town_House_including_land_', xlab = 'index',ylab = 'growth')
abline(lm(`Growth (%Y-O-Y)`~ Index,data = land), col = 'red')
#Condo
attach(Condominium)
View(Condominium)
#Analyse of Index
summary(Index)
#Condominium
#Analyse of Growth (%Y-0-Y)
summary(`Growth (%Y-O-Y)`)
#Condominium
#graph Data vs Index
x<- c(Date)
Condoy<- c(Index)
plot(x, Condoy,type = "o", col = "red", xlab = "date", ylab = "Index", main = "Condominium")
#Graph Data vs Growth (%Y-O-Y)
Condoy1<- c(`Growth (%Y-O-Y)`)
plot(x, Condoy1,type = "o", col = "blue", xlab = "date", ylab = "Growth (%Y-O-Y) ", main = "Condominium")
#variance and std
var(Index)
sd(Index)
var(`Growth (%Y-O-Y)`)
sd(`Growth (%Y-O-Y)`)
#Creating A Table for Index
SummaryCI<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueCI<-c('112.4','126.0','139.8','141.8','160.8','181.6','427.0431','20.66502')
TableCI<- data.frame(SummaryCI, ValueCI)
View(TableCI)
#Creating A Table for Growth
SummaryCG<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueCG<-c('-3.600 ','2.900','6.100','5.903','8.400','14.400','15.42572','3.927559')
TableCG<- data.frame(SummaryCG, ValueCG)
View(TableCG)
#Addng regression line for Condominium
plot(Condominium$Index,Condominium$`Growth (%Y-O-Y)`, main = 'Regression for Condominium', xlab = 'index',ylab = 'growth')
abline(lm(`Growth (%Y-O-Y)`~ Index,data = land), col = 'red')
#land
attach(land)
View(land)
#Analyse of Index
summary(Index)
#land
#Analyse of Growth (%Y-0-Y)
summary(`Growth (%Y-O-Y)`)
#land
#graph Data vs Index
x<- c(Date)
Landy<- c(Index)
plot(x, Landy,type = "o", col = "red", xlab = "date", ylab = "Index", main = "land")
#Graph Data vs Growth (%Y-O-Y)
landy1<- c(`Growth (%Y-O-Y)`)
plot(x, landy1,type = "o", col = "blue", xlab = "date", ylab = "Growth (%Y-O-Y) ", main = "land")
#variance and std
var(Index)
sd(Index)
var(`Growth (%Y-O-Y)`)
sd(`Growth (%Y-O-Y)`)
#Creating A Table for Index
SummaryLI<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueLI<-c('102.8','116.7','137.7','139.3','167.1','178.3','594.0412','24.37296')
TableLI<- data.frame(SummaryLI, ValueLI)
View(TableLI)
#Creating A Table for Growth
SummaryLG<- c('Min.','1st Qu.','Median','Mean','3rd Qu.','Max.','Var','std')
ValueLG<-c('-5.500 ','1.900','6.293','9.900','16.900','14.400','24.97963','4.997963')
TableLG<- data.frame(SummaryLG, ValueLG)
View(TableLG)
#Addng regression line for land
plot(land$Index,land$`Growth (%Y-O-Y)`, main = 'Regression for Land', xlab = 'index',ylab = 'growth')
abline(lm(`Growth (%Y-O-Y)`~ Index,data = land), col = 'red')
|
5eedcdad812bf189f4776d9ed67838b0c3cce556 | 037eb02d33f2dd6eb0e257106d1ce9fcb235e2be | /bin/directed_plots.R | a5803996575b6643adfc9b4fd45b0501bb4cbaaf | [] | no_license | CSB-IG/KEGG-IntegratedNetwork | 703d0d87d119bf1381a40ff2914a97408b68a0df | b1d8e2ca0b8104c69d34ac7f5d47440f49ff935d | refs/heads/main | 2022-12-19T22:03:29.953738 | 2020-10-21T03:18:24 | 2020-10-21T03:18:24 | 264,039,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,286 | r | directed_plots.R | ######### TOPOLOGY ALL NETWORK -MINUS ISOLATED NODES
#############
#############
## Render the log-log degree distribution of the full (directed)
## network into a PDF. Reads a comma-separated node table that must
## contain `Indegree` and `Outdegree` columns, fits a linear model in
## log-log space, and writes one scatter+regression page to pdf.file.
## Requires ggplot2 to be attached by the caller.
## NOTE(review): if read.table() or ggplot() errors, the PDF device is
## left open because dev.off() is not guarded with on.exit().
plot_all_network_directed <- function(nodes.file, pdf.file){
# Open the PDF device; all subsequent print()ed plots go into this file.
pdf(pdf.file, onefile = T)
nodes.all <- read.table(nodes.file, sep=",", header=T, stringsAsFactors = F)
######### LOG-LOG DEGREE DISTRIBUTION
# Frequency table of log10(total degree); table names are the distinct
# log10(k) values, table values are their counts.
table.k.log <- table(log10(nodes.all$Indegree + nodes.all$Outdegree ))
max.table.k.log <- max(as.numeric(names(table.k.log)))
min.table.k.log <- min(as.numeric(names(table.k.log)))
# `partitions` fed the axis() call of the commented-out base-R plot
# below; it is currently unused — presumably kept for reference.
partitions <- seq(floor(min.table.k.log), ceiling(max.table.k.log), length.out=4)
# x = log10(k), y = log10(frequency of k): the classic power-law check.
y <- as.vector(log10(table.k.log))
x <- as.numeric(names(table.k.log))
#plot(log10(table.k.log), type="p", xaxt="n", xlab = "log10(k)", ylab="log10(Frequency)", col="blue")
#axis(side = 1, at = partitions, labels = partitions)
#abline(lm(y ~ x ), lwd = 4)
data <- data.frame(x=x, y=y)
# Scatter of the log-log distribution with an lm fit overlaid; print()
# is required inside a function for ggplot objects to render.
print(ggplot(data = data, aes(x = x, y = y)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE, col="black") +
labs( x = "log10(k)", y = "log10(Frequency)") +
ylim(0, max(data$y)) +
theme_bw(base_size = 25))
# Close the PDF device, flushing the page to disk.
dev.off()
}
######### TOPOLOGY GIANT CONNECTED COMPONENT
#############
#############
## Render a multi-page PDF of topology plots for the giant connected
## component of the directed network. Expects comma-separated tables:
## `edges.file` with columns NPATHWAYS and EdgeBetweenness, and
## `nodes.file` with Indegree, Outdegree, AverageShortestPathLength and
## ClosenessCentrality. Requires ggplot2 to be attached by the caller.
## NOTE(review): dev.off() is not protected by on.exit(), so an error
## mid-function leaves the PDF device open.
plot_gcc_network_directed <- function(nodes.file, edges.file, pdf.file){
pdf(pdf.file, onefile = T)
edges <- read.table(edges.file, sep=",", header=T, stringsAsFactors = F)
nodes <- read.table(nodes.file, sep=",", header=T, stringsAsFactors = F)
# Page 1: empirical CDF of the number of pathways each edge belongs to.
dfP <- as.data.frame(edges$NPATHWAYS)
print(ggplot(dfP, aes(edges$NPATHWAYS)) + stat_ecdf(geom = "point", size = 1, color='blue') +
xlab( "Number of pathways per edge") +
ylab("Cumulative frequency") +
geom_hline(yintercept = 1, linetype="dashed",
color = "black", size=0.3) +
theme_bw(base_size = 25))
# Page 2: empirical CDF of log10(edge betweenness).
dfE <- as.data.frame(log10(edges$EdgeBetweenness))
print(ggplot(dfE, aes(log10(edges$EdgeBetweenness))) + stat_ecdf(geom = "point", size = 0.3, color='blue') +
xlab("log10(Edge Betweenness)") +
ylab("Cumulative frequency") +
geom_hline(yintercept = 1, linetype="dashed", color = "black", size=0.3) +
theme_bw(base_size = 25))
# Page 3: log10(edge betweenness) vs. pathway count per edge.
dfEBV <- data.frame(logEB = log10(edges$EdgeBetweenness), vias = edges$NPATHWAYS)
print(ggplot(dfEBV, aes(x=logEB, y=vias)) +
geom_point(size = 0.5, color='blue')+
labs( x = "log10(Edge Betweenness)", y = "Number of pathways") +
theme_bw(base_size = 25))
# NOTE(review): this base-R plot uses the RAW EdgeBetweenness values
# but labels the x axis "log10(Edge Betweenness)" — likely a mismatch;
# confirm whether log10() was intended here.
plot(edges$EdgeBetweenness, edges$NPATHWAYS, xlab ="log10(Edge Betweenness)", ylab = "Number of pathways", cex = 0.5)
# NOTE(review): inside a function this cor.test() result is neither
# printed nor returned, so it is silently discarded; wrap it in print()
# or capture it if the Spearman correlation is needed.
cor.test(log10(edges$EdgeBetweenness), edges$NPATHWAYS, alternative = "two.sided", method="spearman")
###### LOG-LOG DEGREE DISTRIBUTION
# Same power-law check as plot_all_network_directed(), restricted to
# the GCC node table: frequency of each log10(total degree) value.
table.k.log <- table(log10(nodes$Indegree + nodes$Outdegree))
max.table.k.log <- max(as.numeric(names(table.k.log)))
min.table.k.log <- min(as.numeric(names(table.k.log)))
# `partitions` served the commented-out base-R axis() call; currently unused.
partitions <- seq(floor(min.table.k.log), ceiling(max.table.k.log), length.out=4)
y <- as.vector(log10(table.k.log))
x <- as.numeric(names(table.k.log))
#plot(log10(table.k.log), type="p", xaxt="n", xlab = "log10(k)", ylab="log10(Frequency)", col="blue")
#axis(side = 1, at = partitions, labels = partitions)
#abline(lm(y ~ x ), lwd = 4)
data <- data.frame(x=x, y=y)
print(ggplot(data = data, aes(x = x, y = y)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE, col="black") +
labs( x = "log10(k)", y = "log10(Frequency)") +
ylim(0, max(data$y)) +
theme_bw(base_size = 25))
####### SHORTEST-PATH-LENGTH
# Histogram of average shortest path length per node.
print(ggplot(as.data.frame(nodes$AverageShortestPathLength), aes(x=nodes$AverageShortestPathLength)) +
geom_histogram(color="darkblue", fill="white") +
labs( x = "Average Shortest Path Length", y = "Frequency") +
theme_bw(base_size = 25))
####### CORE-PERIPHERY
# Histogram of closeness centrality (core vs. periphery structure).
print(ggplot(as.data.frame(nodes$ClosenessCentrality), aes(x=nodes$ClosenessCentrality)) +
geom_histogram(color="darkblue", fill="white", bins = 50) +
labs( x = "Closeness Centrality", y = "Frequency") +
theme_bw(base_size = 25))
# Close the device, flushing all pages to pdf.file.
dev.off()
}
|
2f44858b434c220b0fecfb16dcdedc01267debf6 | 67ad46995ca24ec2891ac13cf2e70b06e94f0932 | /man/noaa_api_data.Rd | c243db40f13e7eebef7c90bc71b632a363bce6e6 | [] | no_license | heike/wilyscraper | f76067574faa8042fb7d9839210c1e3e7fcef94f | b562b8a3d783d7473b028d14be6c6473ffb6498f | refs/heads/master | 2020-03-26T15:30:32.240404 | 2018-08-29T22:03:52 | 2018-08-29T22:03:52 | 145,047,684 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 834 | rd | noaa_api_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/noaa-weather-scraper.R
\name{noaa_api_data}
\alias{noaa_api_data}
\title{Get data from NOAA weather API for Ames, IA}
\usage{
noaa_api_data(location = NULL, var = c("TMAX", "TMIN"),
date_min = "2008-01-01", date_max = Sys.Date(), quiet = T)
}
\arguments{
\item{var}{variables to download (TMAX, TMIN by default, see rnoaa
documentation for other options)}
\item{date_min}{ymd format, minimum date}
\item{date_max}{ymd format, maximum date}
\item{quiet}{print messages?}
\item{location}{data frame with columns id, latitude, and longitude}
}
\description{
Get data from NOAA weather API for Ames, IA
}
\examples{
\dontrun{
# Get Ames minimum and maximum temperature for 2017
noaa_api_data(date_min = "2017-01-01", date_max = "2017-12-31")
}
}
|
b1ba20df3fad133ec0733ba3525616194d882c95 | 23cfd470af3f14fd76753c963a1ad7656c8c51e2 | /apportion.R | 8419b3b32936326673bb2e31b941cd664e8c2b9e | [] | no_license | sciav/uncap | 1c71b9ba2450fb1abfc47e6a729a44fe02c639c3 | 5b6d0edb655e3e534b3c5bcdd9752dc4ed072e0c | refs/heads/main | 2023-03-03T19:27:06.127461 | 2021-02-17T07:58:45 | 2021-02-17T07:58:45 | 339,642,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,996 | r | apportion.R | state_list <- c("Alabama", "Alaska", "Arizona", "Arkansas", "California",
"Colorado", "Connecticut", "Delaware", "Florida", "Georgia",
"Hawaii", "Idaho", "Illinois", "Indiana", "Iowa", "Kansas",
"Kentucky", "Louisiana", "Maine", "Maryland", "Massachusetts",
"Michigan", "Minnesota", "Mississippi", "Missouri", "Montana",
"Nebraska", "Nevada", "New Hampshire", "New Jersey",
"New Mexico", "New York", "North Carolina", "North Dakota",
"Ohio", "Oklahoma", "Oregon", "Pennsylvania", "Rhode Island",
"South Carolina", "South Dakota", "Tennessee", "Texas", "Utah",
"Vermont", "Virginia", "Washington", "West Virginia",
"Wisconsin", "Wyoming")
# Apportion House seats among states with the Huntington-Hill method.
#
# Args:
#   states     data frame with columns `State` (name) and `Pop` (population).
#   nseats     total number of seats to distribute (435 = current U.S. House).
#   new_states optional character vector of extra entity names (beyond the 50
#              in state_list) that should be eligible for seats.
# Returns a data frame with one row per seat-winning entity plus a
# "United States" totals row: seats, electoral votes, and population /
# representation share ratios.
apportion <- function(states,
                      nseats = 435,
                      new_states = NULL) {
  # Upper bound on the number of seats any single state could win: its
  # population share in whole percent, padded by 5 points, times nseats.
  seat_cap <- max(nseats * (ceiling(states[["Pop"]] / sum(states[["Pop"]], na.rm = TRUE) * 100) + 5), na.rm = TRUE) %/% 100
  n <- seq_len(seat_cap)
  # Huntington-Hill divisors 1/sqrt(n*(n-1)); for n = 1 this is Inf, which
  # guarantees every eligible state wins its first seat before any state
  # receives a second one.
  mult <- 1 / sqrt(n * (n - 1))
  # priority[s, k] = priority of state s for its k-th seat (column-major).
  priority <- outer(states[["Pop"]], mult)
  rownames(priority) <- states[["State"]]
  st_dat <- states[["State"]]
  # Entities that are neither real states nor listed in new_states are made
  # ineligible by zeroing their entire priority row.
  non_states <- setdiff(st_dat, c(state_list, new_states))
  priority[non_states, ] <- 0
  # Linear (column-major) indices of the nseats highest priority values.
  ap <- order(priority, decreasing = TRUE)[seq_len(nseats)]
  # Recycling the state names to the matrix length maps each linear index
  # back to its row (state); tabulating then yields seats per state.
  seats <- table(rep(states[["State"]], length.out = max(ap, na.rm = TRUE))[ap],
                 dnn = "State")
  df <- as.data.frame(seats, responseName = "Seats")
  # NOTE(review): table() lists only entities that won at least one seat, in
  # alphabetical order; the positional assignment below therefore assumes
  # `states` is sorted alphabetically by State and that every row wins a
  # seat -- TODO confirm for inputs containing non-state rows (e.g. DC).
  df[["Population"]] <- states[["Pop"]]
  # The `<-` inside list() also binds pop_prop / seat_prop for reuse below.
  df[c("Population %", "Seat %", "Rep Factor")] <- list(
    pop_prop <- states[["Pop"]] / sum(states[["Pop"]], na.rm = TRUE),
    seat_prop <- seats / nseats,
    seat_prop / pop_prop)
  # Electoral votes: House seats plus 2 (senators) for seat-holding entities.
  ev <- df[["Seats"]] + 2 * (df[["Seats"]] > 0)
  # Give DC its 3 electoral votes when it was ineligible for House seats.
  # NOTE(review): a seat-less DC is absent from df, so this assignment is a
  # no-op in that case -- TODO confirm the intended handling.
  if ("District of Columbia" %in% non_states) {
    ev[df[["State"]] == "District of Columbia"] <- 3
  }
  df[c("Electoral Votes", "Electoral Vote %")] <- list(ev,
    ev_prop <- ev / sum(ev, na.rm = TRUE))
  df[["EV Factor"]] <- ev_prop / pop_prop
  df <- df[, c("State", "Population", "Population %", "Seats", "Seat %", "Rep Factor", "Electoral Votes", "Electoral Vote %", "EV Factor")]
  # Append a national totals row (all shares equal 1 by construction).
  rbind(df,
        data.frame("State" = "United States",
                   "Population" = sum(states[["Pop"]]),
                   "Population %" = 1,
                   "Seats" = nseats,
                   "Seat %" = 1,
                   "Rep Factor" = 1,
                   "Electoral Votes" = sum(ev, na.rm = TRUE),
                   "Electoral Vote %" = 1,
                   "EV Factor" = 1, check.names = FALSE))
}
# Total the electoral votes won by each candidate.
#
# Joins the per-state winners onto the per-state electoral-vote table (by
# their shared State column) and sums electoral votes by winner.
#
# Args:
#   election_results data frame with at least columns State and Winner.
#   apportionment    data frame with columns State and `Electoral Votes`
#                    (as produced by apportion()).
#   prw              winner of Puerto Rico, or NULL when Puerto Rico is not
#                    in play; when given, a synthetic result row is appended.
# Returns a named array: total electoral votes per winner.
get_ev_results <- function(election_results, apportionment, prw) {
  if (!is.null(prw)) {
    pr_row <- data.frame(Year = 0, State = "Puerto Rico", Winner = prw)
    election_results <- rbind(election_results, pr_row)
  }
  merged <- merge(election_results, apportionment)
  tapply(merged[["Electoral Votes"]], merged[["Winner"]], sum)
}
|
4f4770f1aebf71b23803288a8f905ef197f0c8e1 | 12c545c0c8eb79240f43875505bddeffe2018950 | /man/install_torch.Rd | ab208e51bed5e8d19dd810b068dffa2b9fe89dbd | [
"MIT"
] | permissive | snapbuy/torch | 9322b8ee7e5ba1a31ac02a9151d4fc587a460ba7 | 8d968099ef78242062d4d301b2fc8364f2de2eb5 | refs/heads/master | 2023-08-26T16:41:10.695600 | 2021-10-19T22:56:58 | 2021-10-19T22:56:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,451 | rd | install_torch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{install_torch}
\alias{install_torch}
\title{Install Torch}
\usage{
install_torch(
version = "1.9.0",
type = install_type(version = version),
reinstall = FALSE,
path = install_path(),
timeout = 360,
...
)
}
\arguments{
\item{version}{The Torch version to install.}
\item{type}{The installation type for Torch. Valid values are \code{"cpu"} or the 'CUDA' version.}
\item{reinstall}{Re-install Torch even if its already installed?}
\item{path}{Optional path to install or check for an already existing installation.}
\item{timeout}{Optional timeout in seconds for large file download.}
\item{...}{other optional arguments (like \code{`load`} for manual installation).}
}
\description{
Installs Torch and its dependencies.
}
\details{
When using \code{path} to install in a specific location, make sure the \code{TORCH_HOME} environment
variable is set to this same path to reuse this installation. The \code{TORCH_INSTALL} environment
variable can be set to \code{0} to prevent auto-installing torch and \code{TORCH_LOAD} set to \code{0}
to avoid loading dependencies automatically. These environment variables are meant for advanced use
cases and troubleshooting only.
When timeout error occurs during library archive download, or length of downloaded files differ from
reported length, an increase of the \code{timeout} value should help.
}
|
69b8f6c5eb9d8030426889fc100d8ff22fabc86c | 72e8bd721b82c0e8239bf00129dbc1f00f19e2f2 | /testData/r_skeletons/mnormt.R | 18dff4049fb35416a1a12b74f90df25f4c224795 | [
"BSD-2-Clause",
"MIT"
] | permissive | yoongkang0122/r4intellij | 59dc560db7a1ea7aead2be8a98bbbba4e4051259 | e1dc26b462f94e3884d07ba5321eefa7263d1ad9 | refs/heads/master | 2021-01-25T09:20:56.708602 | 2017-05-13T05:52:49 | 2017-05-13T05:52:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,596 | r | mnormt.R | ##
## Exported symobls in package `mnormt`
##
## Exported package methods
# Rectangle probability P(lower < X < upper) for a bivariate Student's t
# distribution with `df` degrees of freedom (df = Inf gives the bivariate
# normal case), location `mean` and 2x2 scale matrix S.  The numerical work
# is delegated to the Fortran routine `smvbvt`.
biv.nt.prob <- function (df, lower, upper, mean, S) 
{
    if (any(dim(S) != c(2, 2))) 
        stop("dimensions mismatch")
    if (length(mean) != 2) 
        stop("dimensions mismatch")
    if (round(df) != df) 
        warning("non integer df is rounded to integer")
    # The Fortran code encodes the normal case (df = Inf) as nu = 0.
    nu <- if (df < Inf) 
        as.integer(round(df))
    else 0
    # NOTE(review): redundant -- nu is already 0 when df = Inf (see above).
    if (df == Inf) 
        nu <- 0
    # Standardize the integration limits to zero location / unit scale.
    sd <- sqrt(diag(S))
    rho <- cov2cor(S)[1, 2]
    lower <- as.double((lower - mean)/sd)
    upper <- as.double((upper - mean)/sd)
    if (any(lower > upper)) 
        stop("lower>upper integration limits")
    if (any(lower == upper)) 
        return(0)
    # Per-margin infinity flags (mvtdst convention): 2 = both limits finite,
    # 1 = upper infinite, 0 = lower infinite, -1 = both infinite.
    infin <- c(2, 2)
    infin <- replace(infin, (upper == Inf) & (lower > -Inf), 
        1)
    infin <- replace(infin, (upper < Inf) & (lower == -Inf), 
        0)
    infin <- replace(infin, (upper == Inf) & (lower == -Inf), 
        -1)
    infin <- as.integer(infin)
    if (any(infin == -1)) {
        # A margin spanning the whole real line contributes a factor of 1,
        # reducing the problem to a univariate (or trivial) probability.
        if (all(infin == -1)) 
            return(1)
        k <- which(infin != -1)
        return(pt(upper[k], df = df) - pt(lower[k], df = df))
    }
    # Infinite limits are passed as 0; the infin flags tell the Fortran
    # routine to ignore the corresponding bound.
    lower <- replace(lower, lower == -Inf, 0)
    upper <- replace(upper, upper == Inf, 0)
    rho <- as.double(rho)
    prob <- as.double(0)
    a <- .Fortran("smvbvt", prob, nu, lower, upper, infin, rho, 
        PACKAGE = "mnormt")
    return(a[[1]])
}
# Probability of the rectangle {lower < X < upper} for a d-dimensional
# normal distribution with the given mean and covariance `varcov`.  The
# integration is delegated to the Fortran routine `sadmvn` (a subregion
# adaptive algorithm); maxpts, abseps and releps control its effort and
# tolerances.  The returned probability carries the attributes "error"
# (estimated absolute error) and "status".
sadmvn <- function (lower, upper, mean, varcov, maxpts = 2000 * d, abseps = 1e-06, 
    releps = 0) 
{
    if (any(lower > upper)) 
        stop("lower>upper integration limits")
    if (any(lower == upper)) 
        return(0)
    # `d` is also the lazily evaluated default for `maxpts`.
    d <- as.integer(if (is.matrix(varcov)) ncol(varcov) else 1)
    varcov <- matrix(varcov, d, d)
    # Standardize margins to mean 0 / variance 1, keeping the correlations.
    sd <- sqrt(diag(varcov))
    rho <- cov2cor(varcov)
    lower <- as.double((lower - mean)/sd)
    upper <- as.double((upper - mean)/sd)
    if (d == 1) 
        return(pnorm(upper) - pnorm(lower))
    # Per-margin infinity flags: 2 = both limits finite, 1 = upper infinite,
    # 0 = lower infinite, -1 = both infinite.
    infin <- rep(2, d)
    infin <- replace(infin, (upper == Inf) & (lower > -Inf), 
        1)
    infin <- replace(infin, (upper < Inf) & (lower == -Inf), 
        0)
    infin <- replace(infin, (upper == Inf) & (lower == -Inf), 
        -1)
    infin <- as.integer(infin)
    if (any(infin == -1)) {
        # Margins covering the whole real line contribute a factor of 1;
        # drop them and solve the reduced problem.
        if (all(infin == -1)) 
            return(1)
        k <- which(infin != -1)
        d <- length(k)
        lower <- lower[k]
        upper <- upper[k]
        if (d == 1) 
            return(pnorm(upper) - pnorm(lower))
        rho <- rho[k, k]
        infin <- infin[k]
        if (d == 2) 
            return(biv.nt.prob(0, lower, upper, rep(0, 2), rho))
    }
    # Infinite limits are passed as 0; the infin flags make Fortran skip them.
    lower <- replace(lower, lower == -Inf, 0)
    upper <- replace(upper, upper == Inf, 0)
    # Only the upper triangle of the correlation matrix is passed down.
    correl <- as.double(rho[upper.tri(rho, diag = FALSE)])
    maxpts <- as.integer(maxpts)
    abseps <- as.double(abseps)
    releps <- as.double(releps)
    error <- as.double(0)
    value <- as.double(0)
    inform <- as.integer(0)
    result <- .Fortran("sadmvn", d, lower, upper, infin, correl, 
        maxpts, abseps, releps, error, value, inform, PACKAGE = "mnormt")
    prob <- result[[10]]
    attr(prob, "error") <- result[[9]]
    attr(prob, "status") <- switch(1 + result[[11]], "normal completion", 
        "accuracy non achieved", "oversize")
    return(prob)
}
# Probability of the rectangle {lower < X < upper} for a d-dimensional
# Student's t distribution with `df` degrees of freedom, location `mean` and
# scale matrix S; df = Inf falls back to the normal case (sadmvn).  The
# integration is delegated to the Fortran routine `sadmvt`; the result
# carries the attributes "error" and "status".
sadmvt <- function (df, lower, upper, mean, S, maxpts = 2000 * d, abseps = 1e-06, 
    releps = 0) 
{
    if (df == Inf) 
        return(sadmvn(lower, upper, mean, S, maxpts, abseps, 
            releps))
    if (any(lower > upper)) 
        stop("lower>upper integration limits")
    if (any(lower == upper)) 
        return(0)
    if (round(df) != df) 
        warning("non integer df is rounded to integer")
    df <- as.integer(round(df))
    # `d` is also the lazily evaluated default for `maxpts`.
    d <- as.integer(if (is.matrix(S)) ncol(S) else 1)
    S <- matrix(S, d, d)
    # Standardize margins and keep only the correlation structure.
    sd <- sqrt(diag(S))
    rho <- cov2cor(S)
    lower <- as.double((lower - mean)/sd)
    upper <- as.double((upper - mean)/sd)
    if (d == 1) 
        return(pt(upper, df) - pt(lower, df))
    # Per-margin infinity flags: 2 = both limits finite, 1 = upper infinite,
    # 0 = lower infinite, -1 = both infinite.
    infin <- rep(2, d)
    infin <- replace(infin, (upper == Inf) & (lower > -Inf), 
        1)
    infin <- replace(infin, (upper < Inf) & (lower == -Inf), 
        0)
    infin <- replace(infin, (upper == Inf) & (lower == -Inf), 
        -1)
    infin <- as.integer(infin)
    if (any(infin == -1)) {
        # Drop margins that span the whole real line; each contributes 1.
        if (all(infin == -1)) 
            return(1)
        k <- which(infin != -1)
        d <- length(k)
        lower <- lower[k]
        upper <- upper[k]
        if (d == 1) 
            return(pt(upper, df = df) - pt(lower, df = df))
        rho <- rho[k, k]
        infin <- infin[k]
        if (d == 2) 
            return(biv.nt.prob(df, lower, upper, rep(0, 2), rho))
    }
    # Infinite limits are passed as 0; the infin flags make Fortran skip them.
    lower <- replace(lower, lower == -Inf, 0)
    upper <- replace(upper, upper == Inf, 0)
    correl <- rho[upper.tri(rho, diag = FALSE)]
    maxpts <- as.integer(maxpts)
    abseps <- as.double(abseps)
    releps <- as.double(releps)
    error <- as.double(0)
    value <- as.double(0)
    inform <- as.integer(0)
    result <- .Fortran("sadmvt", d, df, lower, upper, infin, 
        correl, maxpts, abseps, releps, error, value, inform, 
        PACKAGE = "mnormt")
    prob <- result[[11]]
    attr(prob, "error") <- result[[10]]
    attr(prob, "status") <- switch(1 + result[[12]], "normal completion", 
        "accuracy non achieved", "oversize")
    return(prob)
}
# Multivariate normal density evaluated at the rows of x (a plain vector is
# treated as a single d-dimensional point), with mean `mean` (vector, or a
# matrix of per-row means) and covariance `varcov`.  Returns the density or
# its log when log = TRUE.
dmnorm <- function (x, mean = rep(0, d), varcov, log = FALSE) 
{
    d <- if (is.matrix(varcov)) 
        ncol(varcov)
    else 1
    # Univariate case: delegate to dnorm.
    if (d == 1) 
        return(dnorm(x, mean, sqrt(varcov), log = log))
    x <- if (is.vector(x)) 
        t(matrix(x))
    else data.matrix(x)
    if (ncol(x) != d) 
        stop("mismatch of dimensions of 'x' and 'varcov'")
    if (is.matrix(mean)) {
        if ((nrow(x) != nrow(mean)) || (ncol(mean) != d)) 
            stop("mismatch of dimensions of 'x' and 'mean'")
    }
    # Recycle a vector mean into one row per observation.
    if (is.vector(mean)) 
        mean <- outer(rep(1, nrow(x)), as.vector(matrix(mean, 
            d)))
    X <- t(x - mean)
    # Concentration (inverse covariance) matrix plus its log-determinant.
    conc <- pd.solve(varcov, log.det = TRUE)
    # Quadratic forms (x - mean)' conc (x - mean), one per row of x.
    Q <- colSums((conc %*% X) * X)
    log.det <- attr(conc, "log.det")
    logPDF <- as.vector(Q + d * logb(2 * pi) + log.det)/(-2)
    if (log) 
        logPDF
    else exp(logPDF)
}
# Draw n pseudo-random vectors from the d-dimensional normal distribution
# with the given mean and covariance `varcov`.  A pre-computed (upper
# triangular) square root of varcov may be supplied via `sqrt` to skip the
# Cholesky factorization.  Returns an n x d matrix, dropped to a vector
# when either dimension is 1.
rmnorm <- function (n = 1, mean = rep(0, d), varcov, sqrt = NULL) 
{
    ch <- if (is.null(sqrt)) chol(varcov) else sqrt
    d <- if (is.matrix(ch)) ncol(ch) else 1
    # One row of the (recycled) mean per draw; the default for `mean` is
    # evaluated lazily, only after d has been determined above.
    mu <- outer(rep(1, n), as.vector(matrix(mean, d)))
    noise <- t(matrix(rnorm(n * d), d, n)) %*% ch
    drop(mu + noise)
}
# Draw n pseudo-random vectors from the d-dimensional Student's t
# distribution with location `mean`, scale matrix S and `df` degrees of
# freedom (df = Inf degenerates to the multivariate normal).  A pre-computed
# square root of S may be supplied via `sqrt`.
rmt <- function (n = 1, mean = rep(0, d), S, df = Inf, sqrt = NULL) 
{
    ch <- if (is.null(sqrt)) chol(S) else sqrt
    d <- if (is.matrix(ch)) ncol(ch) else 1
    # Chi-square mixing weights; drawn before the normal deviates so the
    # RNG stream order is preserved.
    w <- if (df == Inf) 1 else rchisq(n, df)/df
    z <- rmnorm(n, rep(0, d), sqrt = ch)
    mu <- outer(rep(1, n), as.vector(matrix(mean, d)))
    # `sqrt(w)` resolves to base::sqrt here: R skips non-function bindings
    # (the `sqrt` argument) when looking up a name used as a function.
    drop(mu + z/sqrt(w))
}
# Multivariate Student's t distribution function P(X <= x), evaluated at
# each row of x.  Dispatches to pt (d = 1), the bivariate Fortran routine
# (d = 2, via biv.nt.prob) or sadmvt (d > 2); `...` is passed to sadmvt.
pmt <- function (x, mean = rep(0, d), S, df = Inf, ...) 
{
    d <- NCOL(S)
    # A plain vector is treated as a single d-dimensional point.
    x <- if (is.vector(x)) 
        matrix(x, 1, d)
    else data.matrix(x)
    n <- nrow(x)
    # Recycle a vector mean into one row per evaluation point.
    if (is.vector(mean)) 
        mean <- outer(rep(1, n), as.vector(matrix(mean, d)))
    if (d == 1) 
        p <- as.vector(pt((x - mean)/sqrt(S), df = df))
    else {
        pv <- numeric(n)
        # The chained assignment leaves the last value in `p`, which is the
        # returned scalar when n == 1; for n > 1 `pv` replaces it below.
        for (j in 1:n) p <- pv[j] <- if (d == 2) 
            biv.nt.prob(df, lower = rep(-Inf, 2), upper = x[j, 
                ], mean[j, ], S)
        else sadmvt(df, lower = rep(-Inf, d), upper = x[j, ], 
            mean[j, ], S, ...)
        if (n > 1) 
            p <- pv
    }
    return(p)
}
# Multivariate normal distribution function P(X <= x), evaluated at each
# row of x.  Dispatches to pnorm (d = 1), the bivariate Fortran routine
# (d = 2, via biv.nt.prob with df encoded as 0) or sadmvn (d > 2); `...`
# is passed to sadmvn.
pmnorm <- function (x, mean = rep(0, d), varcov, ...) 
{
    d <- NCOL(varcov)
    # A plain vector is treated as a single d-dimensional point.
    x <- if (is.vector(x)) 
        matrix(x, 1, d)
    else data.matrix(x)
    n <- nrow(x)
    # Recycle a vector mean into one row per evaluation point.
    if (is.vector(mean)) 
        mean <- outer(rep(1, n), as.vector(matrix(mean, d)))
    if (d == 1) 
        p <- as.vector(pnorm(x, mean, sqrt(varcov)))
    else {
        pv <- numeric(n)
        # The chained assignment leaves the last value in `p`, which is the
        # returned scalar when n == 1; for n > 1 `pv` replaces it below.
        for (j in 1:n) p <- pv[j] <- if (d == 2) 
            biv.nt.prob(0, lower = rep(-Inf, 2), upper = x[j, 
                ], mean[j, ], varcov)
        else sadmvn(lower = rep(-Inf, d), upper = x[j, ], mean[j, 
            ], varcov, ...)
        if (n > 1) 
            p <- pv
    }
    return(p)
}
# Invert a symmetric positive-definite matrix via its Cholesky factor.
#
# Args:
#   x       symmetric positive-definite matrix (a scalar is treated as 1x1);
#           NULL is passed through unchanged.
#   silent  if TRUE, return NULL instead of stopping on invalid input.
#   log.det if TRUE, attach the log-determinant of x as attribute "log.det".
# Returns the inverse of x (with reversed dimnames), or NULL in silent mode.
pd.solve <- function (x, silent = FALSE, log.det = FALSE) 
{
    if (is.null(x)) 
        return(NULL)
    if (any(is.na(x))) {
        if (silent) 
            return(NULL)
        else stop("NA's in x")
    }
    if (length(x) == 1) 
        x <- as.matrix(x)
    if (!is.matrix(x)) {
        if (silent) 
            return(NULL)
        else stop("x is not a matrix")
    }
    if (max(abs(x - t(x))) > .Machine$double.eps) {
        if (silent) 
            return(NULL)
        else stop("x appears to be not symmetric")
    }
    # Force exact symmetry before factorizing.
    x <- (x + t(x))/2
    u <- try(chol(x, pivot = FALSE), silent = silent)
    # BUG FIX: the original tested class(u) == "try-error".  On success chol()
    # returns a matrix, whose class is c("matrix", "array") in R >= 4.0, so
    # the comparison yields a length-2 logical and if() fails.  inherits() is
    # the correct, always length-1 test.
    if (inherits(u, "try-error")) {
        if (silent) 
            return(NULL)
        else stop("x appears to be not positive definite")
    }
    inv <- chol2inv(u)
    if (log.det) 
        # log det(x) = 2 * sum of logs of the Cholesky diagonal.
        attr(inv, "log.det") <- 2 * sum(log(diag(u)))
    dimnames(inv) <- rev(dimnames(x))
    return(inv)
}
# Multivariate Student's t density at the rows of x (df = Inf gives the
# multivariate normal via dmnorm), with location `mean` (vector, or a matrix
# of per-row locations) and scale matrix S.  Returns the density or its log.
dmt <- function (x, mean = rep(0, d), S, df = Inf, log = FALSE) 
{
    if (df == Inf) 
        return(dmnorm(x, mean, S, log = log))
    d <- if (is.matrix(S)) 
        ncol(S)
    else 1
    # Univariate case: shifted/scaled t density.
    if (d == 1) {
        y <- dt((x - mean)/sqrt(S), df = df, log = log)
        if (log) 
            y <- (y - 0.5 * logb(S))
        else y <- y/sqrt(S)
        return(y)
    }
    # A plain vector is treated as a single d-dimensional point.
    x <- if (is.vector(x)) 
        t(matrix(x))
    else data.matrix(x)
    if (ncol(x) != d) 
        stop("mismatch of dimensions of 'x' and 'varcov'")
    if (is.matrix(mean)) {
        if ((nrow(x) != nrow(mean)) || (ncol(mean) != d)) 
            stop("mismatch of dimensions of 'x' and 'mean'")
    }
    # Recycle a vector mean into one row per observation.
    if (is.vector(mean)) 
        mean <- outer(rep(1, nrow(x)), as.vector(matrix(mean, 
            d)))
    X <- t(x - mean)
    # Inverse scale matrix with its log-determinant (see pd.solve).
    S.inv <- pd.solve(S, log.det = TRUE)
    # Mahalanobis-type quadratic form, one value per observation.
    Q <- colSums((S.inv %*% X) * X)
    logDet <- attr(S.inv, "log.det")
    # Log of the standard multivariate t density formula.
    logPDF <- (lgamma((df + d)/2) - 0.5 * (d * logb(pi * df) + 
        logDet) - lgamma(df/2) - 0.5 * (df + d) * logb(1 + Q/df))
    if (log) 
        logPDF
    else exp(logPDF)
}
## Package Data
# none
## Package Info
# Metadata fields consumed by the IDE's R-package-skeleton loader.
.skeleton_package_title = "The Multivariate Normal and t Distributions"
.skeleton_package_version = "1.5-5"
.skeleton_package_depends = ""
.skeleton_package_imports = ""
## Internal
# Version of the skeleton file format itself.
.skeleton_version = 5
## EOF
425834e071f5999c44e0d7deaaa23e5295f50d48 | 52fad9a4cadc7a8072def98e192e3b6c0df179f0 | /data-raw/DATASET.R | 3a592b6dae8f6fa4aeb463e26930aec39276dd20 | [
"MIT"
] | permissive | jmszetela/traitstrap | a910dd68afb9fb62fb6652fcf67afd91555a7dab | 43431e38450b5f3adf81649b99b4e867011dc736 | refs/heads/main | 2023-04-22T11:57:06.693264 | 2021-05-06T22:16:24 | 2021-05-06T22:16:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | r | DATASET.R | library("tidyverse")
#### community ####
load("data-raw/community.RData")
# Coerce Cover to numeric; values that fail to parse become NA and the
# corresponding rows are dropped before export.
community <- community %>%
  mutate(Cover = as.numeric(Cover)) %>%
  filter(!is.na(Cover))
usethis::use_data(community)
#### trait ####
load("data-raw/trait.RData")
# Keep only trait records that have a measured Value.
trait <- trait %>% filter(!is.na(Value))
usethis::use_data(trait)
|
b2a6f4dc87dd244c10010fcd70676efe4a849497 | 364dcb95aac6dff3f8548768dc99bba945ec81b6 | /data-raw/luv_colours.R | ff25676a6b7fc385fd84304952ca8227bf33b967 | [
"MIT"
] | permissive | tidyverse/ggplot2 | 3ef62b72861c246b13ffc2d95678079984fe65c0 | c76b9aeda648e9b6022b7169021e854c3d3890cb | refs/heads/main | 2023-08-31T07:08:20.846510 | 2023-08-17T16:19:44 | 2023-08-17T16:19:44 | 19,438 | 4,632 | 1,971 | NOASSERTION | 2023-09-14T13:25:40 | 2008-05-25T01:21:32 | R | UTF-8 | R | false | false | 159 | r | luv_colours.R | luv_colours <- as.data.frame(convertColor(t(col2rgb(colors())), "sRGB", "Luv"))
# Attach the R colour name to each row of Luv coordinates.
luv_colours$col <- colors()
# NOTE(review): devtools::use_data() is deprecated in current devtools;
# usethis::use_data() is its modern home -- confirm before switching.
devtools::use_data(luv_colours, overwrite = TRUE)
|
cd27bae3ab7da1e245f80fa1b9c5931c1ce97c45 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/FDRSeg/inst/testfiles/smuce_cpp/smuce_cpp_output/log_d54768b7cb0aa48ca59c3cb777a9613367bd064c/smuce_cpp-test.R | 5fc20e3485363d262b31065123ea11d4cfc4a511 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 571 | r | smuce_cpp-test.R | testlist <- list(q = 8.72400335545415e-16, sd = 7.84869121851595e-78, Y = c(-9.9122759758109e-57, -1.64035674249925e-118, 1.05390745649826e+182, 8.05606797801028e-38, -3.84354256622327e+136, 2.36081589563427e-100, 3.01724164497879e-124, 9.57301250646523e+229, -7.99809293195875e-243, 3.06030363511005e-110, 3.27695505950329e-93, -2.10373847983487e+36, -5.29514598795293e-20, 9.81127061830259e+30, -3403.88378037291, -364839.827923978, -8.45393891276032e+179, -4.53552102535884e+269, 3.95634524110619e-268))
# Auto-generated RcppDeepState fuzz harness: invoke the package-internal C++
# routine with the pre-built `testlist` arguments and show the result shape.
result <- do.call(FDRSeg:::smuce_cpp,testlist)
str(result)
8c871994c18bfb653a8f14e1697b3e23448df8c9 | cee79f82bf59e1164624d887a20de3089e335ee2 | /utils.R | d4b5feb45082e5d81ed95d44c8ad6d7cb1de2901 | [] | no_license | SergeyPry/NextWordPredict | b5c11580b1f91afdc19e9f3997d71960bf47d7ab | d8d2de929a57d81a25f5170770b1ef6fcd475443 | refs/heads/master | 2021-01-22T23:00:58.077573 | 2017-09-05T02:12:31 | 2017-09-05T02:12:31 | 102,422,569 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,460 | r | utils.R | # read in the profanity words
# Profanity lexicon used by cleanInput() to censor user text; swearWords.txt
# holds one word per line with no header, so read.csv yields column V1.
profanityWords <-read.csv("swearWords.txt", header = F)
profanityWords <- profanityWords$V1
# define a function to clean up the input
# Normalize raw user text for n-gram lookup using the tm package: strip
# digits, lower-case, strip punctuation, censor profanity and collapse
# whitespace, applied in that order.
#
# Args:
#   inputText      a character string typed by the user.
#   profanityWords character vector of words to remove.
# Returns the cleaned string, or "" when nothing survives cleaning.
cleanInput <- function(inputText, profanityWords) {
  corpus <- VCorpus(VectorSource(inputText))
  corpus <- tm_map(corpus, removeNumbers)
  corpus <- tm_map(corpus, content_transformer(tolower))
  corpus <- tm_map(corpus, removePunctuation)
  corpus <- tm_map(corpus, removeWords, profanityWords)
  corpus <- tm_map(corpus, stripWhitespace)
  cleaned <- as.character(corpus[[1]])
  if (nchar(cleaned) > 0) {
    cleaned
  } else {
    ""
  }
}
# define the function to do the prediction
# Predict likely next words with "stupid backoff" over stored n-gram tables.
#
# The cleaned input's final trigram is matched against 4-grams, its final
# bigram against trigrams and its final word against bigrams; each backoff
# level is discounted by a factor of 0.4 and unigram frequencies serve as
# the last resort.
#
# Args:
#   input      user-typed text; at least three words are assumed after
#              cleaning (shorter input yields NA components in the lookups).
#   maxResults maximum number of candidate words to return.
#   dbname     SQLite file holding the Unigrams/Bigrams/Trigrams/Quadgrams
#              tables.  Defaults to "NGrams.sqlite" (the old hard-coded
#              value, so existing callers are unaffected); pass
#              "NGrams2.sqlite" to query the larger model.
# Returns a character vector of up to maxResults distinct candidates
# (padded with NA when fewer are found), or a warning string when the
# cleaned input is empty.
predictNextWord <- function(input, maxResults = 5, dbname = "NGrams.sqlite") {
  # Clean the input and bail out early if nothing usable remains.
  input <- str_trim(input, side = "both")
  input <- cleanInput(input, profanityWords)
  if (input == '' | input == "na na") return('Warning: Just input something')
  wordList <- strsplit(input, split = " ")[[1]]
  numWords <- length(wordList)
  # Candidate sets: final trigram vs 4-grams, final bigram vs trigrams,
  # final word vs bigrams.
  input <- paste(wordList[numWords - 2], wordList[numWords - 1], wordList[numWords], sep = ' ')
  input <- str_trim(input, side = "both")
  sub4 <- sqldf(paste("SELECT * FROM Quadgrams WHERE trigram LIKE '", input, "'", sep = "", collapse = NULL),
                dbname = dbname)
  input2 <- paste(wordList[numWords - 1], wordList[numWords], sep = " ")
  input2 <- str_trim(input2, side = "both")
  sub3 <- sqldf(paste("SELECT * FROM Trigrams WHERE bigram LIKE '", input2, "'", sep = "", collapse = NULL),
                dbname = dbname)
  input3 <- wordList[numWords]
  sub2 <- sqldf(paste("SELECT * FROM Bigrams WHERE unigram LIKE '", input3, "'", sep = "", collapse = NULL),
                dbname = dbname)
  # Unigram fallback scores, discounted by 0.4^2 = 0.16.
  unigrams <- sqldf("SELECT * FROM Unigrams", dbname = dbname)
  unigrams$s <- unigrams$freq / nrow(unigrams) * 0.16
  if (nrow(sub4) == 0) {
    if (nrow(sub3) == 0) {
      if (nrow(sub2) == 0) {
        # Nothing matched at any level: return the most frequent unigrams.
        # BUG FIX: honour maxResults instead of a hard-coded top 5.
        useuni <- unigrams[order(unigrams$s, decreasing = TRUE), ]
        return(useuni[1:maxResults, ]$word)
      } else {
        # Only the last word matched some bigrams; score them conditionally
        # on the unigram count of that word when available.
        input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                            dbname = dbname)
        if (nrow(input1gram) > 0) {
          sub2$s <- 0.4 * 0.4 * sub2$freq / input1gram$freq
        } else {
          sub2$s <- 0.4 * 0.4 * 0.4 * sub2$freq
        }
        names <- c(word(sub2$word, -1))
        score <- c(sub2$s)
      }
    } else {
      # The final bigram matched some trigrams.
      input2gram <- sqldf(paste("SELECT * FROM Bigrams WHERE word LIKE '", input2, "'", sep = "", collapse = NULL),
                          dbname = dbname)
      if (nrow(input2gram) > 0) {
        sub3$s <- 0.4 * sub3$freq / input2gram$freq
      } else {
        sub3$s <- 0.4 * 0.4 * sub3$freq
      }
      if (nrow(sub3) < maxResults) {
        # Back off to bigram candidates to fill the remaining slots.
        input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                            dbname = dbname)
        if (nrow(input1gram) > 0) {
          sub2$s <- 0.4 * 0.4 * sub2$freq / input1gram$freq
        } else {
          sub2$s <- 0.4 * 0.4 * 0.4 * sub2$freq
        }
        names <- c(word(sub3$word, -1), word(sub2$word, -1))
        score <- c(sub3$s, sub2$s)
      } else {
        names <- c(word(sub3$word, -1))
        score <- c(sub3$s)
      }
    }
  } else {
    # The final trigram matched some 4-grams: score them by conditional
    # frequency (4-gram count over the count of its trigram prefix).
    input3gram <- sqldf(paste("SELECT * FROM Trigrams WHERE word LIKE '", input, "'", sep = "", collapse = NULL),
                        dbname = dbname)
    if (nrow(input3gram) > 0) {
      sub4$s <- sub4$freq / input3gram$freq
    } else {
      sub4$s <- 0.4 * sub4$freq
    }
    names <- c(word(sub4$word, -1))
    score <- c(sub4$s)
    if (nrow(sub4) < maxResults) {
      # Back off to trigram candidates.
      input2gram <- sqldf(paste("SELECT * FROM Bigrams WHERE word LIKE '", input2, "'", sep = "", collapse = NULL),
                          dbname = dbname)
      if (nrow(input2gram) > 0) {
        sub3$s <- 0.4 * sub3$freq / input2gram$freq
      } else {
        sub3$s <- 0.4 * 0.4 * sub3$freq
      }
      names <- c(names, word(sub3$word, -1))
      score <- c(score, sub3$s)
    }
    if ((nrow(sub4) + nrow(sub3)) < maxResults) {
      # Back off one more level, to bigram candidates.
      input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                          dbname = dbname)
      if (nrow(input1gram) > 0) {
        sub2$s <- 0.4 * 0.4 * sub2$freq / input1gram$freq
      } else {
        sub2$s <- 0.4 * 0.4 * 0.4 * sub2$freq
      }
      names <- c(names, word(sub2$word, -1))
      score <- c(score, sub2$s)
    }
  }
  # Rank all candidates by score, deduplicate, and return the top ones.
  predictWord <- data.frame(next_word = names, score = score, stringsAsFactors = FALSE)
  predictWord <- predictWord[order(predictWord$score, decreasing = TRUE), ]
  final <- unique(predictWord$next_word)
  return(final[1:maxResults])
}
# Another version of the prediction function for the larger database
# Same "stupid backoff" next-word predictor as predictNextWord(), but
# querying the larger n-gram database NGrams2.sqlite.
# NOTE(review): this duplicates predictNextWord() verbatim except for the
# dbname of every sqldf() call -- consider one shared implementation
# parameterized by database file.
predictNextWord2 <-function(input, maxResults = 5) {
  # clean input and verify that the input is still valid
  input <- str_trim(input, side= "both")
  input <- cleanInput(input, profanityWords)
  if(input == ''|input == "na na") return('Warning: Just input something')
  # figure out how many words there are in the input
  wordList <- strsplit(input, split=" ")[[1]]
  numWords <- length(wordList)
  # Case 1: there are 3 or more words in the input
  # Since our maximum N-gram is 4-gram, then we can match the trigrams against 4-grams and figure out
  # which match and so on.
  # find a subset of four-gram dataset which matches a trigram derived from our input
  input <- paste(wordList[numWords-2], wordList[numWords-1], wordList[numWords], sep = ' ')
  input <- str_trim(input, side= "both")
  sub4 <- sqldf(paste("SELECT * FROM Quadgrams WHERE trigram LIKE '", input, "'", sep = "", collapse = NULL),
                dbname ="NGrams2.sqlite")
  # find a subset of trigram dataset which matches a bigram derived from our input
  input2 <- paste(wordList[numWords-1], wordList[numWords], sep = " ")
  input2 <- str_trim(input2, side= "both")
  sub3 <- sqldf(paste("SELECT * FROM Trigrams WHERE bigram LIKE '", input2, "'", sep = "", collapse = NULL),
                dbname ="NGrams2.sqlite")
  # find a subset of bigram dataset which matches a unigram derived from our input
  input3 <- wordList[numWords]
  sub2 <- sqldf(paste("SELECT * FROM Bigrams WHERE unigram LIKE '", input3, "'", sep = "", collapse = NULL),
                dbname ="NGrams2.sqlite")
  # define scores for the unigram data (0.16 = 0.4^2 backoff discount)
  unigrams <- sqldf("SELECT * FROM Unigrams", dbname ="NGrams2.sqlite")
  unigrams$s <- unigrams$freq/nrow(unigrams)*0.16
  if(nrow(sub4) == 0) {
    if(nrow(sub3) ==0){
      if(nrow(sub2) == 0){
        # select top 5 unigrams
        # NOTE(review): returns the top 5 regardless of maxResults.
        useuni <- unigrams[order(unigrams$s,decreasing = T),]
        return(useuni[1:5,]$word)
      } else{ # the last word matched some bigrams
        # get 1-gram data
        input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                            dbname ="NGrams2.sqlite")
        if(nrow(input1gram) > 0){
          sub2$s <- 0.4*0.4*sub2$freq/input1gram$freq
        } else{
          sub2$s <- 0.4*0.4*0.4*sub2$freq
        }
        # define vectors with the results
        names <- c(word(sub2$word, -1))
        score <- c(sub2$s)
      }
    } else { # a bigram from the input matched entry/entries in a 3-gram table
      # define the scores for the sub3 hits, which are a subset of freq3, which matched a bigram from the input
      input2gram <- sqldf(paste("SELECT * FROM Bigrams WHERE word LIKE '", input2, "'", sep = "", collapse = NULL),
                          dbname ="NGrams2.sqlite")
      if(nrow(input2gram) > 0){
        sub3$s <- 0.4*sub3$freq/input2gram$freq
      } else {
        sub3$s <- 0.4*0.4*sub3$freq
      }
      # obtain data for the sub2 dataset if there is not enough data already
      if(nrow(sub3) < maxResults){
        input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                            dbname ="NGrams2.sqlite")
        if(nrow(input1gram) > 0){
          sub2$s <- 0.4*0.4*sub2$freq/input1gram$freq
        } else{
          sub2$s <- 0.4*0.4*0.4*sub2$freq
        }
        # define vectors with the results
        names <- c(word(sub3$word, -1), word(sub2$word, -1))
        score <- c(sub3$s, sub2$s)
      } else {
        # define vectors with the results
        names <- c(word(sub3$word, -1))
        score <- c(sub3$s)
      }
    }
  } else { # a trigram from the input matched entry/entries in a 4-gram table
    # logically, at least one of trigrams, bigrams and unigrams will exist
    # define scores for the sub4 hits, which are a subset of freq4, which matched a trigram from the input
    input3gram <- sqldf(paste("SELECT * FROM Trigrams WHERE word LIKE '", input, "'", sep = "", collapse = NULL),
                        dbname ="NGrams2.sqlite")
    if(nrow(input3gram) > 0){
      sub4$s <- sub4$freq/input3gram$freq
    } else{
      sub4$s <- 0.4*sub4$freq
    }
    names <- c(word(sub4$word, -1))
    score <- c(sub4$s)
    # define the scores for the sub3 hits, which are a subset of freq3, which matched a bigram from the input
    if(nrow(sub4) < maxResults){
      input2gram <- sqldf(paste("SELECT * FROM Bigrams WHERE word LIKE '", input2, "'", sep = "", collapse = NULL),
                          dbname ="NGrams2.sqlite")
      if(nrow(input2gram) > 0){
        sub3$s <- 0.4*sub3$freq/input2gram$freq
      } else {
        sub3$s <- 0.4*0.4*sub3$freq
      }
      names <- c(names, word(sub3$word, -1))
      score <- c(score, sub3$s)
    }
    # process the next level (back off to bigram candidates)
    if( (nrow(sub4) + nrow(sub3)) < maxResults){
      input1gram <- sqldf(paste("SELECT * FROM Unigrams WHERE word LIKE '", input3, "'", sep = "", collapse = NULL),
                          dbname ="NGrams2.sqlite")
      if(nrow(input1gram) > 0){
        sub2$s <- 0.4*0.4*sub2$freq/input1gram$freq
      } else{
        sub2$s <- 0.4*0.4*0.4*sub2$freq
      }
      # define vectors with the results
      names <- c(names, word(sub2$word, -1))
      score <- c(score, sub2$s)
    }
  }
  # rank all candidates by score and deduplicate before returning
  predictWord <- data.frame(next_word=names,score=score,stringsAsFactors = F)
  predictWord <- predictWord[order(predictWord$score,decreasing = T),]
  # in case replicated
  final <- unique(predictWord$next_word)
  return(final[1:maxResults])
}
|
da5c8136dcc92f054e47a2022d5dd01e5b95bf9a | 7effe9458b70b727f114f57279a4e858e87be451 | /man/get_palette.Rd | e2d98e398bae81f1528eb8571979eb7ab09c8fcb | [
"MIT"
] | permissive | saisaitian/dseqr | 09f26c346545e36d1614dbd49664d3d71da60592 | ff2f99f35331035eed5ab688f20b752031eb00d8 | refs/heads/master | 2023-06-21T17:10:15.590518 | 2021-08-05T23:48:47 | 2021-08-05T23:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 447 | rd | get_palette.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_scseq.R
\name{get_palette}
\alias{get_palette}
\title{Get a palette for cluster plots}
\usage{
get_palette(levs, dark = FALSE, with_all = FALSE)
}
\arguments{
\item{levs}{Character vector of levels to get colour palette for.}
}
\value{
Character vector with colour codes of \code{length(levs)}.
}
\description{
Get a palette for cluster plots
}
\keyword{internal}
|
6cc37ee504b23d5ca0c36fb8648b226a0daf964d | c091c70f8c4dfa1a4eb474ab20f5e30199858343 | /man/cv.gmf.Rd | 76e87ef5897dea90f644544b0d8e63d1c8d2a06d | [] | no_license | andland/generalizedPCA | d25a21794794560d0eecfc1d777f0e5ec28c18de | 36bccdd4bf82757b4a69c09cc763736a9d4aa4cd | refs/heads/master | 2021-01-17T15:06:55.758311 | 2018-08-04T15:56:26 | 2018-08-04T15:56:26 | 32,023,622 | 28 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,202 | rd | cv.gmf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generalizedMF.R
\name{cv.gmf}
\alias{cv.gmf}
\title{CV for generalized MF}
\usage{
cv.gmf(x, ks, family = c("gaussian", "binomial", "poisson", "multinomial"),
folds = 5, quiet = TRUE, ...)
}
\arguments{
\item{x}{matrix of either binary, count, or continuous data}
\item{ks}{the different dimensions \code{k} to try}
\item{family}{exponential family distribution of data}
\item{folds}{if \code{folds} is a scalar, then it is the number of folds. If
it is a vector, it should be the same length as the number of rows in \code{x}}
\item{quiet}{logical; whether the function should display progress}
\item{...}{Additional arguments passed to generalizedMF}
}
\value{
A matrix of the CV deviance with \code{k} in rows
}
\description{
Run cross validation on dimension for generalized MF
}
\examples{
# construct a low rank matrix in the logit scale
rows = 100
cols = 10
set.seed(1)
mat_logit = outer(rnorm(rows), rnorm(cols))
# generate a binary matrix
mat = (matrix(runif(rows * cols), rows, cols) <= inv.logit.mat(mat_logit)) * 1.0
\dontrun{
deviances = cv.gmf(mat, ks = 1:9, family = "binomial")
plot(deviances)
}
}
|
ffbb8c9014b3dd5c272517ea05ac19332dfebce5 | effe14a2cd10c729731f08b501fdb9ff0b065791 | /paws/man/guardduty_delete_ip_set.Rd | bae3acb6d3c5dcbe00cde9b320b846f6ceaa61b3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 642 | rd | guardduty_delete_ip_set.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_delete_ip_set}
\alias{guardduty_delete_ip_set}
\title{Deletes the IPSet specified by the IPSet ID}
\usage{
guardduty_delete_ip_set(DetectorId, IpSetId)
}
\arguments{
\item{DetectorId}{[required] The unique ID of the detector the ipSet is associated with.}
\item{IpSetId}{[required] The unique ID of the ipSet you want to delete.}
}
\description{
Deletes the IPSet specified by the IPSet ID.
}
\section{Request syntax}{
\preformatted{svc$delete_ip_set(
DetectorId = "string",
IpSetId = "string"
)
}
}
\keyword{internal}
|
c0c116cf494289b86923e93b82f1d8b47d5e022b | c90e263ccf61d5d6c0bc33c715c85beec9cf6ef1 | /man/Cen.Rd | 0b722aacf3a58b2314c3cfc384dfa75fd0e423d3 | [] | no_license | cran/NADA | d68537895922ff0d882fbd1e4db0ea5bc8b83bb1 | b3584c690049bd498b427db75ebee4fadcb24bb7 | refs/heads/master | 2020-12-24T13:18:24.287687 | 2020-03-22T08:46:09 | 2020-03-22T08:46:09 | 17,681,060 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 4,264 | rd | Cen.Rd | \name{Cen}
\alias{Cen}
\title{
Create a Censored Object
}
\description{
Create a censored object, usually used as a response variable in a model
formula.
}
%\synopsis{Cen(obs, obs2, censored, type, origin)}
\usage{
Cen(obs, censored, type = "left")
%or
%Cen(time, time2, event, type=, origin=0)
%is.Cen(x)
}
\arguments{
\item{obs}{
A numeric vector of observations. This includes both censored
and uncensored observations.
}
\item{censored}{
A logical vector indicating TRUE where an observation in obs is
censored (a less-than value) and FALSE otherwise.
}
%\item{x}{
% A \code{Cen} object constructed using the \code{Cen} function.
%}
%\item{time2}{
%ending time of the interval for interval censored or counting process
%data only.
%Intervals are
%assumed to be open on the left and closed on the right, \code{(start, end]}.
%For counting process data,
%\code{event} indicates whether an event occurred at the end of the interval.
%}
\item{type}{
character string specifying the type of censoring. Possible values are
\code{"right"}, \code{"left"}, \code{"counting"}, \code{"interval"},
or \code{"interval2"}. The default is \code{"left"}.
%or \code{"counting"} depending on whether
%the \code{time2} argument is absent or present, respectively.
}
%\item{origin}{
%for counting process data, the hazard function origin. This is most often
%used in conjunction with a model containing time dependent strata in order
%to align the subjects properly when they cross over from one strata to
%another.
%}
}
\value{
An object of class \code{Cen}.
% There are methods for \code{print},
% \code{is.na}, and subscripting censored objects. To include a
% censored object inside a data frame, use the \code{I()} function.
% \code{Cen} objects are implemented as a matrix of 2 or 3 columns.
% In the case of \code{is.Cen}, a logical value \code{T} if \code{x}
% inherits from class \code{"Cen"}, otherwise an \code{F}.
}
\section{details}{
This, and related routines, are front ends to routines in the
\code{survival} package. Since the survival routines can not handle
left-censored data, these routines transparently handle ``flipping"
input data and resultant calculations. The \code{Cen} function provides
part of the necessary framework for flipping.
}
%\section{DETAILS}{
% In theory it is possible to represent interval censored data without a
% third column containing the explicit status. Exact, right censored,
% left censored and interval censored observation would be represented
% as intervals of (a,a), (a, infinity), (-infinity,b), and (a,b)
% respectively; each specifying the interval within which the event
% is known to have occurred.
%
% If \code{type = "interval2"} then the representation given above is
% assumed, with NA taking the place of infinity. If `type="interval"
% \code{event} must be given. If \code{event} is \code{0}, \code{1},
% or \code{2}, the relevant information is assumed to be contained in
% \code{time}, the value in \code{time2} is ignored, and the second column
% of the result will contain a placeholder.
%
% Presently, the only methods allowing interval censored data are the
% parametric models computed by \code{cenreg}, so the distinction
% between open and closed intervals is unimportant. The distinction
% is important for counting process data and the Cox model.
%
% The function tries to distinguish between the use of 0/1 and
% 1/2 coding for left and right censored data using \code{if
% (max(status)==2)}. If 1/2 coding is used and all the subjects are
% censored, it will guess wrong. Use 0/1 coding in this case.
%}
\references{
Helsel, Dennis R. (2005). Nondectects and Data Analysis; Statistics for
censored environmental data. John Wiley and Sons, USA, NJ.
}
\author{
R. Lopaka Lee <rclee@usgs.gov>
Dennis Helsel <dhelsel@practicalstats.com>
}
\seealso{
\code{\link{cenfit}},
\code{\link{flip-methods}}
}
\examples{
obs = c(0.5, 0.5, 1.0, 1.5, 5.0, 10, 100)
censored = c(TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE)
Cen(obs, censored)
flip(Cen(obs, censored))
}
\keyword{survival}
|
94d11f6b542166807fa4f48e10477c60cbaa53c9 | f408753b951e4d384b5960d6db31244615bdc3f8 | /estimate/server.R | df213d7f91d12b7818fa79ee91d0aac3803a2788 | [
"MIT"
] | permissive | wwwaylon/estimate | 9f9ae2cb2799c361baea1c203d83ba14a3d34f5d | ae76866b776d0ec4d3c886a0b81f621f1d35d03c | refs/heads/main | 2023-01-18T15:53:35.847049 | 2020-12-02T07:44:53 | 2020-12-02T07:44:53 | 317,754,182 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,694 | r | server.R | library(shiny)
library(shinythemes)
shinyServer(function(input, output, session){
vals <- reactiveValues()
observe({
#Example 1
vals$protocol_LB <- input$num_1[1]
vals$protocol_UB <- input$num_1[2]
vals$p_protocol <- input$per_1
vals$data_LB <- input$num_2[1]
vals$data_UB <- input$num_2[2]
vals$p_data <- input$per_2
vals$random_LB <- input$num_3[1]
vals$random_UB <- input$num_3[2]
vals$p_random <- input$per_3
vals$newReport <- input$num_4
vals$reports <- input$num_5
vals$r_reports <- input$r_num_5
vals$newDSMB <- input$num_6
vals$DSMB <- input$num_7
vals$r_DSMB <- input$r_num_7
vals$stand_LB <- input$num_8[1]
vals$stand_UB <- input$num_8[2]
vals$p_stand <- input$per_4
vals$prep_LB <- input$num_9[1]
vals$prep_UB <- input$num_9[2]
vals$p_prep <- input$per_5
vals$table_LB <- input$num_10[1]
vals$table_UB <- input$num_10[2]
vals$p_table <- input$per_6
vals$paper_LB <- input$num_11[1]
vals$paper_UB <- input$num_11[2]
vals$p_paper <- input$per_7
vals$adv_LB <- input$num_12[1]
vals$adv_UB <- input$num_12[2]
vals$p_adv <- input$per_8
vals$misc <- input$num_13
#Example 2
vals$M1_LB <- input$Mnum_1[1]
vals$M1_UB <- input$Mnum_1[2]
vals$p_M1 <- input$Mper_1
vals$M2_LB <- input$Mnum_2[1]
vals$M2_UB <- input$Mnum_2[2]
vals$p_M2 <- input$Mper_2
vals$M3_LB <- input$Mnum_3[1]
vals$M3_UB <- input$Mnum_3[2]
vals$p_M3 <- input$Mper_3
vals$M4_LB <- input$Mnum_4[1]
vals$M4_UB <- input$Mnum_4[2]
vals$p_M4 <- input$Mper_4
vals$M5_LB <- input$Mnum_5[1]
vals$M5_UB <- input$Mnum_5[2]
vals$p_M5 <- input$Mper_5
vals$M6_LB <- input$Mnum_6[1]
vals$M6_UB <- input$Mnum_6[2]
vals$p_M6 <- input$Mper_6
vals$M7_LB <- input$Mnum_7[1]
vals$M7_UB <- input$Mnum_7[2]
vals$p_M7 <- input$Mper_7
vals$M8_LB <- input$Mnum_8[1]
vals$M8_UB <- input$Mnum_8[2]
vals$p_M8 <- input$Mper_8
vals$M9_LB <- input$Mnum_9[1]
vals$M9_UB <- input$Mnum_9[2]
vals$p_M9 <- input$Mper_9
vals$M10_LB <- input$Mnum_10[1]
vals$M10_UB <- input$Mnum_10[2]
vals$p_M10 <- input$Mper_10
vals$M11_LB <- input$Mnum_11[1]
vals$M11_UB <- input$Mnum_11[2]
vals$p_M11 <- input$Mper_11
vals$M12_LB <- input$Mnum_12[1]
vals$M12_UB <- input$Mnum_12[2]
vals$p_M12 <- input$Mper_12
vals$M13_LB <- input$Mnum_13[1]
vals$M13_UB <- input$Mnum_13[2]
vals$p_M13 <- input$Mper_13
vals$M14_LB <- input$Mnum_14[1]
vals$M14_UB <- input$Mnum_14[2]
vals$p_M14 <- input$Mper_14
vals$M15_LB <- input$Mnum_15[1]
vals$M15_UB <- input$Mnum_15[2]
vals$p_M15 <- input$Mper_15
vals$M16_LB <- input$Mnum_16[1]
vals$M16_UB <- input$Mnum_16[2]
vals$p_M16 <- input$Mper_16
vals$M17 <- input$Mnum_17
vals$M18 <- input$Mnum_18
vals$r_M18 <- input$Mr_num_18
vals$M19 <- input$Mnum_19
})
#Example 1
output$text_calTotal <- renderText({
paste("Total Example 1 Estimate =", round( (
(1/(((vals$p_protocol/100)/vals$protocol_LB) + ((1-(vals$p_protocol/100))/vals$protocol_UB)))
+(1/(((vals$p_data/100)/vals$data_LB) + ((1-(vals$p_data/100))/vals$data_UB)))
+(1/(((vals$p_random/100)/vals$random_LB) + ((1-(vals$p_random/100))/vals$random_UB)))
+vals$newReport
+(vals$reports*vals$r_reports)
+vals$newDSMB
+(vals$DSMB*vals$r_DSMB)
+(1/(((vals$p_stand/100)/vals$stand_LB) + ((1-(vals$p_stand/100))/vals$stand_UB)))
+(1/(((vals$p_prep/100)/vals$prep_LB) + ((1-(vals$p_prep/100))/vals$prep_UB)))
+(1/(((vals$p_table/100)/vals$table_LB) + ((1-(vals$p_table/100))/vals$table_UB)))
+(1/(((vals$p_paper/100)/vals$paper_LB) + ((1-(vals$p_paper/100))/vals$paper_UB)))
+(1/(((vals$p_adv/100)/vals$adv_LB) + ((1-(vals$p_adv/100))/vals$adv_UB)))
+vals$misc), digits = 2), "hours" )
})
#Example 2
output$MetaTotal <- renderText({
paste("Total Example 2 estimate =", round( (
(1/(((vals$p_M1/100)/vals$M1_LB) + ((1-(vals$p_M1/100))/vals$M1_UB)))
+(1/(((vals$p_M2/100)/vals$M2_LB) + ((1-(vals$p_M2/100))/vals$M2_UB)))
+(1/(((vals$p_M3/100)/vals$M3_LB) + ((1-(vals$p_M3/100))/vals$M3_UB)))
+(1/(((vals$p_M4/100)/vals$M4_LB) + ((1-(vals$p_M4/100))/vals$M4_UB)))
+(1/(((vals$p_M5/100)/vals$M5_LB) + ((1-(vals$p_M5/100))/vals$M5_UB)))
+(1/(((vals$p_M6/100)/vals$M6_LB) + ((1-(vals$p_M6/100))/vals$M6_UB)))
+(1/(((vals$p_M7/100)/vals$M7_LB) + ((1-(vals$p_M7/100))/vals$M7_UB)))
+(1/(((vals$p_M8/100)/vals$M8_LB) + ((1-(vals$p_M8/100))/vals$M8_UB)))
+(1/(((vals$p_M9/100)/vals$M9_LB) + ((1-(vals$p_M9/100))/vals$M9_UB)))
+(1/(((vals$p_M10/100)/vals$M10_LB) + ((1-(vals$p_M10/100))/vals$M10_UB)))
+(1/(((vals$p_M11/100)/vals$M11_LB) + ((1-(vals$p_M11/100))/vals$M11_UB)))
+(1/(((vals$p_M12/100)/vals$M12_LB) + ((1-(vals$p_M12/100))/vals$M12_UB)))
+(1/(((vals$p_M13/100)/vals$M13_LB) + ((1-(vals$p_M13/100))/vals$M13_UB)))
+(1/(((vals$p_M14/100)/vals$M14_LB) + ((1-(vals$p_M14/100))/vals$M14_UB)))
+(1/(((vals$p_M15/100)/vals$M15_LB) + ((1-(vals$p_M15/100))/vals$M15_UB)))
+(1/(((vals$p_M16/100)/vals$M16_LB) + ((1-(vals$p_M16/100))/vals$M16_UB)))
+vals$M17+(vals$M18*vals$r_M18)+vals$M19), digits = 2), "hours" )
})
# calculate the estimated workload in hours per week
output$estimatedworkload <- renderText({
# set reading rate in pages per hour
# if user has not opted to manually input a value...
if(input$setreadingrate==F){
# use the values in the queryrate array above to select a reading rate
queryrate.sel <- queryrate[as.numeric(input$difficulty), as.numeric(input$readingpurpose), as.numeric(input$datasource)]
}else{
# if user selects manual override, use the manually input value
queryrate.sel <- input$overridequeryrate
}
# set writing rate in hours per page
# if user has not opted to manually input a value...
if(input$setvalidationrate==F){
# use the values in the validationrate array above to select a writing rate
validationrate.sel <- validationrate[as.numeric(input$referentialintegrity), as.numeric(input$draftrevise), as.numeric(input$validationmethods)]
}else{
# if user selects manual override, use the manually input value
validationrate.sel <- input$overridevalidationrate
}
# calculate hours spent working out of class per week using inputted values from UI
expr = paste(round(
(as.numeric(input$numberofencounters)/as.numeric(queryrate.sel)) +
( (as.numeric(validationrate.sel)*as.numeric(input$numberofvariables)) / as.numeric(input$classweeks) ) +
( (as.numeric(input$exams)*as.numeric(input$examhours)) / as.numeric(input$classweeks)) +
( (as.numeric(input$otherassign)*as.numeric(input$otherhours)) / as.numeric(input$classweeks) ),
digits=2),"hrs/staff")
})
# generate a displayable value for the reading rate used from the queryrate matrix
output$queryrate.out <- renderText({
expr = paste(queryrate[
as.numeric(input$difficulty), as.numeric(input$readingpurpose), as.numeric(input$datasource)
], "variables per hour")
})
# generate a displayable value for the writing rate used from the validationrate matrix
output$validationrate.out <- renderText({
expr = paste(validationrate[
as.numeric(input$referentialintegrity), as.numeric(input$draftrevise), as.numeric(input$validationmethods)
], "hours per variable")
})
# an array giving data to use on pages per hour according to difficulty, purpose, and density
queryrate <- array(
data<-c(67,47, 33, 33, 24, 17, 17, 12, 9, 50, 35, 25, 25, 18, 13, 13, 9, 7, 40, 28, 20, 20, 14, 10, 10, 7, 5),
dim=c(3,3,3),
dimnames = list(c("No New Concepts","Some New Concepts","Many New Concepts"),
c("Survey","Learn","Engage"),
c("450 Words (Paperback)","600 Words (Monograph)","750 Words (Textbook)")
)
)
# an array giving data to use on hours per page according to difficulty, purpose, and density
validationrate <- array(
data<-c(0.75, 1.5, 1, 2, 1.25, 2.5, 1.5, 3, 2, 4, 2.5, 5, 3, 6, 4, 8, 5, 10),
dim=c(2,3,3),
dimnames = list(c("250 Words (D-Spaced)", "500 Words (S-Spaced)"),
c("No Drafting", "Minimal Drafting", "Extensive Drafting"),
c("Reflection; Narrative", "Argument", "Research")
)
)
})
|
2859d94c243b8bef5f7e2854aab2d1c1244426ca | 05c0a5cd39167706f9a31102d47e6e48cd411ef7 | /start.R | 781eba432a41221402d0dc1b5be7aeaa2edc506c | [] | no_license | gusmmm/entrez_critical_care | 46c0b2245ee62aaa24bd058a2a864b1fcb9d859e | 7c61f62f4c6a76d403a8d6cf90b1489d4e20cf1b | refs/heads/master | 2020-03-20T21:30:00.099179 | 2018-06-19T09:30:13 | 2018-06-19T09:30:13 | 137,744,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,119 | r | start.R | # tools for text analysis of pubmed data
# source: pubmed abstracts and metadata
# objective - create tools to information extraction, visualization and knowledge creation
### packages
library(tidyverse)
library(rentrez)
library(tidytext)
library(XML)
### querying pubmed
# example: septic shock
# date: the month of may 2018
q <- '(septic shock AND ("2018/05/01"[PDAT] : "2018/05/31"[PDAT])")'
search_results <- entrez_search(db="pubmed", term = q, retmax = 1000, use_history = T)
# the search results
# search_results$ids
# getting the data from the search_results
q_summary <- entrez_summary(db="pubmed",web_history = search_results$web_history)
q_data <- entrez_fetch(db="pubmed", id = search_results$ids, rettype = "xml")
data_xml <- xmlParse(q_data)
xtop <- xmlRoot(data_xml)
#xtop
metadata_df <- data_frame(uid = sapply(q_summary, function(x) x$uid), title = sapply(q_summary, function(x) x$title))
#abstract_df <- data_frame(uid = xmlSApply(xtop, function(x) {x[[2]][['ArticleIdList']][[1]] %>% xmlValue()}),
# abstract = xmlSApply(xtop, function(x) xmlValue(x[[1]][['Article']][['Abstract']]) ))
# create the abstract data_frame
# This inserted an empty space between abstract sections
abstract_df <- data_frame(uid = xmlSApply(xtop, function(x) {x[[2]][['ArticleIdList']][[1]] %>% xmlValue()}),
abstract = xmlSApply(xtop, function(x) {
if(is.na(xmlValue(x[[1]][['Article']][['Abstract']]))) { "vazio" }
else { getChildrenStrings(node = x[[1]][['Article']][['Abstract']]) %>% paste(collapse = " ") }
}
))
final_df <- inner_join(metadata_df,abstract_df)
# remove empty abstracts from final_df
final_df <- final_df[final_df$abstract !="vazio",]
#### remove all numbers from the abstracts
#a <- str_replace_all(string = final_df$abstract, pattern = "[0-9]+",replacement = "__NUMBER__")
#a <- str_replace_all(string = a, pattern = "__NUMBER__\\.__NUMBER__",replacement = "__NUMBER__")
#final_df$abstract <- str_replace_all(string = a, pattern = "__NUMBER__%",replacement = "__NUMBER__")
|
bf53190a98b4cc12362e8da08b7b677fbfbf06fd | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/bigstatsr/tests/testthat/test-transpose.R | 6258d60e38c35d991b815956c2a34801541b4c14 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 698 | r | test-transpose.R | ################################################################################
context("TRANSPOSE")
set.seed(SEED)
options(bigstatsr.downcast.warning = FALSE)
################################################################################
# Simulating some data
x <- matrix(rnorm(2e4, 100, 5), 200, 100)
################################################################################
test_that("Equality with t()", {
for (t in TEST.TYPES) {
X <- `if`(t == "raw", asFBMcode(x), big_copy(x, type = t))
test <- big_transpose(X)
expect_identical(test[], t(X[]))
}
})
################################################################################
|
22b8030ee7e577e994fbcf8cb2fa9b879727a717 | 6d33a4b4474bf05ec47a94cf03911cf00f493c17 | /Classification/Code/R/analysisScripts/bootstrap_resamples_output_rds.R | 40239169b896ca6f79297a82143ce9071056ee2b | [] | no_license | Genuity-Science/unconventionalML | a6d394e1f88b0a9174fb593ba9a4ddec96159e2f | 7e9d1eb5c13524b66da787fc88f2938831d7308a | refs/heads/master | 2023-08-05T22:02:55.025946 | 2021-09-22T16:28:07 | 2021-09-22T16:28:07 | 310,676,584 | 1 | 1 | null | 2020-11-06T20:19:57 | 2020-11-06T18:31:50 | R | UTF-8 | R | false | false | 3,495 | r | bootstrap_resamples_output_rds.R | # Output RDS file with metrics for bootstrap resamples. Run after
# doing analysis code in Matlab. Used to be consistent in how the performance
# metrics are calculated for the classical algorithms. Saves as .RDS file
# Author: Richard Li
rm(list=ls())
gc()
library(caret)
library(HandTill2001)
library(R.matlab)
datasets = c("brcaMatchedTN","ERpn","kirckirp","luadlusc","lumAB","lumAB_gene")
# change the string to change method. Assumes that all saved in the same format; i.e.,
# ${method_str}_nsols20_pred_for_R.mat, where method_str can be "dw","sa","rand".
# Any user-defined string is fine, as long as as this fileformat
meth = "field" #'dw' 'sa' 'rand' 'field'
sfx = "" # '_nsols_20_ntotsols_1000'
base_dir = '~/Dropbox-Work/Wuxi/Results/bootstrap_resamples/'
all_info = data.frame()
# define positive classes and levels
positive_classes = c("tumor","Positive","kirc","luad","Luminal_A","Luminal_A")
classes_levels = list(c("normal","tumor"),c("Negative","Positive"),c("kirp","kirc"),c("lusc","luad"),c("Luminal_B","Luminal_A"),c("Luminal_B","Luminal_A"))
n_splits = 100
for (n in 1:length(datasets)) {
dir=paste(base_dir,datasets[[n]],"_bootstrap_resamples/",sep='')
mat = readMat(paste(dir,meth,sfx,'_pred_for_R.mat',sep=""))
pos_class=positive_classes[[n]]
classes = classes_levels[[n]]
info=data.frame(dataset=character(),method=character(),tr_acc=double(),tst_acc=double(),
tr_bacc=double(),tst_bacc=double(),tr_auroc=double(), tst_auroc=double(),
tr_prec=double(), tst_prec=double(), tr_recall=double(), tst_recall=double(),
tr_F1=double(),tst_F1=double(),stringsAsFactors=FALSE)
for (j in 1:n_splits) {
response_train = mat$y.pred.trains[,j]
response_test = mat$y.pred.tests[,j]
class_train = factor(mat$y.trains[,j])
class_test = factor(mat$y.tests[,j])
pred_train = factor(response_train >= 0.5)
pred_test = factor(response_test >= 0.5)
levels(pred_train) = classes
levels(pred_test) = classes
levels(class_train) = classes
levels(class_test) = classes
cm_train = confusionMatrix(pred_train, class_train, positive = pos_class)
cm_test = confusionMatrix(pred_test, class_test, positive = pos_class)
if(pos_class == classes[[1]]) {
auc_train_pred = 1-response_train
auc_test_pred = 1-response_test
} else {
auc_train_pred = response_train
auc_test_pred = response_test
}
# ROC train and test
roc.train = auc(bincap(response = class_train, predicted = auc_train_pred,true=pos_class))
roc.test = auc(bincap(response = class_test, predicted = auc_test_pred,true=pos_class))
info[j,'dataset'] = datasets[[n]]
info[j,'method']=meth
info[j,'tr_acc']=cm_train$overall["Accuracy"]
info[j,'tst_acc']=cm_test$overall["Accuracy"]
info[j,'tr_bacc']=mean(cm_train$byClass['Balanced Accuracy'])
info[j,'tst_bacc']=mean(cm_test$byClass['Balanced Accuracy'])
info[j,'tr_prec']=mean(cm_train$byClass['Precision'])
info[j,'tst_prec']=mean(cm_test$byClass['Precision'])
info[j,'tr_recall']=mean(cm_train$byClass['Recall'])
info[j,'tst_recall']=mean(cm_test$byClass['Recall'])
info[j,'tr_F1']=mean(cm_train$byClass['F1'])
info[j,'tst_F1']=mean(cm_test$byClass['F1'])
info[j,'tr_auroc']=roc.train
info[j,'tst_auroc']=roc.test
}
all_info = rbind(all_info,info)
}
saveRDS(all_info,paste(base_dir,"bootstrap_resamples_",meth,sfx,".RDS",sep=""))
|
681ec31c5047614c835ac591176a06f4f95ff378 | 77271f2debe395f63b691a72feb6a94784885649 | /judge.R | 8a9a5b93c7ff9a8e2fd748d869fea62786853844 | [] | no_license | linazzzz/Judges | 17354b692aad39da8049d806d685042b504e42d6 | 1f349a484256f571510ac4dc4b9b8d645a47eecf | refs/heads/master | 2021-01-23T15:43:20.957759 | 2015-10-21T23:08:48 | 2015-10-21T23:08:48 | 43,928,136 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,850 | r | judge.R | library(ggplot2)
library(plyr)
library(reshape2)
## read data as a list
library(readxl)
data1<-read_excel("Refined Version.xlsx",sheet = 7)
data<-list()
Y<-c("09","10","11","12","13","14")
t<-1
for(i in 1:5)
{
data[[t]]<-read_excel("Refined Version.xlsx",sheet = i+1)
t=t+1
}
data[[6]]<-read_excel("Record 2014.xlsx",sheet = 1)
## create null observation value
Moneybyyear<-rep(0,6)
Maxbyyear<-rep(0,6)
Minbyyear<-rep(0,6)
meanbyyear<-rep(0,6)
medianbyyear<-rep(0,6)
## data cleanning
## change to vector
dim(data[[1]])
length(data[[1]][,1])##row
length(data[[1]][1,])##cols
for(i in 1:6)
{
for(j in 1:length(data[[i]][1,]))
data[[i]][,j]<-as.vector(data[[i]][,j])
}
##calculate the simple Descriptive statistics
for(k in 1:6)
{
Moneybyyear[k]<-sum(data[[k]][,6],na.rm = TRUE)
Maxbyyear[k]<-max(data[[k]][,6],na.rm = TRUE)
Minbyyear[k]<-min(data[[k]][,6],na.rm = TRUE)
meanbyyear[k]<-mean(data[[k]][,6],na.rm = TRUE)
medianbyyear[k]<-median(data[[k]][,6],na.rm = TRUE)
}
y<-c(2009,2010,2011,2012,2013,2014)
Total<-data.frame(y,Moneybyyear,Maxbyyear,Minbyyear,meanbyyear,medianbyyear)
## make some simple plot
ggplot(Total,aes(x=y))+xlab("year")+geom_histogram(aes(y=Moneybyyear),stat="identity",binwidth=0.5,colour="blue",fill="white")
ggplot(Total,aes(x=y))+xlab("year")+ylab("change")+geom_line(aes(y=Minbyyear),colour="blue")+geom_line(aes(y=Maxbyyear),colour="red")+geom_line(aes(y=meanbyyear),colour="black")+geom_line(aes(y=medianbyyear),colour="green")
## combind ratio
#test
## create a new dataframe for total&extra income&gift..
id1<-rep("T",6)
id2<-rep("E",6)
id3<-rep("G",6)
df<-data.frame(year=c(y,y,y),value=c(medianbyyear,Minbyyear,meanbyyear),id=c(id1,id2,id3))
df
## plot to see the change of total &extra income&gift by year
qplot(year, value, data = df, geom = c("line", "point"), id = id, colour = id)
|
1a27a12a5ddf6ee2a2022b5c2cf962ca0acbd5fb | fbe57536cc2d84e69a5bf799c88fcb784e853558 | /R/sample.size.mean.t.onesample.R | 0985736e7c5bca0d21ce58590030d21373ff5ea7 | [
"MIT"
] | permissive | burrm/lolcat | 78edf19886fffc02e922b061ce346fdf0ee2c80f | abd3915791d7e63f3827ccb10b1b0895aafd1e38 | refs/heads/master | 2023-04-02T11:27:58.636616 | 2023-03-24T02:33:34 | 2023-03-24T02:33:34 | 49,685,593 | 5 | 2 | null | 2016-10-21T05:14:49 | 2016-01-15T00:56:55 | R | UTF-8 | R | false | false | 2,671 | r | sample.size.mean.t.onesample.R | sample.size.mean.t.onesample <- function(effect.size
,variance.est = 1
,alpha = .05
,beta = .1
,alternative = c("two.sided","less","greater")
,details = TRUE
,power.from.actual = F #report parameter power instead of true power
,max.iter = 1000000
) {
validate.htest.alternative(alternative = alternative)
se.est <- sqrt(variance.est)
df <- 1
if (effect.size > 0 && alternative == "less") {
df <- NA
n <- NA
actual <- NA
power <- NA
} else if (effect.size < 0 && alternative == "greater") {
df <- NA
n <- NA
actual <- NA
power <- NA
} else {
target.power <- 1-beta
power <- power.mean.t.onesample(sample.size = df + 1,
effect.size = effect.size,
variance = variance.est,
alpha=alpha,
alternative = alternative,
details=FALSE
)
n <- df +1
actual <- n
while (power < target.power & df <= max.iter) {
df <- df+1
n <- df +1
actual <- n
power <- power.mean.t.onesample(sample.size = n,
effect.size = effect.size,
variance = variance.est,
alpha=alpha,
alternative = alternative,
details=FALSE
)
}
}
if (power.from.actual) {
power <- 1- beta
} else {
}
if (details) {
ret <- as.data.frame(list(test="t"
,type = "one.sample"
,alternative = alternative[1]
,sample.size = n
,actual = actual
,df = n-1
,effect.size = effect.size
,variance = variance.est
,alpha = alpha
,conf.level = 1-alpha
,beta = beta
,power = power
))
# if (include.z) {
# z.res$df <- c(NA)
# ret <-rbind(ret,z.res)
# ret<- ret[2:1,]
# rownames(ret) <- 1:2
# }
ret
}
else {
n
}
}
#sample.size.mean.t.onesample(effect.size = 2,se.est = 1)
|
a96a1014192165977946e79d6b5e3d40d95a26c3 | 15ee05361f643750a6906120b0301a55cd7c9475 | /man/bhai.prettyTable.Rd | 9e51bf954c5985b7e524c530dd9d3f7b586f2e85 | [] | no_license | cran/BHAI | 97945f92bbe2b6df311f24511bd269b4bf707b46 | 1c9b40ccc1ee38958c9f050b8609f92b2acb25fc | refs/heads/master | 2020-08-06T21:17:57.476059 | 2019-10-06T09:20:02 | 2019-10-06T09:20:02 | 213,158,409 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,482 | rd | bhai.prettyTable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{bhai.prettyTable}
\alias{bhai.prettyTable}
\title{Create summary table}
\usage{
bhai.prettyTable(pps, pop_norm=FALSE, conf.int=TRUE)
}
\arguments{
\item{pps}{The PPS object containing the data.}
\item{pop_norm}{Indicating whether statistics should be computed per 100,000 population, default: FALSE.}
\item{conf.int}{Specifying whether confidence intervals should be computed, default: TRUE.}
}
\value{
A data.frame containing the summarised results.
}
\description{
Create BHAI summary table
}
\examples{
data(german_pps_2011_repr)
german_pps_repr = PPS(num_hai_patients = num_hai_patients,
num_hai_patients_by_stratum = num_hai_patients_by_stratum,
num_hai_patients_by_stratum_prior = num_hai_patients_by_stratum_prior,
num_survey_patients = num_survey_patients,
length_of_stay = length_of_stay,
loi_pps = loi_pps,
mccabe_scores_distr = mccabe_scores_distr,
mccabe_life_exp = mccabe_life_exp,
hospital_discharges = hospital_discharges,
population = population,
country="Germany (representative sample)")
german_pps_repr
set.seed(3)
# The following example is run only for illustratory reasons
# Note that you should never run the function with only 10 Monte-Carlo simulations in practice!
result = bhai(german_pps_repr, nsim=10)
bhai.prettyTable(result)
}
\seealso{
\code{\linkS4class{PPS}}
}
|
25894ea33ea8c835e90ff9e81dcb49e819543514 | ae2d63ad7736caba5d3ab1056314bc972c5c6342 | /Data Generating Code.R | 706c9c23eb6b8ef698f3f435039e10294e72dcc9 | [] | no_license | jbtiez/Bayes_Project | 22493a3d9dbd252658e7c9d34e9c27ef4e776abb | ae47ad86aa6d24daca60db176893dfde5094775d | refs/heads/master | 2020-09-26T21:57:20.349181 | 2019-12-10T00:31:05 | 2019-12-10T00:31:05 | 226,350,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,640 | r | Data Generating Code.R | #creating a blank dataframe
setwd('C:/Users/Jason/downloads')
alb <- read.csv('mtt.csv')
ed_data <- data.frame(matrix(NA,6114,8)) #creating a blank dataframe to put our student data
names(ed_data) <- c("School", "Year", "Race", "Gender", "Disadvantaged", "ESOL", "Disabled","Passed" )
#Doing the binary variables
start = 1
for(i in 1:170){ #running the for loop for each row
N = alb[i,17] #getting the total number of students in each group
end1 = start+N-1
if(alb[i,8] == "M"){
gender = "Male"
} else {
gender = "Female"
}
if(alb[i,10] == "N"){
disadv = 0
} else {
disadv = 1
}
if(alb[i,11] == "N"){
esl = 0
} else {
esl = 1
}
if(alb[i,12] == "N"){
disab = 0
} else {
disab = 1
}
for(j in start:end1){
ed_data[j,4] = gender
ed_data[j,5] = disadv
ed_data[j,6] = esl
ed_data[j,7] = disab
}
start = end1 + 1
}
#putting in the exam results
start2 = 1
for(i in 1:170){
NP = alb[i,16]
end2 = start2+NP-1
print(start2) #printing for diagnostic purposes (to see if function is running correctly)
print(end2)
for(j in start2:end2){
ed_data[j,8] = 1
}
NF = alb[i,15]
if(NF != 0){ #if statement is necessary so counter doesn't go backwards when there are no fails
start2 = end2 + 1
end2 = start2 + NF - 1
for(k in start2:end2){
ed_data[k,8] = 0
}
}
print(start2)
print(end2)
start2 = end2+1
}
#putting in the school
start3 = 1
for(i in 1:170){ #running the for loop for each row
N = alb[i,17] #getting the total number of students in each group
end3 = start3+N-1
if(alb[i,5] == "Albemarle High"){
school = "Albemarle"
}
else if(alb[i,5] == "Charlottesville High"){
school = "Charlottesville"
}
else if(alb[i,5] == "Monticello High"){
school = "Monticello"
}
else {
school = "Western"}
print(start3)
print(end3)
for(j in start3:end3){
ed_data[j,1] = school
}
start3 = end3 + 1
}
#putting in the Race and Year
start4 = 1
for(i in 1:170){ #running the for loop for each row
N = alb[i,17] #getting the total number of students in each group
end4 = start4+N-1
if(alb[i,7] == "Black, not of Hispanic origin"){
race = "Black"
}
else if(alb[i,7] == "Hispanic"){
race = "Hispanic"
}
else if(alb[i,7] == "Non-Hispanic, two or more races"){
race = "Other"
}
else if(alb[i,7] == "White, not of Hispanic origin"){
race = "White"
}
if(alb[i,1] == "2009-2010"){
year = "2010"
}
else if(alb[i,1] == "2010-2011"){
year = "2011"
}
else if(alb[i,1] == "2011-2012"){
year = "2012"
}
else if(alb[i,1] == "2012-2013"){
year = "2013"
} else if(alb[i,1] == "2013-2014"){
year = "2014"
} else if(alb[i,1] == "2014-2015"){
year = "2015"
}
else if(alb[i,1] == "2015-2016"){
year = "2016"
}
else if(alb[i,1] == "2016-2017"){
year = "2017"
}
else if(alb[i,1] == "2017-2018"){
year = "2018"
} else if(alb[i,1] == "2018-2019"){
year = "2019"
}
print(start4)
print(end4)
for(j in start4:end4){
ed_data[j,3] = race
ed_data[j,2] = year
}
start4 = end4 + 1
}
#checking the overall totals and confirming they match the original data
table(ed_data$Year)
table(ed_data$School)
table(ed_data$Race)
table(ed_data$Gender)
table(ed_data$Disadvantaged)
table(ed_data$ESOL)
table(ed_data$Disabled)
table(ed_data$Passed)
write.csv(ed_data, 'ed_data_final.csv')
#assign('ed_data',ed_data,envir=.GlobalEnv)
|
d4116cd9ac58668d77dc0b6dcff0248a5996e10c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bartMachine/examples/get_var_props_over_chain.Rd.R | 87984fd810b24105456c3c80d22ccaa813215a64 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 569 | r | get_var_props_over_chain.Rd.R | library(bartMachine)
### Name: get_var_props_over_chain
### Title: Get the Variable Inclusion Proportions
### Aliases: get_var_props_over_chain
### Keywords: ~kwd1 ~kwd2
### ** Examples
#generate Friedman data
set.seed(11)
n = 200
p = 10
X = data.frame(matrix(runif(n * p), ncol = p))
y = 10 * sin(pi* X[ ,1] * X[,2]) +20 * (X[,3] -.5)^2 + 10 * X[ ,4] + 5 * X[,5] + rnorm(n)
##build BART regression model
bart_machine = bartMachine(X, y, num_trees = 20)
#Get variable inclusion proportions
var_props = get_var_props_over_chain(bart_machine)
print(var_props)
|
6c439c197560de91f222051eca705283abe901ab | d8b57d52cc20bc4900adb4032c1e56ce383d8798 | /R/AllMethods.R | 127ac9764dfb92bdbd8ded9ea0d51a70a846cf31 | [] | no_license | imkeller/gscreend | f6c3c67684fd3be5ffa3a5d9f763b7e2694dcfcb | 665812b89e5f2a893e4c4716d3ae86567f2d567c | refs/heads/master | 2021-08-27T21:40:34.038997 | 2021-08-09T10:04:00 | 2021-08-09T10:04:00 | 175,824,619 | 13 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,217 | r | AllMethods.R | #' Accessor function for the sgRNA slot of the PoolScreenExp class
#'
#' @param x PoolScreenExp object
#'
#' @return sgRNA slot of the object
#' @export
#'
#' @examples # import a PoolScreenExp object that has been generated using
#' # RunGscreend()
#' pse_an <- readRDS(
#' system.file('extdata', 'gscreend_analysed_experiment.RData',
#' package = 'gscreend'))
#'
#' sgRNAData(pse_an)
#'
setMethod("sgRNAData", "PoolScreenExp", function(x) {
# I assume I am allowed to use the
# direct slot access here, because there is no other way?
se <- x@sgRNAData
se
})
# Write into the sgRNA slot
setReplaceMethod("sgRNAData", "PoolScreenExp", function(x, value) {
# I assume I am allowed to use the
# direct slot access here, because there is no other way?
x@sgRNAData <- value
x
})
#' Accessor function for the Gene slot of the PoolScreenExp class
#'
#' @param x PoolScreenExp object
#'
#' @return Gene slot of the object
#' @export
#'
#' @examples # import a PoolScreenExp object that has been generated using
#' # RunGscreend()
#' pse_an <- readRDS(
#'     system.file('extdata', 'gscreend_analysed_experiment.RData',
#'     package = 'gscreend'))
#'
#' GeneData(pse_an)
#'
setMethod("GeneData", "PoolScreenExp", function(x) x@GeneData)

# Replacement method for the Gene slot: store the supplied value and
# return the modified object.
setReplaceMethod("GeneData", "PoolScreenExp", function(x, value) {
    slot(x, "GeneData") <- value
    x
})
# Internal accessors for the fitting-related slots. Each getter simply
# extracts the slot; each setter writes the slot and returns the modified
# object.

# Fitting options slot
setMethod("FittingOptions", "PoolScreenExp", function(x) x@FittingOptions)

setReplaceMethod("FittingOptions", "PoolScreenExp", function(x, value) {
    slot(x, "FittingOptions") <- value
    x
})

# Fitting intervals slot
setMethod("FittingIntervals", "PoolScreenExp", function(x) x@FittingIntervals)

setReplaceMethod("FittingIntervals", "PoolScreenExp", function(x, value) {
    slot(x, "FittingIntervals") <- value
    x
})

# Slot containing the fitted LFC model parameters
setMethod("LFCModelParameters", "PoolScreenExp", function(x) x@LFCModelParameters)

setReplaceMethod("LFCModelParameters", "PoolScreenExp", function(x, value) {
    slot(x, "LFCModelParameters") <- value
    x
})
setMethod("normcounts", "PoolScreenExp", function(x)
assays(sgRNAData(x))$normcounts)
setMethod("normcounts<-", "PoolScreenExp", function(x, value) {
assays(sgRNAData(x))$normcounts <- value
x
})
# get the reference counts for LFC calculation
setMethod("refcounts", "PoolScreenExp", function(x) {
se <- sgRNAData(x)
# filter to get T0 samples
assays(se[, se$timepoint == "T0"])$normcounts
})
# get the reference counts for LFC calculation
setMethod("samplecounts", "PoolScreenExp", function(x) {
se <- sgRNAData(x)
# filter to get T0 samples
# names have to be checked upon input
assays(se[, se$timepoint == "T1"])$normcounts
})
setMethod("samplelfc", "PoolScreenExp", function(x) {
se <- sgRNAData(x)
# filter to get T0 samples
# names have to be checked upon input
assays(se[, se$timepoint == "T1"])$lfc
})
setMethod("samplelfc", "PoolScreenExp", function(x) {
se <- sgRNAData(x)
# filter to get T0 samples
# names have to be checked upon input
assays(se[, se$timepoint == "T1"])$lfc
})
setMethod("samplepval", "PoolScreenExp", function(x) {
se <- sgRNAData(x)
# filter to get T0 samples
# names have to be checked upon input
assays(se[, se$timepoint == "T1"])$pval
})
|
6786b3570405335c5b68e55394792a152acb69b0 | 7615b6157c4995bfbf3b0b0be3dcb92e39cc03e8 | /R/shinit.R | 65cd713aec987fd48a357484993f9cd7269e3d2d | [] | no_license | cecilieha/shinyrAtlantis | ab7a551269839ce2bdd52d349aa674279ca19cb7 | 88647bf809c892b09e9f00e5b68d12e649ca7dae | refs/heads/master | 2020-03-28T22:37:17.443178 | 2018-09-18T06:31:31 | 2018-09-18T06:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 59,488 | r | shinit.R | # 10/06/2016
#' @title Shiny application for viewing Atlantis initialisation data
#'
#' @description
#' Takes data from a .bgm box geometry file and a netCDF Atlantis input parameter file and provides
#' a visualisation of the data in the form of a shiny application. The two data
#' files must first be pre-processed by \code{\link[shinyrAtlantis]{make.sh.init.object}},
#' which generates a list object that is the parameter to \code{sh.init}
#' (see Examples).
#'
#' The \emph{Habitat} tab displays the number of water layers and the habitat
#' cover for each box. Use this tab to check that reef + flat + soft cover sum to 1 for the interior boxes.
#'
#' The \emph{Abiotic (2D)} and \emph{Abiotic (3D)} tabs display benthic and pelagic abiotic variables.
#'
#' The \emph{Biotic (2D)} and \emph{Biotic (3D)} tabs display benthic and pelagic biotic variables.
#'
#' The \emph{Nitrogen} tab displays the initial nitrogen content of biotic groups that are modelled using cohorts.
#'
#' @param input.object R list object generated from \code{\link[shinyrAtlantis]{make.sh.init.object}}.
#'
#' @return Object of class 'shiny.appobj' see \code{\link[shiny]{shinyApp}}.
#'
#' @examples
#' \dontrun{
#' bgm.file <- "VMPA_setas.bgm"
#' nc.file <- "INIT_VMPA_Jan2015.nc"
#' input.object <- make.sh.init.object(bgm.file, nc.file)
#' sh.init(input.object)
#' }
#' @export
#' @importFrom ggplot2 guide_legend ylim
#' @importFrom ncdf4 nc_open nc_close ncvar_get
sh.init <- function(input.object){
  # set up layer indices when plotting 3D values
  # depth.layers[i, j] holds the index into the netCDF depth dimension for
  # display-level i of box j (the nc file stores water layers bottom-up, so
  # the order is reversed here via layers.water:1); NA where box j has no
  # such layer, and the last row is reserved for the sediment layer.
  depth.layers <- matrix(NA, nrow = input.object$numlevels,
    ncol = input.object$numboxes)
  for (i in 1:input.object$numboxes) {
    if (!is.na(input.object$box.info$layers.water[i]) &
        (input.object$box.info$layers.water[i] > 0)) {
      depth.layers[1:input.object$box.info$layers.water[i],i] <-
        input.object$box.info$layers.water[i]:1
    }
  }
  has.water.layers <- input.object$box.info$layers.water > 0 # box has water
  depth.layers[input.object$numlevels,has.water.layers] <- input.object$numlevels
  # set up species sub-choice indices
  # (start/end give, for each group, the range of entries belonging to it
  # in the corresponding *.names.full vector)
  ndx.2.start <- rep(0, length(input.object$species.2.names))
  ndx.2.end <- cumsum(input.object$species.2.groups)
  ndx.2.start[1] <- 1
  ndx.2.start[2:length(input.object$species.2.names)] <- 1 + ndx.2.end[1:(length(input.object$species.2.names)-1)]
  ndx.3.start <- rep(0, length(input.object$species.3.names))
  ndx.3.end <- cumsum(input.object$species.3.groups)
  ndx.3.start[1] <- 1
  ndx.3.start[2:length(input.object$species.3.names)] <- 1 + ndx.3.end[1:(length(input.object$species.3.names)-1)]
  # transformation options for 3D biotic data; the second option is offered
  # only for *_Nums (abundance) variables -- see observeEvent(input$SI3.SG)
  Nums.Choices <- c("Raw data", "Divide by box area")
  # determine which 3D biotic groups can have transformable data (contains _Nums)
  Nums.names <- rep(FALSE, length(input.object$species.3.names.full))
  Nums.names[grep(pattern = "_Nums$", x = input.object$species.3.names.full)] <- TRUE
  # create HTML text for viewing on help tab
  txtHelp <- "<p>This program displays data from the .nc file used to provide initial conditions for an <b>Atlantis</b> run.</p>"
  txtHelp <- paste(txtHelp, "<p>Plots have a zoom feature. Draw a box and double click to zoom into the box. Double click to reset zoom.</p>")
  txtHelp <- paste(txtHelp, "<p>Reef, soft, and flat values are expected to sum to one (100% cover) within the domain.</p>")
  txtHelp <- paste(txtHelp, "<p>The code attempts to separate variables into biotic and abiotic groups. Biotic groups are those where there exists a variable that contains _N in the name as this indicates the presence of nitrogen. The code also attempts to distinguish data where a vertical dimension is present (3D). Some biotic groups may be presented under the 2D and 3D panels.</p>")
  txtHelp <- paste(txtHelp, "<p>If more than one time dimension is detected then only the first time layer is displayed.</p>")
  txtHelp <- paste(txtHelp, "<p>Currently the code <em>assumes a single sediment layer</em>.</p>")
  txtHelp <- paste(txtHelp, "<p>When a variable is associated with a vertical dimension the number of water layers in each box determines where the data is displayed in the table presented below the plot. ")
  txtHelp <- paste(txtHelp, "For example, suppose that at most there are seven layers: a sediment layer and six water layers. ")
  txtHelp <- paste(txtHelp, "If a box has four water layers, then the input data in the netCDF file is assumed to have the form:</p>")
  txtHelp <- paste(txtHelp, "<ul style=\"list-style-type:none\"><li>layer 0, layer 1, layer 2, layer 3, 0, 0, sediment</li></ul>")
  txtHelp <- paste(txtHelp, "<p>where layer 0 is the layer nearest the sediment and layer 3 is the surface layer. The last value is always assumed to be the sediment layer and the first value is always assumed to be the layer closet to the sediment. Any values located by the zeros are ignored. Thus, <b>the vertical data presented in the table is not displayed in the same order as it is stored in the netCDF file</b>.</p>")
  txtHelp <- paste(txtHelp, "<p>When plotting and tabulating vertical profiles, values are presented for cells where nominal_dz > 0 (irrespective of numlayers associated with the box).</p>")
  txtHelp <- paste(txtHelp, "<p>Some checks that the vertical data make biological sense include: water is warmer in the surface layers and dz values (other than at layer 0) equate across boxes. Also, in shallower waters (< 1000m) oxygen tends to decrease with depth, whereas salinity, silica, nitrogen, and phosphorous all tend to increase with depth.</p>")
  txtHelp <- paste(txtHelp, "<p>3D biotic data describing numbers (i.e., variable name contains _Nums) may be transformed by dividing by box area. This transformation shows 2D densities, which gives a better sense of whether a species is uniformly distributed throughout its horizontal range (assuming the depth range [m] is fixed throughout its horizontal range).</p>")
  plotHeight3D <- paste(as.character(350 * ((input.object$numlevels - 1) %/% 2 + 1)),
    "px", sep = "") # calculate a reasonable overall plot size for 3D plots
  plotHeightNitrogen <- paste(as.character(125 * ((length(unique(input.object$df.nitrogen$Species)) - 1) %/% 6 + 1)),
    "px", sep = "") # calculate a reasonable overall plot size for 3D plots
  # set up consistent association between colours used to categorise cover check
  myColors <- c("#deebf7", "#de2d26", "#a1d99b", "#a50f15")
  names(myColors) <- levels(input.object$box.info$cover.check)
  colScale <- scale_fill_manual(name = "check",values = myColors)
  # copy relevant box info worth showing
  df.cover <- input.object$box.info[c(1:11,13,17)]
  df.cover$z <- -df.cover$z
  names(df.cover) <- c("boxid", "reef", "flat", "soft", "canyon", "cover",
    "check", "layers (total)", "layers (water)", "depth (nc)", "depth (bgm)",
    "area", "numlayers")
  df.cover <- df.cover[c(1:9, 13, 10, 11, 12)] # reorder columns for display
  # Assemble and return the Shiny app (UI layout + server logic).
  shinyApp(
    # USER INPUT FUNCTION
    ui = navbarPage(
      title = "Atlantis initialisation viewer",
      # Habitat
      tabPanel("Habitat",
        fluidPage(
          fluidRow(
            column(6, h4("Water layers")),
            column(6, h4("Habitat cover"))
          ),
          fluidRow(
            column(6,
              plotOutput("plotLayers",
                height = "475px",
                dblclick = "plotLayers_dblclick",
                brush = brushOpts(
                  id = "plotLayers_brush",
                  resetOnNew = TRUE
                )
              )
            ),
            column(6,
              plotOutput("plotCover",
                height = "475px",
                dblclick = "plotCover_dblclick",
                brush = brushOpts(
                  id = "plotCover_brush",
                  resetOnNew = TRUE
                )
              )
            )
          ),
          hr(),
          fluidRow(column(12,
            HTML("<p><b>Notes</b></p><ul><li>reef + flat + soft are expected to sum to 1 within the model domain.</li><li>layers (total) is calculated from the number of non-zero terms in the variable nominal_dz from the .nc file. Assuming a single sediment layer, layers (water) = layers (total) - 1, which should equate to numlayers.</li><li>Bracketed terms associated with the depths indicate where the data were taken from and they should be the same for both the .nc (using nominal_dz) and .bgm files.</li></ul>"))),
          fluidRow(
            column(12, DT::dataTableOutput("table.box.info"))
          )
        )
      ),
      # Abiotic 2D
      tabPanel("Abiotic (2D)",
        sidebarLayout(
          sidebarPanel(width = 3,
            selectInput(inputId = "SI.NS2", label = "Group",
              choices = input.object$nonspecies.2.names),
            h5("Group attributes"),
            htmlOutput("txtnSp2att")
          ),
          mainPanel(
            fluidRow(column(12,
              plotOutput("plot2D",
                height = "625px",
                dblclick = "plot2D_dblclick",
                brush = brushOpts(
                  id = "plot2D_brush",
                  resetOnNew = TRUE
                )
              )
            )),
            hr(),
            fluidRow(column(4, DT::dataTableOutput("table.2D")))
          )
        )
      ),
      # Abiotic 3D
      tabPanel("Abiotic (3D)",
        sidebarLayout(
          sidebarPanel(width = 3,
            selectInput(inputId = "SI.NS3", label = "Group",
              choices = input.object$nonspecies.3.names),
            h5("Group attributes"),
            htmlOutput("txtnSp3att")
          ),
          mainPanel(
            fluidRow(column(12,
              plotOutput("plot3D",
                height = plotHeight3D,
                dblclick = "plot3D_dblclick",
                brush = brushOpts(
                  id = "plot3D_brush",
                  resetOnNew = TRUE
                )
              )
            )),
            fluidRow(column(12,
              HTML("<p>Numbers in the panel headers indicate the depth at the bottom of the water layer.</p>"))
            ),
            hr(),
            fluidRow(column(12, DT::dataTableOutput("table.3D")))
          )
        )
      ),
      # Biotic 2D
      tabPanel("Biotic (2D)",
        sidebarLayout(
          sidebarPanel(width = 3,
            selectInput(inputId = "SI2.S", label = "Group",
              choices = input.object$species.2.names),
            selectInput(inputId = "SI2.SG", label = "Sub-group",
              choices = input.object$species.2.names.full[
                1:input.object$species.2.groups[1]]),
            h5("Sub-group attributes"),
            htmlOutput("txtSp2att")
          ),
          mainPanel(
            fluidRow(column(12,
              plotOutput("plotSpecies2",
                height = "625px",
                dblclick = "plotSpecies2_dblclick",
                brush = brushOpts(
                  id = "plotSpecies2_brush",
                  resetOnNew = TRUE
                )
              )
            )),
            hr(),
            fluidRow(column(4, DT::dataTableOutput("table.species2")))
          )
        )
      ),
      # Biotic 3D
      tabPanel("Biotic (3D)",
        sidebarLayout(
          sidebarPanel(width = 3,
            selectInput(inputId = "SI3.S", label = "Group",
              choices = input.object$species.3.names),
            selectInput(inputId = "SI3.SG", label = "Sub-group",
              choices = input.object$species.3.names.full[
                1:input.object$species.3.groups[1]]),
            selectInput(inputId = "SI3.Tr", label = "Data transformation",
              choices = c("Raw data")),
            h5("Sub-group attributes"),
            htmlOutput("txtSp3att")
          ),
          mainPanel(
            fluidRow(column(12,
              plotOutput("plotSpecies3",
                height = plotHeight3D,
                dblclick = "plotSpecies3_dblclick",
                brush = brushOpts(
                  id = "plotSpecies3_brush",
                  resetOnNew = TRUE
                )
              )
            )),
            fluidRow(column(12,
              HTML("<p>Numbers in the panel headers indicate the depth at the bottom of the water layer.</p>"))
            ),
            hr(),
            fluidRow(column(12, DT::dataTableOutput("table.species3")))
          )
        )
      ),
      # Nitrogen (structural and reserve)
      tabPanel("Nitrogen",
        fluidPage(
          plotOutput("plotNitrogen", height = plotHeightNitrogen)
        )
      ),
      # NUms
      tabPanel("Nums",
        fluidPage(
          plotOutput("plotNums", height = plotHeightNitrogen)
        )
      ),
      tabPanel("Help",
        fluidPage(
          HTML(txtHelp)
        )
      ),
      tabPanel(actionButton("exitButton", "Exit"))
    ),
    # SERVER FUNCTION
    server = function(input, output, session) {
      # values$*att hold the HTML attribute text shown in the tab sidebars
      values <- reactiveValues()
      values$nsp2att <- ""
      values$nsp3att <- ""
      values$sp2att <- ""
      values$sp3att <- ""
      # reactive variables used to set plot ranges
      ranges <- reactiveValues(x = NULL, y = NULL)
      rangesCover <- reactiveValues(x = NULL, y = NULL)
      ranges2D <- reactiveValues(x = NULL, y = NULL)
      ranges3D <- reactiveValues(x = NULL, y = NULL)
      rangesSpecies2 <- reactiveValues(x = NULL, y = NULL)
      rangesSpecies3 <- reactiveValues(x = NULL, y = NULL)
      observeEvent(input$exitButton, {
        stopApp()
      })
      # register change in 2D abiotic sub-group and update displayed netCDF attributes
      observeEvent(input$SI.NS2, {
        indx <- which(input.object$nonspecies.2.names == input$SI.NS2)
        values$nsp2att <- input.object$nonspecies.2.att[indx]
      })
      # register change in 3D abiotic sub-group and update displayed netCDF attributes
      observeEvent(input$SI.NS3, {
        indx <- which(input.object$nonspecies.3.names == input$SI.NS3)
        values$nsp3att <- input.object$nonspecies.3.att[indx]
      })
      # register change in 2D biotic sub-group and update displayed netCDF attributes
      observeEvent(input$SI2.SG, {
        indx <- which(input.object$species.2.names.full == input$SI2.SG)
        values$sp2att <- input.object$species.2.att[indx]
      })
      # register change in 3D biotic sub-group and update displayed netCDF attributes
      observeEvent(input$SI3.SG, {
        indx <- which(input.object$species.3.names.full == input$SI3.SG)
        values$sp3att <- input.object$species.3.att[indx]
        if (Nums.names[indx]) {
          updateSelectInput(session, "SI3.Tr", choices = Nums.Choices)
        } else {
          updateSelectInput(session, "SI3.Tr", choices = "Raw data")
        }
      })
      # register change in 3D biotic group and update sub-group options
      observe({
        i <- which(input.object$species.3.names == input$SI3.S)
        updateSelectInput(session, "SI3.SG",
          choices = input.object$species.3.names.full[ndx.3.start[i]:ndx.3.end[i]])
      })
      # register change in 2D biotic group and update sub-group options
      observe({
        i <- which(input.object$species.2.names == input$SI2.S)
        updateSelectInput(session, "SI2.SG",
          choices = input.object$species.2.names.full[ndx.2.start[i]:ndx.2.end[i]])
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plotLayers_dblclick, {
        brush <- input$plotLayers_brush
        if (!is.null(brush)) {
          ranges$x <- c(brush$xmin, brush$xmax)
          ranges$y <- c(brush$ymin, brush$ymax)
        } else {
          ranges$x <- NULL
          ranges$y <- NULL
        }
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plotCover_dblclick, {
        brush <- input$plotCover_brush
        if (!is.null(brush)) {
          rangesCover$x <- c(brush$xmin, brush$xmax)
          rangesCover$y <- c(brush$ymin, brush$ymax)
        } else {
          rangesCover$x <- NULL
          rangesCover$y <- NULL
        }
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plot2D_dblclick, {
        brush <- input$plot2D_brush
        if (!is.null(brush)) {
          ranges2D$x <- c(brush$xmin, brush$xmax)
          ranges2D$y <- c(brush$ymin, brush$ymax)
        } else {
          ranges2D$x <- NULL
          ranges2D$y <- NULL
        }
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plot3D_dblclick, {
        brush <- input$plot3D_brush
        if (!is.null(brush)) {
          ranges3D$x <- c(brush$xmin, brush$xmax)
          ranges3D$y <- c(brush$ymin, brush$ymax)
        } else {
          ranges3D$x <- NULL
          ranges3D$y <- NULL
        }
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plotSpecies2_dblclick, {
        brush <- input$plotSpecies2_brush
        if (!is.null(brush)) {
          rangesSpecies2$x <- c(brush$xmin, brush$xmax)
          rangesSpecies2$y <- c(brush$ymin, brush$ymax)
        } else {
          rangesSpecies2$x <- NULL
          rangesSpecies2$y <- NULL
        }
      })
      # When a double-click happens, check if there's a brush on the plot.
      # If so, zoom to the brush bounds; if not, reset the zoom.
      observeEvent(input$plotSpecies3_dblclick, {
        brush <- input$plotSpecies3_brush
        if (!is.null(brush)) {
          rangesSpecies3$x <- c(brush$xmin, brush$xmax)
          rangesSpecies3$y <- c(brush$ymin, brush$ymax)
        } else {
          rangesSpecies3$x <- NULL
          rangesSpecies3$y <- NULL
        }
      })
      # display 2D 1biotic netCDF variable attributes
      output$txtnSp2att <- renderUI({
        HTML(values$nsp2att)
      })
      # display 3D abiotic netCDF variable attributes
      output$txtnSp3att <- renderUI({
        HTML(values$nsp3att)
      })
      # display 2D biotic netCDF variable attributes
      output$txtSp2att <- renderUI({
        HTML(values$sp2att)
      })
      # display 3D biotic netCDF variable attributes
      output$txtSp3att <- renderUI({
        HTML(values$sp3att)
      })
      # display 2D habitat data in table form
      output$table.box.info <- DT::renderDataTable({
        DT::datatable(df.cover, rownames = FALSE)
      })
      # display number of water layers
      output$plotLayers <- renderPlot({
        ggplot(data = input.object$df.map,
          aes(x = x, y = y, group = boxid, fill = layers.water)) +
          geom_polygon(colour = "black", size = 0.25) +
          scale_fill_gradient(low="#fee6ce", high="#d94801",
            na.value="tomato", guide=guide_legend()) +
          labs(fill = "layers (water)") +
          geom_text(aes(x = x.in, y = y.in, label = boxid), size = 2.5) +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = ranges$x, ylim = ranges$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display cover check
      output$plotCover <- renderPlot({
        ggplot(data = input.object$df.map,
          aes(x = x, y = y, group = boxid, fill = cover.check)) +
          geom_polygon(colour = "white", size = 0.25) +
          colScale +
          labs(fill = "reef + flat + soft") +
          geom_text(aes(x = x.in, y = y.in, label = boxid), size = 2.5) +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = rangesCover$x, ylim = rangesCover$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display 2D abiotic data in plot form
      output$plot2D <- renderPlot({
        boxid <- 0:(input.object$numboxes-1)
        indx <- which(input.object$nonspecies.2.names == input$SI.NS2)
        data.2D <- input.object$nonspecies.2.data[indx,]
        data.2D[!has.water.layers] <- NA
        df.plot <- data.frame(
          boxid = boxid,
          vals = data.2D
        )
        df.plot <- left_join(input.object$df.map, df.plot, by = "boxid")
        ggplot(data = df.plot,
          aes(x = x, y = y, group = boxid, fill = vals)) +
          geom_polygon(colour = "black", size = 0.25) +
          scale_fill_gradient(low="#fee6ce", high="#d94801",
            na.value="black", guide=guide_legend()) +
          labs(fill = "value") +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = ranges2D$x, ylim = ranges2D$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display 2D abiotic data in table form
      output$table.2D <- DT::renderDataTable({
        boxid <- 0:(input.object$numboxes-1)
        indx <- which(input.object$nonspecies.2.names == input$SI.NS2)
        data.2D = input.object$nonspecies.2.data[indx,]
        df.plot <- data.frame(
          boxid = boxid,
          vals = data.2D
        )
        names(df.plot) <- c("boxid", "value")
        DT::datatable(df.plot, rownames = FALSE)
      })
      # display 3D abiotic data in plot form
      output$plot3D <- renderPlot({
        boxid <- 0:(input.object$numboxes-1)
        level <- 1:input.object$numlevels
        indx <- which(input.object$nonspecies.3.names == input$SI.NS3)
        data.3D <- input.object$nonspecies.3.data[indx, , ]
        # build one block of box values per display depth level, using
        # depth.layers to map display level -> stored nc layer (NA cells
        # render black)
        for (i in 1:input.object$numlevels) {
          vals <- rep(NA, input.object$numboxes) # reset values
          for (j in 1:input.object$numboxes) {
            if (is.na(depth.layers[i,j])) {
              vals[j] <- NA
            } else {
              vals[j] <- data.3D[j , depth.layers[i,j]]
            }
          }
          df.level <- data.frame(
            boxid = boxid,
            depth = rep(input.object$depths[i], input.object$numboxes),
            vals = vals
          )
          if (i == 1) {
            df.plot <- df.level
          } else{
            df.plot <- rbind(df.plot, df.level)
          }
        }
        df.plot <- left_join(input.object$df.map, df.plot, by = "boxid")
        ggplot(data = df.plot,
          aes(x = x, y = y, group = boxid, fill = vals)) +
          geom_polygon(colour = "black", size = 0.25) +
          scale_fill_gradient(low="#fee6ce", high="#d94801",
            na.value="black", guide=guide_legend()) +
          labs(fill = "value") +
          facet_wrap( ~ depth, ncol = 2) +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = ranges3D$x, ylim = ranges3D$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display 3D abiotic data in table form
      output$table.3D <- DT::renderDataTable({
        boxid <- 0:(input.object$numboxes-1)
        level <- 1:input.object$numlevels
        indx <- which(input.object$nonspecies.3.names == input$SI.NS3)
        data.3D <- input.object$nonspecies.3.data[indx, , ]
        for (i in 1:input.object$numlevels) {
          vals <- rep(NA, input.object$numboxes) # reset values
          for (j in 1:input.object$numboxes) {
            if (is.na(depth.layers[i,j])) {
              vals[j] <- NA
            } else {
              vals[j] <- data.3D[j , depth.layers[i,j]]
            }
          }
          df.level <- data.frame(
            boxid = boxid,
            vals = vals
          )
          if (i == 1) {
            df.plot <- df.level
          } else{
            df.plot <- cbind(df.plot, vals)
          }
        }
        names(df.plot) <- c("boxid", input.object$depths)
        DT::datatable(df.plot, rownames = FALSE)
      })
      # display 2D biotic data in plot form
      output$plotSpecies2 <- renderPlot({
        boxid <- 0:(input.object$numboxes-1)
        indx <- which(input.object$species.2.names.full == input$SI2.SG)
        data.2D <- input.object$species.2.data[indx, ]
        data.2D[!has.water.layers] <- NA
        df.plot <- data.frame(
          boxid = boxid,
          vals = data.2D
        )
        df.plot <- left_join(input.object$df.map, df.plot, by = "boxid")
        ggplot(data = df.plot,
          aes(x = x, y = y, group = boxid, fill = vals)) +
          geom_polygon(colour = "black", size = 0.25) +
          scale_fill_gradient(low="#fee6ce", high="#d94801",
            na.value="black", guide=guide_legend()) +
          labs(fill = "value") +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = rangesSpecies2$x, ylim = rangesSpecies2$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display 2D biotic data in table form
      output$table.species2 <- DT::renderDataTable({
        boxid <- 0:(input.object$numboxes-1)
        indx <- which(input.object$species.2.names.full == input$SI2.SG)
        data.2D <- input.object$species.2.data[indx, ]
        df.plot <- data.frame(
          boxid = boxid,
          vals = data.2D
        )
        names(df.plot) <- c("boxid", "value")
        DT::datatable(df.plot, rownames = FALSE)
      })
      # display 3D biotic data in plot form
      output$plotSpecies3 <- renderPlot({
        boxid <- 0:(input.object$numboxes-1)
        level <- 1:input.object$numlevels
        indx <- which(input.object$species.3.names.full == input$SI3.SG)
        data.3D <- input.object$species.3.data[indx, , ]
        for (i in 1:input.object$numlevels) {
          vals <- rep(NA, input.object$numboxes) # reset values
          for (j in 1:input.object$numboxes) {
            if (is.na(depth.layers[i,j])) {
              vals[j] <- NA
            } else {
              vals[j] <- data.3D[j , depth.layers[i,j]]
              if (input$SI3.Tr == "Divide by box area") {
                vals[j] <- vals[j] / input.object$box.info$area[j]
              }
            }
          }
          df.level <- data.frame(
            boxid = boxid,
            depth = rep(input.object$depths[i], input.object$numboxes),
            vals = vals
          )
          if (i == 1) {
            df.plot <- df.level
          } else{
            df.plot <- rbind(df.plot, df.level)
          }
        }
        df.plot <- left_join(input.object$df.map, df.plot, by = "boxid")
        ggplot(data = df.plot,
          aes(x = x, y = y, group = boxid, fill = vals)) +
          geom_polygon(colour = "black", size = 0.25) +
          scale_fill_gradient(low="#fee6ce", high="#d94801",
            na.value="black", guide=guide_legend()) +
          labs(fill = "value") +
          facet_wrap( ~ depth, ncol = 2) +
          theme_bw() + xlab("") + ylab("") +
          theme(plot.background = element_blank()) +
          coord_cartesian(xlim = rangesSpecies3$x, ylim = rangesSpecies3$y) +
          scale_y_continuous(breaks=NULL) + scale_x_continuous(breaks=NULL)
      })
      # display 3D biotic data in table form
      output$table.species3 <- DT::renderDataTable({
        boxid <- 0:(input.object$numboxes-1)
        level <- 1:input.object$numlevels
        indx <- which(input.object$species.3.names.full == input$SI3.SG)
        data.3D <- input.object$species.3.data[indx, , ]
        for (i in 1:input.object$numlevels) {
          vals <- rep(NA, input.object$numboxes) # reset values
          for (j in 1:input.object$numboxes) {
            if (is.na(depth.layers[i,j])) {
              vals[j] <- NA
            } else {
              vals[j] <- data.3D[j , depth.layers[i,j]]
              if (input$SI3.Tr == "Divide by box area") {
                vals[j] <- vals[j] / input.object$box.info$area[j]
              }
            }
          }
          df.level <- data.frame(
            boxid = boxid,
            vals = vals
          )
          if (i == 1) {
            df.plot <- df.level
          } else{
            df.plot <- cbind(df.plot, vals)
          }
        }
        names(df.plot) <- c("boxid", input.object$depths)
        DT::datatable(df.plot, rownames = FALSE)
      })
      # display nitrogen
      output$plotNitrogen <- renderPlot({
        ggplot(data = input.object$df.nitrogen,
          aes(x = Cohort, y = N.Value, color = N.Type)) +
          geom_point() + geom_line() + ylim(0,NA) +
          facet_wrap( ~ Species, ncol=6, scales="free_y") +
          labs(color = "Source") +
          xlab("Cohort") + ylab("Nitrogen (mg N)") +
          # theme(plot.background = element_blank()) +
          theme_bw()
      })
      # display Nums
      output$plotNums <- renderPlot({
        ggplot(data = input.object$df.nums,
          aes(x = Cohort, y = Nums.Value)) +
          geom_point() + geom_line() + ylim(0,NA) +
          facet_wrap( ~ Species, ncol=6, scales="free_y") +
          xlab("Cohort") + ylab("Total numbers (individuals)") +
          theme(plot.background = element_blank())
      })
    }
  )
}
# +====================================================+
# | make.init.map : collect data for displaying maps |
# +====================================================+
make.init.map <- function(bgm.file){
  # Read an Atlantis .bgm box geometry file and assemble the pieces needed
  # to draw the model domain.
  #
  # Args:
  #   bgm.file: path to a .bgm box geometry file.
  #
  # Returns a list with three elements:
  #   numboxes     - number of boxes (from the single valid "nbox" row;
  #                  stays 0 if no such row, or several, are found)
  #   map.vertices - data frame (boxid, x, y) of box polygon vertices
  #   box.data     - per-box data frame: depth z (botz), is.island flag,
  #                  area, volume, internal label point (x.in, y.in);
  #                  boxid is a factor
  bgm <- readLines(bgm.file) # read in the geometry file

  # Split a .bgm line on runs of tabs/spaces into its whitespace-separated
  # tokens; the first token is the key (e.g. "box3.vert").
  split.tokens <- function(txt) {
    unlist(strsplit(gsub(pattern = "[\t ]+", x = txt, replacement = " "), " "))
  }

  # Number of boxes: a row is valid only when its first token is exactly
  # "nbox" (grep alone would also match e.g. comment lines containing it).
  numboxes <- 0
  j <- grep(pattern = "nbox", x = bgm, value = FALSE) # candidate file row(s)
  if (length(j) > 0) {
    jnew <- NULL
    for (jj in seq_along(j)) {
      text.split <- split.tokens(bgm[j[jj]])
      if (text.split[1] == "nbox") {
        jnew <- c(jnew, j[jj]) # keep only rows that satisfy the criteria
      }
    }
    j <- jnew
    if (length(j) == 1) { # exactly one valid row is required
      numboxes <- as.numeric(split.tokens(bgm[j])[2])
    }
  }

  # Box polygon vertices: rows of the form "box<i>.vert x y".
  # Rows are accumulated in a list and bound once at the end (avoids the
  # O(n^2) rbind-in-a-loop growth of the original); seq_len/seq_along also
  # fix the c(1, 0) iteration the original 1:numboxes / 1:length(j) loops
  # produced when those counts were zero.
  vert.rows <- list()
  for (i in seq_len(numboxes)) {
    txt.find <- paste("box", i - 1, ".vert", sep = "")
    j <- grep(txt.find, bgm)
    for (jj in seq_along(j)) {
      text.split <- split.tokens(bgm[j[jj]])
      if (text.split[1] == txt.find) { # guard against partial grep matches
        vert.rows[[length(vert.rows) + 1]] <-
          c(i - 1, as.numeric(text.split[2]), as.numeric(text.split[3]))
      }
    }
  }
  map.vertices <- as.data.frame(do.call(rbind, vert.rows))
  names(map.vertices) <- c("boxid", "x", "y")

  # Helper: for each box, the second tab-separated field of the row
  # matching "box<i>.<key>" (used for the botz and area rows, which the
  # .bgm file delimits with tabs), returned as a numeric vector.
  box.numeric.field <- function(key) {
    box.indices <- rep(0, numboxes)
    for (i in seq_len(numboxes)) {
      box.indices[i] <- grep(paste("box", i - 1, ".", key, sep = ""), bgm)
    }
    parts <- strsplit(bgm[box.indices], "\t")
    as.numeric(vapply(parts, function(p) p[2], character(1)))
  }

  # Depths (botz, negative below sea level), island flag, areas, volumes.
  z <- box.numeric.field("botz")
  box.data <- data.frame(boxid = 0:(numboxes - 1), z = z)
  box.data$is.island <- box.data$z >= 0.0 # box at/above sea level => island
  box.data$area <- box.numeric.field("area")
  box.data$volume <- -box.data$z * box.data$area

  # Internal (label) coordinates from the "box<i>.inside x y" rows.
  x.in <- rep(0, numboxes)
  y.in <- rep(0, numboxes)
  for (i in seq_len(numboxes)) {
    j <- grep(paste("box", i - 1, ".inside", sep = ""), bgm)
    text.split <- split.tokens(bgm[j])
    x.in[i] <- as.numeric(text.split[2])
    y.in[i] <- as.numeric(text.split[3])
  }
  box.data$x.in <- x.in # add internal x-location
  box.data$y.in <- y.in # add internal y-location
  box.data$boxid <- factor(box.data$boxid) # make boxid a factor

  # return a list of three objects: integer, data frame, data frame
  return(list(numboxes = numboxes,
              map.vertices = map.vertices,
              box.data = box.data))
}
# +======================================================+
# | make.init.cover : collect cover data to display      |
# +======================================================+
# Reads habitat-cover variables (reef/flat/soft/canyon) and layer geometry
# from an Atlantis initial-conditions netCDF file, merges them with the box
# data derived from the .bgm file, and builds the data frames used for
# plotting the model map.
#
# Args:
#   box.data     : per-box data frame from make.init.map (boxid, z, area, ...).
#   map.vertices : boundary vertices from make.init.map (boxid, x, y).
#   nc.file      : path to the Atlantis initial-conditions (.nc) file.
#
# Returns a list: numlevels (depth levels incl. sediment), depths (labels),
# box.info (per-box summary), df.map (vertices joined with box.info).
make.init.cover <- function(box.data, map.vertices, nc.file) {
  nc.out <- nc_open(nc.file) # open .nc file
  n.vars <- nc.out$nvars # number of variables in the .nc file
  var.names <- rep(NA, n.vars) # store all variable names
  for (i in 1:n.vars) { # find all variables
    var.names[i] <- nc.out$var[[i]]$name # add variable name to the vector
  }
  numlayers <- ncvar_get(nc.out, "numlayers")
  if (length(dim(numlayers)) > 1) {numlayers <- numlayers[,1]} # first time only
  # For each cover variable: keep the first time slice and substitute the
  # declared missing value for NAs so downstream arithmetic is defined.
  reef <- ncvar_get(nc.out, "reef") # reef cover per box
  if (length(dim(reef)) > 1) {reef <- reef[,1]}
  reef[is.na(reef)] <- nc.out$var[[which(var.names == "reef")]]$missval
  flat <- ncvar_get(nc.out, "flat") # flat cover per box
  if (length(dim(flat)) > 1) {flat <- flat[,1]}
  flat[is.na(flat)] <- nc.out$var[[which(var.names == "flat")]]$missval
  soft <- ncvar_get(nc.out, "soft") # soft cover per box
  if (length(dim(soft)) > 1) {soft <- soft[,1]}
  soft[is.na(soft)] <- nc.out$var[[which(var.names == "soft")]]$missval
  canyon <- ncvar_get(nc.out, "canyon") # canyon cover per box
  if (length(dim(canyon)) > 1) {canyon <- canyon[,1]}
  canyon[is.na(canyon)] <- nc.out$var[[which(var.names == "canyon")]]$missval
  numboxes <- length(numlayers) # number of boxes
  boxid <- 0:(numboxes-1) # box ids
  # create a data frame containing the coverages
  box.info <- data.frame(boxid, reef, flat, soft, canyon) # .nc box info
  box.info <- box.info %>% mutate(cover = reef+flat+soft) # add total cover
  # add cover check: classify each box by whether its total cover sums to 1
  # (a 0.001 tolerance is allowed on either side).
  cover.check <- rep(NA, length(box.info$boxid))
  for (i in 1:length(box.info$boxid)) {
    if (!is.na(box.info$cover[i])) {
      if (box.info$cover[i] == 0) {
        cover.check[i] <- "= 0"
      } else if (box.info$cover[i] <= 0.999) {
        cover.check[i] <- "< 1"
      } else if (box.info$cover[i] >= 1.001) {
        cover.check[i] <- "> 1"
      } else {
        cover.check[i] <- "= 1"
      }
    } else {
      cover.check[i] <- "missing"
    }
  }
  box.info$cover.check <- cover.check # add the cover.check to box.info
  # NOTE(review): "missing" is not among the factor levels below, so boxes
  # with missing cover become NA here — confirm this is intended.
  box.info$cover.check <- factor(box.info$cover.check,
    levels = c("= 0", "< 1", "= 1", "> 1")) # make a factor with specified levels
  nominal_dz <- ncvar_get(nc.out, "nominal_dz") # layer depths per box
  max.z.levels <- dim(nominal_dz)[1] # number of depth levels
  df.dz <- data.frame(boxid) # first column of data frame
  for (i in 1:max.z.levels) { # add depths of each level for each box
    df.dz[, as.character(i)] <- nominal_dz[i,] # add column for each level
  }
  df.dz[is.na(df.dz)] <- 0
  # Logical table: TRUE where a box actually has that layer (dz > 0).
  df.dz.present <- as.data.frame(df.dz[2:(max.z.levels+1)] > 0)
  df.dz.present$boxid <- boxid
  df.dz.present <- df.dz.present[c(max.z.levels+1, 1:max.z.levels)]
  df.present <- tidyr::gather(df.dz.present, "level", "present", 2:(max.z.levels+1))
  box.summary.1 <- df.present %>%
    group_by(boxid) %>%
    dplyr::summarise(layers.total = sum(present, na.rm = TRUE)) %>% # depth includes sediment
    mutate(layers.water = layers.total - 1) # layers in water column
  box.summary.1$layers.water <- pmax(rep(0,numboxes), box.summary.1$layers.water)
  # create a new long format data frame from the wide format data frame
  df.box <- tidyr::gather(df.dz, "lvl", "dz", 2:(max.z.levels+1))
  # layers.total is based on non-zero nominal_dz values
  # this code assumes a single sediment layer
  df.box$lvl <- as.integer(df.box$lvl)
  # Total water-column depth per box: sum dz over all non-sediment layers.
  box.summary.2 <- dplyr::filter(df.box, (lvl < max.z.levels)) %>%
    dplyr::group_by(boxid) %>%
    dplyr::summarise(depth.total = sum(dz, na.rm = TRUE))
  # limit water layers to not be less than zero
  box.summary <- dplyr::left_join(box.summary.1, box.summary.2, by = "boxid")
  deepest.box <- which(box.summary$depth.total == max(box.summary$depth.total))[1] # id
  dz <- nominal_dz[,deepest.box] # vector of depths from the deepest box
  numlevels <- length(dz)
  if (max.z.levels > 0) {
    depth.sediment <- dz[max.z.levels] # last element is assumed sediment depth
  } else {
    depth.sediment <- 0
  }
  # create a list of depths
  # NOTE(review): cumulative depths are built from the reversed (deepest-first)
  # water layers; confirm the label order matches the plotting code.
  depths <- c(as.character(cumsum(dz[(max.z.levels-1):1])), "sediment")
  # Add depth and layers to box.info
  box.info <- box.info %>% dplyr::left_join(box.summary, "boxid")
  # boxes with no water levels are missing so add the data
  na.boxes <- is.na(box.info$layers.water)
  box.info$layers.water[na.boxes] <- 0
  box.info$layers.total[na.boxes] <- 0 # no water layers so no sediment layer
  box.info$depth.total[na.boxes] <- 0.0
  # add box.data calculated from the .bgm file to box.info
  box.data$boxid <- boxid # make boxid an integer for joining
  box.info <- box.info %>% dplyr::left_join(box.data, "boxid")
  numlayers[is.na(numlayers)] <- nc.out$var[[which(var.names == "numlayers")]]$missval
  box.info$numlayers <- numlayers
  map.vertices$boxid <- as.integer(map.vertices$boxid) # enforce integer
  # create data frame for plotting
  df.map <- merge(map.vertices, box.info, by = "boxid")
  nc_close(nc.out)
  return(list(
    numlevels = numlevels,
    depths = depths,
    box.info = box.info,
    df.map = df.map
  ))
}
# +======================================================+
# | make.init.data : collect remaining data to display   |
# +======================================================+
# Reads the Atlantis initial-conditions netCDF file and partitions every
# variable into biotic (species) and abiotic (non-species) groups, each
# further split into 2D (per box) and 3D (per box x depth layer) data sets.
# Also extracts reserve/structural nitrogen weights and cohort numbers.
#
# Args:
#   nc.file   : path to the Atlantis initial-conditions (.nc) file.
#   numboxes  : number of model boxes (from the .bgm file).
#   numlevels : number of depth levels, including the sediment layer.
#
# Returns a list with 2D/3D abiotic and biotic variable names, data arrays,
# HTML-formatted attribute strings, df.nitrogen (per-cohort N weights) and
# df.nums (per-cohort abundances).
make.init.data <- function(nc.file, numboxes, numlevels) {
  nc.out <- nc_open(nc.file) # open .nc file
  n.vars <- nc.out$nvars # number of variables in the .nc file
  var.names <- rep(NA, n.vars) # store all variable names
  for (i in 1:n.vars) { # find all variables
    var.names[i] <- nc.out$var[[i]]$name # add variable name to the vector
  }
  # find biotic variable names (step 1 of two steps)
  # find variable names ending in _N or _Nx
  species.names <- var.names[grep(pattern = "(_N)*_N[0-9]*$", x = var.names)]
  for (i in 1:length(species.names)) {
    species.old <- species.names[i]
    str_sub(species.old, start = str_length(species.old)-1,
      end = str_length(species.old)) <- "" # remove _N
    if (substr(species.old, nchar(species.old), nchar(species.old)) == "_") {
      str_sub(species.old, start = str_length(species.old),
        end = str_length(species.old)) <- "" # remove trailing _
    }
    species.names[i] <- species.old # replace name with _N removed
  }
  species.names <- unique(species.names) # remove duplicates from cohorts
  # find all abiotic variables
  i.species <- NA
  for (sp in species.names) {
    i.species <- c(i.species, grep(pattern = sp, x = var.names))
  }
  i.species <- unique(i.species)
  i.species <- i.species[-1] # remove NA in row 1
  non.species <- rep(TRUE, n.vars) # start off assuming all variables are abiotic
  non.species[i.species] <- FALSE # flag the biotic variables
  nonspecies.names <- var.names[non.species == TRUE] # get abiotic names
  # find odd species in the nonspecies.names list (step 2)
  # biotic variables of the form _N[number] still classified as abiotic
  odd.species <- nonspecies.names[grep(pattern = "_N", x = nonspecies.names)]
  # fixed: was length(odd.species > 0), which only worked by accident
  if (length(odd.species) > 0) {
    odd.species <- str_split(odd.species, pattern = "_N")
    odd.name <- NA
    for (i in 1:length(odd.species)) {
      odd.name <- c(odd.name, odd.species[[i]][1])
    }
    odd.species <- unique(na.omit(odd.name))
    species.names <- c(species.names, odd.species) # add odd species to species list
  }
  # recalculate abiotic variable names
  i.species <- NA
  for (sp in species.names) {
    i.species <- c(i.species, grep(pattern = sp, x = var.names))
  }
  i.species <- unique(i.species[-1]) # remove NA in row 1
  non.species <- rep(TRUE, n.vars) # initially assume all are nonspecies
  non.species[i.species] <- FALSE # remove all species
  i.nonspecies <- which(non.species == TRUE)
  nonspecies.names <- var.names[i.nonspecies]
  # Light_Adaption variables are considered non-species (fix)
  # split abiotic variables into 2D and 3D groups
  nonspecies.2.d <- NA # number of dimensions
  nonspecies.3.d <- NA # number of dimensions
  nonspecies.2.names <- NA
  nonspecies.3.names <- NA
  nonspecies.2.b <- NA # box dimension
  nonspecies.3.b <- NA # box dimension
  nonspecies.3.z <- NA # depth dimension
  for (i in i.nonspecies) {
    d <- nc.out$var[[i]]$ndims
    found.depth <- FALSE
    # NOTE(review): if a variable has no "b" dimension, b.indx silently
    # carries over from the previous iteration — confirm all variables
    # read here are box-indexed.
    for (j in 1:d) {
      if (nc.out$var[[i]]$dim[[j]]$name == "b") {
        b.indx <- j
      }
      if (nc.out$var[[i]]$dim[[j]]$name == "z") {
        z.indx <- j
        found.depth <- TRUE # a 3D variable
      }
    }
    if (found.depth) {
      nonspecies.3.d <- c(nonspecies.3.d, d)
      nonspecies.3.names <- c(nonspecies.3.names, nc.out$var[[i]]$name)
      nonspecies.3.b <- c(nonspecies.3.b, b.indx)
      nonspecies.3.z <- c(nonspecies.3.z, z.indx)
    } else {
      nonspecies.2.d <- c(nonspecies.2.d, d)
      nonspecies.2.names <- c(nonspecies.2.names, nc.out$var[[i]]$name)
      nonspecies.2.b <- c(nonspecies.2.b, b.indx)
    }
  }
  nonspecies.2.d <- nonspecies.2.d[-1] # remove starting NA
  nonspecies.2.names <- nonspecies.2.names[-1] # remove starting NA
  nonspecies.2.b <- nonspecies.2.b[-1] # remove starting NA
  nonspecies.3.d <- nonspecies.3.d[-1] # remove starting NA
  nonspecies.3.names <- nonspecies.3.names[-1] # remove starting NA
  nonspecies.3.b <- nonspecies.3.b[-1] # remove starting NA
  nonspecies.3.z <- nonspecies.3.z[-1] # remove starting NA
  # create matrices identifying box-layers containing water
  numlayers.all <- ncvar_get(nc.out, "numlayers")
  numlayers.all[is.na(numlayers.all)] <- 0
  max.numlayers <- dim(ncvar_get(nc.out, "volume"))[1] # includes sediment
  # 2D
  not.valid.2 <-rep(TRUE, numboxes)
  not.valid.2[numlayers.all > 0] <- FALSE
  # 3D
  not.valid.3 <- matrix(data = TRUE, nrow = numlevels, ncol = numboxes)
  for (i in 1:numboxes) {
    if (numlayers.all[i] > 0) { # some water layers
      not.valid.3[max.numlayers,i] <- FALSE
      for (j in 1:numlayers.all[i]) {
        not.valid.3[j,i] <- FALSE
      }
    }
  }
  # extract abiotic 2D data
  nonspecies.2.n <- length(nonspecies.2.names)
  nonspecies.2.data <- array(data = NA, dim = c(nonspecies.2.n, numboxes))
  nonspecies.2.att <- rep("", nonspecies.2.n)
  for (i in 1:nonspecies.2.n) {
    tmp <- ncvar_get(nc.out, nonspecies.2.names[i]) # get all variable data
    # add the fill value to the missing values
    if (ncatt_get(nc.out, varid = nonspecies.2.names[i], attname = "_FillValue")$hasatt) {
      tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = nonspecies.2.names[i],
        attname = "_FillValue")$value
    } else {
      tmp[is.na(tmp)] <- 0
    }
    tmp[not.valid.2] <- NA
    tmp.dim <- length(dim(tmp))
    if (nonspecies.2.d[i] == 2) {
      # expect to be stored as a 2D array
      if (tmp.dim == 2) {
        if (nonspecies.2.b[i] == 1) {
          nonspecies.2.data[i,] <- tmp[ ,1] # spatial data is in first dimension
        } else {
          nonspecies.2.data[i,] <- tmp[1, ] # spatial data is in second dimension
        }
      } else {
        nonspecies.2.data[i,] <- tmp # actually a 1D array!
      }
    } else {
      # expect stored as a 3D array
      # fixed: was nonspecies.2.b (whole vector) — a length > 1 condition,
      # which errors in R >= 4.2 and silently used only the first element
      # in earlier versions.
      if (nonspecies.2.b[i] == 1) {
        nonspecies.2.data[i,] <- tmp[ ,1,1] # spatial data is in first dimension
      } else {
        nonspecies.2.data[i,] <- tmp[1, ,1] # spatial data is in second dimension
      }
    }
    # get the attribute details and store as HTML text
    tmp <- ncatt_get(nc.out, nonspecies.2.names[i]) # attribute names
    tmp.n <- length(tmp) # number of attributes
    tmp.names <- names(tmp)
    txt <- ""
    for (k in 1:tmp.n) {
      txt.tmp <- paste(tmp.names[k], ": ", as.character(tmp[k]), sep = "")
      txt <- paste(txt, txt.tmp, sep = "<br/>")
    }
    nonspecies.2.att[i] <- txt
  }
  # extract abiotic 3D data
  nonspecies.3.n <- length(nonspecies.3.names)
  nonspecies.3.data <- array(data = NA, dim = c(nonspecies.3.n, numboxes, numlevels))
  nonspecies.3.att <- rep("", nonspecies.3.n)
  for (i in 1:nonspecies.3.n) {
    tmp <- ncvar_get(nc.out, nonspecies.3.names[i]) # get all variable data
    # add the fill value to the missing values
    if (ncatt_get(nc.out, varid = nonspecies.3.names[i], attname = "_FillValue")$hasatt) {
      tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = nonspecies.3.names[i],
        attname = "_FillValue")$value
    } else {
      tmp[is.na(tmp)] <- 0
    }
    tmp[not.valid.3] <- NA
    tmp.dim <- length(dim(tmp))
    if (nonspecies.3.d[i] == 2) {
      # expect to be stored as a 2D array
      for (j in 1:numlevels) {
        if (nonspecies.3.z[i] == 1) { # depth index is first dimension
          nonspecies.3.data[i, ,j] <- tmp[j, ]
        } else { # depth index is second dimension
          nonspecies.3.data[i, ,j] <- tmp[ ,j]
        }
      }
    } else {
      # expect to be stored as a 3D array
      if (tmp.dim == 2) {
        # only two dimensions!
        for (j in 1:numlevels) {
          if (nonspecies.3.z[i] == 1) { # depth index is first dimension
            nonspecies.3.data[i, ,j] <- tmp[j, ]
          } else { # depth index is second dimension
            nonspecies.3.data[i, ,j] <- tmp[ ,j]
          }
        }
      } else {
        for (j in 1:numlevels) {
          if (nonspecies.3.z[i] == 1) { # depth index is first dimension
            nonspecies.3.data[i, ,j] <- tmp[j, ,1]
          } else { # depth index is second dimension
            nonspecies.3.data[i, ,j] <- tmp[ ,j,1]
          }
        }
      }
    }
    # get the attribute details and store as HTML text
    tmp <- ncatt_get(nc.out, nonspecies.3.names[i])
    tmp.n <- length(tmp)
    tmp.names <- names(tmp)
    txt <- ""
    for (k in 1:tmp.n) {
      txt.tmp <- paste(tmp.names[k], ": ", as.character(tmp[k]), sep = "")
      txt <- paste(txt, txt.tmp, sep = "<br/>")
    }
    nonspecies.3.att[i] <- txt
  }
  # split species into 2D and 3D groups
  species.2.names.full <- NA # full names (2D)
  species.3.names.full <- NA # full names (3D)
  species.n <- length(species.names)
  species.2.N <- rep(0, species.n) # number of subgroups per species group (2D)
  species.3.N <- rep(0, species.n) # number of subgroups per species group (3D)
  for (i in 1:species.n) { # go through each species
    sp <- species.names[i] # extract the species group name
    rgexprn <- paste("^", sp, sep="") # what if superset names (see below)?
    j <- grep(rgexprn, var.names) # indices associated with species group
    tmp.names <- gsub(sp, "", var.names[j]) # remove species name
    j2 <- NULL
    for (k in 1:length(j)) {
      if (substr(tmp.names[k], 1, 1) == "_" |
          substr(tmp.names[k], 1, 1) %in% as.character(1:9)) { # valid variable
        j2 <- c(j2, j[k])
      }
    }
    for (k in j2) {
      tmp <- ncvar_get(nc.out, var.names[k]) # get the associated data set
      tmp.dims <- length(dim(tmp)) # dimensions of the associated data
      # TO DO: check that these assumptions about dimensions are correct
      if (tmp.dims == 1) {
        # 2D species distribution with no time replicates
        species.2.names.full <- c(species.2.names.full, var.names[k])
        species.2.N[i] <- species.2.N[i] + 1
      } else if (tmp.dims == 2) {
        if (dim(tmp)[1] == numlevels) {
          # 3D with no time replicates
          species.3.names.full <- c(species.3.names.full, var.names[k])
          species.3.N[i] <- species.3.N[i] + 1
        } else {
          # 2D with time replicates
          species.2.names.full <- c(species.2.names.full, var.names[k])
          species.2.N[i] <- species.2.N[i] + 1
        }
      } else if (tmp.dims == 3) {
        # 3D species distribution with time replicates
        species.3.names.full <- c(species.3.names.full, var.names[k])
        species.3.N[i] <- species.3.N[i] + 1
      }
    }
  }
  species.2.names.full <- species.2.names.full[-1] # remove starting NA
  species.3.names.full <- species.3.names.full[-1] # remove starting NA
  species.2.names <- NA # names (2D)
  species.3.names <- NA # names (3D)
  for (i in 1:species.n) { # go through each species
    if (species.2.N[i] > 0) {
      species.2.names <- c(species.2.names, species.names[i])
    }
    if (species.3.N[i] > 0) {
      species.3.names <- c(species.3.names, species.names[i])
    }
  }
  species.2.names <- species.2.names[-1] # remove starting NA
  species.3.names <- species.3.names[-1] # remove starting NA
  # 2D species data
  species.2.n <- length(species.2.names)
  species.2.all.n <- sum(species.2.N)
  species.2.groups <- rep(0, species.2.n) # number of subgroups per species group (2D)
  i <- 0
  for (j in 1:species.n) {
    if (species.2.N[j] > 0) {
      i <- i + 1
      species.2.groups[i] <- species.2.N[j]
    }
  }
  species.2.data <- array(data = NA, dim = c(species.2.all.n, numboxes))
  species.2.att <- rep("", species.2.all.n)
  i <- 0
  for (sp in species.2.names.full) {
    i <- i + 1
    tmp <- ncvar_get(nc.out, sp) # get the associated data set
    # add the fill value to the missing values
    if (ncatt_get(nc.out, varid = sp, attname = "_FillValue")$hasatt) {
      tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = sp, attname = "_FillValue")$value
    } else {
      tmp[is.na(tmp)] <- 0
    }
    tmp[not.valid.2] <- NA # remove data from non-data boxes
    tmp.dims <- length(dim(tmp)) # dimensions of the associated data
    if (tmp.dims == 1) {
      # 2D species distribution with no time replicates
      species.2.data[i, ] <- tmp
    } else {
      # 2D with time replicates
      species.2.data[i, ] <- tmp[ ,1]
    }
    tmp <- ncatt_get(nc.out, sp)
    tmp.n <- length(tmp)
    tmp.names <- names(tmp)
    txt <- ""
    for (k in 1:tmp.n) {
      txt.tmp <- paste(tmp.names[k], ": ", as.character(tmp[k]), sep = "")
      txt <- paste(txt, txt.tmp, sep = "<br/>")
    }
    species.2.att[i] <- txt # add 2D species attribute info
  }
  # 3D species data
  species.3.n <- length(species.3.names)
  species.3.all.n <- sum(species.3.N)
  species.3.groups <- rep(0, species.3.n) # number of subgroups per species group (3D)
  i <- 0
  for (j in 1:species.n) {
    if (species.3.N[j] > 0) {
      i <- i + 1
      species.3.groups[i] <- species.3.N[j]
    }
  }
  species.3.data <- array(data = NA, dim = c(species.3.all.n, numboxes, numlevels))
  species.3.att <- rep("", species.3.all.n)
  i <- 0
  for (sp in species.3.names.full) {
    i <- i + 1
    tmp <- ncvar_get(nc.out, sp) # get the associated data set
    # add the fill value to the missing values
    if (ncatt_get(nc.out, varid = sp, attname = "_FillValue")$hasatt) {
      tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = sp, attname = "_FillValue")$value
    } else {
      tmp[is.na(tmp)] <- 0
    }
    tmp[not.valid.3] <- NA
    tmp.dims <- length(dim(tmp)) # dimensions of the associated data
    if (tmp.dims == 2) {
      # 3D species distribution with no time replicates
      for (j in 1:numlevels) {
        species.3.data[i, ,j] <- tmp[j, ]
      }
    } else {
      # 3D with time replicates
      for (j in 1:numlevels) {
        species.3.data[i, ,j] <- tmp[j, ,1]
      }
    }
    tmp <- ncatt_get(nc.out, sp)
    tmp.n <- length(tmp)
    tmp.names <- names(tmp)
    txt <- ""
    for (k in 1:tmp.n) {
      txt.tmp <- paste(tmp.names[k], ": ", as.character(tmp[k]), sep = "")
      txt <- paste(txt, txt.tmp, sep = "<br/>")
    }
    species.3.att[i] <- txt
  }
  # get structural and reserve nitrogen weights across cohorts
  Species <- NA # species name
  Cohort <- NA # cohort index
  N.Type <- NA # structural or reserve
  N.Value <- NA # mg N
  for (sp in species.3.names) {
    for (i in 1:30) { # only consider up to 30 cohorts
      # look for reserve nitrogen
      txt.find <- paste(sp, as.character(i), "_ResN", sep = "")
      j <- grep(txt.find, species.3.names.full)
      if (length(j) == 1) {
        Species <- c(Species, sp)
        Cohort <- c(Cohort, i)
        N.Type <- c(N.Type, "Reserve")
        tmp <- ncvar_get(nc.out, txt.find) # get the associated data set
        tmp.dims <- length(dim(tmp)) # dimensions of the associated data
        if (tmp.dims == 3) {
          tmp <- tmp[ , ,1] # remove time dimension
        }
        if (ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$hasatt) {
          tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$value
        } else {
          tmp[is.na(tmp)] <- 0
        }
        tmp[not.valid.3] <- NA
        if (sum(!is.na(tmp)) > 0) {
          N.val <- max(tmp[ , ], na.rm = TRUE)
        } else {
          N.val <- 0.0 # No data provided
        }
        N.Value <- c(N.Value, N.val)
      }
      # look for structural nitrogen
      txt.find <- paste(sp, as.character(i), "_StructN", sep = "")
      j <- grep(txt.find, species.3.names.full)
      if (length(j) == 1) {
        Species <- c(Species, sp)
        Cohort <- c(Cohort, i)
        N.Type <- c(N.Type, "Structural")
        tmp <- ncvar_get(nc.out, txt.find) # get the associated data set
        tmp.dims <- length(dim(tmp)) # dimensions of the associated data
        if (tmp.dims == 3) {
          tmp <- tmp[ , ,1] # remove time dimension
        }
        if (ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$hasatt) {
          tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$value
        } else {
          tmp[is.na(tmp)] <- 0
        }
        tmp[not.valid.3] <- NA
        if (sum(!is.na(tmp)) > 0) {
          N.val <- max(tmp[ , ], na.rm = TRUE)
        } else {
          N.val <- 0.0 # No data provided
        }
        N.Value <- c(N.Value, N.val)
      }
    }
  }
  df.nitrogen <- data.frame(Species, Cohort, N.Type, N.Value)
  df.nitrogen <- df.nitrogen[-1,] # drop the NA seed row
  # get numbers across cohorts
  Species <- NA # species name
  Cohort <- NA # cohort index
  Nums.Value <- NA # individuals
  for (sp in species.3.names) {
    for (i in 1:30) { # only consider up to 30 cohorts
      # look for abundance data
      txt.find <- paste(sp, as.character(i), "_Nums", sep = "")
      j <- grep(txt.find, species.3.names.full)
      if (length(j) == 1) {
        Species <- c(Species, sp)
        Cohort <- c(Cohort, i)
        # fixed: removed dead copy-paste append to N.Type, which is not a
        # column of df.nums and was never read after this point.
        tmp <- ncvar_get(nc.out, txt.find) # get the associated data set
        tmp.dims <- length(dim(tmp)) # dimensions of the associated data
        if (tmp.dims == 3) {
          tmp <- tmp[ , ,1] # remove time dimension
        }
        if (ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$hasatt) {
          tmp[is.na(tmp)] <- ncatt_get(nc.out, varid = txt.find, attname = "_FillValue")$value
        } else {
          tmp[is.na(tmp)] <- 0
        }
        tmp[not.valid.3] <- NA
        if (sum(!is.na(tmp)) > 0) {
          N.val <- sum(tmp[ , ], na.rm = TRUE)
        } else {
          N.val <- 0.0 # No data provided
        }
        Nums.Value <- c(Nums.Value, N.val)
      }
    }
  }
  df.nums <- data.frame(Species, Cohort, Nums.Value)
  df.nums <- df.nums[-1,] # drop the NA seed row
  nc_close(nc.out)
  return(list(
    nonspecies.2.names = nonspecies.2.names,
    nonspecies.3.names = nonspecies.3.names,
    nonspecies.2.data = nonspecies.2.data,
    nonspecies.3.data = nonspecies.3.data,
    nonspecies.2.att = nonspecies.2.att,
    nonspecies.3.att = nonspecies.3.att,
    species.2.names = species.2.names,
    species.3.names = species.3.names,
    species.2.names.full = species.2.names.full,
    species.3.names.full = species.3.names.full,
    species.2.groups = species.2.groups,
    species.3.groups = species.3.groups,
    species.2.data = species.2.data,
    species.3.data = species.3.data,
    species.2.att = species.2.att,
    species.3.att = species.3.att,
    df.nitrogen = df.nitrogen,
    df.nums = df.nums
  ))
}
# +=====================================================+
# |  make.sh.init.object : collect all data to display  |
# +=====================================================+
#' @title Function that generates a list object used by sh.init
#'
#' @description
#' Combines a box geometry model with a netCDF Atlantis input parameter file
#' and returns the list object expected by
#' \code{\link[shinyrAtlantis]{sh.init}} (see Examples).
#'
#' @param bgm.file Box geometry model (.bgm) file used by Atlantis that defines box boundaries and depths.
#' @param nc.file NetCDF (.nc) file used by Atlantis to set initial conditions.
#'
#' @return R list object.
#'
#' @examples
#' \dontrun{
#' bgm.file <- "VMPA_setas.bgm"
#' nc.file <- "INIT_VMPA_Jan2015.nc"
#' input.object <- make.sh.init.object(bgm.file, nc.file)
#' sh.init(input.object)
#' }
#' @export
#' @importFrom magrittr %>%
#' @importFrom dplyr group_by
#' @importFrom stringr str_sub
#' @importFrom ncdf4 ncatt_get
#' @importFrom stringr str_length
make.sh.init.object <- function(bgm.file, nc.file) {
  # Stage 1: geometry from the .bgm file.
  cat("-- Extracting map data\n")
  map <- make.init.map(bgm.file)
  # Stage 2: cover variables and layer structure from the .nc file.
  cat("-- Extracting cover variables\n")
  cover <- make.init.cover(map$box.data, map$map.vertices, nc.file)
  # Stage 3: everything else (species and non-species variables).
  cat("-- Extracting additional variables (this may take a few minutes)\n")
  extra <- make.init.data(nc.file, map$numboxes, cover$numlevels)
  # Assemble the final list: one element from the map object, four from the
  # cover object, then every element returned by make.init.data, preserving
  # the element order sh.init expects.
  c(
    list(numboxes = map$numboxes),
    cover[c("numlevels", "depths", "box.info", "df.map")],
    extra
  )
}
|
47d5603c08525dbfbdd62793d27bdb98dac5485c | 831851a1edec68b8528f642de951dc5650dab78a | /spons/test.r.R | 9f6528561a597944aef5c56f6d88752b9b79418a | [] | no_license | cmason30/yorg-lab | 130e6852069f6bc4da69575b0b62207cbc33b627 | 95dd418dc1cf1e523af232736044c2613becda27 | refs/heads/master | 2021-08-18T04:01:04.380774 | 2021-07-19T01:37:50 | 2021-07-19T01:37:50 | 241,714,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 110 | r | test.r.R | print('hello world')
mydata <- read.csv('/Users/colinmason/Desktop/yorglab/testwork/the_mastersheet.csv')
|
1a84c1ed63c1196f178f4e4341bbb7bf35726737 | c81ff5da3b2645e7d8a19671703042c7b4c45455 | /man/iquery.Rd | f966a1435b3831d6fc20efacc328449b3320734d | [] | no_license | apoliakov/SciDBR | f4a193dc661bc5bf98e0dd3419091801cc5ecae2 | 2fb508ee32bb61aee24883f52143ac4fb8864d7c | refs/heads/master | 2021-01-13T06:38:36.283935 | 2017-03-30T22:24:54 | 2017-03-30T22:24:54 | 78,246,920 | 0 | 0 | null | 2017-03-30T22:24:54 | 2017-01-06T23:37:08 | R | UTF-8 | R | false | true | 1,456 | rd | iquery.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{iquery}
\alias{iquery}
\title{Run a SciDB query, optionally returning the result.}
\usage{
iquery(db, query, return = FALSE, binary = TRUE, ...)
}
\arguments{
\item{db}{a scidb database connection from \code{\link{scidbconnect}}}
\item{query}{a single SciDB query string or scidb array object}
\item{return}{if \code{TRUE}, return the result}
\item{binary}{set to \code{FALSE} to read result from SciDB in text form}
\item{...}{additional options passed to \code{read.table} when \code{binary=FALSE},
or optional result schema when \code{binary=TRUE} (see note below).}
}
\description{
Run a SciDB query, optionally returning the result.
}
\note{
When \code{query} is an arbitrary AFL query string and \code{binary=TRUE},
optionally specify \code{schema} with a valid result array schema to skip
an extra metadata lookup query (see \code{\link{scidb}}).
Setting \code{return=TRUE} wraps the AFL \code{query} expression with a SciDB
save operator, saving the data on the SciDB server in either binary or text
format depending on the value of the \code{binary} parameter. Please note that
some AFL expressions may not be "saved" using the AFL save operator, including
for instance the AFL create_array operator. Trying to return the result of such
a SciDB expression will result in a run-time error.
}
\seealso{
\code{\link{scidb}} \code{\link{as.R}}
}
|
77ada87312ff94ff1bcbbf1a14617457c1dcd47b | a1f3381c7f961b060ff3d309968f3df9cdba5ce4 | /Csmart/cslmsearch.r | 3d1157a51bd087c0ca3aa42822feb00d3b272da0 | [] | no_license | cdzhang/gpuModelSearch | b75a971dea233796e6754ffd14ac602111c77e5e | 9d06f1bc831617736d73fecdabf4a73c0b471308 | refs/heads/master | 2021-05-15T15:39:08.643518 | 2012-09-11T17:53:10 | 2012-09-11T17:53:10 | 107,405,668 | 1 | 0 | null | 2017-10-18T12:31:41 | 2017-10-18T12:31:41 | null | UTF-8 | R | false | false | 2,333 | r | cslmsearch.r | library(gputools)
# User-facing entry point for the GPU model search.
# If no g-prior scale is supplied, default to max(p^2, n), where p is the
# number of candidate predictors (columns of X minus the intercept) and n
# is the number of observations.
gpuCSlmsearch <- function(X, Y, g = NULL, sort = "AIC", nsave = 1000) {
  if (is.null(g)) {
    g <- max((ncol(X) - 1)^2, nrow(X))
  }
  gpuCSlmsearch.fit(X, Y, g, sort, nsave)
}
# Workhorse for gpuCSlmsearch: marshals the data into the single-precision
# form expected by the compiled "CSlmsearch" routine, runs the search, and
# post-processes the results into a ranked data frame of the top models.
#
# Args:
#   X        : design matrix (first column assumed to be the intercept).
#   Y        : response matrix.
#   g        : Zellner g-prior scale.
#   sorttype : "AIC", "BIC", or anything else for log marginal likelihood.
#   nsave    : maximum number of top models to keep.
#
# Returns a list with Models (ranked data frame of the saved models) and
# OtherProb (posterior probability mass of the unsaved models).
gpuCSlmsearch.fit <- function(X, Y, g, sorttype, nsave){
  # NOTE(review): hard-coded home-relative path to the shared object —
  # confirm this matches the deployment layout.
  if(!is.loaded("CSlmsearch"))
    dyn.load("~/gpuModelSearch/Csmart/smartlmsearch.so")
  nsave <- min(nsave, 2^(ncol(X)-1)) # cannot save more models than exist
  mode(g) <- "integer"
  mode(X) <- "single" # the compiled routine works in single precision
  mode(Y) <- "single"
  mode(nsave) <- "integer"
  n <- as.integer(nrow(X))
  p <- as.integer(ncol(X))
  ycols <- as.integer(ncol(Y))
  binids <- matrix(0L,ncol=p-1, nrow=nsave)
  # Map the requested sort criterion to the integer code used by C.
  if(sorttype=="AIC"){
    sort <- 1L
  }
  else if(sorttype=="BIC"){
    sort <- 2L
  }
  else{
    sort <- 3L
  }
  # Initialize criteria with large sentinel values so any real model wins.
  aics <- rep(1000000000000, nsave)
  bics <- aics
  lmls <- -aics
  models <- integer(nsave)
  probs <- rep(0,nsave)
  otherprob <- 0
  mode(aics) <- "single"
  mode(bics) <- "single"
  mode(lmls) <- "single"
  z <- .C("CSlmsearch", X, n, p, Y, ycols, g, aic=aics, bic=bics, lml=lmls,
          probs=probs, otherprob=otherprob, id=models, bin=binids, nsave, sort)
  # Strip the "Csingle" storage attribute from the returned vectors.
  # Fixed: z$bic was never cleared (z$bin was cleared twice by mistake).
  attr(z$aic, "Csingle") <- NULL
  attr(z$bic, "Csingle") <- NULL
  attr(z$lml, "Csingle") <- NULL
  attr(z$bin, "Csingle") <- NULL
  out <- data.frame(ID=z$id, BinaryID=modelidchar(z$bin, nsave),
                    AIC=z$aic,
                    AICrank=rank(z$aic,ties.method="first"),
                    BIC=z$bic,
                    BICrank=rank(z$bic,ties.method="first"),
                    LogMargLike=z$lml,
                    LMLrank=rank(-z$lml,ties.method="first"),
                    PostProb=z$probs,
                    Variables=modelvars(z$bin, colnames(X)[-1], nsave))
  # Order the table by the requested criterion (lower AIC/BIC or higher
  # log marginal likelihood is better).
  if(sorttype=="AIC"){
    out <- out[order(out$AICrank),]
  }
  else if(sorttype=="BIC"){
    out <- out[order(out$BICrank),]
  }
  else{
    out <- out[order(out$LMLrank),]
  }
  out <- list(Models=out, OtherProb=z$otherprob)
  return(out)
}
# Collapse each row of the binary model-id matrix into a single character
# string, e.g. c(1, 0, 1) -> "101". Returns a character vector of length
# nsave (one string per saved model).
modelidchar <- function(binid, nsave){
  vapply(
    seq_len(nsave),
    function(row) paste(binid[row, ], collapse = ""),
    character(1)
  )
}
# Translate each row of the binary model-id matrix into a human-readable
# variable list: "Int" (intercept) followed by the names of the included
# predictors. The row bits are reversed before matching against colnam,
# mirroring the bit order produced by the C search routine.
modelvars <- function(binid, colnam, nsave){
  vapply(
    seq_len(nsave),
    function(row) {
      included <- colnam[which(rev(binid[row, ]) == 1)]
      paste(c("Int", included), collapse = " ")
    },
    character(1)
  )
}
|
ba1cee4d594ffde19386751c3338128aef0e6037 | 9f35ee6ed29a04bf6c5bafd7ff09a076f626dc01 | /Exp_pattern/2.4.ICP_exp_in_validate_data-tumor-immune.GSE103322.R | a58afe292ddc08ca188d967e89913408dffaa286 | [] | no_license | mywanuo/immune-cp | fb4fbb9ef20120a1d2cad65f524d0a34bc2b5d9d | 9fb0ee0eff8636008eb587b20d5d2c9e3da18dad | refs/heads/master | 2022-10-28T11:44:24.513357 | 2020-06-19T10:00:30 | 2020-06-19T10:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,073 | r | 2.4.ICP_exp_in_validate_data-tumor-immune.GSE103322.R | ######################## verify the ICP expression site in head and neck single cell data set ############
######################## verify the ICP expression site in head and neck single cell data set ############
######################## GSE103322_HNSCC
###### To understand the diversity of expression states within head and neck cancers, we profiled 5902 single cells from 18 patients with oral cavity tumors by single cell RNA-seq.
library(magrittr)
# Project paths (hard-coded to the analysis server layout).
basic_path <- "/home/huff/project"
immune_path <- file.path(basic_path,"immune_checkpoint")
gene_list_path <-file.path(immune_path,"checkpoint/20171021_checkpoint")
# NOTE(review): res_path is assigned twice; the first (byratio) value is
# immediately overwritten by the byMeanUQ value — confirm the first line
# is intentional leftover.
res_path <- file.path(immune_path,"result_20171025/ICP_exp_patthern-byratio")
res_path <- file.path(immune_path,"result_20171025/ICP_exp_patthern-byMeanUQ")
data_path <- file.path(basic_path,"data/single_cell_RNAseq/GSE103322_HNSCC")
# load image --------------------------------------------------------------
load(file.path(
  res_path,"pattern_validation","GSE103322_HNSCC.TI.compare.Rdata")
)
#### gene list ------------------------------------------------------------------------
# Immune-checkpoint gene list plus the expression-site classification
# previously derived from FANTOM data (used later as the reference to
# validate against).
gene_list <- read.table(file.path(gene_list_path, "all.entrez_id-gene_id"),header=T)
gene_list_exp_site <- readr::read_tsv(file.path(res_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
  dplyr::select(entrez_ID,symbol,Exp_site,`log2FC(I/T).mean`,`log2FC(I/T).mid`) %>%
  dplyr::inner_join(gene_list,by="symbol")
# load expresion data -----------------------------------------------------
# sample info: the annotation file stores samples as columns, so transpose
# before selecting the sample / GSM id columns (first row is a header).
sample_info <- readr::read_tsv(file.path(data_path,"sample_anno.txt"),col_names = F) %>%
  t() %>%
  as.data.frame() %>%
  dplyr::as.tbl() %>%
  dplyr::select(V1,V2) %>%
  dplyr::rename("sample"="V1","GSM"="V2") %>%
  .[-1,]
# Classify each cell as Tumor / Immune / Stromal / Non cancer from the
# published cell-type calls.
sample_info.class <- readr::read_tsv(file.path(data_path,"cell_types.class.txt")) %>%
  tidyr::gather(-X1,key="sample",value="Class") %>%
  tidyr::spread(key="X1",value="Class") %>%
  dplyr::mutate(cell_source = ifelse(`classified as cancer cell`==1,"Tumor","Non cancer")) %>%
  dplyr::mutate(cell_source = ifelse(`non-cancer cell type` %in% c("T cell","B cell","Macrophage","Mast", "Dendritic","myocyte"),"Immune",cell_source)) %>%
  dplyr::mutate(cell_source = ifelse(`non-cancer cell type` %in% c( "-Fibroblast","Endothelial"),"Stromal",cell_source)) %>%
  dplyr::select(sample,cell_source)
# exp data: drop the five header/annotation rows, strip stray quotes from
# gene symbols, and keep only the immune-checkpoint genes of interest.
ICP_exp_in_GSE103322 <- readr::read_tsv(file.path(data_path,"GSE103322_HNSCC_all_data.txt"))
ICP_exp_in_GSE103322 %>%
  .[-c(1:5),] %>%
  dplyr::rename("symbol" ="X1") %>%
  dplyr::mutate(symbol = purrr::map(symbol,.f=function(.x){gsub("\\'","",.x)})) %>%
  tidyr::unnest() %>%
  dplyr::filter(symbol %in% gene_list_exp_site$symbol) -> ICP_exp_in_GSE103322
# compare ICP between tumor and immune cells ------------------------------
### Compare one gene's expression between tumor and immune cells.
### Given a per-gene data frame with columns Exp (numeric expression) and
### cell_source, runs an unpaired two-sided Wilcoxon test (Tumor vs Immune)
### and returns the tidied test result augmented with group means, upper
### quartiles, and log2 fold changes (Immune over Tumor, 0.01 pseudocount).
fn_compare_TI_FC <- function(.data){
  # Restrict to the two cell populations being contrasted.
  .data <- dplyr::filter(.data, cell_source %in% c("Tumor", "Immune"))
  immune_vals <- .data$Exp[.data$cell_source == "Immune"]
  tumor_vals <- .data$Exp[.data$cell_source == "Tumor"]
  # Per-group summary statistics.
  mean_immune_exp <- mean(immune_vals)
  mean_tumor_exp <- mean(tumor_vals)
  UQ_immune_exp <- quantile(immune_vals, 0.75)
  UQ_tumor_exp <- quantile(tumor_vals, 0.75)
  # Unpaired two-sided Wilcoxon (Mann-Whitney) test between the groups.
  test_res <- broom::tidy(
    wilcox.test(Exp ~ cell_source, data = .data, alternative = "two.sided")
  )
  dplyr::mutate(
    test_res,
    mean_immune_exp = mean_immune_exp,
    mean_tumor_exp = mean_tumor_exp,
    UQ_immune_exp = UQ_immune_exp,
    UQ_tumor_exp = UQ_tumor_exp,
    `log2FC(I/T).mean` = log2((mean_immune_exp + 0.01) / (mean_tumor_exp + 0.01)),
    `log2FC(I/T).UQ` = log2((UQ_immune_exp + 0.01) / (UQ_tumor_exp + 0.01))
  )
}
# Run fn_compare_TI_FC per gene: long format, numeric expression, attach the
# Tumor/Immune labels, nest per symbol, test, and unnest the one-row results.
ICP_exp_in_GSE103322 %>%
tidyr::gather(-symbol,key="sample",value="Exp") %>%
dplyr::mutate(Exp = as.numeric(Exp)) %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
tidyr::nest(-symbol) %>%
dplyr::mutate(test = purrr::map(data,fn_compare_TI_FC)) %>%
dplyr::select(-data) %>%
tidyr::unnest() -> ICP_exp_in_GSE103322.wilcox.test.FC.TI
# Classify each gene's expression site from the UQ fold change (thresholds
# +/-1 on log2 scale), then compare against the FANTOM-derived call:
# Exp_site.x = this dataset's call, Exp_site.y = gene_list_exp_site's call.
ICP_exp_in_GSE103322.wilcox.test.FC.TI %>%
dplyr::mutate(Exp_site = ifelse(`log2FC(I/T).UQ` >=1, "Immune cell dominate","Immune and tumor cell almost")) %>%
dplyr::mutate(Exp_site = ifelse(`log2FC(I/T).UQ` <=(-1), "Tumor cell dominate",Exp_site)) %>%
dplyr::inner_join(gene_list_exp_site, by="symbol") %>%
dplyr::select(symbol, Exp_site.x, Exp_site.y,`log2FC(I/T).UQ`) %>%
dplyr::mutate(fit_fantom=ifelse(Exp_site.x==Exp_site.y,"yes","no")) %>%
readr::write_tsv(file.path(res_path,"predict_res_validate_by_GSE103322.tsv"))
## define genes exp site by fold change and pvalue ----
# fn_define_exp_site <- function(symbol,fc,pvalue,tumor_ratio,immune_ratio,mean_cell_line, mean_immune_exp){
# print(symbol)
# if(is.na(pvalue)){
# tmp <- "Not_sure"
# } else {
# if(fc>=1 && pvalue<=0.05){
# if(tumor_ratio<0.25){
# if(immune_ratio>=0.5){
# tmp <- "Mainly_exp_on_Immune"
# } else{
# tmp <- "Both_exp_on_Tumor_Immune"
# }
# } else if(tumor_ratio>=0.25){
# tmp <- "Both_exp_on_Tumor_Immune"
# }
# }else if(fc<=(-1) && pvalue<=0.05){
# if(immune_ratio<0.25){
# if(tumor_ratio>=0.5){
# tmp <- "Mainly_exp_on_Tumor"
# } else{
# tmp <- "Both_exp_on_Tumor_Immune"
# }
# } else{
# tmp <- "Both_exp_on_Tumor_Immune"
# }
# }else if(fc>(-1) && fc<1){
# tmp <- "Both_exp_on_Tumor_Immune"
# } else {
# tmp <- "Both_exp_on_Tumor_Immune"
# }
# }
# tmp
# }
# ICP_exp_in_GSE103322.wilcox.test.FC.TI %>%
# dplyr::mutate(Exp_site = purrr::pmap(list(symbol,`log2FC(I/T)`,p.value,`tumor_ratio_diff(U-D)`,`immune_ratio_diff(U-D)`,mean_tumor_exp,mean_immune_exp),fn_define_exp_site)) %>%
# tidyr::unnest() -> ICP_Exp_site_by_DE_Fc_and_ratio_in_GSE103322
# get p value of ICP pattern in validation data -----
fantom_res.expsite <- readr::read_tsv(file.path(res_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
dplyr::select(symbol,Exp_site) %>%
dplyr::rename("FANTOM_res"="Exp_site")
# ICP_Exp_site_by_DE_Fc_and_ratio_in_GSE103322 %>%
# dplyr::select(symbol,Exp_site) %>%
# dplyr::rename("validation_res"="Exp_site") %>%
# dplyr::inner_join(fantom_res.expsite, by ="symbol") %>%
# dplyr::mutate(true_pos = ifelse(validation_res==FANTOM_res,"Ture","False")) %>%
# .$true_pos %>%
# table() %>%
# as.data.frame() %>%
# readr::write_tsv(file.path(res_path,"pattern_validation","6.3.validation_accuracy.tsv"))
# boxplot ------------------------
# Per-Exp_site strip colour and plotting order. NOTE(review): the column name
# "site_cplor" is a typo for "site_color", but it is referenced downstream
# (symbol_rank, fn_plot_ICP_exp_in_dataset), so it is kept as-is here.
strip_color <- data.frame(Exp_site = unique(gene_list_exp_site$Exp_site),
site_cplor = c("green", "orange", "pink"),
rank = c(3,2,1))
# Shared ggplot theme used by all figures in this script.
my_theme <- theme(
panel.background = element_rect(fill = "white",colour = "black"),
panel.grid.major=element_blank(),
axis.text.y = element_text(size = 10,colour = "black"),
axis.text.x = element_text(size = 10,colour = "black"),
# legend.position = "none",
legend.text = element_text(size = 10),
legend.title = element_text(size = 12),
legend.background = element_blank(),
legend.key = element_rect(fill = "white", colour = "black"),
plot.title = element_text(size = 20),
axis.text = element_text(colour = "black"),
strip.background = element_rect(fill = "white",colour = "black"),
strip.text = element_text(size = 10),
text = element_text(color = "black")
)
library(ggbeeswarm)
# Join expression with labels + test results for plotting.
# NOTE(review): unlike the test pipeline above, Exp is not converted with
# as.numeric() here; confirm the gathered column is numeric before plotting.
ICP_exp_in_GSE103322 %>%
tidyr::gather(-symbol,key="sample",value="Exp") %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
dplyr::filter(cell_source %in% c("Tumor","Immune")) %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::inner_join(ICP_exp_in_GSE103322.wilcox.test.FC.TI,by="symbol") -> ready_for_draw
# Facet order: by Exp_site rank, then by mean fold change (.x suffix = column
# from the join above).
ready_for_draw %>%
dplyr::select(symbol,Exp_site,`log2FC(I/T).mean.x`) %>%
dplyr::inner_join(strip_color,by="Exp_site") %>%
dplyr::arrange(rank,`log2FC(I/T).mean.x`)-> symbol_rank
ready_for_draw <- within(ready_for_draw,symbol <- factor(symbol,levels = unique(symbol_rank$symbol)))
with(ready_for_draw, levels(symbol))
# One row per gene used to tint each facet's background by Exp_site; dummy
# cell_source/Exp values satisfy the aes() of the main plot.
ready_for_draw %>%
dplyr::select(symbol,Exp_site) %>%
unique() -> color_bac
color_bac$cell_source <- color_bac$Exp <- 1
# Violin plot of expression per cell source, one facet per gene, facet
# background coloured by the gene's expression-site class.
ready_for_draw %>%
# dplyr::mutate(cell_source = ifelse(cell_source=="Cancer cells", "Tumor","Immune")) %>%
ggplot(aes(x=cell_source, y=Exp)) +
# geom_quasirandom(size=0.1) +
geom_violin(size = 0.25) +
geom_rect(data=color_bac,aes(fill = Exp_site),xmin=-Inf,xmax=Inf,ymin=-Inf,ymax=Inf,alpha=0.1) +
# geom_violin() +
facet_wrap(~symbol,scales = "free_y", ncol = 7) +
scale_fill_manual(
# values = site_cplor,
values = c("yellow","green","pink","blue", "red"),
# values = c("#008B00", "#00EE00", "#CD8500", "#FF4500"),
breaks =c("Immune and tumor cell almost", "Immune cell dominate","Tumor cell dominate")
) +
my_theme +
labs(y="Expression",title="GSE103322, HNSCC") +
theme(
axis.title.x = element_blank(),
legend.position = "bottom"
)
ggsave(file.path(res_path,"pattern_validation","6.2.GSE103322.ICP_exp-T-I_compare.pdf"),device = "pdf",height = 20,width = 16, units = c("cm"))
ggsave(file.path(res_path,"pattern_validation","6.2.GSE103322.ICP_exp-T-I_compare.png"),device = "png",height = 20,width = 16, units = c("cm"))
# correlation between FC got from fantom and melanoma ---------------------------------
# FANTOM5 reference statistics, renamed so the column names line up with the
# GSE103322 results ("mid"/"cell_line" -> "UQ"/"tumor"), then made long so we
# can join on (symbol, data_type).
fantom_res <- readr::read_tsv(file.path(res_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
dplyr::select(symbol,mean_cell_line, mean_immune_exp,mid_cell_line,mid_immune_exp,`log2FC(I/T).mean`,`log2FC(I/T).mid`) %>%
dplyr::rename("log2FC(I/T).UQ"="log2FC(I/T).mid","UQ_tumor_exp"="mid_cell_line","UQ_immune_exp"="mid_immune_exp","mean_tumor_exp"="mean_cell_line") %>%
tidyr::gather(-symbol,key="data_type",value="Fantom5")
# Same statistics from this dataset, joined with the FANTOM5 values per gene.
ICP_exp_in_GSE103322.wilcox.test.FC.TI %>%
dplyr::select(symbol,mean_tumor_exp, mean_immune_exp,UQ_immune_exp, UQ_tumor_exp,`log2FC(I/T).mean`,`log2FC(I/T).UQ`) %>%
tidyr::gather(-symbol,key="data_type",value="GSE103322") %>%
dplyr::inner_join(fantom_res,by=c("symbol","data_type")) -> correlation.ready
# spearman correlation
# One Spearman correlation (GSE103322 vs FANTOM5) per statistic type.
correlation.ready %>%
# dplyr::filter(symbol != 'BTNL3') %>%
tidyr::nest(-data_type) %>%
dplyr::mutate(cpm_cor = purrr::map(data,.f=function(.x){
broom::tidy(
cor.test(.x$GSE103322,.x$Fantom5,method = "spearman")
)
})) %>%
dplyr::select(-data) %>%
tidyr::unnest() -> cor.res
# plot
library(ggplot2)
library(ggpubr)
# Annotation table: per data_type, a text position inside the panel plus the
# "r = ..., p = ..." label from cor.res.
correlation.ready %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::group_by(data_type) %>%
dplyr::mutate(y=(max(Fantom5)-min(Fantom5))*0.85+min(Fantom5),x=min(GSE103322)+(max(GSE103322)-min(GSE103322))*0.4) %>%
dplyr::select(data_type,x,y) %>%
unique() %>%
dplyr::inner_join(cor.res,by="data_type") %>%
dplyr::select(data_type,x,y,estimate,p.value) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2),sep="")) -> cor_text
# Scatter of GSE103322 vs FANTOM5 fold changes (mean- and UQ-based), one facet
# per statistic, with the Spearman r/p annotation per facet.
correlation.ready %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::filter(Exp_site!="Not_sure", data_type %in% c("log2FC(I/T).mean","log2FC(I/T).UQ")) %>%
ggplot(aes(x=GSE103322,y=Fantom5)) +
geom_jitter(aes(color=Exp_site)) +
geom_smooth(se = F, method = "lm") +
# BUGFIX: cor_text$data_type only contains "log2FC(I/T).mean"/"log2FC(I/T).UQ";
# the old filter on "log2FC(I/T)" matched nothing, so no labels were drawn.
# Keep only the two fold-change rows so the annotation matches the facets.
geom_text(aes(x=x,y=y,label = label),data=cor_text %>% dplyr::filter(data_type %in% c("log2FC(I/T).mean","log2FC(I/T).UQ"))) +
facet_wrap(~data_type,scales = "free") +
scale_color_manual(values=c("#CD661D", "#008B00", "#FF69B4", "#1874CD","#CD3333")) +
my_theme +
# BUGFIX: x axis shows GSE103322 data (label previously said "GSE72056",
# a copy-paste leftover from the melanoma script).
labs(x="GSE103322",
y="FANTOM5",
title = "FANTOM5 vs. GSE103322") +
theme(
legend.position = "bottom",
legend.key.width = unit(0.2,"inches"),
legend.key.height=unit(0.2,"inches"),
legend.text = element_text(size=8),
legend.title = element_blank()
)
ggsave(file.path(res_path,"pattern_validation","6.1.GSE103322-Fantom5.correlation.pdf"),device = "pdf",height = 4,width = 6)
ggsave(file.path(res_path,"pattern_validation","6.1.GSE103322-Fantom5.correlation.png"),device = "png",height = 4,width = 6)
# Scatter of log2 upper-quartile expression in immune vs tumor cells; the two
# dashed y = x +/- 1 lines mark the |log2FC| = 1 classification thresholds.
# NOTE(review): TeX() comes from the latex2exp package, which is not loaded in
# this chunk - confirm it is attached earlier in the file.
ICP_exp_in_GSE103322.wilcox.test.FC.TI %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::mutate(log2Immune.UQ=log2(UQ_immune_exp+0.01),log2Tumor.UQ=log2(UQ_tumor_exp+0.01)) %>%
ggplot(aes(x=`log2Immune.UQ`,y=`log2Tumor.UQ`)) +
geom_jitter(aes(color = Exp_site),width = 0.1,height = 0.1) +
geom_abline(intercept = 1, slope = 1,linetype = 2) +
geom_abline(intercept = -1, slope = 1,linetype = 2) +
geom_text(aes(x=x,y=y,label=label),
data=tibble::tibble(x=c(2,2),
y=c(7,-4),
label=c("log2(I/T)<-1","log2(I/T)>1"))) +
# geom_smooth(method = "lm") +
# geom_text_repel(aes(x=`log2FC(I/T).mean`,y=`log2FC(I/T).mid`,label=symbol)) +
# geom_label(x=4,y=10,aes(label=label),data = cor_label) +
# geom_hline(yintercept = c(-2,2),linetype = 2) +
# geom_vline(xintercept = c(-1,1),linetype = 2) +
labs(x=TeX("log_2 (UQ(Immune)+0.01)"),
y=TeX("log_2 (UQ(Tumor)+0.01)"),
title = "Classification of ICPs' expression pattern, GSE103322") +
scale_color_manual(values = c("#CD950C", "#66CD00", "#EE2C2C"),
name = "ICPs expression pattern") +
my_theme +
theme(
plot.title = element_text(size=15)
)
ggsave(file.path(res_path,"classify_ICP_exp_pattern_onlybyUQ.GSE103322.pdf"),device = "pdf",height = 4, width = 8)
ggsave(file.path(res_path,"classify_ICP_exp_pattern_onlybyUQ.GSE103322.png"),device = "png",height = 4, width = 8)
# save image --------------------------------------------------------------
# Persist the whole workspace for the validation report.
save.image(file.path(
res_path,"pattern_validation","GSE103322_HNSCC.TI.compare.Rdata")
)
#>>>>>>>>>>>>>>>>>>>>>>> HAVE NOT RUN
# tSNE: use ICP exp to distingrush tumor and immune cells -----------------
library("Rtsne")
# filter repeat samples
# NOTE(review): despite the name, this collects samples whose MEAN ICP
# expression is exactly 0 (i.e. all-zero profiles, which are identical to each
# other and would break Rtsne's duplicate check), not duplicates in general.
ICP_exp_in_GSE103322 %>%
tidyr::gather(-symbol,key="sample",value="exp") %>%
dplyr::group_by(sample) %>%
dplyr::mutate(mean = mean(exp)) %>%
dplyr::select(sample,mean) %>%
dplyr::filter(mean == 0) %>%
unique() -> duplicated_samples # with same ICP exp
# data prepare
# Wide matrix (one row per sample, one column per gene), restricted to tumor
# and immune cells.
ICP_exp_in_GSE103322 %>%
tidyr::gather(-symbol,key="sample",value="exp") %>%
dplyr::filter(!sample %in% duplicated_samples$sample) %>%
tidyr::spread(key="symbol",value="exp") %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
# BUGFIX: cell_source takes values "Tumor"/"Immune"/"Stromal"/"Non cancer"
# (see sample_info.class above); the old filter on "Cancer cells"/"Immune
# cells" matched nothing and always produced an empty table.
dplyr::filter(cell_source %in% c("Tumor","Immune")) -> data_for_tSNE
# data_for_tSNE %>%
# dplyr::select(-sample,-sample_type,-cell_type,-cell_source,-tumor) %>%
# as.data.frame() %>%
# as.matrix() -> data_for_tSNE.matx
# normalization
# Columns 2:66 are assumed to be the 65 gene-expression columns - TODO confirm.
data_for_tSNE.matx.normalize <- normalize_input(data_for_tSNE[,2:66] %>% as.matrix())
# colMeans(data_for_tSNE.matx.normalize)
# range(data_for_tSNE.matx.normalize)
# do tSNE
tsne_res <- Rtsne(data_for_tSNE.matx.normalize,dims=2,pca=FALSE,theta=0.0)
# Show the objects in the 2D tsne representation
plot(tsne_res$Y,col=factor(data_for_tSNE$cell_source), asp=1)
# NOTE(review): this section is copied from the GSE72056 script and marked
# "HAVE NOT RUN" above - data_for_tSNE here has no `cell_type` or `tumor`
# column (sample_info.class only contributes `cell_source`), and the output
# filenames still say GSE72056. Needs adapting before it can run.
tsne_res$Y %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
# dplyr::rename("tSNE 1"="V1","tSNE 2"="V2") %>%
dplyr::mutate(sample = data_for_tSNE$sample,
cell_source = data_for_tSNE$cell_source,
cell_type = data_for_tSNE$cell_type,
tumor = data_for_tSNE$tumor) %>%
ggplot(aes(x=V1,y= V2)) +
geom_jitter(aes(color=cell_type),size=0.5) +
xlab("tSNE 1") +
ylab("tSNE 2") +
ggpubr::color_palette("jco") +
my_theme
ggsave(filename = file.path(res_path,"pattern_validation","5.3.GSE72056.ICP_exp-T-I_tSNE.png"),device = "png",width = 6,height = 4)
ggsave(filename = file.path(res_path,"pattern_validation","5.3.GSE72056.ICP_exp-T-I_tSNE.pdf"),device = "pdf",width = 6,height = 4)
# why some tumor cells overlap with immune cell?
# NOTE(review): same caveat as above - relies on `cell_type`/`tumor` columns
# that do not exist in this dataset's data_for_tSNE; copied from GSE72056.
tsne_res$Y %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
# dplyr::rename("tSNE 1"="V1","tSNE 2"="V2") %>%
dplyr::mutate(sample = data_for_tSNE$sample,
cell_source = data_for_tSNE$cell_source,
cell_type = data_for_tSNE$cell_type,
tumor = paste("Mel",data_for_tSNE$tumor)) %>%
# collapse every non-tumor cell type to one "Immune cell" label so tumor
# points can be coloured by patient and immune points share one colour
dplyr::mutate(cell_type = ifelse(cell_type!="Tumor cell","Immune cell",cell_type)) %>%
dplyr::mutate(tumor = ifelse(cell_type== "Immune cell","Immune cell",tumor)) %>%
ggplot(aes(x=V1,y= V2)) +
geom_jitter(aes(color=tumor),size=0.2) +
xlab("tSNE 1") +
ylab("tSNE 2") +
ggpubr::color_palette(palette = c("grey77", "#000000", "#0000FF", "#8B2323", "#CDAA7D", "#8EE5EE", "#76EE00","#D2691E", "#8B008B", "#6E8B3D","#FAEBD7", "#006400", "#FFD700", "#EE00EE", "#FFB6C1", "#FFBBFF", "#00F5FF", "#76EEC6","#DB7093", "#FF3030"),name="Sample type") +
my_theme
ggsave(filename = file.path(res_path,"pattern_validation","5.3.GSE72056.ICP_exp-T-I_tSNE-patients-colored.png"),device = "png",width = 10,height = 6)
ggsave(filename = file.path(res_path,"pattern_validation","5.3.GSE72056.ICP_exp-T-I_tSNE-patients-colored.pdf"),device = "pdf",width = 10,height = 6)
# PCA analysis ------------------------------------------------------------
library("FactoMineR")
library("factoextra")
# NOTE(review): ICP_exp_in_GSE72056 is not defined in this part of the file -
# presumably created in an earlier (GSE72056) section; verify before running.
ICP_exp_in_GSE72056 %>%
tidyr::gather(-symbol,key="sample",value="exp") %>%
dplyr::arrange(symbol) %>%
dplyr::filter(!sample %in% duplicated_samples$sample) %>%
tidyr::spread(key="symbol",value="exp") %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
dplyr::filter(cell_source %in% c("Tumor","Immune")) -> data_for_PCA
# PCA on the 65 gene columns (2:66), standardised.
res.pca <- PCA(data_for_PCA[,2:66] %>% as.matrix(), scale.unit = T,graph = FALSE)
# plot -- variables
# Variables (genes) coloured by their FANTOM expression-site class; the genes
# are arranged by symbol to line up with the PCA's column order.
fviz_pca_var(res.pca,
# geom.var = "text",
labelsize = 2,
col.var = ICP_exp_in_GSE72056 %>%
dplyr::select(symbol) %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::inner_join(data.frame(Exp_site = c("Only_exp_on_Immune","Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor","Not_sure"),
rank = c(5,4,3,2,1,0)), by = "Exp_site") %>%
# dplyr::filter(Exp_site!="Not_sure") %>%
dplyr::arrange(symbol) %>%
.$Exp_site,
repel = TRUE,
legend.title = list(color = "Exp. site")
) +
my_theme +
ggpubr::color_palette("npg") # Variable colors
ggsave(filename = file.path(res_path,"pattern_validation","5.4.1.GSE72056.ICP_exp-T-I_PCA-variables.png"),device = "png",width = 6,height = 4)
ggsave(filename = file.path(res_path,"pattern_validation","5.4.1.GSE72056.ICP_exp-T-I_PCA-variables.pdf"),device = "pdf",width = 6,height = 4)
# plot -- Individuas
# NOTE(review): data_for_PCA has no `cell_type` column here (only
# `cell_source`); fill.ind below would need updating for this dataset.
fviz_pca_ind(res.pca,
geom.ind = "point",
pointsize = 2.5,
pointshape = 21,
fill.ind = data_for_PCA$cell_type,
legend.title = list(fill = "Cell type")
)+
my_theme +
ggpubr::fill_palette("jco")
ggsave(filename = file.path(res_path,"pattern_validation","5.4.2.GSE72056.ICP_exp-T-I_PCA-Individuals.png"),device = "png",width = 6,height = 4)
ggsave(filename = file.path(res_path,"pattern_validation","5.4.2.GSE72056.ICP_exp-T-I_PCA-Individuals.pdf"),device = "pdf",width = 6,height = 4)
# vertical comparison of ICPs in tumor and immune -------------------------
### Boxplot of log2(Exp+1) per gene, faceted by `facet` (a formula string such
### as "~cell_source"), genes ordered by median expression and y tick labels
### coloured by the gene's Exp_site strip colour. `.data` needs columns
### `symbol` and `Exp`; writes <filename>.pdf/.png under
### res_path/pattern_validation. Uses globals gene_list_exp_site, strip_color,
### my_theme, res_path.
fn_plot_ICP_exp_in_dataset <- function(.data,ylab,facet,title,filename){
# gene ordering: median Exp per symbol plus the gene's strip colour
.data %>%
dplyr::group_by(symbol) %>%
dplyr::mutate(mid = quantile(Exp,0.5)) %>%
dplyr::arrange(mid) %>%
dplyr::select(symbol,mid) %>%
unique() %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::inner_join(strip_color,by="Exp_site") -> .symbol_rank
# BUGFIX: build the plot into a variable and pass it to ggsave() explicitly.
# Inside a function the ggplot object is never auto-printed, so ggsave()'s
# default `plot = last_plot()` would silently save whatever plot was
# rendered last, not this one.
.p <- .data %>%
dplyr::mutate(Exp = log2(Exp+1)) %>%
ggplot(aes(x=symbol,y=Exp)) +
geom_boxplot(outlier.colour = "grey",outlier.size = 0.5) +
facet_wrap(as.formula(facet)) +
rotate() +
ggtitle(title) +
ylab(ylab) +
xlab("Symbol") +
scale_x_discrete(limits = .symbol_rank$symbol) +
my_theme +
theme(
axis.text.y = element_text(size = 10,colour = .symbol_rank$site_cplor),
plot.title = element_text(size=12)
)
ggsave(file.path(res_path,"pattern_validation",paste(filename,"pdf",sep=".")),plot = .p,device = "pdf",width = 4, height = 10)
ggsave(file.path(res_path,"pattern_validation",paste(filename,"png",sep=".")),plot = .p,device = "png", width = 4, height = 10)
}
# Draw the per-gene tumor-vs-immune boxplots for GSE72056.
# NOTE(review): uses ICP_exp_in_GSE72056, presumably defined in an earlier
# section of this file.
ICP_exp_in_GSE72056 %>%
tidyr::gather(-symbol,key="sample",value="Exp") %>%
dplyr::arrange(symbol) %>%
dplyr::filter(!sample %in% duplicated_samples$sample) %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
dplyr::filter(cell_source %in% c("Tumor","Immune")) %>%
fn_plot_ICP_exp_in_dataset(ylab="Expression",facet="~cell_source",title="GSE72056, ICP expression",filename="5.5.GSE72056.ICP_exp.T-I")
# correlation between ICP exp in tumor and immune -------------------------
# Per-gene mean expression in tumor vs immune cells, one row per gene with
# `Tumor` and `Immune` columns.
ICP_exp_in_GSE72056 %>%
tidyr::gather(-symbol,key="sample",value="Exp") %>%
dplyr::arrange(symbol) %>%
dplyr::filter(!sample %in% duplicated_samples$sample) %>%
dplyr::inner_join(sample_info.class,by="sample") %>%
dplyr::filter(cell_source %in% c("Tumor","Immune")) %>%
dplyr::group_by(symbol,cell_source) %>%
dplyr::mutate(Mean_exp = mean(Exp)) %>%
dplyr::select(symbol,cell_source,Mean_exp) %>%
unique() %>%
dplyr::ungroup() %>%
tidyr::spread(key="cell_source",value="Mean_exp") -> ready_for_cor
ready_for_cor %>%
readr::write_tsv(file.path(res_path,"pattern_validation","ICP_mean_exp_in_GSE72056.tsv"))
# Per-Exp_site Spearman correlations (only groups with >5 genes), with plot
# coordinates and an "r = ..., p = ..." label for annotation.
ready_for_cor %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
tidyr::nest(-Exp_site) %>%
dplyr::mutate(spm = purrr::map(data,.f=function(.x){
if(nrow(.x)>5){
broom::tidy(
cor.test(.x$Immune,.x$Tumor,method = "spearman")) %>%
dplyr::mutate(y=(max(.x$Immune)-min(.x$Immune))*0.85+min(.x$Immune),
x=2.72) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2)))
}else{
tibble::tibble()
}
})) %>%
dplyr::select(-data) %>%
tidyr::unnest() -> cor_anno.specific
# Overall Spearman correlation across all genes, with its annotation position.
# (This was previously computed twice; the first copy was overwritten before
# use and has been removed as dead code - only this 0.75-offset version took
# effect.)
broom::tidy(
cor.test(ready_for_cor$Immune,ready_for_cor$Tumor,method = "spearman")
) %>%
dplyr::mutate(y=(max(ready_for_cor$Immune)-min(ready_for_cor$Immune))*0.75+min(ready_for_cor$Immune),x=min(ready_for_cor$Tumor)+(max(ready_for_cor$Tumor)-min(ready_for_cor$Tumor))*0.4) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2))) -> cor_anno
# Scatter of per-gene mean expression (tumor vs immune) with per-Exp_site and
# overall regression lines plus the correlation annotations computed above.
ready_for_cor %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::filter(Exp_site!="Not_sure") %>%
ggplot(aes(x=Tumor,y=Immune)) +
geom_jitter(aes(color=Exp_site),size=0.5) +
geom_smooth(se= F, method = "lm", aes(color=Exp_site,group=Exp_site)) +
geom_smooth(se = F, method = "lm",color= "black") +
geom_text(aes(x=x,y=y,label = label),data=cor_anno,color="black") +
geom_text(aes(x=x,y=y,label = label,color=Exp_site),data = cor_anno.specific) +
scale_color_manual(values=c("#CD661D", "#008B00", "#FF69B4", "#1874CD","#CD3333")) +
my_theme +
# BUGFIX: axis labels previously misspelled "exppression"
xlab("Mean expression in tumor cells") +
ylab("Mean expression in immune cells")
ggsave(file.path(res_path,"pattern_validation","5.6.GSE72056-T-I-meanExp.correlation.pdf"),device = "pdf",height = 6,width = 8)
ggsave(file.path(res_path,"pattern_validation","5.6.GSE72056-T-I-meanExp.correlation.png"),device = "png",height = 6,width = 8)
|
862e8360c4445674bae9451ae5783434f0ca7aaf | cb08a766e756654412e156e036e35472979dd82f | /_site/code/feb27-inclass.R | 9afb88ffceef11a258b00f767f0a88a0ac97d976 | [] | no_license | Stat480-at-ISU/Stat480-at-ISU.github.io | 4572dcda1e850d921aa0c0fa49df303f7059fba7 | 8d857269be947ef7ef0752eee13aa6e7fa591872 | refs/heads/master | 2023-04-13T05:59:11.157492 | 2020-04-30T13:14:21 | 2020-04-30T13:14:21 | 234,110,711 | 0 | 1 | null | 2023-04-12T00:10:01 | 2020-01-15T15:28:00 | HTML | UTF-8 | R | false | false | 3,247 | r | feb27-inclass.R | # class 2/27/20
# slides: more dplyr ---------------------
library(dplyr)
library(nycflights13)
data(flights, package = "nycflights13")
str(flights)
flights %>% glimpse()
?flights
## CREATE SUMMARIES
# mean distance and arrival delay per destination
flights %>%
group_by(dest) %>%
summarise(
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
## GROUPING HELPER FUNCTIONS
# three equivalent ways to count flights per destination
flights %>% group_by(dest) %>% summarise(count = n())
flights %>% group_by(dest) %>% tally()
flights %>% count(dest)
flights %>% count(tailnum)
# wt = distance makes count() sum distance instead of counting rows,
# i.e. total distance flown per plane:
flights %>% count(tailnum, wt = distance)
flights %>%
group_by(tailnum) %>%
summarise(total_dist = sum(distance))
## GROUPED MUTATES & FILTERS
# destinations with more than 365 flights (n() is per group here)
flights %>%
group_by(dest) %>%
filter(n() >365)
flights %>%
group_by(dest) %>%
filter(n() < 365) %>%
select(distance)
# ungroup() drops the grouping so select() no longer keeps the group key
flights %>%
group_by(dest) %>%
filter(n() < 365) %>%
ungroup() %>%
select(distance)
## YOUR TURN
# Calculate the average delay per date.
flights %>%
group_by(year, month, day) %>%
summarise(avg_delay = mean(arr_delay, na.rm = TRUE),
n_flights = n()) %>%
arrange(desc(avg_delay))
# What time of day should you fly if you want to
# avoid delays as much as possible?
# (rank scheduled hours by mean arrival delay, smallest first)
flights %>%
group_by(hour) %>%
summarise(avg_delay = mean(arr_delay, na.rm = TRUE)) %>%
arrange(avg_delay)
# sanity check: how many flights actually leave at 1am?
flights %>%
filter(hour == 1)
# Explore the relationship between the distance and average delay
# for each destination.
# Also calculate the number of flights flown to each destination.
# BUGFIX: ggplot2 is used below but only dplyr/nycflights13 are attached at
# the top of this script, so ggplot() would fail with "could not find
# function". Load it here.
library(ggplot2)
# Per-destination flight count, mean delay and mean distance, plotted as
# distance vs delay with point size showing traffic volume.
flights %>%
group_by(dest) %>%
summarise(n_flights = n(),
avg_delay = mean(arr_delay, na.rm = TRUE),
avg_distance = mean(distance, na.rm = TRUE)) %>%
arrange(desc(avg_distance)) %>%
ggplot() +
geom_hline(aes(yintercept = 0), alpha = 0.7, color = "blue") +
geom_point(aes(x = avg_distance, y = avg_delay,
size = n_flights), alpha = .4)
## YOUR TURN
# Which carrier has the worst delays?
# ...by single worst delay:
flights %>%
group_by(carrier) %>%
summarise(max_delay = max(arr_delay, na.rm = TRUE)) %>%
arrange(desc(max_delay))
# ...by average delay:
flights %>%
group_by(carrier) %>%
summarise(avg_delay = mean(arr_delay, na.rm = TRUE)) %>%
arrange(desc(avg_delay))
# Rank airlines by the number of destinations that they
# fly to, considering only those airports that are flown
# to by two or more airlines.
flights %>%
group_by(dest) %>%
mutate(n_carriers = n_distinct(carrier)) %>%
filter(n_carriers > 1) %>%
group_by(carrier) %>%
summarise(n_dest = n_distinct(dest)) %>%
arrange(desc(n_dest))
# Look at the number of cancelled flights per day.
# Is there a pattern? Is the proportion of cancelled
# flights related to the average delay?
# (cancelled = no recorded departure or arrival delay;
# ggplot2 must be attached for the plots below)
flights %>%
mutate(cancelled = (is.na(arr_delay) | is.na(dep_delay))) %>%
group_by(year, month, day) %>%
summarise(n_cancelled = sum(cancelled),
n_flights = n()) %>%
ggplot() + geom_point(aes(x = n_flights, y = n_cancelled))
flights %>%
mutate(cancelled = (is.na(arr_delay) | is.na(dep_delay))) %>%
group_by(year, month, day) %>%
summarise(prop_cancelled = mean(cancelled),
avg_delay = mean(arr_delay, na.rm = TRUE)) %>%
ggplot() + geom_point(aes(x = avg_delay, y = prop_cancelled))
|
ff0b421dbedab52f3cc8dc51a159983b8060127a | c78d6c58e47ff01700e082dde12bd08523352cc2 | /plot2.R | 1fec5684df7fb63206558883c751cff3987d6efd | [] | no_license | kaniapiatkowska/ExData_Plotting1 | e5786f12120af6a2bb7d888e0813805e1a7236bf | 4d705024285651831e7f605d01f17580496a5168 | refs/heads/master | 2020-12-28T19:34:02.917674 | 2019-11-04T11:22:47 | 2019-11-04T11:22:47 | 45,624,088 | 0 | 0 | null | 2015-11-05T16:30:07 | 2015-11-05T16:30:07 | null | UTF-8 | R | false | false | 856 | r | plot2.R | #Loading the data
# Course assignment script: download the household power data, subset two
# days, and plot global active power over time to plot2.png.
# NOTE(review): re-downloads the zip on every run; hard-coded local setwd().
setwd("C:/Dane/MOJE/Zuzka/Coursera/DataScientistToolbox/Rscripts/Course4")
file<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file, destfile = "./exdata_data_household_power_consumption.zip")
unzip("./exdata_data_household_power_consumption.zip", overwrite = TRUE)
# read the full file ("?" marks missing values) and keep only 1-2 Feb 2007
data<-subset(read.csv("./household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?"), Date=="1/2/2007" | Date=="2/2/2007")
# combine Date + Time into a date-time ("%e" = day of month, space-padded)
data$Date<-strptime(paste(data$Date, data$Time), "%e/%m/%Y %H:%M:%S")
library(dplyr)
data<-select(data, -Time)
#plot2
png(filename = "plot2.png", width = 480, height = 480, units = "px")
with(data, plot(Date, Global_active_power, type = "l", xlab = NA,
ylab = "Global Active Power (kilowatts)"))
dev.off() |
a2acc10862f2e5415eef6e42a5a990075841dc0c | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8677_3/rinput.R | b0806704b5aea30a4148df10c54d2f730bd2bec7 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree, unroot it, and write the unrooted tree back out.
phylo_tree <- read.tree("8677_3.txt")
write.tree(unroot(phylo_tree), file="8677_3_unrooted.txt")
2d04999bf39c9fadc8620e2e5774b5ad944a3fbe | 2b7c639c59425cc3f1a7b0a0c11b3e390da4f65c | /ui.R | aa1e2511c42e6a160a5b98fafc1eede1d92908a3 | [
"MIT"
] | permissive | tosaddler/pubchem-heatmap | f0760b4ab5f9bcfb1f8d30b68feea5c9d2bead6f | ab637e8c764ad21c596fad30a57adce43869fc58 | refs/heads/master | 2021-06-17T16:27:35.958861 | 2020-04-22T21:10:40 | 2020-04-22T21:10:40 | 99,131,010 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,302 | r | ui.R | library(shiny)
library(heatmaply)
# Shiny UI: navbar layout with a "Heatmap" tab (PubChem CID input, section
# checkboxes and display options on the left; plotly heatmap on the right)
# and a placeholder "Wordcloud" tab. The tags$script block below reports the
# browser window size to the server as input$dimension.
shinyUI(
navbarPage("pubchem-heatmap",
tabPanel("Heatmap",
fluidPage(
tags$head(tags$script('
var dimension = [0, 0];
$(document).on("shiny:connected", function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
$(window).resize(function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
')),
column(3,
tabsetPanel(
tabPanel("Input and Categories",
HTML("<br>"),
textAreaInput( inputId = "chemid",
label = "Input PubChem CIDs",
value = "6618\n1983\n120228"),
actionButton( inputId = "update",
label = "Update"),
HTML("<br><br>"),
checkboxInput( inputId = "pharm_bio",
label = "Pharmacology and Biochemistry",
value = TRUE),
conditionalPanel( condition = "input.pharm_bio == true",
checkboxGroupInput(inputId = "pharm_bio_sections",
label = NULL,
choices = c("Pharmacology",
"Absorption, Distribution and Excretion",
"Metabolism/Metabolites",
"Biological Half-Life",
"Mechanism of Action"))
),
checkboxInput( inputId = "use_manufacturing",
label = "Use and Manufacturing",
value = TRUE),
conditionalPanel( condition = "input.use_manufacturing == true",
checkboxGroupInput(inputId = "use_man_sections",
label = NULL,
choices = c("Methods of Manufacturing",
"Consumption"))
),
checkboxInput( inputId = "identification",
label = "Identification",
value = TRUE),
conditionalPanel( condition = "input.identification == true",
checkboxGroupInput(inputId = "identification_sections",
label = NULL,
choices = c("Analytic Laboratory Methods",
"Clinical Laboratory Methods"))
),
checkboxInput( inputId = "safety",
label = "Safety and Hazards",
value = TRUE),
conditionalPanel( condition = "input.safety == true",
checkboxGroupInput(inputId = "safety_sections",
label = NULL,
choices = c("Hazards Identification",
"Safety and Hazard Properties",
"First Aid Measures",
"Accidental Release Measures",
"Handling and Storage",
"Exposure Control and Personal Protection",
"Stability and Reactivity",
"Regulatory Information"))
),
checkboxInput( inputId = "toxicity",
label = "Toxicity",
value = TRUE),
conditionalPanel( condition = "input.toxicity == true",
checkboxGroupInput(inputId = "toxicity_sections",
label = NULL,
choices = c("Toxicological Information",
"Ecological Information"))
),
checkboxInput( inputId = "literature",
label = "Literature",
value = TRUE),
conditionalPanel( condition = "input.literature == true",
checkboxGroupInput(inputId = "literature_sections",
label = NULL,
choices = c("PubMed Citations",
"Metabolite References",
"Springer Nature References"))
),
checkboxInput( inputId = "bio_path",
label = "Biomolecular Interactions and Pathways",
value = TRUE),
conditionalPanel( condition = "input.bio_path == true",
checkboxGroupInput( inputId = "bio_path_sections",
label = NULL,
choices = c("Biosystems and Pathways"))
)
),
tabPanel("Options",
HTML("<br>"),
radioButtons( inputId = "normalization",
label = "Normalization:",
choices = c("Only this data",
"Database averages")),
checkboxInput( inputId = "clustering",
label = "Cluster compounds",
value = FALSE),
checkboxInput( inputId = "bypass",
label = "Bypass database",
value = FALSE),
checkboxInput( inputId = "chem.names",
label = "Use compound names",
value = TRUE),
numericInput( inputId = "chem.names.length",
label = "Compound name length",
value = 30,
min = 1,
max = 200),
selectInput(inputId = "chem.names.side",
label = "Compound name side",
choices = list("Left" = "left", "Right" = "right"),
selected = "left"),
numericInput( inputId = "plot_width",
label = 'Plot Width',
value = 500,
min = 0,
max = Inf,
step = 1),
numericInput( inputId = 'plot_height',
label = 'Plot Height',
value = 500,
min = 0,
step = 1),
downloadButton("download", "Download CSV Table")
)
)
),
column(9,
plotlyOutput("heatmap", height = "auto")
# verbatimTextOutput("click"),
# verbatimTextOutput("clickValue"),
# verbatimTextOutput("zoom")
)
)
),
tabPanel("Wordcloud",
fluidPage(
sidebarLayout(
sidebarPanel(
),
mainPanel(
)
)
)
)
)
)
|
6df9c3bbb599d5b9e440a67bf50080e95d76e6e0 | 9354e6fdd0b3476ca2f99139de5b76fd10844bd3 | /create_grids.R | cd922b61dc788a79057fc81e38fb50638b7674a7 | [] | no_license | nwdaudt/PhD_seabirdsAustralasia | 163f1f5a65f48351d491856d6bacbb6f9aeac476 | d976d379032492644b5f3b27a340747a58e32867 | refs/heads/main | 2023-08-24T16:16:30.892854 | 2021-09-08T01:09:16 | 2021-09-08T01:09:16 | 339,901,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,570 | r | create_grids.R | library(sf)
library(tidyverse)
library(mapview)
## Create the four corners from the grid area
# column V1 = longitudes (109-162 E), column V2 = latitudes (8.5-49 S),
# i.e. a bounding box around Australasia
a <- as.data.frame(matrix(c(109, 109, 162, 162, -8.5, -49, -49, -8.5), ncol = 2))
# view(a) ## check it
# corner points as an sf object in WGS84 (EPSG:4326)
a2 <- a %>%
sf::st_as_sf(coords = c("V1","V2"), crs = 4326)
# mapview::mapview(a2) ## check it spatially
## Squared grid
# 1 x 1 degree cells covering the bounding box
grid <-
sf::st_make_grid(a2, cellsize = c(1, 1), crs = 4326)
# mapview::mapview(grid)
## Hexagonal grid
# same extent/cell size but hexagonal cells (square = FALSE)
grid_hex <-
sf::st_make_grid(a2, cellsize = c(1, 1), crs = 4326, square = FALSE)
mapview::mapview(grid_hex)
### ****************************************************************************
### ***************************** For trials **********************************
### ****************************************************************************
# Global bounding box: V1 = longitude (+/-180), V2 = latitude (+/-90)
a <- as.data.frame(matrix(c(-180, -180, 180, 180, 90, -90, 90, -90), ncol = 2))
a2 <- a %>%
sf::st_as_sf(coords = c("V1","V2"), crs = 4326)
mapview::mapview(a2)
## Hexagonal grid -- 10
# 10-degree hexagonal cells over the whole globe
grid_hex10 <-
sf::st_make_grid(a2, cellsize = c(10, 10), crs = 4326, square = FALSE)
mapview::mapview(grid_hex10)
## Hexagonal grid -- 15
# 15-degree hexagonal cells over the whole globe
grid_hex15 <-
sf::st_make_grid(a2, cellsize = c(15, 15), crs = 4326, square = FALSE)
mapview::mapview(grid_hex15)
### Overlay in a ggplot Fig.
# draw the 15-degree grid over a world basemap
world <- ggplot2::map_data("world")
map <-
ggplot2::ggplot() +
ggplot2::geom_sf(data = grid_hex15) +
ggplot2::geom_map(data = world, map = world,
aes(x = long, y = lat, map_id = region),
color = "black", fill = "black", size = 0.1) +
theme_bw()
|
af21412c7faf59e39a02f78b71517152ea030b64 | ee7a448d0cdfe0478a82ba713e1cc6a5c9e0db4a | /01_materials/images/norms/R/03_analysis.R | e5c905ddafc73091e82018bd50fa0a0dcb3a1f8b | [
"CC-BY-4.0"
] | permissive | gpwilliams/levenik | ce46b00534b564280ff4f1023e69c2082d842344 | 2eeeb6a6a96849e408f1152ac3391b6caaffedf5 | refs/heads/master | 2021-08-06T08:39:03.528594 | 2021-07-28T15:46:01 | 2021-07-28T15:46:01 | 165,866,366 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | 03_analysis.R | # load data ----
# Load the norming datasets (paths f_path/w_path/rp_subset_output_path are
# assumed to be defined by an earlier setup script - TODO confirm).
f_data <- read_csv(f_path)
w_data <- read_csv(w_path)
rp_data <- readRDS(rp_subset_output_path)
# subset to colour items only
# rp_data is a list; elements 2-5 are rating tables reduced to the item id,
# concept and their colour-version columns
rp_data[2:5] <- rp_data[2:5] %>%
map(~ .x %>% select(item_number, concept, contains("color")))
# create summaries ----
# our compression ----
# mean/SD of GIF compression size for our stimuli
w_data %>% summarise(GIF_mean = mean(GIF), GIF_sd = sd(GIF))
# forsythe et al ----
# mean/SD of every rating column from familiarity through GIF
f_data %>%
summarise_at(
vars(familiarity:GIF),
list(mean = mean, sd = sd),
na.rm = TRUE
)
# rossion and pourtois ----
# colour diagnosticity
# element 1 holds the diagnosticity ratings; label the summary row
colour_ratings <- rp_data[[1]] %>%
summarise(mean = mean(rating), sd = sd(rating)) %>%
mutate(rating = "colour_diagnosticity")
# complexity, familiarity, imagery
# elements 2-4 share a mean_color column; summarise each and stack them,
# keeping the list name as the rating label (.id)
cfi_ratings <- rp_data[2:4] %>% map(
~ .x %>%
summarise(
mean = mean(mean_color), sd = sd(mean_color)
)
) %>%
bind_rows(.id = "rating")
# naming
# element 5 has several *_color columns; summarise each, then reshape the
# "<name>_mean"/"<name>_sd" pairs back into mean/sd columns per rating
name_ratings <- rp_data[[5]] %>%
summarise_at(
vars(contains("color")),
list(mean = mean, sd = sd)
) %>%
gather(key = "rating") %>%
mutate(
rating_type = case_when(
str_detect(rating, "mean") ~ "mean",
str_detect(rating, "sd") ~ "sd")
) %>%
mutate(rating = str_replace_all(rating, c("_mean" = "", "_sd" = ""))) %>%
spread(rating_type, value)
# join together into one summary table
all_ratings <- bind_rows(
name_ratings,
cfi_ratings,
colour_ratings
)
write_csv(all_ratings, all_ratings_output_path) |
ce57ed45c1bcdeaad3eea6d2310c65aa6c76a03f | 7da38e61a45196c99c83f2a495197dd38df599d5 | /app.R | 1cfed27bf962ee389587f2859d3b98e781f1745c | [] | no_license | ax42/k8s-axDemo | b378f34d6408fe9a0ea8b694dd5c797dd2b987e7 | 154651effa84ef5f0d065fdecac0c7450779f337 | refs/heads/master | 2020-06-20T22:33:49.993063 | 2019-08-06T19:53:47 | 2019-08-06T19:53:47 | 197,273,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,240 | r | app.R | library(shiny)
library(dplyr)
library(ggplot2)
library(httr)
library(base64enc)
# Define UI for application that draws a histogram
# UI layout: a narrow control column (sliders for the simulated data plus
# the API endpoint setting) and two equal-width result columns, one for
# locally computed summaries/plots and one for the API-computed versions.
ui <- fluidPage(
   titlePanel("k8s demo app"),
   fluidRow(
     column(2,
        tabsetPanel(type = "tabs",
            # Data-generation controls; input ids match server-side reads.
            tabPanel("Parameters",
              sliderInput("numPoints", "Number of points", 100, 6000, 1000, 50,
                          animate = animationOptions(100, loop = TRUE)),
              checkboxInput("useAPIRandom", label = "Use API for random numbers", value = FALSE),
              sliderInput("numCats", "Number of categories", 1, 6, 2, 1),
              sliderInput("xMean", "x-axis mean", -10, 10, 0, .1),
              sliderInput("xStdDev", "x-axis std dev", 0, 10, 1, .1),
              sliderInput("yMean", "y-axis mean", -10, 10, 0, .1),
              sliderInput("yStdDev", "y-axis std dev", 0, 10, 2, .1)
            ),
            # API configuration: endpoint URL plus the reported node name.
            tabPanel("API",
              # checkboxInput("useAPI", label = "Use API", value = FALSE),
              textInput("apiURL", "API endpoint", "http://localhost:8001/"),
              # textInput("apiURL", "API endpoint", "http://walker:8001/"),
              textOutput("apiNodename")
              # textOutput("apiSum")
            )
        )
     ),
     # Locally computed results.
     column(5,
            h3("Local"),
            tableOutput("summaryTable"),
            plotOutput("dotPlot")
     ),
     # Same results computed via the remote API, each gated by a checkbox.
     column(5,
            h3("API"),
            checkboxInput("useAPITable", label = "Use API", value = FALSE),
            tableOutput("summaryTableAPI"),
            checkboxInput("useAPIPlot", label = "Use API", value = FALSE),
            uiOutput("dotPlotAPI")
     )
   )
)
# Server logic: builds a random 2-D dataset from the slider inputs and
# renders each summary/plot twice -- once locally and once by delegating
# the computation to the HTTP API at input$apiURL.
server <- function(input, output) {

  # Local per-category summary (count and x/y means) of the generated data.
  output$summaryTable <- renderTable({
    srcData() %>%
      group_by(cat) %>%
      summarise(n = n(),
                xMean = mean(x), #StdDev = sd(x),
                yMean = mean(y), #yStdDev = sd(y)
      )
  })

  # POST `body` as JSON to `url` and return the raw httr response.
  apiPOST <- function(url, body) {
    resp <- POST(url, body = body, content_type_json(), encode = "json")
    # print(resp)
    resp
  }
  # Sum of the numbers in `body` via the API's /sum endpoint.
  # `url` is a parsed-url object; modifying url$path affects the local
  # copy only, so callers can reuse the same object.
  apiSum <- function(url, body) {
    url$path <- "sum"
    as.numeric(content(apiPOST(build_url(url), body)))
  }
  # Count of the numbers in `body` via the API's /count endpoint.
  apiCount <- function(url, body) {
    url$path <- "count"
    as.numeric(content(apiPOST(build_url(url), body)))
  }
  # Mean computed from two API round-trips (sum / count).
  apiMean <- function(url, body) {
    n <- apiCount(url, body)
    s <- apiSum(url, body)
    return(s/n)
  }

  # API-backed per-category summary; only computed when the checkbox is on.
  # Note: each apiCount/apiMean call per group is a separate HTTP request.
  output$summaryTableAPI <- renderTable({
    if (input$useAPITable) {
      url <- parse_url(input$apiURL)
      body = list(d = srcData()$x)
      # resp <- POST(build_url(url), body = body, encode = "json", content_type_json(), verbose())
      srcData() %>%
        group_by(cat) %>%
        summarise(n = apiCount(url, list(d = .data$x)),
                  xMean = apiMean(url, list(d = .data$x)),
                  yMean = apiMean(url, list(d = .data$y))
        )
    }
  })

  # Local scatter plot, faceted by category.
  output$dotPlot <- renderPlot({
    # print(srcData())
    ggplot(srcData(), aes(x, y)) + geom_point(alpha = 0.2) +
      facet_wrap(~cat) +
      theme(legend.position = "none")
  })

  # API-rendered plot: the /plot endpoint returns a PNG, which is embedded
  # as a base64 data URI in an <img> tag.
  # output$dotPlotAPI <- renderPlot({
  output$dotPlotAPI <- renderUI({
    if (input$useAPIPlot) {
      url <- parse_url(input$apiURL)
      body <- list(x = srcData()$x, y = srcData()$y, cat = srcData()$cat)
      url$path <- "plot"
      # NOTE(review): other endpoints pass build_url(url) here; confirm
      # POST accepts the parsed url object directly.
      resp <- POST(url, body = body, encode = "json")
      tags$img(src = paste0("data:image/png;base64,",
                            base64encode(resp[["content"]])))
    }
  })

  # One random normal draw, either from the API's /randomNorm endpoint or
  # locally via rnorm(). Called once per generated point (see srcData).
  randomValue <- function(mean, sd) {
    if (input$useAPIRandom) {
      url <- parse_url(input$apiURL)
      url$path <- "randomNorm"
      resp <- GET(build_url(url), query = list(m = mean, sd = sd))
      return(as.numeric(content(resp)))
    }
    else return (rnorm(1, mean, sd))
  }

  # Reactive source dataset: numPoints rows of (x, y, cat), regenerated
  # whenever any of the parameter inputs change.
  srcData <- reactive({
    data.frame(
      x = replicate(input$numPoints, randomValue(input$xMean, input$xStdDev)),
      y = replicate(input$numPoints, randomValue(input$yMean, input$yStdDev)),
      cat = replicate(input$numPoints, sample(1:input$numCats, 1))
    )
  })

  # Name of the k8s node serving the API, fetched from /nodename.
  output$apiNodename <- renderText({
    # if (input$useAPITable) {
    url <- parse_url(input$apiURL)
    url$path <- "nodename"
    resp <- GET(build_url(url))
    return(paste("node:", content(resp, "text")))
    # }
  })

  # Debug output: sum of a fixed vector via the API (currently not shown
  # in the UI -- its textOutput is commented out there).
  output$apiSum <- renderText({
    if (input$useAPITable) {
      url <- parse_url(input$apiURL)
      url$path <- "sum"
      body = list(d = c(1,2,3,4,5))
      resp <- POST(build_url(url), body = body, encode = "json", content_type_json(), verbose())
      return(content(resp, "text"))
    }
  })
}
# Run the application: wire the UI definition to the server logic and
# start the Shiny app (blocks until the session ends).
shinyApp(ui = ui, server = server)
|
5b9e9472232abed9ae86e5bce7112da8e66ccb0f | 86f5758c21a2371e67c1a55e28dc1d2d349a6e6c | /src/rev_figure_S1.R | 147966d60c085f378cc6f00aa17f9438d15bbcf3 | [] | no_license | fredlandfors/VIPVIZA-LPL | 3b6974e99e0e56a46193d72d98d379c25a0d6d02 | 59bd88e6cca1518845ca84a6e278da173739e967 | refs/heads/master | 2023-07-30T10:31:44.591032 | 2021-09-13T12:06:32 | 2021-09-13T12:06:32 | 305,151,620 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,002 | r | rev_figure_S1.R | # Reviewed manuscript ----
# check_packages() is a project helper (defined elsewhere -- TODO confirm)
# that presumably installs/loads the listed CRAN packages on demand.
check_packages(
  bioc_packages = c(),
  cran_packages = c("ggstatsplot", "cowplot", "ggplot2", "wesanderson",
                    "boot")
)
library(cowplot)
## Get data ----
# Join the ITC measurements onto the metabolite matrix by sample ID.
# sampleMetadata and missData.dataMatrix come from earlier scripts.
itc <- merge(
  sampleMetadata[c("ID", "ITC")],
  missData.dataMatrix,
  by = "ID"
)
## Fig A: Scatter ----
### Subset data ----
# Keep only the variables plotted in panels A/B.
itc.a <- itc[c("ID", "ITC", "VLDL_D", "Serum_TG")]
### Scale ----
# Standardise both predictors so effects are per 1 SD (note: scale()
# returns 1-column matrices, which data-frame ops below tolerate).
itc.a$VLDL_D <- scale(itc.a$VLDL_D)
itc.a$Serum_TG <- scale(itc.a$Serum_TG)
### Define graphical parameters ----
fig_4_linesize <- 0.5
fig_4_pointsize <- 1
### Define multiple comparisons correction ----
# Number of tests used for the Bonferroni-style adjustment (0.05 / 22)
# applied to the bootstrap CI below.
p_correct_n <- 22
### Define statistical tests function ----
# Format a one-line linear-model summary for figure annotation.
#
# Fits `y ~ x` on `df` and returns a string of the form
# "Effect = <slope> uJ/s per <x_unit>, R^2 = <r2>", both rounded to 2 d.p.
#
# Args:
#   df     data frame containing the columns named by `x` and `y`.
#   x      character scalar: predictor column name.
#   y      character scalar: response column name.
#   x_unit unit label printed after "per" (default "1-X").
# Returns: a length-1 character string.
calc_fig_4_test <- function(df, x, y, x_unit = "1-X") {
  form1 <- formula(paste0(y, " ~ ", x))
  fit1 <- lm(form1, data = df)
  sum1 <- summary(fit1)
  # Temporarily adjust global printing options, restoring the caller's
  # previous values on exit. The original reset to hard-coded defaults
  # (scipen = 0, digits = 7), silently clobbering any user settings.
  old_opts <- options(scipen = 3, digits = 4)
  on.exit(options(old_opts), add = TRUE)
  string1 <- paste0(
    "Effect = ",
    round(sum1$coefficients[x, "Estimate"], 2),
    " \U00B5J/s per ", x_unit,
    ", R\U00B2 = ",
    round(sum1$r.squared, 2)
  )
  return(string1)
}
### Draw individual plots ----
# Panel A: ITC (LPL activity) vs standardised mean VLDL particle diameter,
# scatter with regression smoother and marginal histograms. Axis ranges and
# fonts are fixed to match panel B for side-by-side comparison.
itc.a.p1 <- ggstatsplot::ggscatterstats(
  data = itc.a,
  x = "VLDL_D",
  y = "ITC",
  xlab = "Mean VLDL particle diameter (1-SD)",
  ylab = "LPL activity (\U00B5J/s)",
  title = "",
  smooth.line.args = list(size = fig_4_linesize, color = "black"),
  point.args = list(size = fig_4_pointsize),
  ggtheme = ggplot2::theme_classic(),
  marginal.type = "histogram",
  xfill = "white",
  yfill = "white",
  messages = FALSE,
  # Extra ggplot layers applied on top: fixed axis limits and small fonts.
  ggplot.component = list(
    scale_y_continuous(limits = c(0, 2)),
    theme(
      text = element_text(family = "Helvetica", size = 6),
      axis.title = element_text(family = "Helvetica", size = 6, face = "bold"),
      axis.text = element_text(family = "Helvetica", size = 6, colour = "black", face = "plain")
    ),
    scale_x_continuous(breaks = seq(-2, 4, 1), limits = c(-2, 4))
  ),
  ggstatsplot.layer = FALSE,
  output = "plot",
  bf.message = FALSE,
  # Statistics are drawn manually later via draw_label(), not by ggstatsplot.
  results.subtitle = FALSE
)
# Panel B: same layout as panel A but with standardised total
# triglycerides as the predictor.
itc.a.p2 <- ggstatsplot::ggscatterstats(
  data = itc.a,
  x = "Serum_TG",
  y = "ITC",
  xlab = "Total triglycerides (1-SD)",
  ylab = "LPL activity (\U00B5J/s)",
  title = "",
  smooth.line.args = list(size = fig_4_linesize, color = "black"),
  point.args = list(size = fig_4_pointsize),
  ggtheme = ggplot2::theme_classic(),
  marginal.type = "histogram",
  xfill = "white",
  yfill = "white",
  messages = FALSE,
  # Same fixed limits/fonts as panel A so the panels are comparable.
  ggplot.component = list(
    scale_y_continuous(limits = c(0, 2)),
    theme(
      text = element_text(family = "Helvetica", size = 6),
      axis.title = element_text(family = "Helvetica", size = 6, face = "bold"),
      axis.text = element_text(family = "Helvetica", size = 6, colour = "black", face = "plain")
    ),
    scale_x_continuous(breaks = seq(-2, 4, 1), limits = c(-2, 4))
  ),
  ggstatsplot.layer = FALSE,
  output = "plot",
  bf.message = FALSE,
  results.subtitle = FALSE
)
### Bootstrap 95 % CI:s for R2 ----
library(boot)
# Fixed seed so the bootstrap resamples (and the CIs reported in the
# figure) are reproducible.
set.seed(123)
# Bootstrap statistic for boot(): regress ITC on VLDL_D in the resample
# selected by `ind` and return the two coefficients plus the R-squared
# (named "rsq", i.e. the third element used by boot.ci below).
fit.vldld <- function(data, ind) {
  resample <- data[ind, ]
  mod <- lm(ITC ~ VLDL_D, data = resample)
  c(coef(mod), rsq = summary(mod)$r.squared)
}
# 10 000 ordinary (case-resampling) bootstrap replicates of the VLDL_D
# model; index 3 is the "rsq" element, so this prints a percentile 95 % CI
# for the model's R-squared.
boot.r2.vldld <- boot(itc, R = 10000, statistic = fit.vldld, sim = "ordinary")
boot.ci(boot.r2.vldld, index = 3, type = "perc")
# Bootstrap statistic: same contract as fit.vldld but with total
# triglycerides (Serum_TG) as the predictor.
fit.stg <- function(data, ind) {
  resample <- data[ind, ]
  mod <- lm(ITC ~ Serum_TG, data = resample)
  c(coef(mod), rsq = summary(mod)$r.squared)
}
# Bootstrap R-squared for the triglycerides model, same setup as above.
boot.r2.stg <- boot(itc, R = 10000, statistic = fit.stg)
boot.ci(boot.r2.stg, index = 3, type = "perc")
# Collect the R-squared replicates of both models side by side; column
# "X3" is the third statistic (rsq) in each boot output.
violin <- data.frame(
  VLDL_D = data.frame(boot.r2.vldld$t)[,"X3"],
  Tot_TG = data.frame(boot.r2.stg$t)[,"X3"]
)
# Wide -> long: one row per (replicate, model), value column "r2".
violin.reshape <- reshape(
  violin,
  varying = names(violin),
  timevar = "time",
  idvar = "boot",
  v.names = "r2",
  direction = "long"
)
# Human-readable model labels (time 1 = first varying column, VLDL_D).
violin.reshape$var <- ifelse(
  violin.reshape$time == 1,
  "VLDL-D",
  "Tot.TG"
)
# r2.1 - r2.2
# Bootstrap statistic: difference in explained variance (R-squared)
# between the VLDL_D model and the Serum_TG model, fitted on the same
# resample so the difference is paired.
fit.compare <- function(data, ind) {
  resample <- data[ind, ]
  r2_of <- function(f) summary(lm(f, data = resample))$r.squared
  r2_of(ITC ~ VLDL_D) - r2_of(ITC ~ Serum_TG)
}
# Bootstrap the paired R-squared difference; the CI confidence level is
# Bonferroni-adjusted for 22 tests (1 - 0.05/22, cf. p_correct_n above).
boot.r2.compare <- boot(itc, R = 10000, statistic = fit.compare, sim = "ordinary")
boot.ci(boot.r2.compare, index = 1, type = "perc", conf = 1 - 0.05/22)
# Replicates of the difference, labelled for plotting.
r2.diff <- data.frame(
  r2 = boot.r2.compare$t,
  var = rep("VLDL-D - Tot.TG")
)
# Stack the per-model R-squared replicates with the difference replicates
# (built here but not used by the density plot below, which uses r2.diff).
r2.diff.2 <- rbind(
  violin.reshape[c("r2", "var")],
  r2.diff
)
# Panel C: bootstrap density of the R-squared difference (in % points),
# with dashed reference lines at zero, the CI bounds, and the mean.
dens.diff <- ggplot(data = r2.diff, aes(x = r2 * 100, fill = var)) +
  ggtitle("Model comparison: \n Mean VLDL diameter vs. Total triglycerides") +
  xlab("VLDL-D - Tot.TG explained variance diff. (R\U00B2 %)") +
  scale_x_continuous(limits = c(-10, 50), breaks = seq(0, 50, 10)) +
  ylab("Density") +
  geom_density(alpha = 0.5) +
  geom_vline(xintercept = 0, linetype = "dashed", color = "black") +
  # 0.0507 and 0.3263 look like the percentile CI bounds hard-coded from
  # the boot.ci output above -- NOTE(review): confirm they match the
  # current seed/run, since they will not update automatically.
  geom_vline(xintercept = 0.0507 * 100, linetype = "dashed", color = "grey75") +
  geom_vline(xintercept = 0.3263 * 100, linetype = "dashed", color = "grey75") +
  geom_vline(xintercept = mean(r2.diff$r2) * 100, linetype = "dashed", color = "white") +
  scale_fill_manual(values = c("black")) +
  labs(fill = "R\U00B2") +
  theme_classic() +
  # Compact publication styling: 5-6 pt Helvetica, legend pinned to the
  # top-right corner inside the panel.
  theme(
    plot.title = element_text(
      color = "black",
      size = 6,
      family = "Helvetica",
      face = "bold",
      hjust = 0.5
    ),
    legend.title = element_text(size = 5, family = "Helvetica", face = "bold"),
    legend.text = element_text(size = 5, family = "Helvetica", face = "plain"),
    legend.background = element_blank(),
    legend.key.size = unit(0.5, "line"),
    legend.direction = "vertical",
    legend.position = c(1, 1),
    legend.justification = c("right", "top"),
    legend.margin = margin(1, 1, 1, 1),
    strip.background = element_blank(),
    strip.text = element_text(size = 6, family = "Helvetica", face = "bold"),
    axis.text.x = element_text(
      colour = "black", size = 6, family = "Helvetica", face = "plain"
    ),
    axis.text.y = element_text(
      colour = "black", size = 6, family = "Helvetica", face = "plain"
    ),
    axis.title = element_text(size = 6, family = "Helvetica", face = "bold")
  )
### Draw complete plot w. stats ----
# Latin-1 PDF encoding so the micro sign and superscript 2 render.
pdf.options(encoding = "ISOLatin1.enc")
# Arrange panels A-C in a single row.
itc.a.p <- plot_grid(
  itc.a.p1,
  itc.a.p2,
  dens.diff,
  nrow = 1,
  rel_widths = c(1,1,1),
  labels = c("A.", "B.", "C.")
)
# Overlay the model-statistics strings (from calc_fig_4_test, fitted on
# the standardised data) above panels A and B at fixed canvas positions.
itc.a_ggdraw <- cowplot::ggdraw(itc.a.p) +
  cowplot::draw_label(
    paste0("Model statistics: \n", calc_fig_4_test(itc.a, "VLDL_D", "ITC", x_unit = "nm")),
    x = 1 / 6, y = 0.99, hjust = 0.45, vjust = 1,
    fontfamily = "Helvetica", fontface = "bold", color = "black", size = 6,
  ) +
  cowplot::draw_label(
    paste0("Model statistics: \n", calc_fig_4_test(itc.a, "Serum_TG", "ITC", x_unit = "mmol/L")),
    x = 3 / 6, y = 0.99, hjust = 0.45, vjust = 1,
    fontfamily = "Helvetica", fontface = "bold", color = "black", size = 6,
  )
### Save to pdf ----
cowplot::save_plot(
  "./out/figure_S1.pdf",
  itc.a_ggdraw,
  base_height = 2.25,
  base_width = 7.1,
  dpi = 1000
)
|
896237bb3e66dc8324b025a73b72f177ec90fe52 | a2d95f7b0d4455be280aa7a0e14b0acb576cbe61 | /src/rserve/R/performArima.R | 44ca02996bc6c4a03110fb187878e88da8aeb55b | [] | no_license | sallgoood/jARIMA | b4b2d9dae583c9c5d290a862cd54775d65e3bf7f | fc78cf0a2609039f40df2b66a017eb59b4d48ed9 | refs/heads/master | 2023-04-23T20:27:36.929584 | 2019-04-14T19:22:55 | 2019-04-14T19:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 197 | r | performArima.R |
performArima <- function(tsData) {
fut <- tsData %>%
auto.arima() %>%
forecast(1) %>%
as.data.frame()
return(c(
fut$`Hi 95` %>% as.double(),
fut$`Lo 95` %>% as.double()))
} |
7b66319586329385df76c3031aaeb42fd014e94e | 71c2db324708ac9f49cdb6bea15a33c901837703 | /R/est_mtfa.R | 2f9448d34eec1e0aebab3678ab9a4942c35dd7dc | [] | no_license | suren-rathnayake/EMMIXmfa | 8e3e5515a756755ca7530e79b14b926ce542d20a | 413ff590398c58e3c7596205b102cceaa827643b | refs/heads/master | 2021-07-09T17:35:17.106766 | 2019-02-23T22:31:33 | 2019-02-23T22:31:33 | 125,965,418 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,532 | r | est_mtfa.R | # est.mtfa <- function(Y, g, q, itmax, tol, pivec, B, mu, D, sigma_type,
# D_type, v, df_update, conv_measure, ...) {
est.mtfa <- function (init_para, Y, itmax, tol,
v, df_update, conv_measure, ...) {
p <- ncol(Y)
n <- nrow(Y)
fit <- c(init_para, list(v = v, df_update = df_update))
# fit <- list(g = g, q = q, pivec = pivec, B = B, mu = mu, D = D,
# sigma_type = sigma_type, D_type = D_type, v = v,
# df_update = df_update)
# loglikeNtau <- try(do.call('logL_tau.mtfa', c(list(Y=Y), fit)),
# silent = TRUE)
loglikeNtau <- do.call('logL_tau.mtfa', c(list(Y=Y), fit))
if ((class(loglikeNtau) == "try-error")||
(class(loglikeNtau) == 'character')) {
FIT <- paste('in computing the log-likelihood before EM-steps')
class(FIT) <- "error"
return(FIT)
}
if (class(loglikeNtau$logL) == 'character') {
FIT <- paste('in computing the log-likelihood before EM-steps',
loglikeNtau$logL)
class(FIT) <- "error"
return(FIT)
}
fit <- append(fit, loglikeNtau)
for (niter in 1 : itmax) {
FIT <- do.call('Mstep.mtfa', c(list(Y=Y), fit))
if (class(FIT) == 'error') {
FIT <- paste('in ', niter,
'iteration of the M-step', FIT)
class(FIT) <- "error"
return(FIT)
}
loglikeNtau <- try(do.call('logL_tau.mtfa', c(list(Y=Y), FIT)),
silent=TRUE)
if ((class(loglikeNtau) == "try-error") ||
(class(loglikeNtau) == 'character')) {
FIT <- paste('in computing the log-likelihood after the ', niter,
'th the M-step', FIT$logL, sep='')
class(FIT) <- "error"
return(FIT)
}
FIT <- append(FIT, loglikeNtau)
if ((class(FIT$logL)=="NULL") || (class(FIT$logL) == 'character')) {
FIT <- paste('in computing the log-likelihood after the ', niter,
'th the M-step', FIT$logL, sep='')
class(FIT) <- "error"
return(FIT)
} else {
if ((FIT$logL == -Inf) || is.na(FIT$logL)) {
FIT <- paste('the log-likelihood computed after the ', niter,
'th iteration of the M-step is not finite', sep='')
class(FIT) <- "error"
return(FIT)
}
}
if ((conv_measure == "diff") && (abs(FIT$logL-fit$logL) < tol))
break
if ((conv_measure == "ratio") && (abs((FIT$logL-fit$logL) / FIT$logL) < tol))
break
fit <- FIT
}
class(FIT) <- "mtfa"
return(FIT)
}
|
359f8b4bdfe88bf1889041616fc3b9a3d07bc0c6 | 46d83ecc49e9c5bacb8cfa394378cd6e6ec6735d | /man/sta_lta_calc.Rd | fcc023209bf3c5fcc0d68d62557e14dec167a04e | [
"MIT"
] | permissive | wltcwpf/hvsrProc | 33e6b9d54079636d07c7650ac556b1b0e987edef | 84f47382d77b658149616f1c7166372f9260033f | refs/heads/master | 2023-08-17T16:29:58.269725 | 2023-08-16T19:14:40 | 2023-08-16T19:14:40 | 358,467,058 | 10 | 1 | null | null | null | null | UTF-8 | R | false | true | 645 | rd | sta_lta_calc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sta_lta_calc}
\alias{sta_lta_calc}
\title{STA to LTA calculation}
\usage{
sta_lta_calc(ts, short_term, long_term, moving_term)
}
\arguments{
\item{ts}{An array of time series}
\item{short_term}{An integer, short term length for STA (number of points)}
\item{long_term}{An integer, long term length for LTA (number of points)}
\item{moving_term}{An integer, moving step of the preceding point (number of points)}
}
\value{
A vector, the STA/LTA ratios
}
\description{
Calculate the ratio of Short Term Average (STA) to Long Term Average (LTA)
}
|
d8ab046b4a9d2a712b199a83646ff16606949b99 | ec8aac4bc5a9e3f6ea44b2f017e79e86bf948ec1 | /R/getDataSelic.R | 055b07eda70c58f1b8b3b18f6bf078a9afa4abff | [] | no_license | lojadedados/rtesourodescomplicado | d8ece192aba62ad5783a64234767e14bf52cfbbd | e96c153033c17a9863a99c3f51caca84cf4e2aae | refs/heads/master | 2021-01-12T11:29:32.973793 | 2016-12-15T13:13:00 | 2016-12-15T13:13:00 | 72,937,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,285 | r | getDataSelic.R | #' @title get data Selic
#' @description Funcao para recuperar dados Selic
#' @author Rodrigo Almeida
#' @export
#' @param updateDataSeNaoDisponivel: TRUE se deseja fazer download do arquivo de dados caso o mesmo não esteja disponível, FALSE caso o arquivo já esteja disponível.
getDataSelic <- function(updateDataSeNaoDisponivel = FALSE) {
if (!existsDataSelic() && !isTRUE(updateDataSeNaoDisponivel)) {
stop("O arquivo de dados não está disponível. Utiliza a função updateData() ou utilize getDataSelic(updateDataSeNaoDisponivel = TRUE) para fazer o download do mesmo.")
}
if (isTRUE(updateDataSeNaoDisponivel)) {
if (!existsDataSelic()) {
updateData();
}
}
dataCarregamentoSelic <<- format(Sys.Date(), "%d/%m/%Y %H:%M")
tdData <- read.csv2("data/selic.csv", sep = ";")
colnames(tdData) <- c("data","taxa")
#tdData$Data.Vencimento <- as.POSIXct(strptime(x = as.character(tdData$Data.Vencimento), format = "%d/%m/%Y"))
#TODO verificar se em algum outro local, utilizamos data de forma errada
#tdData$Data.Base <- as.POSIXct(strptime(x = as.character(tdData$Data.Base), format = "%d/%m/%Y"))
tdData$data <- as.Date(tdData$data, format = "%d/%m/%Y")
#ordenando pela data
tdData <- tdData[order(tdData$data),]
tdData
}
|
c3d7ee4e5ced9fc4dc69efca0929cc0fb6eab25b | 65e8918b036ddd7dc691cb7ad3b2f3d4e39d26d8 | /rProgramming/code/corr.R | 27956facbd0a27516d5f44162d584a991dc1d44c | [] | no_license | jaeddy/jhuDataScience | e782eadbca8dabd756a2c63d6ad41aacd86c36a9 | 2e76db6afd48837f69a2a5f1bc625ba66b3f0ebe | refs/heads/master | 2021-01-22T23:20:16.846429 | 2017-01-21T00:02:44 | 2017-01-21T00:02:44 | 25,954,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,167 | r | corr.R | corr <- function(directory, threshold = 0) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
dirFiles <- dir(directory)
dirIDname <- gsub(".csv", "", dirFiles)
dirIDnum <- as.numeric(dirIDname)
idCompCases <- complete(directory)
corIDs <- idCompCases$nobs > threshold
pollutantCorr <- vector(mode = "numeric", length = sum(corIDs))
idx <- 1
for(i in 1:length(dirFiles)){
if(corIDs[i] == TRUE){
idFile <- dirFiles[dirIDnum %in% i]
filePath <- paste(directory, "/", idFile, sep = "")
idTable <- read.csv(filePath)
corrDat <- idTable[c("sulfate", "nitrate")]
pollutantCorr[idx] <- cor(corrDat[["sulfate"]], corrDat[["nitrate"]],
use = "complete.obs")
idx <- idx + 1
}
}
#pollutantCorr <- round(pollutantCorr, digits = 5)
pollutantCorr
} |
4c638fefb528e9cba92e87099c22681c1575676c | ab9bd6a16ebbd0668039981ea12af8a45c932618 | /R/mw_conc_fltr.R | faf7fd9c73849f076a43b943bf491ff2a4d9b93f | [] | no_license | KeesVanImmerzeel/mipwelcona | 5899ea62d4fb3f6136d5f43a3116548f4edb2e13 | ba6877fda6f1bb48462a4bfcd2aa00507ad23d12 | refs/heads/master | 2023-01-04T07:15:28.045119 | 2020-10-29T20:53:59 | 2020-10-29T20:53:59 | 299,615,620 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,232 | r | mw_conc_fltr.R | #' Calculate concentrations of filter at times as specified in table created with \code{\link{mw_conc_streamlines}}
#'
#' @param fltr_nr (unique) Well number (numeric).
#' @param sl_fltr_table Dataframe as created with the function \code{\link{mw_create_sl_fltr_table}}.
#' @param conc_streamlines Table (tibble) as created with the function \code{\link{mw_conc_init}}.
#' @return Dataframe with the following variables (columns):
#' * FLTR_NR: Streamline number (integer)
#' * TIME: Time, days (numeric)
#' * CONC: Concentration (numeric)
# @examples
# x <- .mw_conc_fltr( fltr_nr=1, sl_fltr_table=chk_sl_fltr_table, conc_streamlines=chk_mw_conc_streamlines)
# @export
.mw_conc_fltr <-
function(fltr_nr,
sl_fltr_table,
conc_streamlines) {
sl_nrs <-
sl_fltr_table %>% dplyr::filter(FLTR_NR == fltr_nr) %>% dplyr::select(-c(FLTR_NR)) %>% dplyr::pull(1)
if (length(sl_nrs) < 1) {
return(NA)
}
conc_streamlines %<>% dplyr::filter(SL_NR %in% sl_nrs)
df <-
conc_streamlines %>% dplyr::group_by(TIME) %>% dplyr::summarize(CONC =
mean(CONC), .groups = "drop")
return(cbind(FLTR_NR = fltr_nr, df))
}
|
a8c03034f44bcc128a33f1b0b2a68ee91c10a7f8 | b66de58525899583058979bc7da771f36fe7e0db | /man/selectRunning.Rd | 453bc2a34b93d2d6ab385a816fcf7b822c381913 | [] | no_license | keyonghu/openair | 4e90d78e23f170e3299870fd88bcd616cdee14cf | a432ec912180678b7f3034eaec98c9b0e36485b9 | refs/heads/master | 2021-05-26T03:10:21.854502 | 2020-04-06T08:04:48 | 2020-04-06T08:04:48 | 254,028,347 | 1 | 0 | null | 2020-04-08T08:24:50 | 2020-04-08T08:24:50 | null | UTF-8 | R | false | true | 1,921 | rd | selectRunning.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selectRunning.R
\name{selectRunning}
\alias{selectRunning}
\title{Function to extract run lengths greater than a threshold}
\usage{
selectRunning(mydata, pollutant = "nox", run.len = 5, threshold = 500)
}
\arguments{
\item{mydata}{A data frame with a \code{date} field and at least one
numeric \code{pollutant} field to analyse.}
\item{pollutant}{Name of variable to process. Mandatory.}
\item{run.len}{Run length for extracting contiguous values of
\code{pollutant} above the \code{threshold} value.}
\item{threshold}{The threshold value for \code{pollutant} above which data
should be extracted.}
}
\value{
Returns a data frame that meets the chosen criteria. See examples
below.
}
\description{
Utility function to extract user-defined run lengths (durations) above a
threshold
}
\details{
This is a utility function to extract runs of values above a certain
threshold. For example, for a data frame of hourly NOx values we would like
to extract all those hours where the concentration is at least 500ppb for
contiguous periods of 5 or more hours.
This function is useful, for example, for selecting pollution episodes from
a data frame i.e. where concentrations remain elevated for a certain period
of time. It may also be of more general use when analysing air pollution
data. For example, \code{selectRunning} could be used to extract continuous
periods of rainfall --- which could be important for particle
concentrations.
}
\examples{
## extract those hours where there are at least 5 consecutive NOx
## concentrations above 500ppb
mydata <- selectRunning(mydata, run.len = 5, threshold = 500)
## make a polar plot of those conditions...shows that those
## conditions are dominated by low wind speeds, not
## in-canyon recirculation
\dontrun{polarPlot(mydata, pollutant = "nox")}
}
\author{
David Carslaw
}
\keyword{methods}
|
3fddc3826665d6f90556f53bd30b667846a92781 | 7c039033d523e95f98edabe5f1c0a83130da5b42 | /regression_and_other_stories/david/exercises_8.R | 0099be9194676f71aec1afe45b94e788d5391594 | [] | no_license | davidlindero/reproducibilitea_bookclub | 664d21b203b36755b8e09820ee42734ae13beab2 | eafcee5d8cb52364a28744cf4dc27f5f674505ea | refs/heads/main | 2023-03-09T23:04:33.832278 | 2021-02-25T15:16:02 | 2021-02-25T15:16:02 | 335,567,277 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,081 | r | exercises_8.R | # library("rstanarm")
# source("./exercises_6.R")
rss <- function(x, y, a, b){
resid <- y - (a + b*x)
return(sum(resid^2))
}
datapath <- c("../../../ROS-Examples/ElectionsEconomy/data/hibbs.dat")
hibbs <- read.table(datapath, header=TRUE)
head(hibbs)
# ressum <- rss(hibbs$vote, hibbs$growth, 1, 1)
a_hat <- 46.3
b_hat <- 3
b_vals <- seq(-10, 10, by=0.1)
a_vals <- seq(20, 60, by=0.1)
# for (b_vals in seq(-10, 10) {
b_rss <- function(b){
return(rss(hibbs$growth, hibbs$vote, a_hat, b))
}
a_rss <- function(a){
return(rss(hibbs$growth, hibbs$vote, a, b_hat))
}
bs_ressum <- sapply(b_vals, b_rss)
as_ressum <- sapply(a_vals, a_rss)
pdf('./plots/a_values.pdf')
plot(a_vals, as_ressum, pch="*")
# legend("topright", legend=c("Intercept", "x_data", "sigma"), col=c("blue", "red", "green"), pch=c("*", "*", "*"), inset=c(0.02, 0.02))
dev.off()
pdf('./plots/b_values.pdf')
plot(b_vals, bs_ressum, pch="*")
# legend("topright", legend=c("Intercept", "x_data", "sigma"), col=c("blue", "red", "green"), pch=c("*", "*", "*"), inset=c(0.02, 0.02))
dev.off()
# } |
3b98b6bdc5fc75bef0c238d6055b838380d001a3 | 6d03d53a99e228c29a9cdadbb58508de30905e16 | /man/as_shadow.data.frame.Rd | 77848065f76a39075f5932c3415e321cc14b5525 | [] | no_license | rpodcast/naniar | 4e8f6547d4aed9cbe7d7b189ce93cd25ea76b554 | b67795b110a25315894e02c433433e3965127d68 | refs/heads/master | 2021-06-22T06:39:47.063573 | 2017-07-31T08:52:15 | 2017-07-31T08:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 491 | rd | as_shadow.data.frame.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shadows.R
\name{as_shadow.data.frame}
\alias{as_shadow.data.frame}
\title{Create shadow data}
\usage{
\method{as_shadow}{data.frame}(data, ...)
}
\arguments{
\item{data}{dataframe}
\item{...}{selected variables to use}
}
\description{
Return a tibble that in shadow matrix form, where the variables are the same but have a suffix _NA attached to indicate their difference.
}
\examples{
as_shadow(airquality)
}
|
0e5ecbb50db3050ba2a40bd6ff234e4906b1cfce | d5374bc17cf01b39d09c3f0e8f71698a33a59719 | /BDM/2/lab2.R | a81b621f12463190127f31fe1a29e548364cc46a | [] | no_license | MaryanaYavorska/BDM | 8abe732e2b2c2a187f2dae128e6faa609cbb2a09 | 4c1d7b35eef57be49b3ca7ccd3d23817224ed08e | refs/heads/master | 2020-05-04T12:29:10.458276 | 2019-05-10T15:14:13 | 2019-05-10T15:14:13 | 179,123,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 628 | r | lab2.R | library (dplyr)
library (ggplot2)
movie_body_counts <- read.csv ( 'filmdeathcounts.csv')
head(movie_body_counts)
str(movie_body_counts)
movie_body_counts$body_per_min <- movie_body_counts$Body_Count/movie_body_counts$Length_Minutes
ggplot(movie_body_counts, aes(x=Body_Count)) + geom_histogram(bins=20, color="darkred", fill="lightyellow")
movie_body_counts %>%
top_n(n = 10, Body_Count) %>% arrange(desc(Body_Count))
movie_body_counts %>%
top_n(n = 10, body_per_min) %>% arrange(desc(body_per_min))
ggplot(movie_body_counts, aes(x=IMDB_Rating)) + geom_histogram(bins=10, color="black", fill="purple")
|
2ac3d67e4e5df80e734d7c14a6da5625d28fb196 | f359c78df08d0c541e17115d8469694b094207ef | /R/valueID.R | e2230fcb8390790b7d68daeb8f68191d73728dd4 | [] | no_license | patrickweigelt/bRacatus | 851845474d646f20218996b9317f3f83c977d23f | 9742a169a2b506e0fa86068197b82ea2986e77eb | refs/heads/master | 2022-12-03T21:39:41.397792 | 2020-07-30T15:47:49 | 2020-07-30T15:47:49 | 287,520,743 | 0 | 0 | null | 2020-08-14T11:48:46 | 2020-08-14T11:48:45 | null | UTF-8 | R | false | false | 919 | r | valueID.R | #' valueID
#'
#' Extracts signal values and ID from each cell
#'
#' @param checklists_raster List containing rasterised checklists for presence, native and alien reference regions
#' @return A list with cell IDs and signal values for all cells where the species is present, native and alien.
#' @examples
#' country_checklist <- countryChecklist(c("Brazil","Argentina","Uruguay","Paraguay"),
#' c("native","alien","unknown","native"))
#'
#' rasterised_checklist <- rasteriseChecklists(country_checklist)
#'
#' value_IDs <- valueID(rasterised_checklist)
#' @export
valueID <- function(checklists_raster){
ID_prob <- list()
for(i in 1:length(checklists_raster))
{
cell_ID <- which(checklists_raster[[i]][]!=0)
prob <- checklists_raster[[i]][which(checklists_raster[[i]][]!=0)]
ID_prob[[i]] <- data.frame(cell_ID=cell_ID,prob=prob)
}
names(ID_prob) <- names(checklists_raster)
return(ID_prob)
}
|
d1aab0ad014813034ee7cdb2ac952387f3dc3b58 | 756ae4c4bf31cca6824f839f23b81dc037443868 | /cachematrix.R | db1e016d1040d7a764f1ac76b3273f831ccc2b6a | [] | no_license | lipgee/ProgrammingAssignment2 | b5f39d92037666453b43db57f77e5960ae7f9f33 | 2ef59222ffc205cc9dc2a2e37eed4b221101daa3 | refs/heads/master | 2020-12-03T05:10:57.418770 | 2015-05-20T01:38:04 | 2015-05-20T01:38:04 | 35,819,516 | 0 | 0 | null | 2015-05-18T13:29:01 | 2015-05-18T13:29:00 | null | UTF-8 | R | false | false | 1,900 | r | cachematrix.R | ## The following two functions named makeCacheMatrix and cacheSolve
## is to help in matrix computation activity. Generally the operation
## to inverse a matrix is consider a resource intensive and potential
## time consuming activity.
## makeCacheMatrix function is to cache a matrix object
## Couple of features have been built around the matrix object,
## ie: set, get, setsolve, getsolve
## cacheSolve is a function that will inverse the matrix by using
## solve() function. If a particular matrix has been inverse before,
## it will take the inversed result directly from the cache and skip
## the processing. This helps in speed up the computation on same matrix
## data
## Listed below are the example steps to utilize these functions:
## Step 1: Create a matrix object and have it cached
## a <- makeCacheMatrix(matrix(1:4,2,2))
## Step 2: Call cacheSolve function to inverse the matrix
## cacheSolve(a)
## makeCacheMatrix function cache the matrix object
## this function takes an argument of matrix object type
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve inverse a matrix, if the matrix has been inversed before,
## the matrix inverse result will be taken directly from cache and
## computation will be skipped.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
4cd4d733809a98a5ec4fd8a7d94eb96077412cfd | 27349f4446f5ad940c80fce8e30498295a1ce76d | /R/method-merge.R | 52c6468c5f278150de1d677073826c14edec7c76 | [] | no_license | heavywatal/tidytree | f3e3cab5cb9299b1eceb2b2a673d4b07efe60948 | d19c8adc0e8670a1c677b4d775487deeaa699751 | refs/heads/master | 2021-07-13T02:22:06.128885 | 2020-07-03T02:34:59 | 2020-07-03T02:34:59 | 162,420,903 | 0 | 0 | null | 2018-12-19T10:23:00 | 2018-12-19T10:22:59 | null | UTF-8 | R | false | false | 149 | r | method-merge.R | ##' @method merge tbl_tree
##' @export
merge.tbl_tree <- function(x, y, ...) {
res <- NextMethod()
class(res) <- class(x)
return(res)
}
|
fa2f4da2020957869b44de13fc0469892b490a98 | 4484cb4a9492b9bcb687d3e77f3cbcb4185ab0aa | /R/splitstree.R | 2b17be6e03dd2933ea3a2b2dd5017d0f070cbfb1 | [] | no_license | IVS-UZH/RSplitsTree | f690e1f547b5c16068e8e7f57cd87f6036c4dc34 | 172a22dd2e9adb312b3cfcc658b73d798e028947 | refs/heads/master | 2020-05-21T19:27:35.767148 | 2018-08-21T07:12:59 | 2018-08-21T07:12:59 | 61,041,394 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,110 | r | splitstree.R | #' Convert an object to SplitsTree-compatible NEXUS input
#' and launch SplitsTree if required
#' @export
#'
#' @param dist A distance object (usually of class 'dist')
#' @param nexus.file A name of the file where the NEXUS file will be written to (see notes)
#' @param plot Set this to 'PDF' or 'SVG' to automatically invoke SplitsTree4 and generate a graphics file
#' @param splitstree.path Path to the SplitsTree4 binary (see notes)
#' @return The name of the generated NEXUS file (invisibly)
#'
#' @note If the name of the output file (\code{nexus.file}) is omitted or NULL, the function attempts to derive
#' the file name automatically. Beware that if the file with this name already exists, it will be
#' overwritten without a warning — so extra care needs to be taken if you have extra `.nex` files
#' in the output directory.
#'
#' If you are generating graphical output, the \code{splitstree.path} needs to point to the
#' command-line
#' executable of the SplitsTree4 package. The location and name of the executable is system- and
#' installation-dependent. If the option is not set, please provide the path explicitly. Note that
#' on OS X, a suitable executable is
#' located within the SplitsTree.app application bundle, with the path
#'`SplitsTree.app/Contents/MacOS/JavaApplicationStub` (or alternatively, you can install the Linux
#' SplitsTree4 package on Mac to get an executable in /usr/local/bin/SplitsTree)
#'
#' @examples
#' library(cluster)
#' data(agriculture)
#' agriculture.dist <- daisy(agriculture)
#' splitstree(agriculture.dist, plot='PDF')
splitstree <- function(dist, nexus.file = NULL, plot = FALSE, splitstree.path = getOption('splitstree.path', NULL)) {
  # -----------------------------------------------------
  # Validate the input
  # -----------------------------------------------------
  # generate an appropriate file name, if none is provided (covers both an
  # omitted argument and an explicit NULL, matching the declared default)
  if(missing(nexus.file) || is.null(nexus.file)) {
    if(is.symbol(substitute(dist)))
      nexus.file <- paste0(gsub("\\.", "-", deparse(substitute(dist))), '.nex')
    else
      nexus.file <- 'splitstree-output.nex'
  }
  # check if plot is a correct value
  if(!identical(plot, FALSE)) {
    plot <- match.arg(plot, c('PDF', 'SVG'))
    # Bug fix: file.exists(NULL) returns logical(0), so the old check crashed
    # with "argument is of length zero" whenever splitstree.path was left at
    # its NULL default; report the intended, meaningful error instead.
    if(is.null(splitstree.path) || !file.exists(splitstree.path)) {
      stop("'splitstree.path' needs to point to SplitsTree4 unix executable file!")
    }
  }
  # -----------------------------------------------------
  # Generate the NEXUS file
  # -----------------------------------------------------
  # clean up the labels (SplitsTree can't deal with certain characters)
  attr(dist, "Labels") <- local({
    labels <- attr(dist, "Labels")
    labels <- gsub("(?!/)[[:punct:]]", "_", labels, perl=T)
    labels <- gsub("[[:space:]]", "_", labels, perl=T)
    labels <- gsub("\\_\\_", "-", labels, perl=T)
    labels <- gsub("\\_$", "", labels, perl=T)
    # transliterate accented vowels (and n-tilde) to plain ASCII
    labels <- gsub("á", "a", labels, perl=T)
    labels <- gsub("à", "a", labels, perl=T)
    labels <- gsub("â", "a", labels, perl=T)
    labels <- gsub("ã", "a", labels, perl=T)
    labels <- gsub("é", "e", labels, perl=T)
    labels <- gsub("è", "e", labels, perl=T)
    labels <- gsub("ê", "e", labels, perl=T)
    labels <- gsub("ẽ", "e", labels, perl=T)
    labels <- gsub("í", "i", labels, perl=T)
    labels <- gsub("ì", "i", labels, perl=T)
    labels <- gsub("î", "i", labels, perl=T)
    labels <- gsub("ĩ", "i", labels, perl=T)
    labels <- gsub("ó", "o", labels, perl=T)
    labels <- gsub("ò", "o", labels, perl=T)
    labels <- gsub("ô", "o", labels, perl=T)
    labels <- gsub("õ", "o", labels, perl=T)
    labels <- gsub("ñ", "ny", labels, perl=T)
    labels
  })
  # generate the NEXUS data (as a text string)
  nexus.data <- capture.output({
    taxa.labels <- attr(dist, "Labels")
    n.taxa <- attr(dist, "Size")
    # write the NEXUS header
    cat('#nexus\n\n')
    # write the Taxa block
    cat('BEGIN Taxa;\n')
    cat('DIMENSIONS ntax=', n.taxa, ';\n', sep='')
    cat('TAXLABELS\n')
    cat(paste0(" [", seq_along(taxa.labels), "] '", taxa.labels, "'"), sep='\n')
    cat(';\n')
    cat('END;\n')
    # write the Distances block
    cat('BEGIN Distances;\n')
    cat('DIMENSIONS ntax=', n.taxa, ';\n', sep='')
    cat('FORMAT labels=no diagonal triangle=both;\n')
    cat('MATRIX\n')
    write.table(as.matrix(dist), row.names = F, col.names=F, sep='\t')
    cat(';\n')
    cat('END;\n')
  })
  # save the nexus file
  writeLines(nexus.data, nexus.file)
  if(!identical(plot, FALSE)) {
    # get the name of the plot file
    plot.file <- paste0(gsub('\\.nex$', '', nexus.file), '.', tolower(plot))
    # plotting commands to be passed to splitstree
    splitstree_script <- paste0(
      "EXECUTE file='", file.path(getwd(), nexus.file), "'\n",
      "EXPORTGRAPHICS format=", plot, " file='", file.path(getwd(), plot.file), "' REPLACE=yes\n",
      "QUIT")
    # run splitstree
    system(paste(splitstree.path, ' +g false -S -i', nexus.file),
           input = splitstree_script)
  }
  invisible(nexus.file)
}
45b247902da6b2db42799a0dac9127e79f799dba | d434ec91242aad694c4e2d78580b60a9da3ce29a | /R/remove_these_str.R | 77b18dcbafadb7de6100b4f17aaf489e56ed61b5 | [
"MIT",
"BSD-3-Clause",
"LGPL-3.0-only",
"GPL-1.0-or-later",
"GPL-3.0-only",
"GPL-2.0-only",
"LGPL-2.0-only"
] | permissive | rmsharp/rmsutilityr | 01abcdbc77cb82eb4f07f6f5d8a340809625a1c5 | d5a95e44663e2e51e6d8b0b62a984c269629f76c | refs/heads/master | 2021-11-20T08:45:23.483242 | 2021-09-07T17:28:22 | 2021-09-07T17:28:22 | 97,284,042 | 0 | 2 | MIT | 2021-09-07T17:28:22 | 2017-07-15T01:17:14 | R | UTF-8 | R | false | false | 894 | r | remove_these_str.R | #' Remove these strings
#'
#' Modified from rmsutilityr::remove_strings() by R. Mark Sharp. The
#' modification was to remove a package dependency by using base R set
#' matching (\code{\%in\%}) instead of stri_detect_regex().
#'
#' Elements of \code{.str} that exactly equal any token in \code{expunge}
#' are dropped; all other elements are returned in their original order and
#' original capitalisation.
#' @param .str character vector that have tokens removed that match
#' tokens within the \code{expunge} vector.
#' @param expunge character vector of tokens to be removed from the
#' \code{.str} vector if present.
#' @param ignore_case boolean that determines whether or not case is ignored.
#' Defaults to FALSE.
#' @export
remove_these_str <- function(.str, expunge, ignore_case = FALSE) {
  if (ignore_case) {
    tmp_str <- tolower(.str)
    tmp_expunge <- tolower(expunge)
  } else {
    tmp_str <- .str
    tmp_expunge <- expunge
  }
  # %in% never returns NA, so an NA token in `expunge` no longer corrupts
  # the whole result (the old `==` loop propagated NA through every element
  # of the keep mask).
  .str[!(tmp_str %in% tmp_expunge)]
}
4f707ebb173552c5ddd2a01fe94d0674b2db9920 | 9e2dfc0f004d39a468a65be72666a70ac13a98cd | /plot4.R | fbbc847ea04b35b28342d11243851a55f39cf379 | [] | no_license | alanwafer/ExData_Plotting1 | 915a6fa588910ee104c1a89148c2dc94c17854b1 | fc90a827242e6630a02d0a29950327155fcae873 | refs/heads/master | 2021-01-14T09:14:16.371422 | 2015-10-06T16:11:47 | 2015-10-06T16:11:47 | 43,752,932 | 0 | 0 | null | 2015-10-06T13:38:33 | 2015-10-06T13:38:33 | null | UTF-8 | R | false | false | 1,764 | r | plot4.R | ##Downloading Dataset from Website
## plot4.R -- four-panel household power consumption figure (plot4.png).
##
## Download and extract the dataset only when needed. Bug fix: previously
## the `file` variable was only assigned inside the download branch, so
## re-running the script with the zip already downloaded left `file`
## unbound (resolving to the base `file()` function) and read.table failed.
zipfile <- "exdata-data-household_power_consumption.zip"
datafile <- "household_power_consumption.txt"
if (!file.exists(datafile)) {
  if (!file.exists(zipfile)) {
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zipfile)
  }
  datafile <- unzip(zipfile)[1]  # first (and only expected) extracted file
}
## Reading All Data
data_full <- read.table(datafile, header = TRUE, sep = ";")
## Formatting Date and Filtering for relevant dates (2007-02-01 .. 2007-02-02)
data_full$Date <- as.Date(data_full$Date, format = "%d/%m/%Y")
df <- subset(data_full, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Formatting Rest of Variables In Advance; the measurement columns are read
## as non-numeric, hence the as.numeric(as.character(.)) round trip.
df <- transform(df, timestamp = as.POSIXct(paste(Date, Time)))
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
df$Global_reactive_power <- as.numeric(as.character(df$Global_reactive_power))
df$Voltage <- as.numeric(as.character(df$Voltage))
df$Sub_metering_1 <- as.numeric(as.character(df$Sub_metering_1))
df$Sub_metering_2 <- as.numeric(as.character(df$Sub_metering_2))
df$Sub_metering_3 <- as.numeric(as.character(df$Sub_metering_3))
### PLOT 4: 2x2 panel layout
par(mfrow = c(2, 2))
# Top-left: global active power over time
plot(df$timestamp, df$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Top-right: voltage over time
plot(df$timestamp, df$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom-left: the three sub-metering series overlaid
plot(df$timestamp, df$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(df$timestamp, df$Sub_metering_2, col = "red")
lines(df$timestamp, df$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "), lty = c(1, 1), bty = "n", cex = .5)
# Bottom-right: global reactive power over time
plot(df$timestamp, df$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# OUTPUT: copy the screen device to a 480x480 PNG
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
17a7d9f753124cffc97a3c4472c25518477d6805 | 8b4bfdbc8b3d003393f451e890f2e508c73c612d | /cachematrix.R | 45a258e2e67270dea31da4217c60ba416b71feab | [] | no_license | JamesTM5/ProgrammingAssignment2 | b8a157aa28244d0c2ccc85b18819be587af637e6 | 78e67f560e2b933f2b1973a99bbf5ae266ef1876 | refs/heads/master | 2021-01-15T08:58:48.893520 | 2015-04-25T21:01:55 | 2015-04-25T21:01:55 | 34,580,626 | 0 | 0 | null | 2015-04-25T18:31:03 | 2015-04-25T18:31:03 | null | UTF-8 | R | false | false | 1,212 | r | cachematrix.R | ## Overall these two functions compute the inverse of a matrix in the least processor-intensive way. The first function enables the use
# of a cached matrix rather than requiring the re-computation of one which has already been processed. The second draws on that to
# solve a matrix inversion task, returning the inverse of it's argument.
## makeCacheMatrix builds a special "matrix" object able to cache its
## inverse, for use with the cacheSolve function defined below.
## Bug fix: the original nested the accessor definitions inside set(),
## called matrix() instead of list(), and never returned the accessors,
## so cacheSolve() could not work at all. This version exposes the four
## closures (get/set/getinverse/setinverse) that cacheSolve expects.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL            # cached inverse; NULL until first computed
  set <- function(y) { # replace the matrix and invalidate the cache
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" object created by
## makeCacheMatrix. If the inverse was already computed it is served from
## the cache (with a "getting cached data" message); otherwise it is
## computed with solve(), stored for next time, and returned.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (!is.null(cached)) {
                message("getting cached data")
                return(cached)
        }
        fresh <- solve(x$get(), ...)
        x$setinverse(fresh)
        fresh
}
|
7387ba547b768b16b5aafb7f0601f79330ba258b | 29585dff702209dd446c0ab52ceea046c58e384e | /HIest/R/HIsurf.R | 9275d4a47f48bd0c773bcd6d6bf221c5c0dcac31 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 436 | r | HIsurf.R | HIsurf <-
function (G, P, type, size)
{
    ## Evaluate the HILL log-likelihood over a size-by-size grid of
    ## hybrid-index S (rows) and heterozygosity H (columns) values on
    ## [0, 1], then blank out (NA) every grid cell lying outside the
    ## admissible triangle H <= min(2S, 2 - 2S).
    axis.vals <- seq(from = 0, to = 1, length.out = size)
    surf <- matrix(NA, nrow = size, ncol = size)
    ## First pass: likelihood at every (S, H) grid point.
    for (row in seq_len(size)) {
        for (col in seq_len(size)) {
            surf[row, col] <- HILL(c(axis.vals[row], axis.vals[col]), G, P, type)
        }
    }
    ## Second pass: censor the geometrically impossible cells.
    for (row in seq_len(size)) {
        h.max <- min(2 * axis.vals[row], 2 - 2 * axis.vals[row])
        surf[row, axis.vals > h.max] <- NA
    }
    surf
}
|
60c1217e752e1fe62f9a858d4e19d4f8e8a64de6 | 7e1cb28c62a4218bd5cde8ddd4f6a8fd3dde1ba3 | /src/doPhyloseq.R | beeee35f6d091380ac0aa456aebb5e34c99d1be2 | [] | no_license | NielInfante/WV_16S | 3c37585bde762af2f2177fbbc4c1963da5c4f881 | 86be1725fc83c59e5d719f94b4676cd88c67ea18 | refs/heads/master | 2020-04-08T13:02:29.667003 | 2019-08-29T15:00:25 | 2019-08-29T15:00:25 | 159,372,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,724 | r | doPhyloseq.R | library(phyloseq)
library(ggplot2)
library("gridExtra")
library(dplyr)
setwd('~/')
# Play with shiny, if you like
#install.packages("shiny")
#shiny::runGitHub("shiny-phyloseq","joey711")
# Load Data
ps <- readRDS('Data/PhyloseqObject.rds')
# Refactor data - I want to put in more metadata - Old, metadata in phyloseq should be good
#tree <- phy_tree(ps)
#tax <- tax_table(ps)
#otu <- otu_table(ps)
#sam <- sample_data(ps)
#m2 <- read.table("~/projects/Cuff/Rashel/Both/meta", header=T, sep="\t", stringsAsFactors = F)
#rownames(m2) <- m2$SampleID
#m2$Experiment <- substr(m2$SampleID,1,1)
#m2$Age_cat <- cut(m2$Age, breaks=c(0,18,22,26,30))
#m2$Group <- substr(m2$Individual, 1, 2)
#ps <- phyloseq(otu, sample_data(m2), tax,tree)
##### Filtering
rank_names(ps)
# Create table, number of features for each phyla
table(tax_table(ps)[, "Phylum"], exclude = NULL)
# Get rid of NA's and uncharacterized phyla - not always the best thing to do.
ps0 <- subset_taxa(ps, !is.na(Phylum) & !Phylum %in% c("", "uncharacterized"))
table(tax_table(ps0)[, "Phylum"], exclude = NULL)
# Compute prevalence of each feature, store as data.frame
prevdf = apply(X = otu_table(ps0),
MARGIN = ifelse(taxa_are_rows(ps0), yes = 1, no = 2),
FUN = function(x){sum(x > 0)})
# Add taxonomy and total read counts to this data.frame
prevdf = data.frame(Prevalence = prevdf,
TotalAbundance = taxa_sums(ps0),
tax_table(ps0))
# Are any phyla minimally represented?
plyr::ddply(prevdf, "Phylum", function(df1){cbind(mean(df1$Prevalence),sum(df1$Prevalence))})
# or
myPrev <- prevdf %>% group_by(Phylum) %>% summarize(n=n(),sum=sum(Prevalence),
mean=mean(Prevalence),
max=max(Prevalence),
totAbund=sum(TotalAbundance))
myPrev %>% print(n=100)
# If so, filter them out
# Define phyla to filter
filterPhyla <- myPrev %>% filter(max <= 3) %>% select(Phylum)
#filterPhyla = c("Fusobacteria", "Deinococcus-Thermus")
# Filter entries with unidentified Phylum.
ps1 = subset_taxa(ps0, !Phylum %in% filterPhyla$Phylum)
# Don't do any filtering
#ps1 <- ps0
## Prevalence Filtering
# Subset to the remaining phyla
prevdf1 = subset(prevdf, Phylum %in% get_taxa_unique(ps1, "Phylum"))
ggplot(prevdf1, aes(TotalAbundance, Prevalence / nsamples(ps0),color=Phylum)) +
# Include a guess for parameter
geom_hline(yintercept = 0.05, alpha = 0.5, linetype = 2) + geom_point(size = 2, alpha = 0.7) +
scale_x_log10() + xlab("Total Abundance") + ylab("Prevalence [Frac. Samples]") +
facet_wrap(~Phylum) + theme(legend.position="none")
# Define prevalence threshold as 5% of total samples
prevalenceThreshold = 0.05 * nsamples(ps)
prevalenceThreshold
# Execute prevalence filter, using `prune_taxa()` function
keepTaxa = rownames(prevdf1)[(prevdf1$Prevalence >= prevalenceThreshold)]
ps2 = prune_taxa(keepTaxa, ps)
# Just check what this does
# Compute prevalence of each feature, store as data.frame
prevdf2 = apply(X = otu_table(ps2),
MARGIN = ifelse(taxa_are_rows(ps2), yes = 1, no = 2),
FUN = function(x){sum(x > 0)})
# Add taxonomy and total read counts to this data.frame
prevdf2 = data.frame(Prevalence = prevdf2,
TotalAbundance = taxa_sums(ps2),
tax_table(ps2))
# skipping prevelance filtering
ps2 <- ps1
# Agglomerate taxa
# I don't think I will do this at this time, but I'm including it for later reference
# How many genera would be present after filtering?
length(get_taxa_unique(ps2, taxonomic.rank = "Genus")) # combine all features that descend from the same genus
ps3 = tax_glom(ps2, "Genus", NArm = TRUE)
# Or can do it using tree height, if you don't trust the taxonomy
h1 = 0.4
ps4 = tip_glom(ps2, h = h1)
# See what the agglomerated trees look like:
multiPlotTitleTextSize = 8
p2tree = plot_tree(ps2, method = "treeonly",
ladderize = "left",
title = "Before Agglomeration") +
theme(plot.title = element_text(size = multiPlotTitleTextSize))
p3tree = plot_tree(ps3, method = "treeonly",
ladderize = "left", title = "By Genus") +
theme(plot.title = element_text(size = multiPlotTitleTextSize))
p4tree = plot_tree(ps4, method = "treeonly",
ladderize = "left", title = "By Height") +
theme(plot.title = element_text(size = multiPlotTitleTextSize))
# group plots together
grid.arrange(nrow = 1, p2tree, p3tree, p4tree)
###
#ps2 <- ps1
### Abundance value transformation
## Normalize by read counts, etc
# Violin-plus-jitter abundance panel for the Firmicutes subset of a
# phyloseq object, faceted by the taxonomic rank in `Facet` and coloured
# by the rank in `Color`. The `title` argument is accepted for interface
# compatibility but is not used. The y axis is log10-scaled and the
# legend is suppressed.
plot_abundance <- function(physeq, title = "",
                           Facet = "Order", Color = "Phylum") {
  # Arbitrary subset, based on Phylum, for plotting
  firmicutes <- subset_taxa(physeq, Phylum %in% c("p__Firmicutes"))
  # Long-format table restricted to strictly positive abundances
  melted <- subset(psmelt(firmicutes), Abundance > 0)
  ggplot(data = melted,
         mapping = aes_string(x = "Genotype", y = "Abundance",
                              color = Color, fill = Color)) +
    geom_violin(fill = NA) +
    geom_point(size = 1, alpha = 0.3,
               position = position_jitter(width = 0.3)) +
    facet_wrap(facets = Facet) +
    scale_y_log10() +
    theme(legend.position = "none")
}
# Transform to relative abundance. Save as new object.
psra = transform_sample_counts(ps, function(x){x / sum(x)})
plotBefore = plot_abundance(ps2,"")
plotAfter = plot_abundance(ps2ra,"")
# Combine each plot into one graphic.
grid.arrange(nrow = 2, plotBefore, plotAfter)
# Subset by Taxonomy
psOrd = subset_taxa(ps2ra, Order == "o__Erysipelotrichales")
plot_abundance(psOrd, Facet = "Genus", Color = NULL)
### Moving On
ps <-ps2
saveRDS(ps, file='Data/Phyloseq_filtered.rds')
|
2defe60191c25f6f40f740e298ea5abc5d8ad09a | b8180cbb66ddc84d8960b63c0605d9e57403a5b1 | /R_Code/scriptDayTwo.R | 4d4d90a4f891ea5dbd9d4c53270c0134cd519d1c | [] | no_license | compStat/programming-course | 12b41e362391d069914557f1f610af386ab49aae | cf50f8cd69430b071773fd565a0836e460bddafe | refs/heads/master | 2021-01-20T06:43:07.083209 | 2017-05-03T15:10:54 | 2017-05-03T15:10:54 | 89,914,758 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,581 | r | scriptDayTwo.R | ## Funktionen Recap
## Demo function: errors when its argument is absent; otherwise labels
## the input with the name "x" (only the first element gets a name when
## the input has length > 1) and returns it.
someFunction <- function(x) {
  if (missing(x)) {
    stop("x is missing")
  }
  stats::setNames(x, "x")
}
dump <- someFunction()
## Subset a data frame by row and/or column index, always returning a
## data frame (drop = FALSE). Omitted indices default to "all rows" /
## "all columns".
## Bug fix: `1:nrow(df)` evaluates to c(1, 0) on a zero-row frame (and
## likewise for columns), fabricating a phantom NA row; seq_len() handles
## the empty case correctly.
extract <- function(df, i, j) {
  rowInd <- if (missing(i)) seq_len(nrow(df)) else i
  colInd <- if (missing(j)) seq_len(ncol(df)) else j
  df[rowInd, colInd, drop = FALSE]
}
## Logical row index: TRUE where the x column of `df` exceeds 5.
## (The original spelled the comparison as the prefix call `">"(df$x, 5)`;
## the infix form below is equivalent.)
constructInd <- function(df) {
  df$x > 5
}
df <- data.frame(x = 1:10)
df <- extract(df, constructInd(df))
## Generic Funcitons
numericVector <- c(1, 3, 6, 4, 9)
mean(numericVector)
characterVector <- c("ja", "mehr", "wörter")
meanCharacter <- function(x) mean(nchar(x))
meanCharacter(characterVector)
mean(characterVector)
mean <- function(x, ...) {
if (is.numeric(x) || is.logical(x)) {
base::mean(x, ...)
} else if (is.character(x)) {
mean(nchar(x), ...)
} else {
stop("...")
}
}
mean(characterVector)
## Teaching example: shadow base::mean with a fresh S3 generic so custom
## methods can be attached and dispatched below.
mean <- function(x, ...) UseMethod("mean")
## Default method: plain arithmetic mean.
## NOTE(review): unlike base::mean this has no na.rm support, so any NA in
## x yields NA.
mean.default <- function(x, ...) {
  sum(x) / length(x)
}
## Character method: average the string lengths (nchar returns integers,
## so the recursive call dispatches to mean.default).
mean.character <- function(x, ...) {
  mean(nchar(x), ...)
}
## Logical method: deliberately errors ("Fehler" is German for "error") to
## demonstrate dispatch; base::mean would have treated TRUE/FALSE as 1/0.
mean.logical <- function(x, ...) {
  stop("Fehler")
}
mean(1:10)
mean(TRUE)
mean("a")
## S3 Classes
# see package
## Debugging
## Profiling demo: a small call chain f -> g -> h whose runtime is
## dominated by the matrix work inside h (profiled with Rprof below).
f <- function(x) {
  x - g(x)
}
g <- function(y) {
  y * h(y)
}
## h optionally logs its input (and later the intermediate r) through the
## third-party futile.logger package, computes r = log(|z|), burns CPU by
## inverting the crossproduct of a 1000x1000 random normal matrix (the
## inverse is discarded -- it exists purely to show up in the profile),
## and returns r^2 when r < 10, otherwise r^3.
## NOTE(review): rnorm(10e6) draws 1e7 values but a 1000x1000 matrix holds
## only 1e6; presumably 1e6 was intended -- confirm before reusing.
h <- function(z, verbose = TRUE) {
  if (verbose) futile.logger::flog.info(z)
  r <- log(abs(z))
  solve(crossprod(matrix(rnorm(10e6), nrow = 1000, ncol = 1000)))
  if (verbose) futile.logger::flog.error(r)
  if (r < 10) r^2
  else r^3
}
Rprof(tmp <- tempfile())
testOut <- f(10)
Rprof()
profileSummary <- summaryRprof(tmp)
unlink(tmp)
profileSummary$by.total
|
8a5aa2e7437c22ba0d02cd8c659a491e7b1d0a66 | 3d95dea75ac4f8d8d882746813758486de5f4227 | /List.r | 0e32a61c5e8522fae4ec4a918984bd5f935721bc | [] | no_license | kimalaacer/Tutorial-on-R | 755694dcab369345eb8f925c87b7c3b307b9b01e | 14ec2de76112de2e482383cd0bf2ccac30a51c14 | refs/heads/master | 2020-07-22T01:44:16.728400 | 2019-09-13T15:04:54 | 2019-09-13T15:04:54 | 207,035,244 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,001 | r | List.r | list1<-list(1,2,3)
list1
list2<-list(c(1,2,3))
list2
list3<-list(c(1,2,3), 3:7)
list3
theDF<-data.frame(First=1:5, Second=5:1, Sport=c('Hockey', 'Lacrosse', 'Football', 'Curling', 'Tennis'), stringsAsFactors=FALSE)
theDF
list4 <- list(theDF, 1:10)
list4
list5 <- list(theDF, 1:10, list3)
list5
names(list5)
names(list5) <- c('data.frame', 'vector', 'list')
names(list5)
list5
list6 <- list(TheDataFrame=theDF, TheVector=1:10, TheList=list3)
list6
names(list6)
emptyList <- vector(mode = 'list',length = 4)
emptyList
emptyList[[1]] <- 5
emptyList
list5[[1]]
names(list5)
list5[['data.frame']]
list5[[1]]$Sport
#returns as a vector after $sign: we are specifying the sport column from the data.frame
#similarly we can use brackets:
list5[[1]][,'Second']
#returns as a vector.
list5[[1]][,'Second', drop = FALSE]
#returns as a data frame
length(list5)
list5[[4]] <- 2
list5
list5[['NewElement']] <- 3:6
length(list5)
names(list5)
|
99a39fbdfe05f4a24bc702da65b393c052110f01 | 27806debbb5432bc70e9dcb22e74019ee3889ded | /man/ldknn.Rd | 3a565a96e73d6fa701d4160a415ed17d70d9bd74 | [] | no_license | estebahr/vwr | 98feb7086a6e6c3364b09983d30411eeeafdc32e | 1918a2c2a711dd292416e949e4c12bf224eea858 | refs/heads/master | 2023-03-17T11:45:23.311841 | 2013-08-07T00:00:00 | 2013-08-07T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,293 | rd | ldknn.Rd | \name{ldknn}
\alias{ldknn}
\alias{ld1nn}
\title{
Run the ldknn algorithm
}
\description{
The ldknn algorithm is used to detect bias in the composition of a lexical decision task, using k-nearest neighbor classification and the Levenshtein distance metric.
}
\usage{
ldknn(stimuli, types, reference, k = 1, method='levenshtein', parallel = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{stimuli}{
character strings corresponding to the stimuli in the experiment.}
\item{types}{
factor corresponding to the type of each stimulus in the experiment.}
\item{reference}{
a character string giving the reference level. Must be a level of the factor in \code{types}}
\item{k}{
a value for the k parameter. Set to 1 by default.
}
\item{method}{
\itemize{
\item{'levenshtein': uses \code{\link{levenshtein.distance}} to calculate distances}
\item{'levenshtein.damerau': uses \code{\link{levenshtein.damerau.distance}} to calculate distances}
}
}
\item{parallel}{
with parallel=TRUE, \code{ldknn} will run in parallel an multiple cores. The number of parallel processes is specified by \code{detectCores(logical = FALSE)}.}
}
\details{
Combining k nearest neighbor classification with the Levenshtein distance produces an algorithm which can be described as follows.
For an experiment containing a number of stimuli, which can be words or nonwords:
\enumerate{
\item{Compute the Levenshtein distances between the currently presented stimulus and all previously presented stimuli.}
\item{Identify the previously presented stimuli that are at the k nearest distances from the current stimulus.}
\item{Compute the probability of a word response for the given stimulus based on the relative frequency of words among the nearest neighbors.}
}
}
\value{
A list with class \code{ldknn.run}.
\item{data}{A data frame containing the results of the run. \code{stimulus} gives the stimulus values, \code{type} gives the types of the stimuli, \code{p} gives the probability for a \code{reference.level} response for that stimulus.}
\item{reference level}{The reference level used for the simulation.}
\item{Odds}{The odds, z value, and p value for a reference level response, resulting from a logistic regression in which the probabilities generated by the ldknn algorithm are used to predict stimulus types.}
\code{plot} and \code{print} methods are available for objects of class \code{ld1nn.run}
}
\references{
Keuleers, E., & Brysbaert, M. (2011). Detecting inherent bias in lexical decision experiments with the LD1NN algorithm. \emph{The Mental Lexicon, 6}(1), 34–52.}
\author{
Emmanuel Keuleers}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{levenshtein.distance}, \link{levenshtein.damerau.distance}}
}
\examples{
data(english.words)
data(basque.words)
# set up a mock experiment: English stimuli are words, Basque stimuli are nonwords
experiment<-data.frame(stimulus=c(sample(english.words,500),
sample(basque.words,500)),
type=factor(rep(c('Word','Nonword'),each=500),levels=c('Word','Nonword')))
# randomize the trials
experiment<-experiment[sample(1:1000,1000),]
# run the ldknn algorithm
results<-ldknn(experiment$stimulus,experiment$type,'Word')
print(results)
plot(results)
}
|
82523bdc318d66cd23ac8006f229598fe4acbcc3 | 483ca5716cc806b9be7911032d356e149b79cf1b | /inst/scripts/examples.R | 8e29112bee3ecaa231b3a42dbd300b94d784ad4c | [] | no_license | raredd/forest | 8f66f5af070a082e1576654e91023e3afbcfdf9e | 9b63e67cf35ce581aa23f5504383f528aa686be8 | refs/heads/master | 2023-08-16T15:44:17.507643 | 2023-07-24T17:53:03 | 2023-07-24T17:53:03 | 85,359,461 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 998 | r | examples.R | library('forest')
library('cmprsk2')
# simulated data to test
set.seed(1)
n <- 500L
dd <- data.frame(
ftime = rexp(n),
fstatus = sample(0:2, n, replace = TRUE),
x1 = runif(n),
x2 = runif(n),
x3 = runif(n),
factor1 = factor(sample(1:3, n, TRUE)),
factor2 = factor(sample(1:2, n, TRUE))
)
dd[] <- lapply(dd, function(x) {
if (!is.factor(x))
x[sample(length(x), sample(1:10))] <- NA
x
})
x <- with(dd, crr(ftime, fstatus,
cbind(x1, x2, x3, model.matrix(~factor1 + factor2)[, -1])
))
y <- crr2(Surv(ftime, fstatus(0) == 1) ~ ., dd)
clean <- cleanfp(x, formula = ftime + fstatus ~ ., dd)
clean_ref <- add_reference(clean)
prep_list <- prepare_forest(clean_ref)
plot(prep_list, show_conf = TRUE)
x <- crr2(Surv(futime, event(censored) == death) ~ age + sex + abo, transplant)
fp <- cleanfp(x$`CRR: death`, futime ~ age + sex + abo, transplant)
fp <- cleanfp(x)
fp <- add_reference(fp)
fp <- prepare_forest(fp)
plot(fp, show_conf = TRUE)
forest(x)
|
92d34e2a0e23788ebba984ec9ce502174b19c8db | 71ac4d51bf2b1fac3db8339a3f3bfde9cf47da0e | /functions.R | aa4688a8edbc34ecb96ae1d00486f4088cfc61b7 | [] | no_license | kharoof/Rugby-Stats | 8f472604fd86f1cbc3fee546bb0d712962f59247 | 613c8981c025f0a61b0d5b577e978704e9bfbb68 | refs/heads/master | 2021-11-07T05:50:26.583233 | 2021-11-02T12:30:56 | 2021-11-02T12:30:56 | 30,774,965 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,578 | r | functions.R | library(lubridate)
####################################################################################################
## Functions that clean the results for a match
####################################################################################################
## Format dates, add team and opponent fields and pts diff win lose etc...
## cleanResults() tidies one scraped results table from the point of view
## of `team`. Expects `data` with seven columns in the order: date
## ("dd/mm/yyyy"), local time, home team, away team, Score (a string such
## as "12-7", split on "-" or "v"), venue, url; the fifth column must be
## named `Score` before the renaming below.
## NOTE(review): year()/month() come from lubridate (loaded at the top of
## this file); the ifelse() calls on the *.team columns assume those
## columns are character, not factor -- confirm against the scraper output.
cleanResults <- function(data, team){
  # Split "Score" into numeric home/away score columns (two per row)
  data = cbind(data,data.frame(matrix(as.numeric(unlist(strsplit(data$Score, "-|v"))), ncol=2, byrow=T)))
  names(data) <- c("date", "time.local", "home.team", "away.team", "score", "venue","url", "home.score", "away.score")
  data$date = as.Date(data$date, "%d/%m/%Y")
  # Everything below re-expresses the fixture from `team`'s perspective
  data$opponent <- ifelse(data$home.team==team, data$away.team, data$home.team)
  data$opponent <- as.factor(data$opponent)
  data$home.away <- ifelse(data$home.team==team, "Home", "Away")
  data$home.away <- as.factor(data$home.away)
  data$pts.diff <- ifelse(data$home.team==team, data$home.score - data$away.score,data$away.score - data$home.score)
  data$team.score <- ifelse(data$home.team==team, data$home.score ,data$away.score )
  data$opponent.score <- ifelse(data$home.team==team, data$away.score ,data$home.score )
  data$year <- as.ordered(year(data$date))
  data$month <- as.ordered(month(data$date))
  # Positive margin = win, negative = loss, zero = draw
  data$win.lose <- as.factor(ifelse(data$pts.diff>0 , "Win",ifelse(data$pts.diff<0 , "Lose","Draw" ) ))
  data$team <- team
  # Keep only the tidy columns, in a fixed order
  keep = c("date", "year", "month","time.local", "team", "opponent", "pts.diff", "home.away", "win.lose", "url","opponent.score", "team.score")
  data <- data[,keep]
  return(data)
}
|
a208ba854f9b0a7e66639ede2b0d18b50db6019e | a33b1a6c61f80539343be9ac6aec5412f30cdc12 | /20170620geologyGeometry/library/ellipsoids.R | dd3c11c531f2f8e65906410f1baae397ec82286c | [
"MIT",
"Apache-2.0"
] | permissive | nicolasmroberts/nicolasmroberts.github.io | 9a143c93859f2b3f133ade1acf54fb1ba1c966d3 | f6e8a5a02eea031fb68c926d6d922846eeb71781 | refs/heads/master | 2022-09-08T22:03:26.646877 | 2022-07-27T20:50:50 | 2022-07-27T20:50:50 | 117,170,161 | 0 | 1 | MIT | 2018-03-04T23:16:00 | 2018-01-12T00:23:20 | HTML | UTF-8 | R | false | false | 45,411 | r | ellipsoids.R |
# Copyright 2016-2017 Joshua R. Davis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# An ellipsoid is a complete description of an ellipsoid in three-dimensional space: its size, shape, and orientation (but not its location). There are three degrees of freedom in orientation and three degrees of freedom in size-shape. Sometimes we normalize ellipsoids to have a particular volume, in which case only two degrees of freedom remain in shape. An ellipsoid can be described as a symmetric, positive-definite ellipsoid tensor E, in that the (boundary of the) ellipsoid is the set of points x such that x^T E x == 1. Or it can be described as an orientation and semi-axis lengths, or as a log-ellipsoid vector. In this R code, an 'ellipsoid' is a list of five elements: $vector, $tensor, $a, $logA, $rotation. $a are the semi-axis lengths, and $logA is their logarithms. $rotation is a special orthogonal matrix with the semi-axes of the ellipsoid along its rows. See also geoDataFromFile. Because they inhabit a vector space, we can throw all of multivariate statistics at the ellipsoid vectors. For ideas, see http://cran.r-project.org/web/views/Multivariate.html.
### CONVERSIONS AMONG REPRESENTATIONS ###
#' A log-ellipsoid tensor from an ellipsoid tensor, respecting normalization.
#'
#' Computes the symmetric matrix logarithm via the spectral decomposition
#' ell = V D V^T, returning V log(D) V^T.
#' @param ell A real 3x3 matrix (symmetric, positive-definite).
#' @return A real 3x3 matrix (symmetric).
ellLog <- function(ell) {
  decomp <- eigen(ell, symmetric=TRUE)
  vs <- decomp$vectors
  vs %*% diag(log(decomp$values)) %*% t(vs)
}
#' An ellipsoid tensor from a log-ellipsoid tensor, respecting normalization.
#'
#' Computes the symmetric matrix exponential via the spectral decomposition
#' logEll = V D V^T, returning V exp(D) V^T.
#' @param logEll A real 3x3 matrix (symmetric).
#' @return A real 3x3 matrix (symmetric, positive-definite).
ellExp <- function(logEll) {
  decomp <- eigen(logEll, symmetric=TRUE)
  vs <- decomp$vectors
  vs %*% diag(exp(decomp$values)) %*% t(vs)
}
#' An ellipsoid 6-vector from an unnormalized log-ellipsoid tensor.
#'
#' The conversion is tuned so that the Frobenius inner product on matrices
#' maps to the dot product on vectors: the three off-diagonal entries are
#' scaled by sqrt(2) (they each occur twice in the symmetric matrix) and the
#' diagonal entries are copied as-is.
#' @param logEll A real 3x3 matrix (symmetric).
#' @return A 6-dimensional real vector.
ellVectorFromLog <- function(logEll) {
  offDiag <- sqrt(2) * c(logEll[1, 2], logEll[1, 3], logEll[2, 3])
  c(offDiag, diag(logEll))
}
#' An unnormalized log-ellipsoid tensor from an ellipsoid 6-vector.
#'
#' The conversion is tuned so that the Frobenius inner product on matrices
#' maps to the dot product on vectors; it is the inverse of
#' ellVectorFromLog: components 4-6 populate the diagonal and components
#' 1-3, divided by sqrt(2), populate the symmetric off-diagonal entries.
#' @param vec A 6-dimensional real vector.
#' @return A real 3x3 matrix (symmetric).
ellLogFromVector <- function(vec) {
  m <- diag(c(vec[[4]], vec[[5]], vec[[6]]))
  m[1, 2] <- m[2, 1] <- vec[[1]] / sqrt(2)
  m[1, 3] <- m[3, 1] <- vec[[2]] / sqrt(2)
  m[2, 3] <- m[3, 2] <- vec[[3]] / sqrt(2)
  m
}
#' An ellipsoid 5-vector from a normalized log-ellipsoid tensor.
#'
#' The conversion is tuned so that the Frobenius inner product on matrices
#' maps to the dot product on vectors. Only five components are needed
#' because the trace-zero constraint fixes the third diagonal entry.
#' @param logEll A real 3x3 matrix (symmetric, trace-zero).
#' @return A 5-dimensional real vector.
ellNormalizedVectorFromLog <- function(logEll) {
  # Bug fix: the body previously referenced `normLogEll`, a name that does
  # not exist in this scope, so the function could never run as written.
  v1 <- sqrt(2) * logEll[1, 2]
  v2 <- sqrt(2) * logEll[1, 3]
  v3 <- sqrt(2) * logEll[2, 3]
  # Two independent diagonal degrees of freedom (trace zero fixes the third).
  v4 <- (logEll[2, 2] + logEll[1, 1]) * sqrt(1.5)
  v5 <- (logEll[2, 2] - logEll[1, 1]) / sqrt(2)
  c(v1, v2, v3, v4, v5)
}
#' A normalized log-ellipsoid tensor from an ellipsoid 5-vector.
#'
#' The conversion is tuned so that the Frobenius inner product on matrices
#' maps to the dot product on vectors; it is the inverse of
#' ellNormalizedVectorFromLog. The diagonal is reconstructed from the two
#' independent components so that the result is trace-zero.
#' @param vec A 5-dimensional real vector.
#' @return A real 3x3 matrix (symmetric, trace-zero).
ellLogFromNormalizedVector <- function(vec) {
  d1 <- vec[[4]] / sqrt(6) - vec[[5]] / sqrt(2)
  d2 <- vec[[4]] / sqrt(6) + vec[[5]] / sqrt(2)
  d3 <- -sqrt(2 / 3) * vec[[4]]
  m <- diag(c(d1, d2, d3))
  m[1, 2] <- m[2, 1] <- vec[[1]] / sqrt(2)
  m[1, 3] <- m[3, 1] <- vec[[2]] / sqrt(2)
  m[2, 3] <- m[3, 2] <- vec[[3]] / sqrt(2)
  m
}
#' An ellipsoid from an ellipsoid vector.
#'
#' @param v An ellipsoid vector, either 5-dimensional (if normalized) or 6-dimensional (if not).
#' @return An ellipsoid: a list with $vector, $a (semi-axis lengths), $logA
#'   (their logarithms), $rotation (special orthogonal, semi-axes along its
#'   rows), and $tensor.
ellEllipsoidFromVector <- function(v) {
  # Diagonalize the log ellipsoid tensor. A 5-vector denotes a normalized
  # (trace-zero) tensor, a 6-vector an unnormalized one.
  if (length(v) == 5)
    logEll <- ellLogFromNormalizedVector(v)
  else
    logEll <- ellLogFromVector(v)
  eig <- eigen(logEll, symmetric=TRUE)
  # Everything else follows from that diagonalization: the log tensor is
  # R^T diag(-2 logA) R, so each log semi-axis length is -1/2 an eigenvalue.
  logA <- -0.5 * eig$values
  a <- exp(logA)
  rotation <- t(eig$vectors)
  # Flip the third row if needed so the rotation has determinant +1.
  if (det(rotation) < 0)
    rotation[3,] <- -rotation[3,]
  # Reconstruct the ellipsoid tensor as exp(logEll).
  tensor <- eig$vectors %*% diag(exp(eig$values)) %*% t(eig$vectors)
  list(vector=v, a=a, logA=logA, rotation=rotation, tensor=tensor)
}
#' An ellipsoid from its orientation and the logarithms of its semi-axis lengths.
#'
#' @param r A real 3x3 matrix (special orthogonal), with the semi-axis directions along its rows.
#' @param logA A real 3-dimensional vector. The logarithms of the semi-axis lengths, in order corresponding to the rows of r.
#' @param doNormalize A Boolean. If TRUE, normalize logA to sum to zero (unit-volume ellipsoid).
#' @return An ellipsoid: a list with members $vector, $a, $logA, $rotation, $tensor.
ellEllipsoidFromRotationLogA <- function(r, logA, doNormalize=FALSE) {
  if (doNormalize)
    logA <- logA - sum(logA) / 3
  a <- exp(logA)
  # Ellipsoid tensor E = R^T diag(a^-2) R and its matrix logarithm.
  tensor <- t(r) %*% diag(a^-2) %*% r
  logEll <- t(r) %*% diag(-2 * logA) %*% r
  v <- if (doNormalize) ellNormalizedVectorFromLog(logEll) else ellVectorFromLog(logEll)
  list(vector=v, a=a, logA=logA, rotation=r, tensor=tensor)
}
# Ellipsoid tensor E = R^T diag(a^-2) R from an orientation R (semi-axis
# directions along rows) and semi-axis lengths a.
ellTensorFromRotationA <- function(r, a) {
  scaled <- diag(a^-2) %*% r
  t(r) %*% scaled
}
# Recover an orientation (rows = semi-axis directions) and semi-axis lengths
# from an ellipsoid tensor E, via its eigendecomposition: a_i = lambda_i^(-1/2).
ellRotationAFromTensor <- function(e) {
  eig <- eigen(e, symmetric=TRUE)
  semiAxes <- eig$values^(-1 / 2)
  rotation <- t(eig$vectors)
  # Flip one axis if needed so that the rotation has determinant +1.
  if (det(rotation) < 0)
    rotation[3,] <- -rotation[3,]
  list(rotation=rotation, a=semiAxes)
}
# An ellipsoid (list with $vector, $logA, $a, $rotation, $tensor) from its
# tensor E. If doNormalize, the log semi-axis lengths are shifted to sum to zero.
ellEllipsoidFromTensor <- function(e, doNormalize=FALSE) {
  ra <- ellRotationAFromTensor(e)
  logA <- log(ra$a)
  if (doNormalize)
    logA <- logA - sum(logA) / 3
  # Matrix logarithm of the (possibly normalized) tensor.
  logEll <- t(ra$rotation) %*% diag(-2 * logA) %*% ra$rotation
  v <- if (doNormalize) ellNormalizedVectorFromLog(logEll) else ellVectorFromLog(logEll)
  list(vector=v, logA=logA, a=exp(logA), rotation=ra$rotation, tensor=e)
}
#' Ellipsoid orientation with axes in ascending or descending order.
#'
#' @param rot A real 3x3 matrix (special orthogonal), with the semi-axis directions along its rows.
#' @param aOrLogA A real 3-dimensional vector. The semi-axis lengths or their logarithms, in order corresponding to the rows of rot.
#' @param descending A Boolean. If TRUE, then sort axes in descending order. If FALSE, then sort in ascending order.
#' @return A real 3x3 matrix (special orthogonal). This matrix equals the input matrix, with its rows reordered and possibly one row negated to maintain determinant 1.
ellAscendingRotation <- function(rot, aOrLogA, descending=FALSE) {
  perm <- order(aOrLogA, decreasing=descending)
  first <- rot[perm[[1]],]
  second <- rot[perm[[2]],]
  third <- rot[perm[[3]],]
  # Negate the last row, if necessary, to keep the frame right-handed.
  if (dot(cross(first, second), third) > 0)
    rbind(first, second, third)
  else
    rbind(first, second, -third)
}
# Convenience wrapper around ellAscendingRotation: reorder the rows of rot so
# the axes run longest to shortest, preserving determinant +1.
ellDescendingRotation <- function(rot, aOrLogA) {
  ellAscendingRotation(rot, aOrLogA, descending=TRUE)
}
#' Ellipsoid orientation with axes ordered short, then long, then intermediate.
#'
#' This function is useful for comparing ellipsoid orientations to foliation-lineation orientations.
#' @param rot A real 3x3 matrix (special orthogonal), with the semi-axis directions along its rows.
#' @param aOrLogA A real 3-dimensional vector. The semi-axis lengths or their logarithms, in order corresponding to the rows of rot.
#' @return A real 3x3 matrix (special orthogonal). This matrix equals the input matrix, with its rows reordered and possibly one row negated to maintain determinant 1.
ellPoleDirectionRotation <- function(rot, aOrLogA) {
  perm <- order(aOrLogA)
  first <- rot[perm[[1]],]   # shortest axis (pole)
  second <- rot[perm[[3]],]  # longest axis (direction)
  third <- rot[perm[[2]],]   # intermediate axis
  # Negate the last row, if necessary, to keep the frame right-handed.
  if (dot(cross(first, second), third) > 0)
    rbind(first, second, third)
  else
    rbind(first, second, -third)
}
### SIZE AND SHAPE ###
#' Size-related tensor invariant of ellipsoids. Tantamount to volume.
#'
#' The first invariant (trace) of log E^(-1 / 2), where E is the ellipsoid tensor. The volume of the ellipsoid is (4 pi / 3) * exp(size). Positive for large ellipsoids, zero for normalized ones, negative for small ones.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number (can achieve any real value).
ellSizeInvariant <- function(logs) {
  # The trace is just the sum of the log semi-axis lengths.
  sum(logs)
}
#' Strain-related tensor invariant of ellipsoids. Tantamount to octahedral shear strain.
#'
#' The second invariant of log E^(-1 / 2), where E is the ellipsoid tensor. Equals zero for spheres. For normalized ellipsoids, this strain == -Es^2 / 2, where Es is the octahedral shear strain.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number <= 0.
ellStrainInvariant <- function(logs) {
  # Sum of the pairwise products of the log semi-axis lengths.
  logs[[1]] * logs[[2]] + logs[[2]] * logs[[3]] + logs[[3]] * logs[[1]]
}
#' Shape-related tensor invariant of ellipsoids. An analogue of Lode's parameter.
#'
#' The third invariant (determinant) of log E^(-1 / 2), where E is the ellipsoid tensor. For normalized ellipsoids, this shape is positive for oblate ellipsoids, zero for 'plane strain' ellipsoids, and negative for prolate ellipsoids. In this sense it is analogous to (but not equal to or even tantamount to) Lode's parameter.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number (can achieve any real value).
ellShapeInvariant <- function(logs) {
  # The determinant of the diagonalized log tensor is the product of the logs.
  prod(logs)
}
#' Volume of an ellipsoid.
#'
#' (4 pi / 3) times the product of the semi-axis lengths.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number > 0.
ellVolume <- function(logs) {
  # prod(a) == exp(sum(log(a))).
  exp(sum(logs)) * 4 * pi / 3
}
#' Octahedral shear strain e_s.
#'
#' The root-mean-square (over 3) of the pairwise differences of the log semi-axis lengths. Zero exactly for spheres.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number >= 0.
ellOctahedralShearStrain <- function(logs) {
  diffs <- c(logs[[1]] - logs[[2]], logs[[2]] - logs[[3]], logs[[3]] - logs[[1]])
  sqrt(sum(diffs^2) / 3)
}
#' Lode's parameter nu.
#'
#' nu is undefined for spheres, but we arbitrarily declare nu = 0 for them. Otherwise -1 <= nu <= 1. nu = -1 for prolate spheroids and nu = 1 for oblate spheroids.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number (in the interval [-1, 1]), unless it fails.
ellLodeNu <- function(logs) {
  # Sort so that l1 >= l2 >= l3.
  l1 <- max(logs)
  l3 <- min(logs)
  # Sphere: all logs equal, so nu would be 0 / 0; return 0 by convention.
  if (l1 == l3)
    return(0)
  l2 <- sum(logs) - l1 - l3
  (2 * l2 - l1 - l3) / (l1 - l3)
}
#' The statistic P_j of Jelinek (1981).
#'
#' Equals 1 for spheres and grows with anisotropy.
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number >= 1.
ellJelinekP <- function(logs) {
  # We used to have an '8' in the square root, where Jelinek had a '2', because we thought that he was working with eta_i = -2 l_i. But on 2016/03/10 Mike Jackson at the IRM told me that, according to all recent authors, AMS ellipsoids are 'magnitude ellipsoids' (e.g., Hrouda, 1982), whose semi-axis lengths are the principal susceptibilities, which are the eigenvalues of the susceptibility tensor. So it seems that eta_i = l_i now. And saying so reproduces the IRM's computed P_j.
  deviatoric <- logs - sum(logs) / 3
  exp(sqrt(2 * dot(deviatoric, deviatoric)))
}
#' Flinn's K measure of ellipsoid shape.
#'
#' Fails in the case of a prolate spheroid or sphere (division by zero). Zero in the case of an oblate spheroid (that is not a sphere).
#' @param logs A real 3D vector. The logs of the ellipsoid's semi-axis lengths, in any order.
#' @return A real number >= 0, unless it fails.
ellFlinnK <- function(logs) {
  # Sort so that l1 >= l2 >= l3, then exponentiate back to semi-axis lengths.
  l1 <- max(logs)
  l3 <- min(logs)
  l2 <- sum(logs) - l1 - l3
  a1 <- exp(l1)
  a2 <- exp(l2)
  a3 <- exp(l3)
  # Ratio of the prolate-ness (a1 / a2) to the oblate-ness (a2 / a3).
  (a1 / a2 - 1) / (a2 / a3 - 1)
}
#' The logs of an ellipsoid's semi-axis lengths, from three other measures of shape.
#'
#' Inverts the (volume, octahedral shear strain, Lode's nu) description of an
#' ellipsoid back to the three semi-axis log-lengths, sorted descending.
#' @param vEsNu A real 3D vector consisting of volume, octahedral shear strain, and Lode's parameter nu.
#' @return A real 3D vector, consisting of the logs of the ellipsoid's semi-axis lengths.
ellLogAFromVEsNu <- function(vEsNu) {
  # Invert the volume-logs relationship.
  sumOfLogs <- log(vEsNu[[1]] * 3 / (4 * pi))
  # From the definition of nu: logs1 = alpha + beta logs3.
  alpha <- sumOfLogs * 2 / (vEsNu[[3]] + 3)
  beta <- (vEsNu[[3]] - 3) / (vEsNu[[3]] + 3)
  # Likewise logs2 = gamma + delta logs3.
  gamma <- (1 - 2 / (vEsNu[[3]] + 3)) * sumOfLogs
  delta <- -1 - beta
  # Substituting into the octahedral shear strain gives a quadratic in logs3.
  aa <- 2 - 2 * beta + 2 * beta^2 - 2 * delta - 2 * beta * delta + 2 * delta^2
  bb <- -2 * alpha + 4 * alpha * beta - 2 * gamma - 2 * beta * gamma - 2 * alpha * delta + 4 * gamma * delta
  cc <- 2 * alpha^2 - 2 * alpha * gamma + 2 * gamma^2 - 3 * vEsNu[[2]]^2
  # Solve the quadratic aa logs3^2 + bb logs3 + cc == 0 and back out the other logs.
  logs3 <- realQuadraticSolutions(aa, bb, cc)
  logs1 <- sapply(logs3, function(l3) {alpha + beta * l3})
  logs2 <- sapply(logs3, function(l3) {gamma + delta * l3})
  sols <- lapply(seq_along(logs3), function(i) c(logs1[[i]], logs2[[i]], logs3[[i]]))
  # Choose the solution such that logs1 >= logs2 >= logs3, as required by nu.
  sols <- Filter(function(sol) {sol[[1]] >= sol[[2]] && sol[[2]] >= sol[[3]]}, sols)
  # Fixed: the old diagnostic misnamed this function 'ellLogsFromVEsNu', used
  # print instead of warning, and crashed opaquely on an empty solution set.
  if (length(sols) == 0)
    stop("ellLogAFromVEsNu: no solution with logs1 >= logs2 >= logs3 found", call.=FALSE)
  if (length(sols) > 1)
    warning("ellLogAFromVEsNu: did not find one and only one solution as expected; returning the first", call.=FALSE)
  sols[[1]]
}
### DESCRIPTIVE STATISTICS ###
#' Geometric mean.
#'
#' The arithmetic mean of the ellipsoid vectors, converted back to an ellipsoid.
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @return An ellipsoid.
ellMean <- function(vectors) {
  meanVector <- arithmeticMean(vectors)
  ellEllipsoidFromVector(meanVector)
}
#' Covariance matrix.
#'
#' The vectors are automatically centered about their mean. The denominator is n - 1, not n.
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @return A 5x5 or 6x6 real matrix.
ellCovariance <- function(vectors) {
  # Stack the vectors as rows (observations) and let stats::var do the rest.
  dataMatrix <- do.call(rbind, vectors)
  var(dataMatrix)
}
#' Convenience shortcut to the eigenvalues of the covariance matrix.
#'
#' These 5 or 6 numbers quantify the dispersion of the ellipsoids.
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @return A 5- or 6-dimensional vector, containing the eigenvalues of the variance.
ellCovarianceScalars <- function(vectors) {
  covar <- ellCovariance(vectors)
  eigen(covar, only.values=TRUE)$values
}
#' Principal component analysis.
#'
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @return See prcomp.
ellPrincipalComponentAnalysis <- function(vectors) {
  # Observations as rows, coordinates as columns.
  prcomp(do.call(rbind, vectors))
}
### INFERENCE ###
#' One-sample inference based on Hotelling's T2 test.
#'
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @param hypoth A 5- or 6-dimensional real vector, respectively.
#' @param fOrChi Character. Should be 'f' or 'chi'.
#' @return See HotellingsT2.
ellHotellingT2Inference <- function(vectors, hypoth, fOrChi="f") {
  dataMatrix <- t(simplify2array(vectors))
  HotellingsT2(X=dataMatrix, mu=hypoth, test=fOrChi)
}
#' One-sample inference based on the Hotelling test using the MM estimator.
#'
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @param hypoth A 5- or 6-dimensional real vector, respectively.
#' @param numBoots A real number (positive integer). The number of bootstrap samples.
#' @return See FRBhotellingMM.
ellBootstrapMMInference <- function(vectors, hypoth, numBoots=1000, ...) {
  dataMatrix <- t(simplify2array(vectors))
  FRBhotellingMM(X=dataMatrix, mu0=hypoth, R=numBoots, ...)
}
#' One-sample inference based on the Hotelling test using the S estimator.
#'
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @param hypoth A 5- or 6-dimensional real vector, respectively.
#' @param numBoots A real number (positive integer). The number of bootstrap samples.
#' @return See FRBhotellingS.
ellBootstrapSInference <- function(vectors, hypoth, numBoots=1000, ...) {
  dataMatrix <- t(simplify2array(vectors))
  FRBhotellingS(X=dataMatrix, mu0=hypoth, R=numBoots, ...)
}
# 32 or 64 corners of a crude confidence region.
# ci is a 2xd matrix of per-coordinate confidence limits (row 1 = lower,
# row 2 = upper); the result is the list of all 2^d corner vectors.
# Fixed: the old test was `!class(ci) == "matrix"`, which breaks in R >= 4.0,
# where class(matrix) is c("matrix", "array") — the condition has length 2,
# which `if` rejects (an error since R 4.2). is.matrix() is the robust check.
ellCICombinationVectors <- function(ci) {
  if (!is.matrix(ci))
    # Base case: a single column has been reduced to a length-2 vector.
    list(ci[[1]], ci[[2]])
  else {
    # Recurse on the remaining columns, then prefix each corner with the
    # lower and upper limits of the first coordinate.
    recursive <- ellCICombinationVectors(ci[,(2:ncol(ci))])
    c(lapply(recursive, function(rec) c(ci[[1, 1]], rec)),
      lapply(recursive, function(rec) c(ci[[2, 1]], rec)))
  }
}
# Generates approximately numSamples^(dim - 1) points on the (dim - 1)-dimensional
# unit sphere in dim-dimensional Euclidean space, by recursively sweeping an
# angle through each added dimension.
ellHighSphereVectors <- function(ambientDimension, numSamples=7) {
  if (ambientDimension == 0)
    return(list())
  if (ambientDimension == 1)
    return(list(1, -1))
  # The same angle schedule is used at every level of the recursion.
  angles <- sapply(0:(numSamples - 1), function(i) (i * 2 * pi + 1) / numSamples)
  if (ambientDimension == 2)
    return(lapply(angles, function(a) c(sin(a), cos(a))))
  # Scale each lower-dimensional point by sin(a) and append cos(a).
  recursive <- ellHighSphereVectors(ambientDimension - 1, numSamples)
  unlist(
    lapply(recursive, function(rec)
      lapply(angles, function(a) c(sin(a) * rec, cos(a)))),
    recursive=FALSE, use.names=FALSE)
}
#' A sampling of points on an ellipsoid in high-dimensional space.
#'
#' d is the dimension of the ambient space. The arguments for this function typically come out of ellBootstrapInference, where d == 5 or d == 6.
#' @param covarInv A real dxd matrix (symmetric, positive-definite).
#' @param center A real dD vector.
#' @param level A real number. Typically q095^2 from ellBootstrapInference.
#' @param numSamples A real number (positive integer). Roughly, the number of samples per dimension.
#' @return A list of dD real vectors.
ellHighEllipsoidVectors <- function(covarInv, center, level, numSamples=7) {
  eig <- eigen(covarInv, symmetric=TRUE)
  # Semi-axis lengths of the level-set ellipsoid of covarInv.
  semiAxes <- sqrt(level) * eig$values^(-0.5)
  unitVectors <- ellHighSphereVectors(ncol(covarInv), numSamples)
  # Stretch each unit vector along the eigenframe and translate to the center.
  lapply(unitVectors, function(u) as.numeric((eig$vectors %*% (semiAxes * u)) + center))
}
# Confidence region based on percentiles of Mahalanobis distance of the points
# ss from the center sBar. Returns a p-value function plus the center, the
# inverse covariance, and a few popular distance percentiles.
ellMahalanobisInference <- function(ss, sBar) {
  deviations <- lapply(ss, function(s) {s - sBar})
  covar <- arithmeticMean(lapply(deviations, function(v) {outer(v, v)}))
  covarInv <- solve(covar)
  # Mahalanobis norm with respect to covarInv.
  mahal <- function(v) {sqrt(v %*% covarInv %*% v)}
  norms <- sapply(deviations, mahal)
  empiricalCDF <- ecdf(norms)
  # The p-value of a hypothesized center is the fraction of points farther away.
  f <- function(s) {
    1 - empiricalCDF(mahal(s - sBar))
  }
  qs <- quantile(norms, probs=c(0.00, 0.25, 0.50, 0.75, 0.95, 0.99, 1.00), names=FALSE)
  list(pvalue=f, center=sBar, covarInv=covarInv,
       q000=qs[[1]], q025=qs[[2]], q050=qs[[3]], q075=qs[[4]], q095=qs[[5]], q099=qs[[6]], q100=qs[[7]])
}
#' One-sample inference based on bootstrapping.
#'
#' @param vectors A list of 5- or 6-dimensional real vectors.
#' @param numBoots A real number (positive integer). The number of bootstrap samples.
#' @return A list with members $pvalue, $center, $covarInv, $bootstraps, $q000, $q025, $q050, $q075, $q095, $q099, $q100. $pvalue is a function with input a 5D or 6D vector and output a real number, the p-value for the null hypothesis that the mean is that vector. $center is the mean of $bootstraps, which are the bootstraps. $covarInv is their inverse covariance matrix at the mean. The $qxxx values are percentiles of Mahalanobis distance among the bootstraps.
ellBootstrapInference <- function(vectors, numBoots=1000) {
  # One bootstrap replicate: the mean of a resample with replacement.
  resampledMean <- function() {
    arithmeticMean(sample(vectors, length(vectors), replace=TRUE))
  }
  boots <- replicate(numBoots, resampledMean(), simplify=FALSE)
  infer <- ellMahalanobisInference(boots, arithmeticMean(boots))
  infer$bootstraps <- boots
  infer
}
#' Two-sample inference based on Hotelling's T2 test.
#'
#' @param firsts A list of 5- or 6-dimensional real vectors.
#' @param seconds A list of 5- or 6-dimensional real vectors.
#' @param fOrChi Character. Should be 'f' or 'chi'.
#' @return See HotellingsT2.
ellTwoSampleHotellingT2Inference <- function(firsts, seconds, fOrChi="f") {
  firstMatrix <- t(simplify2array(firsts))
  secondMatrix <- t(simplify2array(seconds))
  HotellingsT2(X=firstMatrix, Y=secondMatrix, test=fOrChi)
}
#' Two-sample inference based on bootstrapping.
#'
#' @param firsts A list of 5- or 6-dimensional real vectors.
#' @param seconds A list of 5- or 6-dimensional real vectors.
#' @param numBoots A real number (positive integer). The number of bootstrap samples.
#' @return A list with members $pvalue, $center, $covarInv, $bootstraps, $q000, $q025, $q050, $q075, $q095, $q099, $q100. $pvalue is a function with input a 5D or 6D vector and output a real number, the p-value for the null hypothesis that second-mean - first-mean is that vector. $center is the mean of $bootstraps, which are the bootstraps. $covarInv is their inverse covariance matrix at the mean. The $qxxx values are percentiles of Mahalanobis distance among the bootstraps.
ellTwoSampleBootstrapInference <- function(firsts, seconds, numBoots=1000) {
  # One bootstrap replicate: resample each group and difference the means.
  differenceOfMeans <- function() {
    firstMean <- arithmeticMean(sample(firsts, length(firsts), replace=TRUE))
    secondMean <- arithmeticMean(sample(seconds, length(seconds), replace=TRUE))
    secondMean - firstMean
  }
  boots <- replicate(numBoots, differenceOfMeans(), simplify=FALSE)
  infer <- ellMahalanobisInference(boots, arithmeticMean(boots))
  infer$bootstraps <- boots
  infer
}
### FITTING ELLIPSOIDS TO ELLIPTICAL SECTIONS (ROBIN, 2002) ###
# poleRakeOther is a rotation matrix with rows pointing along pole, rake, and other direction.
# Returns coefficients of B11, B12, B13, B23, B22, 1, and extra variable C in three equations.
# Helper for ellRobin (Robin, 2002): each elliptical section contributes three
# linear equations in the unknown tensor entries. Each returned vector holds,
# in order, the coefficients of B11, B12, B13, B23, B22, the constant term,
# and the section's extra unknown C.
ellRobinCoefficients <- function(poleRakeOther, rakeSemiaxisLength, otherSemiaxisLength) {
  # l is the matrix with columns pole, rake, other. Only its second and third columns will be used.
  l <- t(poleRakeOther)
  # First equation, based on (1, 1) entry of the sectional tensor.
  first <- c(
    l[[1, 2]]^2 - l[[3, 2]]^2,
    2 * l[[1, 2]] * l[[2, 2]],
    2 * l[[1, 2]] * l[[3, 2]],
    2 * l[[2, 2]] * l[[3, 2]],
    l[[2, 2]]^2 - l[[3, 2]]^2,
    0,
    -rakeSemiaxisLength^-2)
  # Second equation, based on (2, 2) entry.
  second <- c(
    l[[1, 3]]^2 - l[[3, 3]]^2,
    2 * l[[1, 3]] * l[[2, 3]],
    2 * l[[1, 3]] * l[[3, 3]],
    2 * l[[2, 3]] * l[[3, 3]],
    l[[2, 3]]^2 - l[[3, 3]]^2,
    0,
    -otherSemiaxisLength^-2)
  # Third equation, based on (1, 2) or (2, 1) entry; it has no C term.
  third <- c(
    l[[1, 2]] * l[[1, 3]] - l[[3, 2]] * l[[3, 3]],
    l[[1, 3]] * l[[2, 2]] + l[[1, 2]] * l[[2, 3]],
    l[[1, 3]] * l[[3, 2]] + l[[1, 2]] * l[[3, 3]],
    l[[2, 3]] * l[[3, 2]] + l[[2, 2]] * l[[3, 3]],
    l[[2, 2]] * l[[2, 3]] - l[[3, 2]] * l[[3, 3]],
    3 * l[[3, 2]] * l[[3, 3]],
    0)
  list(first, second, third)
}
#' Fit an ellipsoid to elliptical sections, using their shape but not size.
#'
#' Warning: This function is not well tested. Actually I have reason to believe that it is quite wrong. Anyway, this is the second case treated by Robin (2002). The output ellipsoid tensor is normalized to have trace 3, and might not actually be positive-definite at all.
#' @param poleRakeOthers A list of real 3x3 matrices (special orthogonal). Each matrix describes the orientation of an ellipse in space. The first row is the pole to the plane. The second row is the rake of one of the ellipse's axes in that plane. The third row is the cross product of the first two.
#' @param rakeSemiaxisLengths A vector of real numbers. The length of the semi-axis indicated by the rake in the first argument.
#' @param otherSemiaxisLengths A vector of real numbers. The length of the semi-axis perpendicular to the rake.
#' @return A real 3x3 matrix (symmetric, trace-3). The putative ellipsoid tensor.
ellRobin <- function(poleRakeOthers, rakeSemiaxisLengths, otherSemiaxisLengths) {
  # Construct a system X B = Y of linear equations. The unknowns are the five
  # independent tensor entries (columns 1:5) plus one scale factor C per
  # section (column 5 + i); each section contributes three rows.
  n <- length(poleRakeOthers)
  x <- matrix(0, 3 * n, 5 + n)
  y <- replicate(3 * n, 0)
  for (i in 1:n) {
    eqns <- ellRobinCoefficients(poleRakeOthers[[i]], rakeSemiaxisLengths[[i]], otherSemiaxisLengths[[i]])
    # Rows 3i-2, 3i-1, 3i hold the section's three equations; slots 1:5 are the
    # tensor coefficients and slot 7 is the coefficient of the section's C.
    x[(i * 3 - 2),1:5] <- eqns[[1]][1:5]
    x[(i * 3 - 2),(5 + i)] <- eqns[[1]][7]
    x[(i * 3 - 1),1:5] <- eqns[[2]][1:5]
    x[(i * 3 - 1),(5 + i)] <- eqns[[2]][7]
    x[(i * 3),1:5] <- eqns[[3]][1:5]
    x[(i * 3),(5 + i)] <- eqns[[3]][7]
    # The constant term (slot 6) moves to the right-hand side.
    y[[i * 3 - 2]] <- -eqns[[1]][[6]]
    y[[i * 3 - 1]] <- -eqns[[2]][[6]]
    y[[i * 3]] <- -eqns[[3]][[6]]
  }
  # Solve for B in the least-squares sense.
  fit <- lm.fit(x, y)
  # For now, just rebuild the trace-3 ellipsoid tensor; the (3, 3) entry is
  # determined by the trace constraint.
  es <- fit$coefficients
  rbind(c(es[[1]], es[[2]], es[[3]]),
        c(es[[2]], es[[4]], es[[5]]),
        c(es[[3]], es[[5]], 3 - es[[1]] - es[[4]]))
}
#' Fit an ellipsoid to elliptical sections, using their shape but not size.
#'
#' This is my custom method for fitting SPO. Unlike the method of Robin (2002, Case 2), this method is guaranteed to produce a positive-definite ellipsoid tensor E. Currently I force volume normalization (det E = 1) as well. Works well, except when minEigenvalue is negative. Works less well if BFGS is replaced with the default (Nelder-Mead).
#' @param poleRakeOthers A list of real 3x3 matrices (special orthogonal). Each matrix describes the orientation of an ellipse in space. The first row is the pole to the plane. The second row is the rake of one of the ellipse's axes in that plane. The third row is the cross product of the first two.
#' @param rakeSemiaxisLengths A vector of real numbers. The length of the semi-axis indicated by the rake in the first argument.
#' @param otherSemiaxisLengths A vector of real numbers. The length of the semi-axis perpendicular to the rake.
#' @param numSteps A real number (positive integer). The number of steps to use in the optimization algorithm.
#' @return A list with members $ellipsoid, $error, $minEigenvalue, and $value. $ellipsoid is an ellipsoid. $error is an error code; if it is non-zero, then an error occurred; try increasing numSteps. $minEigenvalue is the least eigenvalue of the Hessian at the putative optimum; if it is non-positive, then an error occurred. $value is the value of the misfit function at the optimum.
ellSPO <- function(poleRakeOthers, rakeSemiaxisLengths, otherSemiaxisLengths, numSteps=10000) {
  # Pre-process the data: each section keeps only its rake and other rows (ls)
  # and the diagonal sectional tensor built from its semi-axis lengths (bs).
  n <- length(poleRakeOthers)
  ls <- lapply(poleRakeOthers, function(r) r[2:3,])
  bs <- thread(function(a1, a2) diag(c(a1, a2)^-2), rakeSemiaxisLengths, otherSemiaxisLengths)
  # Build the misfit function to be minimized. Parameters 1:5 encode a
  # trace-zero symmetric matrix s (so E = exp(s) is positive-definite with
  # det 1); parameters 5 + i are per-section log scale factors.
  misfit <- function(pars) {
    s <- rbind(
      c(pars[[1]], pars[[2]], pars[[3]]),
      c(pars[[2]], pars[[4]], pars[[5]]),
      c(pars[[3]], pars[[5]], -pars[[1]] - pars[[4]]))
    e <- ellExp(s)
    # Squared Frobenius distance between each scaled sectional tensor and the
    # section of the candidate ellipsoid.
    diffs <- lapply(1:n, function(i) {exp(pars[[5 + i]]) * bs[[i]] - ls[[i]] %*% e %*% t(ls[[i]])})
    normSqs <- sapply(diffs, function(diff) tr(t(diff) %*% diff))
    sum(normSqs)
  }
  # Seed the minimization from the unit sphere and all k_i = 0.5 arbitrarily.
  seed <- c(0, 0, 0, 0, 0, replicate(n, 0.5))
  solution <- optim(seed, misfit, hessian=TRUE, method="BFGS", control=list(maxit=numSteps))
  # Report the answer and diagnostic information.
  eigvals <- eigen(solution$hessian, symmetric=TRUE, only.values=TRUE)$values
  s <- rbind(
    c(solution$par[[1]], solution$par[[2]], solution$par[[3]]),
    c(solution$par[[2]], solution$par[[4]], solution$par[[5]]),
    c(solution$par[[3]], solution$par[[5]], -solution$par[[1]] - solution$par[[4]]))
  ell <- ellEllipsoidFromTensor(ellExp(s), doNormalize=TRUE)
  list(ellipsoid=ell, error=solution$convergence, minEigenvalue=min(eigvals), value=solution$value)
}
# Testing for ellSPO. Noiseless. Test exactly mirrors the optimization procedure.
# Prints the volume-normalized true tensor, the ellSPO fit (with diagnostics),
# and the ellRobin fit for comparison, from n randomly generated sections.
ellSPOTest <- function(n) {
  # Make a random ellipsoid.
  q <- rotUniform()
  a <- exp(rnorm(3))
  e <- t(q) %*% diag(a^-2) %*% q
  # Make n random sections.
  f <- function() {
    # Slice with a random plane; l holds two orthonormal in-plane directions.
    l <- rotUniform()[2:3,]
    b <- l %*% e %*% t(l)
    # Rotate the in-plane frame onto the section ellipse's own axes, so that
    # the sectional tensor b becomes diagonal.
    eig <- eigen(b, symmetric=TRUE)
    l <- t(eig$vectors) %*% l
    b <- l %*% e %*% t(l)
    # Semi-axis lengths of the section ellipse.
    rake <- b[[1, 1]]^(-1 / 2)
    other <- b[[2, 2]]^(-1 / 2)
    list(l=l, rake=rake, other=other)
  }
  sections <- replicate(n, f(), simplify=FALSE)
  # Dissect the sections into the format desired by ellRobin and ellSPO.
  poleRakeOthers <- lapply(sections,
                           function(s) rbind(cross(s$l[1,], s$l[2,]), s$l[1,], s$l[2,]))
  rakeSemiaxisLengths <- sapply(sections, function(s) s$rake)
  otherSemiaxisLengths <- sapply(sections, function(s) s$other)
  # Compare the true answer to the deduced answer.
  print("true ellipsoid tensor, volume-normalized:")
  print(e * det(e)^(-1 / 3))
  print("ellSPO result:")
  pred <- ellSPO(poleRakeOthers, rakeSemiaxisLengths, otherSemiaxisLengths)
  print(pred$ellipsoid$tensor)
  print(c(pred$error, pred$minEigenvalue))
  print("ellRobin result, volume-normalized:")
  pred <- ellRobin(poleRakeOthers, rakeSemiaxisLengths, otherSemiaxisLengths)
  print(pred * det(pred)^(-1 / 3))
}
# This is an example hand-ported from Mathematica. Again noiseless, but the sections are not being generated by the same code that does the optimization. We should get rbind(c(0.879642, -0.0768036, -0.0419311), c(-0.0768036, 1.06686, 0.0109123), c(-0.0419311, 0.0109123, 1.07437)).
# Each element of rakeOthers holds the rake and other directions of one
# section; the pole is reconstructed as their cross product below.
ellSPOTestSpecific <- function() {
  rakeOthers <- list(
    rbind(
      c(-0.336673, 0.941231, 0.0271225),
      c(-0.941158, -0.335463, -0.0410727)),
    rbind(
      c(-0.251698, 0.938829, 0.23505),
      c(-0.506325, 0.0792426, -0.858694)),
    rbind(
      c(-0.263783, 0.947118, -0.182718),
      c(0.865012, 0.31609, 0.389668)),
    rbind(
      c(0.065659, -0.526426, 0.847682),
      c(-0.324202, -0.814681, -0.48082)))
  rakeSemiaxisLengths <- c(0.955355, 0.952817, 0.960189, 0.970211)
  otherSemiaxisLengths <- c(1.08491, 1.00371, 1.07812, 0.998093)
  # Rebuild full pole-rake-other frames, re-orthogonalized against rounding in
  # the hand-ported numbers.
  poleRakeOthers <- lapply(
    rakeOthers, function(ro) rotProjectedMatrix(rbind(cross(ro[1,], ro[2,]), ro[1,], ro[2,])))
  ellSPO(poleRakeOthers, rakeSemiaxisLengths, otherSemiaxisLengths)
}
### PLOTTING ###
#' Visualization of ellipsoids.
#'
#' Renders each ellipsoid as a triangulated surface by mapping a refined unit
#' sphere through scaling (a), rotation (R^T), and translation (center).
#' @param rots A list of 3x3 real matrices (special orthogonal). The ellipsoid orientations, as given by the $rotation field of an ellipsoid.
#' @param as A list of 3D real vectors. The ellipsoid semi-axis lengths, in order corresponding to the rows of the rots, as given by the $a field of an ellipsoid.
#' @param centers A list of 3D real vectors. The locations of the ellipsoid centers.
#' @param numNonAdapt A real number (non-negative integer). The number of refinements to use. Each refinement makes the ellipsoids smoother, but increases time and memory requirements by a factor of four.
#' @return NULL.
ellEllipsoidPlot <- function(rots, as, centers=replicate(length(rots), c(0, 0, 0), simplify=FALSE), numNonAdapt=4, ...) {
  sphere <- rayTetrahedralSphere(numNonAdapt)
  # Map each sphere vertex v to center + R^T (a * v) for each ellipsoid, and
  # pool all triangles into one flat list.
  triangles <- unlist(
    lapply(1:length(as),
           function(i) lapply(sphere, function(tri)
             lapply(tri, function(v) {centers[[i]] + as.numeric(t(rots[[i]]) %*% (as[[i]] * v))}))),
    recursive=FALSE, use.names=FALSE)
  plot3D(triangles=triangles, ...)
}
#' Equal-area plot of ellipsoid axes.
#'
#' Short axes are shown as circles, intermediate as triangles, long as squares. Warning: Curves are not well tested.
#' @param rots A list of 3x3 real matrices (special orthogonal). The ellipsoid orientations, as given by the $rotation field of an ellipsoid.
#' @param as A list of 3D real vectors. The ellipsoid semi-axis lengths, in order corresponding to the rows of the rots, as given by the $a field of an ellipsoid. Alternatively, you can pass logA; this information is used only to determine the order of the axes.
#' @param rotCurves A list of lists of 3x3 real matrices (special orthogonal). Like rots, but curves rather than points.
#' @param aCurves A list of lists of 3D real vectors. Like as, but curves rather than points.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellEqualAreaPlot <- function(rots, as, rotCurves=list(), aCurves=list(), colors=c("black")) {
  # Prepare to plot points based on rots and as.
  # f returns the i-th orientation's rows sorted short, intermediate, long.
  f <- function(i, rs, as) {
    ord <- order(as[[i]])
    list(rs[[i]][ord[[1]],], rs[[i]][ord[[2]],], rs[[i]][ord[[3]],])
  }
  # Flatten so every ellipsoid contributes three consecutive points.
  points <- unlist(lapply(1:length(rots), f, rots, as), recursive=FALSE, use.names=FALSE)
  # Prepare to plot curves based on curvesRots and curvesAs.
  # Curves are only drawn when the two curve lists are non-empty and parallel.
  if (length(rotCurves) >= 1 && length(rotCurves) == length(aCurves)) {
    curves <- lapply(1:length(rotCurves), function(j) lapply(1:length(rotCurves[[j]]), f, rotCurves[[j]], aCurves[[j]]))
    # Split each curve of axis-triples into three curves: one per axis rank.
    curves1 <- lapply(curves, function(curve) lapply(curve, function(tri) tri[[1]]))
    curves2 <- lapply(curves, function(curve) lapply(curve, function(tri) tri[[2]]))
    curves3 <- lapply(curves, function(curve) lapply(curve, function(tri) tri[[3]]))
    curves <- c(curves1, curves2, curves3)
  } else
    curves <- list()
  # Plot. Each color is tripled because each ellipsoid yields three points,
  # drawn with shapes circle, triangle, square for short, intermediate, long.
  newColors <- as.character(sapply(colors, function(s) c(s, s, s)))
  lineEqualAreaPlot(points, curves=curves, colors=newColors, shapes=c("c", "t", "s"))
}
#' Equal-volume plot of ellipsoid orientations.
#'
#' @param rots A list of 3x3 real matrices (special orthogonal). The ellipsoid orientations, as given by the $rotation field of an ellipsoid.
#' @param as A list of 3D real vectors. The ellipsoid semi-axis lengths, in order corresponding to the rows of the rots, as given by the $a field of an ellipsoid. Alternatively, you can pass logA; this information is used only to determine the order of the axes.
#' @param rotCurves A list of lists of 3x3 real matrices (special orthogonal). Like rots, but curves rather than points.
#' @param aCurves A list of lists of 3D real vectors. Like as, but curves rather than points.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellEqualVolumePlot <- function(rots, as, rotCurves=list(), aCurves=list(), colors=c("white"), ...) {
  # The rotations are permuted into this row-order: short, long, intermediate. To match other plane-line stuff in our library.
  f <- function(i, rs, as) {
    ord <- order(as[[i]])
    short <- rs[[i]][ord[[1]],]
    long <- rs[[i]][ord[[3]],]
    # Third row is rebuilt by cross product, guaranteeing determinant +1.
    rbind(short, long, cross(short, long))
  }
  points <- lapply(1:length(rots), f, rots, as)
  if (length(rotCurves) >= 1 && length(rotCurves) == length(aCurves)) {
    curves <- lapply(1:length(rotCurves), function(j) lapply(1:length(rotCurves[[j]]), f, rotCurves[[j]], aCurves[[j]]))
    # Note: f is deliberately re-bound here; the permuting f above is no
    # longer needed. This f walks a curve and replaces each rotation with the
    # symmetry-group representative nearest its predecessor, for continuity.
    # NOTE(review): curve[2:length(curve)] misbehaves for a length-1 curve
    # (2:1 counts down) — confirm curves always contain at least 2 points.
    f <- function(curve) {
      cur <- list(curve[[1]])
      for (r in curve[2:length(curve)])
        cur[[length(cur) + 1]] <- oriNearestRepresentative(r, cur[[length(cur)]], oriLineInPlaneGroup)
      cur
    }
    curves <- lapply(curves, f)
  } else
    curves <- list()
  oriEqualVolumePlot(points=points, curves=curves, group=oriLineInPlaneGroup, colors=colors, ...)
}
#' A distorted version of the Hsu-Nadai plot of ellipsoid shapes.
#'
#' This is a polar plot, in which the radial coordinate is octahedral shear strain and the angular coordinate is Lode's parameter. This plot is similar to, but not identical to, the Hsu-Nadai plot. See ellHsuNadaiPlot for the real thing.
#' @param logAs A list of 3D real vectors. The ellipsoid semi-axis log-lengths.
#' @param curves A list of lists of 3D real vectors. Like logAs, but curves rather than points.
#' @param es A real number (positive) or NULL. If a number, then that is the radius of the plot in the E_s direction. If NULL, then the radius of the plot is inferred from the points (not the curves, currently).
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellWrongHsuNadaiPlot <- function(logAs, curves=list(), es=NULL, colors=c("black")) {
  # Polar coordinates: radius = octahedral shear strain, angle from nu.
  ess <- sapply(logAs, ellOctahedralShearStrain)
  nus <- sapply(logAs, ellLodeNu)
  esCurves <- lapply(curves, function(curve) sapply(curve, ellOctahedralShearStrain))
  nuCurves <- lapply(curves, function(curve) sapply(curve, ellLodeNu))
  # Make the plot window.
  if (is.null(es))
    es <- max(c(ess, 1))
  plot.new()
  plot.window(xlim=c(-0.55 * es, 0.55 * es), ylim=c(-0.05 * es, 1.05 * es))
  # Plot the points. nu in [-1, 1] maps to an angle in [pi / 3, 2 pi / 3],
  # so the whole plot occupies a 60-degree wedge about the vertical.
  if (length(logAs) >= 1) {
    xs <- sapply(1:length(logAs), function(i) ess[[i]] * cos(pi / 2 - nus[[i]] * pi / 6))
    ys <- sapply(1:length(logAs), function(i) ess[[i]] * sin(pi / 2 - nus[[i]] * pi / 6))
    points(xs, ys, col=colors, pch=c(19))
  }
  # Plot the curves.
  if (length(curves) >= 1)
    for (j in 1:length(curves)) {
      xs <- sapply(1:length(curves[[j]]), function(i) esCurves[[j]][[i]] * cos(pi / 2 - nuCurves[[j]][[i]] * pi / 6))
      ys <- sapply(1:length(curves[[j]]), function(i) esCurves[[j]][[i]] * sin(pi / 2 - nuCurves[[j]][[i]] * pi / 6))
      lines(xs, ys)
    }
  # Plot the boundary: the circular arc at radius es and the two straight edges
  # of the wedge down to the origin.
  xys <- sapply(0:30, function(t) {
    theta <- (t / 30) * (pi / 3) + (pi / 3)
    es * c(cos(theta), sin(theta))
  })
  lines(xys[1,], xys[2,])
  lines(c(-0.5 * es, 0, 0.5 * es), c(sqrt(3) / 2 * es, 0, sqrt(3) / 2 * es))
  # Plot some tick marks at integer strains along both wedge edges.
  if (es >= 1)
    for (i in 1:es) {
      xs <- c(i * 0.5, i * 0.5 + sqrt(3) * 0.5 * 0.05)
      ys <- c(i * sqrt(3) * 0.5, i * sqrt(3) * 0.5 - 0.5 * 0.05)
      lines(xs, ys)
      lines(-xs, ys)
    }
}
#' Hsu-Nadai plot of ellipsoid shapes.
#'
#' @param logAs A list of 3D real vectors. The ellipsoid semi-axis log-lengths.
#' @param curves A list of lists of 3D real vectors. Like logAs, but curves rather than points.
#' @param es A real number (positive) or NULL. If a number, then that is the radius of the plot in the E_s direction. If NULL, then the radius of the plot is inferred from the points (not the curves, currently).
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellHsuNadaiPlot <- function(logAs, curves=list(), es=NULL, colors=c("black")) {
  # Plot coordinates computed from the extreme deviatoric (trace-free) log
  # semi-axis lengths; this is the genuine Hsu-Nadai construction, unlike
  # the polar approximation in ellWrongHsuNadaiPlot.
  x <- function(logA) {-sqrt(1.5) * max(logA - sum(logA) / 3) - sqrt(1.5) * min(logA - sum(logA) / 3)}
  y <- function(logA) {sqrt(0.5) * max(logA - sum(logA) / 3) - sqrt(0.5) * min(logA - sum(logA) / 3)}
  # Make the plot window.
  if (is.null(es))
    es <- max(c(1, sapply(logAs, ellOctahedralShearStrain)))
  plot.new()
  plot.window(xlim=c(-0.55 * es, 0.55 * es), ylim=c(-0.05 * es, 1.05 * es))
  # Plot the points.
  if (length(logAs) >= 1)
    points(sapply(logAs, x), sapply(logAs, y), col=colors, pch=c(19))
  # Plot the curves.
  if (length(curves) >= 1)
    for (j in 1:length(curves))
      lines(sapply(curves[[j]], x), sapply(curves[[j]], y))
  # Plot the boundary: the circular arc at radius es plus the two straight
  # edges of the 60-degree wedge down to the origin.
  xys <- sapply(0:30, function(t) {
    theta <- (t / 30) * (pi / 3) + (pi / 3)
    es * c(cos(theta), sin(theta))
  })
  lines(xys[1,], xys[2,])
  lines(c(-0.5 * es, 0, 0.5 * es), c(sqrt(3) / 2 * es, 0, sqrt(3) / 2 * es))
  # Plot some tick marks at integer strains along both wedge edges.
  if (es >= 1)
    for (i in 1:es) {
      xs <- c(i * 0.5, i * 0.5 + sqrt(3) * 0.5 * 0.05)
      ys <- c(i * sqrt(3) * 0.5, i * sqrt(3) * 0.5 - 0.5 * 0.05)
      lines(xs, ys)
      lines(-xs, ys)
    }
}
#' Hsu-Nadai plot of ellipsoid shapes, with a third dimension specified by the user.
#'
#' @param logAs A list of 3D real vectors. The ellipsoid semi-axis log-lengths.
#' @param zs A vector of real numbers. The coordinates of the points in the direction perpendicular to the Hsu-Nadai plot.
#' @param es A real number (positive) or NULL. If a number, then that is the radius of the plot in the E_s direction. If NULL, then the radius of the plot is inferred from the points.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @param ... Other arguments to pass to the underlying plot3D.
#' @return NULL.
ellHsuNadaiScalarPlot <- function(logAs, zs, es=NULL, colors=c("white"), ...) {
  # In-plane Hsu-Nadai coordinates, same construction as in ellHsuNadaiPlot.
  x <- function(logA) {-sqrt(1.5) * max(logA - sum(logA) / 3) - sqrt(1.5) * min(logA - sum(logA) / 3)}
  y <- function(logA) {sqrt(0.5) * max(logA - sum(logA) / 3) - sqrt(0.5) * min(logA - sum(logA) / 3)}
  # Determine how big the plot will be: large enough for both the Hsu-Nadai
  # radius and the largest |z| value.
  if (is.null(es))
    es <- max(c(1, sapply(logAs, ellOctahedralShearStrain)))
  z <- max(abs(zs))
  radius <- max(es, z)
  # Build the points: (x, y) in the Hsu-Nadai plane, z perpendicular to it.
  points <- lapply(1:length(logAs), function(i) c(x(logAs[[i]]), y(logAs[[i]]), zs[[i]]))
  # Build the curves. f traces the curved outer arc of the wedge at height zz.
  f <- function(t, zz) {
    theta <- (t / 30) * (pi / 3) + (pi / 3)
    c(es * c(cos(theta), sin(theta)), zz)
  }
  # Draw the wedge outline at the bottom (-z), middle (0), and top (z) of the range.
  bottom <- lapply(0:30, f, -z)
  bottom <- c(bottom, list(c(-0.5 * es, sqrt(3) / 2 * es, -z), c(0, 0, -z), c(0.5 * es, sqrt(3) / 2 * es, -z)))
  middle <- lapply(0:30, f, 0)
  middle <- c(middle, list(c(-0.5 * es, sqrt(3) / 2 * es, 0), c(0, 0, 0), c(0.5 * es, sqrt(3) / 2 * es, 0)))
  top <- lapply(0:30, f, z)
  top <- c(top, list(c(-0.5 * es, sqrt(3) / 2 * es, z), c(0, 0, z), c(0.5 * es, sqrt(3) / 2 * es, z)))
  plot3D(radius=radius, points=points, curves=list(bottom, middle, top), colors=colors, ...)
}
#' Flinn plot of ellipsoid shapes.
#'
#' Plots intermediate/short axis ratio against long/intermediate axis ratio.
#' @param as A list of 3D real vectors, with all entries positive. The ellipsoid semi-axis lengths.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellFlinnPlot <- function(as, colors=c("black")) {
  # sum - min - max picks out the intermediate semi-axis length.
  xs <- sapply(as, function(a) {(sum(a) - min(a) - max(a)) / min(a)})
  ys <- sapply(as, function(a) {max(a) / (sum(a) - min(a) - max(a))})
  # Bug fix: the colors argument was accepted and documented but never used;
  # pass it through to plot so points are actually colored.
  plot(x=xs, y=ys, xlim=c(1, max(xs)), ylim=c(1, max(ys)), col=colors,
       xlab="intermediate / short", ylab="long / intermediate")
}
#' Logarithmic Flinn plot (Ramsay plot) of ellipsoid shapes.
#'
#' Plots log(intermediate/short) against log(long/intermediate), computed from
#' the deviatoric log semi-axis lengths.
#' @param logAs A list of 3D real vectors. The ellipsoid semi-axis log-lengths.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellLogFlinnPlot <- function(logAs, colors=c("black")) {
  xs <- sapply(logAs, function(logA) {-max(logA - sum(logA) / 3) - 2 * min(logA - sum(logA) / 3)})
  ys <- sapply(logAs, function(logA) {2 * max(logA - sum(logA) / 3) + min(logA - sum(logA) / 3)})
  # Bug fix: the colors argument was accepted and documented but never used;
  # pass it through to plot so points are actually colored.
  plot(x=xs, y=ys, xlim=c(0, max(xs)), ylim=c(0, max(ys)), col=colors,
       xlab="log(intermediate / short)", ylab="log(long / intermediate)")
}
#' Jelinek plot of ellipsoid shapes.
#'
#' This is a rectangular plot of Jelinek's Pj vs. Lode's nu. Tauxe (2010) called it the Jelinek plot, after Jelinek (1981).
#' @param logAs A list of 3D real vectors. The ellipsoid semi-axis log-lengths.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @return NULL.
ellJelinekPlot <- function(logAs, colors=c("black")) {
  # Horizontal axis: Jelinek's P_j. Vertical axis: Lode's nu, clamped to [-1, 1].
  pjs <- sapply(logAs, ellJelinekP)
  nus <- sapply(logAs, ellLodeNu)
  plot(x=pjs, y=nus, col=colors, xlab="P_j", ylab="nu", ylim=c(-1, 1))
}
#' Pair plot of ellipsoid vectors.
#'
#' @param points A list of 5D or 6D real vectors. Ellipsoid vectors, as in the $vector field of an ellipsoid.
#' @param colors Character. A vector of colors for coloring the points, as in all R graphics functions.
#' @param ... Other parameters to be passed to the underlying pairs function.
#' @return NULL.
ellPairsPlot <- function(points, colors=c("black"), ...) {
  mat <- t(simplify2array(points))
  # Fix: labels were hard-coded to five names (v_1..v_5), which disagreed with
  # the documented 6D case; label one panel per actual coordinate instead.
  pairs(mat, labels=paste0("v_", seq_len(ncol(mat))), col=colors, ...)
}
#' 2D or 3D plot of ellipsoid vectors.
#'
#' The 2D case is like a single panel of ellPairsPlot. Warning: In 2D, curves are not well tested.
#' @param ijk A 2D or 3D vector of real numbers (positive integers). These should be in 1, ..., d, where d is the dimension of the vectors. They select out which coordinates of the vectors to display. For example, c(1, 2, 3) indicates to make a 3D plot of the first three vector coordinates.
#' @param points A list of 5D or 6D real vectors. Ellipsoid vectors, as in the $vector field of an ellipsoid.
#' @param curves A list of lists of 5D or 6D real vectors. Like points, but curves.
#' @param colors Character or NULL. A vector of colors for coloring the points, as in all R graphics functions. If NULL, then defaults to black in 2D or white in 3D.
#' @param ... Other parameters to be passed to the underlying plot3D function. Ignored in the 2D case.
#' @return NULL.
ellVectorPlot <- function(ijk, points=list(), curves=list(), colors=NULL, ...) {
  # Project each vector (and each curve vertex) onto the selected coordinates.
  pointsNew <- lapply(points, function(v) v[ijk])
  curvesNew <- lapply(curves, function(curve) lapply(curve, function(v) v[ijk]))
  if (length(ijk) == 3) {
    # 3D case: delegate to plot3D.
    if (is.null(colors))
      colors="white"
    plot3D(points=pointsNew, curves=curvesNew, colors=colors, ...)
  } else {
    # 2D case: ordinary scatter plot with the curves overlaid as lines.
    if (is.null(colors))
      colors="black"
    plot(t(simplify2array(pointsNew)), col=colors,
         xlab=paste0("ellipsoid v_", as.character(ijk[[1]])),
         ylab=paste0("ellipsoid v_", as.character(ijk[[2]])))
    for (curve in curvesNew)
      lines(t(simplify2array(curve)))
  }
}
|
de8b87f10457214d7dc2ec5ff5f4bfafd7bc68da | b4f3e2145015d8c207d2414c7cbf90666f15c260 | /Thesis/try_code/multimodal.R | 271753069ce32aa2bd9d358ebf90b9a4af7b20dd | [] | no_license | shaomin4/shaomin_research | 1e9ead0d70e4e453d65c0070cfe87a77cc317a44 | e932d577431397e75f23ba6e6fb0f1ce39613be2 | refs/heads/master | 2022-11-28T15:14:47.223439 | 2020-08-10T18:23:17 | 2020-08-10T18:23:17 | 286,543,756 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,363 | r | multimodal.R | source("R/GPU_envir_vars.R")
source("R/find_topic.R")
source("R/hungarianAssignNMF.R")
source("R/topic_dist.R")
source("R/load_dtm.R")
library(tm)
# Data preparation --------------------------------------------------------
# Load metadata for years
load("/data_lake/arXiv_ML/ML_meta_2019.RData")
all_df <- all_df[grep(pattern = "(astro|cond-|gr|hep|math-ph|nlin|nucl|physics|quant|q-bio)",
x = all_df$categories,
invert = T),]
# Load dtm
dtm <- load_dtm(file = "/data_lake/arXiv_ML/dtm_2019.RData", tfIdf = F)
dtm <- dtm[rownames(dtm) %in% all_df$id,]
dtm <- dtm[rowSums(dtm) > 1, colSums(dtm) > 1]
#load("/data_lake/arXiv_ML/dym_2019.RData")
load("/data_lake/arXiv_ML/tfym_2019.RData")
# Parameters --------------------------------------------------------------
batch_size <- 64L
original_dtm <- ncol(dtm)
original_dym <- ncol(dym)
latent_dim <- 10L
intermediate_dim <- 30L
epochs <- 3000L
epsilon_std <- 1.0
# Model definition --------------------------------------------------------
left_input <- layer_input(name="left_input" ,shape = c(original_dtm))
left_branch <- layer_dense(name="left_branch", left_input, intermediate_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
left_code <- layer_dense(name = "left_code",left_branch,latent_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
right_input <- layer_input(name="right_input",shape = c(original_dym))
right_branch <- layer_dense(name="right_branch", right_input, intermediate_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
right_code <- layer_dense(name = "right_code",right_branch,latent_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
z <- layer_concatenate(list(left_code, right_code)) %>%
layer_dense(name = "code_layer",latent_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
encoder <- keras_model(c(left_input,right_input),z)
# we instantiate these layers separately so as to reuse them later
decoder_left <- layer_dense(name = "left_decoder",z,units = intermediate_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
decoder_right <- layer_dense(name= "right_decoder",z,units = intermediate_dim, activation = "relu",
kernel_constraint = constraint_nonneg())
left_output <- layer_dense(name= "left_output",decoder_left,units = original_dtm, activation = "relu",
kernel_constraint = constraint_nonneg())
right_output <- layer_dense(name= "right_output",decoder_right ,units = original_dym, activation = "relu",
kernel_constraint = constraint_nonneg())
autoencoder <- keras_model(c(left_input,right_input),c(left_output,right_output))
# RMSE
# Root-mean-square error built from Keras backend ops (k_*), so it operates on
# tensors and can be passed directly as a Keras loss function.
rmse <- function(y_true, y_pred) {
  return(k_sqrt(k_mean(k_square(y_true - y_pred))))
}
# Use rmse as loss function
autoencoder %>% compile(
loss = rmse,
optimizer = optimizer_adam(lr = 0.001)
)
# Run autoencoder with early stopping and patience epoch = 100
autoencoder %>% fit(list(dtm,dym),
list(dtm,dym),
epochs = 5,
batch_size = 128,
view_metrics = F,
callbacks = list(callback_early_stopping(monitor = "loss", patience = 100))
)
# Get weight from Keras model
w <- get_weights(autoencoder)
View(w)
# Build topic-term matrix
topic_term <- w[[1]] %*% w[[5]]
# Normalize for each colSum = 1
topic_term <- t(solve(diag(colSums(topic_term))) %*% t(topic_term))
rownames(topic_term) <- colnames(dtm)
colnames(topic_term) <- paste0(2019, "_topic_", 1:ncol(topic_term))
topic_term <- ifelse(topic_term <= .Machine$double.eps, 0, topic_term)
top10_terms <- find_topic(topic_term, 10)$term
top10_terms <- top10_terms
# Build topic-term matrix
topic_year <- w[[3]] %*% w[[7]]
# Normalize for each colSum = 1
topic_year <- t(solve(diag(colSums(topic_year))) %*% t(topic_year))
rownames(topic_year) <- colnames(dym)
colnames(topic_year) <- paste0(2019, "_topic_", 1:ncol(topic_year))
topic_year <- ifelse(topic_year <= .Machine$double.eps, 0, topic_year)
top10_terms <- find_topic(topic_term, 10)$term
top10_terms <- top10_terms
|
be31b1097db0512e933a497051927b6f9964507c | 8585dd8814d82d9a0870804d8a5acf9ad650d0ed | /tests/testthat/test-iv.r | 90edbbdd11a5259a9b03f5b7388b887490e70de1 | [] | no_license | brentonk/coefbounds | 7500d38188c87b41c2b6ebdbef5f1d5f04517dce | 7c7b65a7d34ecec01ac6a6f1062c4eeab24cab08 | refs/heads/master | 2021-01-17T19:17:25.817055 | 2016-06-28T21:33:03 | 2016-06-28T21:33:03 | 59,677,826 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,196 | r | test-iv.r | context("Instrumental variables estimation")
library("AER")
test_that("Underidentified models not allowed", {
  # Two endogenous regressors but only one instrument: coefbounds must refuse.
  set.seed(876)
  x1 <- rnorm(10)
  x2 <- rnorm(10)
  z1 <- rnorm(10)
  yl <- yh <- rnorm(10)
  expect_error(coefbounds(yl + yh ~ x1 + x2 | z1,
                          boot = 0),
               "fewer instruments")
})
test_that("No IVs allowed in logit", {
  # Supplying instruments under model = "logit" must raise an error.
  set.seed(21)
  x1 <- rnorm(10)
  z1 <- rnorm(10)
  yl <- rbinom(10, 1, 0.5)
  yh <- yl
  expect_error(coefbounds(yl + yh ~ x1 | z1,
                          model = "logit",
                          boot = 0),
               "instrumental variables not allowed")
})
test_that("coefbounds() equals ivreg() when point-identified", {
  # When the lower and upper outcomes coincide, both bound columns should
  # reproduce the AER::ivreg point estimates.
  set.seed(47)
  yl <- rnorm(100)
  yu <- yl
  x1 <- rnorm(100)
  x2 <- rnorm(100)
  x3 <- rnorm(100)
  z1 <- 0.5 * x1 + 0.5 * rnorm(100)
  fit_ivreg <- ivreg(yl ~ x1 + x2 + x3 | z1 + x2 + x3)
  fit_bd <- coefbounds(yl + yu ~ x1 + x2 + x3 | z1 + x2 + x3,
                       boot = 0)
  expect_equal(coef(fit_ivreg),
               coef(fit_bd)[, 1])
  expect_equal(coef(fit_ivreg),
               coef(fit_bd)[, 2])
})
|
e1a1c986694aebcc4b93d1ec01ddc412dfe8e90e | 75eb3a5fd2ccc29c3759970d3f8ff188b1a1018e | /GalapagosTPC.R | 6546b9ed28f8e43a5274e2c77cdc72d911b71aed | [] | no_license | njsilbiger/GalapagosUrchins | 36bcde7e4e295f2f5bf5280de7192de81f0aa7a0 | 459b98a60229d5cc49937173efec0336211ed710 | refs/heads/master | 2021-07-26T00:48:28.853721 | 2020-12-21T19:04:21 | 2020-12-21T19:04:21 | 232,166,635 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 25,450 | r | GalapagosTPC.R | ##TPC curves, maps, and temperature analysis for Urchin Galapagos paper
rm(list=ls())
##Install packages
# load packages
library(nls.multstart)
library(broom)
library(purrr)
library(tidyverse)
library(nlstools)
library(nls2)
library(grid)
library(gridExtra)
library(cowplot)
library(lubridate)
library(directlabels)
library(rgdal)
library(rgeos)
library(ggthemes)
library(ggsn)
library(sp)
library(ggrepel)
library(raster)
library(rgdal)
library(patchwork)
# load data
photo.data <- read.csv("GalapagosRates.csv")
photo.data$X <- NULL  # drop the row-number column written by write.csv
View(photo.data)  # interactive inspection only; has no effect in Rscript
glimpse(photo.data)
# decide color scheme for the plots
#cols<-c("#99817b", "#F2C3A7", "#FEF3E1", "#C489B9")
cols<-c("#073e3e", "#c35119", "#f896b0", "#e4e0ca")
# remove the NAs from the data
#photo.data<-photo.data[-which(is.na(photo.data$umol.cm2.hr)),]
# remove the five organisms that are wrong (had too messy respiration files)
remove<-c('Egala_Bart_1','Egala_Ibbet_1','Egala_Ibbet_3','Egala_Botel_2', 'Egala_Corm_10')
bad.ID<-which(photo.data$Organism.ID %in% remove)
photo.data<-photo.data[-bad.ID,]
mydata <- photo.data
mydata$log.rate <- log(mydata$umol.cm2.hr) # log-transform rates; note no offset is added, so a zero rate would become -Inf
# convert temp to K
mydata$K<-mydata$Temp.C + 273.15
# define the Sharpe-Schoolfield equation
#
# Log metabolic rate as a function of temperature (Sharpe-Schoolfield,
# high-temperature-inactivation form).
#   lnc  - log rate at the reference temperature Tc
#   E    - activation energy (eV)
#   Eh   - high-temperature deactivation energy (eV)
#   Th   - temperature (K) at which the rate is half depressed by inactivation
#   temp - assay temperature(s) in Kelvin (vectorized)
#   Tc   - reference temperature in degrees C (converted to K internally)
schoolfield_high <- function(lnc, E, Eh, Th, temp, Tc) {
  Tc <- 273.15 + Tc
  k <- 8.62e-5  # Boltzmann constant, eV / K
  # Fix: log(exp(x)) is the identity, and exp(x) overflows to Inf for large
  # Arrhenius terms; compute the exponent directly instead.
  boltzmann.term <- lnc + E/k*(1/Tc - 1/temp)
  # log(1 / (1 + exp(y))) == -log1p(exp(y)); log1p is more accurate when
  # exp(y) is small.
  inactivation.term <- -log1p(exp(Eh/k*(1/Th - 1/temp)))
  return(boltzmann.term + inactivation.term)
}
# fit over each set of groupings
#droplevels(mydata$Organism.ID)
mydata$Location<-as.character(mydata$Location)
mydata$Organism.ID<-as.character(mydata$Organism.ID)
# Fit one Sharpe-Schoolfield thermal performance curve per urchin
# (Organism.ID); nls_multstart draws 1000 random starts between the lower and
# upper bounds to guard against local minima, and suppresses fitting errors.
fits <- mydata %>%
  group_by(Organism.ID, Location) %>%
  nest() %>%
  mutate(fit = purrr::map(data, ~ nls_multstart(log.rate ~ schoolfield_high(lnc, E, Eh, Th, temp = K, Tc = 26),
                                                data = .x,
                                                iter = 1000,
                                                start_lower = c(lnc = -10, E = 0.1, Eh = 0.2, Th = 285),
                                                start_upper = c(lnc = 10, E = 2, Eh = 5, Th = 330),
                                                supp_errors = 'Y',
                                                na.action = na.omit,
                                                lower = c(lnc = -10, E = 0, Eh = 0, Th = 0))))
#broom, models over and over again and purr for lists
#make dplyr code and export the slope, intercept and R2 values
#get r2, extract predit and pull out and join lists, shows o vs predcited and r2
PredictedEgala_Bart_1 <- predict(fits$fit[[1]])
ObservedEgala_Bart_1 <- mydata$umol.cm2.hr[mydata$Organism.ID == "Egala_Bart_10"]
po <- lm(PredictedEgala_Bart_1 ~ ObservedEgala_Bart_1)
summary(po)
plot(ObservedEgala_Bart_1,PredictedEgala_Bart_1)
abline(po)
legend("topleft", bty="n", legend=paste("r2 =", format(summary(po)$adj.r.squared, digits=4)))
# look at a single fit
summary(fits$fit[[1]])
# look at output object
#select(fits, Organism.ID, data, fit)
# get summary info
info <- fits %>%
unnest_legacy(fit %>% map(glance))
# get params
params <- fits %>%
unnest_legacy(fit %>% map(tidy))
# get confidence intervals
CI <- fits %>%
unnest_legacy(fit %>% map(~ confint2(.x) %>%
data.frame() #%>%
# rename(., conf.low = X2.5.., conf.high = X97.5..)
)) %>%
group_by(., Organism.ID) %>%
mutate(., term = c('lnc', 'E', 'Eh', 'Th')) %>%
ungroup()
colnames(CI)[3:4]<-c("conf.low", "conf.high") # rename columns
# merge parameters and CI estimates
params <- merge(params, CI, by = intersect(names(params), names(CI)))
# get predictions
preds <- fits %>%
unnest_legacy(fit %>% map(augment))
#select(info, fragment.ID, logLik, AIC, BIC, deviance, df.residual)
# new data frame of predictions, do this to set a sequence to make a smooth curve with your prediction points
new_preds <- mydata %>% do(., data.frame(K = seq(min(.$K), max(.$K), length.out = 150), stringsAsFactors = FALSE)) #setting a specific sequence so you can have a smooth curve
# max and min for each curve
max_min <- mydata %>% group_by(Organism.ID) %>%
dplyr::summarise(., min_K = min(K), max_K = max(K)) %>%
ungroup()
# create new predictions
preds2 <- fits %>%
unnest_legacy(fit %>% map(augment, newdata = new_preds)) %>%
merge(., max_min, by = "Organism.ID") %>%
group_by(., Organism.ID) %>%
filter(., K > unique(min_K) & K < unique(max_K)) %>%
dplyr::rename(., ln.rate = .fitted) %>%
ungroup()
#want to do ggplot where we look at fragments individually
#reorder the sites
mydata$Location<- factor(mydata$Location, levels=c('Bart','Ibbet','Corm','Doug','Espi','Botel'))
preds2$Location<- factor(preds2$Location, levels=c('Bart','Ibbet','Corm','Doug','Espi','Botel'))
# rename the labels
ggplot() +
geom_point(aes(K - 273.15, log.rate, col = Location), size = 2, mydata) +
geom_line(aes(K - 273.15, ln.rate, col = Location, group = Organism.ID), alpha = 0.5, preds2) +
facet_wrap(~ Organism.ID, labeller = labeller(.multi_line = FALSE), scales = 'free_y') +
#scale_colour_manual(values = c('green4', 'blue', 'black')) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
ylab(expression(paste("Respiration Rates (Log " *mu* "mol O"[2], " "*g^-1 , " "*hr^-1*")"), sep = " ") )+
xlab('Temperature (ºC)') +
theme(legend.position = c(0.91, 0.85))+
# scale_color_manual(values = cols)+
labs(color = "Rate Type") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(), axis.line = element_line(colour = "black"), legend.position = 'none')
# plot all values in TPCs and remove Doug and Ibbet
mydata<-mydata %>%
filter(Location != 'Ibbet', Location != 'Doug') %>%
droplevels() %>%
mutate(Location = factor(Location,levels = c("Botel","Corm", "Espi", "Bart")),
Location = factor(Location, labels = c(" La Botella"," Punta Cormorant" ," Punta Espinosa" ," Bartolomé" )))
preds2<-preds2%>%
filter(Location != 'Ibbet', Location != 'Doug')%>%
droplevels() %>%
mutate(Location = factor(Location,levels = c("Botel","Corm", "Espi", "Bart")),
Location = factor(Location, labels = c(" La Botella"," Punta Cormorant" ," Punta Espinosa" ," Bartolomé" )))
ggplot() +
geom_point(aes(K - 273.15, log.rate, col = Location), size = 2, mydata) +
geom_line(aes(K - 273.15, ln.rate, col = Location, group = Organism.ID), alpha = 0.5, preds2) +
facet_wrap(~ Location, labeller = labeller(.multi_line = FALSE)) +
scale_color_manual(values = cols)+
#scale_color_manual(values = c("#323695", "#abd9e9", "#f4a582", "#b2182b"))+
#scale_colour_manual(values = c('green4', 'blue', 'black')) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
ylab(expression(paste("Respiration Rates (Log " *mu* "mol O"[2], " "*g^-1 , " "*hr^-1*")"), sep = " ") )+
xlab('Temperature (ºC)') +
guides(color = FALSE) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(), axis.line = element_line(colour = "black"))
ggsave(filename = "Output/MSPlots/TPCcurves.pdf", device = "pdf", width = 6, height = 6)
# function for calculating Topt
# Analytic thermal optimum (in Kelvin) of a Sharpe-Schoolfield curve, given
# the activation energy E, the half-inactivation temperature Th (K), and the
# deactivation energy Eh.
get_topt <- function(E, Th, Eh){
  k <- 8.62e-05  # Boltzmann constant, eV / K
  denominator <- Eh + k * Th * log(Eh / E - 1)
  (Eh * Th) / denominator
}
# calc topts for all individuals: pivot the fitted parameters wide so each row
# holds one urchin's E, Eh, Th, lnc, then derive Topt analytically
Topt_data <- params %>%
  dplyr::select(Organism.ID, term, estimate,Location) %>%
  spread(term, estimate) %>%
  mutate(Topt = get_topt(E, Th, Eh)) %>%
  group_by(., Location, Organism.ID)
# convert temperature back to Celsius, not Kelvin
Topt_data$Topt <- Topt_data$Topt - 273.15
# ANOVA: does thermal optimum differ among sites?
Topt.mod <- lm(Topt~Location, data=Topt_data)
# check for normality, use normality plots
qqnorm(resid(Topt.mod))
qqline(resid(Topt.mod))
# check heteroscedasticity with boxplots
boxplot(resid(Topt.mod)~Topt_data$Location)
# high and low residual spread suggests inconsistent variances; may need a weighted regression in the future
anova(Topt.mod)
summary(Topt.mod)
TukeyHSD(aov(Topt.mod))
# plot all the TPCs
ggplot() +
geom_point(aes(K - 273.15, log.rate, col = Location), size = 2, mydata) +
geom_line(aes(K - 273.15, ln.rate, col = Location, group = Organism.ID), alpha = 0.5, preds2) +
facet_wrap(~ Location, labeller = labeller(.multi_line = FALSE)) +
scale_color_manual(values = cols)+
#scale_color_manual(values = c("#323695", "#abd9e9", "#f4a582", "#b2182b"))+
#scale_colour_manual(values = c('green4', 'blue', 'black')) +
theme_bw(base_size = 12, base_family = 'Helvetica') +
ylab(expression(paste("Respiration Rates (Log " *mu* "mol O"[2], " "*g^-1 , " "*hr^-1*")"), sep = " ") )+
xlab('Temperature (ºC)') +
guides(color = FALSE) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(), axis.line = element_line(colour = "black"))
ggsave(filename = "Output/MSPlots/TPCcurves.pdf", device = "pdf", width = 6, height = 6)
data.summary<-Topt_data %>%
group_by(Location) %>% #tells to group by these two factors
dplyr::select(-c(Eh, Th)) %>% # only keep the E, lnc, and Topt metrics
dplyr::summarise_if(is.numeric, list(~mean(.), ~var(.)))
#dplyr::summarise_if(is.numeric, list(~mean(.), ~var(.), ~sd(.)/sqrt(n())))
# colnames(data.summary)[12:16]<-c("E_SE","Eh_SE","lnc_SE","Th_SE","Topt_SE")
# dplyr::summarise(mean=mean(Topt), se=sd(Topt)/sqrt(n()), var = var(Topt)) #calculates mean and s.e.
data.summary
#reorder the sites
data.summary$Location<- factor(data.summary$Location, levels=c('Bart','Ibbet','Corm','Doug','Espi','Botel'))
# bring in the temperature data
#tempdata.insi<-read.csv('HoboLoggersFiles/logger_gal_daily_means.csv')
#tempdata<-read.csv('MUR_sst_gal.csv')
tempdata.insi<-read.csv('HoboLoggersFiles/CSV files/AllTempData.csv')
# # make a date
tempdata.insi$t<-mdy_hm(tempdata.insi$t)
# # add column for day
tempdata.insi$day<-date(tempdata.insi$t)
#Make a plot with the raw data for each site for the last two months
rawplot<-tempdata.insi %>%
mutate(Location = factor(Location, levels = c("Botel","Corm","Espi","Bart")), # we only had in situ temperature data for these 4 sites so they were the only ones included in the analysis
LocationNice = recode(Location,
Botel = "La Botella",
Corm = "Punta Cormorant",
Espi = "Punta Espinosa",
Bart = "Bartolomé")) %>%
filter(Location!="")%>% # remove the empty site
filter(t>max(t)-months(2))%>% # only use the last 30 days
ggplot(aes(x = t, y = temp, group = Location, color = LocationNice))+
geom_line()+
xlab('Date')+
ylab(expression("Temperature " ( degree*C)))+
# geom_dl(aes(label = LocationNice), method = list("last.bumpup", cex = 1))+ # add labels at end of plot
scale_color_manual(values = cols)+
# scale_color_manual(values = c("#323695", "#abd9e9", "#f4a582", "#b2182b"))+
guides(color = FALSE) +
theme_bw()
# labs(colour = "Location")+
# ggsave("Output/MSPlots/TempRaw2.pdf", width = 8, height = 5)
#geom_dl(aes(label = LocationNice), method = list("last.bumpup", cex = 1)) # add labels at end of plot
bplot<-tempdata.insi %>%
mutate(Location = factor(Location, levels = c("Botel","Corm","Espi","Bart")),
LocationNice = recode(Location,
Botel = "La Botella",
Corm = "Punta Cormorant",
Espi = "Punta Espinosa",
Bart = "Bartolomé")) %>%
filter(Location!="")%>% # remove the empty site
filter(t>max(t)-months(2))%>% # only use the last 30 days
ggplot(aes(x = LocationNice, y = temp, fill = LocationNice))+
geom_boxplot()+
# ylab(expression("Temperature " ( degree*C)))+
xlab("")+
ylab("")+
# geom_dl(aes(label = LocationNice), method = list("last.bumpup", cex = 1))+ # add labels at end of plot
# scale_fill_manual(values = c("#323695", "#abd9e9", "#f4a582", "#b2182b"))+
scale_fill_manual(values = cols)+
guides(fill = FALSE) +
theme_bw()+
theme(axis.text.y = element_blank())
rawplot+bplot +plot_annotation(tag_levels = "A")+
ggsave("Output/MSPlots/combinedtemp.pdf", height = 5, width = 10)
# plot the daily maximim over time
Dailymax<-tempdata.insi %>%
filter(Location!="")%>% # remove the empty site
filter(t>max(t)-months(2))%>% # only use the last 30 days
group_by(Location, day) %>%
dplyr::summarise(max = max(temp))
#reorder sites
Dailymax$Location<-factor(Dailymax$Location, levels=c('Bart','Corm','Espi','Botel'), labels = c(" Bartolomé", " Punta Cormorant" ," Punta Espinosa" ," La Botella" ))
Dailymax%>%
ggplot(aes(x = day, y = max, group = Location, color = Location))+
geom_line(lwd = 1)+
xlab('Date')+
ylab(expression("Daily Maximum Temperature "(degree*C)))+
scale_color_manual(values = cols)+
#scale_color_brewer(palette="Set2")+
guides(color = FALSE) +
theme_bw() +
scale_x_date(date_labels = "%b %d", breaks = "2 weeks", limits = c(as.Date("2018-06-15"),as.Date("2018-08-15")+days(20)))+ # expand the x-axis by 5 days
geom_dl(aes(label = Location), method = list("last.bumpup", cex = 1))+ # add labels at end of plot
ggsave(filename = "Output/MSPlots/TempTimeSeries.pdf", width = 7, height = 4)
# take the average daily max for analysis
Dailymax.mean<-Dailymax%>%
group_by(Location)%>%
summarise(meandailymax=mean(max))
# Dailymax$Location<-as.factor(c("Bart","Corm","Espi", "Botel"))
# 90th percentile temperature and other summaries from the raw data
tempsummary<-tempdata.insi %>%
filter(Location!="")%>% # remove the empty site
filter(t>max(t)-months(2))%>% # only use the last 30 days
group_by(Location) %>%
dplyr::summarise(Q90 = quantile(temp, probs=0.95),mean.sst = mean(temp, na.rm=TRUE),
max.sst = max(temp, na.rm=TRUE), var.sst = var(temp, na.rm=TRUE),
range.sst = max(temp, na.rm=TRUE) - mean(temp, na.rm=TRUE),
min = min(temp,na.rm = TRUE))
# left_join(.,Dailymax.mean)
# join with the thermal optimum data
data.summary.all<-left_join(data.summary, tempsummary)%>%
#select(-c("mean.sst","var.sst","range.sst")) %>% # only do mean teperature for now
gather("Parameter", "Metric",-c(Location, Q90, mean.sst, max.sst, var.sst, range.sst, min)) %>%
filter(Location != 'Ibbet', Location != 'Doug') %>%
separate(col = Parameter, into = c("Parameter", "Stat"),sep = "_") # split the stat and the parameter name
#change facot for the stat so that mean and var are capitalized
data.summary.all$Stat<-as.factor(data.summary.all$Stat)
levels(data.summary.all$Stat)<-c("Mean","Variance")
ggplot(data.summary.all, aes(x = mean.sst, y = Metric), group = Parameter)+
geom_point(position="dodge", size=2) +
theme(legend.text=element_text(size=rel(1))) + #makes legend elements larger
#geom_errorbar(aes(ymax=mean+se, ymin=mean-se), position=position_dodge(width=0.9), width=0.1) +
# facet_wrap(~Parameter, scales = 'free', ncol = 2)+
facet_grid(Parameter~ Stat, scale = "free")+
theme_bw()+
ggtitle('Two months of temperature data')
ggplot(data.summary.all, aes(x = range.sst, y = Metric), group = Parameter)+
geom_point(position="dodge", size=2) +
theme(legend.text=element_text(size=rel(1))) + #makes legend elements larger
#geom_errorbar(aes(ymax=mean+se, ymin=mean-se), position=position_dodge(width=0.9), width=0.1) +
facet_wrap(Parameter~ Stat, scale = "free")+
ggtitle('Two months of temperature data')
ggplot(data.summary.all, aes(x = Q90, y = Metric), group = Parameter)+
geom_point(position="dodge", size=2) +
theme(legend.text=element_text(size=rel(1))) + #makes legend elements larger
#geom_errorbar(aes(ymax=mean+se, ymin=mean-se), position=position_dodge(width=0.9), width=0.1) +
facet_wrap(Parameter~ Stat, scale = "free")+
ggtitle('Two months of temperature data')
## Make plots with regression lines
ggplot(data.summary.all, aes(x = Q90, y = Metric, label = Location, col = Location), group = Parameter)+
geom_point(position="dodge", size=2) +
scale_color_manual(values = cols)+
theme(legend.text=element_text(size=rel(1))) + #makes legend elements larger
xlab(expression("95th Percentile Temperature " (degree*C)))+
geom_smooth(method = "lm", se=FALSE, col = 'grey')+
theme_bw()+
facet_wrap(~Parameter+Stat, scale = "free_y", ncol = 2,
strip.position = "left",
labeller = as_labeller(c(E = "E", lnc = "b(Tc)", Topt = "Topt", Mean = "", Variance = "", sep = ""), multi_line = FALSE )
) +
ylab(NULL) +
theme(strip.background = element_blank(),
strip.placement = "outside",
legend.position = "none")+
ggsave(filename = "Output/MSPlots/MetricsVsTemp.pdf", device = "pdf", width = 6, height = 6, useDingbats = FALSE)
#ggtitle('Two months of temperature data (in situ)')
# run stats for each pair and make datatable
results<-data.summary.all %>%
nest(-c(Parameter, Stat)) %>%
mutate(fit = map(data, ~ lm(Metric~Q90, data = .)),
results = map(fit,glance))%>%
unnest_legacy(results)%>%
dplyr::select(-c(data,fit))
#effect sizes
data.summary.all %>%
group_by(Parameter, Stat) %>%
do(allfits = tidy(lm(Metric ~Q90, data = .)))%>%
unnest(allfits)
# look at AICs of all metrics
data.summary.all %>%
pivot_longer(cols = c("Q90":"min"), names_to = "tempparams", values_to = "values") %>%
group_by(Parameter, Stat, tempparams) %>%
do(allfits = glance(lm(Metric ~values, data = .)))%>%
unnest(allfits) %>%
View()
# wriate a data table
write.csv(x = results,file = 'Output/MSPlots/lmresults.csv')
## Make a plot with population TPC curves with bootstrapped confidence internvals
# run nls.multstart on each curve of the original data ####
fit_many <- mydata %>%
group_by(Location) %>%
nest() %>%
mutate(., fit = purrr::map(data, ~nls_multstart(log.rate ~ schoolfield_high(lnc, E, Eh, Th, temp = K, Tc = 20),
data = .x,
iter = 500,
start_lower = c(lnc = -10, E = 0.1, Eh = 0.2, Th = 285),
start_upper = c(lnc = 10, E = 2, Eh = 5, Th = 330),
supp_errors = 'Y',
na.action = na.omit,
lower = c(lnc = -10, E = 0, Eh = 0, Th = 0))))
# run bootstrap over many curves ####
boot_many <- mydata %>%
group_by(Location) %>%
# create 200 bootstrap replicates per curve
do(., boot = modelr::bootstrap(., n = 200, id = 'boot_num')) %>%
# unnest to show bootstrap number, .id
unnest_legacy() %>%
# regroup to include the boot_num
group_by(., Location, boot_num) %>%
# run the model using map()
mutate(fit = map(strap, ~nls_multstart(log.rate ~ schoolfield_high(lnc, E, Eh, Th, temp = K, Tc = 20),
data = data.frame(.),
iter = 50,
start_lower = c(lnc = -10, E = 0.1, Eh = 0.2, Th = 285),
start_upper = c(lnc = 10, E = 2, Eh = 5, Th = 330),
lower = c(lnc=-10, E=0, Eh=0, Th=0),
supp_errors = 'Y')
))
# new data frame for smooth predictions
new_preds <- mydata %>%
do(., data.frame(K = seq(min(.$K), max(.$K), length.out = 150), stringsAsFactors = FALSE))
# get max and min for each curve
max_min <- mydata %>%
group_by(Location) %>%
summarise(., min_K = min(K), max_K = max(K)) %>%
ungroup()
# create smoother predictions for unbootstrapped models
preds_many <- fit_many %>%
unnest_legacy(fit %>% map(augment, newdata = new_preds))
# create smoother predictions for bootstrapped replicates
preds_many <- boot_many %>%
unnest_legacy(fit %>% map(augment, newdata = new_preds)) %>%
ungroup() %>%
# group by each value of K and get quantiles
group_by(., Location, K) %>%
summarise(lwr_CI = quantile(.fitted, 0.025),
upr_CI = quantile(.fitted, 0.975)) %>%
ungroup() %>%
merge(., preds_many, by = c('K', 'Location')) %>%
# merge with max_min to delete predictions outside of the max and min temperatures of each curve
merge(., max_min, by = c('Location')) %>%
group_by(., Location) %>%
filter(., K >= unique(min_K) & K <= unique(max_K)) %>%
rename(., log.rate = .fitted) %>%
ungroup()
# plot predictions
# points = raw data, line = model fit, ribbon = bootstrapped 95% CI per Location
ggplot(mydata, aes(K - 273.15, log.rate, group = Location)) +
  geom_point(alpha = 0.5, size = 0.5, aes(col = Location)) +
  geom_line(data = preds_many) +
  geom_ribbon(aes(ymin = lwr_CI, ymax = upr_CI, fill = Location), data = preds_many, alpha = .5) +
  scale_color_manual(values = cols)+
  scale_fill_manual(values = cols)+
  #scale_color_brewer(palette="Set2")+
  #scale_fill_brewer(palette="Set2") +
  # NOTE(review): theme_bw() is applied twice; the second call (base_size 14)
  # wins, so this first one is redundant.
  theme_bw(base_size = 12, base_family = 'Helvetica') +
  ylab(expression(paste("Respiration Rates (Log " *mu* "mol O"[2], " "*g^-1 , " "*hr^-1*")"), sep = " ") )+
  xlab('Temperature (ºC)') +
  theme_bw(base_size = 14, base_family = 'Helvetica') +
  theme(legend.position = c(0.75, 0.15), legend.text=element_text(size=rel(0.8))) +
  # NOTE(review): appending ggsave() with `+` relies on its side effect of
  # saving the last plot; safer to assign the plot and call ggsave() separately.
  ggsave(filename = 'Output/MSPlots/bootstrappedTPC.pdf', device = 'pdf', width = 6, height = 6)
## Make a map of the sites
# survey-site coordinates (WGS 84 lat/lon, Galapagos)
lats<-c(-0.279722, -1.220667,-0.270361, -1.291444 )
lons<-c(-90.544861, -90.422611, -91.435833, -90.496583)
# leading spaces in the labels push the plotted text away from the points
sitetype<-c(" Bartolomé", " Punta Cormorant" ," Punta Espinosa" ," La Botella")
#colors<-c( "#b2182b", "#abd9e9", "#f4a582","#323695")
colors<-cols
pts = data.frame(lats, lons, sitetype,colors)
# Create SpatialPointsDataFrame from this data, assign WGS 84 projection
spdf <- SpatialPointsDataFrame(coords = data.frame(lons, lats), data = data.frame(sitetype),
                               proj4string = CRS("+init=epsg:4326"))
### DOWNLOAD GALAPAGOS DATA -----
# islands outline as GeoJSON from a public CartoDB SQL endpoint; cached locally
URL <- "https://osm2.cartodb.com/api/v2/sql?filename=public.galapagos_islands&q=select+*+from+public.galapagos_islands&format=geojson&bounds=&api_key="
fil <- "gal.json"
if (!file.exists(fil)) download.file(URL, fil)
gal <- readOGR(fil)
#gal <- gSimplify(gUnaryUnion(spTransform(gal, CRS("+init=epsg:31983")), id=NULL), tol=0.001)
# Match projections between spdf and gal
spdf<-spTransform(spdf, CRS("+init=epsg:31983"))
# Verify data lines up
plot(gal); plot(spdf, add=T)
### GGPLOT MAP -----
# Step 1: Create dfs of both survey sites and gal
sites <- data.frame(spdf)
#gal_map <- fortify(gal)
# Step 2: Change format of sitetype field from character to factor for color mapping
# FIX: the factor levels must match the data strings exactly. The data
# (defined above) holds " Bartolomé" (e-acute), but the original levels used
# " Bartolomè" (e-grave), so factor() silently turned that site into NA.
spdf$sitetype<-factor(spdf$sitetype, levels = c(" Bartolomé", " Punta Cormorant" ," Punta Espinosa" ," La Botella"))
sites$sitetype<-factor(sites$sitetype, levels = c(" Bartolomé", " Punta Cormorant" ," Punta Espinosa" ," La Botella"))
# Generate ggplot
# altitude raster for Ecuador, only used below to obtain a lon/lat CRS object
ecuador <- getData('alt', country='ECU', download = TRUE)
# convert to lat long coords
g_longlat <- spTransform(x = gal, CRSobj = crs(ecuador))
g_longlat<-gSimplify(g_longlat, tol = 0.001)
gal_map<-fortify(g_longlat)
# NOTE(review): this first version of `gg` is never printed or saved -- it is
# overwritten by the "take 2" plot below.
gg<-ggplot()+
  geom_map(map=gal_map, data=gal_map,
           aes(map_id=id),
           color="black", fill="#FFFFFF", size=.5) +
  geom_point(data=pts, aes(x=lons, y=lats, col=sitetype), size = 5)+
  geom_text(data = pts, aes(x = lons, y = lats, label = sitetype), hjust = -0.1, nudge_x = .02, cex = 5)+
  #coord_equal() +
  coord_map()+
  #theme_map()+
  theme(legend.title=element_text(size=20) , legend.text=element_text(size=16),
        legend.position="top", legend.justification = 'center',
        plot.margin=grid::unit(c(0,0,0,0),"mm")) +
  labs(color=NULL) +
  theme(legend.position = "none")+
  scale_color_brewer(palette="Set2")+
  # ggsn::north(data=gal_map, symbol = 1, scale = 0.15, location="topright") +
  ggsn::scalebar(data=gal_map,
                 dist=50, height=0.05, st.size = 3,
                 location="bottomleft", dist_unit = 'km', transform = TRUE, model = "WGS84")
# Save ggplot
## take 2 with new map lat/longs
## (this is the version actually written to disk by ggsave() below)
gg<-ggplot()+
  geom_map(map=gal_map, data=gal_map,
           aes(map_id=id),
           color="black", fill="black", size=.5) +
  geom_point(data=pts, aes(x=lons, y=lats, color = colors), size = 5)+
  # scale_color_manual(values = c("#323695", "#abd9e9", "#b2182b", "#f4a582"))+
  scale_color_manual(values = cols)+
  # geom_text_repel avoids overlapping site labels
  geom_text_repel(data = pts, aes(x = lons, y = lats, label = sitetype),cex = 5, hjust = -0.2, nudge_x = .05)+
  xlim(-93,-88)+
  ylim(-1.5, 1)+
  # coord_equal()
  coord_map()+
  theme_bw()+
  xlab("")+
  ylab("")+
  theme(legend.title=element_text(size=20) , legend.text=element_text(size=14),
        legend.position="top", legend.justification = 'center',
        plot.margin=grid::unit(c(0,0,0,0),"mm")) +
  labs(color=NULL) +
  theme(legend.position = "none")+
  # transform = TRUE tells scalebar the data are in lon/lat (WGS84)
  ggsn::scalebar(data=gal_map,
                 dist=50, height=0.05, st.size = 3,
                 location="bottomleft", dist_unit = 'km', transform = TRUE, model = "WGS84")
ggsave("Output/MSPlots/Galapagos_map.pdf", gg, width=5, height=5)
|
f7f90a5528faf96531ba0c7b337ab44050758ebc | 2764167b5743be62adadc491ec7dfde210e0703d | /man/UTM.xy.Rd | 8e908938cc4989073e734f2d774916730e9b2b01 | [] | no_license | cran/GEOmap | 528a4cbe293211d324405037eb280b415e65f62e | 0149894022496cee8237868b0bb693d00ef01e41 | refs/heads/master | 2023-08-18T14:47:52.021469 | 2023-08-13T12:40:21 | 2023-08-13T13:30:31 | 17,713,753 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,644 | rd | UTM.xy.Rd | \name{UTM.xy}
\alias{UTM.xy}
\alias{utm.wgs84.xy}
\title{Map projection}
\description{
Given UTM map projection parameters and latitude--longitude coordinates,
return the projected x-y values (WGS-84 datum).
}
\usage{
UTM.xy(phideg, lamdeg, PROJ.DATA)
utm.wgs84.xy(phideg, lamdeg, PROJ.DATA)
}
\arguments{
\item{phideg}{Latitude}
\item{lamdeg}{Longitude}
\item{PROJ.DATA}{list of projection parameters}
}
\value{
List
\item{x}{x-coordinate}
\item{y}{y-coordinate}
}
\references{Snyder, J. P., 1987; Map Projections - A Working Manual. U.S. Geological Survey Professional Paper 1395, 383 p.}
\author{Jonathan M. Lees<jonathan.lees.edu>}
\note{
When calling the conversion from LL to XY or vice versa,
convert the lon to 0 to 360. Use RPMG::fmod for this conversion.
This may be rectified in future revisions.
}
\seealso{setPROJ, GLOB.XY, projtype, utm.sphr.xy, UTMzone, plotUTM,
utmbox, DATUMinfo}
\examples{
lat = 40.5
lon = -73.50
lon0 = -75
LON = RPMG::fmod(lon, 360)
wproj = setPROJ(type = 5, LAT0 = 0 , LON0 = lon0 , FE = 0 )
u1 = utm.elps.xy(lat, LON ,wproj )
utm.wgs84.xy(lat, LON ,wproj)
#### also for more general UTM:
### this is the wgs84 projection
wproj1 = setPROJ(type = 8, LAT0 = 0 , LON0 = lon0 , FE = 0 , IDATUM=1 )
UTM.xy(lat, LON,wproj1)
### this is the Clark-1866 (see page 270 in Snyder)
wproj8 = setPROJ(type = 8, LAT0 = 0 , LON0 = lon0 , FE = 0 , IDATUM=8)
UTM.xy(lat, LON,wproj8)
## which is the same as:
uzone = UTMzone(lat, lon)
lon0 = uzone$CEN[2]
wproj = setPROJ(type = 5, LAT0 = 0 , LON0 = lon0 , FE = 500000 )
utm.elps.xy(lat, LON,wproj )
## to see all the Datums, use: DATUMinfo()
}
\keyword{misc}
|
697bfb56176625c36ac5ad6e0c3175b84f490284 | 9e059240cb494f4b43868fac4f6c0416411139a1 | /man/Coordinate_Mapping_gffTB.Rd | 9ca2e1df3b97e81447e30c174a85cd428cf4c993 | [] | no_license | kingyang728/genomicCoordinateConverter | b161448b4d595069aa30d12f6ea1921c661ef583 | 66ac990688137b89c8431b3c91e37adfc572f5fb | refs/heads/master | 2022-10-24T05:09:52.679835 | 2020-06-16T08:56:52 | 2020-06-16T08:56:52 | 272,298,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 630 | rd | Coordinate_Mapping_gffTB.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelperFunction.R
\name{Coordinate_Mapping_gffTB}
\alias{Coordinate_Mapping_gffTB}
\title{Coordinate select function for gff dataframe Table.}
\usage{
Coordinate_Mapping_gffTB(chr, start_pos, end_pos, gff_Table)
}
\arguments{
\item{chr}{sequence name like "chrX"}
\item{start_pos}{genomic start position}
\item{end_pos}{genomic end position}
\item{gff_Table}{gff data frame generated from gff file}
}
\value{
selected dataframe according to chr, genomic start and end coordinate.
}
\description{
Select the rows of a GFF data frame that match the given sequence name
and genomic start/end coordinates.
}
|
3fcfbda948d9bf6a97fd8251f03cd812f1ed6222 | 5b8eaa5e612df2f2383621d86ef1206543d0754d | /aspasiouvasiliki.R | a02cb1782d9ff5062a7174452f93c48bab59c538 | [] | no_license | vickyaspasiou/lesson2 | d5906e4ed5d740b3feb548cf90aaa1ee5412992b | daea8b6546679160479ad3f2c8af4ad3c0fd4232 | refs/heads/master | 2021-01-19T18:00:30.849101 | 2013-11-06T16:05:59 | 2013-11-06T16:05:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 363 | r | aspasiouvasiliki.R | rm(list = ls())
datdir <- getwd()
library(rasta)
plot(1)

## Download GADM administrative boundaries for one country and draw them as a
## styled map.
##   ctr    - ISO country code passed to raster::getData() (default "FR")
##   ctrlvl - GADM administrative level to download (default 2)
## Downloaded boundary files are cached in `datdir` (the working directory).
automap <- function(ctr = "FR", ctrlvl = 2) {
  map <- raster::getData("GADM", country = ctr, level = ctrlvl, path = datdir)
  plot(map)
  plot(map, bg = "dodgerblue", axes = TRUE)
  plot(map, lwd = 3, border = "skyblue", add = TRUE)
  plot(map, col = "green4", add = TRUE)
  grid()
  box()
}  # FIX: this closing brace was missing, which made the whole file unparsable
automap()
|
575a232ff9a9511f85de8249b7bd044adb1772f1 | cc9ccb383be538f3f1c70d361d98d43107028b8c | /man/efron.Rd | 16f36d456b33d22c86b8efb1022fd450dd27588e | [] | no_license | spencerwoody/saFAB | fef16125f0c5db3c060ddc471c40076b5c69090e | 7419eb6277f881ee4edd18512d359647fea880c9 | refs/heads/master | 2020-03-25T21:50:15.221998 | 2020-02-17T17:44:14 | 2020-02-17T17:44:14 | 144,191,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 456 | rd | efron.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/efron.R
\name{efron}
\alias{efron}
\title{Efron}
\usage{
efron(z, nmids = 150, pct = -0.01, pct0 = 0.25, df = 10,
nulltype = "theoretical")
}
\arguments{
\item{z}{}
\item{nmids}{}
\item{pct}{}
\item{pct0}{}
\item{df}{}
\item{nulltype}{}
}
\description{
Fit an empirical-null density model to a vector of z-values in the style of
Efron's local false discovery rate analysis; \code{nulltype} selects a
theoretical or empirically estimated null.
}
\details{
The remaining arguments mirror Efron's \code{locfdr}-style fitting routines
(histogram bins via \code{nmids}, spline degrees of freedom via \code{df});
see the function source for the exact behaviour.
}
\author{
Spencer Woody
}
|
3ca7de40ecc43904a46cf4f871ca3c9fb5227eb4 | b1b6872aeea3c8a1011ca8190b07195acb6dfaf7 | /man/Plot_bar_horizontal.Rd | f8994cb2e2922ca19cc1ea78b510c40d3e588b00 | [] | no_license | DDS-MCSM/group-assignment-team-02 | 9667355729e11d79be110a6e1ee65d96849c7feb | e9a5572eb4af7f60c049cf474ef36862da003565 | refs/heads/master | 2020-04-10T10:50:33.890351 | 2019-01-13T22:50:12 | 2019-01-13T22:50:12 | 160,977,217 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 420 | rd | Plot_bar_horizontal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/code.R
\name{Plot_bar_horizontal}
\alias{Plot_bar_horizontal}
\title{Plot all the malware occurrences per country}
\usage{
Plot_bar_horizontal(x, yg)
}
\arguments{
\item{x}{data set of malware occurrences per country to plot (see function definition)}
\item{yg}{year (group) used to subset the data (see function definition)}
}
\value{
Bar plot showing the ranswomware by country
}
\description{
This function plots the occurrences of all the malware listed in the csv file per
country
}
|
a6145007c454a8bce736ec222985a8b673b26b36 | 025cd9c77cbefc7367df61ab8b45ff3259902815 | /R/clustering.R | cdbb900939a8a3623fb28f27a29eee05d5ea6ea1 | [] | no_license | yspreen/NbClust | 9b59eb928f4d8aaba9c24a67f387c2aa7496eec8 | e82043569b5cf104382125cb1bcd1515a36d49d5 | refs/heads/master | 2020-12-29T08:59:10.611521 | 2020-02-05T22:58:37 | 2020-02-05T22:58:37 | 238,546,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,135 | r | clustering.R | clustering = function(method, game, nc, max_nc, hc, nn) {
  # init values
  # cl0/cl1/cl2 hold partitions with nc-1 / nc / nc+1 clusters, clmax the
  # partition with max_nc clusters; clall / clall1 bind consecutive
  # partitions column-wise for index computations downstream.
  cl0 = 0
  cl1 = 0
  cl2 = 0
  clall = 0
  clall1 = 0
  clmax = 0
  # method 8 = k-means partitioning of `game`; any other method cuts the
  # precomputed hierarchical tree `hc` instead.
  if (method == 8)
  {
    # the seed is reset before every kmeans() call for reproducible runs
    set.seed(1)
    cl2 = kmeans(game,nc+1)$cluster
    set.seed(1)
    clmax = kmeans(game,max_nc)$cluster
    if (nc > 2)
    {
      set.seed(1)
      cl1 = kmeans(game,nc)$cluster
      clall = cbind(cl1, cl2)
      set.seed(1)
      cl0 = kmeans(game,nc-1)$cluster
      clall1 = cbind(cl0, cl1, cl2)
    }
    if (nc == 2)
    {
      set.seed(1)
      cl1 = kmeans(game,nc)$cluster
      clall = cbind(cl1, cl2)
      # no nc-1 = 1-cluster kmeans possible: everything is one cluster
      cl0 = rep(1,nn)
      clall1 = cbind(cl0, cl1, cl2)
    }
    if (nc == 1)
    {
      # NOTE(review): the message says "higher than 2" but this guard only
      # fires for nc == 1 -- nc == 2 is in fact accepted above.
      stop("Number of clusters must be higher than 2")
    }
  } else {
    cl1 = cutree(hc, k=nc)
    cl2 = cutree(hc, k=nc+1)
    clall = cbind(cl1, cl2)
    clmax = cutree(hc, k=max_nc)
    if (nc >= 2)
    {
      cl0 = cutree(hc, k=nc-1)
      clall1 = cbind(cl0, cl1, cl2)
    }
    if (nc == 1)
    {
      # hierarchical path tolerates nc == 1: the nc-1 partition is undefined
      cl0 = rep(NA,nn)
      clall1 = cbind(cl0, cl1, cl2)
    }
  }
  return(list(cl0=cl0, cl1=cl1, cl2=cl2, clall=clall, clall1=clall1, clmax=clmax))
} |
cf17cadbe5e316c80c2cfdabb9d70183be752523 | 80a638d19c8ca136b942b1ed449d3193a503621d | /cachematrix.R | dbee94806b9b1bd31a48708170d5c9fa23a1c555 | [] | no_license | ricrogz/ProgrammingAssignment2 | a20d7610fd85cd42bbb26de0d10da3bf3e494ab7 | 94cec42480b7675e7905f04a5e821a066455831e | refs/heads/master | 2020-05-20T19:30:38.635377 | 2014-05-15T10:56:06 | 2014-05-15T10:56:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,378 | r | cachematrix.R | ## Functions to build a matrix with cached inverse.
##
## Usage (suppose A is an invertible matrix):
##
## To create the cached object:
## Acached <- makeCacheMatrix(A)
##
## To retrieve the inverse:
## Ainverse <- cacheSolve(Acached)
##
## See descriptions of each function below.
## Implementation tested on magic square matrices
## (see package "magic") of dimensions 3 and 5
## (the dimension-4 magic square is not invertible).
## MAKECACHEMATRIX:
## Wrap a matrix in a small closure-based "object": a list of four accessor
## functions that share one environment. Besides the matrix itself, that
## environment holds a cache slot for the inverse, which starts out as NULL.
## The inverse is never computed here -- it is filled in lazily by
## cacheSolve() the first time it is actually needed (and maybe never).
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    # Replace the stored matrix and invalidate any previously cached inverse.
    set <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    # Return the stored matrix.
    get <- function() x
    # Remember a freshly computed inverse.
    setinv <- function(inv) cached_inverse <<- inv
    # Return the cached inverse (NULL when nothing has been cached yet).
    getinv <- function() cached_inverse
    list(set = set, get = get,
         setinv = setinv, getinv = getinv)
}
## CACHESOLVE:
## Return the inverse of the matrix held by a makeCacheMatrix() object.
## Note that the argument is the cache object, not a plain matrix.
## On a cache miss the inverse is computed once via solve() and stored in
## the object; every later call simply hands back the stored copy.
cacheSolve <- function(x, ...) {
    inverse <- x$getinv()
    if (!is.null(inverse)) {
        # cache hit: nothing left to compute
        return(inverse)
    }
    # cache miss: tell the user, compute the inverse, and remember it
    message("no cached inverse found, calculating")
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
    inverse
}
|
2510d1daab68c59449c13e3fc0ef7041a79bd29a | 491bddbfa8fbe8e3bd1a47dc2e4527842c46e5e7 | /plot1.R | a5e7b7d5ff34785d38865feac86003cb385afc9e | [] | no_license | mahimagupta29/ExData_Plotting1 | fed398e1c7381db995298bd4e8afe173ecda1d18 | c076c55a7c6413aeaf46b90d4c8d65756093841c | refs/heads/master | 2022-11-11T15:45:02.571759 | 2020-06-21T23:05:41 | 2020-06-21T23:05:41 | 273,972,832 | 0 | 0 | null | 2020-06-21T19:45:08 | 2020-06-21T19:45:07 | null | UTF-8 | R | false | false | 1,205 | r | plot1.R |
## Aim of this function is to
## 1. read the household_power_consumption.txt file
## 2. subset for data taken from 2 days: 2007-02-01 and 2007-02-02
## 3. generate a histogram of global active power(kilowatts)
## Assumes household_power_consumption.txt file located in working dir
## read data
power_consumption_data <- read.table("./household_power_consumption.txt", stringsAsFactors = FALSE, header = TRUE, sep =";" )
power_consumption_data
## change class of all columns to correct class
power_consumption_data$Date <- as.Date(power_consumption_data$Date, format="%d/%m/%Y")
power_consumption_data$Global_active_power <- as.numeric(power_consumption_data$Global_active_power)
## subset data from 2007-02-01 and 2007-02-02
subsetdata <- subset(power_consumption_data, Date == "2007-02-01" | Date =="2007-02-02")
## plot histogram of global active power for those 2 days
# Plot must be saved as PNG files of 480x480 pixels
png("plot1.png", width=480, height=480, units = "px")
hist(subsetdata$Global_active_power, col="red", border="black", main ="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
dev.off()
|
d698d1af5c3a31525cc361596cd67bb890a0e5c8 | 434632808778d021126f5db3acb10e571ca39769 | /tumors.expression/tumors.expression.DIA.public.R | 160a164b94861a04105e4aedc0c6bdc76d268b7f | [] | no_license | FraRol/proteomics.data.analysis.FGFR2.project.Zingg.et.al | 839c4f3c01bf2887843d3db8839ef206f8cbd802 | 9031356003d0a5c943bbc8e31d192d279aaeed2d | refs/heads/main | 2023-04-08T11:50:22.826599 | 2022-06-08T09:34:43 | 2022-06-08T09:34:43 | 450,564,851 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38,321 | r | tumors.expression.DIA.public.R | #_______________________________________________________________________________________________________________________
# 20.01.2022
#
# Project: proteomics.truncated.FGFR2.is.oncogene.cancer.Zingg.et.al
#
# Script name: tumors.expression.DIA.public
#
# Purpose of script: analysis protein expression data tumors truncated FGFR2 is oncogene cancer project Zingg et al
#
# Author: Frank Rolfs
#
# Notes:
#
#
#_______________________________________________________________________________________________________________________
### set working directory
try(setwd(dirname(rstudioapi::getActiveDocumentContext()$path)))
getwd()
### load packages
library(tidyverse)
library(data.table)
library(pbapply)
library(ComplexHeatmap)
library(circlize)
library(viridis)
library(reshape2)
library(splitstackshape)
library(readxl)
library(scico)
library(forcats)
library(limma)
library(cmapR)
library(gtools)
library(paletteer)
###################################################################################################################################
##############################################################################################################################################################
### load expresion data tumors
load("Zingg.tumors.expression.Spectronaut.report.long.Rdata")
glimpse(DZ.tumors.expr_V2) #188802
length(unique(DZ.tumors.expr_V2$PG.Genes)) #6757
length(unique(DZ.tumors.expr_V2$PG.ProteinGroups)) #6956
# changes for sample names
DZ.tumors.expr_V2 <- mutate(DZ.tumors.expr_V2, Condition=R.FileName) #
glimpse(DZ.tumors.expr_V2)
# select less columns: PG level only
DZ.tumors.expr_V2 <- select(DZ.tumors.expr_V2, Condition, contains("PG")) %>% distinct()
glimpse(DZ.tumors.expr_V2)
nrow(DZ.tumors.expr_V2) #188802
#detect PG.ProteinGroups with "CON__" and remove them
# CON entries do not have data like PG.Quantity, or Q.value ...
# FIX: str_detect() is already vectorised, so the per-element pbsapply() was
# redundant; this vectorised ifelse() yields the identical "CON"/"ok" column.
DZ.tumors.expr_V2$contamination <- ifelse(str_detect(DZ.tumors.expr_V2$PG.ProteinGroups, "CON__"), "CON", "ok")
table(DZ.tumors.expr_V2$contamination) #712 CON
DZ.tumors.expr_V2.CON__ <- filter(DZ.tumors.expr_V2, contamination == "CON" )
nrow(DZ.tumors.expr_V2.CON__) #712
glimpse(DZ.tumors.expr_V2.CON__)
DZ.tumors.expr_V2.2 <- filter(DZ.tumors.expr_V2, contamination == "ok" )
nrow(DZ.tumors.expr_V2.2) #188090
glimpse(DZ.tumors.expr_V2.2)
#quick overview number identifications
print(DZ.tumors.expr_V2.2 %>% group_by(Condition) %>% summarize(number.identifications=n()), n=32)
# get unique PG.ProteinAccessions => needed for long format to wide format transformation step
unique.PG.ProteinGroups.after.quality.filters.V2 <- unique(DZ.tumors.expr_V2.2$PG.ProteinGroups)
glimpse(unique.PG.ProteinGroups.after.quality.filters.V2) #6916
#### long format to wide format spread
# one row per protein group, one intensity column per raw file / Condition
# (NOTE: spread() is superseded by tidyr::pivot_wider(); kept as-is here)
DZ.tumors.expr_V2.2.PG.wide <- spread(DZ.tumors.expr_V2.2, key=Condition , value = PG.Quantity)
glimpse(DZ.tumors.expr_V2.2.PG.wide) #6916 obs
length(unique(DZ.tumors.expr_V2.2.PG.wide$PG.ProteinGroups)) #6916
### change sample names
# map raw-file letter codes (A..AF) to mouse tumor Fgfr2 variant groups
#sample overview
# A Fgfr2
# B Fgfr2-E18-C2
# C Fgfr2-dE18
# D Fgfr2-Bicc1
# E Fgfr2-dE18-Bicc1
# F Fgfr2-Ate1
# G Fgfr2-dE18-Ate1
# H Fgfr2-Tacc2
# I Fgfr2-dE18-Tacc2
# J Fgfr2-dE18-IGR1
# K Fgfr2-dE18-IGR2
# L Fgfr2-E18-C3
# M Fgfr2-E18-C4
# N Fgfr2 2 1.79
# O Fgfr2-dE18
# P Fgfr2-Bicc1
# Q Fgfr2-dE18-Bicc1
# R Fgfr2-Ate1
# S Fgfr2-dE18-Ate1
# T Fgfr2-Tacc2
# U Fgfr2-dE18-Tacc2
# V Fgfr2-dE18-IGR1
# W Fgfr2-dE18-IGR2
# X Fgfr2-E18-C2
# Y Fgfr2-E18-C3
# Z Fgfr2-E18-C4
# AA Fgfr2-dE18
# AB Fgfr2-Bicc1
# AC Fgfr2-dE18-Bicc1
# AD Fgfr2-Tacc2
# AE Fgfr2-dE18-Tacc2
# AF Fgfr2-E18-C2
# load Zingg tumor expression DIA sample information
load("Zingg.Fgfr2.tumors.sample.info.Rdata")
glimpse(Zingg.Fgfr2.tumors.sample.info)
colnames(DZ.tumors.expr_V2.2.PG.wide)
# rename columns: 18 protein-group annotation columns first, then the 32
# sample columns in the alphabetical order produced by spread()
# (sample names follow the pattern <letter code>_<sample number>_<variant>)
colnames(DZ.tumors.expr_V2.2.PG.wide)<-c("PG.Qvalue", "PG.IsSingleHit", "PG.FastaHeaders",
                                         "PG.Genes", "PG.Organisms", "PG.ProteinAccessions",
                                         "PG.FastaFiles", "PG.ProteinDescriptions", "PG.NrOfStrippedSequencesMeasured",
                                         "PG.NrOfModifiedSequencesMeasured", "PG.NrOfPrecursorsMeasured", "PG.ProteinGroups",
                                         "PG.CellularComponent", "PG.BiologicalProcess", "PG.MolecularFunction",
                                         "PG.ProteinNames", "PG.UniProtIds", "contamination",
                                         "A_1_Fgfr2", "AA_6_Fgfr2-dE18", "AB_9_Fgfr2-Bicc1", "AC_12_Fgfr2-dE18-Bicc1", "AD_19_Fgfr2-Tacc2", "AE_22_Fgfr2-dE18-Tacc2",
                                         "AF_28_Fgfr2-E18-C2", "B_3_Fgfr2-E18-C2", "C_4_Fgfr2-dE18", "D_7_Fgfr2-Bicc1", "E_10_Fgfr2-dE18-Bicc1", "F_13_Fgfr2-Ate1",
                                         "G_15_Fgfr2-dE18-Ate1", "H_17_Fgfr2-Tacc2", "I_20_Fgfr2-dE18-Tacc2", "J_23_Fgfr2-dE18-IGR1", "K_25_Fgfr2-dE18-IGR2", "L_29_Fgfr2-E18-C3",
                                         "M_31_Fgfr2-E18-C4", "N_2_Fgfr2", "O_5_Fgfr2-dE18", "P_8_Fgfr2-Bicc1", "Q_11_Fgfr2-dE18-Bicc1", "R_14_Fgfr2-Ate1",
                                         "S_16_Fgfr2-dE18-Ate1", "T_18_Fgfr2-Tacc2", "U_21_Fgfr2-dE18-Tacc2", "V_24_Fgfr2-dE18-IGR1", "W_26_Fgfr2-dE18-IGR2", "X_27_Fgfr2-E18-C2",
                                         "Y_30_Fgfr2-E18-C3", "Z_32_Fgfr2-E18-C4"
)
glimpse(DZ.tumors.expr_V2.2.PG.wide)
### reorder samples
# bring the 32 intensity columns into biological group order: Fgfr2,
# Fgfr2-Ate1, -Bicc1, -Tacc2, then the dE18 variants, IGR and E18 clones
DZ.tumors.expr_V2.2.PG.wide <- DZ.tumors.expr_V2.2.PG.wide %>% select(PG.Qvalue :contamination,
                                                               "A_1_Fgfr2",
                                                               "N_2_Fgfr2",
                                                               "F_13_Fgfr2-Ate1",
                                                               "R_14_Fgfr2-Ate1",
                                                               "D_7_Fgfr2-Bicc1",
                                                               "P_8_Fgfr2-Bicc1",
                                                               "AB_9_Fgfr2-Bicc1",
                                                               "H_17_Fgfr2-Tacc2",
                                                               "T_18_Fgfr2-Tacc2",
                                                               "AD_19_Fgfr2-Tacc2",
                                                               "C_4_Fgfr2-dE18",
                                                               "O_5_Fgfr2-dE18",
                                                               "AA_6_Fgfr2-dE18",
                                                               "G_15_Fgfr2-dE18-Ate1",
                                                               "S_16_Fgfr2-dE18-Ate1",
                                                               "E_10_Fgfr2-dE18-Bicc1",
                                                               "Q_11_Fgfr2-dE18-Bicc1",
                                                               "AC_12_Fgfr2-dE18-Bicc1",
                                                               "I_20_Fgfr2-dE18-Tacc2",
                                                               "U_21_Fgfr2-dE18-Tacc2",
                                                               "AE_22_Fgfr2-dE18-Tacc2",
                                                               "J_23_Fgfr2-dE18-IGR1",
                                                               "V_24_Fgfr2-dE18-IGR1",
                                                               "K_25_Fgfr2-dE18-IGR2",
                                                               "W_26_Fgfr2-dE18-IGR2",
                                                               "B_3_Fgfr2-E18-C2",
                                                               "X_27_Fgfr2-E18-C2",
                                                               "AF_28_Fgfr2-E18-C2",
                                                               "L_29_Fgfr2-E18-C3",
                                                               "Y_30_Fgfr2-E18-C3",
                                                               "M_31_Fgfr2-E18-C4",
                                                               "Z_32_Fgfr2-E18-C4" )
glimpse(DZ.tumors.expr_V2.2.PG.wide)
### boxplot and density plot samples to check normalization #####################################################################################
# QC: per-sample distributions of log2 intensities should be comparable
data.for.boxplot <- DZ.tumors.expr_V2.2.PG.wide %>% select(A_1_Fgfr2 : `Z_32_Fgfr2-E18-C4`)
data.for.boxplot <- log2(data.for.boxplot)
glimpse(data.for.boxplot)
# reshape for plot
melted.data.for.boxplot <- reshape2::melt(data.for.boxplot)
glimpse(melted.data.for.boxplot)
unique(melted.data.for.boxplot$variable)
#box plot
ggplot(data = melted.data.for.boxplot)+
  geom_boxplot(mapping = aes(x= variable, y = value, fill=variable), outlier.size = 0.5, notch = TRUE, notchwidth = 0.1)+
  scale_fill_viridis(discrete=T, option="inferno")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1.0, vjust = 0.5))+
  theme(legend.position="none", legend.justification = "center")+
  ggtitle("mouse tumors expression DIA") +
  theme(plot.title = element_text(hjust = 0.5, face= "bold"))+
  theme(axis.text.x = element_text(face= "plain", colour="black", size=14))+
  theme(axis.text.y = element_text(face= "plain", colour="black", size=14))+
  xlab(NULL)+
  ylab("log2(Int.)")
# density plot
glimpse(melted.data.for.boxplot)
ggplot(melted.data.for.boxplot, aes(x=value, colour = variable) ) +
  geom_density() +
  scale_color_viridis(discrete=T, option="inferno")+
  theme(legend.position="bottom")+
  ggtitle("mouse tumors expression DIA \n data distribution") +
  theme(plot.title = element_text(hjust = 0.5, face= "bold"))+
  theme(axis.text.x = element_text(face= "plain", colour="black", size=14))+
  theme(axis.text.y = element_text(face= "plain", colour="black", size=14))
### count identifications per sample ###################################################################################################################################
glimpse(DZ.tumors.expr_V2.2.PG.wide)
colnames(DZ.tumors.expr_V2.2.PG.wide)
# Columns 19:50 hold the 32 sample intensity columns (1:18 are protein-group
# annotation columns). Count measured values (non-NA) and missing values (NA)
# per sample.
# FIX: vectorised replacement for the original grow-with-bind_rows() for-loop
# (which also carried an unused `sample.name.count` and per-iteration debug
# prints); the resulting tibble is identical.
sample.intensity.cols <- DZ.tumors.expr_V2.2.PG.wide[, 19:50]
result.EXPR.count.LFQ <- tibble(
  sample = colnames(sample.intensity.cols),
  identifications = vapply(sample.intensity.cols, function(v) sum(!is.na(v)), integer(1)),
  MVs = vapply(sample.intensity.cols, function(v) sum(is.na(v)), integer(1))
)
print(result.EXPR.count.LFQ, n=32)
mean(result.EXPR.count.LFQ$identifications) #5877.406
#add group information to count results
# one group label per sample, in the column order established above
result.EXPR.count.LFQ$variant.group <- c("Fgfr2", "Fgfr2", "Fgfr2-Ate1", "Fgfr2-Ate1", "Fgfr2-Bicc1", "Fgfr2-Bicc1", "Fgfr2-Bicc1", "Fgfr2-Tacc2",
                                         "Fgfr2-Tacc2", "Fgfr2-Tacc2", "Fgfr2-dE18", "Fgfr2-dE18", "Fgfr2-dE18", "Fgfr2-dE18-Ate1", "Fgfr2-dE18-Ate1", "Fgfr2-dE18-Bicc1",
                                         "Fgfr2-dE18-Bicc1", "Fgfr2-dE18-Bicc1", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-IGR1", "Fgfr2-dE18-IGR1", "Fgfr2-dE18-IGR2",
                                         "Fgfr2-dE18-IGR2", "Fgfr2-E18-C2", "Fgfr2-E18-C2", "Fgfr2-E18-C2", "Fgfr2-E18-C3", "Fgfr2-E18-C3", "Fgfr2-E18-C4", "Fgfr2-E18-C4")
result.EXPR.count.LFQ
### plot numbers expression
# bar plot of identifications per sample, colored by variant group;
# dashed vertical lines (geom_vline) separate the variant groups
ggplot(data = result.EXPR.count.LFQ )+
  geom_col(aes(x=sample, y=identifications, fill=variant.group), color="black")+
  theme(legend.position="none") +
  scale_x_discrete(limits= c( "A_1_Fgfr2",
                              "N_2_Fgfr2",
                              "F_13_Fgfr2-Ate1",
                              "R_14_Fgfr2-Ate1",
                              "D_7_Fgfr2-Bicc1",
                              "P_8_Fgfr2-Bicc1",
                              "AB_9_Fgfr2-Bicc1",
                              "H_17_Fgfr2-Tacc2",
                              "T_18_Fgfr2-Tacc2",
                              "AD_19_Fgfr2-Tacc2",
                              "C_4_Fgfr2-dE18",
                              "O_5_Fgfr2-dE18",
                              "AA_6_Fgfr2-dE18",
                              "G_15_Fgfr2-dE18-Ate1",
                              "S_16_Fgfr2-dE18-Ate1",
                              "E_10_Fgfr2-dE18-Bicc1",
                              "Q_11_Fgfr2-dE18-Bicc1",
                              "AC_12_Fgfr2-dE18-Bicc1",
                              "I_20_Fgfr2-dE18-Tacc2",
                              "U_21_Fgfr2-dE18-Tacc2",
                              "AE_22_Fgfr2-dE18-Tacc2",
                              "J_23_Fgfr2-dE18-IGR1",
                              "V_24_Fgfr2-dE18-IGR1",
                              "K_25_Fgfr2-dE18-IGR2",
                              "W_26_Fgfr2-dE18-IGR2",
                              "B_3_Fgfr2-E18-C2",
                              "X_27_Fgfr2-E18-C2",
                              "AF_28_Fgfr2-E18-C2",
                              "L_29_Fgfr2-E18-C3",
                              "Y_30_Fgfr2-E18-C3",
                              "M_31_Fgfr2-E18-C4",
                              "Z_32_Fgfr2-E18-C4"))+
  scale_fill_viridis(discrete=T, option="inferno")+
  theme(axis.text.x = element_text(angle = 90, hjust = 1.0, vjust = 0.5))+
  ylab("number PS")+
  xlab(NULL)+
  scale_y_continuous(breaks=seq(from=0, to=6600, by=500), limits = c(0,6600))+
  ggtitle("mouse tumors - identifications per sample" )+
  theme(plot.title = element_text(hjust = 0.5, face= "bold"))+ #center title
  theme(axis.text.x = element_text(face= "plain", colour="black", size=14))+
  theme(axis.text.y = element_text(face= "plain", colour="black", size=14))+
  geom_vline(xintercept = c(2.5, 4.5, 7.5, 10.5, 13.5, 15.5, 18.5, 21.5, 23.5, 25.5, 28.5, 30.5) , size=0.25, linetype = 2)
#############################################################################################################################################################
#############################################################################################################################################################
### correlation EXPRESSION proteins
glimpse(DZ.tumors.expr_V2.2.PG.wide)
#select columns and log2 transform
DF_for_correlation_plot.EXPR.Prot <- DZ.tumors.expr_V2.2.PG.wide %>% select(A_1_Fgfr2 : `Z_32_Fgfr2-E18-C4`)
DF_for_correlation_plot.EXPR.Prot <- log2(DF_for_correlation_plot.EXPR.Prot)
glimpse(DF_for_correlation_plot.EXPR.Prot)
#prepare better sample names
# strip the letter/number prefixes: keep the variant (3rd "_"-field) and
# append a within-group replicate index
tum.expr.name.change <- tibble(orig.col.name = colnames(DF_for_correlation_plot.EXPR.Prot))
tum.expr.name.change$new.col.name.1 <- pbsapply(tum.expr.name.change$orig.col.name, function(x) unlist(str_split(string=x, pattern="_"))[3] )
tum.expr.name.change$new.col.name.2 <- paste0(tum.expr.name.change$new.col.name.1, ".", c(1, 2, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 1, 2, 3 , 1, 2, 1, 2))
tum.expr.name.change
colnames(DF_for_correlation_plot.EXPR.Prot) <- tum.expr.name.change$new.col.name.2
##check NA rows
# NOTE(review): na.rows is only inspected via table() and then dropped again;
# no row filtering happens here
DF_for_correlation_plot.EXPR.Prot$na.rows <- pbapply(DF_for_correlation_plot.EXPR.Prot, 1, function(x) sum(is.na(x)))
glimpse(DF_for_correlation_plot.EXPR.Prot)
table(DF_for_correlation_plot.EXPR.Prot$na.rows)
DF_for_correlation_plot.EXPR.Prot <- DF_for_correlation_plot.EXPR.Prot %>% select(-na.rows )
glimpse(DF_for_correlation_plot.EXPR.Prot)
colnames(DF_for_correlation_plot.EXPR.Prot)
### correlation
# pairwise Pearson correlations; NA handling via "na.or.complete"
corr2 <- cor(DF_for_correlation_plot.EXPR.Prot, method = "pearson", use = "na.or.complete")
glimpse(corr2)
head(corr2)
min(corr2) #0.3725943
max(corr2)
round(min(corr2), 2)
### prepare heatmap annotation
annot_df_for_heatmap <- data.frame(samples = colnames(DF_for_correlation_plot.EXPR.Prot),
                                   group = c("Fgfr2", "Fgfr2", "Fgfr2-Ate1", "Fgfr2-Ate1", "Fgfr2-Bicc1", "Fgfr2-Bicc1", "Fgfr2-Bicc1", "Fgfr2-Tacc2",
                                             "Fgfr2-Tacc2", "Fgfr2-Tacc2", "Fgfr2-dE18", "Fgfr2-dE18", "Fgfr2-dE18", "Fgfr2-dE18-Ate1", "Fgfr2-dE18-Ate1", "Fgfr2-dE18-Bicc1",
                                             "Fgfr2-dE18-Bicc1", "Fgfr2-dE18-Bicc1", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-Tacc2", "Fgfr2-dE18-IGR1", "Fgfr2-dE18-IGR1", "Fgfr2-dE18-IGR2",
                                             "Fgfr2-dE18-IGR2", "Fgfr2-E18-C2", "Fgfr2-E18-C2", "Fgfr2-E18-C2", "Fgfr2-E18-C3", "Fgfr2-E18-C3", "Fgfr2-E18-C4", "Fgfr2-E18-C4") #
)
annot_df_for_heatmap
# make shorter annotation data frame
annot_df_for_heatmap.short <- data.frame(
  group = annot_df_for_heatmap$group
)
glimpse(annot_df_for_heatmap.short)
annot_df_for_heatmap.short
# define colors for annotation bar
annot.colors = list(group = c("Fgfr2"="blue",
                              "Fgfr2-Ate1"="white",
                              "Fgfr2-Bicc1"="black",
                              "Fgfr2-Tacc2"="deeppink",
                              "Fgfr2-dE18"="red",
                              "Fgfr2-dE18-Ate1"="lavender",
                              "Fgfr2-dE18-Bicc1"="grey40",
                              "Fgfr2-dE18-Tacc2"="lightskyblue",
                              "Fgfr2-dE18-IGR1"="gold1",
                              "Fgfr2-dE18-IGR2"="gold2",
                              "Fgfr2-E18-C2"="chocolate2",
                              "Fgfr2-E18-C3"="chocolate3",
                              "Fgfr2-E18-C4"="chocolate4"))
annot.colors
### Create the heatmap annotation
# NOTE(review): annot_df_for_heatmap.short has no `treatment` column, so
# `$treatment` evaluates to NULL here -- verify this is intended.
ha.fr <- HeatmapAnnotation(group=annot_df_for_heatmap.short$group,
                           treatment=annot_df_for_heatmap.short$treatment,
                           col = annot.colors,
                           annotation_legend_param = list(grid_height = unit(8, "mm"))
)
ha.fr
#prepare heatmap colors
FR.heatmap.colors.2 <-colorRamp2(c(min(corr2), 1.0), c("grey90", "grey10"))
#plot
#pdf("DZ.tumors.expr.DIA.correlation.pdf", width=25/2.54, height=20/2.54, useDingbats=FALSE)
# rows/columns clustered on correlation distance (1 - r)/2 with Ward linkage
Heatmap(corr2,
        name = "corr.coeff",
        top_annotation = ha.fr,
        col = FR.heatmap.colors.2,
        clustering_distance_rows = function(m) as.dist((1-m)/2),
        clustering_method_rows = "ward.D2",
        clustering_distance_columns = function(m) as.dist((1-m)/2),
        clustering_method_columns = "ward.D2",
        column_dend_height = unit(40, "mm"),
        row_dend_width = unit(40, "mm"),
        heatmap_legend_param = list(ncol = 1, nrow = 1, legend_height = unit(60, "mm"))
)
#dev.off()
##################################################################################################################################
##################################################################################################################################
### multiple candidates plot to check expression
# multiple heatmap plot: ###################################################################################################################################
# multiple heatmap plot: ###################################################################################################################################
### define expression candidates to plot A ###
# Fgfr2 itself plus its fusion partners (UniProt mouse protein names)
mtpl.candidates <- c("FGFR2_MOUSE", "BICC1_MOUSE", "TACC2_MOUSE", "ATE1_MOUSE")
###filter for candidates
EXPR.mtplplot.data <- DZ.tumors.expr_V2.2.PG.wide %>% filter(PG.ProteinNames %in% mtpl.candidates)
glimpse(EXPR.mtplplot.data)
### Reorder data according to order candidates, average technical replicates, log2 transform, reshape for plot
# left_join against the candidate vector fixes the row order to mtpl.candidates
EXPR.mtplplot.data <- left_join(data.frame(PG.ProteinNames = mtpl.candidates),
                                EXPR.mtplplot.data,
                                by = "PG.ProteinNames")
EXPR.mtplplot.data.2A <- EXPR.mtplplot.data %>% select(PG.Genes, PG.ProteinNames,PG.UniProtIds
)
glimpse(EXPR.mtplplot.data.2A)
# intensity columns only, in biological group order
EXPR.mtplplot.data.2B <- EXPR.mtplplot.data %>% select(
  "A_1_Fgfr2",
  "N_2_Fgfr2",
  "F_13_Fgfr2-Ate1",
  "R_14_Fgfr2-Ate1",
  "D_7_Fgfr2-Bicc1",
  "P_8_Fgfr2-Bicc1",
  "AB_9_Fgfr2-Bicc1",
  "H_17_Fgfr2-Tacc2",
  "T_18_Fgfr2-Tacc2",
  "AD_19_Fgfr2-Tacc2",
  "C_4_Fgfr2-dE18",
  "O_5_Fgfr2-dE18",
  "AA_6_Fgfr2-dE18",
  "G_15_Fgfr2-dE18-Ate1",
  "S_16_Fgfr2-dE18-Ate1",
  "E_10_Fgfr2-dE18-Bicc1",
  "Q_11_Fgfr2-dE18-Bicc1",
  "AC_12_Fgfr2-dE18-Bicc1",
  "I_20_Fgfr2-dE18-Tacc2",
  "U_21_Fgfr2-dE18-Tacc2",
  "AE_22_Fgfr2-dE18-Tacc2",
  "J_23_Fgfr2-dE18-IGR1",
  "V_24_Fgfr2-dE18-IGR1",
  "K_25_Fgfr2-dE18-IGR2",
  "W_26_Fgfr2-dE18-IGR2",
  "B_3_Fgfr2-E18-C2",
  "X_27_Fgfr2-E18-C2",
  "AF_28_Fgfr2-E18-C2",
  "L_29_Fgfr2-E18-C3",
  "Y_30_Fgfr2-E18-C3",
  "M_31_Fgfr2-E18-C4",
  "Z_32_Fgfr2-E18-C4")
glimpse(EXPR.mtplplot.data.2B)
#log2 data transformation
EXPR.mtplplot.data.2D <- log2(EXPR.mtplplot.data.2B)
glimpse(EXPR.mtplplot.data.2D)
# combine data frames
EXPR.mtplplot.data.3 <- bind_cols(EXPR.mtplplot.data.2A, EXPR.mtplplot.data.2D)
glimpse(EXPR.mtplplot.data.3)
nrow(EXPR.mtplplot.data.3)
# prepare additional candidate names
EXPR.mtplplot.data.3$ProteinNames_Genes_UniprotID <- paste0(EXPR.mtplplot.data.3$PG.Genes, "_", EXPR.mtplplot.data.3$PG.UniProtIds)
glimpse(EXPR.mtplplot.data.3)
# define order in plot
order.in.plot <- EXPR.mtplplot.data.3$ProteinNames_Genes_UniprotID
order.in.plot
order.in.plot2 <- EXPR.mtplplot.data.3$PG.Genes
order.in.plot2
# prepare additional sample names and order
# short display names: variant (3rd "_"-field) + within-group replicate index
name.change.DZ.tum.expr.dia <- tibble(orig.col.name = colnames(EXPR.mtplplot.data.3 %>% select(-PG.Genes, -PG.ProteinNames, -PG.UniProtIds, -ProteinNames_Genes_UniprotID)) )
name.change.DZ.tum.expr.dia$new.col.name.1 <- pbsapply(name.change.DZ.tum.expr.dia$orig.col.name, function(x) unlist(str_split(string=x, pattern="_"))[3] )
name.change.DZ.tum.expr.dia$new.col.name.2 <- paste0(name.change.DZ.tum.expr.dia$new.col.name.1, ".", c(1, 2, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 1, 2, 3 , 1, 2, 1, 2))
print(name.change.DZ.tum.expr.dia, n=32)
custom.sample.order.DZ.tum.expr.dia <- c("Fgfr2.1", "Fgfr2.2",
                                         "Fgfr2-dE18.1", "Fgfr2-dE18.2", "Fgfr2-dE18.3",
                                         "Fgfr2-Bicc1.1", "Fgfr2-Bicc1.2" , "Fgfr2-Bicc1.3",
                                         "Fgfr2-dE18-Bicc1.1", "Fgfr2-dE18-Bicc1.2", "Fgfr2-dE18-Bicc1.3",
                                         "Fgfr2-Ate1.1", "Fgfr2-Ate1.2",
                                         "Fgfr2-dE18-Ate1.1", "Fgfr2-dE18-Ate1.2",
                                         "Fgfr2-Tacc2.1", "Fgfr2-Tacc2.2", "Fgfr2-Tacc2.3",
                                         "Fgfr2-dE18-Tacc2.1", "Fgfr2-dE18-Tacc2.2", "Fgfr2-dE18-Tacc2.3",
                                         "Fgfr2-dE18-IGR1.1", "Fgfr2-dE18-IGR1.2",
                                         "Fgfr2-dE18-IGR2.1", "Fgfr2-dE18-IGR2.2",
                                         "Fgfr2-E18-C2.1", "Fgfr2-E18-C2.2", "Fgfr2-E18-C2.3",
                                         "Fgfr2-E18-C3.1", "Fgfr2-E18-C3.2",
                                         "Fgfr2-E18-C4.1", "Fgfr2-E18-C4.2"
)
custom.sample.order.DZ.tum.expr.dia
### prepare and reshape data for plot
glimpse(EXPR.mtplplot.data.3)
EXPR.mtplplot.data.4 <- EXPR.mtplplot.data.3
colnames(EXPR.mtplplot.data.4) <-c("PG.Genes", "PG.ProteinNames", "PG.UniProtIds", name.change.DZ.tum.expr.dia$new.col.name.2, "ProteinNames_Genes_UniprotID")
melted.EXPR.mtplplot.data.4 <- reshape2::melt(EXPR.mtplplot.data.4)
glimpse(melted.EXPR.mtplplot.data.4)
#heatmap plot DZ Expression Fgfr2 Bicc1 Tacc2 Ate 1
ggplot(
melted.EXPR.mtplplot.data.4 , aes(x=variable, y=PG.Genes, fill=value)) +
geom_tile() +
scale_fill_viridis(limits = c(6, max(melted.EXPR.mtplplot.data.4$value, na.rm=T)), option = "inferno", na.value = "grey30",name="log2(intensity)") +
coord_equal()+
scale_y_discrete(limits= rev(order.in.plot2))+
scale_x_discrete(limits= custom.sample.order.DZ.tum.expr.dia)+
theme(legend.position="bottom", legend.justification = "center")+
theme(axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(legend.title.align=0.5)+
theme(plot.title = element_text(hjust = 0.5, face="bold", size=16)) +
theme(axis.text.x=element_text(angle=90,vjust=0.5,hjust=1))+
theme(axis.text.y= element_text(size=14))+
xlab(NULL) +
xlab(NULL) +
ylab(NULL) +
geom_vline(xintercept = c(2.5, 5.5, 8.5, 11.5, 13.5, 15.5, 18.5, 21.5, 23.5, 25.5, 28.5, 30.5) , size=1.0, linetype = "solid", color="white")+
annotate("text", x = 1.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 4, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 7, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-Bicc1", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 10, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18-Bicc1", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 12.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-Ate1", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 14.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18-Ate1", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 17, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-Tacc2", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 20, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18-Tacc2", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 22.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18-IGR1", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 24.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-dE18-IGR2", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 27, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-E18-C2", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 29.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-E18-C3", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 31.5, y = nrow(EXPR.mtplplot.data.4)+1, label = "Fgfr2-E18-C4", fontface = "bold", angle='90', hjust = 0, size=5)+
annotate("text", x = 32, y = nrow(EXPR.mtplplot.data.4)+10, label = "", color = "transparent")
#ggsave("DZ.tumors.expression.DIA.candidatesA.pdf", useDingbats=FALSE, width = 18, height =14, units = "cm")
### define expression candidates to plot B ###
# NOTE(review): this section overwrites the objects created for plot A
# (mtpl.candidates, EXPR.mtplplot.data*, name.change.DZ.tum.expr.dia, ...)
# and repeats the same pipeline for a larger candidate list.
mtpl.candidates <- c("AKTS1_MOUSE", "ACLY_MOUSE", "BAD_MOUSE", "F262_MOUSE", "STX7_MOUSE", "TBCD1_MOUSE",
                     "CCDC6_MOUSE", "CDK7_MOUSE", "COR1A_MOUSE",
                     "RUNX1_MOUSE", "SMAD3_MOUSE", "TP53B_MOUSE",
                     "CTNB1_MOUSE", "CTND1_MOUSE", "HDAC2_MOUSE",
                     "PAK2_MOUSE", "FAK1_MOUSE", "RIPK1_MOUSE",
                     "MP2K1_MOUSE", "MP2K2_MOUSE", "MP2K4_MOUSE", "MK01_MOUSE", "MK03_MOUSE", "KSR1_MOUSE", "JUN_MOUSE", "JUNB_MOUSE", "EPS8_MOUSE",
                     "SP1_MOUSE", "SP3_MOUSE", "GSK3B_MOUSE", "MAP1B_MOUSE",
                     "MAP2_MOUSE",
                     "KS6A1_MOUSE", "KS6A3_MOUSE", "RS6_MOUSE", "IF4B_MOUSE", "4EBP1_MOUSE", "IF2B1_MOUSE",
                     "PDCD4_MOUSE", "TIF1B_MOUSE") #
mtpl.candidates
###filter for candidates
EXPR.mtplplot.data <- DZ.tumors.expr_V2.2.PG.wide %>% filter(PG.ProteinNames %in% mtpl.candidates)
glimpse(EXPR.mtplplot.data)
### Reorder data according to order candidates, average technical replicates, log2 transform, reshape for plot
# NOTE(review): as in section A, no replicate averaging actually takes place.
EXPR.mtplplot.data <- left_join(data.frame(PG.ProteinNames = mtpl.candidates),
                                EXPR.mtplplot.data,
                                by = "PG.ProteinNames")
# Annotation columns (2A) vs. intensity columns (2B).
EXPR.mtplplot.data.2A <- EXPR.mtplplot.data %>% select(PG.Genes, PG.ProteinNames,PG.UniProtIds
)
glimpse(EXPR.mtplplot.data.2A)
EXPR.mtplplot.data.2B <- EXPR.mtplplot.data %>% select(
  "A_1_Fgfr2",
  "N_2_Fgfr2",
  "F_13_Fgfr2-Ate1",
  "R_14_Fgfr2-Ate1",
  "D_7_Fgfr2-Bicc1",
  "P_8_Fgfr2-Bicc1",
  "AB_9_Fgfr2-Bicc1",
  "H_17_Fgfr2-Tacc2",
  "T_18_Fgfr2-Tacc2",
  "AD_19_Fgfr2-Tacc2",
  "C_4_Fgfr2-dE18",
  "O_5_Fgfr2-dE18",
  "AA_6_Fgfr2-dE18",
  "G_15_Fgfr2-dE18-Ate1",
  "S_16_Fgfr2-dE18-Ate1",
  "E_10_Fgfr2-dE18-Bicc1",
  "Q_11_Fgfr2-dE18-Bicc1",
  "AC_12_Fgfr2-dE18-Bicc1",
  "I_20_Fgfr2-dE18-Tacc2",
  "U_21_Fgfr2-dE18-Tacc2",
  "AE_22_Fgfr2-dE18-Tacc2",
  "J_23_Fgfr2-dE18-IGR1",
  "V_24_Fgfr2-dE18-IGR1",
  "K_25_Fgfr2-dE18-IGR2",
  "W_26_Fgfr2-dE18-IGR2",
  "B_3_Fgfr2-E18-C2",
  "X_27_Fgfr2-E18-C2",
  "AF_28_Fgfr2-E18-C2",
  "L_29_Fgfr2-E18-C3",
  "Y_30_Fgfr2-E18-C3",
  "M_31_Fgfr2-E18-C4",
  "Z_32_Fgfr2-E18-C4")
glimpse(EXPR.mtplplot.data.2B)
#log2 data transformation
EXPR.mtplplot.data.2D <- log2(EXPR.mtplplot.data.2B)
glimpse(EXPR.mtplplot.data.2D)
# combine data frames
EXPR.mtplplot.data.3 <- bind_cols(EXPR.mtplplot.data.2A, EXPR.mtplplot.data.2D)
glimpse(EXPR.mtplplot.data.3)
nrow(EXPR.mtplplot.data.3)
# prepare additional candidate names
EXPR.mtplplot.data.3$ProteinNames_Genes_UniprotID <- paste0(EXPR.mtplplot.data.3$PG.Genes, "_", EXPR.mtplplot.data.3$PG.UniProtIds)
glimpse(EXPR.mtplplot.data.3)
# define order in plot
order.in.plot <- EXPR.mtplplot.data.3$ProteinNames_Genes_UniprotID
order.in.plot
order.in.plot2 <- EXPR.mtplplot.data.3$PG.Genes
order.in.plot2
# prepare additional sample names and order
# Replicate-index vector is positional and must match the select() order above.
name.change.DZ.tum.expr.dia <- tibble(orig.col.name = colnames(EXPR.mtplplot.data.3 %>% select(-PG.Genes, -PG.ProteinNames, -PG.UniProtIds, -ProteinNames_Genes_UniprotID)) )
name.change.DZ.tum.expr.dia$new.col.name.1 <- pbsapply(name.change.DZ.tum.expr.dia$orig.col.name, function(x) unlist(str_split(string=x, pattern="_"))[3] )
name.change.DZ.tum.expr.dia$new.col.name.2 <- paste0(name.change.DZ.tum.expr.dia$new.col.name.1, ".", c(1, 2, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 1, 2, 3 , 1, 2, 1, 2))
print(name.change.DZ.tum.expr.dia, n=32)
custom.sample.order.DZ.tum.expr.dia <- c("Fgfr2.1", "Fgfr2.2",
                                         "Fgfr2-dE18.1", "Fgfr2-dE18.2", "Fgfr2-dE18.3",
                                         "Fgfr2-Bicc1.1", "Fgfr2-Bicc1.2" , "Fgfr2-Bicc1.3",
                                         "Fgfr2-dE18-Bicc1.1", "Fgfr2-dE18-Bicc1.2", "Fgfr2-dE18-Bicc1.3",
                                         "Fgfr2-Ate1.1", "Fgfr2-Ate1.2",
                                         "Fgfr2-dE18-Ate1.1", "Fgfr2-dE18-Ate1.2",
                                         "Fgfr2-Tacc2.1", "Fgfr2-Tacc2.2", "Fgfr2-Tacc2.3",
                                         "Fgfr2-dE18-Tacc2.1", "Fgfr2-dE18-Tacc2.2", "Fgfr2-dE18-Tacc2.3",
                                         "Fgfr2-dE18-IGR1.1", "Fgfr2-dE18-IGR1.2",
                                         "Fgfr2-dE18-IGR2.1", "Fgfr2-dE18-IGR2.2",
                                         "Fgfr2-E18-C2.1", "Fgfr2-E18-C2.2", "Fgfr2-E18-C2.3",
                                         "Fgfr2-E18-C3.1", "Fgfr2-E18-C3.2",
                                         "Fgfr2-E18-C4.1", "Fgfr2-E18-C4.2"
)
custom.sample.order.DZ.tum.expr.dia
### prepare and reshape data for plot
glimpse(EXPR.mtplplot.data.3)
EXPR.mtplplot.data.4 <- EXPR.mtplplot.data.3
colnames(EXPR.mtplplot.data.4) <-c("PG.Genes", "PG.ProteinNames", "PG.UniProtIds", name.change.DZ.tum.expr.dia$new.col.name.2, "ProteinNames_Genes_UniprotID")
melted.EXPR.mtplplot.data.4 <- reshape2::melt(EXPR.mtplplot.data.4)
glimpse(melted.EXPR.mtplplot.data.4)
### heatmap plot expression candidates B
#exlude Ctnnd1_P30999-2;P30999-3
melted.EXPR.mtplplot.data.4 <- melted.EXPR.mtplplot.data.4 %>% filter(!ProteinNames_Genes_UniprotID %in% c("Ctnnd1_P30999-2;P30999-3"))
# Unlike plot A, the fill scale here uses the full data range (no lower limit),
# genes are ordered by an explicit hard-coded list, and samples are regrouped
# into phenotype groups G1&G3 / G5 / G2 / G4 rather than genotype order.
ggplot(
  melted.EXPR.mtplplot.data.4 , aes(x=variable, y=PG.Genes , fill=value)) +
  geom_tile() +
  scale_fill_viridis( option = "inferno", na.value = "grey30",name="log2(intensity)") +
  coord_equal()+
  scale_y_discrete(limits= rev(c(
    "Akt1s1", "Acly", "Bad", "Pfkfb2", "Stx7", "Tbc1d1", "Ccdc6", "Cdk7", "Coro1a", "Runx1", "Smad3",
    "Tp53bp1", "Ctnnb1", "Ctnnd1", "Hdac2", "Pak2", "Ptk2", "Ripk1", "Map2k1", "Map2k2", "Map2k4",
    "Mapk1", "Mapk3", "Ksr1", "Jun", "Junb", "Eps8", "Sp1", "Sp3", "Gsk3b", "Map1b", "Metap2",
    "Rps6ka1", "Rps6ka3", "Rps6", "Eif4b", "Eif4ebp1", "Igf2bp1", "Pdcd4", "Trim28" )) )+
  scale_x_discrete(limits= c(
    # #Group 1
    # "1_Fgfr2","2_Fgfr2", "13_Fgfr2-Ate1","14_Fgfr2-Ate1",
    "Fgfr2.1", "Fgfr2.2", "Fgfr2-Ate1.1", "Fgfr2-Ate1.2",
    # #Group 3
    # "7_Fgfr2-Bicc1","8_Fgfr2-Bicc1","9_Fgfr2-Bicc1","17_Fgfr2-Tacc2","18_Fgfr2-Tacc2","19_Fgfr2-Tacc2",
    "Fgfr2-Bicc1.1", "Fgfr2-Bicc1.2", "Fgfr2-Bicc1.3", "Fgfr2-Tacc2.1", "Fgfr2-Tacc2.2", "Fgfr2-Tacc2.3",
    # #Group 5
    # "3_Fgfr2-E18-C2","27_Fgfr2-E18-C2","28_Fgfr2-E18-C2",
    "Fgfr2-E18-C2.1", "Fgfr2-E18-C2.2", "Fgfr2-E18-C2.3",
    # #Group 2
    # "4_Fgfr2-dE18","5_Fgfr2-dE18","6_Fgfr2-dE18","23_Fgfr2-dE18-IGR1","24_Fgfr2-dE18-IGR1","25_Fgfr2-dE18-IGR2","26_Fgfr2-dE18-IGR2","29_Fgfr2-E18-C3","30_Fgfr2-E18-C3","31_Fgfr2-E18-C4","32_Fgfr2-E18-C4",
    "Fgfr2-dE18.1", "Fgfr2-dE18.2", "Fgfr2-dE18.3", "Fgfr2-dE18-IGR1.1", "Fgfr2-dE18-IGR1.2", "Fgfr2-dE18-IGR2.1", "Fgfr2-dE18-IGR2.2", "Fgfr2-E18-C3.1", "Fgfr2-E18-C3.2", "Fgfr2-E18-C4.1", "Fgfr2-E18-C4.2",
    # #Group 4
    # "15_Fgfr2-dE18-Ate1","16_Fgfr2-dE18-Ate1","10_Fgfr2-dE18-Bicc1","11_Fgfr2-dE18-Bicc1","12_Fgfr2-dE18-Bicc1","20_Fgfr2-dE18-Tacc2","21_Fgfr2-dE18-Tacc2","22_Fgfr2-dE18-Tacc2"
    "Fgfr2-dE18-Ate1.1", "Fgfr2-dE18-Ate1.2", "Fgfr2-dE18-Bicc1.1", "Fgfr2-dE18-Bicc1.2", "Fgfr2-dE18-Bicc1.3", "Fgfr2-dE18-Tacc2.1", "Fgfr2-dE18-Tacc2.2", "Fgfr2-dE18-Tacc2.3"
  ) )+
  theme(legend.position="bottom", legend.justification = "center")+
  theme(axis.line = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank()) +
  theme(legend.title.align=0.5)+
  theme(plot.title = element_text(hjust = 0.5, face="bold", size=16)) + #center title
  theme(axis.text.x=element_text(angle=90,vjust=0.5,hjust=1))+ ## Vertical text on x axis
  theme(axis.text.y= element_text(size=14))+ ## Larger text on y axis
  xlab(NULL) +
  xlab(NULL) +
  ylab(NULL) +
  geom_vline(xintercept = c( 10.5, 13.5, 24.5) , size=1.0, linetype = "solid", color="white")+ #G1+G3, G5, G2, G4
  annotate("text", x = 5, y = nrow(EXPR.mtplplot.data.4)+2, label = "G1&G3", fontface = "bold")+
  annotate("text", x = 12, y = nrow(EXPR.mtplplot.data.4)+2, label = "G5", fontface = "bold")+
  annotate("text", x = 19.0, y = nrow(EXPR.mtplplot.data.4)+2, label = "G2", fontface = "bold")+
  annotate("text", x = 28.5, y = nrow(EXPR.mtplplot.data.4)+2, label = "G4", fontface = "bold")+
  # Invisible annotation: reserves headroom for the group labels.
  annotate("text", x = 3.5, y = nrow(EXPR.mtplplot.data.4)+3, label = "", color = "transparent")
#ggsave("DZ.tumors.expression.DIA.candidatesB.pdf", useDingbats=FALSE, width = 18, height =22, units = "cm")
|
e5e62929cfed1f6084e9d76b57a3e8538612091e | ba08297ca485f1dec988abbe42db78cc6ddfe7af | /ui.R | fa4c090be93dd4dbd2395494255f595ef7e22efa | [
"MIT"
] | permissive | ToshihiroIguchi/ezvoigt | 95361f3cccbfa706d7adc8b21df2692f530711ec | 2b22ff02f7f6e4ba1ab5983b0273f9d01c493c26 | refs/heads/master | 2020-03-29T22:46:08.490517 | 2018-10-31T14:06:18 | 2018-10-31T14:06:18 | 150,439,487 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,672 | r | ui.R |
# MIME types / extensions accepted by the file-upload control:
# CSV / plain text plus instrument export formats (.ASC, .int).
file.input.accept <- c("text/csv",
                       "text/comma-separated-values,text/plain",
                       ".csv", ".ASC", ".int")

# Shiny UI for Voigt-function peak separation.
# Sidebar: file upload plus server-rendered axis/range/peak controls
# (htmlOutput placeholders filled in by server.R once a file is loaded).
# Main panel: raw data table, fitted-peak plot, and optimizer settings.
shinyUI(
  fluidPage(
    titlePanel("Peak separation by Voigt function"),
    sidebarLayout(
      sidebarPanel(
        fileInput("file", "File", accept = file.input.accept),
        tags$hr(),
        # The following controls are generated server-side because their
        # choices/ranges depend on the uploaded data.
        htmlOutput("xaxis"),
        htmlOutput("Intensity"),
        fluidRow(
          column(6, htmlOutput("x.min")),
          column(6, htmlOutput("x.max"))
        ),
        htmlOutput("peak.range"),
        htmlOutput("submit")
      ),
      mainPanel(
        tabsetPanel(type = "tabs",
                    tabPanel("Table", DT::dataTableOutput("table")),
                    tabPanel("Peak", plotOutput("peak"),
                             fluidRow(
                               column(6, verbatimTextOutput("sum")),
                               column(6, DT::dataTableOutput("Voigt.table"))
                             )),
                    # Solver settings: global PSO search followed by a local
                    # refinement with stats::optim.
                    tabPanel("Setting",
                             h4("Number of optimizations"),
                             numericInput("opt.times", "Number of optimizations",
                                          value = 5, min = 1),
                             tags$hr(),
                             h4("Particle Swarm Optimizer"),
                             h5("Global optimization"),
                             numericInput("maxit", "The maximum number of iterations",
                                          value = 50, min = 3),
                             numericInput("s", "The swarm size",
                                          value = 20, min = 10),
                             tags$hr(),
                             # Typo fixes in user-facing labels:
                             # "General-purposec" -> "General-purpose",
                             # "Loal" -> "Local".
                             h4("General-purpose Optimization"),
                             h5("Local optimization"),
                             selectInput("opt.method",
                                         "method",
                                         choices = c("Nelder-Mead", "BFGS",
                                                     "CG", "L-BFGS-B", "SANN"),
                                         selected = "BFGS"),
                             tags$hr(),
                             h4("Graph"),
                             sliderInput("font.size", "Font size",
                                         value = 15,
                                         min = 5, max = 30)
                    )
        )
      )
    )
  )
)
1b11e15a75690a5ef51de1c557ad9d9ff0ce3547 | daa136d27577d3dee32b28c32b5fea0d6edf9a15 | /ProblemSets/PS7/PS7_Yarberry.R | 172fbc17912b93a7c5ddb53d5bb2d0d7a2c062db | [
"MIT"
] | permissive | myarberry26/DScourseS20 | 6b35fb4511011859d509b8fe5abbb2c0ffb34740 | da3e45e48a26e7c8c37da33696e845e992a34711 | refs/heads/master | 2020-12-13T11:37:28.453636 | 2020-04-14T14:33:13 | 2020-04-14T14:33:13 | 234,405,603 | 0 | 0 | MIT | 2020-01-16T20:28:11 | 2020-01-16T20:28:10 | null | UTF-8 | R | false | false | 1,195 | r | PS7_Yarberry.R | # PS7 Yarberry
# PS7: compare wage regressions under four treatments of missing logwage:
# (1) listwise deletion, (2) mean imputation, (3) regression (MAR) imputation,
# (4) multiple imputation via mice.
library(readr)
wages <- read_csv("ModelingOptimization/wages.csv")
View(wages)

# problem 5: drop observations missing hgc or tenure
wages <- wages[!is.na(wages$hgc), ]
wages <- wages[!is.na(wages$tenure), ]

# problem 6: summary-statistics table
# Guarded install: only download the package if it is not already available.
if (!requireNamespace("stargazer", quietly = TRUE)) install.packages("stargazer")
library(stargazer)
stargazer(wages)

# problem 7
# BUG FIX: inside an R model formula `^` means interaction crossing, so
# `tenure + tenure^2` collapses to just `tenure`.  The quadratic term must be
# protected with I().
wage.formula <- logwage ~ hgc + college + tenure + I(tenure^2) + age + married

# (1) listwise deletion: lm() drops rows with missing logwage by default.
est.1 <- lm(wage.formula, data = wages)
stargazer(est.1)

# (2) mean imputation of logwage
df.mean.imp <- wages
mean.log.wage <- mean(wages$logwage, na.rm = TRUE)
df.mean.imp$logwage[is.na(wages$logwage)] <- mean.log.wage
stargazer(df.mean.imp)
est.2 <- lm(wage.formula, data = df.mean.imp)
stargazer(est.2)

# (3) impute fitted values from est.1 (missing-at-random assumption)
df.mar <- wages
df.mar$mar.logwage <- predict(est.1, df.mar)
df.mar$logwage[is.na(wages$logwage)] <- df.mar$mar.logwage[is.na(wages$logwage)]
est.3 <- lm(wage.formula, data = df.mar)
stargazer(df.mar)
stargazer(est.3)

# (4) multiple imputation with mice
if (!requireNamespace("mice", quietly = TRUE)) install.packages("mice")
library(mice)
df.mice <- mice(wages, seed = 1234)
# BUG FIX: df.mice$data is the *pre-imputation* input (still contains NAs),
# so fitting on it just repeats listwise deletion.  Fit on a completed
# (imputed) data set instead.
est.4 <- lm(wage.formula, data = complete(df.mice))
stargazer(est.4)

# Side-by-side comparison of all four specifications.
stargazer(est.1, est.2, est.3, est.4, title = "Regression Results")
|
f58a74aafa53e11a46a7a33e8a02017a7dc2b9ec | d3ddacf81c1c8f86f4aaf9d44f4529f20e701e8f | /man/parse_config_list.Rd | 77310579bed4c018e0aa7cfcad527b6e38c804e8 | [] | no_license | ijlyttle/ieeecomtrade | 3bd2145bf235ef8198b6c0572aade7268dfb8811 | 5abaa74980aaf3b3f5b133a7d1a982ecc10484b6 | refs/heads/master | 2021-01-19T07:29:30.859311 | 2017-08-23T00:12:23 | 2017-08-23T00:12:23 | 87,544,705 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 416 | rd | parse_config_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_config.R
\name{parse_config_list}
\alias{parse_config_list}
\title{parse into a list}
\usage{
parse_config_list(text, ...)
}
\arguments{
\item{text}{character, vector of strings}
\item{...}{other args passed to \code{\link[readr:read_csv]{readr::read_csv()}}}
}
\value{
tibble
}
\description{
parse into a list
}
\keyword{internal}
|
5051d2de31563b929294db9d376efd98725d2b73 | 1db71d2f2d70bf7c38320d7bcc9725fbb59cb064 | /data/studies/findlay_2018_brca1/standardise_findlay_2018_brca1.R | c3ad3d54ea5ba1892c6fa92d0e950a2477cde697 | [
"Apache-2.0"
] | permissive | ostrokach/aa_subtypes | fda6346f4244dfb5cb15388f742d63087a29fe40 | 5f8e7e7557436d8798c908e94981aead21243755 | refs/heads/master | 2022-12-01T03:09:55.367152 | 2020-08-10T13:34:26 | 2020-08-10T13:34:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 966 | r | standardise_findlay_2018_brca1.R | #!/usr/bin/env Rscript
# Standardise data from Findlay et al. 2018 (BRCA1)
# Reads the supplementary XLSX of saturation-genome-editing function scores,
# averages replicate scores per amino-acid substitution, normalises them and
# writes the standardised output.
# normalise_score(), get_variant_class() and standardise_study() are defined
# in the sourced scripts below (read_xlsx / tidyverse are presumably attached
# by src/config.R -- confirm).
source('src/config.R')
source('src/study_standardising.R')

# Import and process data
meta <- read_yaml('data/studies/findlay_2018_brca1/findlay_2018_brca1.yaml')

# Supplementary table: skip the 2-row header block; literal "NA" marks missing.
dm_data <- read_xlsx('data/studies/findlay_2018_brca1/raw/41586_2018_461_MOESM3_ESM.xlsx', skip = 2, na = 'NA') %>%
  # Sanitise column names: runs of slashes/spaces/parens become "_".
  rename_all(list( ~ gsub('[\\/ \\(\\)]+', '_', .))) %>%
  rename(wt_nuc = reference,
         mut_nuc = alt,
         wt = aa_ref,
         mut = aa_alt,
         position = aa_pos) %>%
  drop_na(position) %>%
  # Several nucleotide variants can encode the same aa change: average them.
  group_by(position, wt, mut) %>%
  summarise(raw_score = mean(function.score.mean, na.rm=TRUE)) %>%
  ungroup() %>%
  mutate(transformed_score = raw_score,
         score = normalise_score(transformed_score),
         class = get_variant_class(wt, mut)) %>%
  select(position, wt, mut, score, transformed_score, raw_score, class)

# Save output
standardise_study(dm_data, meta$study, meta$transform)
|
ba8ac386afd614117e117c1ee5b03fb5b2b0d056 | 52fa34c014cd9500523d55d0d04747e440abd6e8 | /ui.R | e0b5fed2afdaa5e88bab2d8c64d874ad24bde136 | [] | no_license | gdmacedo/Developing-Data-Products | 1c2c471b7e7044d852e2c4c0a856e91959a66001 | d0e47b7cc7007cdb2b7fa891da599d180a2796a8 | refs/heads/master | 2020-04-03T23:09:23.199132 | 2019-06-11T21:19:07 | 2019-06-11T21:19:07 | 155,620,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,294 | r | ui.R | library(shiny)
library(ggplot2)
library(shinythemes)
# Shiny UI: navbar app around the ggplot2 `mpg` data set (data table,
# histogram/scatter plots) plus static project/author/about pages.
# NOTE(review): shiny serves files in www/ at the app root, so these
# "./www/style.css" hrefs are likely wrong ("style.css" expected) and the
# tags$link + includeCSS pair duplicates the stylesheet -- verify.
fluidPage(tags$head(tags$link(rel = "stylesheet",
                              type = "text/css",
                              href = "./www/style.css"
                              )
                    ),
          includeCSS("./www/style.css"),
          theme = shinytheme("cerulean"),
          # Application title
          # NOTE(review): "Â " before DATA SCIENCE looks like a mis-encoded
          # non-breaking space (UTF-8/Latin-1 mojibake) -- confirm intent.
          titlePanel("Â DATA SCIENCE"),
          navbarPage("Developing Data Products",
                     # Static landing page: two image columns.
                     tabPanel("Project",
                              fluidRow(column(4,
                                              headerPanel("MTCars : Motor Trend Car"),
                                              img(class="img-polaroid",
                                                  src=paste0("https://scontent-gru2-2.xx.fbcdn.net/v/t1.0-9/60830516_2343055332413021_530036820168146944_n.jpg?_nc_cat=103&_nc_ht=scontent-gru2-2.xx&oh=d90c767917455ab41b3c322b2b5f5c04&oe=5D5AEC27")
                                              )
                                       ),
                                       column(8,
                                              headerPanel("The Project"),
                                              img(class="img-polaroid",
                                                  src=paste0("https://scontent.ffor21-1.fna.fbcdn.net/v/t1.0-9/64396807_2211761662193860_1967510959085846528_o.jpg?_nc_cat=106&_nc_oc=AQmiGDLyw-iqUaDZ92OSfyiNXgTqQhf8y6E9m8XYcMWvoRm-9VfvOt80WTPkxhGlKy8&_nc_ht=scontent.ffor21-1.fna&oh=cc9cd586896ed0f1aca0daed7d865e90&oe=5D86F909")
                                              )
                                       )
                              )
                     ),
                     # Filterable table of ggplot2::mpg; selections feed the
                     # server-side "tableMpg" data table.
                     tabPanel("Data",
                              titlePanel("MTCars : Motor Trend Car - Road test data set"),
                              fluidRow(
                                column(3,
                                       selectInput("man",
                                                   "Manufacturer:",
                                                   c("All",
                                                     unique(as.character(mpg$manufacturer))))
                                ),
                                column(3,
                                       selectInput("trans",
                                                   "Transmission:",
                                                   c("All",
                                                     unique(as.character(mpg$trans))))
                                ),
                                column(3,
                                       selectInput("year",
                                                   "year:",
                                                   c("All",
                                                     unique(as.character(mpg$year))))
                                ),
                                column(3,
                                       selectInput("cyl",
                                                   "Cylinders:",
                                                   c("All",
                                                     unique(as.character(mpg$cyl))))
                                )
                              ),
                              fluidRow(DT::dataTableOutput("tableMpg")
                              )
                     ),
                     # NOTE(review): "Histagran" / "Miles Per Galon" and the
                     # Portuguese caption "O grafico Abaixo" ("The plot below")
                     # are user-facing strings left untouched here; changing the
                     # tabPanel titles could break server-side references.
                     navbarMenu("Graphics",
                                tabPanel("Histagran",
                                         sidebarLayout(sidebarPanel(sliderInput("mpg",
                                                                                "Miles Per Galon",
                                                                                min = 1,
                                                                                max = 50,
                                                                                value = 30
                                         )
                                         ),
                                         mainPanel(plotOutput("PlotHstgm"))
                                         )
                                ),
                                tabPanel("Plot",
                                         sidebarLayout(sidebarPanel(radioButtons("plotType",
                                                                                 "Plot type",
                                                                                 c("Scatter"="p",
                                                                                   "Line"="l"
                                                                                 )
                                         )
                                         ),
                                         mainPanel(tags$small("O grafico Abaixo"),
                                                   plotOutput("plot")
                                         )
                                         )
                                )
                     ),
                     # Static author page.
                     tabPanel("The Author",
                              fluidRow(column(4,
                                              headerPanel("Macedo, Glener Diniz"),
                                              img(class="img-polaroid",
                                                  src=paste0("https://scontent.ffor21-1.fna.fbcdn.net/v/t1.0-9/19895061_1375893182447383_7592759197865761547_n.jpg?_nc_cat=102&_nc_eui2=AeHnofEmBWhsTfKAcKw4MOcn6xq-GfgXmpjp85-1_N_7dhxxeKqPrRaDk8MnLLwwztQLjKtzd6NIhu0B7lZTj1ufhym47dGxoYbPPXJOKyn0MA&_nc_ht=scontent.ffor21-1.fna&oh=4b53017593a9a233de9090430f877ae5&oe=5D5E9ADD")
                                              )
                                       ),
                                       column(8,
                                              headerPanel("The Author"),
                                              img(class="img-polaroid",
                                                  src=paste0("https://scontent.ffor21-1.fna.fbcdn.net/v/t1.0-9/62261034_2211761648860528_4689876039784988672_n.jpg?_nc_cat=101&_nc_oc=AQlIIeaHorMFE33OtAJOizcYOFCPbFm0lYQGMk2hD-ut_s2w2QfIYZsrYlSe3ppFC_U&_nc_ht=scontent.ffor21-1.fna&oh=31f20f944307c6b2996cea839fd933af&oe=5D931DB0")
                                              )
                                       )
                              )
                     ),
                     # Static about page.
                     navbarMenu("Help",
                                tabPanel("About",
                                         fluidRow(column(4,
                                                         headerPanel("About"),
                                                         img(class="img-polaroid",
                                                             src=paste0("https://scontent.ffor21-1.fna.fbcdn.net/v/t1.0-9/64307251_2211821288854564_7531273557817425920_n.png?_nc_cat=103&_nc_oc=AQnsx9W2fiFdXnW0Of6FNnN-dbieOnAB-1wrWOoMXITOFcf0tRBoKcuDh8pEcp3b3mY&_nc_ht=scontent.ffor21-1.fna&oh=8651b961f0dd57a079d3dd194b9dd933&oe=5D7F86E9")
                                                         )
                                                  ),
                                                  column(8,
                                                         headerPanel("The University"),
                                                         img(class="img-polaroid",
                                                             src=paste0("https://scontent.ffor21-1.fna.fbcdn.net/v/t1.0-9/62214582_2211861935517166_1605283832636899328_o.jpg?_nc_cat=110&_nc_oc=AQn0c4I_3dNgU6lIJqhk-j6SHvS1ER5bal-Lf0H3W7HxeXiljecMTPGZ5f19hEw8t-c&_nc_ht=scontent.ffor21-1.fna&oh=b2239cae0d30c3feb0e4f17e23718a35&oe=5D90105F")
                                                         )
                                                  )
                                         )
                                )
                     )
          )
)
0aa6d5e7ccdf7925b5fdbb09dac98b836e27fa97 | 83f845cf9d3987c8d816ca9b0d168c9c90e6cdf4 | /man/plot.psem.Rd | 6804bc1ba25654c81d8fa2613d005909939e9ffe | [] | no_license | jslefche/piecewiseSEM | e5a572eef4538b6fb2cb0df00fa3e49f7c3c3872 | aac65aafd979b8dbce6c725b11b85123097f6fe7 | refs/heads/main | 2023-06-07T12:30:47.849089 | 2023-06-02T18:45:45 | 2023-06-02T18:45:45 | 22,606,015 | 145 | 53 | null | 2023-03-28T21:10:38 | 2014-08-04T13:55:07 | R | UTF-8 | R | false | true | 2,405 | rd | plot.psem.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_psem.R
\name{plot.psem}
\alias{plot.psem}
\title{Plotting of Piecewise Structural Equation Models}
\usage{
\method{plot}{psem}(
x,
return = FALSE,
node_attrs = data.frame(shape = "rectangle", color = "black", fillcolor = "white"),
edge_attrs = data.frame(style = "solid", color = "black"),
ns_dashed = T,
alpha = 0.05,
show = "std",
digits = 3,
add_edge_label_spaces = TRUE,
...
)
}
\arguments{
\item{x}{a [psem()] object}
\item{return}{whether to return the output from [DiagrammeR::create_graph()] for modification and later plotting}
\item{node_attrs}{List of node attributes to override defaults of rectangular nodes with black outline and white fill. See [here](http://visualizers.co/diagrammer/articles/node-edge-data-frames.html) and [here](http://visualizers.co/diagrammer/articles/graphviz-mermaid.html) for a more complete rundown of options.}
\item{edge_attrs}{List of edge attributes to override defaults of solid black arrows. See [here](http://visualizers.co/diagrammer/articles/node-edge-data-frames.html) and [here](http://visualizers.co/diagrammer/articles/graphviz-mermaid.html) for a more complete rundown of options.}
\item{ns_dashed}{If TRUE, paths that are not different from 0 will be dashed rather than solid, unless the whole is overridden in `edge_attrs`}
\item{alpha}{The alpha level for assessing whether a path is different from 0}
\item{show}{What types of path coefficients are shown? Default `"std"` is standardized coefficients. For undstandardized, use `"unstd"`}
\item{digits}{How many significant digits should be shown?}
\item{add_edge_label_spaces}{Should spaces by added on either side of edge labels? Default is `TRUE` as otherwise paths too often overlap edges.}
\item{...}{Other arguments to [DiagrammeR::render_graph()]}
}
\value{
Returns an object of class [DiagrammeR::dgr_graph]
}
\description{
plot.psem uses [DiagrammeR] to generate path diagrams
of \code{piecewiseSEM} fits within R.
}
\examples{
data(keeley)
mod <- psem(
lm(rich ~ cover, data=keeley),
lm(cover ~ firesev, data=keeley),
lm(firesev ~ age, data=keeley),
data = keeley
)
plot(mod)
### More customized plot
plot(mod, node_attrs = list(
shape = "rectangle", color = "black",
fillcolor = "orange", x = 3, y=1:4))
}
\author{
Jarrett Byrnes <jarrett.byrnes@umb.edu>
}
|
3b0351093a53ff2ddc867a151a3f2a1f069c59da | 5c0d14df7ed580aa08330b8296386cb0c4f489e2 | /1999.R | 0a6c6fd8f3b5946143c9ae3e4120806f744489cd | [] | no_license | thienngole/NY-Housing-Data | 423c925b71d55ccd069f931e4290120eea098e71 | 2e480fdaf824e782ef8e8270b158a25156c02d88 | refs/heads/Group | 2020-04-29T19:30:50.379049 | 2019-04-03T06:17:21 | 2019-04-03T06:17:21 | 176,357,344 | 3 | 3 | null | 2019-03-30T18:35:01 | 2019-03-18T19:39:11 | R | UTF-8 | R | false | false | 8,989 | r | 1999.R | #condition of walls (36a, 37a, d1, d3, d12)
#Anayeli Ochoa
# Load and recode the 1999 NYC Housing and Vacancy Survey extract.
# Housing-quality items are recoded into point scores; plain columns of
# interest are copied out as one-column data frames.
library(dplyr)
# Select the 1999 .csv file, save it:
my.file <- file.choose()
# Read in data only (no headers -- skip the first two rows):
nych1999 <- read.csv(my.file, skip = 2, header = FALSE, stringsAsFactors = FALSE)
# Read in data (with both rows of headers), temporary data frame:
tmp <- read.csv(my.file, header = TRUE, stringsAsFactors = FALSE)
# Use headers from tmp for nych1999:
names(nych1999) <- names(tmp)
# Remove the temporary data frame:
rm(tmp)
#print data nych1999
nych1999
#-------------------------------------------------------------------
#Total Household Income Recode (hhinc) ?
# Single-bracket indexing keeps these as one-column data.frames.
hhinc = nych1999[c("hhinc")]
#-------------------------------------------------------------------
#Monthly contract rent (30a) ?
X_30a = nych1999[c("X_30a")]
#-------------------------------------------------------------------
#Out of pocket rent (31b) ?
X_31b = nych1999[c("X_31b")]
#-------------------------------------------------------------------
#Kitchen facilities (26a) 5 if none
#3=none and that means replace with 5
# Codes 1/2 (facilities present) score 0; code 3 (none) scores 5 points.
# The 0 -> 0 replacement is a no-op kept for symmetry.
nych1999$X_26a <- replace(nych1999$X_26a, nych1999$X_26a==0, 0)
nych1999$X_26a <- replace(nych1999$X_26a, nych1999$X_26a==1, 0)
nych1999$X_26a <- replace(nych1999$X_26a, nych1999$X_26a==2, 0)
nych1999$X_26a <- replace(nych1999$X_26a, nych1999$X_26a==3, 5)
X_26a = nych1999[c("X_26a")]
#-------------------------------------------------------------------
#Toilet breakdowns (25c) 3 = 1(Yes) 5 = 9(no plumbing)
# Score 3 points when breakdowns were reported (survey code 1) and 5 points
# when there is no plumbing at all (code 9); codes 2/3/8 score 0.
# BUG FIX: the original recoded 1 -> 3 first and then 3 -> 0, which wiped the
# just-assigned 3-point scores back to 0.  Zero out the raw code 3 BEFORE
# assigning the 3-point score.
nych1999$X_25c <- replace(nych1999$X_25c, nych1999$X_25c==2, 0)
nych1999$X_25c <- replace(nych1999$X_25c, nych1999$X_25c==3, 0)
nych1999$X_25c <- replace(nych1999$X_25c, nych1999$X_25c==8, 0)
nych1999$X_25c <- replace(nych1999$X_25c, nych1999$X_25c==1, 3)
nych1999$X_25c <- replace(nych1999$X_25c, nych1999$X_25c==9, 5)
X_25c = nych1999[c("X_25c")]
#rowSums(X_25c)
#--------------------------------------------------------------------
#Number of heating equipment breakdowns (32b) (2 for each break)
#2 = one time
# NOTE(review): only code 2 ("one time") keeps a 2-point score; codes 3-5
# (more breakdowns) are zeroed, which contradicts "(2 for each break)" above.
# Confirm the intended scoring against the 1999 NYCHVS codebook.
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==2, 2)
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==3, 0)
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==4, 0)
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==5, 0)
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==8, 0)
nych1999$X_32b <- replace(nych1999$X_32b, nych1999$X_32b==9, 0)
X_32b <- nych1999[c("X_32b")] # one-column data frame; the name in c() is a character string
#--------------------------------------------------------------------
#Number of rooms (24a)
X_24a = nych1999[c("X_24a")]
#--------------------------------------------------------------------
#condition of walls (36a, 37a, d1, d3, d12)
# Each wall-defect item scores 2 points; per-row totals go into condition_wall.
# NOTE(review): X_36a treats code 0 as the defect (0 -> 2) while X_37a/d1/d3
# treat code 1 as the defect (1 -> 2).  Survey coding may differ per item --
# confirm against the codebook.  These replace() chains are order-sensitive,
# but no later replacement here targets an assigned score value.
nych1999$X_36a <- replace(nych1999$X_36a, nych1999$X_36a==0, 2)
nych1999$X_36a <- replace(nych1999$X_36a, nych1999$X_36a==1, 0)
nych1999$X_36a <- replace(nych1999$X_36a, nych1999$X_36a==8, 0)
nych1999$X_37a <- replace(nych1999$X_37a, nych1999$X_37a==0, 0)
nych1999$X_37a <- replace(nych1999$X_37a, nych1999$X_37a==1, 2)
nych1999$X_37a <- replace(nych1999$X_37a, nych1999$X_37a==8, 0)
nych1999$X_d1 <- replace(nych1999$X_d1, nych1999$X_d1 == 1, 2)
nych1999$X_d1 <- replace(nych1999$X_d1, nych1999$X_d1 == 8, 0)
nych1999$X_d1 <- replace(nych1999$X_d1, nych1999$X_d1 == 9, 0)
nych1999$X_d3 <- replace(nych1999$X_d3, nych1999$X_d3 == 1, 2)
nych1999$X_d3 <- replace(nych1999$X_d3, nych1999$X_d3 == 8, 0)
nych1999$X_d3 <- replace(nych1999$X_d3, nych1999$X_d3 == 9, 0)
# X_d12 not present in the 1999 file: create it as all-zero so the cbind works.
nych1999 <- mutate(.data = nych1999, X_d12 = 0)
#numeric vector
#condition_wall <- c(nych1999$X_36a, nych1999$X_37a, nych1999$X_d1, nych1999$X_d3, nych1999$X_d12)
#sum rows and put them into a new column \mutate us just for data frames
#matrix
condition_wall <-(cbind(nych1999$X_36a, nych1999$X_37a, nych1999$X_d1, nych1999$X_d3, nych1999$X_d12))
condition_wall <- rowSums(condition_wall)
#-----------------------------------------------------------------------
#Condition of floors (g1, g2, g3, g4, g5)
# Each floor-defect item (code 1) scores 2 points; 8/9 (not reported) score 0.
nych1999$X_g1 <- replace(nych1999$X_g1, nych1999$X_g1==1, 2)
nych1999$X_g1 <- replace(nych1999$X_g1, nych1999$X_g1==8, 0)
nych1999$X_g1 <- replace(nych1999$X_g1, nych1999$X_g1==9, 0)
nych1999$X_g2 <- replace(nych1999$X_g2, nych1999$X_g2==1, 2)
nych1999$X_g2 <- replace(nych1999$X_g2, nych1999$X_g2==8, 0)
nych1999$X_g2 <- replace(nych1999$X_g2, nych1999$X_g2==9, 0)
nych1999$X_g3 <- replace(nych1999$X_g3, nych1999$X_g3==1, 2)
nych1999$X_g3 <- replace(nych1999$X_g3, nych1999$X_g3==8, 0)
nych1999$X_g3 <- replace(nych1999$X_g3, nych1999$X_g3==9, 0)
nych1999$X_g4 <- replace(nych1999$X_g4, nych1999$X_g4==1, 2)
nych1999$X_g4 <- replace(nych1999$X_g4, nych1999$X_g4==8, 0)
nych1999$X_g4 <- replace(nych1999$X_g4, nych1999$X_g4==9, 0)
nych1999$X_g5 <- replace(nych1999$X_g5, nych1999$X_g5==1, 2)
nych1999$X_g5 <- replace(nych1999$X_g5, nych1999$X_g5==8, 0)
nych1999$X_g5 <- replace(nych1999$X_g5, nych1999$X_g5==9, 0)
#condition_floors = nych1999[c("nych1999$X_g1","nych1999$X_g2", "nych1999$X_g3", "nych1999$X_g4", "nych1999$X_g5")]
#rowSums(condition_floors)
#pisos = transmute(.data = condition_floors, floors = rowSums(condition_floors))
condition_floors <-(cbind(nych1999$X_g1, nych1999$X_g2, nych1999$X_g3, nych1999$X_g4, nych1999$X_g5))
condition_floors <- rowSums(condition_floors)
#--------------------------------------------------------------
#Condition of Windows (e1, e2, e3, e4)
# Same 2-points-per-defect scheme as floors.
nych1999$X_e1 <- replace(nych1999$X_e1, nych1999$X_e1==1, 2)
nych1999$X_e1 <- replace(nych1999$X_e1, nych1999$X_e1==8, 0)
nych1999$X_e1 <- replace(nych1999$X_e1, nych1999$X_e1==9, 0)
nych1999$X_e2 <- replace(nych1999$X_e2, nych1999$X_e2==1, 2)
nych1999$X_e2 <- replace(nych1999$X_e2, nych1999$X_e2==8, 0)
nych1999$X_e2 <- replace(nych1999$X_e2, nych1999$X_e2==9, 0)
nych1999$X_e3 <- replace(nych1999$X_e3, nych1999$X_e3==1, 2)
nych1999$X_e3 <- replace(nych1999$X_e3, nych1999$X_e3==8, 0)
nych1999$X_e3 <- replace(nych1999$X_e3, nych1999$X_e3==9, 0)
nych1999$X_e4 <- replace(nych1999$X_e4, nych1999$X_e4==1, 2)
nych1999$X_e4 <- replace(nych1999$X_e4, nych1999$X_e4==8, 0)
nych1999$X_e4 <- replace(nych1999$X_e4, nych1999$X_e4==9, 0)
#condition_windows = nych1999[c("X_e1","X_e2", "X_e3", "X_e4")]
#rowSums(condition_windows)
#nych1999$condition_windows = transmute(nych1999, windows = rowSums(condition_windows))
condition_windows <-(cbind(nych1999$X_e1, nych1999$X_e2, nych1999$X_e3, nych1999$X_e4))
condition_windows <- rowSums(condition_windows)
#------------------------------------------------------------------
#Condition of Stairways (f1, f2, f3, f4, f5)
# Same 2-points-per-defect scheme as floors/windows.
nych1999$X_f1 <- replace(nych1999$X_f1, nych1999$X_f1==1, 2)
nych1999$X_f1 <- replace(nych1999$X_f1, nych1999$X_f1==8, 0)
nych1999$X_f1 <- replace(nych1999$X_f1, nych1999$X_f1==9, 0)
nych1999$X_f2 <- replace(nych1999$X_f2, nych1999$X_f2==1, 2)
nych1999$X_f2 <- replace(nych1999$X_f2, nych1999$X_f2==8, 0)
nych1999$X_f2 <- replace(nych1999$X_f2, nych1999$X_f2==9, 0)
nych1999$X_f3 <- replace(nych1999$X_f3, nych1999$X_f3==1, 2)
nych1999$X_f3 <- replace(nych1999$X_f3, nych1999$X_f3==8, 0)
nych1999$X_f3 <- replace(nych1999$X_f3, nych1999$X_f3==9, 0)
nych1999$X_f4 <- replace(nych1999$X_f4, nych1999$X_f4==1, 2)
nych1999$X_f4 <- replace(nych1999$X_f4, nych1999$X_f4==8, 0)
nych1999$X_f4 <- replace(nych1999$X_f4, nych1999$X_f4==9, 0)
nych1999$X_f5 <- replace(nych1999$X_f5, nych1999$X_f5==1, 2)
nych1999$X_f5 <- replace(nych1999$X_f5, nych1999$X_f5==8, 0)
nych1999$X_f5 <- replace(nych1999$X_f5, nych1999$X_f5==9, 0)
#condition_stairways = nych1999[c("X_f1","X_f2","X_f3", "X_f4", "X_f5")]
#rowSums(condition_stairways)
#nych1999$condition_stairways = transmute(nych1999,stairways=rowSums(condition_stairways))
condition_stairways <-(cbind(nych1999$X_f1, nych1999$X_f2, nych1999$X_f3, nych1999$X_f4, nych1999$X_f5))
condition_stairways <- rowSums(condition_stairways)
#------------------------------------------------------------------
#Presence of mice or rats (35a) | 3 for Yes
# Code 1 (Yes) scores 3 points; 2 (No) and 8 (not reported) score 0.
nych1999$X_35a <- replace(nych1999$X_35a, nych1999$X_35a==1, 3)
nych1999$X_35a <- replace(nych1999$X_35a, nych1999$X_35a==2, 0)
nych1999$X_35a <- replace(nych1999$X_35a, nych1999$X_35a==8, 0)
X_35a = nych1999[c("X_35a")]
#-------------------------------------------------------------------
#Water leakage inside apartment (38a) | 3 for Yes
nych1999$X_38a <- replace(nych1999$X_38a, nych1999$X_38a==1, 3)
nych1999$X_38a <- replace(nych1999$X_38a, nych1999$X_38a==2, 0)
nych1999$X_38a <- replace(nych1999$X_38a, nych1999$X_38a==8, 0)
X_38a = nych1999[c( "X_38a")]
#===================================================================
#printing final data
library(dplyr)
new_nych1999 <- cbind(hhinc, X_30a, X_31b, X_26a, X_25c,X_32b,
X_24a, condition_wall, condition_floors, condition_windows,condition_stairways, X_35a, X_38a)
new_nych1999
write.csv(new_nych1999, file = "new_nych1999.csv")
|
6126e3939356d27b7f696e77de157404087bc1bb | 99bdd171cc8729d98261370e4ee9d733ce8563bf | /groundwater_dnr.R | 5a3b5482e089b95b4bab83f0d519a699c0cf7f4d | [] | no_license | capellett/drought-status-monitor | c509e2a0a5772bc0793a562d98af89b22297912a | 30721cb8b15f3967cdc4d08fd0ccdc464f0e69ce | refs/heads/master | 2021-08-18T07:09:48.927258 | 2021-08-04T13:49:27 | 2021-08-04T13:49:27 | 166,278,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 504 | r | groundwater_dnr.R | library(tidyverse)
# Identifiers of the DNR monitoring wells of interest.
dnr_wells <- c(
  'AIK-0849', 'ALL-0372', 'CTF-0081',
  'GRV-3342', 'JAS-0425', 'KER-0263',
  'LRN-1705', 'ORG-0431', 'SAL-0069'
)

# Water-level readings for well GRV-3342; parse the Day column from
# month/day/year + time strings into date-times.
grv3342 <- read.csv('documentation/GRV-3342_WLs.txt')
grv3342$Day <- lubridate::mdy_hms(grv3342$Day)

unique(grv3342$CountyID)
## There are two values.
unique(grv3342$site_serial)

# Placeholder stubs, to be implemented.
initialize_dnr_well_data <- function(sites) {}
update_dnr_well_data <- function(sites) {}
calculate_gw_percentiles <- function(gw_data) {}
|
af96ff20931cb87842ffc910c59d36a8ff04f6b3 | fb1f07a4526d455872375e889f0fbf10ee6cbaec | /ipl_batch9.R | a0b2346558edf2274625ebbfa3ad76657ec3154c | [] | no_license | vidyachenna/IPL | deb317cda18441813ef4e280d7fdb6e5dd179fd0 | fdf24c6727436e31d67721486366a735a49c3f69 | refs/heads/master | 2021-01-13T04:28:03.546159 | 2017-01-25T08:38:25 | 2017-01-25T08:38:25 | 79,936,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,696 | r | ipl_batch9.R | library(ggplot2) #Adding the packages required.
library(plyr)      # count()
library(ggrepel)   # geom_text_repel(), used for label placement below
library(reshape2)  # melt(), used to reshape data for the grouped bar plot
library(graphics)  # base pie()/plot()/text(), used below
# Read the IPL data sets: one row per match, one row per delivery (ball).
mat_data <- read.csv("matches.csv") #reading the .csv files into datasets
del_data <- read.csv("deliveries.csv")
# Wins per team per season (plyr::count adds a `freq` column).
win_data <- count(mat_data, c("winner", "season")) #taking total count of winners corresponding to season
# Drop rows whose winner is the empty string (no-result matches).
win_data <- subset(win_data, winner != "") #eliminating NA values
#Plotting season Vs Frequency of winning of teams: one stacked bar per season
#------------------------------------------------
ggplot(data = win_data, aes(x = "",y = freq, fill = winner)) +
  geom_bar(width=1, stat = "identity") +
  facet_grid(facets = .~season) +
  xlab("Season") +
  ylab("No of matches won") +
  guides(fill = guide_legend(title = "Team"))
# Championship matches: the data are ordered by season, so each row where
# `season` increases marks the start of a new season; the match whose id is
# one less than the first match of the new season is the previous season's
# final (match ids are sequential in this file). This replaces the original
# O(n^2) grow-by-rbind loop with a vectorized lookup; row order is preserved.
season_starts <- which(diff(mat_data$season) > 0) + 1
final_ids <- mat_data$id[season_starts] - 1
champs <- subset(mat_data, id %in% final_ids)
#Plotting Pie plot for Winning Frequency for each Champion team
#--------------------------------------------------------------
win_count <- count(champs, "winner")
pie(win_count$freq, labels = win_count$winner, main = "Winning Rate of Champions")
#Holding the data of the most recent championship match in `champs`
#(per the original author, the 2016 final)
cham_mat <- subset(del_data, del_data$match_id == champs$id[nrow(champs)])
cham_data <- data.frame(cham_mat$over, cham_mat$total_runs, cham_mat$inning)
# Runs scored in each over of each innings. NOTE(review): the column is
# named "RunRate" below but holds the per-over run total, not a rate.
rr_data <- aggregate(cham_data$cham_mat.total_runs,
                     by = list(Over = cham_data$cham_mat.over,
                               inning = cham_data$cham_mat.inning), FUN = sum)
names(rr_data)[3] <- paste("RunRate")
# Assigning "1"/"2" coerces the column to character, so ggplot maps the
# innings to a discrete colour scale.
rr_data$inning[rr_data$inning == 1] <- "1"
rr_data$inning[rr_data$inning == 2] <- "2"
#Plotting the runrate for two innings of the championship match
#---------------------------------------------------------------
qplot(Over, RunRate,data = rr_data, geom = c("point","line"), colour =inning) +
  geom_point() +
  geom_text_repel(aes(label = inning), size = 3) +
  ggtitle("Plot of Runrate per Over in each Innings") +
  xlab("Overs in each Innings") + ylab("Run Rate")
#Top ten players by number of MOM (Man of the Match) awards
player_mat <- count(mat_data, "player_of_match")
player_order <- player_mat[order(player_mat$freq,player_mat$player_of_match,decreasing = TRUE),]
bst_btsman <- head(player_order, n = 10)
# Count boundaries per batsman. NOTE(review): this filters on total_runs,
# not batsman_runs - confirm that deliveries with extras are intended to be
# excluded/included this way.
four_tot <- subset(del_data, del_data$total_runs == 4)
six_tot <- subset(del_data, del_data$total_runs == 6)
four_count <- count(four_tot, "batsman")
six_count <- count(six_tot, "batsman")
names(four_count)[2] <- paste("No.of Fours")
names(six_count)[2] <- paste("No.of Sixes")
names(bst_btsman)[1] <- paste("batsman")
names(bst_btsman)[2] <- paste("Freq")
#Merging the datasets so each row has MOM awards, sixes and fours for one
#batsman; the default inner join on `batsman` keeps only the top-ten players.
tot_perf <- merge(six_count, four_count)
tot_perf <- merge(tot_perf, bst_btsman)
#Pulling each column out of the merged data set as a plain vector
new.man =as.vector(tot_perf$batsman)
player_of_match = new.man
new.freq = as.vector(tot_perf$Freq)
MOMawards = new.freq
new.fours = as.vector(tot_perf$`No.of Fours`)
Fours = new.fours
new.six = as.vector(tot_perf$`No.of Sixes`)
Sixes = new.six
#Framing a data frame from the vectors above and melting it to long form,
#as required for a grouped bar plot.
df = data.frame(player_of_match,MOMawards, Fours, Sixes)
df <- melt(df, id.vars='player_of_match')
#Grouped bar plot: one group of bars (awards/fours/sixes) per player
#--------------------------------------------------
ggplot(df, aes(x=player_of_match, y=value, fill=variable)) +
  ggtitle("Performance of Top Ten Players") +
  geom_bar(stat='identity', position='dodge')+
  xlab("Top Ten Players") +
  ylab("Range") +
  guides(fill = guide_legend(title = "Parameters"))+
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
# Number of matches each team played in each city: a team appears in a match
# either as team1 or as team2, so count both roles and combine them.
t1_count<- count(mat_data, c("city", "team1"))
t2_count <- count(mat_data, c("city", "team2"))
names(t1_count)[2] <- "team"
names(t2_count)[2] <- "team"
team_tot <- rbind(t1_count, t2_count)
# BUG FIX: the original re-counted the rows of `team_tot` (each city/team
# pair occurs at most twice there - once per role), which capped "matches
# played" at 2. Summing the existing `freq` column via wt_var gives the
# true number of matches played in each city.
team_count <- count(team_tot, c("city", "team"), wt_var = "freq")
# Number of matches each team won in each city
winner_count <- count(mat_data, c("city", "winner"))
# Regression for "Mumbai Indians": matches played vs matches won, per city
mi_played <- subset(team_count, team == "Mumbai Indians")
mi_won <- subset(winner_count, winner == "Mumbai Indians")
names(mi_won)[3] <- "matches_won"
names(mi_played)[3] <- "matches_played"
mi_res <- merge(mi_played, mi_won)
mi_res$team <- NULL
mi_res$winner<- NULL
# Scatter plot with a fitted least-squares line. `with()` replaces the
# original attach()/detach() pair, avoiding search-path masking problems.
with(mi_res, {
  plot(matches_played, matches_won,
       main = "Success rate of MUMBAI INDIANS in each city", col = "blue",
       pch = 19,
       xlab = "Matches Played",
       ylab = "Matches Won",
       xlim = c(0, 12), ylim = c(0, 10))
  abline(lm(matches_won ~ matches_played))
  text(matches_played, matches_won, labels = city, cex = .7, pos = 4)
})
#########################################################################################
|
09810271218cfe826cf790db67d5671e0964f5d4 | 94adc0b1c4b2304d4fe99d90a7e6f1126d6a8795 | /man/Utilities.Rd | 561559c5eb51a5a11c9a5b87daebbc6a42fdd669 | [
"Minpack"
] | permissive | karlines/ccSolve | 0cb2788e33846a09ecb1d9da2b4f5c9c5d4e26db | 60108e2451db58b6bbee62b9b9fa62486c759a53 | refs/heads/master | 2020-04-29T17:12:18.124295 | 2015-04-18T08:44:28 | 2015-04-18T08:44:28 | 33,730,033 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,514 | rd | Utilities.Rd | \name{ccfunc}
\alias{ccfunc}
\title{
Utilities for ccSolve.
}
\description{
Retrieves function values of compiled code problems.
}
\usage{
ccfunc(fn, ...)
}
\arguments{
\item{fn }{The compiled object whose function value should be retrieved.
}
\item{... }{Arguments required to estimate the function value, as passed to the
solution method.
}
}
\note{
As the compilation of a problem is the time-limiting factor, a compiled object
can be saved with \link{writeDynLib} and then later loaded with \link{readDynLib}
from the package \code{inline}.
}
\author{
Karline Soetaert <karline.soetaert@nioz.nl>
}
\examples{
# generate a code and print it
String <- "
f(1) = 3*x(1) + x(2)*x(3) +2
f(2) = 2*x(2) + 3
f(3) = x(3)*(x(1)+1)
"
rfun <- compile.multiroot(String)
code(rfun)
(res <- multiroot(rfun, 1:3))
# function value
ccfunc(rfun, 1:3)
ccfunc(rfun, res$root)
\dontrun{
# save the compiled code for later use - will create two files
save.cc(rfun, file = "test.cc")
# load it, call it rr
rr <- load.cc("test.cc")
multiroot(rr, 1:3)
}
}
\references{
Much of the code used to create the compiled codes was adapted from the
R package \code{inline}.
The modifications concern the part of the function \code{cfunction} that
creates inline code for FORTRAN and Fortran 95.
Oleg Sklyar, Duncan Murdoch, Mike Smith, Dirk Eddelbuettel and Romain Francois (2013).
inline: Inline C, C++, Fortran function calls from R. R package version
0.3.13. http://CRAN.R-project.org/package=inline
}
|
e6809af9868418fa1f0ce66c16834f48f4977830 | 3a8d17c9dc99fbc0ba6460035b75199655f15596 | /runBrokenStick.R | 292b92c9ed13c5d1618526814e003b4253b289f2 | [] | no_license | hanskolus/4ka | ee042b2112e2ca9765389a5d8920ab4f5120d868 | 6d1826c1ada1385ce231d034e255bd5daf31e715 | refs/heads/master | 2021-04-25T06:59:42.619049 | 2020-07-03T21:08:49 | 2020-07-03T21:08:49 | 122,224,270 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,824 | r | runBrokenStick.R | library(geoChronR)
library(segmented)  # segmented (piecewise) regression models
library(lipdR)
# NOTE(review): hard-coded personal path; adjust before running elsewhere.
setwd('/Users/hannah/Documents/Arctic Group/Proxy analysis/forGitHub/4ka')
source('gaussianize.R')
source('brokenStick.R')   # presumably defines iterativeBrokenStick() - confirm
source('createPaths.R')   # presumably defines createPaths() - confirm
mainDir = createPaths()
# Expected to load the time-series object `TS` used below.
load(file.path(mainDir, 'RData', 'TS_climateInterp_2019.RData'))
#load('RData/TS_climateInterp_complete.RData')
# Keep only the records flagged for broken-stick analysis.
TS_BS = filterTs(TS, 'useBS == 1')
#figDir = '/Users/hannah/Documents/Arctic Group/Proxy analysis/forGitHub/BS_figs/'
figDir = file.path(mainDir, 'broken_stick', 'individual')
dir.create(figDir)
# Fit an iterative broken-stick model to every record; store the change
# points (sorted), their standard errors, and the direction of the slope
# after each change point back onto the record.
for (i in 1:length(TS_BS)) {
  print(paste0('RECORD ', i))
  # run the broken stick code
  results = iterativeBrokenStick(TS_BS[[i]]$age, TS_BS[[i]]$paleoData_values,
                                 plot.opt = T, plotName = TS_BS[[i]]$dataSetName, figDir = figDir)
  if (is.na(results$cp)) {
    # No change point found for this record.
    TS_BS[[i]]$brk_pts = NA
    TS_BS[[i]]$brk_ptsErr = NA
    TS_BS[[i]]$brk_dirs = NA
  } else {
    ordI = order(results$cp)
    TS_BS[[i]]$brk_pts = results$cp[ordI]
    TS_BS[[i]]$brk_ptsErr = results$cp.se[ordI]
    # +1 = increasing, -1 = decreasing after each change point.
    # NOTE(review): assumes coefficients 3..(k+2) of the fitted model are
    # the per-segment slope terms - confirm against brokenStick.R.
    slopes = results$o$coefficients[3:(length(results$cp)+2)]
    TS_BS[[i]]$brk_dirs = ifelse(slopes > 0, 1, -1)
  }
} # end loop thru records
fileName = file.path(mainDir, 'RData', 'BS_results_complete.RData')
save(TS_BS, file = fileName)
#save(TS_BS, file = 'BS_results_complete.RData')
## ---------------------- HISTOGRAM PLOT ---------------------- ##
# NOTE(review): `breakPts` is not defined in this chunk (presumably built
# elsewhere from TS_BS); `histBreaks` and `yrStr` are computed but unused.
histBreaks = seq(100, 11500, by = 200)
yrs = seq(300, 11500, by = 600)
yrStr = yrs / 1000
p=ggplot() + geom_histogram(aes(breakPts), breaks = yrs)
pg = ggplot_build(p)
# Extract the binned densities and bin centers ggplot computed for `p`.
densityBS = pg$data[[1]]$density
xvals = pg$data[[1]]$x
ggplot() + geom_col(aes(x = xvals, y = densityBS)) +
  xlab('Event years [BP]') + ylab('Density of events') +
  ggtitle('Broken Stick Results (simple plot)')
|
be12f4aae181d7e5ca31268897f587a80b05bae5 | f4b91fd0123d354315a3147f9df14b002fde2aa5 | /testingPlotRandom.R | e17ea28a00539ffe04033d4b1c59a4170c9ff8e9 | [] | no_license | JakeWheeler1/PlotRandomNormalFunc | 7096380d3d9d4fd173cf58a6d54bb817e78e747b | 8c0e96732a59a20283043f4421a2e167f8b117ed | refs/heads/main | 2023-08-24T18:59:45.676237 | 2021-10-06T19:36:41 | 2021-10-06T19:36:41 | 414,338,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 561 | r | testingPlotRandom.R | #############
### TestPlotRandom.R
## A short script to test the function
## that we wrote in PlotRandom.R
##
## Load the function into the environment.
source("PlotRandom.R")
## Default call (no arguments).
plotRandomNormals()
## Capture the return value and inspect its structure.
out <- plotRandomNormals()
str(out)
## Exercise every argument, with a fixed seed for reproducibility.
out2 <- plotRandomNormals(numpts=1000, mu=3, sigma=0.5,
                          numbins=18, meanColor="green4",
                          seed=2020)
## Inspect the named components of the returned object.
out2$Random_values
out2$Mean_x
out2$SD_x
out3 <- plotRandomNormals(meanColor="purple")
## Try and break it: pass a non-numeric numpts.
plotRandomNormals(numpts="Jake")
## Not completely idiot proof
|
522a55f2db3305cad2f213119789790ee5acd563 | 20d5a2f19823905ef318c04035123419d910b42b | /R/2_distance.R | 66168d2660516ea20cf3a4995c13f771164057f4 | [] | no_license | hckum/approxmapR | 2d8cc2e6c92b881547514637b2dc2b6e5f95d29d | d4251a2a8c64cff4b560a72f94e65c09bbacaccb | refs/heads/master | 2021-01-20T02:53:46.426329 | 2017-11-20T21:43:15 | 2017-11-20T21:43:15 | 83,822,747 | 0 | 0 | null | 2017-03-08T18:00:56 | 2017-03-03T17:15:12 | CSS | UTF-8 | R | false | false | 3,700 | r | 2_distance.R | sorenson_distance <- function(x, ...){
UseMethod("sorenson_distance")
}
# Sorenson dissimilarity between two plain itemsets:
# |symmetric difference of the item sets| / (|itemset 1| + |itemset 2|).
# 0 means the same items appear in both; larger values mean less overlap.
sorenson_distance.Sequence_Itemset <- function(sequence_itemset_1, sequence_itemset_2) {
  only_in_1 <- setdiff(sequence_itemset_1, sequence_itemset_2)
  only_in_2 <- setdiff(sequence_itemset_2, sequence_itemset_1)
  # setdiff() already de-duplicates and the two sets are disjoint, so the
  # size of their union is just the sum of their lengths.
  sym_diff_size <- length(only_in_1) + length(only_in_2)
  sym_diff_size / (length(sequence_itemset_1) + length(sequence_itemset_2))
}
# Weighted Sorenson dissimilarity between a weighted (consensus) itemset and
# a plain itemset, normalised by `n`. The weighted itemset is a list with:
#   $itemset_weight  - overall weight of the itemset
#   $element_weights - one weight per element
#   $elements        - the elements themselves
sorenson_distance.W_Sequence_Itemset <- function(w_sequence_itemset, sequence_itemset, n) {
  itemset_weight  <- w_sequence_itemset$itemset_weight
  element_weights <- w_sequence_itemset$element_weights
  elements        <- w_sequence_itemset$elements

  # Total weight on both sides; matched weight counts twice in the overlap.
  total_weight   <- sum(element_weights) + length(sequence_itemset) * itemset_weight
  matched_weight <- sum(element_weights[elements %in% sequence_itemset])
  raw_distance   <- (total_weight - 2 * matched_weight) / total_weight

  # Rescale by the itemset weight relative to the normalising count n.
  (raw_distance * itemset_weight + n - itemset_weight) / n
}
# Generic: replacement cost between two itemsets, used by the edit-distance
# routines below. Dispatches on the class of `x`.
repl <- function(x, ...){
  UseMethod("repl")
}
# Replacement cost between two plain itemsets: just the chosen distance.
repl.Sequence_Itemset <- function(sequence_itemset_1, sequence_itemset_2, fun = sorenson_distance) {
  fun(sequence_itemset_1, sequence_itemset_2)
}
# Replacement cost between a weighted itemset and a plain itemset; `n` is
# the normalising count forwarded to the weighted distance function.
repl.W_Sequence_Itemset <- function(w_sequence_itemset,
                                    sequence_itemset,
                                    n,
                                    fun = sorenson_distance) {
  fun(w_sequence_itemset,sequence_itemset, n)
}
# Generic: insertion/deletion cost of a single itemset.
indel <- function(x, ...){
  UseMethod("indel")
}
# Indel cost of a plain itemset = cost of replacing it with the empty
# itemset "". NOTE(review): `fun` has no default here, unlike repl().
indel.Sequence_Itemset <- function(sequence_itemset,fun) {
  repl(sequence_itemset,"",fun)
}
indel.W_Sequence_Itemset <- function(w_sequence_itemset, n, fun) {
  repl(w_sequence_itemset, "", n, fun)
}
# Generic: edit distance between two sequences of itemsets.
inter_sequence_distance <- function(x, ...){
  UseMethod("inter_sequence_distance")
}
# Edit distance (dynamic programming, Needleman-Wunsch style) between two
# sequences of itemsets. Cell [i, j] holds the minimum cost of aligning the
# first i-1 itemsets of sequence_1 with the first j-1 of sequence_2.
# Returns both the full DP matrix and the final distance (bottom-right cell).
inter_sequence_distance.Sequence <- function(sequence_1,
                                             sequence_2,
                                             fun = sorenson_distance) {
  distance_matrix <- matrix(nrow = length(sequence_1) + 1,
                            ncol = length(sequence_2) + 1)
  # First row/column: cost of aligning a prefix against the empty sequence,
  # one unit per inserted/deleted itemset.
  distance_matrix[1,] <- 0:length(sequence_2)
  distance_matrix[,1] <- 0:length(sequence_1)
  # NOTE(review): 2:nrow(...) assumes both sequences are non-empty; an
  # empty sequence would make this range run backwards.
  for(i in 2:nrow(distance_matrix)) {
    for(j in 2:ncol(distance_matrix)) {
      # Three moves: substitute itemset i-1 for j-1, or insert/delete one.
      replace <- distance_matrix[i-1,j-1] + repl(sequence_1[[i-1]],
                                                 sequence_2[[j-1]], fun)
      indel_r <- distance_matrix[i,j-1] + indel(sequence_2[[j-1]], fun)
      indel_d <- distance_matrix[i-1,j] + indel(sequence_1[[i-1]], fun)
      distance_matrix[i,j] <- min(replace, indel_d, indel_r)
    }
  }
  list(distance_matrix = distance_matrix,
       distance = distance_matrix[nrow(distance_matrix),ncol(distance_matrix)])
}
# Edit distance between a weighted (consensus) sequence and a plain
# sequence. Orientation: rows index `sequence`, columns index `w_sequence`.
# Unlike the plain-sequence method, the indel cost is fixed at 1 here (the
# itemset-specific indel calls are kept, commented out, beside each term).
inter_sequence_distance.W_Sequence <- function(w_sequence,
                                               sequence,
                                               fun = sorenson_distance) {
  # Normalising count stored as an attribute on the weighted sequence,
  # forwarded to the weighted replacement cost.
  n <- attr(w_sequence, "n")
  distance_matrix <- matrix(nrow = length(sequence) + 1,
                            ncol = length(w_sequence) + 1)
  distance_matrix[1,] <- 0:length(w_sequence)
  distance_matrix[,1] <- 0:length(sequence)
  for(i in 2:nrow(distance_matrix)) {
    for(j in 2:ncol(distance_matrix)) {
      sequence_itemset <- sequence[[i-1]]
      w_sequence_itemset <- w_sequence[[j-1]]
      replace <- distance_matrix[i-1,j-1] + repl(w_sequence_itemset, sequence_itemset, n, fun)
      indel_r <- distance_matrix[i,j-1] + 1 #indel(w_sequence_itemset, n, fun)
      indel_d <- distance_matrix[i-1,j] + 1 #indel(sequence_itemset, fun)
      distance_matrix[i,j] <- min(replace,indel_d,indel_r)
    }
  }
  list(distance_matrix = distance_matrix,
       distance = distance_matrix[nrow(distance_matrix), ncol(distance_matrix)])
}
# Pairwise distances for a whole list of sequences; delegates to the
# compiled implementation (inter_sequence_distance_cpp, defined elsewhere
# in the package).
inter_sequence_distance.Sequence_List <- function(sequence_list){
  inter_sequence_distance_cpp(sequence_list)
}
|
efc5aeda48cdf2c1316230d1693e2ba572deee34 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/locits/examples/print.tosANYN.Rd.R | b1b8d5f5043144f1014696d2bf8ecf3f25fcddb3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 239 | r | print.tosANYN.Rd.R | library(locits)
### Name: print.tosANYN
### Title: Print out a 'tosANYN' class object, eg from the 'link{hwtos}'
### function.
### Aliases: print.tosANYN
### Keywords: ts
### ** Examples
#
# See example at end of help for hwtos
#
|
cefea7c1d28914b32a60f6b6d20a459f9c63f576 | fdf839eeaa710470b7a9d881735e893d9f0fdd0c | /man/getLoc.Rd | f19827543aeea0ba2f57d3d5345dd8670b69dd4a | [] | no_license | kjrom-sol/plantR | 0da8749c532cda13aa4f6681dc0293412e385632 | e75fe4f1711c971f5be7a4e77bff5663c069bee3 | refs/heads/master | 2023-04-28T03:05:08.054234 | 2021-05-05T01:56:21 | 2021-05-05T01:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,339 | rd | getLoc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getLoc.R
\name{getLoc}
\alias{getLoc}
\title{Get Locality and Coordinates}
\usage{
getLoc(
x,
str.names = c("resol.orig", "loc.string", "loc.string1", "loc.string2"),
gazet = "plantR",
gazet.names = c("loc", "loc.correct", "latitude.gazetteer", "longitude.gazetteer",
"resolution.gazetteer"),
orig.names = FALSE
)
}
\arguments{
\item{x}{a data.frame containing the strings for locality search. See details
for the specifications of this data frame.}
\item{str.names}{a vector of at least two columns names containing the
locality resolution and search string(s), in that order. Defaults to
'resol.orig', 'loc.string', 'loc.string1' and 'loc.string2'.}
\item{gazet}{a data.frame containing the gazetteer. The default is "plantR",
the internal \strong{plantR} gazetteer (biased towards Latin America).}
\item{gazet.names}{a vector of at least four columns names containing the
locality search string, latitude and longitude, in that order. If available,
the resolution of the gazetteer can be provided as a fifth name. Defaults
to columns names of the \strong{plantR} gazetteer: 'loc', 'loc.correct',
'latitude.gazetteer', 'longitude.gazetteer' and 'resolution.gazetteer'.}
\item{orig.names}{logical. Should the original columns names of the gazetteer
be preserved. Default to FALSE.}
}
\value{
The data frame \code{x}, with the new columns retrieved from the
gazetteer. More specifically, it returns the string used for the search in
the gazetteer (column 'loc'), the string retrieved (if any, column
'loc.correct'), the geographical coordinates (in decimal degrees) and the
resolution associated with the string retrieved (columns
'latitude.gazetteer', 'longitude.gazetteer', and 'resolution.gazetteer',
respectively) and the associated resolution.
}
\description{
This function uses the \strong{plantR} locality strings to search for
existing localities and their respective coordinates in a
\href{https://en.wikipedia.org/wiki/Gazetteer}{gazetteer}, which can be
used to replace missing coordinates and in the validation process of the
locality information and geographical coordinates provided.
}
\details{
The function was initially designed as part of a larger routine to
edit and validate locality information from plant occurrence data. It is
possible to use it separately, but it may be easier to use it under the
workflow presented in the \strong{plantR} manual. If used separately, users must
provide a data frame with at least two columns ('resol.orig' and
'loc.string'). Other locality strings ('loc.string1' and 'loc.string2') may
also be provided and in this case, these additional strings are used to
search for information below the municipality/county level, that is, to
retrieve from the gazetteer information at the locality level or below. If
these columns have different names in \code{x}, these names can be supplied using,
the argument \code{str.names}. See Examples below.
The default \strong{plantR} gazetteer includes information for all countries at
the country level (i.e. administrative level 0) and at the lowest
administrative level available for all Latin at GDAM
(\url{https://gadm.org}) for 51 Latin American countries. For Brazil, the
gazetteer also contains information at the locality level (e.g. farms,
forest fragments, parks), obtained from \href{https://www.ibge.gov.br/}{IBGE},
\href{http://cncflora.jbrj.gov.br}{CNCFlora} and
\href{http://labtrop.ib.usp.br/doku.php?id=projetos:treeco:start}{TreeCo}
databases. It also includes common spelling variants and historical changes
to locality names (currently biased for Brazil) and more common notation
variants of locality names found in the lcoality description of records
from GBIF, speciesLink and JABOT databases (include few type localities).
In total the gazetteer has nearly 25,000 locality names associated with a
valid geographical coordinates.
A different gazetteer than the \strong{plantR} default can be used. This gazetteer
must be provided using the argument \code{gazet} and it must contain the
columns 'loc' (search string), 'loc.correct' (correct string),
'latitude.gazetteer', 'longitude.gazetteer' (in decimal degrees) and
'resolution.gazetteer' (e.g. country, state, etc). If the names for these
columns are different, they can be supplied using argument \code{gazet.names}.
It is important to stress that the retrieval of locality information
depends on the completeness of the gazetteer itself. So, if a query does
not find a "valid" locality, it does not necessarily mean that the locality
does not exist or that its notation is wrong. It can simply mean that the
gazetteer is incomplete for the region you are working with. The gazetteer
is permanently being improved. If you find an error or if you want to
contribute with region-specific gazetteers, please send an email to
\href{mailto:raflima@usp.br}{raflima@usp.br}.
}
\examples{
## Using the function separately (need to provide column names and
#strings in an specific format)
(df <- data.frame(resol = c("municipality","locality"),
loc = c("brazil_rio janeiro_parati","brazil_rio janeiro_paraty"),
loc1 = c(NA, "brazil_rio janeiro_paraty_paraty mirim"),
stringsAsFactors = FALSE))
getLoc(df, str.names = c("resol", "loc", "loc1"))
## Using the function under the __plantR__ workflow
(df <- data.frame(country = c("BR", "Brazil", "Brasil", "USA"),
stateProvince = c("RJ", "Rio de Janeiro", "Rio de Janeiro","Florida"),
municipality = c("Paraty", "Paraty", "Parati", NA),
locality = c(NA,"Paraty-Mirim", NA, NA),
stringsAsFactors = FALSE))
# Formating the locality information
occs.fix <- fixLoc(df)
# Creating locality strings used to query the gazetteer
occs.locs <- strLoc(occs.fix)
# Final editing the locality strings (reduces variation in locality notation)
occs.locs$loc.string <- prepLoc(occs.locs$loc.string)
occs.locs$loc.string1 <- prepLoc(occs.locs$loc.string1)
occs.locs$loc.string2 <- prepLoc(occs.locs$loc.string2)
# Querying the gazetteer with the edited strings
getLoc(occs.locs)
}
\seealso{
\link[plantR]{fixLoc}, \link[plantR]{strLoc} and \link[plantR]{prepLoc}.
}
\author{
Renato A. F. de Lima
}
|
dbd02ea49cc3f9b7a2352580706aa14615cf28f3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rfacebook/examples/getNewsfeed.Rd.R | a56d5f551d33aefa39631401b08de17e45c7b2bf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 381 | r | getNewsfeed.Rd.R | library(Rfacebook)
### Name: getNewsfeed
### Title: Download recent posts from the authenticated user's newsfeed
### Aliases: getNewsfeed
### ** Examples
## Not run:
##D ## See examples for fbOAuth to know how token was created.
##D ## Capture 100 most recent posts on my newsfeed
##D load("fb_oauth")
##D my_newsfeed <- getNewsfeed(token=fb_oauth, n=100)
## End(Not run)
|
bc58a2b567d0a23fbf253d589452b56431c74697 | 887293aeeccaba7e37369a47844c7f870e6f0ca2 | /run_analysis.R | 6f091c9f8cd8d7d96401b4ea1a021e15e8eb6f3f | [] | no_license | cfelipefranco/getting-and-cleaning-data-course-project | 44f80db9d0c86b8546cef75a392f35b56b7be633 | 42ae4a19f83ddc29938c1ba6a3bb678b5441a0a1 | refs/heads/master | 2021-01-09T06:13:38.622603 | 2017-02-08T16:59:26 | 2017-02-08T16:59:26 | 80,940,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,389 | r | run_analysis.R | run_analysis <- function(){
        # Verifying if the UCI HAR Dataset is in the working directory
        if(sum(grepl("UCI HAR Dataset",dir()))==0){
                stop("Please set working directory to folder
                     containing the UCI HAR Dataset unzipped
                     folder")
        }
        # NOTE(review): fread/select/mutate/rename/%>% require data.table
        # and dplyr to be loaded before this function runs - confirm the
        # libraries are attached elsewhere in this file.
        # Read the X_test data set and complement it with label, subject and a type flag to
        # indicate if the observation is part of the test or train dataset
        testSet <- fread("UCI HAR Dataset/test/X_test.txt")
        testLabels <- fread("UCI HAR Dataset/test/y_test.txt")
        testSubjects <- fread("UCI HAR Dataset/test/subject_test.txt")
        testSet <- cbind(testSet,label=testLabels$V1,subject=testSubjects$V1,type="test")
        # Read the X_train data set and complement it with label, subject and a type flag to
        # indicate if the observation is part of the test or train dataset
        trainSet<-fread("UCI HAR Dataset/train/X_train.txt")
        trainLabels<-fread("UCI HAR Dataset/train/y_train.txt")
        trainSubjects <- fread("UCI HAR Dataset/train/subject_train.txt")
        trainSet <- cbind(trainSet,label=trainLabels$V1,subject=trainSubjects$V1, type="train")
        # Complete task 1 by combining the test and train sets into one dataset
        mergedSet <- rbind(testSet,trainSet)
        # Complete task 2 by reading the features file containing 561 columns names of the test
        # and train sets and filtering only its mean and standard deviation informations
        # (whilst keeping label, subject and type); meanFreq columns are excluded
        features <- fread("UCI HAR Dataset/features.txt")
        meanAndStdFilter <- (grepl("mean",features$V2)|grepl("std",features$V2)) & !grepl(
                "mean[Ff]req",features$V2)
        meanAndStdColumns <- features[meanAndStdFilter,]
        filteredSet <- select(mergedSet, meanAndStdColumns$V1,label, subject,type)
        # Complete task 3 by setting appropriate names to the activities in the data set instead
        # of numeric label
        activities <- fread("UCI HAR Dataset/activity_labels.txt")
        activityNamedSet <- mutate(filteredSet, label = factor(activities$V2[label]))
        # Complete task 4 by setting column labels that are descriptive, lowercased, unique and
        # lacking special characters and spaces
        colLabels <- meanAndStdColumns$V2 %>% tolower() %>% {gsub("['('')'-]","",.)} %>%
                {gsub("(body){2}","body", .)}
        colnames(activityNamedSet) <- c(colLabels, "activity", "subject", "type")
        # Complete task 5 by creating an independent data set with averages of each variable for
        # each activity and each subject
        resultingMeansSet <- aggregate(activityNamedSet[,1:66],list(activityNamedSet$activity
                , activityNamedSet$subject),mean)
        resultingMeansSet <- rename(resultingMeansSet, activity=Group.1, subject=Group.2)
        # Write output file containing resulting data set (content will be available both in
        # resultingMeansSet.txt file and run_analysis function return)
        write.table(resultingMeansSet,file = "resultingMeansSet.txt",row.names = F)
        # Free used variables
        # NOTE(review): these remove() calls are unnecessary - local
        # variables are discarded when the function returns.
        remove("testSet")
        remove("testLabels")
        remove("testSubjects")
        remove("trainSet")
        remove("trainLabels")
        remove("trainSubjects")
        remove("mergedSet")
        remove("features")
        remove("meanAndStdFilter")
        remove("meanAndStdColumns")
        remove("filteredSet")
        remove("activities")
        remove("activityNamedSet")
        remove("colLabels")
        # Return resulting set
        resultingMeansSet
} |
a231d391275b0348f5da8c766c72f9006753e90e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/npcp/examples/cpRho.Rd.R | 101f83a668a8e8b440fb88b3a332151fc7ecca5a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 480 | r | cpRho.Rd.R | library(npcp)
### Name: cpRho
### Title: Test for Change-Point Detection Based on Spearman's Rho
### Aliases: cpRho
### Keywords: htest multivariate nonparametric ts
### ** Examples
## Not run:
##D require(copula)
##D n <- 100
##D k <- 50 ## the true change-point
##D u <- rCopula(k,gumbelCopula(1.5))
##D v <- rCopula(n-k,gumbelCopula(3))
##D x <- rbind(u,v)
##D cp <- cpRho(x, b = 1)
##D cp
##D ## Estimated change-point
##D which(cp$rho == max(cp$rho))
## End(Not run)
|
7eb30ea2fa66e88603d19f0e3797d50518e749e2 | c0e9806481a6e93748b6ad4a49f6d6deb5f06eea | /man/safe_write_csv.Rd | 8a58a2726addb53a08827f45bb3768f93312b7e6 | [] | no_license | beauchamplab/rave | 8057d2f04f7631e631da74bf228f647cf23f9a27 | 458e4abf9104e12b15eff7743f95200b33a168b5 | refs/heads/master | 2023-08-03T12:53:01.874006 | 2023-07-22T12:42:07 | 2023-07-22T12:42:07 | 110,274,745 | 21 | 7 | null | 2022-03-15T19:54:42 | 2017-11-10T17:34:16 | R | UTF-8 | R | false | true | 482 | rd | safe_write_csv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io-csv.R
\name{safe_write_csv}
\alias{safe_write_csv}
\title{Save data to "CSV", if file exists, rename old file}
\usage{
safe_write_csv(data, file, ..., quiet = FALSE)
}
\arguments{
\item{data}{data frame}
\item{file}{"CSV" file to save}
\item{...}{pass to \code{\link[utils]{write.csv}}}
\item{quiet}{suppress overwrite message}
}
\description{
Save data to "CSV", if file exists, rename old file
}
|
85f13f3ba7984174021e2ed082deef89cc8d21bf | 89d219d3dfa744543c6ba1c1b3a99e4dcabb1442 | /man/xtsToDataFrame.Rd | d3b4d5468a7f9d560d259c600fb10e2fb3523e90 | [] | no_license | pteetor/tutils | e2eb5d2fba238cbfe37bf3c16b90df9fa76004bb | fe9b936d8981f5cb9b275850908ef08adeffef4e | refs/heads/master | 2022-06-17T11:34:30.590173 | 2022-06-14T02:02:15 | 2022-06-14T02:02:15 | 77,761,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 368 | rd | xtsToDataFrame.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xtsConversion.R
\name{xtsToDataFrame}
\alias{xtsToDataFrame}
\title{Convert xts matrix to data frame}
\usage{
xtsToDataFrame(x)
}
\arguments{
\item{x}{An xts matrix}
}
\value{
A data frame with the input columns
plus a new 'date' column
}
\description{
DEPRECATED. Use tidy() instead.
}
|
72f1d369af938467a0e87dfd54929b5b45a6a2ee | 2253c85e1c90b54df4b69ad40b6ce9b207c76415 | /man/use_post.Rd | c3b7d005341888e1e6f08f3f0748cc5c7e21fb70 | [
"MIT"
] | permissive | djnavarro/hugodown | a204e1709df31ac1dae81f895bf3e89191f93e39 | 168a361518f5450e498d0fa9e34eea93f0aa677d | refs/heads/master | 2023-07-02T11:17:59.870024 | 2021-07-04T23:50:13 | 2021-07-04T23:50:13 | 270,511,218 | 0 | 0 | NOASSERTION | 2020-07-10T05:29:44 | 2020-06-08T03:15:26 | R | UTF-8 | R | false | true | 1,226 | rd | use_post.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post.R
\name{use_post}
\alias{use_post}
\title{Create a new post}
\usage{
use_post(path, kind = NULL, data = list(), site = ".", open = is_interactive())
}
\arguments{
\item{path}{Directory to create, like \code{blog/2020-my-favourite-package}.}
\item{kind}{Kind of archetype to use; usually automatically derived
from the base directory of \code{path}.}
\item{data}{Any additional data to be used when templating \code{.Rmd} files.
The default data includes:
\itemize{
\item \code{date}: today's date (in YYYY-MM-DD format).
\item \code{author}: \code{\link[whoami:fullname]{whoami::fullname()}}.
\item \code{slug}: taken from the file name of \code{path}.
}}
\item{site}{Path to the hugo site.}
\item{open}{Open file for interactive editing?}
}
\description{
Post creation takes advantage of Hugo's
\href{https://gohugo.io/content-management/archetypes/}{archetypes} or templates,
with an extension for \code{.Rmd} files. \code{use_post()} first calls \verb{hugo new}
(which will apply go templating to \code{.md} files in the archetype),
and then uses \href{https://github.com/edwindj/whisker}{whisker} to template
any \code{.Rmd} files.
}
|
f981bd3bc31ed85353d083cae571253ec3860e58 | e8c032e334efeb0a6a70b78ef9a710313ca66a5d | /R/token.R | 709079dfad432eefa51a1b683cb263ecbe09c165 | [
"Apache-2.0"
] | permissive | alfahaama/r-corpus | 1afb9e8ad25b0ea08d0f6da1083c579356ed6438 | 5f024c231f3dccbd145f4118013931d2f308deae | refs/heads/master | 2021-01-25T09:21:25.814791 | 2017-06-09T00:53:28 | 2017-06-09T00:53:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,356 | r | token.R | # Copyright 2017 Patrick O. Perry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Construct a token filter object describing how raw text should be
# normalized and which tokens should be kept or dropped.  Every property
# is assigned through the corpus_token_filter replacement methods, which
# validate the value and keep NULL-valued entries (with their names) in
# the underlying list.
token_filter <- function(map_case = TRUE, map_compat = TRUE, map_quote = TRUE,
                         remove_ignorable = TRUE,
                         stemmer = NA, stem_except = drop, combine = NULL,
                         drop_letter = FALSE, drop_mark = FALSE,
                         drop_number = FALSE, drop_punct = FALSE,
                         drop_symbol = FALSE, drop_other = FALSE,
                         drop = NULL, drop_except = NULL)
{
    # property order here determines the order of the list elements,
    # which callers (printing, numeric subscripting) rely on
    props <- c("map_case", "map_compat", "map_quote", "remove_ignorable",
               "stemmer", "stem_except", "combine", "drop_letter",
               "drop_mark", "drop_number", "drop_symbol", "drop_punct",
               "drop_other", "drop", "drop_except")
    filter <- structure(list(), class = "corpus_token_filter")
    for (prop in props) {
        # dispatches to `[[<-.corpus_token_filter`, hence validates
        filter[[prop]] <- get(prop)
    }
    filter
}
# Coerce an arbitrary list-like object to a corpus_token_filter by
# copying (and re-validating, via the replacement methods) every known
# token filter property.  NULL input passes through unchanged.
as_token_filter <- function(filter)
{
    if (is.null(filter)) {
        return(NULL)
    }
    result <- structure(list(), class = "corpus_token_filter")
    # names(token_filter()) enumerates all recognized properties
    for (property in names(token_filter())) {
        result[[property]] <- filter[[property]]
    }
    result
}
# Multi-element replacement for token filters.  Numeric subscripts are
# translated to property names; a length-1 replacement value is recycled
# to the number of subscripts.  Subscripts that map to no property (NA
# names) are silently skipped, matching base-R `[<-` conventions.
`[<-.corpus_token_filter` <- function(x, i, value)
{
    if (anyNA(i)) {
        stop("NAs are not allowed in subscripted assignments")
    }
    # normalize subscripts to property names
    keys <- if (is.character(i)) i else names(x)[i]
    n <- length(keys)
    if (length(value) == 1) {
        value <- rep(value, n)
    } else if (length(value) != n) {
        stop("number of items to replace differs from the replacement length")
    }
    for (pos in seq_along(keys)) {
        key <- keys[[pos]]
        val <- value[[pos]]
        if (!is.na(key)) {
            # delegates to `[[<-`, which validates the value
            x[[key]] <- val
        }
    }
    x
}
# Single-property replacement for token filters.  The new value is
# coerced/validated according to the property's type before being
# stored; unknown property names are an error.
`$<-.corpus_token_filter` <- function(x, name, value)
{
    boolean_props <- c("map_case", "map_compat", "map_quote",
                       "remove_ignorable", "drop_letter", "drop_mark",
                       "drop_number", "drop_symbol", "drop_punct",
                       "drop_other")
    charvec_props <- c("stem_except", "combine", "drop", "drop_except")

    if (name %in% boolean_props) {
        value <- as_option(name, value)
    } else if (name %in% charvec_props) {
        value <- as_character_vector(name, value)
    } else if (name == "stemmer") {
        value <- as_stemmer(value)
    } else {
        stop(paste0("unrecognized token filter property: '", name, "'"))
    }

    # drop the class so the assignments below do not dispatch back here
    out <- unclass(x)
    if (is.null(value)) {
        # setting a list element to NULL is tricky; see
        # http://stackoverflow.com/a/7945259
        # (first store NA to create the named slot, then blank it out
        # with a single-bracket list(NULL) assignment so the name stays)
        out[[name]] <- NA
        out[match(name, names(out))] <- list(NULL)
    } else {
        out[[name]] <- value
    }
    class(out) <- class(x)
    out
}
# Single-element `[[<-` for token filters: resolve the subscript to a
# property name and forward to the `$<-` method, which validates.
`[[<-.corpus_token_filter` <- function(x, i, value)
{
    if (length(i) > 1) {
        stop("no such token filter property")
    }
    name <- if (is.character(i)) i else names(x)[[i]]
    if (is.na(name)) {
        stop(paste0("no such token filter property (", i, ")"))
    }
    `$<-.corpus_token_filter`(x, name, value)
}
# Print a token filter, one "name: value" line per property.  NULL and
# scalar values are shown inline; longer vectors are summarized with
# utils::str().  Returns the filter invisibly, like base print methods.
print.corpus_token_filter <- function(x, ...)
{
    cat("Token filter with the following options:\n\n")
    for (key in names(x)) {
        entry <- x[[key]]
        cat(paste0("\t", key, ": "))
        if (is.null(entry)) {
            cat("NULL\n")
        } else if (length(entry) == 1) {
            cat(paste0(entry, "\n"))
        } else {
            # leave room for the tab, the key, and the ": " separator
            avail <- getOption("width") - 8 - nchar(key) - 2
            utils::str(entry, width = avail, give.attr = FALSE)
        }
    }
    invisible(x)
}
# Tokenize text according to a token filter.  The input is coerced to a
# corpus text object and the filter to a corpus_token_filter before the
# work is handed off to the C implementation.  Argument evaluation is
# left to right, matching the original sequential coercions.
tokens <- function(x, filter = token_filter())
{
    .Call(C_tokens_text, as_text(x), as_token_filter(filter))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.