blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d8f70b34a6ecbc47d4220f9c287f1786017ac9ac | 8c4a746ea10217b868480b1937e48c02c8a75ac7 | /Exascalar Power and Efficiency Trend Plot.R | 4347ad964fbc0a0205cd05be0729ec60819d151d | [] | no_license | ww44ss/Exascalar-Analysis- | 2d6bbf4ae786a811b4b9463965fa2cb56badeaa8 | c418446080ebefc3ec045bde96ff776cce72a532 | refs/heads/master | 2020-04-12T01:48:45.515884 | 2019-08-11T21:37:54 | 2019-08-11T21:37:54 | 21,465,360 | 5 | 13 | null | null | null | null | UTF-8 | R | false | false | 10,284 | r | Exascalar Power and Efficiency Trend Plot.R | # Exascalar Data Trend Plot
# This program pulls in all the data and then plots a trend of the top, mean, and median exascalar.
# plan to update to pull big exascalar file
## This program imports cleaned data from the Green500 and Top500 lists
## GET THE CLEANED DATA
##check for Exascalar Directory. If none exists stop program with error
##check to ensure results director exists
if(!file.exists("./results")) stop("Data not found in directory Exascalar, first run Exascalar_Cleaner to get tidy data")
## set working directory
# define Data Directories to use
results <- "./results"
## ------------------------
ExaPerf <- 10^12 ##in Megaflops
ExaEff <- 10^12/(20*10^6) ##in Megaflops/Watt
## this function coputes Exascalar from a list with columns labeled $rmax and $megaflopswatt
## note the function computes to three digits explicitly
compute_exascalar <- function(xlist){
## compute exascalar
t1 <- (log10(xlist$rmax*10^3/ExaPerf) + log10(xlist$mflopswatt/(ExaEff)))/sqrt(2)
## round to three digits
t2 <- round(t1, 3)
## clean up
format(t2, nsmall=3)
}
## Read results files
# import data set
BigExascalar <- read.csv(paste0(results, "/BigExascalar.csv"), header=TRUE)
print("data read")
## create datamatrix table to select as function of date various stats.
datematrix<-as.data.frame(table(BigExascalar$date))
##PLOT MID, MEDIAN AND TOP EXASCALAR TREND
##the way this works is for each date first take the subest of BigExascalar and then find the max value.
TopEx <- NULL
for (ii in 1:length(datematrix[,1])) {
xx <- BigExascalar[BigExascalar$date == datematrix[ii,1],]
xrow <- subset(xx, xx$exascalar == max(xx$exascalar))
TopEx<-rbind(TopEx, xrow)
}
MeanEx <- NULL
for (ii in 1:length(datematrix[,1])) {
xx <- BigExascalar[BigExascalar$date == datematrix[ii,1],]
xrow <- subset(xx, xx$exascalar == mean(xx$exascalar))
MeanEx<-rbind(MeanEx, xrow)
}
MedianPerf <- NULL
for (ii in 1:length(datematrix[,1])) {
xx <- BigExascalar[BigExascalar$date == datematrix[ii,1],]
xrow <- subset(xx, xx$rmax == median(xx$rmax))
MedianPerf<-rbind(MedianPerf, xrow)
}
Median <- NULL
for (ii in 1:length(datematrix[,1])) {
xx <- BigExascalar[BigExascalar$date == datematrix[ii,1],]
xrow <- subset(xx, xx$rmax == median(xx$rmax))
MedianPerf<-rbind(MedianPerf, xrow)
}
##mean efficiency function calculated the mean perforance adn power and then the mean efficiency from that ratio
##thus defined it reflects the popultion of the Top500 computers.
mean_eff <- function(list){mean(list$rmax)/mean(list$power)}
MeanEx <- matrix(c(mean_eff(Jun09), mean_eff(Nov09),
mean_eff(Jun10), mean_eff(Nov10),
mean_eff(Jun11), mean_eff(Nov11),
mean_eff(Jun12), mean_eff(Nov12),
mean_eff(Jun13), mean_eff(Nov13),
mean_eff(Jun14),
mean(Jun09$rmax), mean(Nov09$rmax),
mean(Jun10$rmax), mean(Nov10$rmax),
mean(Jun11$rmax), mean(Nov11$rmax),
mean(Jun12$rmax), mean(Nov12$rmax),
mean(Jun13$rmax), mean(Nov13$rmax),
mean(Jun14$rmax)),
ncol=2, nrow = 11)
MeanEx <- as.data.frame(MeanEx)
names(MeanEx) <- c("mflopswatt", "rmax")
median_eff <- function(list){median(list$rmax)/median(list$power)}
MedianEx <- matrix(c(median_eff(Jun09), median_eff(Nov09),
median_eff(Jun10), median_eff(Nov10),
median_eff(Jun11), median_eff(Nov11),
median_eff(Jun12), median_eff(Nov12),
median_eff(Jun13), median_eff(Nov13),
median_eff(Jun14),
median(Jun09$rmax), median(Nov09$rmax),
median(Jun10$rmax), median(Nov10$rmax),
median(Jun11$rmax), median(Nov11$rmax),
median(Jun12$rmax), median(Nov12$rmax),
median(Jun13$rmax), median(Nov13$rmax),
median(Jun14$rmax)),
ncol=2, nrow = 11)
MedianEx <- as.data.frame(MedianEx)
names(MedianEx) <- c("mflopswatt", "rmax")
bottom_eff <- function(list){list$rmax[which(list$X == max(list$X))]/list$power[which(list$X == max(list$X))]}
bottom_perf <- function(list){list$rmax[which(list$X == max(list$X))]}
BottomGreen <- matrix(c(bottom_eff(Jun09), bottom_eff(Nov09),
bottom_eff(Jun10), bottom_eff(Nov10),
bottom_eff(Jun11), bottom_eff(Nov11),
bottom_eff(Jun12), bottom_eff(Nov12),
bottom_eff(Jun13), bottom_eff(Nov13),
bottom_eff(Jun14),
bottom_perf(Jun09), bottom_perf(Nov09),
bottom_perf(Jun10), bottom_perf(Nov10),
bottom_perf(Jun11), bottom_perf(Nov11),
bottom_perf(Jun12), bottom_perf(Nov12),
bottom_perf(Jun13), bottom_perf(Nov13),
bottom_perf(Jun14)),
ncol=2, nrow = 11)
BottomGreen <- as.data.frame(MedianEx)
names(BottomGreen) <- c("mflopswatt", "rmax")
top_eff <- function(list){list$rmax[which(list$green500rank == 1)[1]]/list$power[which(list$green500rank == 1)[1]]}
top_perf <- function(list){list$rmax[which(list$green500rank == 1)[1]]}
TopGreen <- matrix(c(top_eff(Jun09), top_eff(Nov09),
top_eff(Jun10), top_eff(Nov10),
top_eff(Jun11), top_eff(Nov11),
top_eff(Jun12), top_eff(Nov12),
top_eff(Jun13), top_eff(Nov13),
top_eff(Jun14),
top_perf(Jun09), top_perf(Nov09),
top_perf(Jun10), top_perf(Nov10),
top_perf(Jun11), top_perf(Nov11),
top_perf(Jun12), top_perf(Nov12),
top_perf(Jun13), top_perf(Nov13),
top_perf(Jun14)),
ncol=2, nrow = 11)
TopGreen <- as.data.frame(TopGreen)
names(TopGreen) <- c("mflopswatt", "rmax")
DatesString<-c("06/01/2009", "11/01/2009","06/01/2010","11/01/2010","06/01/2011","11/01/2011",
"06/01/2012",
"11/01/2012","06/01/2013",
"11/01/2013","06/01/2014")
Date <- as.Date(DatesString, "%m/%d/%Y")
## EXASCALAR TREND
## Plot of the Top and Median Exascalar for current cleaned data set
require(ggplot2)
## create TopEx vector
topexascalar<-TopEx$exascalar
## create TopEX data frame for fitting
TopExData <- as.data.frame(cbind(Date, topexascalar))
## fitted model of Top Exascalar data
TopExFit <- lm(topexascalar ~ Date , data = TopExData)
## plot the data
plot(Date, topexascalar,
ylim=c(-7.0,0),
xlim = c(14000, 19000),
main = "",
ylab = "Exascalar",
col = "red",
bg = "steelblue2",
pch=21)
par(new=TRUE)
print('median')
## create median vector for plotting
MedianEx$exascalar <- compute_exascalar(MedianEx)
medianexascalar <- as.numeric(MedianEx$exascalar)
## createe median data fram for fitting
MedianExData <- as.data.frame(cbind(Date, medianexascalar))
##fitted model of median data
MedianExFit <- lm(medianexascalar ~ Date , data = MedianExData)
plot(Date, medianexascalar,
ylim=c(-7.0,0),
xlim = c(14000, 19000),
xlab = "",
ylab = "",
main = "Exascalar Trend",
col = "dark blue",
bg = "green",
pch=19)
## get parameters for fitted lines
topslope<-TopExFit$coefficient[2]
topintercept<-TopExFit$coefficient[1]
## calculate date zero - when the top trend will intercet zero exascalar
## the zero date is an important figure of merit of the population (zero exascalar)
## representing the most advanced supercomputing capability
datezero = -topintercept/topslope
##draw lines
lines(c(14000, datezero), c(topintercept+topslope*14000, topintercept+topslope*datezero))
medianslope<-MedianExFit$coefficient[2]
medianintercept<-MedianExFit$coefficient[1]
## draw fitted line for median
lines(c(14000, datezero), c(medianintercept+medianslope*14000, medianintercept+medianslope*datezero))
## add text to graph
text(datezero, 0, as.Date(datezero, origin="1970-01-01"), cex=.5, srt=0, pos = 2)
text(datezero, -1.2, "Top", cex=.7, srt=0, pos = 2)
text(datezero, medianintercept+medianslope*datezero-1.2, "Median", cex=.7, srt=0, pos = 2)
text(datezero-100,
-7, "data from June14 Green500 and Top500 ", cex=.4, col="black", pos=3)
TopEx<-cbind(TopEx, Date)
## POWER TREND PLOT
powerplot <- ggplot(TopEx, aes(x = Date, y = power)) + geom_point() + coord_trans(y="log10")
## get rid of grid lines
powerplot <- powerplot + theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank())
## add performance ski
powerplot<-powerplot+ geom_point(aes(alpha = rmax))
png(filename=paste0(d, "/ExaPowerTrend.png"))
##print(aaaaaa)
dev.off()
par(new=FALSE)
toppower<-TopEx$power
plot(Date, toppower,
ylim=c(200,20000),
xlim = c(14000, 16500),
main = "",
log="y",
ylab = "Power (kW)",
col = "red",
bg = "steelblue2",
pch=21)
par(new=TRUE)
medianpower<-MedianEx$rmax/MeanEx$mflopswatt
plot(Date, medianpower,
ylim=c(200,20000),
xlim = c(14000, 16500),
log="y",
xlab = "",
ylab = "",
main = "Power Trend",
col = "dark blue",
bg = "green",
pch=19)
text(Date[5], toppower[5], "Top Power", cex=.7, srt=0, pos = 2)
text(Date[5], medianpower[5], "Median Power", cex=.7, srt=0, pos = 2)
text(16222,
300, "data from June14 Green500 and Top500 ", cex=.4, col="black", pos=3)
topeff<-TopEx$mflopswatt
plot(Date, topeff,
ylim=c(50,3000),
xlim = c(14000, 16500),
log="y",
main = "",
ylab = "efficiency (mflops per Watt)",
col = "red",
bg = "steelblue2",
pch=21)
par(new=TRUE)
medianeff<-MedianEx$mflopswatt
plot(Date, medianeff,
ylim=c(50,3000),
xlim = c(14000, 16500),
log="y",
xlab = "",
ylab = "",
main = "Efficiency Trend",
col = "dark blue",
bg = "green",
pch=19)
|
d3deb55cf47dcca0062f2a736206182eb568754d | 131aab8b543b599c798443c022a61ce0d095dbce | /server.r | 5f81a5334589dc3fb29b3ab0b617c2da025ed3a3 | [
"MIT"
] | permissive | mkuzak/atmap-shiny | ff837c630b98c986dad080b003558aafbe500051 | f8fba74e6e7c732fd16cb1db260406bc6e68d3d0 | refs/heads/master | 2021-01-01T19:39:22.377517 | 2014-09-23T20:29:25 | 2014-09-23T20:29:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 668 | r | server.r | library(shiny)
library(ggvis)
library(leaflet)
library(maps)
# load versaille stock accessions
accessions <- read.table("data/versaille_stock.csv",
sep="\t", header=TRUE)
shinyServer(function(input, output, session) {
accessions$id <- c(1:nrow(accessions))
# render leaflet map
map <- createLeafletMap(session, 'map')
observe({
map$addCircle(
accessions$Latitude,
accessions$Longitude,
radius=rep(100000/max(5, input$map_zoom)^2, nrow(accessions)),
layerId=accessions$id,
options=list(color='#4A9', fill=TRUE, wight=1)
)
})
output$accession_table = renderDataTable({
accessions
})
}) |
3cb8b509165c1abb5cd30390466ee692e99b68db | 57e77ea9dfe2ce75d4c7e69490901d21059bd410 | /20200930_workshop_code.R | 45ef427ca419cfd051fd8609b486863485e693e4 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | oulib-resdata/repetitive_r | 74bfb32b5787be06f6448eaf6be3b2013e741d42 | 5163f4bd30620d0802cf4396c2876aac4f22817a | refs/heads/main | 2023-05-29T23:35:59.193119 | 2023-05-17T17:59:02 | 2023-05-17T17:59:02 | 188,274,852 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,590 | r | 20200930_workshop_code.R | #Welcome! We'll begin a little after 3pm once people arrive.
setwd("~/Desktop/20200930_repetitive_r")
#test for loops
# for (iterator in setofvalues){
# do a thing
# }
output_vector <- c()
for(i in 1:5){
for(j in 1:5){
print(paste(i,j))
output_vector <- c(output_vector, i*j)
}
}
output_vector
output_matrix <- matrix(nrow = 5, ncol = 5)
j_vector <- c('a', 'b', 'c', 'd', 'e')
for (i in 1:5){
for(j in 1:5){
temp_j_value <- j_vector[j]
temp_output <- paste(i, temp_j_value)
output_matrix[i, j] <- temp_output
}
}
output_matrix
output_vector2 <- as.vector(output_matrix)
#while loops
#while (this condition is true){
# do a thing
#}
z <- 1
while(z>0.1){
z <- runif(1)
cat(z, "\n")
}
data(mtcars)
str(mtcars)
mtcars$mpg
mtcars$cyl
# write a script that loops through mtcars by cylinder number
# and prints out mean miles per gallon for each category of cyl (cylinders)
#Step 1
unique(mtcars$cyl)
#fill in what unique categories you need here
#Step 2
for (i in unique(mtcars$cyl)) { #vector of numbers to go through in loop
temp <- mtcars[mtcars$cyl==i,] #subset data
cat(i,
mean(temp$mpg, na.rm = TRUE),
"\n") #report mean for subset
}
#vectorization
x <- 1:4
for (i in x){
print(x[i]+y[i])
}
x*2
y <- 6:10
x + y
#challenge
#convert mtcars$mpg to kilograms per liter
mtcars$kpl <- mtcars$mpg*0.425144
#step by step
mpg <-mtcars$mpg
mpg
klh <- mpg*0.425144
klh
mtcars$kpl <- klh
x[x>2]
lapply(X = mtcars, FUN = mean, na.rm = TRUE)
#functions
source("20200930_functions-lesson.R")
kelvin_to_celsius(1235)
|
68a1fe4d947c6dcebb047317f8f121dbfb7ec145 | eae6125fa496e9c6e2a260ec6a4fe00084203910 | /man/effectSizeCI.Rd | 73b366256be62ec2f69166f343a3ec3b499dbb22 | [] | no_license | cran/reproducer | 196c5cbd2c9fb5b62be07994d7fd4089c13e7373 | 613330ca8689417e39982f6732e233bd7d79b96e | refs/heads/master | 2023-07-19T19:16:50.012059 | 2023-07-08T09:40:10 | 2023-07-08T09:40:10 | 29,832,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,824 | rd | effectSizeCI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MadeyskiKitchenhamMetaAnalysis.R
\name{effectSizeCI}
\alias{effectSizeCI}
\title{effectSizeCI}
\usage{
effectSizeCI(
expDesign,
t,
n1,
n2,
r = 0,
epsilon = 1e-10,
maxsteps = 1000,
stepsize = 3
)
}
\arguments{
\item{expDesign}{Experimental design: 1) crossover repeated measures ('CrossOverRM'), 2) before-after repeated measures (expDesign=='BeforeAfterRM'), 3) independent groups ('IG)}
\item{t}{t-statistics (t must be less than or equal to 37.62, the limit from the R function documentation)}
\item{n1}{The number of observations in sequence group 1 (expDesign=='CrossOverRM'), the number of observations in group 1 (expDesign=='IG'), or the total number of observations (expDesign=='BeforeAfterRM')}
\item{n2}{The number of observations in sequence group 2 (expDesign=='CrossOverRM') or the number of observations in group 2 (expDesign=='IG')}
\item{r}{The correlation between outcomes for individual subject (the within subject correlation)}
\item{epsilon}{The precision of the iterative procedure}
\item{maxsteps}{The maximum number of steps of the iterative procedure (the procedure terminates at maxsteps or earlier if CI with enough precision have been calculated)}
\item{stepsize}{The size of steps (influences the convergence of the calculations, i.e., the number of steps required to obtain the final result of precision defined by the epsilon)}
}
\value{
A list of Confidence Intervals for: t-statistic (t_LB and t_UB), repeated-measures effect size d_RM (d_RM_LB, d_RM_UB), independent groups effect size (d_IG_LB, d_IG_UB)
}
\description{
95% Confidence Intervals (CI) on Standardised Effect Sizes (d) for cross-over repeated-measures, before-after repeated-measures, and independent group experimental designs
The procedure is based on finding the upper and lower 0.025 bounds for the related t-variable.
The t-variable needs to be adjusted for bias by multiplying by c
The upper and lower bounds on the t-variable are then used to calculate to upper and lower bounds on the
repeated measures effect size (d_RM) by multiplying the upper and lower bound of the t-variable by sqrt((n1+n2)/(2*(n1*n2))).
Upper and lower bounds on the equivalent independent groups effect size (d_IG) are found by multiplying the upper and lower
bounds on d_RM by sqrt(1-r).
}
\examples{
effectSizeCI(expDesign = "CrossOverRM", t = 14.4, n1 = 15, n2 = 15, r = 0.6401)
effectSizeCI(expDesign = "BeforeAfterRM", t = 14.16536, n1 = 15, n2 = 0, r = 0.6146771)
effectSizeCI(expDesign = "IG", t = -6.344175, n1 = 15, n2 = 15)
effectSizeCI(expDesign = "CrossOverRM", t = 0.5581, n1 = 6, n2 = 6, r = 0.36135)
effectSizeCI(expDesign = "CrossOverRM", r = 0.855, t = 4.33, n1 = 7, n2 = 6)
}
\author{
Lech Madeyski and Barbara Kitchenham
}
|
175e697137d9dfa6fd752964b5394959dd70e9eb | 7612408d3e43aa59d066c0a5797446ec8c675694 | /SchoolsGraphs.R | fb87d0a56ce3bb07ff5d9081acd44700069ab3fc | [] | no_license | julesbeley/datathon | 2512a96478a25fa3ce5c382482070e5940239402 | ff42508a10019585039bba3c6631036a99e9e508 | refs/heads/master | 2020-04-18T04:01:08.590283 | 2019-01-24T14:26:13 | 2019-01-24T14:26:13 | 167,222,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,342 | r | SchoolsGraphs.R | library(tidyverse)
install.packages("rio")
library(rio)
edu <- import(.)
library(readr)
read_csv("data/Education.csv")
table(edu)
head(edu)
View(edu)
edu <- edu[-1,]
head(edu)
names(edu)<- c("Libellé","Population légale 2016","Lycée 2017","École maternelle 2017","École élémentaire 2017",
"Collège 2017")
headers<- c("Dep","Pop","Ly","Mat","Ele","Col")
head(edu)
edu %>% setNames(headers) %>%
group_by(Ly)
##
names(edu) <- headers
names(edu)
summary(edu)
glimpse(edu)
edu <- edu[,-1]
edu <- edu[-1,]
view(edu)
##sorting
#Subset
##population
edu$Pop <- as.numeric(edu$Pop)
edu <- edu %>%
arrange(`Pop`)
view(edu)
##convert to numerical
edu$Ly <- as.numeric(as.character(edu$Ly))
edu$Pop <- as.numeric(as.character(edu$Pop))
edu$Mat <- as.numeric(as.character(edu$Mat))
edu$Ele <- as.numeric(as.character(edu$Ele))
edu$Col <- as.numeric(as.character(edu$Col))
edu %>%
arrange(`Pop`) -> edu
## graphing pop
edu %>% setNames(headers) %>%
ggplot(aes(x= Dep, y = Pop, fill = Pop)) +
geom_col()
## ratios----
##graphing Lycée
edu %>% setNames(headers) %>%
ggplot(aes(x= Dep, y = Ly, fill = Ly/Pop)) +
geom_col()
##graphing College
edu %>% setNames(headers) %>%
ggplot(aes(x= Dep, y = Col/Pop, fill = Col/Pop)) +
geom_col()
##graphing elementary
edu %>% setNames(headers) %>%
ggplot(aes(x= Dep, y = Ele/Pop, fill = Ele/Pop)) +
geom_col()
##graphing maternelle
edu %>% setNames(headers) %>%
ggplot(aes(x= Dep, y = Mat/Pop, fill = Mat/Pop)) +
geom_col()
##population density
x=reorder ##command on R lookup
edu %>%
mutate(ratio = Ele/Pop) %>%
View()
##regions
read_csv("data/RegEdu.csv")
REdu <- REdu[-1,]
head(REdu)
names(REdu)<- c("Libellé","Population légale 2016","Lycée 2017","École maternelle 2017","École élémentaire 2017",
"Collège 2017")
headers1<- c("Reg","Pop1","Ly1","Mat1","Ele1","Col1")
head(REdu)
REdu %>% setNames(headers1)
names(REdu) <- headers1
REdu <- REdu[,-1]
REdu <- REdu[-1,]
##convert to numerical
REdu$Ly1 <- as.numeric(as.character(REdu$Ly1))
REdu$Pop1 <- as.numeric(as.character(REdu$Pop1))
REdu$Mat1 <- as.numeric(as.character(REdu$Mat1))
REdu$Ele1 <- as.numeric(as.character(REdu$Ele1))
REdu$Col1 <- as.numeric(as.character(REdu$Col1))
REdu %>%
arrange(`Pop1`) -> REdu
##graph pop regions
REdu %>% setNames(headers1) %>%
ggplot(aes(x= reorder(Reg, -Pop1), y = Pop1, fill = Reg)) +
geom_col()+ expand_limits(x = 15) + theme(axis.text.x=element_text(angle=90,hjust=1)) + theme(axis.text.y=element_blank())+
ggtitle("Population per region") + xlab("Regions") + ylab("Population") + guides(fill=FALSE) + scale_y_continuous(labels = scales::comma)
REdu<- REdu[-18,]
## ratios----regions
##graphing Lycée
REdu %>% setNames(headers1) %>%
ggplot(aes(x= reorder(Reg, -Ly1/Pop1), y = Ly1/Pop1, fill = Reg)) +
geom_col()+ expand_limits(x = 15) + theme(axis.text.x=element_text(angle=90,hjust=1)) + theme(axis.text.y=element_blank())+
ggtitle("Ratio of high-school:population per region") + xlab("Regions") + ylab("Ratio") + guides(fill=FALSE) + scale_y_continuous(labels = scales::comma)
##graphing College
REdu %>% setNames(headers1) %>%
ggplot(aes(x= reorder(Reg, -Col1/Pop1), y = Col1/Pop1, fill = Reg)) +
geom_col()+ expand_limits(x = 15) + theme(axis.text.x=element_text(angle=90,hjust=1)) + theme(axis.text.y=element_blank())+
ggtitle("Ratio of middle school:population per region") + xlab("Regions") + ylab("Ratio") + guides(fill=FALSE) + scale_y_continuous(labels = scales::comma)
##graphing elementary
REdu %>% setNames(headers1) %>%
ggplot(aes(x= reorder(Reg, -Ele1/Pop1), y = Ele1/Pop1, fill = Reg)) +geom_col() + expand_limits(x = 15) + theme(axis.text.x=element_text(angle=90,hjust=1)) + theme(axis.text.y=element_blank())+
ggtitle("Ratio of elementary school:population per region") + xlab("Regions") + ylab("Ratio") + guides(fill=FALSE) + scale_y_continuous(labels = scales::comma)
##graphing maternelle
REdu %>% setNames(headers1) %>%
ggplot(aes(x= reorder(Reg, -Mat1/Pop1), y = Mat1/Pop1,fill =Reg)) +
geom_col() + expand_limits(x = 15) + theme(axis.text.x=element_text(angle=90,hjust=1)) + theme(axis.text.y=element_blank())+
ggtitle("Ratio of pre-school:population per region") + xlab("Regions") + ylab("Ratio") + guides(fill=FALSE) + scale_y_continuous(labels = scales::comma)
|
ce3c80908693273c8755aff0f035d3c0dc5fce58 | f34493e0d59b888650383b66ec095bbb71359c00 | /man/show-methods.Rd | f8f26d4fdcc12dd4e5cafe3e31dc4e4dd92673e4 | [] | no_license | neuroconductor/aws | f3c4a23034343a3f53ae3a668bcf456324c32af2 | fea929ff0ef19cc27459489626541b67245fdcb5 | refs/heads/master | 2023-01-19T22:39:10.633488 | 2023-01-18T16:40:12 | 2023-01-18T16:40:13 | 238,157,999 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,739 | rd | show-methods.Rd | \name{show-methods}
\docType{methods}
\alias{show-methods}
\alias{show,ANY-method}
\alias{show,aws-method}
\alias{show,awssegment-method}
\alias{show,ICIsmooth-method}
\alias{show,kernsm-method}
\title{Methods for Function `show' in Package `aws'}
\description{
The function provides information on data dimensions, data source and
existing slot-names for objects of class \code{"aws"}, \code{"awssegment"},
\code{"ICIsmooth"} and \code{"kernsm"} in package \pkg{aws}
}
\section{Methods}{
\describe{
\item{\code{signature(object = "ANY")}}{
Generic function.
}
\item{\code{signature(object = "aws")}}{
Provide information on data dimensions, data source and existing slot-names for objects of class \code{"dti"}
and classes that extent \code{"aws"}.}
\item{\code{signature(object = "awssegment")}}{
Provide information on data dimensions, data source and existing slot-names for objects of class \code{"dti"}
and classes that extent \code{"awssegment"}.}
\item{\code{signature(object = "ICIsmooth")}}{
Provide information on data dimensions, data source and existing slot-names for objects of class \code{"dti"}
and classes that extent \code{"ICIsmooth"}.}
\item{\code{signature(object = "kernsm")}}{
Provide information on data dimensions, data source and existing slot-names for objects of class \code{"dti"}
and classes that extent \code{"kernsm"}.}
}}
\author{
Karsten Tabelow \email{tabelow@wias-berlin.de}\cr
J\"org Polzehl \email{polzehl@wias-berlin.de}
}
\seealso{
\code{\linkS4class{aws}},
\code{\linkS4class{awssegment}},
\code{\linkS4class{ICIsmooth}}
\code{\linkS4class{kernsm}}
}
\keyword{methods}
\keyword{ utiities }
|
7035ee4eeecd16edb14b590f9ab9bc2226dbc2f5 | 64b0d18eb0e78a963ef19599c2dec448da6603d3 | /man/test_resource.Rd | 7d80eab53c5841b03150422a913f288f3939cb01 | [
"MIT"
] | permissive | Chicago-R-User-Group/2017-n4-Meetup-Syberia | 0bb8cf04112ba236e373e89b01db8f92b857b000 | dc248c8702fc851ae50335ad6406f14e414c0744 | refs/heads/master | 2021-01-01T04:22:57.806786 | 2017-07-14T04:19:50 | 2017-07-14T04:19:50 | 97,166,590 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 760 | rd | test_resource.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{test_resource}
\alias{test_resource}
\title{Run the tests for a single resource.}
\usage{
test_resource(engine, resource, setup, teardown, reporter)
}
\arguments{
\item{engine}{syberia_engine. The engine to run the test on.}
\item{resource}{character. The resource to test.}
\item{setup}{stageRunner. A \code{\link[stagerunner]{stageRunner}} to
execute setup hooks for this test.}
\item{teardown}{stageRunner. A \code{\link[stagerunner]{stageRunner}} to
execute teardown hooks for this test.}
\item{reporter}{reporter. A testthat reporter object.}
}
\value{
The testthat result summary for this one test run.
}
\description{
Run the tests for a single resource.
}
|
928fddef382ef0fd2fa19c98d90be9617de71c03 | b25b4b8d51e279a196b1dddf1606400445d0072c | /bayesrate/r_functions/BiSSEBMA.r | da1012a36e141ae2289d958077d6294316f9065e | [
"MIT"
] | permissive | schnitzler-j/BayesRate | 86bf7deae6a2f64deeac68df9f323d56d9ed6fc8 | 86fb252df8589c89fce1c42699c69c5e2bbccb6f | refs/heads/master | 2021-01-20T20:28:57.517417 | 2016-08-10T09:55:11 | 2016-08-10T09:55:11 | 65,297,633 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,966 | r | BiSSEBMA.r | #!/usr/bin/Rscript
# version 20130821
arg <- commandArgs(trailingOnly=TRUE)
mcmc_BiSSE <- function(
infile1, # tree
infile2, # trait table
wd="default",
TDI=0,
out_file_stem="default",
rho0=1,
rho1=1,
BD_model_0=1,
BD_model_1=1,
Trait_model=0,
link_speciation=0,
link_extinction=0,
link_trait=0,
prior_r=5,
prior_a=1,
prior_b=1,
prior_q=1,
trees=1,
IT=20000,
sampling_freq=100,
print_freq=100,
burnin=1000,
win_1=0.5,
win_2=0.25,
win_3=0.05,
categories=10,
beta_shape=0.3,
path_lib="default"
) { # end args
if (wd=="default"){
wd=dirname(infile1)
}else{setwd(wd)}
if (out_file_stem=="default"){
st=basename(infile2)
out_file= paste(st,"_BiSSEBMA.log", sep="")
}else{out_file= paste(out_file_stem,".log", sep="")}
if (path_lib=="default") {
library(diversitree)
}else{library(diversitree, , lib.loc=path_lib)}
options(warn=-1) # hide warnings
rhos=c(rho0, rho1) #sampling fractions for state 0 and 1
#out_file="boh.log" #!!change in each analysis
#TDI=0 # 0 parameter estimation, 1 TD integration (not real estimates)
#trees=5 # one tree for TDI is advised
#burnin=0 # run test to check the proportion
#sampling_freq=25
#print_freq=10
#IT=200 # mcmc iterations (per tree)
#categories=20
use_exp=1 # 0: uniform priors; 1: exponential priors (extinction fraction has always uniform prior 0-1)
M=c(5,5,1,1,5,5) # max parameter values (if uniform priors)
# if exp no limit (max =1000)
if (prior_r>0){M[1:2]=1000}
if (prior_q>0){M[5:6]=1000}
#shape_prior=c(2,2,1,1) # shape parameters of exp prior (net diversification and q rates)
#constraints=c() # constant sp. [2], ex. [4], q. [6] const sp + ex is 2,4
#PB=c() # 3: pb clade 1, 4: pb clade 2, 5: q0->1 =0; 6: q1->0 =0 # pure birth / irreversible rate models
PB=NULL
if (BD_model_0==0){PB=append(PB,3)}
if (BD_model_1==0){PB=append(PB,4)}
if (Trait_model==1){PB=append(PB,6)}
if (Trait_model==2){PB=append(PB,5)}
constraints=NULL
if (link_speciation==1){constraints=append(constraints,2)}
if (link_extinction==1){constraints=append(constraints,4)}
if (link_trait==1) {constraints=append(constraints,6)}
shape_prior=c(prior_r,prior_a,prior_b,prior_q)
win_size=c(win_1,win_1,win_2,win_2,win_3,win_3)
Tree=read.nexus(file=infile1)
traits=read.csv(file=infile2, header=TRUE, sep="\t") #one single trait or create a vector
states<-traits[,2]
names(states)<-as.character(traits[,1])
update_parameter <- function(i,d,M) {
if (length(i)==1) {
ii=abs(i+(runif(length(i),0,1)-.5)*d)
if (ii>M) {ii=abs((M-(ii-M)))}
if (ii>M) {ii=i}
}
ii}
if (TDI>0) {
K=categories-1.
k=0:K # K+1 categories
beta=k/K
alpha=beta_shape # categories are beta distributed
temps=rev(beta^(1./alpha))
} else{temps=c(1)}
exp_prior = function(value,l,Mv) {
if (l>0){
l=1./l
log(l)-l*(value)
}else{log(1/Mv)}
}
beta_prior = function(a,b,value){
dbeta(value, a, b, log = TRUE)
}
true_iteration=1
cat(sprintf("it\tlikelihood\tprior\tacceptance\tl0\tl1\tm0\tm1\tr0\tr1\ta0\ta1\tq0\tq1\ttemp\ttree\n"), append=FALSE, file=out_file)
cat(sprintf("it\tlikelihood\tprior\tacc\tl0\tl1\tm0\tm1\ttemp\ttree\n"))
for (J in 1:length(temps)) { # Loop temperatures
temperature=temps[J]
d= win_size*(4 - 3*temperature)
for (t_index in 1:trees) { # loop trees
if (class(Tree)=="multiPhylo"){current_tree=Tree[[J]]}
else{current_tree=Tree}
BISSE=make.bisse(current_tree, states, strict=F, sampling.f=rhos)
pars=runif(min=.1,max=.5, 6) # initialize parameters
if (temperature<1) {burnin=0}
LIK=0
acc=0
for (iteration in 1:(IT+burnin)) { # MCMC loop
if (iteration==1){
likA = BISSE(pars)[1]
parsA=pars}
pars=parsA
ind=sample(1:6,1)
pars[ind]=update_parameter(pars[ind],d[ind],M[ind])
pars[PB]=0
if (length(constraints)>0) {
if (is.element(2, constraints) & is.element(4, constraints)) {pars[constraints] = pars[constraints-1]}
else if (is.element(2, constraints)) {pars[2]=(pars[1]/(1-pars[3]))*(1-pars[4])}
else if (is.element(4, constraints)) {pars[4]=pars[3]*pars[1]/(pars[2]-pars[3]*pars[2]+pars[3]*pars[1])}
if (is.element(6, constraints)) {pars[6] = pars[5]}
}
#cat("pars:", pars)
l=pars[1:2]/(1-pars[3:4])
m=-pars[3:4]*pars[1:2]/(pars[3:4]-1)
#lik=BISSE(c(l,m,pars[5:6]))[1]
# CALC PRIORS # exponential
if (use_exp==1) {
prior=sum(exp_prior(pars[1:2],shape_prior[1],M[1:2])) + sum(exp_prior(pars[5:6],shape_prior[4],M[5:6])) + sum(beta_prior(shape_prior[2],shape_prior[3],pars[3:4]))
} else{
prior=sum(log(1/M))
if (min(M-temp)<0) {prior = -Inf}
} # uniform
# CALC LIKELIHOOD
if (prior> -Inf) {
lik=try(BISSE(c(l,m,pars[5:6])))
if (is.na(lik) | (class(lik) == "try-error" )) {lik=-Inf}
} else{lik= -Inf}
if (iteration==1){priorA=prior}
tryCatch(
{
if ( (lik-likA)*temperature + (prior-priorA) >= log(runif(1,0,1)) ){
likA=lik
priorA=prior
parsA=pars
acc =acc + 1.
}
}
,error = function(e) NULL
)
if (iteration %% print_freq ==0) {cat(sprintf("%s\t", round(c(true_iteration, likA, priorA, parsA, temperature, t_index),2)), "\n")}
if (true_iteration %% sampling_freq ==0 & iteration>=burnin) {
l=parsA[1:2]/(1-parsA[3:4])
m=-parsA[3:4]*parsA[1:2]/(parsA[3:4]-1)
LIK= LIK+likA
cat(sprintf("%s\t", c(true_iteration, likA, priorA, acc/iteration, l, m, parsA, temperature, t_index)), "\n", append=TRUE, file=out_file)
}
if (iteration>burnin) {true_iteration = true_iteration+1}
}
}
}
if (TDI>0) {
out_marg= paste(out_file_stem,"_marginal.txt",sep="")
d<-read.table(out_file, header=T) #, stringsAsFactors=F,sep="\t") #, row.names = NULL)
trapezia<-aggregate(d$lik, list(d$temp),FUN="mean")
stds<-aggregate(d$lik, list(d$temp),FUN="sd")
ml=0
for (i in 1:(dim(trapezia)[[1]]-1)){
ml=ml+( (trapezia[i,2] + trapezia[(i+1),2])/2 * (trapezia[(i+1),1] - trapezia[(i),1]))
}
cat("\nmean log likelihoods:", trapezia[,2], file=out_marg, append=TRUE)
cat("\ntemperatures:", unique(d$temp), file=out_marg, append=TRUE)
cat("\nstd(lnL):", stds[,2], file=out_marg, append=TRUE)
cat("\n\nLog Marginal Likelihood:", ml, file=out_marg, append=TRUE)
cat("\n\n Log Marginal Likelihood:", ml)
cat("
The marginal likelihood can be used to compare different analyses and to perform model selection
and hypothesis testing via Bayes factors.
The marginal likelihood was saved to file:", out_marg,"\n\n")
}
cat("\n")
}
mcmc_BiSSE(
as.character(arg[1]), # infile1, # tree
as.character(arg[2]), # infile2, # trait table
as.character(arg[3]), # wd,
as.integer(arg[4]), # TDI,
as.character(arg[5]), # out_file,
as.double(arg[6]), # rho0,
as.double(arg[7]), # rho1,
as.integer(arg[8]), # BD_model_0,
as.integer(arg[9]), # BD_model_1,
as.integer(arg[10]), # Trait_model,
as.integer(arg[11]), # link_speciation,
as.integer(arg[12]), # link_extinction,
as.double(arg[13]), # link_trait,
as.double(arg[14]), # prior_r,
as.double(arg[15]), # prior_a,
as.double(arg[16]), # prior_b,
as.double(arg[17]), # prior_q,
as.integer(arg[18]), # trees,
as.integer(arg[19]), # iterations,
as.integer(arg[20]), # sampling_freq,
as.integer(arg[21]), # print_freq,
as.integer(arg[22]), # burnin,
as.double(arg[23]), # win_1,
as.double(arg[24]), # win_2,
as.double(arg[25]), # win_3,
as.integer(arg[26]), # categories,
as.double(arg[27]), # beta_shape
as.character(arg[28]) # path to diversitree library
)
|
6bd10c63d70393c2cfd5134b735e594beb7f2446 | b69b106037370ef128b294ea2c82b7afa0817c66 | /inst/financialPlots/shiny app.R | 22d6ab495b2bcb180d9ef182aa7fd4d184054c70 | [
"MIT"
] | permissive | DrRoad/financialStatementPlot | 2a14f71c51fe4df8b28e0b97bb84650659bd7a8e | cb528a47c2aeef00e97a89c35c724f0bd3e99a35 | refs/heads/master | 2021-09-05T13:02:28.554149 | 2018-01-27T21:39:14 | 2018-01-27T21:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,975 | r | shiny app.R | library(shiny)
library(DT)
library(plotly)
library(rvest)
# Requires create.data() and comparisonPlot() from financialStatementPlot.
# FIX: the original called devtools::install_github() unconditionally, which
# re-installs the package from GitHub every time the app is sourced (and fails
# entirely when offline). Install only if the package is not already present.
if (!requireNamespace("financialStatementPlot", quietly = TRUE)) {
  devtools::install_github("ryanvoyack/financialStatementPlot")
}
library(financialStatementPlot)
#######app code########
hello1<-c("\r\n Sales/Revenue\r\n ","Sales Growth"," Cost of Goods Sold (COGS) incl. D&A","COGS excluding D&A","Depreciation & Amortization Expense","Depreciation","Amortization of Intangibles","COGS Growth"," Gross Income","Gross Income Growth","Gross Profit Margin",""," SG&A Expense","Research & Development","Other SG&A","SGA Growth","Other Operating Expense","Unusual Expense","EBIT after Unusual Expense","Non Operating Income/Expense","Non-Operating Interest Income","Equity in Affiliates (Pretax)"," Interest Expense","Interest Expense Growth","Gross Interest Expense","Interest Capitalized"," Pretax Income","Pretax Income Growth","Pretax Margin","Income Tax","Income Tax - Current Domestic","Income Tax - Current Foreign","Income Tax - Deferred Domestic","Income Tax - Deferred Foreign","Income Tax Credits","Equity in Affiliates","Other After Tax Income (Expense)","Consolidated Net Income","Minority Interest Expense"," Net Income","Net Income Growth","Net Margin Growth","Extraordinaries & Discontinued Operations","Extra Items & Gain/Loss Sale Of Assets","Cumulative Effect - Accounting Chg","Discontinued Operations","Net Income After Extraordinaries","Preferred Dividends","Net Income Available to Common"," EPS (Basic)","EPS (Basic) Growth","Basic Shares Outstanding"," EPS (Diluted)","EPS (Diluted) Growth","Diluted Shares Outstanding"," EBITDA","EBITDA Growth","EBITDA Margin")
hello2<-c(" Cash & Short Term Investments","Cash Only","Short-Term Investments","Cash & Short Term Investments Growth","Cash & ST Investments / Total Assets"," Total Accounts Receivable","Accounts Receivables, Net","Accounts Receivables, Gross","Bad Debt/Doubtful Accounts","Other Receivables","Accounts Receivable Growth","Accounts Receivable Turnover","Inventories","Finished Goods","Work in Progress","Raw Materials","Progress Payments & Other","Other Current Assets","Miscellaneous Current Assets","Total Current Assets","","Net Property, Plant & Equipment","Property, Plant & Equipment - Gross",
"Buildings","Land & Improvements","Computer Software and Equipment","Other Property, Plant & Equipment","Accumulated Depreciation","Total Investments and Advances","Other Long-Term Investments","Long-Term Note Receivable","Intangible Assets","Net Goodwill","Net Other Intangibles","Other Assets","Tangible Other Assets"," Total Assets","Assets - Total - Growth","","ST Debt & Current Portion LT Debt","Short Term Debt","Current Portion of Long Term Debt"," Accounts Payable","Accounts Payable Growth","Income Tax Payable","Other Current Liabilities","Dividends Payable","Accrued Payroll","Miscellaneous Current Liabilities"," Total Current Liabilities","Long-Term Debt",
"Long-Term Debt excl. Capitalized Leases","Non-Convertible Debt","Convertible Debt","Capitalized Lease Obligations","Provision for Risks & Charges","Deferred Taxes","Deferred Taxes - Credit","Deferred Taxes - Debit","Other Liabilities","Other Liabilities (excl. Deferred Income)","Deferred Income"," Total Liabilities","Non-Equity Reserves","Total Liabilities / Total Assets","Preferred Stock (Carrying Value)","Redeemable Preferred Stock","Non-Redeemable Preferred Stock"," Common Equity (Total)","Common Stock Par/Carry Value","Retained Earnings","ESOP Debt Guarantee","Cumulative Translation Adjustment/Unrealized For. Exch. Gain","Unrealized Gain/Loss Marketable Securities","Revaluation Reserves",
"Treasury Stock","Common Equity / Total Assets"," Total Shareholders' Equity","Total Shareholders' Equity / Total Assets","Accumulated Minority Interest","Total Equity","Liabilities & Shareholders' Equity" )
hello3<- c(" Net Income before Extraordinaries","Net Income Growth","Depreciation, Depletion & Amortization","Depreciation and Depletion","Amortization of Intangible Assets","Deferred Taxes & Investment Tax Credit","Deferred Taxes","Investment Tax Credit","Other Funds","Funds from Operations","Extraordinaries","Changes in Working Capital","Receivables","Accounts Payable","Other Assets/Liabilities"," Net Operating Cash Flow","Net Operating Cash Flow Growth","Net Operating Cash Flow / Sales",""," Capital Expenditures","Capital Expenditures (Fixed Assets)","Capital Expenditures (Other Assets)","Capital Expenditures Growth","Capital Expenditures / Sales","Net Assets from Acquisitions","Sale of Fixed Assets & Businesses","Purchase/Sale of Investments","Purchase of Investments","Sale/Maturity of Investments","Other Uses","Other Sources"," Net Investing Cash Flow","Net Investing Cash Flow Growth","Net Investing Cash Flow / Sales","","Cash Dividends Paid - Total","Common Dividends","Preferred Dividends","Change in Capital Stock","Repurchase of Common & Preferred Stk.","Sale of Common & Preferred Stock","Proceeds from Stock Options","Other Proceeds from Sale of Stock","Issuance/Reduction of Debt, Net","Change in Current Debt","Change in Long-Term Debt","Issuance of Long-Term Debt","Reduction in Long-Term Debt","Other Funds","Other Uses","Other Sources"," Net Financing Cash Flow","Net Financing Cash Flow Growth","Net Financing Cash Flow / Sales","Exchange Rate Effect","Miscellaneous Funds","Net Change in Cash"," Free Cash Flow","Free Cash Flow Growth","Free Cash Flow Yield")
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
# Shiny UI: a sidebar for choosing the statement type and two stock tickers,
# and a main panel with the two raw statement tables plus the comparison plot.
ui <- fluidPage(
# Application title
titlePanel("Financial statement variable comparing app"),
# Sidebar: statement-type selector, two ticker inputs, an import button, and
# a dynamically-rendered variable selector (output$statements in the server).
sidebarLayout(
sidebarPanel(
h5('Please select the type of financial statement that you wish to compare'),
selectInput("state", "Financial Statement", c("None", "Income-Statement", "Balance-Sheet", "Cash-Flows")),
h5('Please enter the stock tickers of 2 companies that you wish to compare'),
textInput('comp1', 'Company 1', value = "wmt", width = NULL, placeholder = NULL),
textInput('comp2', 'Company 2', value = "aapl", width = NULL, placeholder = NULL),
actionButton("Button", "import", icon = icon("line-chart")),
uiOutput("statements")
),
# Main panel: one tab per company's raw statement, one tab for the plot.
mainPanel(
tabsetPanel(
tabPanel("Financial Statement 1", DT::dataTableOutput('table1')),
tabPanel("Financial Statement 2", DT::dataTableOutput('table2')),
tabPanel("Comparing", plotly::plotlyOutput('comparison'), print("Hover over the plot to interact with specific values. Choose another variable in the drop-down bar to immediately render another plot"))
)
)
)
)
# Shiny server: wires the statement-type / ticker inputs to the imported
# financial-statement tables and the interactive comparison plot.
server <- function(input, output) {

  # Variable-selection dropdown, driven by the chosen statement type.
  # Renders nothing (NULL) while "None" is selected, exactly as the
  # original if/else-if chain with no final else did.
  output$statements <- renderUI({
    choices <- switch(input$state,
                      "Income-Statement" = hello1,
                      "Balance-Sheet"    = hello2,
                      "Cash-Flows"       = hello3)
    if (!is.null(choices)) {
      selectInput("property", "What Would You Like To Compare:", choices)
    }
  })

  # Scrape each company's statement when the import button is pressed.
  company1_data <- eventReactive(input$Button, valueExpr = {
    create.data(char = input$comp1, state = input$state)
  })
  company2_data <- eventReactive(input$Button, valueExpr = {
    create.data(char = input$comp2, state = input$state)[[1]]
  })

  # Interactive plot comparing the selected line item across both companies.
  output$comparison <- renderPlotly({
    row_index <- grep(input$property, company1_data()[[2]][-1])
    comparison.plot(x = row_index,
                    A = company1_data()[[1]],
                    B = company2_data(),
                    ticker1 = input$comp1,
                    ticker2 = input$comp2)
  })

  # Raw statement tables for each company.
  output$table1 <- DT::renderDataTable({
    DT::datatable(company1_data()[[1]], rownames = TRUE)
  })
  output$table2 <- DT::renderDataTable({
    DT::datatable(company2_data(), rownames = TRUE)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
09b50e0991cd66c5a0967c0a147c46cb4ae1840f | 3a0df2539e8e38b207c845b97bca1e0b5478ce88 | /data_preprocess.R | 1b185a0e043f992f311ec8cf4246d77540b10f47 | [] | no_license | Caiwei-Zhang/UserPersona | 2c080440e1cd8c2e5e5cc4d2fb9934deb99b9136 | c6c877646bacb388f997cea526e6f0a71798f82a | refs/heads/main | 2023-07-11T00:22:40.797740 | 2021-08-21T09:13:27 | 2021-08-21T09:13:27 | 398,218,348 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,938 | r | data_preprocess.R | setwd("E:/Competition/xf/UserPersona/")
# Load the raw training and scoring data (no header rows in the source files),
# timing how long data.table::fread takes.
t0 <- Sys.time()
train <- as_tibble(fread(input = "./data/train.txt", sep = ",",
                         header = FALSE, encoding = "UTF-8"))
test <- as_tibble(fread(input = "./data/apply_new.txt", sep = ",",
                        header = FALSE, encoding = "UTF-8"))
t1 <- Sys.time()
cat("read data with data.table::fread() takes ", t1 - t0, "secs.")

# Assign column names; the scoring set has no `label` column.
colnames(train) <- c("pid", "label", "gender", "age", "tagid", "time",
                     "province", "city", "model", "make")
colnames(test) <- c("pid", "gender", "age", "tagid", "time",
                    "province", "city", "model", "make")

# iconv(train$model, from = "UTF-8", to = "gbk")
# iconv(train$make, from = "UTF-8", to = "gbk")

# Stack train and test into one table; scoring rows get the sentinel label -1.
user <- train %>% select(-label) %>% rbind(test) %>%
    mutate(label = c(train$label, rep(-1, nrow(test))))
# Visualization
# Side-by-side bar charts comparing the distribution of `make` and `model`
# between the training and scoring sets (to check covariate shift).
make_tr <- ggplot(train, aes(x = make)) + geom_bar() + theme_bw()
make_te <- ggplot(test, aes(x = make)) + geom_bar() + theme_bw()
ggarrange(make_tr, make_te, ncol = 2, nrow = 1)

model_tr <- ggplot(train, aes(x = model)) + geom_bar() + theme_bw()
model_te <- ggplot(test, aes(x = model)) + geom_bar() + theme_bw()
ggarrange(model_tr, model_te, ncol = 2, nrow = 1)
# glob2rx("[*")
# glob2rx("*]")
# Strip the surrounding "[" / "]" from the tagid and time strings, then split
# each into a character vector.
# BUG FIX: the original computed `time` from `tagid` — and because dplyr::mutate
# evaluates expressions sequentially, it saw the *already list-converted* tagid,
# producing garbage instead of the parsed timestamps. It must parse `time`.
user <- user %>% mutate(tagid = str_replace(tagid, "^\\[", "") %>%
                            str_replace("]$", "") %>% str_split(","),
                        time = str_replace(time, "^\\[", "") %>%
                            str_replace("]$", "") %>% str_split(","))
## Method 1: text2Vec by R
# Learn GloVe embeddings over users' tag-id "sentences" with text2vec.
sentences <- user$tagid
# Create iterator over tokens
# tokens<- word_tokenizer(sentences)
# set up the token iterator
it <- itoken(sentences, progressbar = FALSE)
# build the vocabulary (unique terms with counts) and prune low-frequency terms
vocab <- create_vocabulary(it) %>% prune_vocabulary(term_count_min = 5L) #[1] 58643 3
# map vocabulary terms to matrix columns
vectorizer <- vocab_vectorizer(vocab)
# build the term co-occurrence matrix (TCM) from the iterator and vectorizer
tcm <- create_tcm(it, vectorizer, skip_grams_window = 5L)
emb_size <- 32
# glove <- GlobalVectors$new(rank = emb_size, x_max = 10)
glove <- GloVe$new(rank = emb_size, x_max = 10, learning_rate = 0.10)
wv_main <- glove$fit_transform(tcm, n_iter = 10)
dim(wv_main) # [1] 58643 32
wv_cont <- glove$components
# final word vectors: main embeddings plus transposed context embeddings
wv <- wv_main + t(wv_cont)
class(glove$components)
# generate embedding matrix: one column per user, the element-wise mean of the
# GloVe vectors of that user's tags (all zeros when no tag is in the
# vocabulary, matching the original rep(0, emb_size) fallback).
# FIX: the original grew emb_matrix with cbind() inside the loop (O(n^2)
# copies) and built each mean via a per-word inner cbind; preallocating and
# using colMeans over the matching rows is equivalent and linear.
emb_matrix <- matrix(0, nrow = emb_size, ncol = length(sentences))
for (i in seq_along(sentences)) {
  if (i %% 100 == 0) cat("Run the tagid of user:", i, "\n")
  # keep duplicates: a repeated tag is weighted twice, as in the original loop
  hits <- sentences[[i]][sentences[[i]] %in% rownames(wv)]
  if (length(hits) > 0) {
    emb_matrix[, i] <- colMeans(wv[hits, , drop = FALSE], na.rm = TRUE)
  }
}
## Method 2: Word2Vec by python
# emb_tibble <- fread("./output/emb_mat.csv", header = FALSE) %>% as_tibble()
# add the embedding results into user data
for (c in 1:32) user[paste0("tagid_emb_", c)] <- emb_matrix[c,]
# label encoding
# Label-encode gender/age and inspect missingness.
user$gender <- as.integer(user$gender)
user$age <- as.integer(user$age)
table(user$gender, useNA = "ifany")
table(user$age, useNA = "ifany")
# Missing gender -> 3, missing age -> 7 (one past the largest observed level
# in each case — presumably chosen from the tables above; verify if re-run).
user$gender <- ifelse(is.na(user$gender), 3, user$gender)
user$age <- ifelse(is.na(user$age), 7, user$age)
# Dump the categorical level counts to a text file for reference.
sink("./output/cate_level.txt")
cat("the level of province :", "\n")
table(user$province)
cat("the level of city :", "\n")
table(user$city)
cat("the level of gender :", "\n")
table(user$gender)
cat("the level of age :", "\n")
table(user$age)
sink()
## model
# Levels of model/make shared by train and test, and the coverage of those
# shared levels in each split.
inter_model <- intersect(unique(train$model), unique(test$model))
inter_make <- intersect(unique(train$make), unique(test$make))
sum(train$model %in% inter_model) / nrow(train)
sum(test$model %in% inter_model) / nrow(test)
sum(train$make %in% inter_make) / nrow(train)
sum(test$make %in% inter_make) / nrow(test)
# Recode levels not in the train/test intersection as "other"
# (and merge the "vivo"/"VIVO" spelling variants).
user <- user %>%
    mutate(model = fct_collapse(model,
                                other = setdiff(unique(user$model), inter_model),
                                vivo = c("vivo", "VIVO")),
           make = fct_collapse(make,
                               other = setdiff(unique(user$make), inter_make)))
# Find the minority levels of model (fewer than 100 occurrences)
min_model <- names(table(user$model)[table(user$model) < 100])
# Collapse the minority model levels into a single "minority" level
user <- user %>%
    mutate(model = fct_collapse(model, minority = min_model))
# Find the minority levels of make (fewer than 5 occurrences)
min_make <- names(table(user$make)[table(user$make) < 5])
# Collapse the minority make levels into a single "minority" level
user <- user %>%
    mutate(make = fct_collapse(make, minority = min_make))
ggplot(data = user) + geom_bar(mapping = aes(x = model))
ggplot(data = user) + geom_bar(mapping = aes(x = make))
|
18a174fb86150ca56e42c74b34ac93ab5de3d677 | dc7169116a18420ba27791d1ae937519cd3b7028 | /man/modeLogitnorm.Rd | 70ae89c09df78220598d5edd701c37011a67e693 | [] | no_license | bgctw/logitnorm | 1ee55f1f36a4700f276b8c0ed3bde4b199e2215b | 527a5cf52b8d8a17b48ad9b5a2cfb3042e6093f6 | refs/heads/master | 2022-01-01T21:00:59.761492 | 2022-01-01T11:57:05 | 2022-01-01T11:57:05 | 73,286,222 | 1 | 1 | null | 2018-07-30T12:01:11 | 2016-11-09T13:25:06 | R | UTF-8 | R | false | false | 397 | rd | modeLogitnorm.Rd | \name{modeLogitnorm}
\alias{modeLogitnorm}
\title{modeLogitnorm}
\description{Mode of the logitnormal distribution by numerical optimization}
\usage{modeLogitnorm(mu, sigma, tol = invlogit(mu)/1000)}
\arguments{
\item{mu}{parameter mu}
\item{sigma}{parameter sigma}
\item{tol}{precision of the estimate}
}
\author{Thomas Wutzler}
\seealso{\code{\link{logitnorm}}}
|
0440507fea0de716719c1706e7eeb4dccedfbc30 | 8ab10cc30485d724a9fffda8471e96a38f8f9801 | /functions/AxiomUKB.R | 779c058b4fccd6e2ca211cecf3eb6c345f1ae737 | [] | no_license | tgerke/AffyAnnotScrapeR | efa9e9b2ff298c96660b4664c0a968e857f7c305 | 84fb86d9b142feae6d329e54932ea8317c744dd3 | refs/heads/master | 2021-01-11T21:26:30.149002 | 2017-01-13T20:53:17 | 2017-01-13T20:53:17 | 78,784,769 | 0 | 1 | null | 2017-01-13T17:43:29 | 2017-01-12T20:36:39 | R | UTF-8 | R | false | false | 1,906 | r | AxiomUKB.R | #######################################################################################
# load and clean affymetrix transcript cluster annotation for Human Gene 1.0 ST array
#######################################################################################
# set directory, load libraries
# Mac
setwd("/Volumes/Lab_Gerke/ukb/AXIOM/")
# Windows
setwd("M:/lab/Lab_Gerke/ukb/AXIOM/")
#obviously localy machine dependent for now (can't post the Affy data)
library(Biobase)
library(data.table)
options(stringsAsFactors=FALSE)
#######################################################################################
# load transcript cluster annotation, downloaded from Affymetrix.com 01/10/2017
# NOTE(review): the "windows" and "mac" fread calls below are byte-identical;
# the second simply overwrites the first. Presumably only the setwd() above
# differs per platform — consider deleting one of these calls.
#windows
annot <- fread("Axiom_UKB_WCSG.na35.annot.csv",
sep=",", header=TRUE, skip=19, showProgress=FALSE)
#mac
annot <- fread("Axiom_UKB_WCSG.na35.annot.csv",
sep=",", header=TRUE, skip=19, showProgress=FALSE)
#######################################################################################
# create lists of data frames which contain gene level annotation
# x will be annot$'Associated Gene'
# Parse Affymetrix "Associated Gene" annotation strings into data frames.
#
# Each element of `x` encodes zero or more gene records separated by " /// ";
# within a record the 7 fields are separated by " // ".
#
# Fixes over the original: uses lapply/strsplit directly instead of sapply
# (whose simplification is input-dependent), does not shadow `x` in the inner
# lambdas, returns the result explicitly rather than via a trailing
# assignment, and makes stringsAsFactors = FALSE explicit instead of relying
# on the global option set earlier in the script.
#
# @param x Character vector of raw "Associated Gene" strings.
# @return A list (one element per entry of `x`) of data frames with columns
#   transcript_accession, SNP_gene_relationship, distance, unigene_cluster_id,
#   gene, ncbi_gene_name, genbank_description. Entries with no parseable
#   record (a single field such as "---") yield a one-row data frame of "---".
getgeneannot <- function(x) {
  col_names <- c("transcript_accession", "SNP_gene_relationship", "distance",
                 "unigene_cluster_id", "gene", "ncbi_gene_name",
                 "genbank_description")
  # split each entry into records, then each record into its 7 fields
  records <- lapply(strsplit(x, " /// ", fixed = TRUE),
                    strsplit, split = " // ", fixed = TRUE)
  lapply(records, function(rec) {
    dat <- as.data.frame(matrix(unlist(rec), nrow = length(rec), byrow = TRUE),
                         stringsAsFactors = FALSE)
    # a single column means the entry had no " // " separators (e.g. "---")
    if (ncol(dat) == 1) {
      dat <- data.frame(t(rep("---", 7)), stringsAsFactors = FALSE)
    }
    names(dat) <- col_names
    dat
  })
}
# Parse every probe's "Associated Gene" string; sapply applied element-wise
# simplifies the per-probe length-1 lists into one flat list of data frames
# (presumably — depends on getgeneannot returning length-1 lists; verify).
geneannot <- sapply(annot$'Associated Gene', getgeneannot, USE.NAMES=FALSE)
names(geneannot) <- annot$'Probe Set ID'
# Drop the now-redundant raw column (data.table in-place delete) and save the
# table together with the parsed gene annotation.
annot[,'Associated Gene':=NULL]
out <- list(annot, geneannot)
save(out, file="AxiomUKB.RData")
|
6191640855f1bcd01661c61334130cb3c6100479 | 67d2e379f1df737a20c0741a8df88fb78b55c056 | /date.R | c66971ff3adc3c1d4072a8f2fb41174bcd422840 | [] | no_license | kaiyu33/R_Repository | 0313ff0d2a023f26262bb8cdb8c3549dcec6c573 | d3c29756fbd37468b314d0ffafff170f0f3f9745 | refs/heads/master | 2016-08-10T12:53:15.229999 | 2016-04-26T09:30:29 | 2016-04-26T09:30:29 | 54,948,219 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,881 | r | date.R | #time
format(Sys.time())
# [1] "2016-04-13 05:06:05"
format(Sys.time(), "%x %A %X %Z %z")
# [1] "2016/4/13 星期三 上午 05:04:58 CST +0800"
format(Sys.time(), "%Y/%m/%d")
#[1] "2016/04/13"
############################################################################################
Sys.time()
#[1] "2016-04-04 20:05:03 CST"
proc.time()
# user system elapsed
# 11463.12 9.59 111870.56
Sys.Date()
#[1] "2016-04-03"
date()
#[1] "Sun Apr 03 03:28:50 2016"
format(Sys.time(), "%a %b %c %d")
format(Sys.Date(), "%a %b %d")
"%a %b %d"
[1] "週日 四月 03"
## read in date/time info in format 'm/d/y'
dates <- c("02/27/92", "02/27/92", "01/14/92", "02/28/92", "02/01/92")
as.Date(dates, "%m/%d/%y")
[1] "1992-02-27" "1992-02-27" "1992-01-14" "1992-02-28" "1992-02-01"
#
as.numeric(as.Date("2016-04-08"))
#[1] 16899
## So for dates (post-1901) from Windows Excel
as.Date(35981, origin = "1899-12-30") # 1998-07-05
as.Date(35982, origin = "1899-12-30")
## and Mac Excel
as.Date(34519, origin = "1904-01-01") # 1998-07-05
## Time zone effect
z <- ISOdate(2010, 04, 13, c(0,12)) # midnight and midday UTC
as.Date(z) # in UTC
## these time zone names are common
as.Date(z, tz = "NZ")
as.Date(z, tz = "HST") # Hawaii
# Number of days from today until 2011-01-01 (negative once that date passed).
# Simplified: the original round-tripped Sys.Date() through the Windows-Excel
# origin ("1899-12-30" + 25569 days is exactly the Unix epoch 1970-01-01),
# which is an identity and obscured the intent.
as.numeric(as.Date("2011-01-01")) - as.numeric(Sys.Date())
# The date `beforeDay` days before today (same Excel-origin identity removed;
# Date minus numeric yields a Date, as the original expression did).
beforeDay <- 200
Sys.Date() - beforeDay
##########################################################################################
format(Sys.time(), "%X")
#[1] "上午 04:55:09"
format(Sys.time(), "%x")
#[1] "2016/4/13"
format(Sys.time(), "%A")
##[1] "星期三"
format(Sys.time(), "%a ")
##[1] "週三 "
format(Sys.time(), "%Z")
#[1] "CST"
format(Sys.time(), "%z")
#[1] "+0800"
##########################################################################################
## locale-specific version of date()
format(Sys.time(), "%a %b %d %X %Y %Z")
## time to sub-second accuracy (if supported by the OS)
format(Sys.time(), "%H:%M:%OS3")
## read in date info in format 'ddmmmyyyy'
## This will give NA(s) in some locales; setting the C locale
## as in the commented lines will overcome this on most systems.
## lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
x <- c("1jan1960", "2jan1960", "31mar1960", "30jul1960")
z <- strptime(x, "%d%b%Y")
## Sys.setlocale("LC_TIME", lct)
z
## read in date/time info in format 'm/d/y h:m:s'
dates <- c("02/27/92", "02/27/92", "01/14/92", "02/28/92", "02/01/92")
times <- c("23:03:20", "22:29:56", "01:03:30", "18:21:03", "16:56:26")
x <- paste(dates, times)
strptime(x, "%m/%d/%y %H:%M:%S")
## time with fractional seconds
z <- strptime("20/2/06 11:16:16.683", "%d/%m/%y %H:%M:%OS")
z # prints without fractional seconds
op <- options(digits.secs = 3)
z
options(op)
## time zones name are not portable, but 'EST5EDT' comes pretty close.
(x <- strptime(c("2006-01-08 10:07:52", "2006-08-07 19:33:02"),
"%Y-%m-%d %H:%M:%S", tz = "EST5EDT"))
attr(x, "tzone")
## An RFC 822 header (Eastern Canada, during DST)
strptime("Tue, 23 Mar 2010 14:36:38 -0400", "%a, %d %b %Y %H:%M:%S %z")
## Make sure you know what the abbreviated names are for you if you wish
## to use them for input (they are matched case-insensitively):
format(seq.Date(as.Date('1978-01-01'), by = 'day', len = 7), "%a")
# [1] "週日" "週一" "週二" "週三" "週四" "週五" "週六"
format(seq.Date(as.Date('2000-01-01'), by = 'month', len = 12), "%b")
# [1] "一月" "二月" "三月" "四月" "五月" "六月" "七月" "八月" "九月" "十月" "十一月" "十二月"
##########################################################################################
format(Sys.time(), "%A")
##[1] "星期三"
format(Sys.time(), "%a ")
##[1] "週三 "
format(Sys.time(), "%B")
##[1] "四月"
format(Sys.time(), "%b")
##[1] "四月"
format(Sys.time(), "%c")
##[1] "週三 四月 13 04:52:01 2016"
format(Sys.time(), "%D")
##[1] "04/13/16"
format(Sys.time(), "%d")
##[1] "13"
format(Sys.time(), "%e")
##[1] "13"
#Day of the month as decimal number (1–31), with a leading space for a single-digit number.
format(Sys.time(), "%v")
#[1] "13-四月-2016"
format(Sys.time(), "%W")
#[1] "15"
format(Sys.time(), "%X")
#[1] "上午 04:55:09"
format(Sys.time(), "%x")
#[1] "2016/4/13"
format(Sys.time(), "%Y")
#[1] "2016"
format(Sys.time(), "%y")
#[1] "16"
format(Sys.time(), "%Z")
#[1] "CST"
format(Sys.time(), "%z")
#[1] "+0800"
##########################################################################################
startDate<-as.POSIXlt(time(sample.xts[startisNumRow]))
unlist(unclass(startDate))
# sec min hour mday mon year wday yday isdst
# 0 0 0 15 8 115 2 257 0
unlist(unclass(startDate_l))[7]
# wday
# 2
unlist(unclass(startDate_l))[7]==2
# wday
# TRUE
(z <- Sys.time()) # the current datetime, as class "POSIXct"
unclass(z) # a large integer
floor(unclass(z)/86400) # the number of days since 1970-01-01 (UTC)
(now <- as.POSIXlt(Sys.time())) # the current datetime, as class "POSIXlt"
unlist(unclass(now)) # a list shown as a named vector
now$year + 1900 # see ?DateTimeClasses
months(now); weekdays(now) # see ?months
## suppose we have a time in seconds since 1960-01-01 00:00:00 GMT
## (the origin used by SAS)
z <- 1472562988
# ways to convert this
as.POSIXct(z, origin = "1960-01-01") # local
as.POSIXct(z, origin = "1960-01-01", tz = "GMT") # in UTC
## SPSS dates (R-help 2006-02-16)
z <- c(10485849600, 10477641600, 10561104000, 10562745600)
as.Date(as.POSIXct(z, origin = "1582-10-14", tz = "GMT"))
## Stata date-times: milliseconds since 1960-01-01 00:00:00 GMT
## format %tc excludes leap-seconds, assumed here
## For format %tC including leap seconds, see foreign::read.dta()
z <- 1579598122120
op <- options(digits.secs = 3)
# avoid rounding down: milliseconds are not exactly representable
as.POSIXct((z+0.1)/1000, origin = "1960-01-01")
options(op)
## Matlab 'serial day number' (days and fractional days)
z <- 7.343736909722223e5 # 2010-08-23 16:35:00
as.POSIXct((z - 719529)*86400, origin = "1970-01-01", tz = "UTC")
as.POSIXlt(Sys.time(), "GMT") # the current time in UTC
## These may not be correct names on your system
as.POSIXlt(Sys.time(), "America/New_York") # in New York
as.POSIXlt(Sys.time(), "EST5EDT") # alternative.
as.POSIXlt(Sys.time(), "EST" ) # somewhere in Eastern Canada
as.POSIXlt(Sys.time(), "HST") # in Hawaii
as.POSIXlt(Sys.time(), "Australia/Darwin")
cols <- c("code", "coordinates", "TZ", "comments")
tmp <- read.delim(file.path(R.home("share"), "zoneinfo", "zone.tab"),
header = FALSE, comment.char = "#", col.names = cols)
if(interactive()) View(tmp) |
677c5cb48be59fb54691f23f72a8e91c4dc0d0d7 | 13a88b7fe9ee099862e961e8d4b58bc293703545 | /R/tv-delay.R | 4995d1ce6a4c69a331a29aecaea6928c314549e8 | [] | no_license | cran/gpDDE | f2986d78f08fa2586253fef8dbcc08219012fa8b | b7a8d145cba6808ad047242ebc8407c283e7e6d5 | refs/heads/master | 2016-08-11T15:20:58.948341 | 2015-12-16T23:25:34 | 2015-12-16T23:25:34 | 48,164,930 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,295 | r | tv-delay.R | ##' This function runs generalized profiling for DDE models with time varying coefficients.
##' This function carry out the profiled optimization method for DDe models using a sum of squared errors criteria for both fit to data and the fit of the derivatives to a delay differential equation with time varying coefficients.
##' @title Profile Estimation Functions for DDE with Time Varying Coefficients.
##' @param fn fn A named list of functions giving the righthand side of a delay differential equation. The functions should have arguments
##' \describe{
##' \item{times}{The times at which the righthand side is being evaluated.}
##' \item{x}{The state values at those times.}
##' \item{p}{Parameters to be entered in the system.}
##' \item{more}{A list object containing additional inputs to \code{fn}, The distributed delay state are passed into derivative calculation as \code{more$y}.}
##' The list of functions should contain the elements:
##' \item{fn}{Function to calculate the right hand sid.}
##' \item{dfdx}{Function to calculate the derivative of each right-hand function with respect to the states.}
##' \item{dfdp}{calculates the derivative of therighthand side function with respect to parameters. }
##' \item{d2fdx2}{Function to calculate the second derivatives with respect to states.}
##' \item{d2fdxdp}{Function to calculate the cross derivatives of each right-hand function with respect to state and parameters.}
##' \item{dfdx.d}{Function to calculate the the derivative of each righthand function with respect to the delayed states.}
##' \item{d2fdx.ddp}{Function to calculate the cross derivatives of each righthand function with respect to the delayed states and parameters.}
##' \item{d2fdxdx.d}{Function to calculate the cross derivatives of each right-hand function with respect to the state and the delayed states.}
##' \item{d2fdx.d2}{Function to calculate the second derivatives of the right-hand function with respect to the delayed states.}
##' }
##' @param data Matrix of observed data values.
##' @param times Vector observation times for the data.
##' @param pars Initial values of parameters to be estimated processes.
##' @param beta Initial values of the contribution of lags for the delay.
##' @param kappa Initial values of parameters for a time varying function.
##' @param coefs Vector giving the current estimate of the coefficients in the spline.
##' @param basisvals Values of the collocation basis to be used. This should be a basis object from the fda package.
##' @param lambda Penalty value trading off fidelity to data with fidelity to differential equations.
##' @param fd.obj A functional data object; if this is non-null, coefs and basisvals is extracted from here.
##' @param more An object specifying additional arguments to fn.
##' @param weights Weights for weighted estimation.
##' @param quadrature Quadrature points, should contain two elements (if not \code{NULL})
##' \describe{
##' \item{qpts}{ sQuadrature points; defaults to midpoints between knots}
##' \item{qwts}{Quadrature weights; defaults to normalizing by the length of qpts.}
##' }
##' @param in.meth Inner optimization function currently one of \code{'nlminb'}, \code{'optim'}, or \code{'trustOptim'}.
##' @param out.meth Outer optimization function to be used, depending on the type of method.
##' \describe{
##' \item{nls}{Nonlinear least square}
##' \item{nnls.eq}{Nonlinear least square with equality or/and inequality constraints of the parameters.}
##' }
##' @param control.in Control object for inner optimization function.
##' @param control.out Control object for outer optimization function.
##' @param eps Finite differencing step size, if needed.
##' @param active Incides indicating which parameters of pars should be estimated; defaults to all of them.
##' @param posproc Should the state vector be constrained to be positive? If this is the case, the state is represented by an exponentiated basis expansion in the proc object.
##' @param discrete Is it a discrete process.
##' @param poslik Should the state be exponentiated before being compared to the data? When the state is represented on the log scale (posproc=TRUE), this is an alternative to taking the log of the data.
##' @param names The names of the state variables if not given by the column names of coefs.
##' @param sparse Should sparse matrices be used for basis values? This option can save memory when using 'trust' optimization method.
##' @param basisvals0 Values of the collocation basis to be used for the history part of the data. This should be a basis object from the fda package.
##' @param coefs0 Vector giving the estimate of the coefficients in the spline for the history part of the data.
##' @param nbeta The number of lags for the delay.
##' @param ndelay A vector indicating which state process has a delay term.
##' @param tau A list of delay lags.
##' @return A list with elements
##' \describe{
##' \item{data}{The matrix for the observed data.}
##' \item{res}{The inner optimization result.}
##' \item{ncoefs}{The estimated coefficients.}
##' \item{lik}{The \code{lik} object generated.}
##' \item{proc}{The \code{proc} object generated.}
##' \item{pars}{The estimated parameters.}
##' \item{beta}{The estimated contribution of lags for the delay.}
##' \item{kappa}{The estimated parameters for the time varying function.}
##' \item{times}{The times at which the data are observed.}
##' \item{fdobj.d}{The functional data object for the estimated state process.}
##' \item{fdobj0}{The functional data object for the estimated state process of the history part.}
##' \item{tau}{The lags of delays.}
##' }
##' @export
##' @author Ziqian Zhou
Profile.LS.TV.DDE <- function(fn, data, times, pars, beta, kappa, coefs = NULL, basisvals = NULL,
    lambda, fd.obj = NULL, more = NULL, weights = NULL, quadrature = NULL,
    in.meth = "nlminb", out.meth = "nls", control.in = list(),
    control.out = list(), eps = 1e-06, active = NULL, posproc = FALSE,
    discrete = FALSE, poslik = FALSE, names = NULL, sparse = FALSE,
    basisvals0 = NULL, coefs0 = NULL, nbeta, ndelay, tau)
{
    ## By default every ODE parameter and every time-varying-function
    ## parameter is treated as active (to be estimated).
    if (is.null(active)) {
        active <- 1:length(c(pars, kappa))
    }
    ## Build "fd" objects for the history segment (fdobj0) and the state
    ## estimate (fdobj.d); the delayed states y.d are evaluated from these.
    fdnames <- list(NULL, NULL, NULL)
    fdnames[[2]] <- attr(coefs, "dimnames")[[2]]
    fdobj0 <- list(coefs = coefs0, basis = basisvals0, fdnames = fdnames)
    fdobj.d <- list(coefs = coefs, basis = basisvals, fdnames = fdnames)
    attr(fdobj0, "class") <- "fd"
    attr(fdobj.d, "class") <- "fd"
    ## Assemble the least-squares lik/proc objects.
    ## BUG FIX: forward the user-supplied `eps` instead of the hard-coded
    ## literal 1e-06, so a caller's finite-differencing step size is honoured.
    profile.obj <- LS.setup(pars = c(pars, kappa), coefs = coefs, fn = fn,
        basisvals, lambda = lambda, fd.obj, more, data, weights,
        times, quadrature, eps = eps, posproc, poslik, discrete,
        names, sparse, likfn = make.id(), likmore = NULL)
    lik <- profile.obj$lik
    proc <- profile.obj$proc
    proc$more$more$nKappa <- length(kappa)
    coefs <- profile.obj$coefs
    data <- profile.obj$data
    times <- profile.obj$times
    ## Names for the delay-contribution parameters: "beta<i>.<j>" for lag j
    ## of delayed state i.
    betanames <- c()
    for (i in 1:length(nbeta)) {
        for (j in 1:nbeta[i]) {
            betanames <- c(betanames, paste("beta", i, ".", j, sep = ""))
        }
    }
    proc$more$betanames <- betanames
    ##################################################
    ## Delayed states and delay-aware derivatives
    ##################################################
    ## Evaluate the delayed states at the quadrature points (for proc) and at
    ## the observation times (for lik), then copy the pieces both objects need.
    delayProcObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = proc$more$qpts, tau = tau, beta = beta, ndelay = ndelay)
    delayLikObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = times, tau = tau, beta = beta, ndelay = ndelay)
    lik$more$more$y.d <- delayLikObj$y.d
    proc$more$more$y.d <- delayProcObj$y.d
    lik$more$more$bvals.d <- delayLikObj$bvals.d
    proc$more$more$bvals.d <- delayProcObj$bvals.d
    proc$more$more$bvals.d.list <- delayProcObj$bvals.d.list
    proc$more$more$y.d.list <- delayProcObj$y.d.list
    proc$more$more$ndelay <- lik$more$more$ndelay <- ndelay
    proc$more$more$nbeta <- lik$more$more$nbeta <- sapply(tau, length)
    proc$more$more$tau <- lik$more$more$tau <- tau
    ## Swap in the delay-aware coefficient/parameter derivative functions.
    delay <- make.delay()
    proc$dfdc <- delay$dfdc
    proc$d2fdc2 <- delay$d2fdc2.DDE
    proc$d2fdcdp <- delay$d2fdcdp.sparse
    proc$more$delay <- delay
    proc$more$dfdtau <- dfdbeta.sparse
    proc$more$d2fdxdtau <- d2fxdbeta.sparse
    proc$more$d2fdx.ddtau <- d2fdx.ddbeta.sparse
    ## Inner optimization: fit the basis coefficients at the starting
    ## parameter values. (Removed: unused locals dims/apars/aparamnames and a
    ## leftover debug assignment present in the original.)
    Ires <- inneropt.DDE(data, times, c(pars, kappa), beta, coefs, lik, proc, in.meth, control.in, basisvals = basisvals, fdobj0 = fdobj0)
    ncoefs <- Ires$coefs
    ## Defaults for the outer-optimization controls.
    if (is.null(control.out$maxIter)) {
        control.out$maxIter <- 100
    }
    if (is.null(control.out$tol)) {
        control.out$tol <- 1e-08
    }
    if (is.null(control.out$echo)) {
        control.out$echo <- TRUE
    }
    ## Outer optimization over (pars, kappa, beta).
    res <- nls.tv.delay(pars = pars, beta = beta, kappa = kappa, active = active, basisvals = basisvals, fdobj0 = fdobj0, times = times, data = data, coefs = ncoefs, lik = lik, proc = proc, control.out = control.out, control.in = control.in, in.meth = in.meth)
    ncoefs <- res$coefs
    return(list(data = data, res = res, ncoefs = ncoefs, lik = lik, proc = proc, pars = res$pars, beta = res$beta, kappa = res$kappa, times = times, fdobj.d = fdobj.d, fdobj0 = fdobj0, tau = tau))
}
## Outer Gauss-Newton loop for the profiled, time-varying delay estimation.
## Each outer iteration linearises the profiled residuals via
## ProfileSSE.AllPar.sparse(), then (when control.out$method == "nnls.eq")
## solves a constrained least-squares step for (pars, kappa, beta).
## Step-halving shrinks towards the last accepted iterate whenever the
## objective fails to decrease.
##
## Returns a list with the accepted pars/kappa/beta, the spline coefficients,
## the final objective value f, the last linearisation (y, Xdf, Zdf) and a
## `conv` record (objective history, parameter trace, termination message).
##
## `active`, `start` and `X.index` are accepted for interface compatibility
## but are not used.  The original's unused `twoStage` bookkeeping (which
## also errored when control.out$method was NULL) and the unused
## `lambda.sparse` lookup were removed.
nls.tv.delay <- function(pars, beta, kappa, active, basisvals, fdobj0, times, data, coefs, lik, proc, start, X.index, control.out, control.in, in.meth){
    pars.names <- names(pars)
    kappa.names <- names(kappa)
    ## Objective history and parameter trace over the outer iterations.
    f.conv <- pars.kappa.beta <- c()
    ## Maximum number of step-halvings within one outer iteration.
    maxStep <- 10
    for(i in 1:control.out$maxIter){
        for(j in 1:maxStep){
            linObj <- ProfileSSE.AllPar.sparse(pars = c(pars,kappa), beta = beta, times = times, data = data, coefs = coefs, lik = lik, proc = proc, in.meth = in.meth, control.in = control.in, basisvals = basisvals, fdobj0 = fdobj0)
            ## Sum-of-squares objective at the current trial point.
            f.new <- sum(linObj$f^2)
            if(control.out$echo){
                print(x = c(paste("Iter:", i, f.new)))
                cat(pars, kappa, beta, "\n")
            }
            if(i == 1){
                ## No previous objective to compare against: accept as-is.
                break
            }else{
                ## Improvement is positive but below tolerance: converged.
                if(f.conv[i - 1] - f.new > 0 & f.conv[i - 1] - f.new < control.out$tol){
                    return(list(pars=pars.old, kappa = kappa.old, beta = beta.old, coefs = coefs, f = f.new, y = y, Xdf = Xdf, Zdf = Zdf, conv = list(f = f.conv, pars.kappa.beta=pars.kappa.beta, conv.message = "Converged.")))
                }
                ## Objective decreased: accept the step.
                if(f.conv[i - 1] - f.new > 0){
                    break
                }
                ## Objective still increasing after all halvings: give up.
                if(f.conv[i - 1] - f.new < 0 & j == maxStep){
                    return(list(pars=pars.old, kappa = kappa.old, beta = beta.old, coefs = coefs, f = f.new, y = y, Xdf = Xdf, Zdf = Zdf, conv = list(f = f.conv, pars.kappa.beta=pars.kappa.beta, conv.message = "Non-decreasing objective.")))
                }
                ## Halve the step towards the last accepted iterate.
                pars <- 0.5*(pars - pars.old) + pars.old
                kappa <- 0.5*(kappa - kappa.old) + kappa.old
                beta <- 0.5*(beta - beta.old) + beta.old
            }
        }
        ## Accept and record the current iterate.
        pars.old <- pars
        kappa.old <- kappa
        beta.old <- beta
        f.conv <- c(f.conv, f.new)
        pars.kappa.beta <- rbind(pars.kappa.beta, c(pars, kappa, beta))
        ## Split the Jacobian: Xdf = columns for pars, Zdf = kappa and beta.
        Xdf <- - linObj$df[, 1:length(pars), drop = FALSE]
        Zdf <- - linObj$df[, (length(pars) +1): dim(linObj$df)[2]]
        ## Linearised response for the least-squares step.
        y <- - linObj$df %*% c(pars, kappa, beta) + linObj$f
        coefs <- linObj$coefs
        ## identical() is safe when control.out$method is NULL ("==" would
        ## yield a length-zero condition and error).
        if(identical(control.out$method, "nnls.eq")){
            ## Equality/inequality constrained least squares: E/F force the
            ## delay weights beta to sum to one, G/H keep all parameters
            ## non-negative.
            E <- t(c(rep(0, length(pars)) , rep(0, length(kappa)), rep(1, length(beta))))
            F <- 1
            G <- diag(length(c(pars, kappa, beta)))
            H <- rep(0, length(c(pars, kappa, beta)))
            res <- limSolve::lsei(A= cbind(Xdf, Zdf), B = y, E = E, F=F, G = G, H = H)
            pars <- res$X[1:length(pars)]
            kappa <- res$X[(length(pars) + 1) : (length(pars) + length(kappa))]
            beta <- res$X[(length(pars) + length(kappa) + 1) : (length(pars) + length(kappa) + length(beta))]
        }
        names(pars) <- pars.names
        names(kappa) <- kappa.names
    }
    return(list(pars=pars.old, kappa = kappa.old, beta = beta.old, coefs = coefs, f = f.new, y = y, Xdf = Xdf, Zdf = Zdf, conv = list(f = f.conv, pars.kappa.beta=pars.kappa.beta, conv.message = "Maximum iterations reached.")))
}
##' Sparsity selection for the lags of delay and time varying coefficients
##' This function carry out one step sparsity selection for the lags of delay given the profiled optimization result.
##' @title Sparsity selection for the lags of delay and time varying coefficients
##' @param fn A named list of functions giving the righthand side of a delay differential equation. The functions should have arguments
##' \describe{
##' \item{times}{The times at which the right-hand side is being evaluated.}
##' \item{x}{The state values at those times.}
##' \item{p}{Parameters to be entered in the system.}
##' \item{more}{A list object containing additional inputs to \code{fn}, The distributed delay state are passed into derivative calculation as \code{more$y}.}
##' The list of functions should contain the elements:
##' \item{fn}{Function to calculate the right-hand side.}
##' \item{dfdx}{Function to calculate the derivative of each right-hand function with respect to the states.}
##' \item{dfdp}{Function to calculate the derivative of the right-hand side function with respect to parameters.}
##' \item{d2fdx2}{Function to calculate the second derivatives with respect to states.}
##' \item{d2fdxdp}{Function to calculate the cross derivatives of each right-hand function with respect to state and parameters.}
##' \item{dfdx.d}{Function to calculate the derivative of each righthand function with respect to the delayed states.}
##' \item{d2fdx.ddp}{Function to calculate the cross derivatives of each righthand function with respect to the delayed states and parameters.}
##' \item{d2fdxdx.d}{Function to calculate the cross derivatives of each right-hand function with respect to the state and the delayed states.}
##' \item{d2fdx.d2}{Function to calculate the second derivatives of the right-hand function with respect to the delayed states.}
##' }
##' @param data Matrix of observed data values.
##' @param times Vector observation times for the data.
##' @param basisvals Values of the collocation basis to be used. This should be a basis object from the fda package.
##' @param lambda Penalty value trading off fidelity to data with fidelity to differential equations.
##' @param fd.obj A functional data object; if this is non-null, coefs and basisvals is extracted from here.
##' @param more An object specifying additional arguments to fn.
##' @param weights Weights for weighted estimation.
##' @param quadrature Quadrature points, should contain two elements (if not \code{NULL})
##' \describe{
##' \item{qpts}{Quadrature points; defaults to midpoints between knots.}
##' \item{qwts}{Quadrature weights; defaults to normalizing by the length of qpts.}
##' }
##' @param in.meth Inner optimization function currently one of \code{'nlminb'}, \code{'optim'}, or \code{'trustOptim'}.
##' @param out.meth Outer optimization selection function to be used, depending on the type of method.
##' \describe{
##' \item{"penalized"}{Uses LASSO method from \code{penalized} package.}
##' }
##' @param control.in Control object for inner optimization function.
##' @param control.out Control object for outer optimization function.
##' @param eps Finite differencing step size, if needed.
##' @param active Indices indicating which parameters of pars should be estimated; defaults to all of them.
##' @param posproc Should the state vector be constrained to be positive? If this is the case, the state is represented by an exponentiated basis expansion in the proc object.
##' @param poslik Should the state be exponentiated before being compared to the data? When the state is represented on the log scale (posproc=TRUE), this is an alternative to taking the log of the data.
##' @param discrete Is it a discrete process.
##' @param names The names of the state variables if not given by the column names of coefs.
##' @param sparse Should sparse matrices be used for basis values? This option can save memory when using 'trust' optimization method.
##' @param basisvals0 Values of the collocation basis to be used for the history part of the data. This should be a basis object from the fda package.
##' @param coefs0 Vector giving the estimate of the coefficients in the spline for the history part of the data.
##' @param nbeta The number of lags for the delay.
##' @param ndelay A vector inidicating which state process has a delay term.
##' @param tau A list of delay lags.
##' @param nnls.res The \code{res} item returned from \code{\link{Profile.LS.DDE}}.
##' @return A list with elements
##' \describe{
##' \item{data}{The matrix for the observed data.}
##' \item{res}{The inner optimization result.}
##' \item{select}{A list containing the result after selection, the parameter, delay contribution and coefficients after the selection.}
##' }
##' @seealso \code{\link{Profile.LS.TV.DDE}}
##' @export
##' @author Ziqian Zhou
sparse.TV.DDE <- function(fn, data, times, basisvals = NULL,
    lambda, fd.obj = NULL, more = NULL, weights = NULL, quadrature = NULL,
    in.meth = "nlminb", out.meth = "nls", control.in = list(),
    control.out = list(), eps = 1e-06, active = NULL, posproc = FALSE,
    poslik = FALSE, discrete = FALSE, names = NULL, sparse = FALSE,
    basisvals0 = NULL, coefs0 = NULL, nbeta, ndelay, tau, nnls.res)
{
    ## Names ("beta<i>.<j>") for the delay-distribution weights.
    betanames <- c()
    for(i in 1:length(nbeta)){
        for(j in 1:nbeta[i]){
            betanames <- c(betanames,paste("beta",i,".",j, sep = ""))
        }
    }
    ## Start from the profiled least-squares estimates.
    pars <- nnls.res$pars
    beta <- nnls.res$beta
    kappa <- nnls.res$kappa
    coefs <- nnls.res$coefs
    ## `active` is accepted for interface compatibility; only its default is
    ## filled in here (the original also built unused apars/aparamnames).
    if (is.null(active)) {
        active = 1:length(c(pars,beta, kappa))
    }
    ## Create y.d: fd objects for the history part (fdobj0) and the current
    ## fit (fdobj.d), used below to evaluate the delayed states.
    fdnames <- list(NULL, NULL, NULL)
    fdnames[[2]] <- attr(coefs, "dimnames")[[2]]
    fdobj0 <- list(coefs = coefs0, basis = basisvals0, fdnames =fdnames)
    fdobj.d <- list(coefs = coefs, basis = basisvals, fdnames =fdnames)
    attr(fdobj0, "class") <- "fd"
    attr(fdobj.d, "class") <- "fd"
    profile.obj <- LS.setup(pars = c(pars, kappa), coefs = coefs, fn = fn,
        basisvals, lambda = lambda, fd.obj, more, data, weights,
        times, quadrature, eps = 1e-06, posproc, poslik, discrete,
        names, sparse, likfn = make.id(), likmore = NULL)
    lik = profile.obj$lik
    proc = profile.obj$proc
    proc$more$more$nKappa <- length(kappa)
    coefs <- profile.obj$coefs
    data = profile.obj$data
    times = profile.obj$times
    proc$more$betanames <- betanames
    ##################################################
    ## Added delay data and functions
    ##################################################
    delayProcObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = proc$more$qpts, tau = tau, beta= beta, ndelay = ndelay )
    delayLikObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = times,tau = tau, beta= beta, ndelay = ndelay)
    lik$more$more$y.d <- delayLikObj$y.d
    proc$more$more$y.d <- delayProcObj$y.d
    lik$more$more$bvals.d <- delayLikObj$bvals.d
    proc$more$more$bvals.d <- delayProcObj$bvals.d
    proc$more$more$bvals.d.list <- delayProcObj$bvals.d.list
    proc$more$more$y.d.list <- delayProcObj$y.d.list
    proc$more$more$ndelay <- lik$more$more$ndelay <- ndelay
    proc$more$more$nbeta <- lik$more$more$nbeta <- sapply(tau, length)
    proc$more$more$tau <- lik$more$more$tau <- tau
    delay <- make.delay()
    proc$dfdc <- delay$dfdc
    proc$d2fdc2 <- delay$d2fdc2.DDE
    proc$d2fdcdp <- delay$d2fdcdp.sparse
    proc$more$delay <- delay
    proc$more$dfdtau <- dfdbeta.sparse
    proc$more$d2fdxdtau <- d2fxdbeta.sparse
    proc$more$d2fdx.ddtau <- d2fdx.ddbeta.sparse
    if (is.null(control.out$maxIter)) {
        control.out$maxIter = 100
    }
    if (is.null(control.out$tol)){
        control.out$tol = 1e-08
    }
    if(is.null(control.out$selection.method)){
        control.out$selection.method <- out.meth
    }
    res <- nnls.res
    if(is.null(control.out$pars.c))
        control.out$pars.c <- 100
    ## Selection result; stays NULL unless the penalized path below runs.
    sel.res <- NULL
    ## identical() is safe when lambda.sparse is absent ("==" on NULL yields
    ## a length-zero condition and errors).
    if(identical(control.out$lambda.sparse, "penalized")){
        ## Honour user-supplied control values.  The original only assigned
        ## the defaults, leaving these undefined (and crashing at first use)
        ## whenever a value WAS supplied in control.out.
        maxInnerIter <- if(is.null(control.out$maxInnerIter)) 50 else control.out$maxInnerIter
        nlambda <- if(is.null(control.out$lambda.len)) 10 else control.out$lambda.len
        ## Split the linearised design: Zdf1 = kappa columns, Zdf2 = beta
        ## (delay weight) columns.
        Zdf1 <- res$Zdf[, 1: length(kappa), drop = FALSE]
        Zdf2 <- res$Zdf[,(1+length(kappa)):(length(kappa)+length(beta)),drop = FALSE]
        Xdf <- res$Xdf
        y <- res$y
        Z1 <- rowSums(Zdf1)
        ## Penalty grid for the fused-lasso part (kappa) ...
        beta0 <- sum((y- Xdf %*% pars) * Z1) / sum(Z1^2)
        lambda10 <- max(abs(as.vector(t(y - Xdf %*% pars - Z1 * beta0) %*% sweep(Zdf1, 1, mean(Zdf1)))))
        lambda1 = exp(seq(log(lambda10 * 0.1), log(lambda10 * 0.001), len = nlambda))
        ## ... and for the plain-lasso part (beta).
        lambda20 <- max(abs(as.vector(t(y) %*% Zdf2)))
        lambda2 = exp(seq(log(lambda20), log(lambda20 * 0.001), len = nlambda))
        pars.pen <- kappa.pen <- beta.pen <- coefs.pen <- list()
        bic <- rep(NA, length(lambda1) * length(lambda2))
        for(i in 1:nlambda){
            for(j in 1:nlambda){
                ## Flat index into the lambda1 x lambda2 grid.
                ij <- (i - 1) * nlambda + j
                y1 <- y - Zdf2 %*% beta
                y2 <- y - Zdf1 %*% kappa
                kappa.pen[[ij]] <- kappa
                pars.pen[[ij]] <- pars
                beta.pen[[ij]] <- beta
                ## Alternate the two penalized fits until the estimates
                ## stabilise (squared change < eps) or maxInnerIter is hit.
                for(k in 1:maxInnerIter){
                    ## NOTE: in penalized(), `lambda2` is the fused-lasso
                    ## penalty and `lambda1` the plain-lasso penalty, hence
                    ## the crossed-looking argument names.
                    res.sparse1 <- penalized::penalized(response = y1, penalized = Zdf1, unpenalized = Xdf, lambda2 = lambda1[i], fusedl = TRUE, positive = TRUE, maxiter = 50, trace = FALSE)
                    res.sparse2 <- penalized::penalized(response = y2, penalized = Zdf2, unpenalized = Xdf, lambda1 = lambda2[j], positive = TRUE, maxiter = 50, trace = FALSE)
                    ## Normalise the delay weights to sum to one; guard the
                    ## all-zero case so we never divide by zero.
                    beta.total <- sum(res.sparse2@penalized)
                    sumbeta <- if(beta.total == 0) 1 else beta.total
                    if(sum((kappa.pen[[ij]] - res.sparse1@penalized)^2, (pars.pen[[ij]] - (res.sparse1@unpenalized + res.sparse2@unpenalized)/2)^2, (res.sparse1@unpenalized - res.sparse2@unpenalized)^2, (beta.pen[[ij]] - res.sparse2@penalized / sumbeta)^2) < eps){
                        kappa.pen[[ij]] <- res.sparse1@penalized
                        pars.pen[[ij]] <- (res.sparse2@unpenalized + res.sparse1@unpenalized) / 2
                        beta.pen[[ij]] <- res.sparse2@penalized / sumbeta
                        break
                    }
                    kappa.pen[[ij]] <- res.sparse1@penalized
                    pars.pen[[ij]] <- (res.sparse2@unpenalized + res.sparse1@unpenalized) / 2
                    beta.pen[[ij]] <- res.sparse2@penalized / sumbeta
                    y1 <- y - Zdf2 %*% beta.pen[[ij]]
                    y2 <- y - Zdf1 %*% kappa.pen[[ij]]
                }
                ## Refit the inner optimisation at the selected parameters
                ## and score the fit with a Gaussian BIC.
                Ires <- inneropt.DDE(data, times, c(pars.pen[[ij]],kappa.pen[[ij]]), beta.pen[[ij]], coefs, lik, proc, in.meth, control.in, basisvals = basisvals, fdobj0 = fdobj0)
                devals <- as.matrix(lik$bvals%*%Ires$coefs)
                ## NOTE(review): residuals are evaluated at the original
                ## `pars`, not `pars.pen[[ij]]` -- confirm this is intended.
                f <- as.vector(as.matrix(data - lik$more$fn(times, devals, pars, lik$more$more)))
                coefs.pen[[ij]] <- Ires$coefs
                sd.pen <- sd(f)
                ll.pen <- - sum(f^2) / (sd.pen^2) / 2 - length(f) * log(sd.pen)
                ## Effective parameter count: distinct kappa levels (fused
                ## lasso), all pars, and the non-zero delay weights.
                bic[ij] <- -2 * ll.pen + (length(unique(kappa.pen[[ij]])) + length(pars.pen[[ij]]) + sum(beta.pen[[ij]] != 0)) * log(length(data))
            }
        }
        ## which.min() returns a single index even on ties; the original
        ## which(bic == min(bic)) could return several and break the list
        ## subsetting below.
        ij.select <- which.min(bic)
        sel.res <- list(pars.pen = pars.pen[[ij.select]], kappa.pen = kappa.pen[[ij.select]], beta.pen = beta.pen[[ij.select]], bic = bic[ij.select], coefs.pen = coefs.pen[[ij.select]], lambda = c(lambda1[ceiling(ij.select / nlambda)], lambda2[ifelse(ij.select %% nlambda == 0, nlambda, ij.select %% nlambda)]))
    }
    ## Name the selection element to match the documented return value; the
    ## original returned it unnamed (and undefined when selection was
    ## skipped).
    return(list(data = data, res = res, select = sel.res))
}
## Initialize with unobserved data
## Initialise the spline coefficients when one state is unobserved.
## Mirrors the setup of the profiling routines above (LS.setup plus the
## delay machinery), then delegates the actual least-squares coefficient
## update to inneropt.LS.unob().  Many arguments (fn, lambda, control.*,
## out.meth, ...) exist to match the profiling interface; only those used
## below affect the result.  Returns the lm.fit() object produced by
## inneropt.LS.unob().
init.unob.LS.tv.delay <- function(fn, data, times, pars, beta, kappa, coefs = NULL, basisvals = NULL,
    lambda, fd.obj = NULL, more = NULL, weights = NULL, quadrature = NULL,
    in.meth = "nlminb", out.meth = "nls", control.in = list(),
    control.out = list(), eps = 1e-06, active = NULL, posproc = FALSE,
    poslik = FALSE, discrete = FALSE, names = NULL, sparse = FALSE,
    likfn = make.id(), likmore = NULL, delay = NULL, tauMax = NULL,
    basisvals0 = NULL, coefs0 = NULL, nbeta, ndelay, tau, unob = 1)
{
    ## Default: treat every entry of c(pars, kappa) as estimable.
    if (is.null(active)) {
        active = 1:length(c(pars, kappa))
    }
    ## Create y.d: fd objects for the history part (fdobj0) and the current
    ## coefficients (fdobj.d), used to evaluate the delayed states.
    fdnames <- list(NULL, NULL, NULL)
    fdnames[[2]] <- attr(coefs, "dimnames")[[2]]
    fdobj0 <- list(coefs = coefs0, basis = basisvals0, fdnames =fdnames)
    fdobj.d <- list(coefs = coefs, basis = basisvals, fdnames =fdnames)
    attr(fdobj0, "class") <- "fd"
    attr(fdobj.d, "class") <- "fd"
    ## Standard CollocInfer-style setup of the lik/proc objects.
    profile.obj = LS.setup(pars = c(pars, kappa), coefs = coefs, fn = fn,
        basisvals, lambda = lambda, fd.obj, more, data, weights,
        times, quadrature, eps = 1e-06, posproc, poslik, discrete,
        names, sparse, likfn = make.id(), likmore = NULL)
    dims = dim(data)
    lik = profile.obj$lik
    proc = profile.obj$proc
    proc$more$more$nKappa <- length(kappa)
    coefs = profile.obj$coefs
    data = profile.obj$data
    times = profile.obj$times
    ## Create names ("beta<i>.<j>") for delay parameters beta
    betanames <- c()
    for(i in 1:length(nbeta)){
        for(j in 1:nbeta[i]){
            betanames <- c(betanames,paste("beta",i,".",j, sep = ""))
        }
    }
    proc$more$betanames <- betanames
    ##################################################
    ## Added delay data and functions: evaluate the delayed states at the
    ## quadrature points (proc) and the observation times (lik), and attach
    ## the delay-aware derivative functions.
    ##################################################
    delayProcObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = proc$more$qpts, tau = tau, beta= beta, ndelay = ndelay )
    delayLikObj <- delay.fit.sparse(fd0 = fdobj0, fd.d = fdobj.d, times = times,tau = tau, beta= beta, ndelay = ndelay)
    lik$more$more$y.d <- delayLikObj$y.d
    proc$more$more$y.d <- delayProcObj$y.d
    lik$more$more$bvals.d <- delayLikObj$bvals.d
    proc$more$more$bvals.d <- delayProcObj$bvals.d
    proc$more$more$bvals.d.list <- delayProcObj$bvals.d.list
    proc$more$more$y.d.list <- delayProcObj$y.d.list
    proc$more$more$ndelay <- lik$more$more$ndelay <- ndelay
    proc$more$more$nbeta <- lik$more$more$nbeta <- sapply(tau, length)
    proc$more$more$tau <- lik$more$more$tau <- tau
    delay <- make.delay()
    proc$dfdc <- delay$dfdc
    proc$d2fdc2 <- delay$d2fdc2.DDE
    proc$d2fdcdp <- delay$d2fdcdp.sparse
    proc$more$delay <- delay
    proc$more$dfdtau <- dfdbeta.sparse
    proc$more$d2fdxdtau <- d2fxdbeta.sparse
    proc$more$d2fdx.ddtau <- d2fdx.ddbeta.sparse
    ## Earlier approach kept for reference: joint inner optimisation with
    ## the observed columns held fixed.
    ## Ires <- inneropt.DDE.unob(data, times, c(pars, kappa), beta, coefs[,1:unob], lik, proc, in.meth, control.in, basisvals = basisvals, fdobj0 = fdobj0, coefs.fix = coefs[, (unob+1):dim(coefs)[2]])
    ## Single linear least-squares step for the unobserved state's
    ## coefficients.
    ncoefs <- inneropt.LS.unob(data, pars, kappa, beta, coefs, lik, proc)
    return(ncoefs)
}
## One linear least-squares update of the spline coefficients for the
## unobserved state (column 1 of `coefs`), holding the observed state
## (column 2), the time-varying rate `kappa` and pars["gamma"] fixed.
## `beta` and `lik` are accepted for interface compatibility but unused.
## Returns the lm.fit() object for the stacked system.
## (The original also extracted coefs[, 1] into an unused local, removed
## here.)
inneropt.LS.unob <- function(data, pars, kappa, beta, coefs, lik, proc){
    ## Spline coefficients of the observed state.
    coefsI <- coefs[,2, drop = FALSE]
    ## Observed state, its delayed version, and its derivative evaluated at
    ## the quadrature points.
    I <- as.matrix(proc$bvals$bvals %*% coefsI)
    I.d <- proc$more$more$y.d
    dI <- as.matrix(proc$bvals$dbvals %*% coefsI)
    ## Hoisted: the time-varying rate times the delayed state appears in
    ## both blocks of the design matrix (the original computed the same
    ## sweep() twice).
    rate.Id <- tvtrans(proc$more$qpts, kappa) * I.d
    X2 <- sweep(proc$bvals$bvals, 1, rate.Id, "*")
    X1 <- X2 + proc$bvals$dbvals
    X <- rbind(X1, X2)
    ## Stacked right-hand side; proc$more$more$b supplies the target for
    ## the first block.
    y <- c(proc$more$more$b, as.vector(dI + pars["gamma"] * I))
    coefs.fit <- lm.fit(x = X, y = y)
    return(coefs.fit)
}
|
c75f659f1ddd4c93a2a68a0db92c2bc55e1020a5 | cf7df86691b86344df40a2cb1a188c189536ac29 | /Normal/Algo8_Norm.R | 0f866ff3b7f7e4238319b3d64da9998b6e92944d | [] | no_license | pasudyan/RNeal_MCMC_DPMM | c1d6308d16441916cfe77c8c06c7283c0098d3c3 | 31a01ccafbd5bb2beb522c8aedb00faf5033f315 | refs/heads/master | 2021-04-29T17:25:18.301869 | 2018-02-15T19:12:28 | 2018-02-15T19:12:28 | 121,668,987 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,711 | r | Algo8_Norm.R | ##MCMC methods for parameter estimation (Algorithm 8)##
#######################################################
## Initialisation for the Algorithm-8 sampler (normal likelihood).
## Globals expected to exist already: numIter, alpha, num_cust, xtraSpace,
## mu_base, sd_base.
## Per-iteration record of the parameter attached to the test observation.
test_phi.8 <- numeric(numIter)
## Start with roughly the expected number of clusters under the DP prior.
K <- floor(alpha * log(num_cust))
## Cluster parameters, padded out to xtraSpace slots for the auxiliaries.
phi.8 <- numeric(xtraSpace)
phi.8[seq_len(K)] <- rnorm(K, mu_base, sd_base)  # draws from the base measure (algo 8)
## Per-iteration cluster-assignment record.
cluster_store8 <- numeric(numIter)
## Random initial assignment guaranteeing every cluster has >= 1 member.
seed_labels <- c(seq_len(K), sample(seq_len(K), num_cust - K, replace = TRUE))
cluster <- sample(seed_labels, num_cust, replace = FALSE)
## Occupancy count per cluster (padded like phi.8).
counts <- numeric(xtraSpace)
counts[seq_len(K)] <- vapply(seq_len(K), function(k) sum(cluster == k), numeric(1))
## Work vectors reused inside the sampler loop.
prob_clust <- numeric(xtraSpace)
sum_cluster <- numeric(xtraSpace)
## Main Gibbs sweep (Neal's Algorithm 8 with m auxiliary components).
## Each iteration visits the observations in random order, resamples each
## observation's cluster among the K occupied plus m auxiliary components,
## then resamples every cluster parameter from its conjugate-normal
## conditional.
for (ii in 1:numIter){
  rand_samples = sample(1:num_cust,
                        num_cust,
                        replace=FALSE)
  #iteration for each observation
  for (b in 1:num_cust){
    j = rand_samples[b]
    # h = total slots: K occupied clusters plus m auxiliaries
    h = K + m
    #removing the observation from the cluster
    counts[cluster[j]] = counts[cluster[j]]-1
    #reevaluating cluster assignments
    if (counts[cluster[j]]!=0){
      # cluster survives: draw all m auxiliary params fresh from G_0
      phi.8[(K+1):h] = rnorm(m, mu_base, sd_base)
    } else if (counts[cluster[j]]==0 & cluster[j]!=K){
      # cluster emptied (and is not the last label): relabel so the
      # occupied labels stay contiguous 1..K-1
      counts[cluster[j]] = counts[K]
      cluster[cluster==K] = cluster[j]
      counts[K] = 0
      #swap phi.8 entries to match the relabeling
      temp = phi.8[cluster[j]]
      phi.8[cluster[j]] = phi.8[K]
      phi.8[K] = temp
      #reducing the number of clusters and aux clusters
      K = K-1
      h = h-1
      # slot K+1 now holds the removed cluster's parameter, which serves
      # as one auxiliary; draw the remaining m-1 fresh from G_0
      phi.8[(K+2):h] = rnorm(m-1, mu_base, sd_base)
    } else if (counts[cluster[j]]==0 & cluster[j]==K){
      # cluster emptied and already carries the last label: no relabeling
      K = K-1
      h = h-1
      # as above, slot K+1 keeps the removed value; draw m-1 fresh
      phi.8[(K+2):h] = rnorm((m-1), mu_base, sd_base)
    }
    # log prob of joining an existing cluster: proportional to its size
    # times the normal likelihood of the observation
    prob_clust[1:K] = log(counts[1:K]) +
      dnorm(mixDat[j], phi.8[1:K], sd_obs, log=TRUE)
    # log prob of each auxiliary: weight alpha/m times the likelihood
    prob_clust[(K+1):h] = log(alpha/m) +
      dnorm(mixDat[j], phi.8[(K+1):h], sd_obs, log=TRUE)
    # normalize in log space for numerical stability
    prob_norm = prob_clust[1:h] -
      logSumExp(prob_clust[1:h])
    #sampling new cluster assignments
    new_table = sample(1:h,
                       1,
                       replace = FALSE,
                       exp(prob_norm)
    )
    # an auxiliary was chosen: promote it to a new occupied cluster K+1
    if (new_table > K){
      cluster[j] = K+1
      phi.8[K+1] = phi.8[new_table]
      counts[K+1] = 1
      K = K+1
    } else {
      cluster[j] = new_table
      counts[new_table] = counts[new_table]+1
    }
    # clear the unused tail so stale values never leak into later sweeps
    # NOTE(review): if K ever reaches xtraSpace, (K+1):xtraSpace counts
    # down and this would corrupt slot K -- confirm xtraSpace is sized
    # with enough headroom
    phi.8[(K+1):xtraSpace] = rep(0,xtraSpace-K)
    counts[(K+1):xtraSpace] = rep(0,xtraSpace-K)
  }
  # resample each occupied cluster's parameter from its conjugate normal
  # posterior (normal base measure, normal likelihood)
  sum_cluster = sapply(1:K, function(r)
    sum(mixDat[cluster==r]))
  var_phi.8 =((var_base*var_obs)/
                (counts[1:K]*var_base+var_obs))
  mean_phi.8 = (var_phi.8)*
    (mu_base/var_base +
       sum_cluster/var_obs)
  phi.8[1:K] = rnorm(K, mean_phi.8, sqrt(var_phi.8))
  # record the parameter currently attached to the tracked observation
  test_phi.8[ii] = phi.8[cluster[test_obs]]
}
|
28c882ec0f0c8f8cd0abe1a34c9a526f2f4ea57e | a329d887c850b4dabe78ea54b8bf3587136f7571 | /man/boostLinear.Rd | c677e109b1756adb2319bf3608aa550222311be9 | [
"MIT"
] | permissive | mllg/compboost | 2838ab72b1505305136fa8fbf752a62f886303ec | f493bb92050e27256f7937c82af6fa65e71abe67 | refs/heads/master | 2020-04-06T09:45:45.958662 | 2018-11-13T09:53:05 | 2018-11-13T09:53:05 | 157,355,755 | 0 | 0 | NOASSERTION | 2018-11-13T09:39:14 | 2018-11-13T09:39:13 | null | UTF-8 | R | false | true | 3,044 | rd | boostLinear.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boost_linear.R
\name{boostLinear}
\alias{boostLinear}
\title{Wrapper to boost linear models for each feature.}
\usage{
boostLinear(data, target, optimizer = OptimizerCoordinateDescent$new(),
loss, learning.rate = 0.05, iterations = 100, trace = -1,
intercept = TRUE, data.source = InMemoryData,
data.target = InMemoryData)
}
\arguments{
\item{data}{[\code{data.frame}]\cr
A data frame containing the data on which the model should be built.}
\item{target}{[\code{character(1)}]\cr
Character indicating the target variable. Note that the loss must match the
data type of the target.}
\item{optimizer}{[\code{S4 Optimizer}]\cr
Optimizer to select features. This should be an initialized \code{S4 Optimizer} object
exposed by Rcpp (for instance \code{OptimizerCoordinateDescent$new()}).}
\item{loss}{[\code{S4 Loss}]\cr
Loss used to calculate the risk and pseudo residuals. This object must be an initialized
\code{S4 Loss} object exposed by Rcpp (for instance \code{LossQuadratic$new()}).}
\item{learning.rate}{[\code{numeric(1)}]\cr
Learning rate which is used to shrink the parameter in each step.}
\item{iterations}{[\code{integer(1)}]\cr
Number of iterations that are trained.}
\item{trace}{[\code{integer(1)}]\cr
Integer indicating how often a trace should be printed. If you specify \code{trace = 10}, every
10th iteration is printed. If no trace should be printed, set \code{trace = 0}. The default is
-1, which sets \code{trace} so that roughly 40 iterations are printed in total.}
\item{intercept}{[\code{logical(1)}]\cr
Internally used by \code{BaselearnerPolynomial}. This logical value indicates if
each feature should get an intercept or not (default is \code{TRUE}).}
\item{data.source}{[\code{S4 Data}]\cr
Uninitialized \code{S4 Data} object which is used to store the data. At the moment
just in memory training is supported.}
\item{data.target}{[\code{S4 Data}]\cr
Uninitialized \code{S4 Data} object which is used to store the data. At the moment
just in memory training is supported.}
}
\value{
Usually a model of class \code{Compboost}. This model is an \code{R6} object
which can be used for retraining, predicting, plotting, and anything described in
\code{?Compboost}.
}
\description{
This wrapper function automatically initializes the model by adding all numerical
features of a dataset within a linear base-learner. Categorical features are
dummy encoded and inserted using linear base-learners without intercept. After
initializing the model, \code{boostLinear} also fits as many iterations as given
by the user through \code{iterations}.
}
\details{
The returned object is an object of the \code{Compboost} class which then can be
used for further analyses (see \code{?Compboost} for details).
}
\examples{
mod = boostLinear(data = iris, target = "Sepal.Length", loss = LossQuadratic$new())
mod$getBaselearnerNames()
mod$getEstimatedCoef()
table(mod$getSelectedBaselearner())
mod$predict()
mod$plot("Sepal.Width_linear")
}
|
e9f73be1dfa8e5a0c0446c57980bae04619a0316 | 5dd95e1685c24227627d86fac4e828140b88b2f7 | /code/corellation_DF.R | e8854ea6757829e93fc9b8e8b06a90d3e84ab061 | [] | no_license | NOAA-SWFSC-ERD/BREP | b39e7305b4766fe40bb8624584d2fc85532bf34d | 8356fca506a16cb70ffac8a3e7776d6fc5f5a28b | refs/heads/master | 2022-09-24T13:39:27.332691 | 2022-09-21T17:54:07 | 2022-09-21T17:54:07 | 92,861,834 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,702 | r | corellation_DF.R | #### code to run ts correlations
## follows NC_batch_SST_SSTANOM.R
#### load libraries
library(raster)
library(ncdf4)
library(maps)
#### load global objects
load("/Volumes/SeaGate/BREP/BREP/brep_scb_CC_pts_enso34.RData") ## attach turtle citings time-series
## Monthly JPL MUR SST NetCDF files; the first one supplies the grid used
## as the rasterize() template inside the correlation loop below.
netcdf=list.files("/Volumes/SeaGate/BREP/jplmur",pattern="*jplMURSST41mday_*",full.names = T)#names of netcdffiles
template_native=raster(netcdf[1])
## Study-region bounding box (lon -140..-108, lat 18..42) used to crop the
## correlation rasters.
e=extent(-140,-108,18,42)
#### define functions
make_png=function(r,name,blanks){
  ## Render the correlation raster `r` to a two-panel PNG (map on top,
  ## colour-key strip below) in the monthly_plots directory.  `blanks`
  ## records how years without sightings were handled upstream ("NA" =
  ## removed, "zeros" = set to zero) and is encoded in both the file name
  ## and the caption.
  if(blanks=="NA"){
    suffix <- "_NA.png"
    treatment <- "years with no sightings removed"
  } else if(blanks=="zeros"){
    suffix <- "_zeros.png"
    treatment <- "years with no sightings zeroed"
  } else {
    ## Previously an unrecognised value opened no device yet still called
    ## dev.off(), closing whatever device happened to be active.
    stop("blanks must be \"NA\" or \"zeros\"")
  }
  png(paste0("/Volumes/SeaGate/BREP/BREP/monthly_plots/",name,suffix), width=7, height=5, units="in", res=400)
  on.exit(dev.off())  # close the device even if plotting fails part-way
  par(ps=10) #settings before layout
  layout(matrix(c(1,2), nrow=2, ncol=1, byrow=TRUE), heights=c(4,1), widths=7)
  par(cex=1) # layout tends to change par()$cex, so reset it for control
  par(mar=c(4,4,1,1)) # set margins before each panel
  pal <- colorRampPalette(c("purple4","blue", "cyan", "yellow", "red"))
  ncolors <- 100
  breaks <- seq(-1,1,,ncolors+1)
  ## Main panel: correlation map with coastline and highlight contours.
  image(r, col=pal(ncolors), breaks=breaks)
  map("world", add=TRUE, lwd=2)
  contour(r, add=TRUE, col="black",levels=c(-.75,-.5,.5,.75))
  box()
  ## Bottom panel: horizontal colour key spanning [-1, 1].
  par(mar=c(4,4,0,1))
  levs <- breaks[-1] - diff(breaks)/2
  image(x=levs, y=1, z=as.matrix(levs), col=pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
  mtext(paste0("Correlation [R], ",name,", 2003-2016, ",treatment), side=1, line=2.5)
  box()
}
make_raster=function(r,name,blanks){
  ## Save the correlation raster in the raster package's native .grd
  ## format; the suffix records how no-sighting years were handled.
  ## Unrecognised values of `blanks` write nothing, as before.
  if (blanks %in% c("NA", "zeros")) {
    out.file <- paste("/Volumes/SeaGate/BREP/BREP/monthly_plots/",
                      name, "_", blanks, ".grd", sep = "")
    writeRaster(r, filename = out.file, overwrite = TRUE)
  }
}
## For every monthly climate file, correlate each grid cell's yearly time
## series against annual sighting counts, then write the correlation map
## as PNG + native raster -- once with no-sighting years dropped ("NA")
## and once with them zeroed ("zeros").
## NOTE(review): pattern= takes a regex, so "*.rds" only works by accident;
## also the *_sightings.rds files saved below land in the same directory
## and would match on a re-run -- confirm this is intended.
for(rdss in list.files("/Volumes/SeaGate/BREP/BREP/monthly",pattern="*.rds")){
  name=gsub(".rds","",rdss)
  print(name)
  rds=readRDS(paste("/Volumes/SeaGate/BREP/BREP/monthly/",rdss,sep=""))
  ## Transpose so rows are years and columns are the 7674401 grid cells
  ## (rows 1-2 appear to be retained metadata rows; year rows start at 3).
  transposed=as.data.frame(t(rds))
  if(grepl("2002",rownames(transposed)[3])){ ### get rid of 2002 data, only 6 months OTY have data from 2002
    transposed=transposed[c(1:2,4:nrow(transposed)),]
  }
  ######## NAs for years missing data
  ## Column 7674402 = sightings_blanks: yearly sighting counts, with
  ## no-sighting years (the commented-out rows) left as NA so they are
  ## dropped by complete.cases() below.
  transposed$sightings_blanks=NA
  transposed[3,7674402]=10 #2003
  #transposed[4,7674402]=3 #2004
  transposed[5,7674402]=1 #2005
  transposed[6,7674402]=34 #2006
  #transposed[7,7674402]=3 #2007
  #transposed[8,7674402]=3 #2008
  #transposed[9,7674402]=3 #2009
  #transposed[10,7674402]=3 #2010
  #transposed[11,7674402]=3 #2011
  #transposed[12,7674402]=3 #2012
  transposed[13,7674402]=1 #2013
  transposed[14,7674402]=94 #2014
  transposed[15,7674402]=469 #2015
  transposed[16,7674402]=56 #2016
  ######## zeros for years missing data
  ## Column 7674403 = sightings_zeros: same counts, but no-sighting years
  ## set to 0 so every year enters the correlation.
  transposed$sightings_zeros=NA
  transposed[3,7674403]=10 #2003
  transposed[4,7674403]=0 #2004
  transposed[5,7674403]=1 #2005
  transposed[6,7674403]=34 #2006
  transposed[7, 7674403] = 0 #2007
  transposed[8,7674403]=0 #2008
  transposed[9,7674403]=0 #2009
  transposed[10,7674403]=0 #2010
  transposed[11,7674403]=0 #2011
  transposed[12,7674403]=0 #2012
  transposed[13,7674403]=1 #2013
  transposed[14,7674403]=94 #2014
  transposed[15,7674403]=469 #2015
  transposed[16,7674403]=56 #2016
  ## Persist the annotated table alongside the input files.
  saveRDS(transposed,file=paste0("/Volumes/SeaGate/BREP/BREP/monthly/",name,"_sightings.rds"))
  ## blanks: correlate sightings (NA years dropped) against every cell
  print(paste0(name," blanks"))
  transposed_NA=transposed[complete.cases(transposed[,7674402]),]
  a=cor(transposed_NA[,7674402],transposed_NA[,1:7674401])
  ## Reshape the 1 x ncell correlation row into points, rasterize on the
  ## template grid and crop to the study region.
  b=as.data.frame(t(a))
  colnames(b)="Cor"
  b$lon=rds$lon
  b$lat=rds$lat
  coordinates(b)=~lon+lat
  r=rasterize(b,template_native,field="Cor",fun=mean)
  r=crop(r,e)
  make_png(r=r,name=name,blanks="NA")
  make_raster(r=r,name=name,blanks="NA")
  ## zeros: same pipeline with the zero-filled sighting column
  print(paste0(name," zeros"))
  transposed_zeros=transposed[complete.cases(transposed[,7674403]),]
  a=cor(transposed_zeros[,7674403],transposed_zeros[,1:7674401])
  b=as.data.frame(t(a))
  colnames(b)="Cor"
  b$lon=rds$lon
  b$lat=rds$lat
  coordinates(b)=~lon+lat
  r=rasterize(b,template_native,field="Cor",fun=mean)
  r=crop(r,e)
  make_png(r=r,name=name,blanks="zeros")
  make_raster(r=r,name=name,blanks="zeros")
}
|
0a6a26a2ae080bbcb76f832883dc975f55443414 | 28c27fbecd9dc8d075b772c01bf793f83bacf8bd | /plot.R | b4b211a24a31386ceeecd807dcdd696e04605289 | [
"MIT"
] | permissive | titou26140/bnn | dc29e254746c3e16879ebf7ea3645cd32c0e6fa6 | cafab2f9a1bde2dfdb737310cbb823338e987a2b | refs/heads/master | 2020-04-24T03:33:53.471337 | 2019-03-11T14:22:16 | 2019-03-11T14:22:16 | 171,673,960 | 0 | 0 | MIT | 2019-02-20T13:01:11 | 2019-02-20T13:01:11 | null | UTF-8 | R | false | false | 566 | r | plot.R | library(ggplot2)
library(lubridate)
## Plot per-image predicted counts against time of day, one panel per date.
preds <- read.delim("/home/mat/dev/bnn2/predict.out", header = FALSE,
                    col.names = c("idx", "img_name", "count"))
preds$idx <- NULL
head(preds)
## Timestamps are encoded in the image file names ("<datetime>.jpg").
preds$dts <- ymd_hms(gsub(".jpg", "", preds$img_name))
preds$date_str <- as.factor(sprintf("%4d%02d%02d", year(preds$dts), month(preds$dts), day(preds$dts)))
preds$time_of_day <- hour(preds$dts) + minute(preds$dts)/60 + second(preds$dts)/3600
summary(preds)
ggplot(preds, aes(time_of_day, count)) +
  geom_point(alpha = 0.3, aes(color = date_str), position = "jitter") +
  geom_smooth(aes(color = date_str)) +
  facet_grid(. ~ date_str)
|
2f20a727450813634647bc7071dc4b1c68f066fa | 131c83453196969874b2eebc65b0c12e98bb00dc | /reresearch/processstormdata.R | 695a9e4685eb4a63018206691c1327bc51b03127 | [] | no_license | david-c-street/datasciencecoursera | d3cb4fc25c2f50d85d3610dd0806cabda94648cc | f63fe5f72d5429ba4abecbb4f71ccccb13e95491 | refs/heads/master | 2021-05-29T17:43:58.973159 | 2015-04-26T18:17:34 | 2015-04-26T18:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,166 | r | processstormdata.R | #script to process storm data and do some analysis on it
library(ggplot2)
library(data.table)
library(dplyr)
#file and data processing
originalfile <- 'StormData.csv'
cleanfile <- 'stormdataclean.csv'
#make a nice CSV file and save it
if (!file.exists(cleanfile)){
rawstormdata <- read.csv(originalfile)
rawstormdata <- tbl_df(rawstormdata)
#get rid of some unused columns to speed up operations
rawstormdata <- select(rawstormdata, -REMARKS, -STATE__,
-(BGN_RANGE:END_LOCATI), -(CROPDMG:ZONENAMES))
write.csv(rawstormdata, cleanfile)
}
#load data into local data table, make a couple changes
if (!exists('stormdata')){
stormdata <- tbl_dt(fread(cleanfile, stringsAsFactors=T))
}
#create convenient grouped data sets and get relevant sums
#human costs
stormdata_byevent <- group_by(stormdata, EVTYPE)
stormdata_byevent_humansums <- tbl_df(summarise(stormdata_byevent,
fatalities=sum(FATALITIES, na.rm=T), injuries = sum(INJURIES, na.rm=T),
casualties=sum(FATALITIES, INJURIES, na.rm=T)))
stormdata_byevent_humansums <- filter(stormdata_byevent_humansums, casualties>0)
stormdata_byevent_humansums <- arrange(stormdata_byevent_humansums,
desc(casualties))
stormdata_byevent_humansumstop5 <- head(stormdata_byevent_humansums, n=5)
#property costs
stormdata_byeventpdmg <- group_by(stormdata, EVTYPE, PROPDMGEXP)
stormdata_byevent_propsums <- tbl_df(summarise(stormdata_byeventpdmg,
damages=sum(PROPDMG, na.rm=T)))
stormdata_byevent_propsums <- filter(stormdata_byevent_propsums, damages>0)
#one last processing step to get apply PROPDMGEXP column to sums
#create a new column of data that translates EXP tag into a multiplier
allexp <- stormdata_byevent_propsums[,'PROPDMGEXP']
multy <- numeric()
for (i in 1:dim(allexp)[1]){
crnt_str <- as.character(allexp[i,])
if (crnt_str=='M' | crnt_str=='m'){
multy <- c(multy, 1000000.0)
} else if (crnt_str=='B' | crnt_str=='b'){
multy <- c(multy, 1000000000.0)
} else {
#assume if it is K, k or blank, the multiplier in 1,000
multy <- c(multy, 1000.0)
}
}
#use that multiplier to create a columns that is the actual numerical damage
stormdata_byevent_propsums <- cbind(stormdata_byevent_propsums, multy=multy)
stormdata_byevent_propsums <- mutate(stormdata_byevent_propsums,
propdmgtotal=damages*multy)
#group by one more time to consolidate the event types in to one row
stormdata_byeventpdmg <- group_by(stormdata_byevent_propsums, EVTYPE)
stormdata_byevent_propsums <- summarise(stormdata_byeventpdmg,
damages= sum(propdmgtotal))
stormdata_byevent_propsums <- arrange(stormdata_byevent_propsums,
desc(damages))
stormdata_byevent_propsumstop5 <- head(stormdata_byevent_propsums, n=5)
#plots
propbartop5 <- ggplot(head(stormdata_byevent_propsumstop5))
propbartop5 <- propbartop5 + geom_bar(aes(x=EVTYPE, y=damages),
stat='identity')
propbartop5 <- propbartop5 + labs(title='5 Most Costly Events',
x='Event', y='Property Damage (USD)')
humanbartop5 <- ggplot(head(stormdata_byevent_humansumstop5))
humanbartop5 <- humanbartop5 + geom_bar(aes(x=EVTYPE, y=casualties),
stat='identity')
humanbartop5 <- humanbartop5 + labs(title='5 Most Dangerous Events',
x='Event', y='Casualties') |
8dbfcb209834abbfbcb5229c84dbbdc99f714c1d | 4951e7c534f334c22d498bbc7035c5e93c5b928d | /sourcecode/select_nesting.R | d34e2aa07733ae5526e88487443fca1f55efd605 | [] | no_license | Derek-Jones/ESEUR-code-data | 140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1 | 2f42f3fb6e46d273a3803db21e7e70eed2c8c09c | refs/heads/master | 2023-04-04T21:32:13.160607 | 2023-03-20T19:19:51 | 2023-03-20T19:19:51 | 49,327,508 | 420 | 50 | null | null | null | null | UTF-8 | R | false | false | 1,350 | r | select_nesting.R | #
# select_nesting.R, 16 Jan 20
# Data from:
# The New {C} Standard: {An} Economic and Cultural Commentary
# Derek M. Jones
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG C selection-statement_nesting source-code_C
# Produces two plots: (1) the distribution of line lengths in C source files,
# and (2) the distribution of selection-statement nesting depth, overlaid
# with a log-linear (exponential) model fit.
source("ESEUR_config.r")
# plot_layout(), point_col and ESEUR_dir come from the sourced ESEUR
# configuration; lay the two plots out in a 2x1 grid.
plot_layout(2, 1)
pal_col=rainbow(2)
# compressed CSVs shipped with the book's data bundle
sn=read.csv(paste0(ESEUR_dir, "sourcecode/select_nesting.csv.xz"), as.is=TRUE)
ll=read.csv(paste0(ESEUR_dir, "sourcecode/logicline.csv.xz"), as.is=TRUE)
# tl=read.csv(paste0(ESEUR_dir, "sourcecode/tokenonline.csv.xz"), as.is=TRUE)
# restrict to .c files and drop the long-line tail (>= 400 characters)
cf=subset(ll, file_suff == ".c")
cf=subset(cf, characters < 400)
# line-length distribution on log-log axes
plot(cf$characters, cf$occurrences, log="xy", col=point_col,
xlab="Characters on line", ylab="Lines\n")
# plot(tl$tokens, tl$lines, log="y", col=point_col,
# xlab="Tokens on line", ylab="Lines\n")
# nesting-depth distribution on a log y-axis
plot(sn$nesting, sn$occurrences, log="y", col=pal_col[1],
xaxs="i",
xlim=c(0, 25),
xlab="Nesting level", ylab="Selection-statements\n")
# Fit log(occurrences) as a linear function of nesting depth using
# observations 2:13 only, then overlay the back-transformed fit.
# NOTE(review): `=` is used for assignment throughout this script; the
# conventional R assignment operator is `<-`.
mod_113=glm(log(occurrences) ~ nesting, data=sn, subset=2:13)
pred=predict(mod_113)
lines(1:12, exp(pred), col=pal_col[2])
# Embedded C data from Engblom <book Engblom_98>
#
# emb=data.frame(nesting=1:10,
# occurrences=c(0.495, 0.196, 0.095, 0.067, 0.065,
# 0.063, 0.019, 0.014, 0.007, 0.008))
#
# points(emb$nesting, 1e5*emb$occurrences)
|
a3fadcf98d9345965301233360ba63bdcd27189b | 5675371ff792c9e1e80a3b7e0c1884093c41b5ac | /PlotAll.R | 1de83670f2bf37fe6cc41bb24c360af248271ebc | [] | no_license | jacosmuts/exploratory1 | f959109c7aaf0e65e9a5fd6202d675c4ae1547c5 | d526dde265ac5218e08cd942b613504f6608891f | refs/heads/master | 2021-01-25T09:53:27.471482 | 2015-06-07T23:37:43 | 2015-06-07T23:37:43 | 37,015,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | PlotAll.R | source('DataPrep.R')
# Load the four plot scripts; presumably each one defines the matching
# write_plotN() used by write_all() below -- confirm against those files.
source('plot1.R')
source('plot2.R')
source('plot3.R')
source('plot4.R')
# get_data() comes from DataPrep.R, sourced earlier in this file.
# NOTE(review): `data` shadows the base R utils::data() function --
# consider a more specific name.
data <- get_data()
# Write every exploratory plot to disk by invoking each plot script's
# writer function in order (plot 1 through plot 4).
write_all <- function() {
  # The first three writers are called purely for their file-writing
  # side effects; the final call is the function's last expression, so
  # its result is returned exactly as in a plain sequence of calls.
  for (writer in list(write_plot1, write_plot2, write_plot3)) {
    writer()
  }
  write_plot4()
}
e34dd419e0214b20e5b6c295fe65c73d0aff8a05 | 2171f3867747a89929b1ad396afb855b09896309 | /man/example_dr1.Rd | 53f5f53bca44bcfbc7d547f0d6d5dfd05ab0f9c5 | [
"MIT"
] | permissive | aelhossiny/rDeepMAPS | 4c1a0a0bcdad4bf6240984b9a47da55250c1fa8e | ec6b742f9f42dc4f1a40a392ddfe8d3610ab9e63 | refs/heads/master | 2023-06-09T20:02:19.866887 | 2021-07-07T02:56:57 | 2021-07-07T02:56:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 261 | rd | example_dr1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regulon_service.R
\name{example_dr1}
\alias{example_dr1}
\title{Run DR}
\usage{
example_dr1(tf = c("CTCF", "DEAF1"), ct1 = c(0, 1), ct2 = c(2, 3))
}
\value{
}
\description{
Run DR
}
|
57e402030c38086bdb8c781e7c47e95925f2ab04 | 8c9cb896a089216b204a7de405011f43a52eae87 | /src/functions/gc_content.R | ea0c735135adac0d3e62e506f36fbd2b3649eb92 | [] | no_license | timchisamore/rosalind_problems | 5c6912ff36d294eb9b75a64ad921f0061937be5c | b0a4cccfcd15ec7cee14503008df0fc90e744397 | refs/heads/master | 2021-01-02T11:17:08.403280 | 2020-02-12T02:12:30 | 2020-02-12T02:12:30 | 239,597,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 384 | r | gc_content.R | #' Finding the GC Content
#'
#' Computes the percentage of bases in a DNA sequence string that are
#' either G (guanine) or C (cytosine).
#'
#' @param s A character scalar containing a DNA sequence, e.g. "ACTG".
#'
#' @return A numeric percentage between 0 and 100 (not a 0-1 proportion;
#'   the result is multiplied by 100).
#' @export
#'
#' @examples
#' gc_content("ACTG")
#' gc_content("ACCTGTTGACCAA")
gc_content <- function(s) {
  # Count each base once and reuse the result; the previous version
  # called count_dna_bases() twice on the same input.
  base_counts <- count_dna_bases(s)
  # NOTE(review): assumes count_dna_bases() returns counts in A, C, G, T
  # order, so positions 2 and 3 are C and G -- confirm against its definition.
  100 * (sum(base_counts[c(2, 3)]) / sum(base_counts))
}
|
a7f6376121a4961dac616d34293846107cfffed3 | 8a87ac5a13ed42ef99a7f0e566e07e32207c885a | /refm/doc/download.rd | c09860b4c0b40433f6c20f26d73c0f0d3b5fc18b | [
"CC-BY-3.0"
] | permissive | mrkn/rubydoc | cf096e253afbe11d94a7b390645f18872754e5f1 | dff08665e8c537073d3ebab5b98d4bcd175055b5 | refs/heads/master | 2021-01-18T14:54:27.743911 | 2010-08-15T06:15:58 | 2010-08-15T06:15:58 | 876,651 | 1 | 0 | null | null | null | null | EUC-JP | R | false | false | 1,650 | rd | download.rd | = マニュアルダウンロード
この[[unknown:リファレンスマニュアル|Rubyリファレンスマニュアル]]は以下からまとめてダウンロードできます。
HTML 形式 (2005-11-29版)
* EUC
[[m:tar.bz2]],
[[unknown:zip|URL:http://www.ruby-lang.org/ja/man/archive/ruby-man-ja-html-20051129.zip]]
* UTF8
[[m:tar.bz2]],
[[unknown:zip|URL:http://www.ruby-lang.org/ja/man/archive/ruby-man-ja-utf8-html-20051129.zip]]
上記以外の形式などについては以下のページを参照してください。
* [[unknown:"HTML版 http://www.ruby-lang.org/ja/man/archive/"|URL:http://www.ruby-lang.org/ja/man/archive/]]
* [[unknown:"Windows HTML Help版と分割HTML http://elbereth-hp.hp.infoseek.co.jp/ruby.html"|URL:http://elbereth-hp.hp.infoseek.co.jp/ruby.html]]
* [[unknown:"Rubyドキュメント http://www.ruby-lang.org/ja/20020107.html"|URL:http://www.ruby-lang.org/ja/20020107.html]]
また、最新の[[c:RD]]形式のソースは以下からダウンロードできます。
毎時17分頃のチェックで変更があった時に更新されます。
* [[m:URL:http://www.ruby-lang.org/ja/man/man-rd-ja.tar.gz]] (reject 以外)
* [[m:URL:http://www.ruby-lang.org/ja/man/man-rd-ja-reject.tar.gz]] (reject のページのみ)
頭に(('### reject'))がついているページは[[unknown:man-rd-ja.tar.gz|URL:http://www.ruby-lang.org/ja/man/man-rd-ja.tar.gz]]に含まれないので、
RWikiで使う場合には [[unknown:man-rd-ja-reject.tar.gz|URL:http://www.ruby-lang.org/ja/man/man-rd-ja-reject.tar.gz]] を展開してman-rd-ja-reject/*.rdをman-rd-ja/直下に追加すると便利です。
|
32fc13dc2024665850c8c9f2dd88554ebe8b4754 | b264dc5e70fbceb7322690bdb2065b330d94b644 | /R_code/all_pdbs.R | 64f8dddf895b23b8dcbcc6c4fe3e6c4d14923b1e | [] | no_license | yobi-livingstone/Analysis-of-Antibody-VH-VL-Packing | 6d2c930301d0c2759107623be904dbed30dfe0fb | b43db1a7ad224fac2a9d5e5fb141fdad41055498 | refs/heads/master | 2022-02-16T14:15:01.281607 | 2019-09-12T13:59:01 | 2019-09-12T13:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 117,798 | r | all_pdbs.R | all_pdbs<-c(-39.828697,
-43.822898,
-42.917285,
-43.350009,
-47.443229,
-45.313827,
-44.448832,
-52.831369,
-48.487318,
-42.713188,
-46.496476,
-48.549944,
-53.084738,
-51.905931,
-46.328987,
-46.960871,
-45.67456,
-43.826983,
-47.524914,
-43.326059,
-41.765888,
-49.739157,
-48.503628,
-42.801669,
-45.446059,
-37.355853,
-47.113828,
-48.934848,
-44.557802,
-45.083678,
-59.918745,
-47.245886,
-42.292975,
-44.937963,
-43.247467,
-42.505273,
-42.290631,
-44.415073,
-43.067016,
-46.860705,
-45.15624,
-46.186489,
-48.238867,
-45.118054,
-44.685997,
-39.1937,
-38.310783,
-48.838392,
-41.7228,
-38.796198,
-43.506117,
-43.187669,
-43.734953,
-44.302664,
-48.727618,
-45.047629,
-43.683897,
-46.372599,
-54.33679,
-47.789796,
-44.526028,
-55.652163,
-45.330903,
-44.286072,
-46.093907,
-41.844744,
-45.673104,
-48.274337,
-41.748568,
-49.485641,
-43.842739,
-41.948726,
-41.631092,
-41.56632,
-41.995256,
-47.602883,
-48.044041,
-41.211097,
-49.394353,
-45.836418,
-46.178344,
-47.531889,
-40.578977,
-45.968944,
-39.296914,
-46.757426,
-44.659265,
-48.751531,
-40.035517,
-52.778407,
-38.419172,
-43.387872,
-47.442519,
-39.944493,
-42.726005,
-46.887805,
-45.178472,
-41.255835,
-47.210325,
-43.374371,
-41.498271,
-47.687581,
-37.643801,
-48.304021,
-45.167047,
-44.912993,
-39.251262,
-49.181138,
-41.371729,
-41.01092,
-48.487614,
-44.716061,
-45.19264,
-49.576149,
-44.59721,
-46.663747,
-39.782682,
-47.455167,
-47.582683,
-46.970917,
-45.059848,
-44.293841,
-44.942361,
-54.858094,
-42.476021,
-47.622724,
-41.62096,
-42.73512,
-42.572916,
-45.452673,
-46.786486,
-37.209346,
-48.177528,
-49.664415,
-47.058433,
-42.466745,
-47.083492,
-50.032873,
-55.01781,
-47.600777,
-47.472028,
-42.450944,
-46.385281,
-40.837163,
-43.165337,
-47.73351,
-47.423959,
-43.369235,
-45.819407,
-52.396799,
-43.743775,
-44.706318,
-43.926597,
-42.846174,
-42.693679,
-47.01889,
-40.53309,
-42.48336,
-48.447546,
-46.488931,
-48.923284,
-43.745366,
-48.546157,
-45.490127,
-44.158833,
-42.848491,
-44.836931,
-50.553583,
-46.961861,
-49.356946,
-47.682891,
-46.008193,
-43.008261,
-40.991834,
-46.972329,
-43.468291,
-44.073583,
-46.441301,
-45.937249,
-53.450927,
-46.016645,
-47.38845,
-46.186828,
-47.987578,
-37.031072,
-35.30233,
-52.440286,
-45.181331,
-43.160445,
-38.21292,
-48.481521,
-45.136829,
-45.402211,
-43.199755,
-45.525873,
-43.15486,
-42.799171,
-51.549599,
-42.075899,
-46.258104,
-46.016879,
-39.684817,
-42.931373,
-49.284433,
-43.677404,
-48.955871,
-45.348132,
-45.419558,
-44.534289,
-42.497268,
-47.621681,
-45.174954,
-43.626238,
-50.928477,
-46.935835,
-43.80241,
-42.980483,
-46.352433,
-51.058098,
-45.233043,
-41.142423,
-44.373782,
-47.972624,
-42.36199,
-47.882542,
-45.867731,
-41.197101,
-41.627968,
-45.003066,
-50.41114,
-45.284708,
-37.021613,
-45.136672,
-52.152121,
-43.045997,
-50.340559,
-51.47442,
-46.52975,
-43.616874,
-49.685099,
-32.43931,
-39.60583,
-45.589493,
-40.753638,
-46.4519,
-48.091485,
-43.847356,
-49.189375,
-42.952551,
-48.130025,
-46.839637,
-43.844067,
-46.344126,
-45.85095,
-37.728481,
-44.796014,
-49.394873,
-49.679092,
-43.778961,
-36.886678,
-41.298243,
-41.664663,
-46.299561,
-44.234045,
-50.308879,
-42.715123,
-43.949452,
-42.471696,
-45.101486,
-48.010755,
-41.003996,
-47.18964,
-43.435961,
-48.666063,
-46.824448,
-47.1876,
-45.568542,
-50.948274,
-46.365881,
-47.780751,
-44.933273,
-43.181105,
-47.410916,
-40.239264,
-47.480851,
-48.59435,
-38.637546,
-50.977908,
-44.131609,
-48.466339,
-41.436482,
-50.91099,
-44.111994,
-48.062351,
-46.684385,
-40.904658,
-42.187088,
-45.473923,
-48.461204,
-50.378183,
-44.212825,
-40.799008,
-47.161714,
-49.046438,
-43.401478,
-44.410667,
-49.338033,
-44.322849,
-44.10368,
-44.691768,
-47.661454,
-47.929002,
-47.476155,
-44.078527,
-46.286848,
-48.884909,
-43.124385,
-42.591013,
-49.2341,
-45.463556,
-47.653834,
-42.820225,
-46.149115,
-45.425363,
-45.934623,
-41.308787,
-43.799454,
-44.731941,
-48.124573,
-46.746576,
-43.809195,
-53.718679,
-46.360096,
-48.727937,
-44.264067,
-42.608714,
-49.444071,
-43.089919,
-41.879728,
-40.943872,
-44.183804,
-40.016456,
-45.299794,
-47.636529,
-46.856039,
-46.760902,
-43.880324,
-47.377589,
-43.42368,
-41.549115,
-39.225023,
-54.191804,
-42.287815,
-44.67666,
-49.744491,
-41.423771,
-43.847462,
-44.442725,
-40.594015,
-45.01079,
-47.48114,
-43.658401,
-44.905804,
-41.666537,
-41.071752,
-46.79768,
-48.889235,
-42.615497,
-47.781798,
-40.607853,
-47.415261,
-42.763725,
-47.019177,
-45.744457,
-51.94678,
-49.406182,
-47.630609,
-42.861134,
-44.438128,
-49.770021,
-45.00007,
-40.09177,
-41.931049,
-45.909369,
-39.124763,
-46.298305,
-48.521883,
-46.137536,
-42.286383,
-47.147621,
-48.169614,
-46.671863,
-41.656049,
-47.213414,
-50.52257,
-46.011578,
-41.932153,
-47.794529,
-48.061059,
-44.781974,
-46.591928,
-42.866327,
-47.07253,
-44.785607,
-43.909732,
-45.37497,
-43.822142,
-43.733415,
-46.036971,
-47.938083,
-46.076507,
-50.975224,
-45.212327,
-45.865696,
-44.1208,
-48.361574,
-44.622257,
-43.36967,
-42.834958,
-49.232197,
-39.931721,
-45.425926,
-42.833281,
-41.622307,
-45.130232,
-43.231425,
-44.698808,
-47.952654,
-40.662254,
-42.156815,
-43.471509,
-41.607434,
-44.931364,
-44.815235,
-54.194112,
-46.276657,
-45.107407,
-49.951937,
-50.134868,
-52.338404,
-46.874917,
-38.662947,
-46.531847,
-53.998476,
-46.255557,
-46.736646,
-55.510213,
-49.672874,
-44.301843,
-49.414815,
-48.747317,
-42.192881,
-44.222162,
-45.125791,
-43.815529,
-48.539939,
-48.217831,
-52.344607,
-42.610818,
-48.137882,
-51.976443,
-43.225381,
-47.998119,
-45.001256,
-48.688908,
-53.300589,
-42.124242,
-50.574686,
-44.147327,
-42.70685,
-51.799965,
-44.744259,
-41.75818,
-44.567773,
-46.524276,
-45.171196,
-45.955109,
-42.946568,
-47.063196,
-43.633076,
-46.318686,
-45.440745,
-46.864964,
-47.347823,
-43.215635,
-44.508896,
-49.086711,
-43.903332,
-48.595892,
-42.771884,
-47.889897,
-54.448248,
-47.492993,
-45.855633,
-48.483805,
-53.719726,
-47.204289,
-42.209756,
-44.110738,
-46.112282,
-56.345118,
-50.721461,
-48.418665,
-41.380986,
-47.949441,
-43.001026,
-42.642757,
-46.830622,
-44.951175,
-47.791561,
-42.489772,
-47.233874,
-46.609391,
-53.819853,
-46.584057,
-48.443511,
-45.102267,
-44.854657,
-43.895436,
-51.783916,
-41.820295,
-45.479346,
-45.383736,
-44.31814,
-49.746572,
-45.595445,
-43.042272,
-47.414216,
-48.72929,
-41.219549,
-46.846714,
-40.820351,
-44.870534,
-45.907193,
-48.483829,
-46.801443,
-46.048856,
-42.942243,
-47.18499,
-45.008285,
-45.115281,
-48.881371,
-46.28323,
-44.592557,
-48.787941,
-48.840601,
-49.750087,
-39.410199,
-43.731925,
-46.967318,
-45.056367,
-44.130435,
-48.219096,
-42.822229,
-46.60639,
-47.345352,
-43.38747,
-52.849021,
-44.659867,
-45.365689,
-46.145267,
-48.333301,
-46.683375,
-44.142629,
-46.534233,
-41.914307,
-48.767496,
-43.210733,
-41.176234,
-50.289387,
-45.864349,
-43.844248,
-48.96108,
-43.887324,
-49.229956,
-42.073201,
-48.909567,
-40.260913,
-40.628343,
-43.168638,
-47.308262,
-45.531482,
-48.120096,
-44.01468,
-48.21459,
-47.381881,
-45.933728,
-45.607565,
-46.620375,
-41.709929,
-43.824511,
-43.758285,
-45.041458,
-46.697349,
-46.265944,
-47.824892,
-46.668898,
-46.832052,
-44.697561,
-48.936859,
-48.544346,
-46.739548,
-46.964006,
-50.726467,
-38.669553,
-47.607746,
-44.504229,
-41.590399,
-46.482105,
-48.281727,
-49.026769,
-43.248795,
-48.976062,
-48.428714,
-46.911204,
-46.544961,
-48.842083,
-46.366911,
-39.21405,
-39.414819,
-45.032676,
-37.646686,
-45.679841,
-46.02563,
-47.704509,
-47.539642,
-46.679803,
-42.740485,
-45.508963,
-46.768938,
-44.24055,
-40.375772,
-43.902285,
-45.45788,
-42.35259,
-45.5606,
-48.112794,
-44.42247,
-47.786161,
-42.056846,
-43.106902,
-42.706588,
-44.556942,
-48.633659,
-43.456211,
-44.153381,
-44.25356,
-41.791057,
-56.361995,
-50.215356,
-41.255016,
-40.003297,
-47.022983,
-44.516315,
-46.736385,
-44.365164,
-47.604093,
-47.166156,
-42.529194,
-50.581197,
-46.586141,
-44.989533,
-46.720812,
-46.714404,
-41.287383,
-44.615047,
-50.368151,
-44.077999,
-42.616924,
-43.464253,
-45.940727,
-52.748785,
-40.627543,
-50.327276,
-46.445904,
-44.579022,
-48.650251,
-34.987894,
-47.423484,
-50.011004,
-45.376789,
-39.004904,
-49.960329,
-42.316354,
-38.52012,
-53.800248,
-43.230659,
-51.162317,
-49.100606,
-42.999104,
-43.470772,
-48.998547,
-42.107663,
-42.600344,
-44.140564,
-50.751474,
-46.489973,
-46.116987,
-41.447869,
-50.594746,
-48.001622,
-45.161096,
-46.664165,
-53.167122,
-42.538688,
-44.831978,
-48.477691,
-50.343768,
-43.968006,
-42.462093,
-43.402619,
-41.559012,
-45.358784,
-45.641977,
-49.440336,
-44.016817,
-45.393408,
-42.701511,
-42.22969,
-42.862966,
-38.407605,
-43.677477,
-52.56685,
-48.48808,
-46.979046,
-57.877257,
-43.720044,
-44.457468,
-48.745773,
-43.037725,
-46.35993,
-43.952334,
-51.966417,
-43.542444,
-47.441604,
-44.031775,
-45.660026,
-46.717288,
-45.309914,
-49.162829,
-44.522103,
-52.396246,
-46.48835,
-40.821611,
-46.22182,
-46.728892,
-44.323212,
-45.748933,
-44.104198,
-42.802298,
-41.808574,
-55.106534,
-69.380093,
-47.045543,
-44.206611,
-49.5688,
-50.256122,
-43.305199,
-45.037292,
-41.908176,
-46.26574,
-48.153178,
-48.028872,
-43.258283,
-49.116649,
-40.798966,
-48.769102,
-50.085725,
-50.236336,
-40.303252,
-48.769722,
-45.788607,
-46.399094,
-42.356905,
-41.779602,
-44.640924,
-47.849295,
-43.879952,
-43.559205,
-45.30041,
-45.995254,
-44.142898,
-46.454182,
-49.101583,
-42.087706,
-45.717132,
-41.347074,
-36.247781,
-54.270663,
-42.53874,
-42.447893,
-37.396074,
-53.518382,
-44.225727,
-47.192644,
-53.383108,
-45.418577,
-36.901752,
-49.596874,
-48.565214,
-47.655514,
-47.361235,
-48.588499,
-47.259264,
-44.224906,
-44.409922,
-45.722554,
-45.258495,
-48.013459,
-49.088146,
-46.872236,
-49.885602,
-43.901082,
-48.157613,
-47.252646,
-51.834002,
-47.1155,
-44.76171,
-45.591764,
-41.249699,
-43.449957,
-44.604471,
-46.485591,
-47.530095,
-43.971718,
-46.300415,
-51.093078,
-42.384656,
-46.3157,
-51.933304,
-44.931349,
-48.294842,
-41.500409,
-42.325949,
-42.11582,
-47.975446,
-42.233522,
-44.452145,
-40.322992,
-52.013269,
-47.392459,
-41.847504,
-47.302052,
-51.698886,
-48.032458,
-41.961739,
-47.960491,
-42.28115,
-42.655005,
-44.717152,
-47.325604,
-41.061676,
-47.18687,
-46.543675,
-44.991561,
-48.289022,
-45.677669,
-45.156278,
-43.851928,
-50.227314,
-41.161335,
-52.712299,
-43.819034,
-49.879456,
-47.528629,
-43.348154,
-36.566786,
-46.945596,
-44.276304,
-43.276721,
-40.872345,
-61.657292,
-45.210255,
-48.405755,
-39.962651,
-38.60643,
-48.209378,
-49.253862,
-45.559857,
-41.123638,
-53.644047,
-47.726257,
-49.533993,
-46.721782,
-47.413242,
-45.365629,
-40.871009,
-51.403514,
-40.344936,
-45.371394,
-45.892477,
-52.340541,
-46.62192,
-45.563756,
-50.395503,
-39.795905,
-46.150515,
-43.712156,
-48.486675,
-41.381837,
-50.946659,
-45.584636,
-37.855281,
-46.700364,
-52.942467,
-44.103649,
-43.83072,
-47.288219,
-40.476492,
-42.628647,
-51.656726,
-50.066999,
-43.560923,
-44.555471,
-46.397652,
-48.843751,
-48.585412,
-43.243011,
-45.051928,
-44.247857,
-45.061879,
-47.904083,
-42.640815,
-45.676294,
-46.774768,
-46.830622,
-41.176727,
-51.235284,
-41.735041,
-41.599259,
-45.717491,
-47.54351,
-50.450571,
-43.167252,
-41.529625,
-51.290498,
-42.378713,
-46.275773,
-43.349505,
-48.28765,
-44.701982,
-43.683378,
-44.106788,
-47.710344,
-41.438563,
-42.256827,
-43.140213,
-40.271286,
-45.316484,
-51.990054,
-48.419546,
-46.888662,
-46.715502,
-44.301128,
-44.606407,
-49.3677,
-45.325386,
-45.364222,
-51.274859,
-44.083438,
-49.676482,
-40.432206,
-45.936025,
-44.014837,
-45.590031,
-42.367971,
-47.778057,
-41.214367,
-47.167824,
-43.003928,
-41.800801,
-47.250583,
-48.975839,
-39.885992,
-41.638902,
-43.180882,
-46.350592,
-48.506659,
-49.030799,
-45.755622,
-50.827697,
-48.823543,
-47.869519,
-55.363583,
-44.440114,
-45.31166,
-43.858657,
-47.634336,
-47.542118,
-45.414404,
-47.949418,
-44.203672,
-46.374768,
-46.111603,
-49.682331,
-50.547115,
-45.432032,
-44.447166,
-42.032385,
-43.985565,
-47.705156,
-42.788763,
-44.287643,
-46.617305,
-48.510011,
-47.328573,
-45.453049,
-46.497704,
-44.366359,
-51.511609,
-51.251668,
-50.667614,
-51.276554,
-44.72013,
-43.320995,
-46.743164,
-45.472482,
-46.888134,
-43.10944,
-43.372392,
-43.383318,
-46.117591,
-45.981224,
-38.043143,
-46.530744,
-43.192657,
-46.402041,
-45.496632,
-50.899163,
-40.776653,
-45.883823,
-44.019576,
-47.211435,
-43.489585,
-46.849588,
-46.289901,
-39.87347,
-49.281529,
-42.909403,
-42.508213,
-46.907417,
-41.864953,
-45.356227,
-51.042388,
-43.976172,
-51.437396,
-45.624313,
-48.441806,
-43.341723,
-47.499828,
-46.709466,
-46.232713,
-44.248734,
-43.952445,
-43.17661,
-46.854266,
-46.906302,
-46.064455,
-45.061151,
-48.335771,
-43.528296,
-40.21558,
-51.246583,
-49.679884,
-43.067877,
-42.200221,
-49.068319,
-43.679203,
-43.375769,
-45.890669,
-47.593472,
-45.825776,
-43.941053,
-41.85391,
-47.120113,
-48.673797,
-45.072992,
-46.296897,
-47.63005,
-41.870931,
-47.69691,
-46.460673,
-45.44938,
-47.848843,
-47.373765,
-51.999887,
-42.775525,
-43.09547,
-41.808456,
-43.105067,
-50.386467,
-42.358563,
-45.586014,
-42.793627,
-48.465545,
-47.958172,
-42.728091,
-42.841189,
-49.251904,
-40.935975,
-41.271989,
-45.381346,
-44.620143,
-41.271008,
-41.24741,
-49.097498,
-48.214479,
-43.026535,
-43.748319,
-44.268007,
-47.290211,
-49.905096,
-42.356498,
-49.224894,
-39.984,
-44.365926,
-43.80625,
-52.211268,
-49.772059,
-45.658182,
-50.293941,
-48.587096,
-43.465213,
-43.136449,
-52.238982,
-49.220648,
-49.422962,
-42.328178,
-48.356267,
-45.876453,
-51.279988,
-45.674556,
-48.049251,
-43.610797,
-39.60604,
-45.713037,
-41.554471,
-53.059794,
-43.330275,
-44.692371,
-48.449087,
-45.939724,
-38.709239,
-40.694062,
-42.681972,
-44.744416,
-46.606979,
-43.494911,
-47.900695,
-42.471789,
-44.97863,
-47.338401,
-47.428917,
-45.376784,
-48.35185,
-44.872391,
-46.928593,
-33.490381,
-44.822234,
-47.211957,
-45.372765,
-44.383226,
-45.929722,
-46.626704,
-42.297805,
-41.216118,
-48.109944,
-48.756191,
-45.472262,
-43.199975,
-40.80336,
-46.07003,
-44.64892,
-47.885645,
-46.543504,
-43.926146,
-50.273539,
-46.434181,
-48.786314,
-44.223906,
-46.866972,
-46.26883,
-47.562567,
-45.959992,
-47.869582,
-47.450631,
-55.711393,
-46.245022,
-43.289869,
-45.377449,
-45.74373,
-43.720888,
-48.597986,
-47.094781,
-45.050952,
-41.781858,
-45.744117,
-46.509328,
-47.085761,
-44.397053,
-47.901073,
-46.345566,
-48.159356,
-43.043852,
-43.214756,
-46.57317,
-46.276644,
-43.896453,
-44.695747,
-43.825909,
-43.382061,
-47.971202,
-36.769199,
-48.487788,
-47.876705,
-44.442848,
-47.871463,
-43.521159,
-49.396706,
-46.800818,
-47.474173,
-49.453606,
-47.289566,
-41.369488,
-53.300589,
-41.953335,
-46.376326,
-39.198397,
-47.326523,
-41.272804,
-47.996991,
-42.38992,
-53.020476,
-46.093848,
-44.494258,
-50.804774,
-48.155957,
-48.761328,
-51.414246,
-43.496996,
-44.882693,
-44.323573,
-48.343795,
-44.351347,
-48.660623,
-44.126661,
-44.616905,
-49.515686,
-42.99214,
-59.543895,
-46.306505,
-48.687064,
-43.561005,
-46.323325,
-46.786709,
-45.227353,
-54.946972,
-53.385248,
-41.236998,
-51.097659,
-43.985154,
-47.782908,
-47.715432,
-43.53641,
-47.616869,
-44.31415,
-42.912672,
-43.830243,
-41.722076,
-46.665794,
-46.556012,
-50.331347,
-47.836813,
-51.20574,
-45.630522,
-44.357539,
-49.241214,
-40.677393,
-46.252105,
-39.371179,
-55.105222,
-47.501665,
-42.52176,
-41.440587,
-41.530935,
-45.607683,
-42.551908,
-43.201133,
-45.766288,
-43.134501,
-41.747777,
-48.873432,
-48.798233,
-45.716243,
-45.333514,
-49.709552,
-48.706101,
-42.769537,
-45.248765,
-45.193615,
-48.760018,
-57.835422,
-49.58459,
-43.877553,
-45.568935,
-50.083055,
-47.054833,
-44.20132,
-40.453091,
-49.729868,
-56.247432,
-41.452966,
-49.538291,
-46.026204,
-45.804723,
-43.649748,
-45.226678,
-48.673246,
-47.225486,
-48.173279,
-47.313526,
-46.903455,
-44.583378,
-41.992576,
-48.560701,
-41.908743,
-41.870119,
-41.098823,
-47.375961,
-45.761843,
-44.852258,
-43.615059,
-44.68309,
-42.624467,
-43.049909,
-40.701095,
-45.295226,
-51.731286,
-48.019272,
-45.565218,
-49.996058,
-44.460131,
-43.521638,
-43.961698,
-47.894849,
-50.559995,
-45.393433,
-37.449264,
-41.222561,
-47.776423,
-43.844869,
-43.356583,
-48.655657,
-49.932414,
-44.670335,
-41.645877,
-56.839463,
-48.903384,
-48.241896,
-45.423124,
-45.515461,
-46.655068,
-50.247678,
-47.470893,
-45.277608,
-53.31836,
-41.592718,
-47.68964,
-46.939407,
-45.561053,
-47.718186,
-44.279783,
-43.63499,
-46.229569,
-44.206807,
-43.211846,
-48.274187,
-45.921656,
-42.2609,
-46.999378,
-47.470149,
-45.210256,
-34.330916,
-46.027085,
-43.707811,
-46.632477,
-49.907102,
-40.060314,
-43.14374,
-51.852333,
-46.728523,
-45.303756,
-45.491346,
-49.488094,
-51.543777,
-42.977262,
-46.652111,
-45.26427,
-47.616855,
-41.958504,
-48.794398,
-45.552359,
-42.55205,
-43.158132,
-46.053042,
-51.265995,
-44.925169,
-45.216939,
-55.709015,
-43.270494,
-42.396431,
-46.836916,
-44.795246,
-53.634179,
-42.287398,
-46.173719,
-45.466816,
-53.524524,
-46.427394,
-48.568149,
-45.539717,
-46.742625,
-42.97056,
-47.23842,
-43.089297,
-44.186546,
-42.757237,
-48.911734,
-47.92045,
-40.216118,
-45.621234,
-47.797698,
-46.170452,
-46.687205,
-41.537385,
-45.973407,
-50.321579,
-42.473848,
-47.217685,
-40.856559,
-43.108372,
-42.755486,
-48.604279,
-47.146952,
-46.955594,
-51.090624,
-41.561898,
-41.448596,
-45.556254,
-50.315738,
-54.195066,
-59.928802,
-40.788417,
-39.686654,
-50.851828,
-47.876774,
-44.754013,
-44.781147,
-47.516269,
-46.425311,
-46.412719,
-48.716419,
-46.346227,
-48.679393,
-45.632052,
-45.941018,
-41.199991,
-44.13159,
-47.099835,
-47.93799,
-48.247666,
-48.079646,
-47.027513,
-45.884054,
-47.46666,
-46.940346,
-46.025122,
-51.345798,
-43.729105,
-47.633655,
-51.82508,
-45.665423,
-41.829918,
-38.289517,
-44.627331,
-45.472623,
-47.737137,
-41.909974,
-48.828681,
-46.278499,
-47.664798,
-47.389278,
-41.883612,
-44.752526,
-51.310202,
-42.71252,
-47.919243,
-47.224245,
-38.401663,
-50.448727,
-43.690159,
-46.674003,
-42.299583,
-43.684925,
-39.526944,
-48.8464,
-45.976573,
-42.920964,
-46.00523,
-47.96359,
-51.586714,
-41.316705,
-48.904699,
-46.323769,
-47.561376,
-42.716417,
-47.91901,
-43.098119,
-41.440348,
-43.509195,
-43.734691,
-39.99632,
-40.103981,
-40.252531,
-40.990673,
-43.249886,
-42.276617,
-47.694946,
-45.522226,
-46.398781,
-42.05952,
-45.199476,
-42.787411,
-39.373171,
-60.754947,
-42.496288,
-45.513639,
-48.14515,
-46.416726,
-47.44119,
-49.758284,
-43.213103,
-47.510002,
-43.873662,
-41.902054,
-45.134022,
-48.309935,
-43.116984,
-43.669725,
-46.689579,
-45.502389,
-47.886684,
-47.218021,
-49.714707,
-44.086669,
-51.008721,
-49.69249,
-45.517929,
-42.911953,
-46.656345,
-40.679815,
-41.579983,
-44.846713,
-42.639621,
-44.908246,
-50.852905,
-45.402503,
-49.776059,
-40.481815,
-49.169424,
-48.228249,
-49.885356,
-45.804687,
-44.23596,
-47.424194,
-43.281576,
-46.307666,
-50.037076,
-44.105266,
-44.130828,
-47.58778,
-46.011373,
-41.779486,
-44.642162,
-46.987991,
-43.542095,
-47.391534,
-51.003432,
-47.977233,
-44.572643,
-43.887769,
-46.234287,
-46.059673,
-46.786942,
-43.283158,
-45.128605,
-44.329565,
-40.421545,
-49.928241,
-47.434829,
-46.719607,
-46.042565,
-46.290988,
-42.720047,
-54.858094,
-49.890594,
-43.646623,
-43.642118,
-45.122763,
-45.0229,
-45.002443,
-45.294564,
-46.963874,
-57.243705,
-48.646383,
-51.780263,
-48.401007,
-46.830861,
-47.345243,
-49.061053,
-45.079968,
-47.604851,
-57.060024,
-48.206188,
-46.258368,
-43.216197,
-42.189616,
-45.669397,
-41.902173,
-50.545476,
-44.795812,
-46.099664,
-45.856883,
-46.017607,
-40.905454,
-42.635655,
-44.170804,
-46.030773,
-39.622872,
-49.715874,
-48.408616,
-51.284307,
-44.309554,
-46.959267,
-50.664054,
-44.998048,
-44.195536,
-46.554972,
-46.457033,
-47.115197,
-46.22283,
-48.086883,
-51.098349,
-46.191238,
-43.691472,
-46.692718,
-43.218048,
-40.754493,
-42.400642,
-49.715468,
-53.403346,
-46.973334,
-47.348084,
-46.342691,
-43.975922,
-46.444842,
-46.124987,
-45.410615,
-50.312865,
-42.521013,
-50.880604,
-43.945977,
-47.880827,
-49.041477,
-43.471988,
-54.942641,
-42.168623,
-46.303766,
-40.68968,
-43.995358,
-41.213695,
-46.215691,
-45.420152,
-47.735503,
-46.839014,
-48.855173,
-45.631725,
-46.414348,
-43.029816,
-48.787561,
-47.325082,
-43.60144,
-43.529347,
-43.344574,
-38.609404,
-44.736934,
-47.28384,
-43.57007,
-47.516785,
-43.202594,
-50.907077,
-47.55734,
-49.234653,
-50.39466,
-47.843733,
-42.22235,
-49.743682,
-44.460903,
-51.424628,
-43.541905,
-47.618688,
-47.424828,
-45.58457,
-51.129708,
-44.330843,
-43.962035,
-41.777594,
-50.713092,
-48.775355,
-29.193569,
-42.328772,
-49.295451,
-48.413822,
-46.882807,
-41.591613,
-47.419967,
-42.930949,
-48.825383,
-45.69223,
-40.260609,
-41.827921,
-45.973792,
-49.586692,
-44.225817,
-48.62105,
-39.780726,
-45.539494,
-44.494848,
-36.408443,
-46.775283,
-50.829993,
-46.088765,
-47.156513,
-42.427398,
-46.030361,
-47.902149,
-42.582036,
-45.109615,
-43.768615,
-45.854967,
-45.437722,
-42.799031,
-44.447138,
-47.151166,
-48.920107,
-48.039101,
-47.80396,
-43.724703,
-40.947139,
-47.548557,
-46.398773,
-40.76923,
-44.552245,
-43.129042,
-41.644956,
-49.781246,
-45.710101,
-46.808503,
-50.824393,
-39.534416,
-45.59113,
-45.306491,
-42.938052,
-43.129196,
-42.31302,
-38.953864,
-47.54117,
-46.304039,
-49.853367,
-44.988988,
-43.016747,
-42.1726,
-43.02074,
-45.876927,
-41.831133,
-44.320978,
-40.753638,
-49.569316,
-47.616347,
-41.50958,
-40.783564,
-45.431898,
-44.600566,
-44.599219,
-46.574146,
-42.478604,
-46.49022,
-52.599524,
-48.53033,
-43.870562,
-41.393927,
-45.813747,
-48.313854,
-40.918274,
-50.270941,
-50.767763,
-46.78667,
-51.011138,
-47.806619,
-48.818219,
-45.409618,
-44.842485,
-42.313635,
-47.131067,
-47.601817,
-49.393806,
-47.27377,
-46.897948,
-43.643996,
-47.757646,
-46.010427,
-46.672424,
-54.192917,
-45.314503,
-46.082964,
-45.953439,
-47.388945,
-49.74357,
-47.915797,
-44.992577,
-41.759253,
-37.832892,
-47.420275,
-43.352158,
-47.533993,
-48.871412,
-47.826187,
-44.684481,
-33.961296,
-43.096037,
-44.807563,
-41.50123,
-50.004318,
-42.296135,
-51.2347,
-48.46682,
-41.291586,
-47.17759,
-54.740487,
-46.203512,
-49.115494,
-41.681581,
-44.068533,
-49.063691,
-42.628153,
-50.03061,
-43.248075,
-51.051752,
-43.86758,
-51.243062,
-47.232307,
-47.340173,
-49.987657,
-43.126365,
-48.606376,
-46.568153,
-43.054926,
-47.329656,
-43.125508,
-45.029598,
-45.469881,
-41.740987,
-43.679869,
-47.567105,
-43.858472,
-38.577102,
-46.485631,
-43.189149,
-44.695762,
-39.667293,
-46.684699,
-46.303208,
-40.265553,
-48.154014,
-46.530644,
-44.778299,
-44.570805,
-45.459838,
-46.533212,
-41.533412,
-53.643361,
-45.081043,
-42.910585,
-42.70238,
-42.619518,
-48.269864,
-40.129762,
-45.291598,
-46.029246,
-43.537515,
-42.820245,
-38.923794,
-46.456421,
-43.783194,
-43.864046,
-43.32369,
-47.52812,
-39.229653,
-48.238049,
-49.478695,
-40.67129,
-44.128908,
-44.533898,
-50.886183,
-42.481624,
-40.892482,
-46.027064,
-45.832723,
-41.558972,
-48.384245,
-40.559671,
-43.12524,
-44.133326,
-44.606585,
-47.608109,
-46.666899,
-46.539955,
-41.876758,
-46.036242,
-45.512514,
-41.14374,
-44.576998,
-43.28823,
-54.940998,
-47.034129,
-45.00686,
-43.25077,
-50.850217,
-43.465713,
-32.470757,
-49.805409,
-46.783624,
-44.227025,
-44.349676,
-41.097165,
-41.461286,
-43.494957,
-46.591223,
-44.764122,
-47.097693,
-49.783248,
-47.706093,
-42.13269,
-43.723455,
-45.109947,
-49.510173,
-44.151976,
-48.678327,
-42.456371,
-44.690405,
-48.585297,
-46.431457,
-50.470417,
-40.281063,
-44.552559,
-48.809146,
-45.886402,
-50.335688,
-44.29565,
-42.759852,
-45.655529,
-45.127643,
-42.048698,
-42.890381,
-53.432463,
-50.256537,
-44.972404,
-44.517033,
-56.891411,
-56.165137,
-46.272234,
-43.490584,
-43.995949,
-41.762177,
-48.283362,
-44.606509,
-43.873484,
-44.52247,
-45.254447,
-47.787449,
-43.532076,
-48.341105,
-48.488551,
-44.667821,
-41.994937,
-42.968715,
-44.843319,
-47.392537,
-50.671501,
-33.724721,
-30.539944,
-69.377528,
-45.995978,
-42.331978,
-49.149645,
-51.199271,
-50.351676,
-45.611908,
-41.05522,
-54.273723,
-40.892001,
-48.481879,
-47.589878,
-49.267606,
-43.502821,
-37.388797,
-37.593145,
-30.95127,
-44.466287,
-51.132384,
-49.290977,
-49.806308,
-48.267027,
-46.473508,
-44.28435,
-47.535694,
-40.429931,
-49.902566,
-45.765495,
-46.142819,
-41.38509,
-43.737927,
-44.668767,
-41.285207,
-44.226447,
-42.654584,
-50.989488,
-46.610209,
-54.060881,
-42.771432,
-48.722478,
-48.589647,
-48.62667,
-48.499141,
-41.737329,
-47.311401,
-49.269677,
-42.185267,
-48.567549,
-40.085165,
-46.712319,
-44.071951,
-45.619963,
-41.971541,
-45.01214,
-48.618213,
-47.373593,
-48.467323,
-43.145013,
-44.625168,
-45.476633,
-42.797091,
-45.381592,
-49.312087,
-44.812566,
-44.231502,
-43.6365,
-48.086087,
-49.257827,
-40.367787,
-44.235954,
-46.106847,
-46.399981,
-43.62895,
-49.10183,
-50.876091,
-50.527899,
-46.29367,
-49.556193,
-44.015731,
-41.675409,
-49.637085,
-41.636232,
-40.839794,
-58.576729,
-44.171961,
-47.898696,
-41.592233,
-49.004131,
-44.399723,
-42.956452,
-44.275232,
-48.448388,
-47.552519,
-38.12487,
-45.323592,
-45.737418,
-45.428856,
-48.606372,
-45.060023,
-42.400705,
-46.551727,
-46.115514,
-47.175226,
-42.778143,
-40.007661,
-30.884546,
-47.576484,
-50.597295,
-47.744176,
-59.805616,
-46.771338,
-45.757324,
-50.341836,
-49.089521,
-39.363454,
-43.862257,
-44.164255,
-43.814981,
-48.485492,
-44.807584,
-43.916285,
-47.649688,
-48.807819,
-51.405661,
-42.582056,
-40.397295,
-43.543207,
-46.931886,
-46.396106,
-41.0062,
-48.249761,
-48.412222,
-41.653801,
-40.647009,
-42.278345,
-48.976182,
-54.539741,
-47.410721,
-44.923257,
-43.960749,
-50.240744,
-42.366758,
-41.983734,
-49.584488,
-41.468687,
-45.011037,
-47.140955,
-38.915894,
-51.076982,
-45.434186,
-42.864726,
-49.662718,
-47.276182,
-43.548049,
-42.560823,
-43.221107,
-42.81164,
-49.200804,
-42.20883,
-43.299119,
-49.058096,
-49.083526,
-46.158072,
-43.855611,
-44.760129,
-42.923947,
-46.102289,
-43.557684,
-49.001792,
-42.44666,
-45.746044,
-46.040968,
-45.083473,
-45.11215,
-49.782582,
-52.240801,
-38.888263,
-43.260741,
-51.47051,
-55.566597,
-41.64492,
-49.354622,
-43.722215,
-48.720817,
-43.703777,
-41.852931,
-49.526513,
-45.790796,
-44.961093,
-45.539494,
-48.791603,
-45.79653,
-45.437718,
-45.518381,
-44.292657,
-46.819853,
-44.864977,
-40.66024,
-49.559026,
-49.928821,
-48.867266,
-49.139746,
-42.706192,
-43.415494,
-42.914116,
-49.393237,
-47.712566,
-50.432952,
-43.712447,
-45.414288,
-48.523584,
-49.295393,
-49.276722,
-47.215896,
-42.967856,
-48.530354,
-44.787581,
-47.961581,
-45.666847,
-49.45646,
-45.114018,
-43.803892,
-50.50022,
-45.069772,
-47.814488,
-47.23327,
-43.419794,
-40.591049,
-44.689748,
-40.368219,
-46.948963,
-47.701904,
-45.579282,
-47.477173,
-43.467443,
-37.128219,
-41.846477,
-42.611403,
-46.597192,
-44.004898,
-47.695092,
-43.869119,
-49.738911,
-43.76755,
-50.184002,
-45.161124,
-44.462235,
-45.637942,
-43.342148,
-45.302602,
-48.387393,
-42.957591,
-44.813603,
-47.691877,
-42.835063,
-41.912432,
-39.855571,
-43.967124,
-44.521392,
-43.886814,
-46.180153,
-49.926897,
-47.900894,
-44.35773,
-45.745002,
-46.69543,
-45.220456,
-49.62974,
-42.897542,
-44.757525,
-40.827888,
-51.032698,
-45.084696,
-48.085687,
-49.479373,
-38.302464,
-40.694293,
-51.07093,
-43.561434,
-51.201636,
-43.696462,
-43.262547,
-48.157664,
-45.816405,
-45.690411,
-49.503733,
-47.935107,
-44.586365,
-47.40837,
-46.051329,
-44.309465,
-44.795697,
-43.862218,
-44.932441,
-39.794388,
-43.484748,
-46.641123,
-45.876323,
-49.157725,
-46.146904,
-51.61034,
-41.850602,
-46.061837,
-51.503661,
-46.389709,
-47.26079,
-43.158697,
-41.616382,
-44.743061,
-46.507857,
-44.189898,
-52.16742,
-35.691373,
-47.0753,
-31.832248,
-47.094433,
-47.344143,
-39.262747,
-44.273678,
-35.376974,
-45.949518,
-43.531494,
-43.354391,
-41.969412,
-49.01101,
-47.521268,
-50.102078,
-46.11652,
-40.387561,
-47.250189,
-42.043294,
-44.048321,
-42.627812,
-42.18865,
-49.589811,
-43.462714,
-39.739372,
-44.060508,
-38.721474,
-43.51982,
-44.887753,
-44.934785,
-42.815162,
-42.818839,
-42.314879,
-46.346538,
-43.029958,
-45.489748,
-52.82797,
-43.767871,
-42.730806,
-43.363964,
-49.423203,
-44.980718,
-44.167935,
-51.949036,
-47.563792,
-35.667083,
-48.124628,
-47.515546,
-51.534976,
-43.089777,
-45.693934,
-49.588223,
-43.490867,
-45.435767,
-38.309815,
-49.394786,
-50.989591,
-51.233403,
-48.594526,
-45.397334,
-45.341919,
-47.931341,
-47.572601,
-47.858434,
-45.619044,
-43.389284,
-41.514048,
-46.679309,
-44.144713,
-52.44633,
-43.95397,
-45.791117,
-43.618034,
-45.584869,
-46.340627,
-38.655664,
-41.990275,
-49.476569,
-46.04404,
-41.907483,
-45.999887,
-44.886979,
-44.312978,
-43.863187,
-44.55057,
-45.795908,
-44.429503,
-47.723922,
-44.864029,
-41.444753,
-48.481101,
-45.050174,
-45.794925,
-43.38342,
-41.65789,
-46.846403,
-46.740821,
-47.801268,
-44.162878,
-46.297634,
-45.737696,
-45.997516,
-42.819607,
-44.903373,
-43.968681,
-49.027235,
-45.491083,
-45.082949,
-45.566452,
-46.660984,
-42.5507,
-54.86047,
-44.6297,
-42.457199,
-48.321304,
-44.784675,
-43.85095,
-42.172503,
-46.100518,
-46.537517,
-47.481465,
-44.934996,
-49.34867,
-37.67455,
-44.052087,
-41.425668,
-42.998013,
-46.293013,
-39.624774,
-53.072016,
-46.301217,
-44.422746,
-46.155905,
-45.621287,
-49.762657,
-43.44358,
-46.825981,
-43.524402,
-48.950197,
-44.50014,
-43.586758,
-48.859067,
-45.647194,
-31.236478,
-51.009745,
-45.274228,
-47.529549,
-44.183609,
-45.988686,
-42.230755,
-46.533433,
-51.004899,
-46.539394,
-43.434501,
-45.987884,
-43.873925,
-48.973943,
-44.252181,
-43.855062,
-45.19264,
-47.657023,
-48.252676,
-45.047184,
-47.618905,
-45.470957,
-47.795804,
-46.272607,
-46.385851,
-46.510322,
-43.50749,
-48.375006,
-46.770357,
-46.509572,
-43.187997,
-44.207662,
-45.072022,
-43.688832,
-46.541746,
-43.82813,
-46.050205,
-45.053912,
-44.067004,
-43.762381,
-48.420708,
-47.114601,
-48.541314,
-46.10003,
-44.886416,
-49.295363,
-46.586715,
-48.480545,
-48.514008,
-41.726379,
-47.935114,
-54.722359,
-48.076702,
-47.850943,
-48.183986,
-47.822932,
-40.956787,
-46.572752,
-44.254561,
-50.482156,
-46.579052,
-45.56262,
-43.583429,
-45.239856,
-48.482546,
-41.825366,
-47.251713,
-46.824059,
-45.353141,
-53.537325,
-47.842621,
-48.953195,
-47.958251,
-50.738413,
-43.652177,
-47.399545,
-41.89457,
-44.877688,
-52.142607,
-47.536399,
-45.874791,
-48.203285,
-43.739272,
-42.353198,
-51.233667,
-47.593785,
-44.968789,
-41.18348,
-44.203141,
-46.283695,
-48.713107,
-46.352654,
-41.961265,
-46.748277,
-48.925713,
-50.980202,
-42.068029,
-45.515826,
-45.631011,
-41.102674,
-43.153584,
-40.797023,
-38.474689,
-50.075214,
-50.275078,
-44.000891,
-47.685608,
-46.026238,
-49.860805,
-45.526054,
-45.973722,
-44.737139,
-44.492127,
-44.745134,
-44.066636,
-40.080254,
-49.300897,
-40.870365,
-43.215033,
-45.72263,
-43.668218,
-43.388081,
-43.31113,
-46.174292,
-35.801605,
-39.843473,
-47.010679,
-46.132892,
-56.911442,
-48.469263,
-46.289564,
-48.569109,
-41.812751,
-46.158484,
-46.281754,
-43.648224,
-49.861753,
-43.13918,
-40.917324,
-42.67108,
-42.580166,
-41.726642,
-49.067422,
-51.829194,
-45.560781,
-47.867097,
-45.225609,
-49.10034,
-41.958382,
-48.506261,
-45.408568,
-46.328059,
-43.819512,
-48.676082,
-45.539849,
-43.773339,
-37.535054,
-43.186982,
-47.121272,
-52.23141,
-45.759236,
-45.797389,
-47.837185,
-45.5582,
-47.333436,
-47.929678,
-46.606789,
-53.871741,
-46.565727,
-46.435895,
-47.028763,
-42.404084,
-46.778273,
-45.140618,
-47.683258,
-47.830008,
-51.228668,
-42.60826,
-44.740575,
-45.602713,
-41.332061,
-50.662019,
-48.952398,
-51.635387,
-44.835745,
-51.251272,
-49.020275,
-46.172537,
-45.755114,
-40.961918,
-47.425735,
-42.024373,
-45.904552,
-46.465687,
-38.634586,
-41.748949,
-42.86012,
-49.596424,
-43.376105,
-46.212599,
-48.936586,
-48.787189,
-43.351499,
-51.293517,
-45.934053,
-44.697125,
-45.906408,
-41.383713,
-47.553202,
-48.701316,
-49.558188,
-47.304837,
-46.467403,
-46.920868,
-47.439297,
-45.618268,
-35.507964,
-52.625319,
-47.083791,
-47.244262,
-40.858482,
-46.027135,
-48.230244,
-43.624839,
-48.611366,
-41.525691,
-47.866134,
-45.144354,
-44.046933,
-49.07477,
-47.344612,
-44.805711,
-45.481703,
-47.055724,
-48.478578,
-40.068452,
-46.863539,
-48.639721,
-40.861194,
-48.211897,
-49.755092,
-43.524111,
-45.714281,
-42.333467,
-47.546649,
-43.647314,
-51.475509,
-43.846051,
-48.067981,
-43.916764,
-45.835373,
-39.810517,
-41.797751,
-42.243521,
-43.357468,
-39.580718,
-47.289731,
-47.519131,
-50.940569,
-46.239938,
-44.155169,
-47.544511,
-41.541951,
-41.859243,
-49.709032,
-45.53873,
-49.72376,
-47.832939,
-48.941388,
-44.042342,
-49.321923,
-50.867125,
-45.234288,
-48.057636,
-42.376364,
-44.544437,
-43.187877,
-43.117831,
-41.265629,
-36.073497,
-46.224035,
-43.981347,
-44.093588,
-48.819773,
-45.026522,
-45.663823,
-46.303675,
-40.745318,
-48.574511,
-44.03503,
-45.511179,
-46.756315,
-37.523258,
-44.029779,
-43.098398,
-44.556636,
-44.242293,
-41.696036,
-41.561741,
-52.154485,
-40.812819,
-51.938885,
-43.981326,
-45.621478,
-41.700853,
-47.70974,
-46.356852,
-47.885837,
-47.757845,
-38.464735,
-45.06313,
-49.409514,
-43.254638,
-44.048502,
-48.853891,
-47.491395,
-47.676692,
-46.691942,
-42.267448,
-46.714531,
-43.196702,
-45.112175,
-44.972534,
-45.468089,
-42.479076,
-41.199786,
-40.481982,
-42.286185,
-44.989132,
-49.310965,
-47.900026,
-45.426929,
-39.713313,
-47.857367,
-50.510259,
-46.315093,
-44.810534,
-40.141757,
-44.459784,
-45.527761,
-39.332027,
-46.941528,
-46.159879,
-38.166779,
-45.638292,
-43.867213,
-42.130468,
-44.815327,
-44.856321,
-41.782753,
-49.434178,
-42.559671,
-43.99601,
-43.710487,
-46.419478,
-47.074988,
-48.274601,
-41.668777,
-46.980569,
-51.472205,
-41.56986,
-45.866972,
-43.016414,
-46.275656,
-46.150466,
-41.369265,
-46.564899,
-50.795361,
-44.406785,
-36.10311,
-46.045753,
-45.724888,
-44.872068,
-45.071511,
-45.67143,
-41.995789,
-42.320304,
-45.708097,
-51.254932,
-48.641007,
-48.376136,
-50.340499,
-44.72362,
-44.906896,
-47.006516,
-35.389227,
-41.561527,
-44.114013,
-46.650371,
-48.49132,
-46.709713,
-49.032384,
-52.818672,
-42.956793,
-45.696826,
-43.586554,
-45.249925,
-42.33393,
-45.858068,
-42.535734,
-33.357282,
-45.730473,
-37.583702,
-46.396535,
-45.635791,
-47.836761,
-47.426867,
-43.516926,
-42.952625,
-46.362756,
-51.276391,
-45.199174,
-49.491303,
-42.725431,
-49.150174,
-47.155523,
-45.079891,
-51.544975,
-42.995431,
-42.928411,
-52.196305,
-45.911288,
-44.547985,
-43.813124,
-46.478271,
-41.882017,
-43.234678,
-48.613708,
-40.475435,
-46.938078,
-48.314282,
-47.725388,
-41.44382,
-51.324932,
-48.395287,
-46.618414,
-51.580199,
-45.174077,
-44.624726,
-43.596969,
-46.095123,
-46.229907,
-42.864177,
-48.133712,
-45.240621,
-45.752608,
-47.069199,
-47.016365,
-48.872451,
-45.997344,
-51.258793,
-48.384275,
-43.888049,
-50.234817,
-48.443732,
-40.885618,
-45.069064,
-52.075012,
-53.73991,
-53.949244,
-41.847933,
-45.551612,
-42.165479,
-47.439053,
-47.14572,
-44.374148,
-43.134448,
-40.374601,
-46.829749,
-49.162273,
-51.181325,
-46.401995,
-44.158237,
-46.651296,
-49.046173,
-49.311066,
-48.084016,
-48.956827,
-43.6271,
-43.850203,
-39.919619,
-45.383211,
-46.486181,
-44.265498,
-49.603099,
-47.702426,
-45.468689,
-47.090417,
-49.751179,
-49.950411,
-45.167469,
-42.001789,
-46.227413,
-42.029196,
-48.586449,
-51.05179,
-50.698691,
-38.329413,
-50.159242,
-51.350431,
-52.198217,
-43.276348,
-50.993363,
-47.630059,
-44.04393,
-42.516252,
-48.817647,
-46.481738,
-44.259809,
-44.768763,
-45.175537,
-42.697423,
-43.227631,
-47.617803,
-44.923026,
-48.396303,
-46.171382,
-45.823573,
-46.108863,
-54.192524,
-39.009567,
-43.211631,
-42.944808,
-44.976506,
-44.150524,
-46.731508,
-43.705411,
-46.44522,
-43.16254,
-52.791286,
-47.516681,
-48.10975,
-38.975238,
-48.502561,
-44.938001,
-43.867176,
-48.985505,
-43.512967,
-42.158388,
-45.017948,
-42.73379,
-42.49215,
-40.597221,
-44.988133,
-51.225054,
-45.466568,
-49.181746,
-45.240286,
-44.365403,
-47.578812,
-45.005027,
-49.598415,
-45.714038,
-50.666638,
-44.319866,
-53.770801,
-49.64353,
-48.341893,
-46.263173,
-47.694607,
-47.055109,
-43.520456,
-48.229669,
-46.624577,
-42.088199,
-46.585176,
-44.072354,
-44.285877,
-48.902717,
-45.69789,
-50.494215,
-45.226191,
-47.991173,
-45.745649,
-47.605573,
-41.160081,
-44.406846,
-49.796249,
-47.446448,
-37.483174,
-49.009107,
-48.313433,
-54.040046,
-46.507286,
-51.199905,
-43.780735,
-38.622662,
-46.857389,
-56.103809,
-42.25555,
-59.501075,
-46.853495,
-45.044538,
-43.969341,
-45.694387,
-41.951621,
-40.658754,
-41.459472,
-48.798868,
-46.448348,
-45.65262,
-49.986783,
-44.349902,
-42.385896,
-50.039011,
-45.906691,
-42.317457,
-49.538164,
-48.854788,
-43.858461,
-47.206072,
-48.961024,
-46.187115,
-43.736551,
-44.548555,
-48.099456,
-42.659671,
-52.104017,
-44.705555,
-43.417924,
-40.588527,
-49.455531,
-45.60092,
-40.780613,
-43.579357,
-48.218001,
-47.074365,
-45.362317,
-44.055019,
-49.474089,
-41.153549,
-41.518233,
-47.277079,
-44.044725,
-47.144643,
-43.639027,
-50.068341,
-43.05743,
-43.433473,
-43.001885,
-43.805812,
-45.289779,
-46.984714,
-45.883157,
-46.269752,
-45.53454,
-44.986244,
-45.604355,
-43.789697,
-44.466122,
-49.174901,
-43.656593,
-48.542515,
-46.778594,
-46.003525,
-49.700018,
-42.933772,
-47.131043,
-51.060276,
-56.625618,
-42.883839,
-48.017451,
-43.097761,
-44.997222,
-45.302336,
-40.542635,
-43.764639,
-47.575686,
-46.074351,
-44.15152,
-46.286691,
-43.557066,
-38.996246,
-47.443844,
-48.787743,
-49.853817,
-42.571563,
-41.712741,
-46.406483,
-41.543164,
-46.994201,
-44.700501,
-44.289309,
-38.378719,
-40.455522,
-45.711049,
-48.19888,
-42.832118,
-43.298514,
-41.764852,
-43.767694,
-53.075583,
-43.532982,
-43.601407,
-47.002396,
-48.107396,
-44.598659,
-45.028284,
-49.669085,
-44.973993,
-46.751963,
-47.770224,
-38.047851,
-41.978805,
-46.832611,
-48.692532,
-45.22422,
-45.377041,
-42.862407,
-50.593507,
-47.502388,
-42.02842,
-40.922005,
-46.627149,
-46.277568,
-46.488265,
-48.99447,
-44.954941,
-40.03245,
-45.106829,
-40.437102,
-47.135335,
-44.833161,
-46.979015,
-44.001916,
-38.564237,
-54.147265,
-45.34486,
-41.506218,
-44.159458,
-41.833439,
-52.496716,
-46.218535,
-48.323656,
-47.078006,
-43.914804,
-45.323398,
-48.075441,
-43.891439,
-45.762474,
-46.321397,
-46.297216,
-46.076831,
-48.481668,
-45.027311,
-47.283887,
-51.565295,
-56.257992,
-43.084559,
-43.254745,
-47.670251,
-45.936594,
-48.893759,
-45.502345,
-40.669138,
-48.401282,
-44.634005,
-47.933389,
-47.855306,
-44.542274,
-46.286191,
-56.880034,
-45.556554,
-42.506607,
-43.80083,
-42.393684,
-38.539547,
-51.001762,
-41.86118,
-43.748023,
-43.987362,
-47.73279,
-52.679543,
-44.807658,
-41.525986,
-48.14324,
-43.874558,
-51.114336,
-46.630157,
-45.301239,
-49.747959,
-40.923686,
-42.682711,
-45.697581,
-47.276071,
-48.422791,
-48.596385,
-42.04633,
-40.850954,
-44.507783,
-46.666534,
-51.970955,
-48.338687,
-45.674316,
-51.26337,
-46.487844,
-39.091902,
-46.531021,
-46.494201,
-49.303436,
-46.417261,
-46.913975,
-44.057464,
-42.393554,
-49.658085,
-44.9014,
-41.808851,
-44.982347,
-44.869589,
-45.016753,
-45.227868,
-50.204367,
-45.863899,
-47.304375,
-45.996721,
-45.236995,
-46.524753,
-42.894678,
-53.075554,
-52.874222,
-49.392999,
-47.951784,
-48.484902,
-48.04811,
-42.984821,
-43.771905,
-48.444869,
-41.002231,
-46.387889,
-46.711894,
-41.303504,
-42.666238,
-51.166097,
-42.498247,
-46.800159,
-46.979881,
-41.201694,
-43.036513,
-39.359167,
-43.404519,
-45.648241,
-41.840363,
-42.87558,
-43.300707,
-46.826521,
-47.278045,
-41.573568,
-45.561977,
-46.327485,
-50.334533,
-44.857201,
-47.443445,
-48.974377,
-48.255324,
-46.450103,
-52.145564,
-43.194153,
-46.611096,
-41.292931,
-48.379369,
-43.329207,
-46.768055,
-47.621202,
-49.460468,
-46.239273,
-39.928764,
-43.151882,
-48.884864,
-46.245926,
-43.159821,
-46.741993,
-45.366064,
-48.543667,
-44.16222,
-46.599689,
-42.704844,
-45.282714,
-46.360135,
-47.046019,
-45.786998,
-48.509585,
-44.561893,
-50.508614,
-49.085587,
-44.315001,
-32.795362,
-46.235044,
-47.987667,
-39.248723,
-56.023104,
-51.047355,
-45.526757,
-44.386731,
-12.22633,
-46.712226,
-47.295031,
-49.980327,
-46.488337,
-43.69483,
-50.182304,
-43.922025,
-44.927148,
-40.673837,
-43.029573,
-43.804729,
-44.866763,
-51.879685,
-50.760563,
-47.268136,
-44.753349,
-44.835512,
-45.917475,
-43.628226,
-49.955063,
-47.039367,
-47.325635,
-48.904145,
-48.046271,
-45.190068,
-46.653322,
-41.328954,
-47.594078,
-59.583229,
-45.773511,
-42.362189,
-47.094716,
-47.389816,
-49.102958,
-44.200163,
-46.658636,
-42.190608,
-45.491837,
-43.999491,
-49.646025,
-46.023101,
-47.093243,
-45.905059,
-39.442037,
-45.987625,
-43.148545,
-52.597685,
-45.341786,
-45.972907,
-50.157247,
-46.410057,
-52.960312,
-48.336693,
-48.942928,
-48.569212,
-53.485136,
-51.47522,
-39.29367,
-49.309989,
-44.131985,
-44.001342,
-46.816559,
-48.657909,
-45.274488,
-43.813744,
-43.292598,
-46.474235,
-41.479867,
-53.66576,
-51.086117,
-41.790321,
-45.530618,
-41.208278,
-48.226493,
-50.521158,
-42.330489,
-41.869602,
-47.66772,
-45.160946,
-42.663248,
-44.341563,
-44.857193,
-46.223992,
-42.431954,
-49.114713,
-42.76788,
-50.599602,
-46.884989,
-47.739273,
-45.814601,
-47.838349,
-47.585607,
-50.28294,
-43.618815,
-47.134334,
-50.673588,
-41.539062,
-47.899181,
-44.152313,
-43.479135,
-40.087437,
-45.494325,
-47.867573,
-50.108597,
-41.110093,
-43.711255,
-52.139093,
-41.728895,
-42.740805,
-44.050167,
-49.804431,
-47.193057,
-43.944283,
-49.611946,
-48.33925,
-48.313826,
-45.204094,
-46.098657,
-48.721909,
-44.013381,
-45.786658,
-44.248734,
-51.181752,
-49.830349,
-48.177817,
-41.127931,
-43.88511,
-46.206896,
-44.977745,
-51.482783,
-48.023376,
-45.097949,
-47.114646,
-48.805115,
-41.041031,
-45.774212,
-46.028694,
-48.929521,
-43.791589,
-44.9356,
-48.478359,
-48.883014,
-45.124472,
-43.040388,
-44.917164,
-43.159936,
-35.63817,
-48.375441,
-43.314837,
-50.06321,
-41.420235,
-46.636476,
-42.393156,
-42.567459,
-43.223622,
-45.731674,
-43.812286,
-45.231548,
-40.880605,
-44.474884,
-45.911402,
-47.892905,
-42.291106,
-41.34454,
-48.458904,
-46.831597,
-46.240811,
-45.029024,
-43.888708,
-42.501211,
-48.825415,
-43.024664,
-44.790183,
-44.953719,
-47.199721,
-44.662726,
-41.215612,
-48.354038,
-47.257646,
-43.440374,
-48.481547,
-48.475058,
-41.602276,
-46.39312,
-46.520345,
-42.914164,
-51.40038,
-43.471116,
-43.320269,
-47.051988,
-49.223185,
-44.364682,
-44.208676,
-44.754236,
-41.753441,
-47.802257,
-51.798037,
-46.43277,
-47.77779,
-47.855446,
-38.890585,
-42.737555,
-38.444355,
-43.449328,
-37.642038,
-47.320398,
-43.531244,
-46.458359,
-46.307424,
-45.677224,
-43.339132,
-43.416979,
-41.321947,
-36.553073,
-46.306287,
-44.190602,
-44.283598,
-45.51837,
-44.547241,
-42.911198,
-44.26387,
-46.105609,
-45.157687,
-47.300027,
-52.241541,
-42.200949,
-49.11466,
-45.587887,
-45.978188,
-49.621716,
-44.41183,
-45.500483,
-41.675386,
-39.832837,
-42.656009,
-42.960909,
-45.859263,
-43.416496,
-45.48719,
-44.296088,
-42.9553,
-48.617683,
-41.719362,
-44.945842,
-47.90738,
-45.263788,
-51.167128,
-42.203999,
-44.947142,
-48.485778,
-50.628377,
-43.127627,
-47.085002,
-46.796618,
-47.463238,
-48.484314,
-42.182964,
-50.259588,
-48.012524,
-44.987792,
-41.686106,
-41.363715,
-41.48213,
-45.35844,
-45.581757,
-47.308278,
-44.780125,
-43.763539,
-41.618024,
-49.006714,
-45.539849,
-42.664453,
-48.426839,
-44.840373,
-50.918518,
-46.031765,
-44.296927,
-42.87828,
-44.128257,
-44.280901,
-46.274823,
-54.18606,
-42.55624,
-39.71709,
-42.600549,
-47.672423,
-49.407627,
-43.93728,
-41.536459,
-42.987604,
-43.840022,
-45.170463,
-50.33595,
-35.807637,
-45.672123,
-54.825832,
-38.523749,
-41.532244,
-47.118947,
-44.287532,
-49.567842,
-48.166764,
-48.908964,
-47.596738,
-43.045747,
-49.083947,
-41.483053,
-48.831481,
-45.46748,
-53.654973,
-47.554193,
-49.237867,
-51.407079,
-47.988993,
-44.147035,
-43.15064,
-41.590962,
-48.103031,
-47.575278,
-50.693901,
-42.710669,
-47.555496,
-48.583232,
-42.940713,
-40.153187,
-45.072296,
-50.249685,
-48.815639,
-47.367221,
-46.374735,
-43.852073,
-44.086781,
-48.487429,
-43.187357,
-49.46744,
-49.032304,
-50.309226,
-44.54373,
-46.1246,
-43.549691,
-41.30075,
-48.647598,
-48.264983,
-45.749302,
-43.091169,
-52.470916,
-45.230913,
-44.464529,
-48.970042,
-50.092811,
-43.992642,
-45.832488,
-48.777037,
-45.746521,
-42.590731,
-43.961191,
-52.000382,
-52.348112,
-43.392509,
-47.001813,
-44.29372,
-50.536963,
-51.85131,
-51.456777,
-41.908104,
-47.539178,
-42.872183,
-43.790604,
-40.466317,
-45.236072,
-48.881972,
-40.675355,
-39.952039,
-48.822663,
-39.588141,
-45.17877,
-42.736741,
-47.514323,
-49.870332,
-45.978935,
-51.744463,
-47.322631,
-47.507915,
-49.366464,
-49.377374,
-45.428267,
-46.533823,
-42.887677,
-46.934797,
-43.478881,
-46.636164,
-43.62488,
-48.489535,
-50.978962,
-61.528078,
-46.514003,
-45.757617,
-45.190816,
-45.765618,
-42.284078,
-47.346176,
-48.343256,
-42.569335,
-46.991453,
-44.773201,
-46.127266,
-46.650973,
-43.944268,
-49.46529,
-43.594731,
-47.392214,
-43.099093,
-48.362926,
-46.028923,
-47.805543,
-43.934335,
-54.793728,
-45.139625,
-43.644402,
-51.138858,
-45.651711,
-44.686493,
-52.168362,
-49.158957,
-45.048246,
-35.193287,
-43.099491,
-43.835539,
-43.151703,
-40.66139,
-43.895856,
-48.487711,
-44.245907,
-46.981209,
-47.93416,
-47.936884,
-50.497038,
-42.421475,
-45.657908,
-46.559066,
-45.084013,
-47.203362,
-43.726888,
-50.985021,
-50.240244,
-47.030803,
-45.015972,
-42.093845,
-42.127728,
-39.920961,
-48.185514,
-44.788894,
-44.813676,
-44.251686,
-43.267414,
-41.751646,
-48.88501,
-40.635374,
-42.810882,
-51.20441,
-43.26197,
-39.008975,
-49.661417,
-50.370783,
-43.224133,
-41.871665,
-40.130563,
-47.796982,
-46.190802,
-40.507172,
-43.258262,
-40.399242,
-45.270509,
-48.322827,
-57.086544,
-44.668851,
-43.511147,
-44.816221,
-46.315564,
-43.933595,
-43.998193,
-46.234787,
-45.516303,
-45.463633,
-44.577772,
-44.840181,
-44.491435,
-43.383014,
-45.893904,
-47.936372,
-48.782444,
-49.457326,
-46.199292,
-45.987413,
-48.838169,
-44.497281,
-44.782349,
-49.783417,
-46.424397,
-49.493023,
-46.839506,
-44.327358,
-42.909321,
-50.673095,
-54.212533,
-41.413268,
-41.914309,
-42.970554,
-47.356635,
-42.868596,
-47.830116,
-40.382534,
-43.938823,
-39.646663,
-51.97352,
-47.044905,
-46.728626,
-42.612304,
-47.502388,
-52.41281,
-44.807683,
-47.702708,
-45.59369,
-50.178094,
-42.231204,
-41.617222,
-45.726736,
-44.619369,
-44.826562,
-41.396929,
-46.982695,
-40.317091,
-48.39872,
-54.177371,
-45.114398,
-46.49215,
-48.038922,
-41.67899,
-45.90987,
-32.160074,
-51.582857,
-43.840846,
-44.799891,
-44.743992,
-56.067307,
-37.733725,
-49.12675,
-46.054578,
-48.399409,
-44.259617,
-47.443001,
-42.027154,
-42.965051,
-42.005013,
-47.746357,
-49.021149,
-48.015022,
-42.709124,
-47.038237,
-47.886555,
-44.249629,
-43.991269,
-38.30953,
-44.113183,
-43.514818,
-46.359821,
-42.901055,
-45.652709,
-41.05011,
-41.039512,
-44.280181,
-51.162537,
-46.953854,
-45.847172,
-43.234828,
-47.865782,
-51.654578,
-41.091049,
-42.972372,
-48.293313,
-50.076283,
-57.123718,
-46.696876,
-49.318072,
-43.741746,
-45.423768,
-39.473622,
-44.703035,
-43.123363,
-47.583171,
-45.277395,
-43.002631,
-46.342452,
-44.967822,
-41.633776,
-47.794254,
-48.486588,
-43.885874,
-49.349963,
-45.364885,
-46.282795,
-41.393989,
-44.466964,
-49.394612,
-54.943057,
-43.473103,
-50.366292,
-53.308066,
-43.744596,
-44.459531,
-42.763947,
-46.718508,
-45.026079,
-52.290535,
-48.558473,
-43.344345,
-44.367754,
-45.59241,
-40.245205,
-47.701066,
-45.453835,
-36.446425,
-48.054517,
-45.758567,
-51.364993,
-42.783499,
-47.374803,
-52.144345,
-49.285846,
-48.661951,
-45.272504,
-41.528849,
-45.288604,
-43.582467,
-47.23828,
-45.242866,
-44.631658,
-42.334723,
-41.690752,
-46.753912,
-53.046575,
-49.316087,
-50.159695,
-41.759187,
-49.679764,
-48.009325,
-48.880828,
-41.672284,
-43.603454,
-45.801524,
-47.14801,
-42.724435,
-46.759074,
-44.580493,
-39.433534,
-48.003334,
-40.867108,
-42.087044,
-44.534617,
-50.754414,
-44.471856,
-43.469043,
-50.616309,
-43.768242,
-51.014101,
-43.550237,
-44.984495,
-43.790608,
-46.516194,
-49.956732,
-41.957988,
-47.054071,
-43.632744,
-46.807649,
-47.5073,
-46.878223,
-45.856705,
-45.540524,
-47.930836,
-48.113622,
-43.377352,
-50.937236,
-42.448781,
-43.977675,
-47.700517,
-51.46085,
-46.038898,
-45.070797,
-46.120167,
-44.894093,
-42.518788,
-49.555284,
-47.218931,
-43.009647,
-48.923835,
-44.078473,
-46.603826,
-51.981028,
-47.236079,
-49.271167,
-44.649037,
-43.918337,
-44.843978,
-44.34469,
-51.520338,
-43.945708,
-45.397923,
-43.661364,
-44.193102,
-49.979974,
-42.642711,
-41.783497,
-45.177589,
-43.99931,
-43.753529,
-45.561077,
-44.657566,
-43.173398,
-40.450113,
-46.848438,
-42.163217,
-49.522219,
-47.553973,
-44.802296,
-41.707393,
-49.510187,
-45.768567,
-39.953747,
-47.695205,
-48.961822,
-47.080231,
-43.403102,
-48.804291,
-46.119414,
-41.953479,
-47.347257,
-43.98996,
-50.652813,
-50.297774,
-44.594413,
-44.176413,
-42.284306,
-42.473505,
-42.563749,
-59.835545,
-38.479889,
-45.777813,
-46.972534,
-43.006627,
-46.786479,
-45.831095,
-46.03212,
-45.476165,
-50.39654,
-44.01071,
-47.427834,
-46.502881,
-56.892349,
-49.110627,
-47.212916,
-48.839704,
-42.088262,
-48.030476,
-45.294937,
-49.722278,
-44.24587,
-41.974186,
-46.153278,
-48.678536,
-44.096039,
-46.013008,
-45.713143,
-42.079318,
-40.933347,
-43.425002,
-43.513467,
-47.12679,
-41.607541,
-47.617903,
-47.10215,
-43.730294,
-43.993882,
-45.007841,
-48.711496,
-42.380888,
-45.092059,
-43.584017,
-49.36991,
-48.485488,
-44.797826,
-36.038892,
-49.127809,
-46.403775,
-47.362043,
-43.077975,
-46.357817,
-44.721156,
-41.206147,
-40.89614,
-45.464667,
-47.881853,
-46.406708,
-38.03434,
-37.988149,
-55.919684,
-42.683926,
-41.975733,
-43.705984,
-43.217718,
-42.464903,
-43.126819,
-49.088889,
-43.408937,
-43.484721,
-40.773509,
-48.456861,
-44.327358,
-40.711736,
-46.188568,
-41.63073,
-48.406783,
-48.236834,
-43.82135,
-45.609681,
-46.496607,
-51.707699,
-45.0578,
-46.131653,
-46.233337,
-41.314605,
-47.502209,
-47.217771,
-45.866068,
-46.463796,
-45.90186,
-45.597928,
-43.553716,
-46.872558,
-44.476155,
-45.868465,
-42.426815,
-51.709327,
-46.898136,
-51.247954,
-45.897086,
-42.662074,
-45.556108,
-47.809476,
-44.415325,
-51.01436,
-49.397475,
-43.786446,
-43.600843,
-54.189356,
-42.622674,
-41.917938,
-45.732657,
-45.462784,
-48.585639,
-42.03343,
-44.295618,
-44.85556,
-46.381328,
-40.65068,
-44.107405,
-42.628057,
-52.714892,
-44.831329,
-47.102883,
-45.38925,
-45.508764,
-48.448319,
-42.784863,
-48.822202,
-48.667563,
-47.656382,
-50.171541,
-45.617808,
-45.015787,
-55.533762,
-44.07194,
-52.275099,
-45.707915,
-48.353211,
-46.141524,
-47.644758,
-46.886547,
-43.095827,
-47.022513,
-45.364139,
-44.342941,
-43.486008,
-52.169576,
-53.206615,
-48.242594,
-45.74234,
-44.511081,
-46.71238,
-48.348208,
-42.645245,
-41.772624,
-47.056354,
-42.545769,
-47.782475,
-44.821502,
-47.394382,
-49.498886,
-45.740056,
-47.4816,
-46.326399,
-39.012231,
-45.168726,
-43.407076,
-55.107653,
-43.079687,
-38.304192,
-45.389481,
-44.048283,
-49.203197,
-47.35305,
-44.847911,
-41.678239,
-39.427805,
-49.344804,
-47.874318,
-45.612422,
-47.103986,
-44.085853,
-43.209425,
-41.099015,
-47.550589,
-42.109095,
-44.997947,
-44.260062,
-48.314031,
-46.107056,
-46.766021,
-43.479096,
-48.554401,
-48.707963,
-40.833479,
-44.195622,
-43.876725,
-52.656002,
-47.214469,
-37.288729,
-42.929259,
-46.872592,
-45.240728,
-43.483404,
-54.492675,
-46.091795,
-46.522319,
-40.60115,
-43.756964,
-45.885308,
-47.611174,
-50.408059,
-42.014467,
-47.529166,
-44.61774,
-55.320957,
-53.699445,
-46.002842,
-46.614699,
-45.677357,
-48.684437,
-43.352198,
-40.031105,
-41.501184,
-48.834273,
-43.320917,
-47.641685,
-44.682123,
-43.080208,
-41.427765,
-42.279453,
-43.307642,
-40.757803,
-44.796961,
-38.174072,
-41.993038,
-47.710513,
-44.870246,
-46.016994,
-43.042559,
-40.474798,
-44.231229,
-45.916776,
-51.599224,
-45.898128,
-42.231307,
-52.215278,
-47.90741,
-39.940435,
-45.324031,
-45.448048,
-44.813552,
-48.132673,
-57.621962,
-49.040247,
-47.463056,
-41.873045,
-46.990096,
-42.372612,
-41.84363,
-42.038306,
-46.593827,
-44.359337,
-41.658802,
-48.488772,
-50.613892,
-46.276152,
-46.513935,
-45.522339,
-46.254396,
-49.011446,
-45.066343,
-49.509891,
-45.552491,
-43.963243,
-39.485915,
-49.339986,
-46.864378,
-48.676798,
-48.38278,
-48.073294,
-50.528284,
-44.250272,
-44.425983,
-47.0621,
-44.833367,
-38.628621,
-40.193503,
-44.252099,
-48.954814,
-53.508901,
-49.512924,
-47.294821,
-47.432829,
-42.817703,
-54.193903,
-46.509903,
-42.471618,
-44.846286,
-45.526103,
-47.682785,
-47.57123,
-43.278083,
-42.517894,
-47.6295,
-48.898632,
-47.732713,
-44.301023,
-52.434203,
-47.360951,
-43.128385,
-48.159402,
-46.689831,
-42.90043,
-42.441973,
-48.950416,
-42.452184,
-38.119183,
-51.192919,
-46.093693,
-45.855238,
-46.286154,
-43.12403,
-42.47722,
-42.460553,
-48.91942,
-46.452216,
-42.149038,
-49.723798,
-47.878536,
-52.099879,
-45.319322,
-49.419689,
-47.43755,
-48.197027,
-45.415218,
-44.694881,
-44.022082,
-49.163402,
-54.186881,
-47.975041,
-45.879088,
-48.071747,
-51.22525,
-45.959803,
-46.193991,
-42.606425,
-49.030883,
-48.151278,
-52.314447,
-44.533233,
-43.13691,
-47.887823,
-40.047147,
-45.85585,
-43.136872,
-47.538582,
-55.781439,
-46.358167,
-46.458026,
-57.582265,
-57.301976,
-50.765908,
-47.883303,
-49.039942,
-45.922493,
-42.873362,
-50.200973,
-47.441746,
-44.503605,
-48.128583,
-42.779938,
-50.190268,
-48.285097,
-47.101979,
-46.13874,
-45.289552,
-48.477111,
-47.211937,
-41.202271,
-45.270371,
-49.073607,
-44.351522,
-41.402127,
-45.509409,
-46.475597,
-45.127291,
-42.942058,
-41.35302,
-39.811471,
-44.951309,
-42.063836,
-44.302595,
-43.632299,
-47.582683,
-49.985512,
-46.047574,
-46.588545,
-44.273757,
-49.189682,
-42.181063,
-52.606748,
-42.85789,
-41.868664,
-46.424249,
-47.778138,
-49.26647,
-44.211686,
-45.871777,
-44.209367,
-48.268095,
-46.994162,
-47.159355,
-41.902604,
-44.067248,
-42.95413,
-43.262139,
-45.211678,
-43.210279,
-42.211968,
-46.383095,
-43.993648,
-48.429379,
-45.539717,
-49.404638,
-40.446876,
-44.346645,
-39.128127,
-48.485526,
-46.457963,
-49.040601,
-43.594176,
-42.022007,
-45.910264,
-40.667586,
-51.732398,
-47.129971,
-45.508618,
-45.777702,
-47.494577,
-47.337402,
-48.85387,
-49.34987,
-46.904319,
-43.881096,
-46.537705,
-48.175912,
-45.205189,
-44.701804,
-40.931495,
-48.224738,
-44.694266,
-43.994229,
-51.736577,
-40.053336,
-42.064862,
-48.180697,
-44.959983,
-46.578305,
-48.645921,
-47.185555,
-45.531289,
-45.826068,
-40.791162,
-47.073734,
-44.507005,
-41.850127,
-41.723975,
-46.971327,
-49.159382,
-46.08748,
-41.379298,
-48.461863,
-41.738893,
-48.674384,
-53.122197,
-41.193536,
-43.852799,
-42.573779,
-41.316899,
-39.822763,
-43.425867,
-53.280582,
-46.531325,
-46.588316,
-43.218675,
-45.564598,
-32.731761,
-46.629771,
-48.514685,
-46.728651,
-44.152034,
-50.980022,
-46.442553,
-43.50759,
-48.031124,
-45.17465,
-47.270089,
-52.202795,
-42.674264,
-45.084849,
-42.589284,
-46.392808,
-50.081559,
-48.849556,
-45.990095,
-46.095383,
-43.748923,
-49.308192,
-47.499546,
-47.934658,
-44.992342,
-54.185112,
-54.237209,
-52.26206,
-44.055105,
-51.765533,
-45.786273,
-45.647319,
-43.506247,
-49.946669,
-50.022691,
-51.804217,
-37.551382,
-43.682879,
-47.993805,
-50.594634,
-44.68579,
-48.201606,
-47.541184,
-44.580888,
-47.76758,
-47.182483,
-44.079759,
-46.902442,
-47.51441,
-43.689102,
-43.875579,
-46.685745,
-42.655196,
-43.63569,
-42.528114,
-47.531746,
-47.541582,
-46.196315,
-44.529933,
-40.515139,
-40.343045,
-43.722914,
-45.631267,
-45.694132,
-43.243368,
-41.47579,
-48.492228,
-38.249037,
-47.925532,
-45.305942,
-46.09731,
-48.290986,
-51.474918,
-42.806468,
-44.178004,
-47.174388,
-47.436946,
-42.775757,
-45.293728,
-49.187149,
-55.09863,
-38.556526,
-42.888312,
-46.578849,
-41.450757,
-46.673304,
-47.974887,
-46.823712,
-45.258551,
-45.210188,
-45.857735,
-44.291277,
-43.609827,
-45.625384,
-49.297459,
-43.521039,
-47.280005,
-47.908856,
-41.536102,
-45.614889,
-47.518538,
-44.177523,
-52.660392,
-41.474161,
-40.267717,
-41.923304,
-48.632152,
-42.796875,
-42.163006,
-42.158222,
-48.648649,
-46.572855,
-45.328306,
-47.880708,
-47.27554,
-42.065983,
-45.487352,
-40.926045,
-43.147067,
-44.241673,
-41.661756,
-45.428674,
-44.085366,
-41.929493,
-41.314127,
-53.477268,
-49.546925,
-49.935571,
-42.90074,
-42.06687,
-42.896072,
-42.721114,
-49.200268,
-47.53557,
-44.140857,
-47.046135,
-43.828552,
-47.935186,
-48.796955,
-47.399713,
-46.954523,
-42.87427,
-48.299154,
-42.85175,
-43.194271,
-48.578098,
-41.643473,
-48.006565,
-45.697538,
-49.540173,
-43.763028,
-47.827346,
-44.070033,
-45.743922,
-45.241882,
-35.812005,
-51.656135,
-48.981695,
-46.911346,
-44.761285,
-44.097451,
-42.214492,
-47.620192,
-50.109401,
-50.60223,
-38.087407,
-49.497661,
-46.90999,
-48.209915,
-43.893424,
-46.907174,
-47.149764,
-45.511778,
-45.175275,
-48.896556,
-48.102178,
-46.966575,
-45.263597,
-43.361908,
-45.454,
-47.768669,
-43.597182,
-45.510746,
-47.790366,
-49.451404,
-45.409609,
-53.895386,
-43.616559,
-46.976246,
-57.296274,
-47.416753,
-45.840719,
-43.92177,
-47.034752,
-46.457172,
-49.057838,
-45.165243,
-41.757829,
-44.038248,
-43.579051,
-46.911475,
-49.25183,
-40.896241,
-46.828199,
-43.841761,
-43.311331,
-44.659102,
-40.934225,
-48.708803,
-52.547748,
-45.197002,
-46.314353,
-47.771291,
-39.529549,
-50.672076,
-46.293541,
-45.893644,
-45.729947,
-44.357534,
-50.016043,
-45.771341,
-47.268499,
-44.517883,
-46.732036,
-42.676107,
-43.870456,
-49.4964,
-44.46806,
-45.878785,
-44.670955,
-43.615097,
-50.228267,
-55.716837,
-48.664953,
-49.602231,
-45.212532,
-48.858683,
-37.223263,
-46.899529,
-48.556215,
-48.089387,
-39.128174,
-47.227735,
-53.308066,
-54.948297,
-48.378992,
-44.85181,
-45.624377,
-47.013865,
-44.759831,
-48.699049,
-45.852495,
-43.264011,
-45.46474,
-43.029958,
-43.733149,
-45.423442,
-45.344466,
-52.031626,
-47.051977,
-48.779142,
-46.352529,
-49.373975,
-46.533343,
-41.405421,
-39.652837,
-46.799893,
-50.094058,
-41.668768,
-47.275105,
-43.291547,
-41.287201,
-47.591012,
-47.881363,
-43.88887,
-44.107847,
-48.609653,
-47.555275,
-47.225132,
-47.746648,
-44.572896,
-46.265319,
-45.216761,
-50.901184,
-47.325763,
-47.423959,
-46.969505,
-41.304118,
-47.95751,
-43.967651,
-47.214086,
-45.748293,
-44.950659,
-41.542861,
-49.893233,
-49.949362,
-51.579513,
-50.130017,
-49.817375,
-47.958188,
-43.920319,
-44.455618,
-46.654219,
-43.058297,
-50.597496,
-45.731994,
-42.635321,
-39.312263,
-46.20039,
-50.193089,
-47.093065,
-46.575834,
-43.052487,
-46.723259,
-49.360271,
-51.474566,
-45.650013,
-49.887301,
-51.585745,
-49.237418,
-43.967393,
-46.816694,
-40.245838,
-47.722875,
-44.557218,
-39.278475,
-44.629317,
-49.106968,
-49.032181,
-51.198245,
-44.376431,
-46.099973,
-50.164305,
-46.762917,
-44.318917,
-43.250927,
-42.048251,
-45.07538,
-49.135477,
-42.605647,
-42.153838,
-45.226747,
-50.783432,
-46.695884,
-46.709648
)
|
6d11d4bba4a2029d807d3e6900679848d0e9b596 | 2bfd2b59474b38750f91807f42f91453406c2429 | /tes_drought/Draft code/FTICR_tes_drought_code_VanKrevelenPlot_only.R | a57c3bb21bf3677cb755436dd31814cf7b19d612 | [] | no_license | kaizadp/suli2021 | 94ac3da36b35ac4033ca77517b41662d5f03b60e | 3b058ac1743c70921b3a4effe49436ae8471779b | refs/heads/master | 2023-06-25T15:02:41.437285 | 2021-07-28T18:46:46 | 2021-07-28T18:46:46 | 373,258,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,690 | r | FTICR_tes_drought_code_VanKrevelenPlot_only.R |
# 3. Van Krevelen Plot ----
gg_vankrev <- function(data, mapping) {
  # Base Van Krevelen scatter plot (H/C vs O/C) with the standard
  # compound-class boundary lines overlaid as dashed segments.
  ggplot(data, mapping) +
    # semi-transparent points so dense regions remain readable
    geom_point(size = 1, alpha = 0.5) +
    labs(x = "O/C", y = "H/C") +
    lims(x = c(0, 1.25), y = c(0, 2.5)) +
    # boundary lines separating the Van Krevelen regions
    geom_segment(x = 0.0, y = 1.5, xend = 1.2, yend = 1.5, color = "black", linetype = "longdash") +
    geom_segment(x = 0.0, y = 0.7, xend = 1.2, yend = 0.4, color = "black", linetype = "longdash") +
    geom_segment(x = 0.0, y = 1.06, xend = 1.2, yend = 0.51, color = "black", linetype = "longdash") +
    # legend keys: fully opaque and larger than the plotted points
    guides(colour = guide_legend(override.aes = list(alpha = 1, size = 2)))
}
# Loading Files ----
# Long-format FTICR peak data (per treatment) and the molecular-formula
# metadata table produced by the upstream processing scripts.
data_long_trt = read.csv("tes_drought/data/Processed Data/Processed_FTICR_DATA/fticr_tes_drought_data_long_trt.csv.gz")
meta = read.csv("tes_drought/data/Processed Data/Processed_FTICR_DATA/fticr_tes_drought_meta.csv")
# Processing Files for Plotting ----
library(tidyverse)
# Keep only the formula key plus the H/C and O/C ratios needed for the
# Van Krevelen axes.
meta_hcoc = meta %>%
  dplyr::select(formula, HC, OC)
# Join the ratios onto the peak data (left_join matches on shared columns).
data_hcoc =
  data_long_trt %>%
  left_join(meta_hcoc) %>%
  mutate(DOC_ID = as.character(DOC_ID))
# Van Krevelen plot colored by depth.
# NOTE(review): the second facet_wrap() replaces the first, so the plot is
# faceted by Site only. If faceting by both variables was intended, use
# facet_wrap(treatment ~ Site) or facet_grid() -- confirm.
gg_vankrev(data_hcoc, aes(x = OC, y = HC, color = depth))+
  facet_wrap(~treatment)+
  facet_wrap(~Site)+
  theme_classic()
# RA BAR Plotting ----
# Relative-abundance stacked bars by compound class.
RA = read.csv("tes_drought/data/Processed Data/Processed_FTICR_DATA/fticr_tes_drought_RA_trt.csv")
# Relative abundance by Site, faceted by treatment.
RA %>%
  ggplot(aes(x = Site, y = relabund2, fill = class))+
  geom_bar(stat = "identity")+
  facet_wrap(~treatment)+
  theme_classic()
# Relative abundance by depth, faceted by treatment.
RA %>%
  ggplot(aes(x = depth, y = relabund2, fill = class))+
  geom_bar(stat = "identity")+
  facet_wrap(~treatment)+
  theme_classic()
|
813499d47933f34885af4d553eee15f075efa9c2 | 81498a41a2c4c01b6005aa45847770a72cd2f84c | /man/ontTermAgg.Rd | 6bf74e1dca3522bf9632b61fc3d11cff44b9d25b | [] | no_license | KrishnaTO/OntologyTermAggregator | 8ccee7e553dcef94e7782524a2190a3868761819 | 4c71ac664e720d3dae159a0d9958ab1370e3c95d | refs/heads/main | 2023-01-10T19:00:24.360047 | 2020-11-14T03:29:47 | 2020-11-14T03:29:47 | 312,690,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 557 | rd | ontTermAgg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ontTermAgg.R
\name{ontTermAgg}
\alias{ontTermAgg}
\title{Get all results of term}
\usage{
ontTermAgg(searchTerm, yourAPIKey, format = "json")
}
\arguments{
\item{searchTerm}{Enter the term you want the combined data for}
\item{yourAPIKey}{Manually include your API key for BioOntology here}
\item{format}{Choose which format to get the results in; default is JSON, or XML}
}
\description{
Convenience wrapper that runs the ontology-term aggregation workflow from the vignette as a single function call.
}
|
21ed86cfc2a1ce66c76178150a6f46563fde774d | 6ad152f997c2c64e341ac07feef7aea472e6c12e | /MCCauchy.R | 42607a4e1dba9ca31cff62e75546bb5106fb1aa3 | [] | no_license | cynesthesia/DistributionsR | 3d966db662fab1a769f0203eb793ab837cbc817f | 0375709aed23e9554510286b1764db4f0170f2ef | refs/heads/master | 2021-01-20T05:48:48.892002 | 2017-03-06T03:48:44 | 2017-03-06T03:48:44 | 83,864,291 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | r | MCCauchy.R | # Problem 2 F(x): cdf of the standard Cauchy distribution (t-dist with df=1)
# (A): For x = 1, 2, ..., 10, compute a MC est of F(x) using Unif(0,x).
# The estimator uses F(x) = 0.5 + integral_0^x 1/(pi*(1+u^2)) du, where the
# integral is estimated as x * mean(g(U)) with U ~ Unif(0, x).
# Compare the results with pcauchy function.
# Estimate the standard error and give 95% CIs.
x <- seq(1, 10, length=10)
# Monte Carlo sample size per evaluation point
m <- 10000
cdf.U <- numeric(length(x))
sd.U <- numeric(length(x))
for (i in 1:length(x)){
  u <- runif(m, min=0, max=x[i])
  # standard Cauchy density evaluated at the uniform draws
  g <- 1 / (pi*(1 + u^2))
  # x[i] * mean(g) estimates the integral over (0, x[i]); add F(0) = 0.5
  cdf.U[i] <- mean(g) * x[i] + 0.5
  sd.U[i] <- x[i] * sqrt(mean((g - mean(g))^2) / m)
}
# NOTE(review): alpha is defined but the 1.96 normal quantile is hard-coded
# below; 1.96 only matches alpha = 0.05 -- use qnorm(1 - alpha/2) to confirm.
alpha <- 0.05
CI.lower.U <- cdf.U - 1.96*sd.U
CI.upper.U <- cdf.U + 1.96*sd.U
# exact CDF for comparison (Cauchy = t-distribution with df = 1)
phi <- pcauchy(x)
round(rbind(x, phi, cdf.U, sd.U, CI.lower.U, CI.upper.U), 5)
# phi: theoretical value, cdf.U: empirical value, sd.U: standard errors
# CI.lower.U & CI.upper.U: 95% confidence intervals
# (B): Repeat (A) using the hit-or-miss method: estimate F(x) as the
# proportion of Cauchy draws falling below x.
x <- seq(1, 10, length=10)
m <- 10000
z <- rcauchy(m)
# give x a dim attribute so apply() iterates over its elements
dim(x) <- length(x)
cdf.HM <- apply(x, MARGIN=1, FUN=function(x,z) {mean(z<x)}, z=z)
sd.HM <- apply(x, MARGIN=1, FUN=function(x,z) {sqrt(var(z<x)/m)}, z=z)
alpha <- 0.05
CI.lower.HM <- cdf.HM - 1.96*sd.HM
CI.upper.HM <- cdf.HM + 1.96*sd.HM
round(rbind(x, phi, cdf.HM, sd.HM, CI.lower.HM, CI.upper.HM), 5)
# (C): For each x, compute the empirical efficiency of the MC method (A) to
# the hit-or-miss method (B): the variance ratio (values < 1 favor method A).
efficiency <- (sd.U/sd.HM)^2
efficiency
|
de30e4f940c8fc77f64416dfe422f9223a92be1e | 98550ab8b21f1d86f5954886911fc01498ef7699 | /R/utilities.R | 7b3dd521636f64fad169c79212276c576d6266aa | [] | no_license | lindbrook/packageRank | a68ee94e0ed3621e7f10239f1eb2d12dbb7c6530 | a83ebfaa05f6ee82b7e5ae76cf0b8a4c296b4dfb | refs/heads/master | 2023-08-04T21:18:01.261280 | 2023-08-01T22:00:29 | 2023-08-01T22:00:29 | 184,319,415 | 27 | 1 | null | 2023-08-01T22:00:20 | 2019-04-30T19:25:45 | R | UTF-8 | R | false | false | 9,944 | r | utilities.R | #' Extract Package Logs.
#'
#' @param lst Object. List of logs.
#' @param i Numeric. Day/ID.
#' @param pkg Character.
#' @param clean.output Logical.
#' @noRd
pkgLog0 <- function(lst, i = 1, pkg = "cholera", clean.output = TRUE) {
  # Pull one day's log, keep only the rows for `pkg`, and order them
  # chronologically via a combined date-time key.
  day.log <- cleanLog(lst[[i]])
  pkg.log <- day.log[day.log$package == pkg, ]
  pkg.log$t2 <- dateTime(pkg.log$date, pkg.log$time)
  chrono <- order(pkg.log$t2)
  # Keep the original column subset c(1:6, 8:10): drops column 7 and any
  # later columns, including the temporary t2 key (assumes the cleaned log
  # has 10 columns -- confirm against cleanLog()).
  out <- pkg.log[chrono, c(1:6, 8:10)]
  if (clean.output) row.names(out) <- NULL
  out
}
#' Extract Package Logs.
#'
#' @param lst Object. List of logs.
#' @param i Numeric. Day/ID.
#' @param triplet.filter Logical.
#' @param ip.filter Logical.
#' @param ip.campaigns Logical.
#' @param small.filter Logical.
#' @param sequence.filter Logical.
#' @param pkg Character.
#' @param multi.core Logical or Numeric.
#' @param clean.output Logical.
#' @noRd
pkgLog <- function(lst, i = 1, triplet.filter = TRUE, ip.filter = TRUE,
  ip.campaigns = TRUE, small.filter = TRUE, sequence.filter = TRUE,
  pkg = "cholera", multi.core = TRUE, clean.output = TRUE) {
  cores <- multiCore(multi.core)
  cran_log <- cleanLog(lst[[i]])
  # The IP filter needs the whole day's log (cross-package traffic), so it
  # runs before subsetting to `pkg`.
  if (ip.filter) {
    row.delete <- ipFilter(cran_log, campaigns = ip.campaigns,
      multi.core = cores)
    cran_log <- cran_log[!row.names(cran_log) %in% row.delete, ]
  }
  tmp <- cran_log[cran_log$package == pkg, ]
  # Remaining filters apply only when the package has downloads that day;
  # otherwise the empty data frame falls through unchanged.
  if (nrow(tmp) != 0) {
    if (triplet.filter) tmp <- tripletFilter(tmp)
    if (small.filter) tmp <- smallFilter(tmp)
    if (sequence.filter) {
      # The sequence filter needs the package's archive history up to (and
      # including) the log's date.
      pkg.history <- packageRank::blog.data$pkg.history
      p.hist <- pkg.history[[pkg]]
      p.date <- names(lst)[i]
      sel <- p.hist$Date <= as.Date(p.date) & p.hist$Repository == "Archive"
      arch.pkg.history <- p.hist[sel, ]
      tmp <- sequenceFilter(tmp, arch.pkg.history)
    }
    # Order chronologically via a temporary date-time key, then drop it.
    tmp$t2 <- dateTime(tmp$date, tmp$time)
    tmp <- tmp[order(tmp$t2), !names(tmp) %in% "t2"]
    if (clean.output) row.names(tmp) <- NULL
  }
  tmp
}
#' Package Filter Counts.
#'
#' @param lst Object. cran_log list of data frames.
#' @param pkg Character.
#' @param ip.campaigns Logical.
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. Mac and Unix only.
#' @noRd
packageFilterCounts <- function(lst, pkg = "cholera", ip.campaigns = TRUE,
  multi.core = TRUE) {
  cores <- multiCore(multi.core)
  dates <- names(lst)
  # One row of per-filter counts per day (see filter_counts()).
  out <- parallel::mclapply(seq_along(lst), function(i) {
    filter_counts(lst[[i]], pkg, dates[i], ip.campaigns)
  }, mc.cores = cores)
  # Distinct versions of `pkg` observed anywhere in the period.
  versions <- parallel::mclapply(lst, function(x) {
    x <- x[!is.na(x$package), ]
    unique(x[x$package == pkg, "version"])
  }, mc.cores = cores)
  versions <- length(unique(unlist(versions)))
  # Assemble a classed object consumed by plot.packageFilterCounts().
  out <- list(data = do.call(rbind, out),
              versions = versions,
              pkg = pkg,
              dates = dates)
  class(out) <- "packageFilterCounts"
  out
}
#' Filter counts helper.
#'
#' @param dat Object. cran_log data frame.
#' @param pkg Character.
#' @param date Character.
#' @param ip.campaigns Logical.
#' @noRd
filter_counts <- function(dat, pkg = "cholera", date, ip.campaigns) {
  dat0 <- cleanLog(dat)
  dat <- dat0[dat0$package == pkg, ]
  if (nrow(dat) != 0) {
    # IP filter: candidate rows are identified on the whole day's log, then
    # counted for this package only.
    # (Fix: a dead assignment previously stored the IP-filtered subset in
    # `out` and was immediately overwritten by the triplet filter below; it
    # has been removed. NOTE(review): as a consequence `all` combines the
    # triplet + small + sequence filters but NOT the IP filter -- confirm
    # that is intended.)
    row.delete <- ipFilter(dat0, campaigns = ip.campaigns, multi.core = FALSE)
    ip.filtered <- sum(!row.names(dat) %in% row.delete)
    # Triplet filter (applied alone for its column; `out` seeds the combo) #
    out <- tripletFilter(dat)
    triplet.filtered <- nrow(out)
    # Small filter (counted alone, then combined with the triplet result) #
    small.filtered <- nrow(smallFilter(dat))
    if (nrow(out) != 0) out <- smallFilter(out)
    # Sequence filter: uses versions archived on or before `date` #
    pkg.history <- packageRank::blog.data$pkg.history
    p.hist <- pkg.history[[pkg]]
    sel <- p.hist$Date <= as.Date(date) & p.hist$Repository == "Archive"
    arch.pkg.history <- p.hist[sel, ]
    # rows already removed by the triplet/small filters, added back so the
    # `sequence` column approximates the sequence filter applied alone
    pre.filter <- nrow(dat) - nrow(out)
    out <- sequenceFilter(out, arch.pkg.history)
    sequence.filtered <- nrow(out) + pre.filter
    # Output: raw count plus per-filter and combined counts #
    data.frame(package = pkg, ct = nrow(dat), triplet = triplet.filtered,
      ip = ip.filtered, small = small.filtered, sequence = sequence.filtered,
      all = nrow(out))
  } else {
    # No downloads for `pkg` that day: all filtered counts are zero.
    data.frame(package = pkg, ct = nrow(dat), triplet = 0, ip = 0, small = 0,
      sequence = 0, all = 0)
  }
}
#' Plot method for packageFilterCounts().
#'
#' @param x object.
#' @param filter Character. "triplet", "ip", "small", "sequence", "all".
#' @param smooth Logical.
#' @param median Logical.
#' @param legend.loc Character. Location of legend.
#' @param ... Additional plotting parameters.
#' @noRd
plot.packageFilterCounts <- function(x, filter = "all", smooth = FALSE,
  median = FALSE, legend.loc = "topleft", ...) {
  dat <- x$data
  dates <- as.Date(x$dates)
  # Mark Wednesdays with vertical guides and a "W" label on the top axis.
  wed.id <- which(weekdays(dates, abbreviate = TRUE) == "Wed")
  # Draw an empty frame first so the guide lines sit under the data lines.
  plot(dates, dat$ct, pch = NA, ylim = range(dat[, -1]), xlab = "Date",
    ylab = "Downloads")
  abline(v = dates[wed.id], col = "gray", lwd = 2/3)
  # Fix: removed a stray trailing comma that passed an empty argument.
  lines(dates, dat$ct, pch = 15, type = "o", col = "red")
  lines(dates, dat[, filter], type = "o", pch = 16)
  axis(3, at = dates[wed.id], labels = rep("W", length(wed.id)), cex.axis = 2/3,
    mgp = c(3, 0.5, 0))
  legend(x = legend.loc,
         legend = c("unfiltered", "filtered"),
         col = c("red", "black"),
         pch = c(15, 16),
         bg = "white",
         cex = 2/3,
         lwd = 1,
         title = NULL)
  # Title varies with the selected filter ("ip" upper-cased; "all" plural).
  if (filter == "ip") {
    title(main = paste0("'", x$pkg, "'", " ", toupper(filter), " Filter"))
  } else if (filter == "all") {
    title(main = paste0("'", x$pkg, "'", ": ", wordCase(filter), " Filters"))
  } else {
    title(main = paste0("'", x$pkg, "'", ": ", wordCase(filter), " Filter"))
  }
  # Optional lowess trend lines for both series.
  if (smooth) {
    lines(stats::lowess(dates, dat$ct), col = "red", lty = "dotted", lwd = 2)
    lines(stats::lowess(dates, dat[, filter]), lty = "dotted", lwd = 2)
  }
  # Optional median annotations on the right-hand axis.
  if (median) {
    axis(4, at = median(dat$ct), labels = median(dat$ct), col.axis = "red")
    axis(4, at = median(dat[, filter]), labels = median(dat[, filter]))
  }
  # Subtitle: period totals and the implied download inflation percentage.
  tot <- colSums(dat[, -1])
  ptA <- paste0("unfiltered = ", format(tot["ct"], big.mark = ","),
    "; filtered = ")
  ptB <- paste0("% | ", x$versions, " vers. observed")
  delta.pct <- round(100 * (tot["ct"] - tot[filter]) / tot[filter], 1)
  title(sub = paste0(ptA, format(tot[filter], big.mark = ","), "; inflation = ",
    format(delta.pct, big.mark = ","), ptB))
}
#' CRAN Filter Counts.
#'
#' @param lst Object. cran_log list of data frames.
#' @param ip.campaigns Logical.
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses one, single core. You can also specify the number logical cores. Mac and Unix only.
#' @noRd
cranFilterCounts <- function(lst, ip.campaigns = TRUE, multi.core = TRUE) {
  cores <- multiCore(multi.core)
  # NOTE(review): ipFilter() is called with multi.core = cores from inside
  # mclapply(), i.e. nested parallelism -- confirm this is intended.
  out <- parallel::mclapply(lst, function(x) {
    cran_log <- cleanLog(x)
    # distinct packages before any filtering
    u.ct <- length(unique(cran_log$package))
    row.delete <- ipFilter(cran_log, campaigns = ip.campaigns,
      multi.core = cores)
    tmp <- cran_log[!row.names(cran_log) %in% unlist(row.delete), ]
    # distinct packages after the IP filter
    ip.ct <- length(unique(tmp$package))
    # small filter applied alone
    sm.tmp <- smallFilter(cran_log)
    sm.ct <- length(unique(sm.tmp$package))
    # IP + small filters combined
    tmp <- smallFilter(tmp)
    ip_sm.ct <- length(unique(tmp$package))
    data.frame(ct = u.ct, ip = ip.ct, small = sm.ct, all = ip_sm.ct)
  }, mc.cores = cores)
  # One row per day, keyed by the list names (dates) of `lst`.
  dates <- as.Date(names(out))
  out <- do.call(rbind, out)
  out <- list(data = data.frame(date = dates, out, row.names = NULL))
  class(out) <- "cranFilterCounts"
  out
}
#' Plot method for cranFilterCounts().
#'
#' @param x object.
#' @param filter Character. "ip", "small", "all".
#' @param smooth Logical.
#' @param median Logical.
#' @param legend.loc Character. Location of legend.
#' @param add.legend Logical.
#' @param ... Additional plotting parameters.
#' @noRd
plot.cranFilterCounts <- function(x, filter = "all", smooth = FALSE,
  median = FALSE, legend.loc = "topleft", add.legend = TRUE, ...) {
  c.data <- x$data
  mo <- c.data$date
  # Mark Wednesdays with vertical guides and a "W" label on the top axis.
  id <- which(weekdays(mo, abbreviate = TRUE) == "Wed")
  plot(mo, c.data$ct, type = "o", col = "red", pch = 15,
    ylim = range(c.data[, -1]), xlab = "Date", ylab = "Count")
  # lines(mo, c.data$f.ct, type = "o", col = "black", pch = 16, lwd = 2)
  lines(mo, c.data[, filter], type = "o", pch = 16)
  abline(v = mo[id], col = "gray", lty = "dotted")
  axis(3, at = mo[id], labels = rep("W", length(id)), cex.axis = 2/3,
    col.ticks = "black", mgp = c(3, 0.5, 0))
  # title(main = "Packages Downloaded")
  if (add.legend) {
    legend(x = legend.loc,
           legend = c("all", "filtered"),
           col = c("red", "black"),
           pch = c(15, 16),
           bg = "white",
           cex = 2/3,
           lwd = 1,
           title = NULL)
  }
  # Title varies with the selected filter ("ip" upper-cased; "all" plural).
  if (filter == "ip") {
    title(main = paste0(toupper(filter), " Filter"))
  } else if (filter == "all") {
    title(main = paste0(wordCase(filter), " Filters"))
  } else {
    title(main = paste0(wordCase(filter), " Filter"))
  }
  if (smooth) {
    # Fix: the unfiltered lowess line used c.data$u.ct, a column that
    # cranFilterCounts() never creates (it builds `ct`), so smooth = TRUE
    # could not work; use the `ct` column instead.
    lines(stats::lowess(mo, c.data$ct), col = "red", lty = "dotted", lwd = 2)
    lines(stats::lowess(mo, c.data[, filter]), lty = "dotted", lwd = 2)
  }
  # Optional median annotations on the right-hand axis.
  if (median) {
    axis(4, at = median(c.data$ct), labels = median(c.data$ct),
      col.axis = "red")
    axis(4, at = median(c.data[, filter]), labels = median(c.data[, filter]))
  }
  # Subtitle: period totals and the implied download inflation percentage.
  tot <- colSums(c.data[, -1])
  ptA <- paste0("unfiltered = ", format(tot["ct"], big.mark = ","),
    "; filtered = ")
  # ptB <- paste0("% | ", x$versions, " vers. observed")
  delta.pct <- round(100 * (tot["ct"] - tot[filter]) / tot[filter], 1)
  title(sub = paste0(ptA, format(tot[filter], big.mark = ","),
    "; inflation = ", format(delta.pct, big.mark = ",")))
}
wordCase <- function(x) {
  # Capitalize the first letter and lower-case the rest (e.g. "all" -> "All").
  first <- toupper(substring(x, 1, 1))
  rest <- tolower(substring(x, 2))
  paste0(first, rest)
}
|
6dc65d9b0a4b75dd760cd05c4e4037dfa6bc5797 | a6a058f6cb3b62c520c527fcaf67f95b3ce87491 | /CDBcleanup_fullpar_phone_match_upd.R | 99a656a621eda9ecf379e8ee50f54bcf76be7002 | [] | no_license | rvsreeni/R_customer_tagging | 1f407438a5d3ddc7528e71134be530b3f4e8d05f | 66d94a6ea1edef20a9a40390eed6852a5aa28c5c | refs/heads/master | 2020-04-07T01:43:24.385588 | 2018-11-17T04:46:14 | 2018-11-17T04:46:14 | 157,949,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,398 | r | CDBcleanup_fullpar_phone_match_upd.R | #Load Phone boundary check code
#Check boundaries of chunks to see if records (with same Phone#) spanned across 2 chunks, and if so, move them to 1 chunk
# NOTE(review): this sources the boundary-check code only when
# ParFn_phone_bndchk ALREADY exists -- such guards are usually written with
# !exists(); confirm the intended condition. Also note the foreach loop
# below calls ParFn_phone_match, not ParFn_phone_bndchk.
if (exists("ParFn_phone_bndchk", mode="function"))
  source("./src/CDBcleanup_par_bndchk_func.R")
bdwlist <- NULL
bdwlist <- c(basedatawrt12, basedatawrt22)
#Parallel process of 2 chunks (calling function which does Phone Match tagging)
res <- foreach(i=1:2, .packages="stringdist") %dopar% {
  ParFn_phone_match(data.frame(bdwlist[(((i-1)*6)+1):(i*6)], stringsAsFactors=FALSE))
}
#Get updated/revised masterid, description (returned by ParFn_phone_match)
New_Masterid <- NULL
# Fix: this initialization was misspelled "Decription", which left a stray,
# never-used variable; it now initializes Description, the name used below.
Description <- NULL
tmp <- NULL
tmp <- data.frame(res[1], stringsAsFactors=FALSE)
mstid1 <- tmp$masteridnew
desc1 <- tmp$description
tmp <- NULL
tmp <- data.frame(res[2], stringsAsFactors=FALSE)
mstid2 <- tmp$masteridnew
desc2 <- tmp$description
New_Masterid <- c(mstid1, mstid2)
Description <- c(desc1, desc2)
datbdwrtn <- rbind(basedatawrt12, basedatawrt22)
#Update revised masterid, description in dataframe
datbdwrtn$masteridnew <- New_Masterid
datbdwrtn$description <- Description
#garbage cleanup
rm(res)
rm(tmp)
rm(bdwlist)
rm(basedatawrt12)
rm(basedatawrt22)
rm(mstid1)
rm(mstid2)
rm(desc1)
rm(desc2)
rm(New_Masterid)
rm(Description)
f8fe58e7429ee203ed4ee00baef08a7c73fc16b4 | bbb686ee52e305f066b38178cb9d098f8d5c4c63 | /ui.R | b6bde70e9a6a2647d3ae3986a3b20e3635d83d80 | [] | no_license | multipass/epascore | 15ae958a0031b56b070538d9b8e10b4bf22d6a2e | 60d0d8ea5b33898e828b31edba089ab83ac75a80 | refs/heads/master | 2020-12-24T20:52:18.853946 | 2016-05-08T22:26:00 | 2016-05-08T22:26:00 | 58,307,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,204 | r | ui.R | library(shiny)
library(readr)
# Local copy of the EPA fuel-economy vehicles data.
# Source: http://www.fueleconomy.gov/feg/download.shtml
archive.loc <- "feg_vehicules.csv"
df <- read_csv(archive.loc)
# Categorical columns used for grouping/faceting in the UI; convert them
# all to factors in one pass.
factor.cols <- c("cylinders", "drive", "feScore", "fuelType",
                 "make", "VClass", "year", "startStop")
df[factor.cols] <- lapply(df[factor.cols], as.factor)
# Page layout: a sidebar holding the controls for three analyses and a main
# panel that renders the exploratory plot plus the two prediction summaries
# (output ids: explot, cmatrix1, cmatrix2 -- wired up in server.R).
shinyUI(pageWithSidebar(
  headerPanel(
    "Fuel Economy Vehicules Dataset (EPA)"
  ),
  sidebarPanel(
    # ---- Section 1: exploratory plot controls (facet grid, y-variable,
    # color grouping; factor columns of df populate the choices) ----
    h4("1. Data Exploration"),
    selectInput("facet_row",
                label=h5("Grid (Row)"),
                choices=c(None=".", names(df[sapply(df, is.factor)]))
    ),
    selectInput("facet_col",
                label=h5("Grid (Column)"),
                choices=c(None=".", names(df[sapply(df, is.factor)]))
    ),
    selectInput("datay",
                label=h5("Measurement"),
                choices=list("UCity","UHighway","co2TailpipeGpm")
    ),
    selectInput("color",
                label=h5("Color"),
                choices=c("None", names(df[sapply(df, is.factor)]))
    ),
    hr(),
    # ---- Section 2: within-year prediction (year + train/test split) ----
    h4("2. EPA Score Prediction (Same Year)"),
    tags$p("Run a rpart (CART) classification algorithm from the caret package on the data from the selected year. The dataset will be divided into a training and a prediction subsets using the selected percentage of data to be used. A prediction tree will be built using the training data subset, and the resulting classification tree will be run on the prediction data subset. The prediction statistics and confusion matrix will be displayed in the main panel."),
    selectInput("yr1",
                label=h5("Year"),
                choices=levels(df$year)
    ),
    sliderInput("pct",
                label=h5("Dataset % for model calibration"),
                min=10,
                max=90,
                value=60,
                step=5
    ),
    actionButton("runSameYr", "Show Results"),
    hr(),
    # ---- Section 3: across-year prediction (calibration year vs
    # application year) ----
    h4("3. EPA Score Prediction (Across Years)"),
    tags$p("Is the EPA score formula consistent over the year?. The idea is to calibrate a classification tree using the rpart (CART) algorithm from the caret package in R on a selected year, and apply it on another year. The prediction statistics and confusion matrix are displayed in the main panel."),
    selectInput("yr2_train",
                label=h5("Year for model calibration"),
                choices=levels(df$year)
    ),
    selectInput("yr2_test",
                label=h5("Year for prediction application"),
                choices=levels(df$year)
    ),
    actionButton("runAllYr", "Show Results"),
    tags$p("Note: The prediction statistics are not very good across years, but a more thorough anaylsis is needed before drawing any conclusion on the consistancy of the EPA score over the years."),
    hr(),
    # ---- Static documentation: variable glossary and references ----
    h4("Variables"),
    tags$div(
      tags$ul(
        tags$li("co2TailpipeGpm - tailpipe CO2 in grams/mile"),
        tags$li("cylinders - engine cylinders"),
        tags$li("displ - engine displacement in liters"),
        tags$li("drive - drive axle type"),
        tags$li("feScore - EPA Fuel economy score"),
        tags$li("fuelType - fuel type"),
        tags$li("make - manufacturer (division)"),
        tags$li("UCity - Unadjusted city MPG"),
        tags$li("UHighway - Unadjusted highway MPG"),
        tags$li("VClass - EPA vehicle size class"),
        tags$li("year - Model year"),
        tags$li("startStop - Vehicule has start/stop technology (Y/N)"),
        tags$li("lv4 - 4 door luggage volume (cubic feet)"),
        tags$li("pv4 - 4 door passenger volume (cubic feet)"),
        tags$li("am - Transmission (Automatic/Manual)")
      )
    ),
    h4("References"),
    tags$div(
      tags$ul(
        tags$li(a(href="http://www.fueleconomy.gov/feg/ws/index.shtml","Fuel Economy EPA dataset")),
        tags$li(a(href="http://caret.r-forge.r-project.org/","Caret Package R")),
        tags$li(a(href="https://en.wikipedia.org/wiki/Decision_tree_learning","CART (Classification and Regression Tree)")),
        tags$li(a(href="https://www.coursera.org/learn/data-products/home/welcome", "Coursera Data Science (Product Development) Homepage"))
      )
    )
  ),
  # ---- Main panel: one output slot per sidebar section ----
  mainPanel(
    h4("1. EPA Score Dataset Exploration"),
    plotOutput("explot"),
    hr(),
    h4("2. EPA Score Prediction (Within Year) Statistics"),
    verbatimTextOutput("cmatrix1"),
    hr(),
    h4("3. EPA Score Prediction (Across years) Statistics"),
    verbatimTextOutput("cmatrix2"),
    hr()
  )
))
e7d0231dd1ba79aef98b71f30e592dbda6a5c57b | 2b5895474a98cca1d0d41e7f44a21b28ac07aee6 | /ad_hoc_analysis/win_rate_x_goalkicker.R | c7f2b5e34b9308d36220c45c01ac37f833990b12 | [] | no_license | insightlane/score-progression | 7666c36ebdeaf1ac346e1da4982208ab92371a7a | ecf24455dcdb46ff149a3bbccaf33806bfb02755 | refs/heads/master | 2023-05-07T20:19:04.581541 | 2021-06-06T10:37:16 | 2021-06-06T10:37:16 | 288,646,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,694 | r | win_rate_x_goalkicker.R | library(extrafont)
library(dplyr)
library(ggplot2)
library(tidyr)
library(ggrepel)
# Win rate by the team kicking the nth goal of the game (2008-2017 AFL).
# One point per nth-goal value; point size encodes the number of games
# observed at that goal count.
score_progression_worm %>%
  mutate(totalgoals = Team1CumGoals + Team2CumGoals) %>%
  filter(Event == "G" & Team1 == Teamscore) %>%
  ##filter(Round == "EF" | Round == "QF") %>%
  group_by(totalgoals) %>%
  summarise(count = n(),
            count_win = sum(ifelse(Team1FinalMargin > 0, 1, 0)),
            winrate = count_win/count) %>%
  ggplot(aes(x = totalgoals, y = winrate)) +
  geom_point(aes(size = count), colour = "#f8d159") +
  # Fix: `weights` is not a stat_smooth() layer parameter, so the original
  # call silently fitted an UNWEIGHTED smoother; case weights must be
  # supplied through the `weight` aesthetic.
  stat_smooth(aes(weight = count), colour = "#1f77b4") +
  scale_y_continuous(labels = scales::percent) +
  coord_cartesian(ylim = c(0, 1)) +
  theme_minimal() +
  theme(plot.title = element_text(family = "Trebuchet MS", color="#555555", face = "bold", size = 28),
        plot.subtitle = element_text(family = "Trebuchet MS", color="#555555", size = 18),
        axis.text.x=element_text(size = 16, vjust = 0.5),
        axis.text.y=element_text(size = 16),
        legend.title =element_text(size = 16, face = "bold"),
        legend.text = element_text(size = 14),
        axis.title = element_text(size = 16, face = "bold"),
        plot.caption = element_text(size = 12)) +
  labs(title = "Win rate by team kicking nth goal of the game",
       subtitle = "Proportion of matches won by the team kicking the nth goal of each game| 2008-2017 AFL seasons",
       caption = "Source: AFL Tables",
       size = "No. games",
       y = "Proportion of matches won (%)", x = "nth goal of the game")
a2b973d44bbecc890c2a9048e143f0ef8c34dfec | 77157987168fc6a0827df2ecdd55104813be77b1 | /onlineforecast/R/rls_predict.R | 4726cef9299a42298536529b4d50490f71cea7d1 | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,393 | r | rls_predict.R | #' Use a fitted forecast model to predict its output variable with transformed data.
#'
#' See the ??ref(recursive updating vignette, not yet available).
#'
#' @title Prediction with an rls model.
#' @param model Onlineforecast model object which has been fitted.
#' @param datatr Transformed data.
#' @return The Yhat forecast matrix with a forecast for each model$kseq and for each time point in \code{datatr$t}.
#' @examples
#'
#' # Take data
#' D <- subset(Dbuilding, c("2010-12-15", "2011-01-01"))
#' D$y <- D$heatload
#' # Define a simple model
#' model <- forecastmodel$new()
#' model$add_inputs(Ta = "Ta", mu = "one()")
#' model$add_regprm("rls_prm(lambda=0.99)")
#'
#' # Before fitting the model, define which points to include in the evaluation of the score function
#' D$scoreperiod <- in_range("2010-12-20", D$t)
#' # And the sequence of horizons to fit for
#' model$kseq <- 1:6
#'
#' # Transform using the mdoel
#' datatr <- model$transform_data(D)
#'
#' # See the transformed data
#' str(datatr)
#'
#' # The model has not been fitted
#' model$Lfits
#'
#' # To fit
#' rls_fit(model=model, data=D)
#'
#' # Now the fits for each horizon are there (the latest update)
#' # For example the current parameter estimates
#' model$Lfits$k1$theta
#'
#' # Use the current values for prediction
#' D$Yhat <- rls_predict(model, datatr)
#'
#' # Plot it
#' plot_ts(D, c("y|Yhat"), kseq=1)
#'
#' # Recursive updating and prediction
#' Dnew <- subset(Dbuilding, c("2011-01-01", "2011-01-02"))
#'
#' for(i in 1:length(Dnew$t)){
#' # New data arrives
#' Dt <- subset(Dnew, i)
#' # Remember that the transformation must only be done once if some transformation
#' # which is has a state, e.g. lp(), is used
#' datatr <- model$transform_data(Dt)
#' # Update, remember that this must only be once for each new point
#' # (it updates the parameter estimates, i.e. model$Lfits)
#' rls_update(model, datatr, Dt$heatload)
#' # Now predict to generate the new forecast
#' print(rls_predict(model, datatr))
#' }
#'
#' @export
rls_predict <- function(model, datatr = NA) {
    # - model: the fitted model object (model$Lfits holds the latest RLS
    #   parameter estimates, one entry per horizon)
    # - datatr: is a datalist which holds the transformed inputs
    # Predict with the model for each horizon k
    Yhat <- sapply(model$kseq, function(k) {
        # Take the fit for k
        theta <- model$Lfits[[pst("k",k)]]$theta
        # Form the regressor matrix (one column per transformed input)
        X <- sapply(datatr, function(x) {
            x[, pst("k", k)]
        })
        # Catch if only one row, then X is vector, convert to matrix
        if (is.null(dim(X))) {
            X <- matrix(X, ncol = length(X), dimnames = list(NULL, nams(X)))
        }
        # The predictions (NA wherever a regressor is missing)
        yhat <- as.numeric(rep(NA, nrow(X)))
        # Rows with a complete set of regressors
        iOk <- which(apply(is.na(X), 1, sum) == 0)
        for (i in iOk) {
            x <- matrix(X[i, ])
            # Predict
            yhat[i] <- t(x) %*% theta
        }
        return(yhat)
    })
    if (is.null(dim(Yhat))) {
        Yhat <- matrix(Yhat, ncol = length(Yhat), dimnames = list(NULL, nams(Yhat)))
    }
    Yhat <- as.data.frame(Yhat)
    nams(Yhat) <- pst("k", model$kseq)
    # Maybe crop the output to the model's allowed output range
    if(!is.na(model$outputrange[1])){ Yhat[Yhat < model$outputrange[1]] <- model$outputrange[1] }
    # Fix: the upper-bound clip previously compared against outputrange[1]
    # (so values below the lower bound were set to the UPPER bound); clip
    # values above outputrange[2] instead.
    if(!is.na(model$outputrange[2])){ Yhat[Yhat > model$outputrange[2]] <- model$outputrange[2] }
    return(Yhat)
}
|
690b52c80176371fb89815b938e2fa2bff3875ba | 8a3635f03033b6f29a1572ccfd63c215e58ad31f | /03_R_100_knocks/Questions/Week-06.R | df0c6267d37f0f15af4da711f034a5bda67f9b5a | [] | no_license | rekkoba/100_knocks_challenge | 232ee3a58c7e6bb3a67721d17f2878d369d18d86 | 5e2155834f11d93ac849f4b959efaf5da0e95b87 | refs/heads/master | 2023-07-16T18:26:24.767125 | 2021-09-04T00:14:33 | 2021-09-04T00:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,688 | r | Week-06.R | # Week-06
# Exercise sheet: questions 51-60 of the R "100 knocks" data-wrangling drills.
getwd() # check the current working directory
setwd("C:/Users/iiats/OneDrive/デスクトップ/100本ノックチャレンジ/03_R_100_knocks/Questions/data")
getwd() # confirm the working directory changed
# Prepare the data frames
df <- read.csv("titanic3.csv", header=T)
df2 <- read.csv("data1.csv", header=T)
df3 <- read.csv("data1_2.csv", header=T)
df4 <- read.csv("data1_3.csv", header=T)
df5 <- read.csv("data2.csv", header=T, fileEncoding = "CP932")
# Data wrangling (questions 33 - 58)
# [51]
# Shuffle the rows of df, reset the row index, and display the result.
#print(ans[51]) # show the model answer
# [52]
# (1) Count the number of duplicated rows in df2.
# (2) Drop the duplicated rows from df2 and display df2.
# [53]
# Convert the name column of df to all upper case and display it.
# [54]
# Convert the name column of df to all lower case and display it.
# [55]
# Replace the word "female" in the sex column of df with "Python",
# then confirm that "female" in row 1 was replaced by "Python".
# [56]
# Remove "Elisabeth" from "Allen, Miss. Elisabeth Walton" in row 1 of the
# name column of df (the original sheet says: import re).
# [57]
# Join df5's prefecture and municipality columns with "_" so that no
# whitespace remains (new column name "test2") and show the first 5 rows.
# (df5's existing "test" column shows the result of a plain join.)
# [58]
# Transpose the rows and columns of df2 and display the result.
# Merge and concatenate (questions 59 - 65)
# [59]
# Left-join df3 onto df2 and store the result back in df2.
# [60]
# Right-join df3 onto df2 and store the result back in df2.
1f61afb98a90a541b80ca656d21f5cc8c9ae7d89 | f9e0b8d507319ae144d17729b520388137e27c35 | /CleanData/PerceptionAnalysis.R | b0104c0c24b5bfb9be63c3355c237741c7ce8822 | [] | no_license | meumesmo/Evaluation | 6381262ded042bff84c88ef3b3030919076df706 | a4ea35f9cd43fd97f09aa00ed04a552264769302 | refs/heads/master | 2021-01-11T23:01:00.402265 | 2017-03-20T11:37:16 | 2017-03-20T11:37:20 | 78,534,689 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,471 | r | PerceptionAnalysis.R | ##PERCEPTIONS ANALYSIS
##Install packages
# NOTE(review): install.packages()/install_github() calls inside an analysis
# script re-install on every run -- consider moving them to a setup script.
install.packages("devtools")
library(devtools)
## likert package (installed from GitHub); ggbiplot is used for the PCA plots
install_github('likert','jbryer')
library(ggbiplot)
##RUSSIA NUMERIC VALUES: LETI, KAI, OMSU, TVER, NNSU Only questions
# Stack the question columns (8:35) across all survey groups and drop
# incomplete responses.
Rupeold <- rbind(LETIpeold[,8:35], KAIpeoldfirst[,8:35], KAIpeoldsec[,8:35], OMSUpeold[,8:35], TVERpeoldfirst[,8:35],TVERpeoldsec[,8:35],NNSUpeold[,8:35])
Rupeold <- na.omit(Rupeold)
Rupoold <- rbind(LETIpoold[,8:35], KAIpooldfirst[,8:35], KAIpooldsec[,8:35], OMSUpoold[,8:35], TVERpooldfirst[,8:35], TVERpooldsec[,8:35], NNSUpoold[,8:35])
Rupoold <- na.omit(Rupoold)
Rupemo <- rbind(LETIpemo[,8:35], KAIpemofirst[,8:35], KAIpemosec[,8:35], OMSUpemo[,8:35], TVERpemofirst[,8:35], TVERpemosec[,8:35], NNSUpemo[,8:35])
Rupemo <- na.omit(Rupemo)
Rupomo <- rbind(LETIpomo[,8:35], KAIpomofirst[,8:35], KAIpomosec[,8:35], OMSUpomo[,8:35], TVERpomofirst[,8:35], TVERpomosec[,8:35], NNSUpomo[,8:35])
Rupomo <- na.omit(Rupomo)
##RUSSIA FACTOR VALUES: LETI, KAI, OMSU, TVER, NNSU
# Convert the first 27 question columns to ordered factors (levels 1..6) so
# likert::likert() treats them as Likert items.
Rupeoldfa <- Rupeold
for(col in 1:27){Rupeoldfa[,col] <- factor(Rupeoldfa[,col], levels = 1:6, ordered = TRUE)}
# NOTE(review): this uses only question N1, while the other groups below use
# columns 1:3 -- confirm the single-question selection is intended.
Rupeold1 <- likert::likert(as.data.frame(Rupeoldfa$N1), grouping = Rupeoldfa$univ)
plot(Rupeold1, ordered = FALSE, type = 'density')
Rupooldfa <- Rupoold
for(col in 1:27){Rupooldfa[,col] <- factor(Rupooldfa[,col], levels = 1:6, ordered = TRUE)}
Rupoold1 <- likert::likert(as.data.frame(Rupooldfa[,1:3]), grouping = Rupoold$univ)
plot(Rupoold1, ordered = FALSE)
plot(Rupoold1, type = 'density', ordered = FALSE)
Rupemofa <- Rupemo
for(col in 1:27){Rupemofa[,col] <- factor(Rupemofa[,col], levels = 1:6, ordered = TRUE)}
Rupemo1 <- likert::likert(as.data.frame(Rupemofa[,1:3]), grouping = Rupemo$univ)
plot(Rupemo1, ordered = FALSE)
plot(Rupemo1, type = 'density', ordered = FALSE)
# NOTE(review): the other groups copy the already-na.omit()-ed object; this
# one re-applies na.omit(). Harmless but inconsistent -- confirm.
Rupomofa <- na.omit(Rupomo)
for(col in 1:27){Rupomofa[,col] <- factor(Rupomofa[,col], levels = 1:6, ordered = TRUE)}
Rupomo1 <- likert::likert(as.data.frame(Rupomofa[,1:3]), grouping = Rupomo$univ)
plot(Rupomo1, ordered = FALSE)
plot(Rupomo1, type = 'density', ordered = FALSE)
##CHANGE THE DATA TO GET THE PCA GRAPHS
# Swap `Data` for a different group (Rupeold/Rupoold/Rupemo/Rupomo) to
# regenerate the PCA biplot for that group.
Data <- Rupomo
Questions <- Data[,1:27]
Questions.PCA <- prcomp(Questions, center = TRUE, scale = TRUE)
plot(Questions.PCA)
g <- ggbiplot(Questions.PCA, obs.scale = 1, var.scale = 1, groups = Data$univ, ellipse = TRUE)
g <- g + scale_color_discrete(name = '')
g <- g + theme(legend.direction = 'horizontal', legend.position = 'top')
print(g)
|
0ab7f327bc34db7c2331f2d9f9812cbd2d235c17 | 911ca0cc0da0e0405b6e7ba5765d5c8c25077810 | /1_summary.r | 7984854c5378710808c17c30453f9cf667395c09 | [] | no_license | sardormirzaev/-measuring-volatility-spillovers | de0d331c60c1a431ee05b892974f8c1e17523fce | 442fb958997440a7c1badff9cc88c8d9bbf83ac2 | refs/heads/master | 2022-08-29T00:20:08.932304 | 2020-05-31T07:41:48 | 2020-05-31T07:41:48 | 268,231,853 | 12 | 6 | null | null | null | null | UTF-8 | R | false | false | 2,165 | r | 1_summary.r | library(moments)
library(tseries)
library(reshape2)
#### table generator funtion for latex
plot.tab.tex <- function(data) {
  # Emit a LaTeX tabular for `data` via cat(): row names in the first
  # (left-aligned) column, one right-aligned column per data column.
  # (The original's separate odd/even row branches produced identical
  # output, so a single row branch is used here.)
  data <- as.matrix(data)
  n_col <- ncol(data)
  n_row <- nrow(data)

  # Preamble: one "l" column for row names, "r" for every data column.
  cat("\\begin{tabular}{l")
  for (k in seq_len(n_col)) cat("r")
  cat("} \n")

  # Header row: empty cell over the row-name column, then the column names.
  cat(" & ")
  for (k in seq_len(n_col)) {
    cat(colnames(data)[k])
    if (k < n_col) cat(" & ") else cat(" \\\\ \\hline \\hline\n")
  }

  # Body: one line per row, cells separated by " & ".
  for (r in seq_len(n_row)) {
    cat(rownames(data)[r])
    cat(" & ")
    for (k in seq_len(n_col)) {
      cat(data[r, k])
      if (k < n_col) cat(" & ") else cat(" \\\\ \n")
    }
  }

  cat("\\end{tabular}")
}
# --- 1_summary.r: summary statistics and volatility figures ---------------
# Please, set your working directory
wdir <- "C:/Users/49151/Desktop"
setwd(wdir)
# Please, re-run all codes for forex market volatility in all 3 R-files
# by changing here : read.csv("report2.csv")
DAT1<-read.csv("report1.csv")
# first column holds dates in day/month/year format (parsed but not used below)
DATE<- as.Date(as.character(DAT1[,1]),"%d/%m/%Y")
# |100 * series| of the remaining columns is used as the volatility series
Y <- abs(100*DAT1[,-1])
#View(Y)
colnames(Y)=c("Nasdaq","DowJones","EuroStox","FTSE100",
"CAC40", "DAX","HongKong", "Shanghai","Nikkei225",
"Allord","KSE", "Kospi", "SET","Sensex")
# change here for forex c("GBP","EUR","HKD","CNY","JPY","AUD", "THB", "INR","TRY")
### Table 1 SUMMARY STATISTICS: one column per market, one row per statistic
# (skewness/kurtosis come from the moments package loaded above)
tab1 = rbind(apply(Y,2,mean),
apply(Y,2,median),
apply(Y,2,max),
apply(Y,2,min),
apply(Y,2,sd),
apply(Y,2,skewness),
apply(Y,2,kurtosis))
rownames(tab1) = c("Mean","Median", "Max", "Min","Std.Dev","Skew","Kurt")#"Maximum","Minimum"
#plot.tab.tex(t(round(tab1,4)))
View(t(tab1))
#### FIGURES OF VOLATILITIES: two panels of stacked time-series plots
plotting<-ts(data=Y[,1:6])
plot(plotting, type="l", main="", col="grey20",xlab="",lwd=2)
plotting<-ts(data=Y[,7:14])#for forex change Y[,7:9]
plot(plotting, type="l", main="", col="grey20",xlab="",lwd=2)
|
7a705b1f335117e765c42cd704091b281664108a | 66ccb8012097d72c6bc3b0c24aae556b986c4b2c | /plot3.R | f0bb12e847862222d01d88d98556b0d11c1a31cb | [] | no_license | chahidazarkan/ExData_Plotting1 | 367297718c17073347f27fdd6bd7d701584ed321 | d45859703860b2316b61f4178aa87762d6583888 | refs/heads/master | 2020-12-11T08:07:12.259604 | 2016-03-11T14:45:01 | 2016-03-11T14:45:01 | 53,565,900 | 0 | 0 | null | 2016-03-10T07:54:48 | 2016-03-10T07:54:48 | null | UTF-8 | R | false | false | 1,577 | r | plot3.R | #load the libraries
# plot3.R -- downloads the UCI household power-consumption data, restricts
# it to 2007-02-01 / 2007-02-02 and draws the three sub-metering series
# into plot3.png.
library(dplyr)
library(lubridate)
# location of the zipped data set
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip"
# download into a temporary file that is removed again after reading
temp <- tempfile()
download.file(fileUrl, temp, method="curl")
# read the semicolon-separated table from inside the zip archive
hpc <- read.csv(unz(temp, "household_power_consumption.txt"), sep = ";")
unlink(temp)
# convert hpc to a table dataframe
hpc <- tbl_df(hpc)
# parse the day/month/year date strings
hpc$Date <- dmy(hpc$Date)
# keep only the columns and the two days needed for the plot
hpc <- select(hpc, Date, Time, Sub_metering_1, Sub_metering_2, Sub_metering_3)
hpc <- filter(hpc, Date>="2007-02-01" & Date<"2007-02-03")
# combine date and time into one POSIX timestamp
hpc$DateTime <- paste(hpc$Date, hpc$Time)
hpc$Time <- hms(hpc$Time)
# fixed: the original had a redundant double assignment here
# (hpc$DateTime <- hpc$DateTime <- ymd_hms(...))
hpc$DateTime <- ymd_hms(hpc$DateTime)
# the sub-metering columns may have been read as factors; force numeric
hpc$Sub_metering_1 <- as.numeric(as.character(hpc$Sub_metering_1))
hpc$Sub_metering_2 <- as.numeric(as.character(hpc$Sub_metering_2))
hpc$Sub_metering_3 <- as.numeric(as.character(hpc$Sub_metering_3))
# open the PNG device, draw the three series plus a legend, close the device
png(filename="plot3.png", bg="transparent")
plot(y=hpc$Sub_metering_1, x=hpc$DateTime, type="l", xlab=" ", ylab="Energy sub metering", col="black")
lines(y=hpc$Sub_metering_2, x=hpc$DateTime, type="l", col="red")
lines(y=hpc$Sub_metering_3, x=hpc$DateTime, type="l", col="blue")
legend("topright", legend=names(hpc[,3:5]), lty=c(1,1), col=c("black", "red", "blue"))
dev.off()
bc053e84c9b64530a381120f95c337d203bb510e | 7a29163ac6095d3edac155de544a58b8dd23f07c | /plot4.R | 393506e62e75403d6867b76ef5b56779c4dc8b99 | [] | no_license | suppletree/ExData_Plotting1 | c5a3460882bab085895cea97201d1966c261c7e5 | edbd7dc9de8d072bc8eb961e9ec45569cac55c33 | refs/heads/master | 2021-01-16T20:32:29.835350 | 2016-02-28T05:39:27 | 2016-02-28T05:39:27 | 52,700,604 | 0 | 0 | null | 2016-02-28T02:26:14 | 2016-02-28T02:26:13 | null | UTF-8 | R | false | false | 1,699 | r | plot4.R | ## setwd("~/cr3wk1")
## plot4.R -- draws a 2x2 panel of power-consumption plots into plot4.png.
## setwd("~/cr3wk1")
## Read data and set "?" as NA
pconsumeData <- read.table("./household_power_consumption.txt", sep = ";", na.strings = "?", header = TRUE)
## Subset the dataset to only 2007-02-01 and 2007-02-02
pconsumeData <- subset(pconsumeData, Date %in% c("1/2/2007","2/2/2007"))
## Create a "DateTime" column by pasting the date and time strings
pconsumeData$DateTime <- paste(pconsumeData$Date, pconsumeData$Time)
## Parse the combined string into a POSIXlt date-time
pconsumeData$DateTime <- strptime(pconsumeData$DateTime,"%d/%m/%Y %H:%M:%S")
## Remove Date & Time columns - we don't need them anymore
pconsumeData <- pconsumeData[,!(names(pconsumeData) %in% c("Date","Time"))]
## Make plot and save it to PNG (480x480, four panels)
png(filename="plot4.png",width=480,height=480)
par(mfrow = c(2, 2))
## 1st Plot: global active power over time
plot(pconsumeData$DateTime, pconsumeData$Global_active_power, type="l", ylab = "Global Active Power (Kilowatts)", xlab="")
## 2nd Plot: voltage over time
plot(pconsumeData$DateTime, pconsumeData$Voltage, type="l", ylab = "Voltage", xlab="datetime")
## 3rd Plot: the three sub-metering series overlaid on empty axes
with( pconsumeData,
{ plot(pconsumeData$DateTime, Sub_metering_1, type = "n", xlab="", ylab="Energy sub metering")
lines(pconsumeData$DateTime, Sub_metering_1, col="black")
lines(pconsumeData$DateTime, Sub_metering_2, col="red")
lines(pconsumeData$DateTime, Sub_metering_3, col="blue")
}
)
legend("topright", lty = 1, col = c("black","red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
## 4th Plot: global reactive power over time
plot(pconsumeData$DateTime, pconsumeData$Global_reactive_power, type="l", ylab = "Global_reactive_power", xlab="datetime")
dev.off()
|
0cd6cd02c219afd19def834b6a8042fa5a224df4 | 4cf6b39f7078a8ae7791d0334eb0f3edcd80750c | /R/linux_clipboard.R | 96416e8321adbc094a51fd40a107f2e498831b3d | [] | no_license | lmmx/clipr | a8573d5dbd43cdccdd502d848fa7a23f571372cc | f70db7a9456a0e131296a5b23ed4bb9501892b20 | refs/heads/master | 2021-01-15T17:37:01.659399 | 2015-08-31T01:39:15 | 2015-08-31T01:39:15 | 41,516,479 | 1 | 0 | null | 2015-08-28T02:37:57 | 2015-08-27T23:32:42 | R | UTF-8 | R | false | false | 2,077 | r | linux_clipboard.R | # Function to stop the read/write and return an error of missing clipboard software.
# Abort with an informative error when no supported clipboard utility is
# installed; call. = FALSE keeps the internal call out of the message.
notify_no_cb <- function() {
  msg <- paste0(
    "Clipboard on Linux requires 'xclip' (recommended) or 'xsel'. ",
    "Try using:\nsudo apt-get install xclip"
  )
  stop(msg, call. = FALSE)
}
# Helper function to read from the Linux clipboard
#
# Requires the Linux utility 'xclip' or 'xsel'. This function will stop with an error if neither is found.
# Adapted from: https://github.com/mrdwab/overflow-mrdwab/blob/master/R/readClip.R
# and: https://github.com/jennybc/reprex/blob/master/R/clipboard.R
# Read the current Linux clipboard contents as a character vector, one
# element per line.  Prefers 'xclip'; falls back to 'xsel'; errors via
# notify_no_cb() when neither utility is on the PATH.
linux_read_clip <- function() {
  if (Sys.which("xclip") != "") {
    clip_con <- pipe("xclip -o -selection clipboard")
  } else if (Sys.which("xsel") != "") {
    clip_con <- pipe("xsel --clipboard")
  } else {
    notify_no_cb()
  }
  # keep empty lines and suppress scan()'s progress chatter
  clip_lines <- scan(
    clip_con,
    what = character(),
    sep = "\n",
    blank.lines.skip = FALSE,
    quiet = TRUE
  )
  close(clip_con)
  clip_lines
}
# Helper function to write to the Linux clipboard
#
# Requires the Linux utility 'xclip' or 'xsel'. This function will stop with an error if neither is found.
# Adapted from https://github.com/mrdwab/overflow-mrdwab/blob/master/R/writeClip.R
#
# Targets "primary" and "clipboard" clipboards if using xclip, see: http://unix.stackexchange.com/a/69134/89254
# Write `content` to the Linux clipboard.
#
# content - object to place on the clipboard (flattened to one string via
#           the package-internal flat_str()).
# wc.opts - list of options: $sep (line separator, defaults to "\n") and
#           $eos (terminator passed to writeChar; NULL = no terminator).
#
# Uses xclip when available (targeting both the "primary" and "clipboard"
# selections), otherwise xsel; stops with an informative error when neither
# utility is installed.  Returns the flattened string that was written.
linux_write_clip <- function(content, wc.opts) {
  if (Sys.which("xclip") != "") {
    con <- pipe("xclip -i -sel p -f | xclip -i -sel c", "w")
  } else if (Sys.which("xsel") != "") {
    con <- pipe("xsel -b", "w")
  } else {
    notify_no_cb()
  }
  # close the pipe even if flat_str()/writeChar() fails (the original
  # leaked the connection on error)
  on.exit(close(con), add = TRUE)
  # plain if/else rather than scalar ifelse(): a caller-supplied separator
  # is used as-is instead of being truncated to its first element
  sep <- if (is.null(wc.opts$sep)) '\n' else wc.opts$sep
  # NULL eos means writeChar() appends no terminator character
  eos <- wc.opts$eos
  content <- flat_str(content, sep)
  writeChar(content, con = con, eos = eos)
  return(content)
}
|
ce4544638797e2c3e4de906052837c2813e40af5 | d171f25da232e963dd938b64b5159aab13c32da2 | /Keloid_main/rna_seq_qa_2.R | 0aacfb03f6560053709789d4195f5997a3cebfb7 | [
"MIT"
] | permissive | uhkniazi/BRC_Keloid | bac8b972a5def699da2afa676026aa51b48958e1 | 57032328576d24948a611f5c6fe1ee7193cd5b05 | refs/heads/master | 2020-04-12T06:23:18.075595 | 2017-11-23T11:34:13 | 2017-11-23T11:34:13 | 65,537,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,512 | r | rna_seq_qa_2.R | # File: rna_seq_qa_2.R
# Auth: uhkniazi
# DESC: quality checks on the rna-seq data
# Date: 23/08/2016
## set variables and source libraries
# remember the project root so we can return to it after the fastq work
gcswd = getwd()
setwd('Keloid_main/')
library(downloader)
# fetch the CFastqQuality class definition, source it, then delete the copy
url = 'https://raw.githubusercontent.com/uhkniazi/CFastqQuality/master/CFastqQuality.R'
download(url, 'CFastqQuality.R')
# load the required packages
source('CFastqQuality.R')
# delete the file after source
unlink('CFastqQuality.R')
## connect to mysql database to get sample information
library('RMySQL')
##### connect to mysql database to get samples
# NOTE(review): database credentials are hard-coded here; acceptable for a
# local analysis box but consider an option file / environment variable
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)
dbListFields(db, 'Sample')
# another way to get the query, preferred
dfSample = dbGetQuery(db, "select title, group1, group2 from Sample where idData=2;")
# close connection after getting data
dbDisconnect(db)
# remove any whitespace from the names
dfSample$title = gsub(" ", "", dfSample$title, fixed = T)
#### get the names of the fastq files for first sequencing run
setwd('Data_external/keloid1')
csFiles = list.files('.', pattern = '*.gz')
# remove the index files from the list (names containing '_I')
i = grep('_I', csFiles)
head(csFiles[i])
# quick sanity check of the sizes (MB) of the files being dropped
i2 = file.size(csFiles[i])
head(i2)/1e6
summary(i2/1e6)
csFiles = csFiles[-i]
# remove undetermined
i = grep('Undetermined', csFiles)
head(csFiles[i])
i2 = file.size(csFiles[i])
head(i2)/1e6
summary(i2/1e6)
csFiles = csFiles[-i]
# split samples by lane (third '_'-separated token of the file name)
fLane = strsplit(csFiles, '_')
fLane = sapply(fLane, function(x) x[3])
# split by samples (first '_'-separated token)
fSamples = strsplit(csFiles, '_')
fSamples = sapply(fSamples, function(x) x[1])
table(fLane, fSamples)
# add sample names and file location
# every fastq sample name should have a matching database title
f = fSamples %in% dfSample$title
table(f)
i = match(fSamples, dfSample$title)
dfFiles = data.frame(fSamples, title=dfSample$title[i], files=csFiles, fLane)
## Build a CFastqQuality object for one fastq file.
##
## fls   - fastq file name (relative to indir)
## indir - directory containing the fastq file
## title - title stored in the quality object and echoed on completion
##
## Returns the CFastqQuality object.  The working directory is switched to
## indir while the object is built; on.exit() restores it even when
## CFastqQuality() throws (the original left the directory changed on error).
write.qa = function(fls, indir, title){
  wd = getwd()
  on.exit(setwd(wd), add = TRUE)
  setwd(indir)
  ob = CFastqQuality(fls, title)
  cat(paste('done', title, '\n'))
  return(ob)
}
# run the quality analysis for every fastq file listed in dfFiles
n = 1:nrow(dfFiles) # NOTE(review): seq_len(nrow(dfFiles)) would be safer for an empty table
lOb = lapply(n, function(x) {
write.qa(as.character(dfFiles[x,'files']), indir=getwd(), title=paste(dfFiles[x,'title'], dfFiles[x, 'fLane']))
})
# back to the project root and store the list of quality objects
setwd(gcswd)
dir.create('Keloid_main/Objects')
# NOTE(review): save() writes an RData-style file despite the .rds name, so
# the matching loader must use load(), not readRDS() -- confirm downstream
save(lOb, file='Keloid_main/Objects/CFastqQuality.object.s021.rds')
getwd()
# one quality-by-cycle plot per sample, two panels per pdf page
pdf(file='Keloid_main/Results/qa.quality.cycle.s021.pdf')
par(mfrow=c(2,1))
temp = sapply(lOb, plot.qualitycycle)
dev.off(dev.cur())
|
ab2ba60e2c2935260d7c43d295ea9671c907143f | a18c2a7cf79b96fd50d45dab7493a482d37eddb0 | /data/cellrangerRkit/man/load_cellranger_analysis_results.Rd | b36e789a6fe761edab4bf1dca3cd413adedae83a | [
"MIT"
] | permissive | buenrostrolab/10x_scRNAseq | b6514c07873ae2a9c8959498234958fb833db568 | 8e65ceffd8a7186d0c81b159e6b316bc2bfdc6bf | refs/heads/master | 2021-01-11T01:53:54.856901 | 2016-11-21T03:41:37 | 2016-11-21T03:41:37 | 70,646,869 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 878 | rd | load_cellranger_analysis_results.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{load_cellranger_analysis_results}
\alias{load_cellranger_analysis_results}
\title{Load Cell Ranger secondary analysis results}
\usage{
load_cellranger_analysis_results(pipestance_path)
}
\arguments{
\item{pipestance_path}{Path to the output directory produced by Cell Ranger}
}
\value{
A list containing: \itemize{
\item pca - pca projection
\item tsne - tsne projection
\item kmeans - kmeans results for various values of K
}
}
\description{
Load Cell Ranger secondary analysis results
}
\examples{
\dontrun{
analysis <- load_cellranger_analysis_results("/home/user/cellranger_output")
# Plot t-SNE projection
plot(analysis$tsne$TSNE.1, analysis$tsne$TSNE.2)
# Color by kmeans with K=4
plot(analysis$tsne$TSNE.1, analysis$tsne$TSNE.2, col=analysis$kmeans[["2_clusters"]]$Cluster)
}
}
|
d7d40aed6e0448c451bde0a1f3d34f0061e59eec | a4bf8ea2ca052a6ebaa8d32ea427eb4f747e3b67 | /R/getWM.R | fa08ffea1cc3aa0fcc58f29d57584f5e9fe41a2e | [] | no_license | cran/edl | 941498a8f2b8d8df00ffa7c317d707ff25c189f9 | c40160df056e31bfafda4197405bddf89158d4b4 | refs/heads/master | 2023-08-11T09:11:13.818275 | 2021-09-20T06:40:05 | 2021-09-20T06:40:05 | 357,249,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,208 | r | getWM.R | #' Retrieve all cues from a vector of text strings.
#'
#' @export
#' @param wmlist A list with weightmatrices for each learning event,
#' generated by \code{\link{RWlearning}}.
#' @param event Numeric: for which event to return the weight matrix.
#' Defaults to NULL, which wil return the last weight matrix.
#' @return A matrix with connection weights between cues (rows) and outcomes.
#' @author Jacolien van Rij
#' @seealso \code{\link{RWlearning}}, \code{\link{getWeightsByCue}},
#' \code{\link{getWeightsByOutcome}}
#' @examples
#' # load example data:
#' data(dat)
#'
#' # add obligatory columns Cues, Outcomes, and Frequency:
#' dat$Cues <- paste("BG", dat$Shape, dat$Color, sep="_")
#' dat$Outcomes <- dat$Category
#' dat$Frequency <- dat$Frequency1
#' head(dat)
#' dim(dat)
#'
#' # now use createTrainingData to sample from the specified frequencies:
#' train <- createTrainingData(dat)
#'
#' # this training data can actually be used train network:
#' wm <- RWlearning(train)
#'
#' # final weight matrix:
#' getWM(wm)
#' # ... which is the same as:
#' wm[[length(wm)]]
#' # 25th learning event:
#' getWM(wm, event=25)
#' # ... which is the same as:
#' wm[[25]]
#'
getWM <- function(wmlist, event=NULL){
    # wmlist must be the list of weight matrices produced by RWlearning()
    if(!is.list(wmlist)){
        stop("Argument wmlist should specify a list with weight matrices.")
    }
    # No specific event requested: return the final weight matrix.
    # (Moved before the getCues/getOutcomes calls so the common case does
    # not pay for two full passes over wmlist.)
    if(is.null(event)){
        return(wmlist[[length(wmlist)]])
    }
    # Out-of-range event: warn and fall back to the last weight matrix.
    # (Fixed: the sprintf now uses event[1], matching the comparison above,
    # instead of the whole event vector.)
    if(event[1] > length(wmlist)){
        warning(
            sprintf("wmlist contains only %d learning events. The value of event (%d) is ignored, and the last weight matrix is returned.",
                length(wmlist), event[1]))
        return(wmlist[[length(wmlist)]])
    }
    # Full sets of cues / outcomes seen across all learning events; only
    # needed for the padding below.
    cues <- getCues(wmlist)
    outcomes <- getOutcomes(wmlist)
    out <- wmlist[[event[1]]]
    # Cues / outcomes that only appear in later learning events: pad the
    # matrix with zero rows / columns so all events share one dimension.
    add.cues <- cues[!cues %in% rownames(out)]
    add.out <- outcomes[!outcomes %in% colnames(out)]
    if(length(add.cues)>0){
        r.out <- c(rownames(out), add.cues)
        out <- rbind(out, matrix(0, nrow=length(add.cues), ncol=ncol(out)))
        rownames(out) <- r.out
    }
    if(length(add.out)>0){
        c.out <- c(colnames(out), add.out)
        out <- cbind(out, matrix(0, nrow=nrow(out), ncol=length(add.out)))
        colnames(out) <- c.out
    }
    return(out)
}
|
7eaed443734730c76b00b3b18d061ccf0be50a01 | bb7a9c7a631393e10a83ccc55febf06526f0265a | /sentiment_analysis/visualisation.R | 6976e1d16e841ab9b685331ebe94958bb9591350 | [] | no_license | thalvari/SpamReviewGenerator | ebe8442104c38b7aeaf2570ed9c371dbd0fa1ce8 | e5dad6fe5a13e898bdc15cd9a09ad972c3d5c571 | refs/heads/master | 2020-03-28T23:09:09.999711 | 2018-10-29T21:34:31 | 2018-10-29T21:34:31 | 149,278,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,118 | r | visualisation.R | library(tidytext)
library(dplyr)
library(stringr)
library(ggplot2)
library(gridExtra)
#the input file should have all 6 different review categories in it and the columns should
#consist of 'rating', 'text' and 'category', rating should have a numeric rating of a review
#'text' should contain the review text itself, and 'category' should be a string, see examples used below
#copy or remove code from below when necessary
reviews <- read.csv(file="cleanedagain2.csv")
reviews$text <- as.character(reviews$text)
# group_by() followed immediately by ungroup() leaves the data unchanged;
# kept as written, apparently to document the intended grouping variable
reviewsbycategory <- reviews %>%
group_by(category) %>%
ungroup()
# one row per token: split each review text into individual words
tidy_reviews <- reviewsbycategory %>%
unnest_tokens(word,text)
# drop common stop words before any counting
data(stop_words)
tidy_reviews <- tidy_reviews %>%
anti_join(stop_words)
#most common words in each review category
# Each stanza: top-10 most frequent words for one category, shown as a
# flipped bar chart; the six charts are arranged real-vs-generated below.
# NOTE(review): the temporaries a..f shadow base functions such as c();
# harmless for the calls made here, but worth renaming.
a <- tidy_reviews %>%
filter(category == "generated cellphone & accessory reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pa <- ggplot(a, aes(word, n)) + geom_col(fill = 'blue') + coord_flip() + labs(title ="generated cellphone & accessory reviews")
b <- tidy_reviews %>%
filter(category == "generated hotel reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pb <- ggplot(b, aes(word, n)) + geom_col(fill = 'red') + coord_flip() + labs(title = "generated hotel reviews")
c <- tidy_reviews %>%
filter(category == "generated marvel movie reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pc <- ggplot(c, aes(word, n)) + geom_col(fill = 'green') + coord_flip() + labs(title = "generated marvel movie reviews")
d <- tidy_reviews %>%
filter(category == "real hotel reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pd <- ggplot(d, aes(word, n)) + geom_col(fill = 'magenta') + coord_flip() + labs(title = "real hotel reviews")
e <- tidy_reviews %>%
filter(category == "real marvel movie reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pe <- ggplot(e, aes(word, n)) + geom_col(fill = 'yellow') + coord_flip() + labs(title = "real marvel movie reviews")
f <- tidy_reviews %>%
filter(category == "real cellphone & accessories reviews") %>%
count(word, sort = T) %>%
top_n(10) %>%
mutate(word = reorder(word, n))
pf <- ggplot(f, aes(word, n)) + geom_col(fill = 'cyan') + coord_flip() + labs(title = "real cellphone & accessories reviews")
# generated and real charts for the same domain are placed side by side
grid.arrange(pa,pf,pb,pd,pc,pe)
#most positive and negative words using bing sentiments
# For each category: join tokens to the bing lexicon, count words per
# sentiment, and plot the top-10 positive and negative words in facets.
gencellposneg <- tidy_reviews %>%
filter(category == "generated cellphone & accessory reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
a <- gencellposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pa <- ggplot(a, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "generated cellphone & accessory reviews") + coord_flip()
genhotelposneg <- tidy_reviews %>%
filter(category == "generated hotel reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
b <- genhotelposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pb <- ggplot(b, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "generated hotel reviews") + coord_flip()
genmarvelposneg <- tidy_reviews %>%
filter(category == "generated marvel movie reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
c <- genmarvelposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pc <- ggplot(c, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "generated marvel reviews") + coord_flip()
marvelposneg <- tidy_reviews %>%
filter(category == "real marvel movie reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
d <- marvelposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pd <- ggplot(d, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "real marvel reviews") + coord_flip()
hotelposneg <- tidy_reviews %>%
filter(category == "real hotel reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
e <- hotelposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pe <- ggplot(e, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "real hotel reviews") + coord_flip()
cellposneg <- tidy_reviews %>%
filter(category == "real cellphone & accessories reviews") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = T) %>%
ungroup()
f <- cellposneg %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word,n))
pf <- ggplot(f, aes(word, n, fill=sentiment)) + geom_col() + facet_wrap(~sentiment, scales = "free_y")+ labs(title = "real cellphone & accessory reviews") + coord_flip()
# generated and real charts for the same domain are placed side by side
grid.arrange(pa,pf,pb,pe,pc,pd)
#td_idf for review types
# tf-idf highlights words that are distinctive for a review category (and,
# further below, for a star rating within one category) rather than merely
# frequent overall.
review_words <- reviewsbycategory %>%
unnest_tokens(word,text) %>%
count(category, word, sort = T) %>%
ungroup()
review_words <- review_words %>%
bind_tf_idf(word, category, n)
review_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(category) %>%
top_n(10) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = category)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~category, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the generated cellphone reviews
gencell <- reviewsbycategory %>%
filter(category == "generated cellphone & accessory reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
gencell <- gencell %>%
bind_tf_idf(word, rating, n)
gencell %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf", label = "generated cellphone & accessory reviews") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the real cellphone reviews
realcell <- reviewsbycategory %>%
filter(category == "real cellphone & accessories reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
realcell <- realcell %>%
bind_tf_idf(word, rating, n)
realcell %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the generated hotel reviews
genhotel <- reviewsbycategory %>%
filter(category == "generated hotel reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
genhotel <- genhotel %>%
bind_tf_idf(word, rating, n)
genhotel %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the real hotel reviews
realhotel <- reviewsbycategory %>%
filter(category == "real hotel reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
realhotel <- realhotel %>%
bind_tf_idf(word, rating, n)
realhotel %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the generated marvel reviews
genmarvel <- reviewsbycategory %>%
filter(category == "generated marvel movie reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
genmarvel <- genmarvel %>%
bind_tf_idf(word, rating, n)
genmarvel %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
# per-rating tf-idf within the real marvel reviews
realmarvel <- reviewsbycategory %>%
filter(category == "real marvel movie reviews") %>%
unnest_tokens(word,text) %>%
count(rating, word, sort = T) %>%
ungroup()
realmarvel <- realmarvel %>%
bind_tf_idf(word, rating, n)
realmarvel %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(rating) %>%
top_n(6) %>%
ungroup() %>%
ggplot(aes(word, tf_idf, fill = rating)) +
geom_col(show.legend = F) +
labs(x = NULL, y = "tf_idf") +
facet_wrap(~rating, ncol = 2, scales = "free") +
coord_flip()
#sentiment barcharts
# All twelve data frames below followed the same copy-pasted recipe: take the
# tokenized reviews for one category, keep only clearly negative (rating < 3)
# or clearly positive (rating > 3) reviews, join each word against the NRC
# emotion lexicon, and count how often every NRC sentiment occurs.
# The helper replaces the twelve duplicated pipelines; output is identical.
#
# review_category: exact `category` string to keep from tidy_reviews.
# positive: TRUE keeps rating > 3, FALSE keeps rating < 3
#           (rating == 3 reviews are deliberately excluded from both groups).
# Returns a data frame with columns `sentiment` (factor, ordered by count
# for plotting) and `n`.
count_nrc_sentiments <- function(review_category, positive) {
  reviews <- tidy_reviews %>%
    filter(category == review_category)
  reviews <- if (positive) {
    filter(reviews, rating > 3)
  } else {
    filter(reviews, rating < 3)
  }
  reviews %>%
    inner_join(get_sentiments("nrc")) %>%
    count(sentiment, sort = TRUE) %>%
    mutate(sentiment = reorder(sentiment, n)) %>%
    ungroup()
}
marvelneg <- count_nrc_sentiments("real marvel movie reviews", FALSE)
marvelpos <- count_nrc_sentiments("real marvel movie reviews", TRUE)
marvelneg2 <- count_nrc_sentiments("generated marvel movie reviews", FALSE)
marvelpos2 <- count_nrc_sentiments("generated marvel movie reviews", TRUE)
hotelneg <- count_nrc_sentiments("real hotel reviews", FALSE)
hotelpos <- count_nrc_sentiments("real hotel reviews", TRUE)
hotelneg2 <- count_nrc_sentiments("generated hotel reviews", FALSE)
hotelpos2 <- count_nrc_sentiments("generated hotel reviews", TRUE)
cellneg <- count_nrc_sentiments("real cellphone & accessories reviews", FALSE)
cellpos <- count_nrc_sentiments("real cellphone & accessories reviews", TRUE)
# Note: the generated cellphone category really is labelled with the singular
# "accessory" in the data, unlike the real one.
cellneg2 <- count_nrc_sentiments("generated cellphone & accessory reviews", FALSE)
cellpos2 <- count_nrc_sentiments("generated cellphone & accessory reviews", TRUE)
# One horizontal bar chart of NRC sentiment counts per review group.
# A small plot factory keeps the twelve otherwise-identical definitions to a
# line each; colours group the product domain (marvel / hotel / cellphone).
sentiment_bar <- function(counts, bar_colour, chart_title) {
  ggplot(counts, aes(sentiment, n)) +
    geom_col(fill = bar_colour) +
    coord_flip() +
    labs(title = chart_title)
}
pa11 <- sentiment_bar(marvelneg, 'blue', "real negative marvel reviews")
pa12 <- sentiment_bar(marvelpos, 'blue', "real positive marvel reviews")
pa21 <- sentiment_bar(marvelneg2, 'blue', "generated negative marvel reviews")
pa22 <- sentiment_bar(marvelpos2, 'blue', "generated positive marvel reviews")
pb11 <- sentiment_bar(hotelneg, 'cyan', "real negative hotel reviews")
pb12 <- sentiment_bar(hotelpos, 'cyan', "real positive hotel reviews")
pb21 <- sentiment_bar(hotelneg2, 'cyan', "generated negative hotel reviews")
pb22 <- sentiment_bar(hotelpos2, 'cyan', "generated positive hotel reviews")
pc11 <- sentiment_bar(cellneg, 'lightblue', "real negative cellphone & accessories reviews")
pc12 <- sentiment_bar(cellpos, 'lightblue', "real positive cellphone & accessories reviews")
pc21 <- sentiment_bar(cellneg2, 'lightblue', "generated negative cellphone & accessories reviews")
pc22 <- sentiment_bar(cellpos2, 'lightblue', "generated positive cellphone & accessories reviews")
# 3 domains x (real/generated) x (neg/pos) laid out 4 charts per row.
grid.arrange(pa11, pa12, pa21, pa22, pb11, pb12, pb21, pb22, pc11, pc12, pc21, pc22, ncol = 4)
|
bb094b75550f761cc387cf1cc426f5bfa8e75bec | 1cd2b7d5a6af6717509c2a2ad7a6c07b707f6900 | /2021/laboratorios/LAB02/utiles/chi2test_multiples_atributos.R | b870e562fe61818ab5ade16bd21381cb7ddb372e | [] | no_license | dm-uba/dm-uba.github.io | 38531b23957d25e5295ab79f588a7ade94191b76 | 9ae84d6be651333aee5cac823e2784c8d8d6f492 | refs/heads/master | 2023-06-11T01:43:27.390649 | 2021-07-10T01:17:59 | 2021-07-10T01:17:59 | 343,927,914 | 2 | 10 | null | null | null | null | UTF-8 | R | false | false | 517 | r | chi2test_multiples_atributos.R | data = data.original
# Pairwise chi-squared tests of independence between every pair of factor
# columns in `data` (copied from data.original just above this loop).
# seq_len() replaces the original 1:(ncol(data)-1): when `data` has fewer
# than two columns, 1:0 would iterate over c(1, 0) and index column 0,
# whereas seq_len(0) correctly skips the loop body entirely.
for (i in seq_len(ncol(data) - 1)) {
  if (is.factor(data[, i])) {
    cat('Es factor: ', names(data)[i], '.\n', sep = "")
    # Only columns to the right of i, so every pair is tested exactly once.
    for (j in (i + 1):ncol(data)) {
      if (is.factor(data[, j])) {
        cat('Vamos a calcular chisqtest entre: ', names(data)[i], ' y ', names(data)[j], '.\n', sep = "")
        # Contingency table of the two factors feeds the chi-squared test.
        tbl_cont <- table(data[, i], data[, j])
        calculo <- chisq.test(tbl_cont)
        cat('CHI2 ', names(data)[i], '-', names(data)[j], ': ', calculo$statistic, '\n', sep = "")
      }
    }
  }
}
|
0fbeb3eec6e971c0fc0aebd2b7fe9da8e484bbd0 | e9f1f3b80d0978c3377997e8abed43230cb6380e | /R/listToParams.R | 1d091651e297414b217ae4623cbe0fb1a537ff84 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | fcocquemas/rtumblr | d3faed61a3a4d14e0f8305a6efd86af312d4c7cf | c80cd6ed0392dfc76d9d7f4b163846bb185d4180 | refs/heads/master | 2021-01-23T04:10:05.901064 | 2012-10-11T11:44:16 | 2012-10-11T11:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 573 | r | listToParams.R | #' Collapse a single-level list to URL parameters
#'
#' Encodes each name/value pair of \code{par} as \code{name=value} -- both
#' sides percent-encoded, including reserved URL characters such as
#' \code{&}, \code{=} and \code{/} -- and joins the pairs with \code{&}.
#' Entries whose value is \code{NULL} are dropped. Values are expected to be
#' length-one; they are coerced with \code{as.character()}.
#'
#' @param par list of parameters
#' @return character vector of URL parameters (a single string; \code{""}
#'   when \code{par} is empty or every value is \code{NULL})
listToParams <- function(par) {
  if (length(par) == 0) {
    return("")
  }
  pieces <- vapply(seq_along(par), function(n) {
    value <- par[[n]]
    if (is.null(value)) {
      # NULL marks an omitted parameter; filtered out below.
      ""
    } else {
      # BUG FIX: URLencode() defaults to reserved = FALSE, which leaves
      # reserved characters ("&", "=", "?", ...) unescaped -- a value
      # containing "&" would corrupt the whole query string. reserved = TRUE
      # percent-encodes them.
      paste(URLencode(names(par)[n], reserved = TRUE),
            URLencode(as.character(value), reserved = TRUE),
            sep = "=")
    }
  }, character(1))
  paste(pieces[pieces != ""], collapse = "&")
}
|
81e8cfaf71d8b9101d1266545a2a1bc5813cf4d0 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/NAM/inst/testfiles/emBL/emBL_output/log_3dfa95c4118fdc8e3ae8e28437698d9ba853e18f/emBL-test.R | 2c09d04d2ebb77fc9926fd3b97e28fe64ac8d0a9 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,835 | r | emBL-test.R | testlist <- list(alpha = -1.86578883789805e+256, gen = structure(c(3.51717968818147e+276, 1.26217642061503e+303, 2.16951552437276e-50, 1.42775424140385e+29, 7.43166794673685e+217, 5.50634782850172e+131, 1.99823208614144e-124, 4.48183848947735e+207, 2.01264058748702e+121, 6.01202720326984e-103, 6.83795161186921e+36, 6.19740298143953e+117, 1.44343468698267e+271, 4.9776595511806e-210, 1.77107973819217e-172, 2.90009538955293e-09, 1.50698076959833e+22, 1.24055326796773e-271, 4.11735378552045e+108, 1.54268570181245e-217, 8.54031407174272e-106, 3.35699821123612e-185, 8.09536017270156e+97, 7.18289777632381e+62, 2.28396648819175e+120, 1.84995912403343e+122, 1.77647323289718e+164, 1.11207856132166e+282, 2.61231586356277e-79, 2.19228414697475e+189, 9.58130410691397e+174, 2.56065073878455e+238, 1.52597819328389e-304, 6.36911083531343e+149, 2.01597930796942e+62, 2.42333161688226e-144, 9.89003393243781e-235, 2.23406757584783e+198, 8.9255257934965e+289, 4.31010023012535e+162, 7.95127118488583e+102, 1.3627111658508e-214, 1.97337327655264e+176, 1.18812726576203e-49, 2.63637484900092e-45), .Dim = c(9L, 5L)), R2 = 2.7774092763286e-33, y = c(1.2804716371351e+219, -4.32818570044253e-193, 3.73901271875121e+180, -1.38707810757858e-109, -8.80568312893422e+208, 8.50751844833796e+300, 3.54517047891429e+74, 9.85781527708462e+297, 4.02901950584164e-213, 4.53725978353036e-207, -2.67540447246508e-61, -5.65340132739339e+106, 
-2.30944127835205e-275, 1.33895651737226e+157, 1.19839910555662e+70, -8.12530884577588e+94, 5.08725580053646e+39, 2.37670306966689e+261, 1.65570390396492e+142, -1.70483989134069e+131, -7.50695089820293e+30, 9.04790579466146e+187, -5.89109594719464e+275, Inf, -4.73886150149773e+68, 1.82291320377423e-301, 8.96326675977328e+237, -1.45215820387556e+162, 6.04016881422814e-200, -2.08928208127421e-227, -4.94383508425558e-216, 4.101165003194e-58, 1.28138711582668e-188, -1.48337197135534e-298, -1.67730231796253e+192, -2.4005880014623e+132, 1.94021956156373e+117, -4.55167658200535e-164, -7.29042238168763e+63, -4.16168271202441e-107, -3.61834095209576e+164, -4.93720337813108e+236, 6.59726686638396e-18, -1.50415139425896e-127, -3.38540663761598e+263, 1.24254814009178e-104, -1.31230530887456e-104, 4888098009.72424, 1.02764243247658e+88, -7.15334487901262e-211, -2.2336856411256e-162, -5.01015103114616e+118, -4.19135205241892e+252, -1.15779901598505e+220, 8.16995063736108e-202, 1.62608061924974e-72, 4.27836586556738e+95, -2.6626128138124e-39, -3.45204372204458e-237, -3.74041686676493e+66, 3.18802156019726e+205, 5.23649774365411e+54, -1.46834954780253e-125, 2.247461263049e+150, NaN, -1.75029793658937e-94, NA, -8.12530884577588e+94 ))
result <- do.call(NAM:::emBL,testlist)
str(result) |
d4662eaf303d059348fd8eb9ae36b7aa19c75750 | 7c6b5502b56e7be64f87dcb474f5f01ac88992a5 | /man/nwt.Rd | a19e8e65721c3f48aaec0dae6dd2951187481985 | [] | no_license | rxmenezes/rscreenorm | c4cc575c3c5c1f05aec3d7c14448a13452a7c310 | 88c8abca441fd8894d026a351c232ae832d5b228 | refs/heads/master | 2020-03-24T17:21:31.899634 | 2018-08-05T06:43:08 | 2018-08-05T06:43:08 | 141,115,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 457 | rd | nwt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aux_functions.R
\name{nwt}
\alias{nwt}
\title{Extracts the number of well types}
\usage{
nwt(data_rscreen)
}
\arguments{
\item{data_rscreen}{an rscreen.object}
}
\value{
A numeric value.
}
\description{
To be used internally, this function extracts the number of levels
in the well type variable within a given rscreen.object. If the object
has no well type, it will return 0.
}
|
670211c5cd6f8cf7479eec73a697971e51da23ce | 134aa45491390c876501c9d0d9930b561bd91db6 | /Problem_4/rkhinda_prob_4.R | a5d0d29e98ed1636dbaa42cb60770b2c0b912691 | [
"Apache-2.0"
] | permissive | ramanpreetSinghKhinda/CSE_587_Statistical_Analysis_Using_R | a9ba73fbaf71fb49500985dcca49db297b26e4cc | 5a0c0a354303d92c6cc49f3908e0d81a7d3ef709 | refs/heads/master | 2021-01-16T23:22:25.612730 | 2020-08-10T15:53:47 | 2020-08-10T15:53:47 | 59,958,879 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 41,519 | r | rkhinda_prob_4.R | install.packages("rjson")
# Load dependencies: rjson parses the pre-collected tweet dumps; streamR was
# used to collect them (only rjson is actually called in this chunk).
library(rjson)
library(streamR)
# NOTE(review): hard-coded absolute path -- this script only runs on the
# author's machine; consider a relative path or here::here().
setwd("/Users/raman/Documents/SUNY_BUFFALO/DIC_CSE_587/Project_1/Problem_4")
# One JSON file per borough, each a list of full tweet objects.
tweets_manhattan <- fromJSON(file = "Tweets_Manhattan.json")
# Seed the frame with tweet 1; the remaining tweets are appended by the loop
# further down. Columns are positional here and renamed via colnames() below,
# so the two orderings must stay in sync.
df_manhattan <- data.frame(tweets_manhattan[[1]]$id_str, tweets_manhattan[[1]]$created_at,
tweets_manhattan[[1]]$text, tweets_manhattan[[1]]$favorite_count,
tweets_manhattan[[1]]$retweet_count, tweets_manhattan[[1]]$user$id_str,
tweets_manhattan[[1]]$user$verified, tweets_manhattan[[1]]$user$followers_count,
tweets_manhattan[[1]]$user$friends_count, tweets_manhattan[[1]]$user$listed_count,
tweets_manhattan[[1]]$user$statuses_count, tweets_manhattan[[1]]$user$location)
# Bookkeeping columns: collection borough plus two sentiment labels, both
# defaulting to "Neutral" until the keyword classification runs.
df_manhattan$data_location <- "Manhattan"
df_manhattan$sentiment_buy_home <- "Neutral"
df_manhattan$sentiment_rent_apt <- "Neutral"
colnames(df_manhattan) <- c("tweet_id","tweet_created_at","tweet_text","tweet_favorite_count","tweet_retweet_count","user_id","user_verified","user_followers_count","user_friends_count","user_listed_count","user_statuses_count","user_location","data_location","sentiment_buy_home","sentiment_rent_apt")
# --- Keyword-based sentiment tagging for the first Manhattan tweet ---
# contains_any(): TRUE when `text` contains any of `words` as a
# case-insensitive substring. It collapses the original chains of
# grepl(...) | grepl(...) into one regex alternation; the keywords contain
# no regex metacharacters, so plain alternation matches identically.
contains_any <- function(text, words) {
  grepl(paste(words, collapse = "|"), text, ignore.case = TRUE)
}
buy_words <- c("buy", "purchase", "acquire", "bought")
home_words <- c("house", "housing", "home", "apartment", "residence", "suite",
                "lodge", "loft", "penthouse", "flat")
expensive_words <- c("expensive", "high price", "can't afford")
rent_words <- c("rent", "rental", "lease", "charter", "hire", "shelter", "move",
                "relocate", "shift", "transfer", "live", "living")

first_text <- df_manhattan[1, ]$tweet_text
# Buying sentiment: the tweet must mention a buy word AND a home word.
# "Expensive" when a price-complaint word also appears, else "Affordable";
# tweets with no buy+home mention keep the "Neutral" default.
if (contains_any(first_text, buy_words) && contains_any(first_text, home_words)) {
  if (contains_any(first_text, expensive_words)) {
    df_manhattan[1, ]$sentiment_buy_home <- "Expensive"
  } else {
    df_manhattan[1, ]$sentiment_buy_home <- "Affordable"
  }
}
# Renting sentiment: a rent word OR a home word qualifies the tweet.
# BUG FIX: the original condition was written `A | B & C`; since & binds
# tighter than |, R parsed it as `A | (B & C)`, so any tweet with a rent
# word was tagged "Expensive" even without a price word. The intended
# grouping -- matching the "Affordable" branch -- is `(A | B) & C`.
if (contains_any(first_text, rent_words) || contains_any(first_text, home_words)) {
  if (contains_any(first_text, expensive_words)) {
    df_manhattan[1, ]$sentiment_rent_apt <- "Expensive"
  } else {
    df_manhattan[1, ]$sentiment_rent_apt <- "Affordable"
  }
}
# --- Append the remaining Manhattan tweets (indices 2..N) ---
# Keyword matcher and keyword groups, (re)defined here so this section runs
# on its own. contains_any() is TRUE when `text` contains any of `words` as
# a case-insensitive substring (equivalent to the original grepl chains).
contains_any <- function(text, words) {
  grepl(paste(words, collapse = "|"), text, ignore.case = TRUE)
}
buy_words <- c("buy", "purchase", "acquire", "bought")
home_words <- c("house", "housing", "home", "apartment", "residence", "suite",
                "lodge", "loft", "penthouse", "flat")
expensive_words <- c("expensive", "high price", "can't afford")
rent_words <- c("rent", "rental", "lease", "charter", "hire", "shelter", "move",
                "relocate", "shift", "transfer", "live", "living")

count_manhattan_tweets <- length(tweets_manhattan)
print(paste("Total no. of tweets collected from Manhattan : ", count_manhattan_tweets,sep=""))

# Tweet 1 was classified above; a while loop replaces the original repeat so
# the body is safely skipped when fewer than two tweets were collected (the
# repeat always executed once and would index past the list).
# NOTE(review): rbind-in-loop is O(n^2); fine for a few thousand tweets.
x <- 2
while (x <= count_manhattan_tweets) {
  print(paste("Creating Data Frame for Manhattan Tweets Data. Collected ", x," Tweets" ,sep=""))
  # One-row frame for tweet x; columns positional, renamed just below.
  temp_df_manhattan <- data.frame(tweets_manhattan[[x]]$id_str, tweets_manhattan[[x]]$created_at, tweets_manhattan[[x]]$text, tweets_manhattan[[x]]$favorite_count, tweets_manhattan[[x]]$retweet_count, tweets_manhattan[[x]]$user$id_str, tweets_manhattan[[x]]$user$verified, tweets_manhattan[[x]]$user$followers_count, tweets_manhattan[[x]]$user$friends_count, tweets_manhattan[[x]]$user$listed_count, tweets_manhattan[[x]]$user$statuses_count, tweets_manhattan[[x]]$user$location)
  temp_df_manhattan$data_location <- "Manhattan"
  temp_df_manhattan$sentiment_buy_home <- "Neutral"
  temp_df_manhattan$sentiment_rent_apt <- "Neutral"
  colnames(temp_df_manhattan) <- c("tweet_id","tweet_created_at","tweet_text","tweet_favorite_count","tweet_retweet_count","user_id","user_verified","user_followers_count","user_friends_count","user_listed_count","user_statuses_count","user_location","data_location","sentiment_buy_home","sentiment_rent_apt")
  txt <- temp_df_manhattan$tweet_text
  # Buying sentiment: buy word AND home word; "Expensive" when a
  # price-complaint word also appears, otherwise "Affordable".
  if (contains_any(txt, buy_words) && contains_any(txt, home_words)) {
    temp_df_manhattan$sentiment_buy_home <- if (contains_any(txt, expensive_words)) "Expensive" else "Affordable"
  }
  # Renting sentiment: a rent word OR a home word qualifies the tweet.
  # BUG FIX: the original wrote `A | B & C`, which R parses as `A | (B & C)`
  # (& binds tighter than |), so any rent-word tweet was tagged "Expensive"
  # regardless of price words. Intended grouping is `(A | B) & C`, matching
  # the "Affordable" branch.
  if (contains_any(txt, rent_words) || contains_any(txt, home_words)) {
    temp_df_manhattan$sentiment_rent_apt <- if (contains_any(txt, expensive_words)) "Expensive" else "Affordable"
  }
  df_manhattan <- rbind(df_manhattan, temp_df_manhattan)
  x <- x + 1
}
# Drop exact duplicate rows (e.g. the same tweet captured twice).
df_manhattan <- unique(df_manhattan)
nrow(df_manhattan)
head(df_manhattan)
# Frequency table of the buy-home sentiment labels, then a bar chart.
df_manhattan.sentiment_buy_home <- data.frame(Sentiment = "Expensive", Frequency = length(which(df_manhattan$sentiment_buy_home == "Expensive")))
df_manhattan.sentiment_buy_home <- rbind(df_manhattan.sentiment_buy_home, data.frame(Sentiment = "Affordable", Frequency = length(which(df_manhattan$sentiment_buy_home == "Affordable"))))
df_manhattan.sentiment_buy_home <- rbind(df_manhattan.sentiment_buy_home, data.frame(Sentiment = "Neutral", Frequency = length(which(df_manhattan$sentiment_buy_home == "Neutral"))))
df_manhattan.sentiment_buy_home
ggplot(data = df_manhattan.sentiment_buy_home, aes(x = Sentiment, y = Frequency, fill = Sentiment)) + geom_bar(stat = "identity")
# Frequency table of the rent-apartment sentiment labels, then a bar chart.
df_manhattan.sentiment_rent_apt <- data.frame(Sentiment = "Expensive", Frequency = length(which(df_manhattan$sentiment_rent_apt == "Expensive")))
df_manhattan.sentiment_rent_apt <- rbind(df_manhattan.sentiment_rent_apt, data.frame(Sentiment = "Affordable", Frequency = length(which(df_manhattan$sentiment_rent_apt == "Affordable"))))
# BUG FIX: the Neutral row read the non-existent column `sentiment_rent_home`
# (NULL == "Neutral" -> logical(0)), so the Neutral count was always 0.
# The actual column is `sentiment_rent_apt`.
df_manhattan.sentiment_rent_apt <- rbind(df_manhattan.sentiment_rent_apt, data.frame(Sentiment = "Neutral", Frequency = length(which(df_manhattan$sentiment_rent_apt == "Neutral"))))
df_manhattan.sentiment_rent_apt
ggplot(data = df_manhattan.sentiment_rent_apt, aes(x = Sentiment, y = Frequency, fill = Sentiment)) + geom_bar(stat = "identity")
# Running collection of all boroughs' tweets; boroughs below append to it.
df_tweet_collection <- df_manhattan
# -----------------------------------
# Collecting and Cleaning tweets for brooklyn
tweets_brooklyn <- fromJSON(file = "Tweets_Brooklyn.json")
count_brooklyn_tweets <- length(tweets_brooklyn)
print(paste("Total no. of tweets collected from brooklyn : ", count_brooklyn_tweets,sep=""))
df_brooklyn <- data.frame(tweet_id = 1,tweet_created_at=1,tweet_text=1,tweet_favorite_count=1,tweet_retweet_count=1,user_id=1,user_verified=1,user_followers_count=1,user_friends_count=1,user_listed_count=1,user_statuses_count=1,user_location=1,data_location=1,sentiment_buy_home=1,sentiment_rent_apt=1)
x <- 1
repeat {
print(paste("Creating Data Frame for brooklyn Tweets Data. Collected ", x," Tweets" ,sep=""))
temp_df_brooklyn <- data.frame(tweets_brooklyn[[x]]$id_str, tweets_brooklyn[[x]]$created_at, tweets_brooklyn[[x]]$text, tweets_brooklyn[[x]]$favorite_count, tweets_brooklyn[[x]]$retweet_count, tweets_brooklyn[[x]]$user$id_str, tweets_brooklyn[[x]]$user$verified, tweets_brooklyn[[x]]$user$followers_count, tweets_brooklyn[[x]]$user$friends_count, tweets_brooklyn[[x]]$user$listed_count, tweets_brooklyn[[x]]$user$statuses_count, tweets_brooklyn[[x]]$user$location)
temp_df_brooklyn$data_location <- "brooklyn"
temp_df_brooklyn$sentiment_buy_home <- "Neutral"
temp_df_brooklyn$sentiment_rent_apt <- "Neutral"
colnames(temp_df_brooklyn) <- c("tweet_id","tweet_created_at","tweet_text","tweet_favorite_count","tweet_retweet_count","user_id","user_verified","user_followers_count","user_friends_count","user_listed_count","user_statuses_count","user_location","data_location","sentiment_buy_home","sentiment_rent_apt")
if((grepl("buy", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("purchase", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("acquire", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("bought", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
& (grepl("house", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("housing", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("home", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("apartment", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("residence", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("suite", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lodge", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("loft", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("penthouse", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("flat", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
& (grepl("expensive", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("high price", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("can't afford", temp_df_brooklyn$tweet_text, ignore.case = TRUE))){
temp_df_brooklyn$sentiment_buy_home <- "Expensive"
} else if((grepl("buy", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("purchase", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("acquire", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("bought", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
& (grepl("house", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("housing", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("home", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("apartment", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("residence", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("suite", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lodge", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("loft", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("penthouse", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("flat", temp_df_brooklyn$tweet_text, ignore.case = TRUE))){
temp_df_brooklyn$sentiment_buy_home <- "Affordable"
}
# sentiment for renting home
if((grepl("rent", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("rental", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lease", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("charter", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("hire", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("shelter", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("move", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("relocate", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("shift", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("transfer", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("live", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("living", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
| (grepl("house", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("housing", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("home", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("apartment", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("residence", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("suite", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lodge", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("loft", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("penthouse", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("flat", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
& (grepl("expensive", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("high price", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("can't afford", temp_df_brooklyn$tweet_text, ignore.case = TRUE))){
temp_df_brooklyn$sentiment_rent_apt <- "Expensive"
} else if((grepl("rent", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("rental", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lease", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("charter", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("hire", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("shelter", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("move", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("relocate", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("shift", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("transfer", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("live", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("living", temp_df_brooklyn$tweet_text, ignore.case = TRUE))
| (grepl("house", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("housing", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("home", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("apartment", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("residence", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("suite", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("lodge", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("loft", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("penthouse", temp_df_brooklyn$tweet_text, ignore.case = TRUE) | grepl("flat", temp_df_brooklyn$tweet_text, ignore.case = TRUE))){
temp_df_brooklyn$sentiment_rent_apt <- "Affordable"
}
df_brooklyn <- rbind(df_brooklyn, temp_df_brooklyn)
x = x+1
if (x > count_brooklyn_tweets){
break
}
}
# Drop the dummy seed row, remove duplicated tweets, then take a quick look.
df_brooklyn <- unique(df_brooklyn[-1, ])
nrow(df_brooklyn)
head(df_brooklyn)
# -----------------------------------
# Collecting and Cleaning tweets for bronx
#
# Reads the raw Bronx tweets from JSON, flattens the fields of interest into
# df_bronx, and tags every tweet with a crude keyword-based sentiment for
# (a) buying a home and (b) renting an apartment.
tweets_bronx <- fromJSON(file = "Tweets_Bronx.json")

# Dummy numeric first row so rbind() below always has matching columns;
# it is dropped again after the loop.
df_bronx <- data.frame(tweet_id = 1, tweet_created_at = 1, tweet_text = 1, tweet_favorite_count = 1, tweet_retweet_count = 1, user_id = 1, user_verified = 1, user_followers_count = 1, user_friends_count = 1, user_listed_count = 1, user_statuses_count = 1, user_location = 1, data_location = 1, sentiment_buy_home = 1, sentiment_rent_apt = 1)
count_bronx_tweets <- length(tweets_bronx)
print(paste0("Total no. of tweets collected from bronx : ", count_bronx_tweets))

# Keyword groups collapsed into single case-insensitive alternation patterns,
# so each sentiment test below is one grepl() call instead of a long chain of
# ORed grepl()s.  Matching is unchanged: grepl() does substring matching and
# "x|y" matches exactly when grepl(x) | grepl(y) did.
buy_words_bronx  <- "buy|purchase|acquire|bought"
rent_words_bronx <- "rent|rental|lease|charter|hire|shelter|move|relocate|shift|transfer|live|living"
home_words_bronx <- "house|housing|home|apartment|residence|suite|lodge|loft|penthouse|flat"
cost_words_bronx <- "expensive|high price|can't afford"

# while() instead of repeat so an empty JSON file no longer crashes on
# tweets_bronx[[1]].
x <- 1
while (x <= count_bronx_tweets) {
  print(paste0("Creating Data Frame for bronx Tweets Data. Collected ", x, " Tweets"))
  tw <- tweets_bronx[[x]]
  temp_df_bronx <- data.frame(tw$id_str, tw$created_at, tw$text, tw$favorite_count, tw$retweet_count, tw$user$id_str, tw$user$verified, tw$user$followers_count, tw$user$friends_count, tw$user$listed_count, tw$user$statuses_count, tw$user$location)
  temp_df_bronx$data_location <- "bronx"
  temp_df_bronx$sentiment_buy_home <- "Neutral"
  temp_df_bronx$sentiment_rent_apt <- "Neutral"
  colnames(temp_df_bronx) <- c("tweet_id", "tweet_created_at", "tweet_text", "tweet_favorite_count", "tweet_retweet_count", "user_id", "user_verified", "user_followers_count", "user_friends_count", "user_listed_count", "user_statuses_count", "user_location", "data_location", "sentiment_buy_home", "sentiment_rent_apt")

  txt <- temp_df_bronx$tweet_text
  has_buy  <- grepl(buy_words_bronx, txt, ignore.case = TRUE)
  has_rent <- grepl(rent_words_bronx, txt, ignore.case = TRUE)
  has_home <- grepl(home_words_bronx, txt, ignore.case = TRUE)
  has_cost <- grepl(cost_words_bronx, txt, ignore.case = TRUE)

  # Sentiment for buying a home.
  if (has_buy && has_home && has_cost) {
    temp_df_bronx$sentiment_buy_home <- "Expensive"
  } else if (has_buy && has_home) {
    temp_df_bronx$sentiment_buy_home <- "Affordable"
  }

  # Sentiment for renting a home.
  # BUG FIX: the original condition read "rent | home & cost"; because & binds
  # tighter than |, any tweet containing a rent word alone (e.g. "live") was
  # tagged "Expensive" regardless of cost words.  The cost words are now
  # required, matching the buy-home logic above.
  if ((has_rent || has_home) && has_cost) {
    temp_df_bronx$sentiment_rent_apt <- "Expensive"
  } else if (has_rent || has_home) {
    temp_df_bronx$sentiment_rent_apt <- "Affordable"
  }

  df_bronx <- rbind(df_bronx, temp_df_bronx)
  x <- x + 1
}

df_bronx <- df_bronx[-1, ]   # drop the dummy seed row
df_bronx <- unique(df_bronx) # remove duplicated rows
nrow(df_bronx)
head(df_bronx)
# -----------------------------------
# Collecting and Cleaning tweets for Staten Island
#
# Reads the raw Staten Island tweets from JSON, flattens the fields of
# interest into df_statenisland, and tags every tweet with a crude
# keyword-based sentiment for (a) buying a home and (b) renting an apartment.
tweets_statenisland <- fromJSON(file = "Tweets_StatenIsland.json")

# Dummy numeric first row so rbind() below always has matching columns;
# it is dropped again after the loop.
df_statenisland <- data.frame(tweet_id = 1, tweet_created_at = 1, tweet_text = 1, tweet_favorite_count = 1, tweet_retweet_count = 1, user_id = 1, user_verified = 1, user_followers_count = 1, user_friends_count = 1, user_listed_count = 1, user_statuses_count = 1, user_location = 1, data_location = 1, sentiment_buy_home = 1, sentiment_rent_apt = 1)
count_statenisland_tweets <- length(tweets_statenisland)
print(paste0("Total no. of tweets collected from statenisland : ", count_statenisland_tweets))

# Keyword groups collapsed into single case-insensitive alternation patterns,
# so each sentiment test below is one grepl() call instead of a long chain of
# ORed grepl()s.  Matching is unchanged: grepl() does substring matching and
# "x|y" matches exactly when grepl(x) | grepl(y) did.
buy_words_statenisland  <- "buy|purchase|acquire|bought"
rent_words_statenisland <- "rent|rental|lease|charter|hire|shelter|move|relocate|shift|transfer|live|living"
home_words_statenisland <- "house|housing|home|apartment|residence|suite|lodge|loft|penthouse|flat"
cost_words_statenisland <- "expensive|high price|can't afford"

# while() instead of repeat so an empty JSON file no longer crashes on
# tweets_statenisland[[1]].
x <- 1
while (x <= count_statenisland_tweets) {
  print(paste0("Creating Data Frame for statenisland Tweets Data. Collected ", x, " Tweets"))
  tw <- tweets_statenisland[[x]]
  temp_df_statenisland <- data.frame(tw$id_str, tw$created_at, tw$text, tw$favorite_count, tw$retweet_count, tw$user$id_str, tw$user$verified, tw$user$followers_count, tw$user$friends_count, tw$user$listed_count, tw$user$statuses_count, tw$user$location)
  temp_df_statenisland$data_location <- "statenisland"
  temp_df_statenisland$sentiment_buy_home <- "Neutral"
  temp_df_statenisland$sentiment_rent_apt <- "Neutral"
  colnames(temp_df_statenisland) <- c("tweet_id", "tweet_created_at", "tweet_text", "tweet_favorite_count", "tweet_retweet_count", "user_id", "user_verified", "user_followers_count", "user_friends_count", "user_listed_count", "user_statuses_count", "user_location", "data_location", "sentiment_buy_home", "sentiment_rent_apt")

  txt <- temp_df_statenisland$tweet_text
  has_buy  <- grepl(buy_words_statenisland, txt, ignore.case = TRUE)
  has_rent <- grepl(rent_words_statenisland, txt, ignore.case = TRUE)
  has_home <- grepl(home_words_statenisland, txt, ignore.case = TRUE)
  has_cost <- grepl(cost_words_statenisland, txt, ignore.case = TRUE)

  # Sentiment for buying a home.
  if (has_buy && has_home && has_cost) {
    temp_df_statenisland$sentiment_buy_home <- "Expensive"
  } else if (has_buy && has_home) {
    temp_df_statenisland$sentiment_buy_home <- "Affordable"
  }

  # Sentiment for renting a home.
  # BUG FIX: the original condition read "rent | home & cost"; because & binds
  # tighter than |, any tweet containing a rent word alone (e.g. "live") was
  # tagged "Expensive" regardless of cost words.  The cost words are now
  # required, matching the buy-home logic above.
  if ((has_rent || has_home) && has_cost) {
    temp_df_statenisland$sentiment_rent_apt <- "Expensive"
  } else if (has_rent || has_home) {
    temp_df_statenisland$sentiment_rent_apt <- "Affordable"
  }

  df_statenisland <- rbind(df_statenisland, temp_df_statenisland)
  x <- x + 1
}

df_statenisland <- df_statenisland[-1, ]   # drop the dummy seed row
df_statenisland <- unique(df_statenisland) # remove duplicated rows
nrow(df_statenisland)
head(df_statenisland)
#--------------------------
# Sentiment Analysis
#
# Tally, per borough and sentiment level, how many tweets were tagged for the
# buy-home and rent-apartment sentiments, then draw bar charts of the counts.

# One summary row for a borough data frame and a sentiment level.
# sum(x == level, na.rm = TRUE) counts the matches while ignoring NAs,
# exactly like the original length(which(x == level)).
sentiment_row <- function(location, df, level) {
  data.frame(
    location = location,
    freq_sentiment_buy_home = sum(df$sentiment_buy_home == level, na.rm = TRUE),
    freq_sentiment_rent_apt = sum(df$sentiment_rent_apt == level, na.rm = TRUE)
  )
}

# Location labels kept byte-identical to the original script.
df_sentiment <- rbind(
  sentiment_row("Manhattan_Expensive", df_manhattan, "Expensive"),
  sentiment_row("M_Affordable", df_manhattan, "Affordable"),
  sentiment_row("M_Neutral", df_manhattan, "Neutral"),
  sentiment_row("Brooklyn_Expensive", df_brooklyn, "Expensive"),
  sentiment_row("Br_Affordable", df_brooklyn, "Affordable"),
  sentiment_row("Br_Neutral", df_brooklyn, "Neutral"),
  sentiment_row("Bronx_Expensive", df_bronx, "Expensive"),
  sentiment_row("B_Affordable", df_bronx, "Affordable"),
  sentiment_row("B_Neutral", df_bronx, "Neutral"),
  sentiment_row("Staten_Island_Expensive", df_statenisland, "Expensive"),
  sentiment_row("S_Affordable", df_statenisland, "Affordable"),
  sentiment_row("S_Neutral", df_statenisland, "Neutral")
)
df_sentiment

# Columns are referenced by bare name inside aes(); the original df$col form
# is an anti-pattern that also produced "df_sentiment$location" as the
# axis/legend titles.
ggplot(data = df_sentiment, aes(x = location, y = freq_sentiment_buy_home, fill = location)) +
  geom_bar(stat = "identity")
ggplot(data = df_sentiment, aes(x = location, y = freq_sentiment_rent_apt, fill = location)) +
  geom_bar(stat = "identity")
7d7eb8e6d12509c97debfb791c666291e5ab8ff7 | 208b25a80840f5c1ef3c82db6014abbc47502387 | /src/ISLR_10-4-Lab.R | 56a6bc4dda681a6dcdb9024ec37ec4285f50de0f | [] | no_license | arnold-c/stat508 | 44a200c7bbbaacd65c8e8a0eb5fbac9ec109b6f0 | f8d94d678efde328d671c894b9a197ca8b4e2fa7 | refs/heads/master | 2023-01-13T21:33:23.476942 | 2020-11-24T17:03:55 | 2020-11-24T17:03:55 | 294,135,318 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 999 | r | ISLR_10-4-Lab.R | # Set Up ------------------------------------------------------------------
# Lab 10.4 (ISLR): Principal Components Analysis on the USArrests data.
library(ISLR)
library(glmnet)      # not used below; presumably loaded for later labs -- TODO confirm
library(tidyverse)
# "Rounding" restores the pre-R-3.6 sampling algorithm so results match ISLR.
RNGkind(sample.kind = "Rounding")
set.seed(1)
# Q1 ----------------------------------------------------------------------
states <- row.names(USArrests)   # row names are the US states
states
names(USArrests)
# Means and variances differ by orders of magnitude, hence scale = TRUE below.
apply(USArrests, 2, mean)
apply(USArrests, 2, var)
# PCA on the standardised variables.
pr.out <- prcomp(USArrests, scale = TRUE)
names(pr.out)
pr.out$center     # column means used for centring
pr.out$scale      # column standard deviations used for scaling
pr.out$rotation   # loadings: one column per principal component
dim(pr.out$x)     # scores: one row per observation, one column per component
biplot(pr.out, scale = 0)
# Principal components are only defined up to sign; flip both the loadings
# and the scores to match the book's figure.
pr.out$rotation <- -pr.out$rotation
pr.out$x <- -pr.out$x
biplot(pr.out, scale = 0)
# Variance explained by each component (squared singular values / sdev^2).
pr.out$sdev
pr.var <- pr.out$sdev^2
pr.var
# Proportion of variance explained (PVE).
pve <- pr.var / sum(pr.var)
pve
# Scree plot of the PVE ...
plot(pve,
xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
ylim = c(0, 1),
type = "b")
# ... and of the cumulative PVE.
plot(cumsum(pve),
xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
ylim = c(0, 1),
type = "b")
# Small cumsum() demo, as used for the cumulative PVE above.
a <- c(1, 2, 8, -3)
cumsum(a)
|
c9c1c5b5c965e7bbc1f7498165de3b1b9654cf81 | 9a35b9d1e53d6dc65700076c2e7ab286b98cdeb7 | /.#Extra/Kdi.R | 209e0a2780705811aac729a4569cbd80f50d82f7 | [] | no_license | minghao2016/dbmss | 7b042a6a91d58dc55179c3e2692683593ff8f6c0 | 2344071e57ff392c18c28a2b7822db3f41f02dc7 | refs/heads/master | 2023-01-03T13:33:27.042091 | 2020-10-31T09:32:19 | 2020-10-31T09:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,421 | r | Kdi.R | data(paracou16)
X <- paracou16
ReferenceType <- "V. Americana"
NeighborType <- "V. Americana"
plot(X[X$marks$PointType=="Q. Rosea"])
# Vectors to recognize point types
IsReferenceType <- X$marks$PointType==ReferenceType
IsNeighborType <- X$marks$PointType==NeighborType
# Eliminate useless points
X <- X[IsReferenceType | IsNeighborType]
# Compute the matrix of distances
Dist <- pairdist.ppp(X)
# Reduce the matrix to pairs of interest
IsReferenceType <- X$marks$PointType==ReferenceType
IsNeighborType <- X$marks$PointType==NeighborType
Dist <- Dist[IsReferenceType, IsNeighborType]
Weights <- matrix(rep(X$marks$PointWeight, each=X$n), nrow=X$n)
Weights <- Weights[IsReferenceType, IsNeighborType]
# Set self point pair weight equal to 0 or density will be erroneous
if (NeighborType == ReferenceType) {
diag(Weights) <- 0
}
# Choose r
r <- seq(0, 100, 10)
# Prepare a matrix for the results: each line is a for a point
Kdi <- matrix(nrow=dim(Dist)[1], ncol=length(r))
for (i in 1:dim(Dist)[1]) {
# Get neighbor weights
w <- Weights[i,]
# Calculate the density of neighbors. bw="nrd0" is D&O’s choice in their paper.
Density <- density(Dist[i,], weights=w/sum(w), bw="nrd0", from=0)
# Interpolate results at the chosen r
Kdi[i,] <- stats::approx(Density$x, Density$y, xout=r)$y
}
# Check
plot(Kdhat(as.wmppp(X), r, ReferenceType, NeighborType, Weighted=TRUE))
points(x=r, y=apply(Kdi, 2, mean))
|
3870db2d251adb6d5ae34ccf66278962890c2738 | 374d430ce30df66160db33e260579a26a8b143f0 | /Accuracy.R | fd3373bf4df86ce3225639645ea83ee0aae23194 | [] | no_license | TBKelley/PredMachLearn_PeerAssessment_01 | 47e3f9488eccb1cfaaacaa46b460b880eead4c83 | 05df0b7589b17c6080b4ea2408ab5e713bda404f | refs/heads/master | 2020-05-29T13:18:08.815704 | 2014-06-21T04:36:41 | 2014-06-21T04:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 354 | r | Accuracy.R | # Accuracy.R
#
# Print the fitted model accuracy
#
library(caret)
# Format the accuracy of a fitted model on a data set.
#
# Args:
#   fit         A fitted model accepted by predict(); its $finalModel class
#               (as stored by caret train objects) is used as the label.
#   strDataType Label for the data set, e.g. "Training" or "Test".
#   df          Data frame holding the predictors and the true `classe` column.
# Returns: a string such as "randomForest Training Accuracy = 99.1%".
Accuracy <- function(fit, strDataType, df) {
  pred <- predict(fit, df)
  # mean() of a logical vector is the proportion of TRUEs, i.e. the accuracy.
  # (Replaces the original sum(==)/length() pair and drops the unused
  # pred.Correct intermediate.)
  accuracy <- mean(pred == df$classe)
  modelName <- class(fit$finalModel)[1]
  sprintf("%s %s Accuracy = %3.1f%%", modelName, strDataType, accuracy * 100.0)
}
|
847202cfb6aff5ff01ce2e2abee737c9c153b88e | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /ALassoSurvIC/R/log_penlikelihood.R | 32bf5e9029dc20273f4046d0c8202de713d44be5 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,015 | r | log_penlikelihood.R | log_penlikelihood <- function(b, arglist) {
n <- arglist$n
npmle_set <- arglist$set
u <- npmle_set[,2]
l <- arglist$l
r <- arglist$r
r_cen <- is.infinite(r)
lr <- cbind(l, r)
z <- arglist$z
trunc <- arglist$trunc
tol <- arglist$tol
niter <- arglist$niter
distance <- tol + 1000
iter <- 1
old_lambda <- arglist$initial_lambda
while ((distance > tol) & (iter < niter)) {
ew <- fun_ew(b, old_lambda, arglist)
new_lambda <- fun_updatelambda(b, ew, arglist)
distance <- max(abs(new_lambda - old_lambda))
old_lambda <- new_lambda
iter <- iter + 1
}
exp_zb <- exp(z %*% b)
lambda_exp_zb <- exp_zb %x% t(new_lambda)
if (is.null(trunc)) {
target_set1 <- fun_subless(u = u, lessthan = l)
} else {
target_set1 <- fun_sublr(u = u, l = trunc-1e-10, r = l)
}
target_set2 <- fun_sublr(u = u, l = l, r = r)
value <- sum(-rowSums(target_set1 * lambda_exp_zb)) + sum(log((1 - exp(- rowSums(target_set2 * lambda_exp_zb)))[!r_cen]))
return(value)
}
|
cf2bb1389409ef4d790264b2ae7c5c97e96c0b24 | c20f6eb14009096caff968b1d80e06fb25bf583d | /man/to_numeric_vector.Rd | dbe7d980e5c42a2bd4c5180dca2daa91bca13d16 | [
"MIT"
] | permissive | YanVT/MATSS | b11b1eb33e893d06acfbf94bcd68d1405b7abb99 | 75940bdc39883081b6253c7a1e9a7052853f92de | refs/heads/master | 2023-05-21T16:49:55.800479 | 2020-05-13T05:22:12 | 2020-05-13T05:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 353 | rd | to_numeric_vector.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-data-summarize.R
\name{to_numeric_vector}
\alias{to_numeric_vector}
\title{Extract a numeric vector}
\usage{
to_numeric_vector(x)
}
\arguments{
\item{x}{the input data}
}
\description{
Extract a numeric vector from a data.frame or a matrix (taking
the first column).
}
|
3df759e20c0cdfe0d990e941a18560ab99914fc9 | 49df0f3d24cc70379c144ef1ae5cd120bec114b3 | /capitulo_01/exercicios-1.3.R | 0725abfbee70c10af1462f6e3611968917aebfc9 | [] | no_license | lidimayra/bayesian-analysis | 40a9babb01525be3c6a25ae9e0557a7e73d207cb | 32abea22693c355fa9e72c0e679e9b236c7732f1 | refs/heads/master | 2021-01-16T18:26:55.699963 | 2017-08-12T16:28:11 | 2017-08-12T16:28:11 | 100,082,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,327 | r | exercicios-1.3.R | #!/usr/bin/env Rscript
# Exercícios
# 1.3 Estimativa dos parâmetros com o uso de um modelo de regressão linear.
# No exercício anterior, fizemos uma simulação simples de uma amostra de
# relação entre comprimento (objeto comp) e peso (objeto pr). Vamos fazer uso
# desta "amostra" para tentar estimar os parâmetros α e β do modelo
# pr = α * comp^β
# Para isto considere que o modelo pode ser parametrizado como:
# log pr = α' + β' * log c
# em que:
# α' = log a
# β' = β
# TODO
# a. Utilize as funções log() e lm() para modelar os dados e estimar os coeficientes
# α' e β'.
# TODO
# b. Use a inversa da função log() para obter o valor de α a partir de α'. Compare os
# valores obtidos com os "verdadeiros" utilizados para a simulação dos dados
# (α = 0.00001 e β = 3). Dica: Para converter α' em α visite a "ajuda" sobre a função
# do logarítmo com o auxílio do comando ? e você encontrará o que precisa.
#
# TODO
# c. Use a fórmula da relação comprimento-peso, o vetor com os comprimentos (comp)
# e os valores estimados de α e β para criar um objeto ppred que deve conter
# predições do peso médio esperado por classe de comprimento.
# TODO
# d. Construa um gráfico em que as previsões (ppred) são representadas como uma
# linha sobreposta aos dados simulados comp e pr
|
9ec4da28006be6322440ea78198fe5b3dfbeedc4 | b58ef6361161adfad9bdc7cc1b23c4988030fbe3 | /stuff/cf_resources_geocode.R | 960b8135ccd719d7964ef5f3cab0544ded5738f0 | [
"MIT"
] | permissive | DSPG-ISU/DSPG | 01b9ec9a3dd02cd2ee7e52a28ba22f6d312ad2f8 | c20b50c1dd28eedd879a9226b5f6511a0471c870 | refs/heads/master | 2023-02-26T12:54:52.616558 | 2021-02-05T05:44:04 | 2021-02-05T05:44:04 | 277,006,430 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 543 | r | cf_resources_geocode.R | library(tidyverse)
library(ggmap)
library(ggplot2)
library(dplyr)
library(sf)
library(leaflet)
cf_resources <- readr::read_csv("cfrhelp_scrape.csv")
cf_resources <- cf_resources %>% mutate(
search_address = paste(Address, City, State, Zip, sep = ", ")
)
register_google(key = "your api key", write = TRUE)
cf_resources <- cf_resources %>% mutate_geocode(search_address) #One NA value due to Address
# don't include the variable for the ID
cf_resources <- cf_resources %>% select(-X1)
usethis::use_data(cf_resources, overwrite = TRUE)
|
5f3b6a7246ce68c3427eca973330defed3df2e3e | 8f1d12e7ed4f9d726a7b30ba939210e3254c41ea | /log scale IS.R | 911a007b5b9ef75d53eec00e069790d6aa5de7ea | [] | no_license | Mckzz/Ionic-Strength | 4d3c02fce33400fd6d04c69ef9b40c7ffaf28450 | 4cba788420a53222dab84429565ad3a19c6663ea | refs/heads/main | 2023-05-30T06:09:58.976135 | 2021-06-10T07:44:16 | 2021-06-10T07:44:16 | 375,593,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,195 | r | log scale IS.R | library(tidyverse)
library(ggplot2)
IS_NaCl <- read_csv("~/student documents/UBC/Research/chaoborus imaging/Square capillary/2021_06_08 IS using NaCl/IS_NaCl.csv")
view(IS_NaCl)
ISlong <-
pivot_longer(
IS_NaCl,
cols = c(`1`, `2`, `3`, `4`),
names_to = "larva", values_to = "area")
#mutate(larva = as_factor(larva))
print(ISlong, n= 20)
#making % change column
ISlong.pct <- ISlong %>%
group_by(larva) %>%
mutate(
area.pct.change = ((area - area[1]) / area[1]
)*100) %>%
ungroup() #why the ungroup?
print(ISlong.pct, n= 40)
pct.meansd <- ISlong.pct %>%
group_by(mM) %>%
dplyr::summarise(
pct.mean = mean(area.pct.change),
pct.sd = sd(area.pct.change))
print(pct.meansd, n= 25)
#plot means/ sd
ggplot(data = pct.meansd, aes(x= mM)) +
geom_point(aes(y= pct.mean)) +
geom_line(aes(y= pct.mean)) +
geom_errorbar(aes(x= mM,
ymin= pct.mean - pct.sd,
ymax= pct.mean + pct.sd),
group= "ant.post",
width= 0.1) +
scale_color_manual(values=c("#D55E00", "#009e73")) +
scale_x_log10() +
labs(x = "mM",
y = "Area % change") +
#ggtitle("") +
theme_classic()
|
b261faf006dfa34cbe8f485eba240315413eb838 | 34863e69f7bf6fdc9e5726f3ccc8eb2bec63c14a | /R/resp_styles_to_mplus.R | 4d4286d6243518ece9cc2ca7dfbf438f8a43df44 | [] | no_license | tzoltak/rstyles | b9d2bf5c1ba0e0265ed57c85795b48e4b7b8d6a0 | 6d8150632c8267786d9f406a65ad5267c4cccee3 | refs/heads/main | 2023-04-17T18:43:03.038970 | 2023-02-09T13:53:14 | 2023-02-09T13:53:14 | 312,404,647 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41,868 | r | resp_styles_to_mplus.R | #' @title Preparing Mplus code to estimate RS models
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares components of Mplus model description syntax for
#' IRTree model.
#' @param data a data frame
#' @inheritParams make_mplus_gpcm_vmmc_syntax
#' @param output optionally a character vector of Mplus options defining what
#' type of results should be included in the output
#' @param savedata optionally a string with a name of the file in which factor
#' scores should be saved
#' @param analysis a list with elements \code{ESTIMATOR}, \code{ALGORITHM},
#' \code{INTEGRATION} and \code{PROCESSORS} containing Mplus \emph{ANALYSIS}
#' options (provided as strings)
#' @param title string with a title for the analysis
#' @details For the description of model specification see \emph{Details}
#' section in \code{\link{make_mplus_irtree_vmmc_syntax}}
#' @section Limitations:
#' At the moment there is no possibility to prepare models with many
#' no-RS latent traits loading different sets of items.
#' @return A list with elements named \code{TITLE}, \code{VARIABLE},
#' \code{ANALYSIS}, \code{MODEL}, \code{MODELCONSTRAINT}, \code{SAVEDATA},
#' \code{rdata}, \code{usevariables} that can be used as arguments to the
#' \code{mplusObject} function from the package \emph{MplusAutomation} using
#' \code{\link{do.call}}
#' @importFrom stats na.omit setNames
#' @export
make_mplus_irtree_model_syntax <- function(data, items, scoringMatrix,
                                           observedExogenous = vector(mode = "character", length = 0L),
                                           observedDependent = vector(mode = "character", length = 0L),
                                           fixSlopes = vector(mode = "character", length = 0L),
                                           reverseCoded = vector(mode = "list", length = 0L),
                                           orthogonal = vector(mode = "character", length = 0L),
                                           weight = NA_character_,
                                           output = "STDYX",
                                           savedata = NA_character_,
                                           analysis = list(ESTIMATOR = "MLR",
                                                           ALGORITHM = "INTEGRATION",
                                                           INTEGRATION = "STANDARD",
                                                           PROCESSORS = "4"),
                                           title = "Some GPCM model with custom scoring matrix") {
  stopifnot(is.data.frame(data),
            all(sapply(data, function(x) !all(is.na(x)))),
            is.character(items) | is.list(items),
            all(unlist(items) %in% names(data)),
            is.matrix(scoringMatrix), is.numeric(scoringMatrix),
            all(sapply(data[, unique(unlist(items)), drop = FALSE],
                       function(x, values) {all(x %in% values)},
                       values = c(rownames(scoringMatrix), NA_character_))),
            is.list(reverseCoded),
            is.character(savedata), length(savedata) == 1L,
            is.character(output), all(!is.na(output)),
            is.vector(analysis),
            is.character(title), length(title) == 1L, nchar(title) < 78L)
  # Normalize `items` to the internal list-of-lists form:
  # items[[scoringColumn]][[latentTrait]] -> character vector of item names.
  if (is.character(items)) {
    stopifnot(all(items %in% names(data)))
    itemNames <- items
    items <- setNames(rep(list(list(items)), ncol(scoringMatrix)),
                      colnames(scoringMatrix))
    items <- mapply(setNames, items, names(items), SIMPLIFY = FALSE)
  } else { # a list
    stopifnot(all(names(items) %in% colnames(scoringMatrix)),
              all(colnames(scoringMatrix) %in% names(items)),
              all(unlist(items) %in% names(data)))
    itemNames <- unique(unlist(items))
  }
  if (is.character(observedExogenous)) {
    stopifnot(all(observedExogenous %in% names(data)))
  } else {
    stopifnot(is.matrix(observedExogenous),
              all(rownames(observedExogenous) %in% names(data)))
  }
  if (is.character(observedDependent)) {
    stopifnot(all(observedDependent %in% names(data)))
  } else {
    stopifnot(is.matrix(observedDependent),
              all(rownames(observedDependent) %in% names(data)))
  }
  # Fill in ANALYSIS options the user did not provide with the defaults.
  addToAnalysis <- formals(make_mplus_gpcm_model_syntax)$analysis[
    setdiff(names(formals(make_mplus_gpcm_model_syntax)$analysis),
            c(names(analysis), ""))]
  if (!is.null(addToAnalysis)) analysis <- append(analysis, addToAnalysis)
  # Recode responses into binary pseudo-items that define the IRTree structure.
  dataIRTree <- expand_responses(responses = data[, itemNames],
                                 scoringMatrix = scoringMatrix)
  onlyNAs <- apply(is.na(dataIRTree), 2L, all)
  if (any(onlyNAs)) warning("There are pseudoitems with only NAs in the data: '",
                            paste(colnames(dataIRTree)[onlyNAs], collapse = "', '"),
                            "'. They will be excluded from the modeling.")
  onlyOneValue <- apply(dataIRTree, 2L,
                        function(x) {return(length(unique(na.omit(x))))}) < 2L
  # (bug fix) report the constant pseudo-items here, not the all-NA ones
  if (any(onlyOneValue)) warning("There are pseudoitems that have only one value in the data: '",
                                 paste(colnames(dataIRTree)[onlyOneValue], collapse = "', '"),
                                 "'. They will be excluded from the modeling.")
  dataIRTree <- dataIRTree[, !onlyNAs & !onlyOneValue, drop = FALSE]
  # changing names of items in data structures to the corresponding pseudo-items
  itemNames <- colnames(dataIRTree)
  items <- mapply(function(x, nm) lapply(x, function(x, nm) paste0(nm, "_", x), nm = nm),
                  items, names(items), SIMPLIFY = FALSE)
  if (length(reverseCoded) > 0L) {
    stopifnot(!is.null(names(reverseCoded)))
    for (i in seq_along(items)) {
      for (j in seq_along(items[[i]])) {
        if (names(items[[i]])[j] %in% names(reverseCoded)) {
          reverseCoded[[names(items[[i]])[j]]] <-
            paste0(names(items)[i], "_", reverseCoded[[names(items[[i]])[j]]])
        }
      }
    }
  }
  # end of changing names
  # (bug fix) when the exogenous/dependent specification is a 0/1 matrix the
  # variable names are kept in its rownames - use them to pick data columns
  # (previously the matrix's values were spliced into the column selection)
  exogenousNames <- if (is.matrix(observedExogenous)) {
    rownames(observedExogenous)
  } else {
    observedExogenous
  }
  dependentNames <- if (is.matrix(observedDependent)) {
    rownames(observedDependent)
  } else {
    observedDependent
  }
  data <- data.frame(dataIRTree, data[, na.omit(c(exogenousNames,
                                                  dependentNames, weight)),
                                      drop = FALSE])
  syntax <- make_mplus_irtree_vmmc_syntax(items = items,
                                          scoringMatrix = scoringMatrix,
                                          observedExogenous = observedExogenous,
                                          observedDependent = observedDependent,
                                          fixSlopes = fixSlopes,
                                          reverseCoded = reverseCoded,
                                          orthogonal = orthogonal,
                                          weight = weight)
  # "NAME IS VALUE;" lines for the ANALYSIS section.
  analysis <- paste(mapply(
    function(nm, val) {
      return(paste0(nm, " IS ", val, ";"))
    },
    names(analysis), analysis, SIMPLIFY = TRUE))
  if (length(output) > 0L) {
    output <- paste0(paste(output, collapse = " "), ";")
  } else {
    output <- NULL
  }
  if (savedata %in% c(NA_character_, "")) {
    savedata <- NULL
  } else {
    savedata <- paste0("FILE IS ", savedata, ";\n",
                       "SAVE IS FSCORES;")
  }
  return(as.MplusSyntaxElements(list(TITLE = title,
                                     VARIABLE = syntax$VARIABLE,
                                     ANALYSIS = analysis,
                                     MODEL = syntax$MODEL,
                                     MODELCONSTRAINT = syntax$MODELCONSTRAINT,
                                     OUTPUT = output,
                                     SAVEDATA = savedata,
                                     rdata = as.data.frame(data),
                                     usevariables = colnames(data))))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares components of Mplus model description syntax for
#' IRTree model.
#' @param items a character vector of item names or a list describing items
#' matching to latent traits in a between-item multidimensional model; in the
#' latter case list elements must be named after column names of the
#' \code{scoringMatrix} with each element being a list describing different
#' latent traits that should be scored using a given column of the
#' \code{scoringMatrix}; names of these \emph{second-level} elements specify
#' names of latent traits and elements themselves are character vectors of item
#' names that are assigned to (loaded) a given latent trait
#' @param scoringMatrix a matrix describing how responses (described in rownames
#' of the matrix) map on \emph{scores} of latent traits (described in columns of
#' the matrix)
#' @inheritParams make_mplus_structural_model
#' @param fixSlopes optionally a character vector of latent trait names
#' for which item slopes parameters should be fixed across items
#' (these names need to occur in column names of \code{scoringMatrix})
#' @param reverseCoded optionally a named list of character vectors with names
#' of list elements specifying latent trait names and elements giving names of
#' items that are \emph{reverse coded} with respect to this latent trait;
#' please note, that these don't need to be given if slopes of the no-RS
#' trait(s) are not fixed across items
#' @param orthogonal optionally a character vector of latent trait names
#' indicating which latent traits should be specified as orthogonal to each
#' other (all the mentioned latent traits will be specified as orthogonal to each
#' other and all the other latent traits)
#' @param weight optionally a string with a name of the variable storing weights
#' @details Models are identified by fixing variances of the latent
#' variables \strong{that are not mentioned in \code{fixSlopes}} to 1 and
#' by fixing \emph{slope} parameters to 1 (or -1 in a case of reverse coded
#' items) and freeing latent trait variances that are mentioned in
#' \code{fixSlopes}.
#' @return A list of strings with elements named \code{VARIABLE}, \code{MODEL}
#' and \code{MODELCONSTRAINT} (the last one can be \code{NULL})
#' @importFrom stats setNames
make_mplus_irtree_vmmc_syntax <- function(items, scoringMatrix,
                                          observedExogenous = vector(mode = "character", length = 0L),
                                          observedDependent = vector(mode = "character", length = 0L),
                                          fixSlopes = vector(mode = "character", length = 0L),
                                          reverseCoded = vector(mode = "list", length = 0L),
                                          orthogonal = vector(mode = "character", length = 0L),
                                          weight = NA_character_) {
  stopifnot(is.character(items) | is.list(items),
            is.matrix(scoringMatrix), is.numeric(scoringMatrix),
            is.character(weight), length(weight) == 1L)
  if (is.character(items)) {
    stopifnot(!anyNA(items), length(items) > 0L,
              all(!duplicated(items)))
    latentTraits <- colnames(scoringMatrix)
    names(latentTraits) <- latentTraits
    # (bug fix) build the same items[[scoringColumn]][[latentTrait]] structure
    # as make_mplus_gpcm_vmmc_syntax does; the previous rep(list(items), ...)
    # produced named character vectors, so items[[col]][[trait]] later
    # extracted only the FIRST item name instead of the whole vector
    items <- setNames(rep(list(list(items)), ncol(scoringMatrix)),
                      colnames(scoringMatrix))
    items <- mapply(setNames, items, names(items), SIMPLIFY = FALSE)
  } else { # a list
    stopifnot(all(names(items) %in% colnames(scoringMatrix)),
              all(colnames(scoringMatrix) %in% names(items)),
              !anyNA(unlist(items)),
              all(sapply(items, is.list)),
              all(sapply(items, function(x) all(sapply(x, is.character)))),
              all(!sapply(items, function(x) duplicated(unlist(x)))),
              all(sapply(items, function(x) all(sapply(x, length) > 0))))
    # Latent trait names come from the second-level list names; traits with no
    # items of their own (RS traits) are appended from the scoring matrix.
    latentTraits <- c(unlist(lapply(items, names)),
                      setdiff(colnames(scoringMatrix), names(items)))
    names(latentTraits) <- c(unlist(mapply(function(x, nm) rep(nm, length(x)),
                                           items, names(items), SIMPLIFY = FALSE)),
                             setdiff(colnames(scoringMatrix), names(items)))
  }
  stopifnot(is.character(fixSlopes), all(fixSlopes %in% latentTraits),
            is.list(reverseCoded), all(names(reverseCoded) %in% latentTraits),
            all(!duplicated(names(reverseCoded))), all(unlist(reverseCoded) %in% unlist(items)),
            is.character(orthogonal), all(orthogonal %in% latentTraits))
  if (length(reverseCoded) > 0L & length(fixSlopes) == 0L) {
    message("With no slopes being fixed argument 'reverseCoded' won't affect the model specification.")
  }
  # Regressions of latent traits on exogenous variables and of dependent
  # variables on latent traits.
  modelStructural <- make_mplus_structural_model(latent = latentTraits,
                                                 observedExogenous = observedExogenous,
                                                 observedDependent = observedDependent)
  observedExogenous <- attributes(modelStructural)$observedExogenous
  observedDependent <- attributes(modelStructural)$observedDependent
  model <- constraints <- vector(mode = "character", length = 0L)
  # Measurement part: BY statements plus the identifying variance constraint
  # for every latent trait.
  for (lt in seq_along(latentTraits)) {
    i <- which(colnames(scoringMatrix) == names(latentTraits)[lt])
    traitItems <- items[[colnames(scoringMatrix)[i]]][[latentTraits[lt]]]
    if (latentTraits[lt] %in% names(reverseCoded)) {
      traitItemsReversed <- reverseCoded[[latentTraits[lt]]]
    } else {
      traitItemsReversed <- vector(mode = "character", length = 0L)
    }
    if (latentTraits[lt] %in% fixSlopes) {
      # Fixed slopes: loadings at 1 (-1 for reverse-coded items), variance free.
      traitItems <- setdiff(traitItems, traitItemsReversed)
      if (length(traitItems) > 0L) {
        model <- c(model,
                   paste0(latentTraits[lt], " BY ",
                          paste(traitItems, collapse = "@1 "), "@1;"))
      }
      if (length(traitItemsReversed) > 0L) {
        model <- c(model,
                   paste0(latentTraits[lt], " BY ",
                          paste(traitItemsReversed, collapse = "@-1 "), "@-1;"))
      }
      model <- c(model,
                 paste0(latentTraits[lt], "*;"))
    } else {
      # Free slopes: first loading freed explicitly ("*"), variance fixed at 1.
      model <- c(model,
                 paste0(latentTraits[lt], " BY ", traitItems[1L], "* ",
                        paste(traitItems[-1L], collapse = " "), ";"))
      model <- c(model,
                 paste0(latentTraits[lt], "@1;"))
    }
  }
  model <- c(model,
             make_mplus_latent_traits_orthogonal_syntax(orthogonal))
  if (!is.na(weight)) {
    weight <- paste0("WEIGHT = ", weight, ";")
  } else {
    weight <- vector(mode = "character", length = 0L)
  }
  if (length(constraints) == 0L) constraints <- NULL
  results <- list(VARIABLE =
                    paste(c(strwrap(paste0("CATEGORICAL = ",
                                           paste(unique(unlist(items)),
                                                 collapse = " "), ";"),
                                    width = 80L, exdent = 14L),
                            weight),
                          collapse = "\n"),
                  MODEL = paste(c(modelStructural, model),
                                collapse = "\n"),
                  MODELCONSTRAINT = paste(constraints, collapse = "\n"))
  return(as.MplusSyntaxElements(results))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares components of Mplus model description syntax for a
#' GPCM (NRM).
#' @inheritParams make_mplus_irtree_model_syntax
#' @details For the description of model specification see \emph{Details}
#' section in \code{\link{make_mplus_gpcm_vmmc_syntax}}
#' @section Limitations:
#' At the moment there is no possibility to prepare models with many
#' no-RS latent traits loading different sets of items.
#' @return A list with elements named \code{TITLE}, \code{VARIABLE},
#' \code{ANALYSIS}, \code{MODEL}, \code{MODELCONSTRAINT}, \code{SAVEDATA},
#' \code{rdata}, \code{usevariables} that can be used as arguments to the
#' \code{mplusObject} function from the package \emph{MplusAutomation} using
#' \code{\link{do.call}}
#' @importFrom stats na.omit
#' @export
make_mplus_gpcm_model_syntax <- function(data, items, scoringMatrix,
                                         observedExogenous = vector(mode = "character", length = 0L),
                                         observedDependent = vector(mode = "character", length = 0L),
                                         fixSlopes = vector(mode = "character", length = 0L),
                                         reverseCoded = vector(mode = "list", length = 0L),
                                         orthogonal = vector(mode = "character", length = 0L),
                                         weight = NA_character_,
                                         output = "STDYX",
                                         savedata = NA_character_,
                                         analysis = list(ESTIMATOR = "MLR",
                                                         ALGORITHM = "INTEGRATION",
                                                         INTEGRATION = "STANDARD",
                                                         PROCESSORS = "4"),
                                         title = "Some GPCM model with custom scoring matrix") {
  stopifnot(is.data.frame(data),
            all(sapply(data, function(x) !all(is.na(x)))),
            is.character(items) | is.list(items),
            all(unlist(items) %in% names(data)),
            is.matrix(scoringMatrix), is.numeric(scoringMatrix), !anyNA(scoringMatrix),
            all(sapply(data[, unique(unlist(items)), drop = FALSE],
                       function(x, values) {all(x %in% values)},
                       values = c(rownames(scoringMatrix), NA_character_))),
            is.list(reverseCoded),
            # (consistency) validate `output` the same way as
            # make_mplus_irtree_model_syntax does
            is.character(output), all(!is.na(output)),
            is.character(savedata), length(savedata) == 1L,
            is.vector(analysis),
            is.character(title), length(title) == 1L, nchar(title) < 78L)
  if (is.character(items)) {
    stopifnot(all(items %in% names(data)))
    itemNames <- items
  } else { # a list
    stopifnot(all(names(items) %in% colnames(scoringMatrix)),
              all(colnames(scoringMatrix) %in% names(items)),
              all(unlist(items) %in% names(data)))
    itemNames <- unique(unlist(items))
  }
  if (is.character(observedExogenous)) {
    stopifnot(all(observedExogenous %in% names(data)))
  } else {
    stopifnot(is.matrix(observedExogenous),
              all(rownames(observedExogenous) %in% names(data)))
  }
  if (is.character(observedDependent)) {
    stopifnot(all(observedDependent %in% names(data)))
  } else {
    stopifnot(is.matrix(observedDependent),
              all(rownames(observedDependent) %in% names(data)))
  }
  # Fill in ANALYSIS options the user did not provide with the defaults.
  addToAnalysis <- formals(make_mplus_gpcm_model_syntax)$analysis[
    setdiff(names(formals(make_mplus_gpcm_model_syntax)$analysis),
            c(names(analysis), ""))]
  if (!is.null(addToAnalysis)) analysis <- append(analysis, addToAnalysis)
  # Observed (non-NA) response categories of every item.
  itemCategories <- lapply(data[, itemNames, drop = FALSE],
                           function(x) {return(setdiff(unique(x), NA))})
  syntax <- make_mplus_gpcm_vmmc_syntax(items = items,
                                        scoringMatrix = scoringMatrix,
                                        observedExogenous = observedExogenous,
                                        observedDependent = observedDependent,
                                        fixSlopes = fixSlopes,
                                        reverseCoded = reverseCoded,
                                        orthogonal = orthogonal,
                                        weight = weight,
                                        itemCategories = itemCategories,
                                        trySimpleGPCM = !any(grepl("STD",
                                                                   output)))
  if (is.na(weight)) weight <- vector(mode = "character", length = 0L)
  # (bug fix) when the exogenous/dependent specification is a 0/1 matrix the
  # variable names are kept in its rownames - use them to pick data columns
  # (previously the matrix's values were spliced into the column selection)
  exogenousNames <- if (is.matrix(observedExogenous)) {
    rownames(observedExogenous)
  } else {
    observedExogenous
  }
  dependentNames <- if (is.matrix(observedDependent)) {
    rownames(observedDependent)
  } else {
    observedDependent
  }
  data <- data[, c(unique(unlist(items)), exogenousNames,
                   dependentNames, weight), drop = FALSE]
  # "NAME IS VALUE;" lines for the ANALYSIS section.
  analysis <- paste(mapply(
    function(nm, val) {
      return(paste0(nm, " IS ", val, ";"))
    },
    names(analysis), analysis, SIMPLIFY = TRUE))
  if (length(output) > 0L) {
    output <- paste0(paste(output, collapse = " "), ";")
  } else {
    output <- NULL
  }
  if (savedata %in% c(NA_character_, "")) {
    savedata <- NULL
  } else {
    savedata <- paste0("FILE IS ", savedata, ";\n",
                       "SAVE IS FSCORES;")
  }
  return(as.MplusSyntaxElements(list(TITLE = title,
                                     VARIABLE = syntax$VARIABLE,
                                     ANALYSIS = analysis,
                                     MODEL = syntax$MODEL,
                                     MODELCONSTRAINT = syntax$MODELCONSTRAINT,
                                     OUTPUT = output,
                                     SAVEDATA = savedata,
                                     rdata = as.data.frame(data),
                                     usevariables = colnames(data))))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares components of Mplus model description syntax.
#' @inheritParams make_mplus_irtree_vmmc_syntax
#' @param itemCategories a list of values that a given item takes in the data
#' @param trySimpleGPCM a logical value indicating whether to try to use
#' a simple Mplus GPCM specification instead of the NRM when
#' \code{scoringMatrix} has only one column
#' @details Models are identified by fixing variances of the latent
#' variables \strong{that are not mentioned in \code{fixSlopes}} to 1 and
#' by fixing \emph{slope} parameters to 1 (or -1 in a case of reverse coded
#' items) and freeing latent trait variances that are mentioned in
#' \code{fixSlopes}.
#'
#' Please note that Mplus assumes that the last category is always
#' scored 0, so if \code{scoringMatrix} contains some non-zero elements in its
#' last row function will automatically adjust the coding scheme for latent
#' traits (columns of the \code{scoringMatrix}) where last cell is non-zero by
#' subtracting value in this cell from the whole column. Typically this will
#' introduce negative scores to this column, but this is something Mplus can
#' carry and it doesn't affect estimates of slope parameters. However,
#' \strong{this will make estimated intercept parameters incomparable with the
#' specification using the original scoring scheme}. Also, this will make slope
#' parameters for a given latent trait negative (while preserving the origin -
#' for the purpose of interpretation - of the latent trait itself).
#' @return A list of strings with elements named \code{VARIABLE}, \code{MODEL}
#' and \code{MODELCONSTRAINT} (the last one can be \code{NULL})
#' @importFrom stats na.omit setNames
make_mplus_gpcm_vmmc_syntax <- function(items, scoringMatrix,
                                        observedExogenous = vector(mode = "character", length = 0L),
                                        observedDependent = vector(mode = "character", length = 0L),
                                        fixSlopes = vector(mode = "character", length = 0L),
                                        reverseCoded = vector(mode = "list", length = 0L),
                                        orthogonal = vector(mode = "character", length = 0L),
                                        weight = NA_character_,
                                        itemCategories = rep(list(rownames(scoringMatrix)),
                                                             length(items)),
                                        trySimpleGPCM = TRUE) {
  stopifnot(is.character(items) | is.list(items),
            is.matrix(scoringMatrix), is.numeric(scoringMatrix),
            is.character(weight), length(weight) == 1L,
            is.logical(trySimpleGPCM), length(trySimpleGPCM) == 1L,
            trySimpleGPCM %in% c(TRUE, FALSE))
  # Normalize `items` to the internal list-of-lists form:
  # items[[scoringColumn]][[latentTrait]] -> character vector of item names.
  if (is.character(items)) {
    stopifnot(!anyNA(items), length(items) > 0L,
              all(!duplicated(items)))
    latentTraits <- colnames(scoringMatrix)
    names(latentTraits) <- latentTraits
    items <- setNames(rep(list(list(items)), ncol(scoringMatrix)),
                      colnames(scoringMatrix))
    items <- mapply(setNames, items, names(items), SIMPLIFY = FALSE)
  } else { # a list
    stopifnot(all(names(items) %in% colnames(scoringMatrix)),
              all(colnames(scoringMatrix) %in% names(items)),
              !anyNA(unlist(items)),
              all(sapply(items, is.list)),
              all(sapply(items, function(x) all(sapply(x, is.character)))),
              all(!sapply(items, function(x) duplicated(unlist(x)))),
              all(sapply(items, function(x) all(sapply(x, length) > 0))))
    latentTraits <- c(unlist(lapply(items, names)),
                      setdiff(colnames(scoringMatrix), names(items)))
    names(latentTraits) <- c(unlist(mapply(function(x, nm) rep(nm, length(x)),
                                           items, names(items), SIMPLIFY = FALSE)),
                             setdiff(colnames(scoringMatrix), names(items)))
  }
  stopifnot(is.character(fixSlopes), all(fixSlopes %in% latentTraits),
            is.list(reverseCoded), all(names(reverseCoded) %in% latentTraits),
            all(!duplicated(names(reverseCoded))), all(unlist(reverseCoded) %in% unlist(items)),
            is.character(orthogonal), all(orthogonal %in% latentTraits))
  if (length(reverseCoded) > 0L & length(fixSlopes) == 0L) {
    message("With no slopes being fixed argument 'reverseCoded' won't affect the model specification.")
  }
  # Convert the per-item category lists into data frames (item, value).
  itemCategories <- lapply(itemCategories, na.omit)
  itemCategories <- mapply(function(v, nm) data.frame(item = nm,
                                                      value = v,
                                                      stringsAsFactors = FALSE),
                           itemCategories, names(itemCategories),
                           SIMPLIFY = FALSE)
  # Regressions of latent traits on exogenous variables and of dependent
  # variables on latent traits.
  modelStructural <- make_mplus_structural_model(latent = latentTraits,
                                                 observedExogenous = observedExogenous,
                                                 observedDependent = observedDependent)
  observedExogenous <- attributes(modelStructural)$observedExogenous
  observedDependent <- attributes(modelStructural)$observedDependent
  model <- constraints <- vector(mode = "character", length = 0L)
  # Simple GPCM specification: possible only with a one-column scoring matrix
  # and no missing categories in any item.
  if (ncol(scoringMatrix) == 1L & trySimpleGPCM &
      all(sapply(itemCategories, nrow) == nrow(scoringMatrix))) {
    variable <- paste0("CATEGORICAL = ",
                       paste(unique(unlist(items)), collapse = " "), " (gpcm);")
    for (lt in seq_along(latentTraits)) {
      i <- which(colnames(scoringMatrix) == names(latentTraits)[lt])
      traitItems <- items[[colnames(scoringMatrix)[i]]][[latentTraits[lt]]]
      # (consistency) guard the lookup the same way as in the NRM branch
      if (latentTraits[lt] %in% names(reverseCoded)) {
        traitItemsReversed <- reverseCoded[[latentTraits[lt]]]
      } else {
        traitItemsReversed <- vector(mode = "character", length = 0L)
      }
      stopifnot(all(traitItemsReversed %in% traitItems))
      traitItemsStraight <- setdiff(traitItems, traitItemsReversed)
      if (latentTraits[lt] %in% fixSlopes) {
        # Fixed slopes at 1 (-1 for reverse-coded items), variance free.
        if (length(traitItemsStraight) > 0L) {
          model <- c(model,
                     paste0(latentTraits[lt], " BY ",
                            paste(traitItemsStraight, collapse = "@1 "), "@1;"))
        }
        if (length(traitItemsReversed) > 0L) {
          model <- c(model,
                     paste0(latentTraits[lt], " BY ",
                            paste(traitItemsReversed, collapse = "@-1 "), "@-1;"))
        }
        model <- c(model,
                   paste0(latentTraits[lt], "*;"))
      } else {
        # Free slopes, variance fixed at 1.
        model <- c(model,
                   paste0(latentTraits[lt], " BY ", traitItems[1L], "* ",
                          paste(traitItems[-1L], collapse = " "), ";"))
        model <- c(model,
                   paste0(latentTraits[lt], "@1;"))
      }
    }
  # GPCM with a custom scoring matrix, set up as a (constrained) NRM
  } else {
    itemCategories <- do.call(rbind, itemCategories)
    variable <- paste0("NOMINAL = ",
                       paste(unique(unlist(items)), collapse = " "), ";")
    for (lt in seq_along(latentTraits)) {
      i <- which(colnames(scoringMatrix) == names(latentTraits)[lt])
      if (latentTraits[lt] %in% names(reverseCoded)) {
        traitItemsReversed <- reverseCoded[[latentTraits[lt]]]
      } else {
        traitItemsReversed <- vector(mode = "character", length = 0L)
      }
      traitItems <- items[[colnames(scoringMatrix)[i]]][[latentTraits[lt]]]
      stopifnot(all(traitItemsReversed %in% traitItems))
      # Mplus assumes that last category is always codded by 0
      # if scoring matrix has another value there, the codding scheme must be changed
      if (scoringMatrix[nrow(scoringMatrix), i] != 0) {
        scoringMatrix[, i] <- scoringMatrix[, i] - scoringMatrix[nrow(scoringMatrix), i]
      }
      syntax <- make_mplus_gpcm_nrm_syntax(scoringColumn = scoringMatrix[, i, drop = FALSE],
                                           latentTraitName = latentTraits[lt],
                                           items = traitItems,
                                           reverseCoded = traitItemsReversed,
                                           itemCategories = itemCategories,
                                           fixSlopes = latentTraits[lt] %in% fixSlopes)
      model <- c(model,
                 syntax$loadings,
                 syntax$latentTrait,
                 "")
      constraints <- c(constraints,
                       syntax$constraints)
    }
  }
  # (bug fix) append the orthogonality constraints AFTER the branch above;
  # previously this happened only inside the NRM branch, so requesting
  # `orthogonal` together with the simple GPCM specification silently
  # produced no WITH @0 statements
  model <- c(model,
             make_mplus_latent_traits_orthogonal_syntax(orthogonal))
  if (!is.na(weight)) {
    weight <- paste0("WEIGHT = ", weight, ";")
  } else {
    weight <- vector(mode = "character", length = 0L)
  }
  if (length(constraints) == 0L) constraints <- NULL
  results <- list(VARIABLE =
                    paste(c(strwrap(variable,
                                    width = 80L, exdent = 10L),
                            weight),
                          collapse = "\n"),
                  MODEL = paste(c(modelStructural, model),
                                collapse = "\n"),
                  MODELCONSTRAINT = paste(constraints, collapse = "\n"))
  return(as.MplusSyntaxElements(results))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares Mplus model description syntax for the structural part
#' of the model given latent traits, observed exogenous predictors and observed
#' dependent variables.
#' @param latent a character vector of latent variable names
#' @param observedExogenous either:
#' \itemize{
#'   \item{a character vector with names of observed exogenous predictors that
#'         should be used to predict latent variables in the model}
#'   \item{a matrix with latent traits in columns and observed exogenous
#'         predictors in rows specifying which of the exogenous predictors should
#'         be used to predict which latent traits (matrix should contain only
#'         0 and 1 or \code{TRUE} and \code{FALSE})}
#' }
#' @param observedDependent either:
#' \itemize{
#'   \item{a character vector with names of observed dependent variables that
#'         should be predicted using latent variables in the model}
#'   \item{a matrix with latent traits in columns and observed dependent
#'         variables in rows specifying which of the dependent variables should
#'         be predicted by which latent traits (matrix should contain only
#'         0 and 1 or \code{TRUE} and \code{FALSE})}
#' }
#' @return A character vector (with a \code{MODEL} element, or empty when
#' there are no observed variables) carrying attributes
#' \code{observedExogenous} and \code{observedDependent} with the names of the
#' observed variables actually included in the model
make_mplus_structural_model <- function(latent, observedExogenous,
                                        observedDependent) {
  stopifnot(is.character(latent), !anyNA(latent), length(latent) > 0L, all(!duplicated(latent)),
            is.character(observedExogenous) | is.numeric(observedExogenous) | is.logical(observedExogenous),
            is.character(observedDependent) | is.numeric(observedDependent) | is.logical(observedDependent))
  # Normalize the exogenous specification to a logical matrix
  # (predictors in rows, latent traits in columns).
  if (is.character(observedExogenous)) {
    stopifnot(is.vector(observedExogenous),
              all(!is.na(observedExogenous)),
              all(!duplicated(observedExogenous)))
    observedExogenous <- matrix(TRUE, ncol = length(latent),
                                nrow = length(observedExogenous),
                                dimnames = list(observedExogenous, latent))
  } else {
    stopifnot(all(observedExogenous %in% c(0, 1)),
              all(colnames(observedExogenous) %in% latent),
              !anyNA(rownames(observedExogenous)),
              all(!duplicated(rownames(observedExogenous))))
    observedExogenous <- matrix(as.logical(observedExogenous),
                                nrow = nrow(observedExogenous),
                                dimnames = dimnames(observedExogenous))
  }
  predicting <- apply(observedExogenous, 1L, any)
  if (any(!predicting)) {
    warning("There are some observed exogenous variables that do not predict any latent variable: '",
            paste(rownames(observedExogenous)[!predicting], collapse = "', '"), "'",
            "\nThese variables won't be included in the model.")
  }
  observedExogenous <- observedExogenous[predicting, , drop = FALSE]
  # Same normalization for the dependent variables.
  if (is.character(observedDependent)) {
    stopifnot(is.vector(observedDependent),
              all(!is.na(observedDependent)),
              all(!duplicated(observedDependent)))
    observedDependent <- matrix(TRUE, ncol = length(latent),
                                nrow = length(observedDependent),
                                dimnames = list(observedDependent, latent))
  } else {
    stopifnot(all(observedDependent %in% c(0, 1)),
              all(colnames(observedDependent) %in% latent),
              !anyNA(rownames(observedDependent)),
              all(!duplicated(rownames(observedDependent))))
    observedDependent <- matrix(as.logical(observedDependent),
                                nrow = nrow(observedDependent),
                                dimnames = dimnames(observedDependent))
  }
  predicted <- apply(observedDependent, 1L, any)
  if (any(!predicted)) {
    warning("There are some observed dependent variables that are not predicted by any latent variable: '",
            paste(rownames(observedDependent)[!predicted], collapse = "', '"), "'",
            "\nThese variables won't be included in the model.")
  }
  observedDependent <- observedDependent[predicted, , drop = FALSE]
  # (bug fix) remember the variable names NOW - below both matrices are
  # overwritten with syntax strings, so taking rownames() at the end (as the
  # code previously did) always yielded NULL attributes
  exogenousNames <- rownames(observedExogenous)
  dependentNames <- rownames(observedDependent)
  if (nrow(observedExogenous) > 0L) {
    latentPredicted <- apply(observedExogenous, 2L, any)
    # (bug fix) pair each column with its own latent trait name; indexing
    # `latent` positionally was wrong when a matrix with reordered or fewer
    # columns than `latent` was supplied
    observedExogenous <-
      paste0(names(latentPredicted)[latentPredicted], " ON ",
             apply(observedExogenous[, latentPredicted, drop = FALSE], 2L,
                   function(x) {return(paste(names(x)[x], collapse = " "))}),
             ";")
    observedExogenous <- strwrap(observedExogenous, width = 80L, exdent = 5L)
  } else {
    observedExogenous <- vector(mode = "character", length = 0L)
  }
  if (nrow(observedDependent) > 0L) {
    observedDependent <-
      paste0(rownames(observedDependent), " ON ",
             apply(observedDependent, 1L,
                   function(x) {return(paste(names(x)[x], collapse = " "))}),
             ";")
    observedDependent <- strwrap(observedDependent, width = 80L, exdent = 5L)
  } else {
    observedDependent <- vector(mode = "character", length = 0L)
  }
  # Separate the two parts with an empty line only when both are present.
  if (length(observedExogenous) > 0L & length(observedDependent) > 0L) {
    space <- ""
  } else {
    space <- vector(mode = "character", length = 0L)
  }
  if (length(observedExogenous) > 0L | length(observedDependent) > 0L) {
    results <- c(MODEL = paste(c(observedExogenous, space, observedDependent, ""),
                               collapse = "\n"))
  } else {
    results <- vector(mode = "character", length = 0L)
  }
  attr(results, "observedExogenous") <- exogenousNames
  attr(results, "observedDependent") <- dependentNames
  return(as.MplusSyntaxElements(results))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares Mplus model syntax describing how items are loaded
#' by latent traits in a GPCM specification of a NRM.
#' @param scoringColumn a one-column matrix (column of a scoring matrix)
#' @param latentTraitName a string with latent variable name
#' @param items a character vector with item names
#' @param reverseCoded a character vector with names of reverse-coded items
#' @param itemCategories a data frame with columns named \emph{item} and
#' \emph{value} storing unique (non-NA) values of items that occur in the data
#' @param fixSlopes a logical value indicating whether slopes of the latent
#' trait should be fixed to be the same
#' @return A list with elements \code{loadings} (character vector of Mplus
#' BY statements), \code{latentTrait} (the latent variance specification) and
#' \code{constraints} (MODEL CONSTRAINT lines; empty when \code{fixSlopes})
make_mplus_gpcm_nrm_syntax <- function(scoringColumn, latentTraitName, items,
                                       reverseCoded, itemCategories, fixSlopes) {
  stopifnot(is.matrix(scoringColumn), ncol(scoringColumn) == 1L,
            is.character(latentTraitName), length(latentTraitName) == 1L,
            !is.na(latentTraitName),
            is.character(items), length(items) > 0L, !anyNA(items),
            is.character(reverseCoded), !anyNA(reverseCoded),
            is.data.frame(itemCategories),
            "item" %in% names(itemCategories),
            "value" %in% names(itemCategories),
            is.logical(fixSlopes), length(fixSlopes) == 1L,
            fixSlopes %in% c(TRUE, FALSE))
  # One row per item x response category, carrying the category's scoring
  # weight (wt) and its absolute value (absWt, used only to build labels).
  results <- data.frame(lt = unname(latentTraitName),
                        item = rep(items, each = nrow(scoringColumn)),
                        value = rep(rownames(scoringColumn), length(items)),
                        wt = rep(scoringColumn[, 1L], length(items)),
                        absWt = rep(abs(scoringColumn[, 1L]), length(items)),
                        stringsAsFactors = FALSE)
  # "@w" fixes the slope at the scoring weight; "*w" only sets it as a start
  # value; reverse-coded items get the sign of the fixed weight flipped.
  results$fix = paste0(ifelse(rep(fixSlopes, nrow(results)),
                              ifelse(results$item %in% reverseCoded, "@-", "@"),
                              "*"),
                       results$wt)
  # "n" suffix keeps labels of reverse-coded items distinct from regular ones.
  results$rev = ifelse(results$item %in% reverseCoded, "n", "")
  # Keep only the categories that actually occur in the data for each item.
  results <- merge(results, itemCategories,
                   by = c("item", "value"))
  # Number the nominal categories 1, 2, ...; the LAST category per item gets
  # "category" 0 because Mplus treats it as the reference ...
  results <- lapply(split(results, results["item"]),
                    function(x) cbind(x, cat = c(seq(1L, nrow(x) - 1L), 0L)))
  results <- do.call(rbind, results)
  # ... and is dropped here, together with zero-weighted categories.
  results <- results[results$cat != 0L & results$wt != 0, ]
  # Labels are shared by categories with the same absolute weight so that the
  # constraints below can tie the corresponding slope parameters together.
  results$label <- paste0(results$lt, results$item, "_", results$absWt, results$rev)
  constraints <- unique(cbind(results[, c("item", "wt", "absWt", "rev", "label")]))
  # Mplus BY statements: one per item#category, e.g. "f BY it#1*2 (fit_2);".
  results <- paste0(results$lt, " BY ", results$item, "#", results$cat,
                    results$fix, " (", results$label, ");")
  # Collapse the "double negative" produced when a reverse-coded fixed slope
  # meets a negative weight (e.g. "@--2" -> "@2").
  results <- sub("([*@])--([[:digit:]])", "\\1\\2", results)
  # MODEL CONSTRAINT lines equating the ratios of a given item's slope
  # parameters to the ratios of the corresponding scoring weights.
  constraints <- unlist(lapply(
    split(constraints, constraints$item),
    function(x) {
      if (nrow(x) == 1L) return(vector(mode = "character", length = 0L))
      c(paste0(x$label[-nrow(x)], " = ",
               ifelse(x$rev[-nrow(x)] == "n", "-", ""), x$wt[-nrow(x)],
               "/", ifelse(x$rev[-1L] == "n", "-", ""), x$wt[-1L],
               "*", x$label[-1L], ";"),
        "")
    }))
  # Collapse double negatives in the constraint formulas as well.
  constraints <- gsub("--([[:digit:]]+\\*)", "\\1", constraints)
  # With slopes fixed directly no proportionality constraints are needed.
  if (fixSlopes) constraints <- vector(mode = "character", length = 0L)
  return(list(loadings = results,
              latentTrait = paste0(latentTraitName,
                                   # free variance when slopes are fixed,
                                   # variance fixed at 1 otherwise
                                   ifelse(fixSlopes, "*;", "@1;")),
              constraints = constraints))
}
#' @title Preparing Mplus code to estimate RS models
#' @description Prepares Mplus model description syntax that fixes covariances
#' between given latent traits to 0.
#' @param latentTraits a character vector of latent traits names
#' @return A character vector (one "WITH ...@0;" statement per trait after the
#' first, possibly wrapped over several lines)
make_mplus_latent_traits_orthogonal_syntax <- function(latentTraits) {
  stopifnot(is.character(latentTraits), !anyNA(latentTraits))
  if (length(latentTraits) == 0L) return(vector(mode = "character", length = 0L))
  # For the i-th trait (i >= 2) fix its covariances with every preceding
  # trait to zero: "ti WITH t1@0 t2@0 ... t(i-1)@0;".
  perTrait <- lapply(seq_along(latentTraits)[-1L], function(i) {
    partners <- latentTraits[seq_len(i - 1L)]
    strwrap(paste0(latentTraits[i], " WITH ",
                   paste(partners, collapse = "@0 "), "@0;"),
            width = 80L, exdent = nchar(partners[1L]) + 7L)
  })
  names(perTrait) <- latentTraits[-1L]
  # Mimic sapply()'s simplification: a named character vector when every
  # statement fits on one line, a named list otherwise.
  simplify2array(perTrait, higher = FALSE)
}
#' @title Preparing Mplus code to estimate RS models
#' @description Print method for objects containing Mplus syntaxes
#' @param x an object of class \emph{MplusSyntaxElements}
#' @param ... optional arguments to \code{print.data.frame} methods (used only
#' if \code{x} has an element that is a data frame)
#' @param n an integer passed to \code{\link{head}} (used only if \code{x}
#' has an element that is a data frame)
#' @return invisibly \code{x}
#' @importFrom utils head
#' @export
print.MplusSyntaxElements <- function(x, ..., n = 10L) {
  for (i in seq_along(x)) {
    cat("----------------------------------------\n", names(x)[i], "\n")
    # Character elements whose name is all upper-case hold ready Mplus syntax
    # lines: print them verbatim, one per line.  `&&` (not the vectorized `&`)
    # so the scalar condition short-circuits and the name comparison is only
    # evaluated for character elements.
    if (is.character(x[[i]]) && names(x)[i] == toupper(names(x)[i])) {
      cat(paste(x[[i]], collapse = "\n"), "\n\n", sep = "")
    } else if (is.data.frame(x[[i]])) {
      # Data frames are truncated to the first `n` rows.
      print(head(x[[i]], ..., n = n))
    } else {
      print(x[[i]])
    }
  }
  invisible(x)
}
#' @title Preparing Mplus code to estimate RS models
#' @description Assigns \emph{MplusSyntaxElements} to an object
#' @param x an object
#' @return \code{x} with \emph{MplusSyntaxElements} class assigned
as.MplusSyntaxElements <- function(x) {
  # Prepend the new class so methods for the object's existing classes still
  # apply as fallbacks.
  structure(x, class = c("MplusSyntaxElements", class(x)))
}
|
c70982c78e48ab0d44831409eeb45049e0dfef1c | d86722c152f1db122d4f5c88d15964399c19ed4b | /run_analysis.R | 35ae16bc1ef192904a1af62b2283ef5615995543 | [] | no_license | alexanderAnokhin/GCD_course_project | 318ad58e4a9175e70904c9a835ab10c6897e4dc1 | 9992b077cc62d3d1cad6df486accace15d144d50 | refs/heads/master | 2020-12-25T16:47:45.994538 | 2016-08-22T09:05:12 | 2016-08-22T09:05:12 | 66,199,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,739 | r | run_analysis.R | ## Read train dataset
## Read train dataset: feature matrix (X), activity labels (y), subject ids.
train <- read.table(file = "UCI HAR Dataset/train/X_train.txt")
train.labels <- read.table(file = "UCI HAR Dataset/train/y_train.txt")
train.subjects <- read.table(file = "UCI HAR Dataset/train/subject_train.txt")
## Read test dataset (same layout as the train set).
test <- read.table(file = "UCI HAR Dataset/test/X_test.txt")
test.labels <- read.table(file = "UCI HAR Dataset/test/y_test.txt")
test.subjects <- read.table(file = "UCI HAR Dataset/test/subject_test.txt")
## Merge train and test datasets; subjects and labels become the last
## two columns of the combined data frame.
train <- cbind(train, train.subjects, train.labels)
test <- cbind(test, test.subjects, test.labels)
data <- rbind(train, test)
## Delete intermediate objects to free memory.
rm(train, train.subjects, train.labels, test, test.labels, test.subjects)
## Read feature names (row i names column i of the feature matrix).
features <- read.table(file = "UCI HAR Dataset/features.txt", col.names = c("number", "feature"))
## Keep only features whose name contains a literal "mean()" or "std()".
features <- features[grep("(mean|std)\\(\\)", features[, 2]), ]
data <- data[, c(features[, 1], ncol(data) - 1, ncol(data))]
## Name variables: selected feature names plus the two appended columns.
names(data) <- c(as.character(features[, 2]), "subject", "activity.number")
## Replace the activity number by its descriptive name.
activities <- read.table(file = "UCI HAR Dataset/activity_labels.txt", col.names = c("number", "activity"))
data <- merge(data, activities, by.x = "activity.number", by.y = "number")
data <- data[, -which(names(data) %in% c("activity.number"))]
## Aggregate: mean of every kept variable per (activity, subject), then
## reshape to long format with one row per (activity, subject, variable).
library(dplyr)
library(tidyr)
## NOTE(review): tbl_df(), summarise_each()/funs() and gather() are
## deprecated in current dplyr/tidyr (use as_tibble(), across() and
## pivot_longer()); kept as-is for compatibility with the package versions
## this script was written against.
result <- tbl_df(data) %>%
  group_by(activity, subject) %>%
  summarise_each(funs(mean)) %>%
  gather(variable, mean, -c(activity, subject))
print(result)
## Fixed: the argument is row.names; the original "row.name" only worked via
## R's partial argument matching.
write.table(result, row.names = FALSE, file = "tinyData.txt")
df39e576d738d38193f71ab61d92c5744a2cf2d7 | 31dad25f94ca56b17eb851b49017dc6d32b51918 | /src/viz/barplot_of_intersected_chromHMM_states.R | d759af612405800a169e3e8de662a0673f952045 | [] | no_license | Adrien-Evo/CM-IPS_maturity | bbcd25f044e22d5a004b1f16a1663e59be5d6b03 | e70a2e95db2b48a72dd2dfa1021431cc7a99dba6 | refs/heads/master | 2022-12-19T19:27:54.552748 | 2020-09-25T14:59:41 | 2020-09-25T14:59:41 | 273,013,309 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,982 | r | barplot_of_intersected_chromHMM_states.R | library(yaml)
## Plotting stack: ggplot2 + ggthemes/scales for styling, reshape2 for
## melt(), dplyr/tidyr for the grouped counts used in barplot_intersect().
library(ggthemes)
library(tidyr)
library(scales)  # was require(); library() fails loudly if the package is missing
#library(GenomicRanges)
#library(bedr)
library(reshape2)
library(ggplot2)
library(dplyr)
## `radio` maps short names to data-file paths; barplot_intersect() reads
## its colour-mapping file paths from it.
## NOTE(review): absolute, machine-specific path -- consider making this
## configurable (command-line argument or relative path).
radio <- read_yaml("/home/itx1433/Documents/PROJ/CM-IPS_maturity/data/radiofile.yml")
## All figures are written into a dedicated sub-directory of radio$viz.
outDir <- paste0(radio$viz, "/barplot_of_ENCODE_states_for_each_CMIPS_chromHMM_states")
dir.create(outDir)
setwd(outDir)  # the ggsave() calls below use paths relative to outDir
## Draw stacked barplots of the ENCODE chromHMM state composition within
## each CM-IPS chromHMM state.
##
## Args:
##   intersect_cmips_encode: 8-column data frame from a bedtools-style
##     intersection; after renaming, column 4 ("cmips") and column 8
##     ("encode") hold the two state labels.
##   nb_encode_states: ENCODE model size, 15 or 18; selects the colour
##     mapping file and the ENCODE state ordering.
##   plotname: label used in the plot titles and output file names.
##
## Side effects: reads the colour-mapping file referenced by the global
## `radio` config, calls theme_set(theme_classic()) (session-wide ggplot2
## state), and writes "<plotname>_fraction.png" and "<plotname>_count.png"
## into the current working directory.
barplot_intersect <- function(intersect_cmips_encode, nb_encode_states, plotname){
  colnames(intersect_cmips_encode) <- c("chr1","start1","end1","cmips","chr2","start2","end2","encode")
  ## Colour scheme: one "state<TAB>r,g,b" row per chromHMM state.
  if (nb_encode_states == 15) {
    color <- read.table(radio$color.mapping.encode.15, sep = "\t")
  } else if (nb_encode_states == 18) {
    color <- read.table(radio$color.mapping.encode.18, sep = "\t")
  } else {
    ## Previously this fell through silently and failed later with
    ## "object 'color' not found"; fail fast with a clear message instead.
    stop("nb_encode_states must be 15 or 18, got: ", nb_encode_states)
  }
  ## Convert one "r,g,b" string into a hex colour.
  getrgb <- function(x){
    rgb(matrix(as.vector(unlist(strsplit(as.character(x), ","))), ncol = 3), maxColorValue = 255)
  }
  chromHMM_color_scheme <- sapply(color$V2, getrgb)
  names(chromHMM_color_scheme) <- color$V1
  ## Long format keyed on the CM-IPS state, then count overlaps per
  ## (CM-IPS state, ENCODE state) pair.
  melted_intersect <- melt(data = intersect_cmips_encode, id.vars = "cmips", measure.vars = c("encode"))
  toplot <- melted_intersect %>% group_by(cmips) %>% count(value)
  ## Rearrange factor levels so states appear in a fixed, meaningful order.
  toplot$cmips <- factor(toplot$cmips, levels = rev(c("Active_promoter","Weak_promoter","Poised_promoter","Strong_enhancer","Poised_enhancer",
                                                      "Polycomb_repressed","Heterochrom_lowsignal")))
  if (nb_encode_states == 15) {
    toplot$value <- factor(toplot$value, levels = rev(c("TssA","TssAFlnk","TssBiv","BivFlnk","EnhG","EnhA","EnhBiv",
                                                        "Tx","ReprPC","ZNF_Rpts","Het",
                                                        "Quies")))
  } else if (nb_encode_states == 18) {
    toplot$value <- factor(toplot$value, levels = rev(c("TssA","TssAFlnk","TssBiv","EnhG","EnhA",
                                                        "EnhWk","EnhBiv","Tx","ReprPC","ZNF_Rpts","Het",
                                                        "Quies")))
  }
  theme_set(theme_classic())  # NOTE: changes the session-wide ggplot2 theme
  ## Fraction plot: each bar is normalised to 1 (position_fill).
  g <- ggplot(toplot) +
    geom_bar(stat="identity", aes(fill=value, x=cmips, y=n), colour="black", width = 0.95, position = position_fill()) +
    coord_flip() + labs(fill="ENCODE ChromHMM annotation") + theme_tufte() + scale_fill_manual(values = chromHMM_color_scheme) + xlab("CM-IPS") + ylab("Fraction") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1)) + ggtitle(paste0("CM-IPS vs ENCODE ", plotname))
  ggsave(g, filename = paste0(plotname, "_fraction.png"), width = 8, height = 6)
  ## Count plot: raw overlap counts, not normalised.
  gg <- ggplot(toplot) +
    geom_bar(stat="identity", aes(fill=value, x=cmips, y=n), colour="black", width = 0.95) +
    labs(fill="ENCODE ChromHMM annotation") + theme_tufte() + scale_fill_manual(values = chromHMM_color_scheme) + xlab("CM-IPS") + ylab("Count") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1)) + ggtitle(paste0("CM-IPS vs ENCODE ", plotname)) + scale_y_continuous(labels = scales::comma)
  ggsave(gg, filename = paste0(plotname, "_count.png"), width = 8, height = 6)
}
## Whole-genome intersections: CM-IPS states vs the fetal-heart (15-state)
## and right-ventricle (18-state) ENCODE references.
## NOTE(review): h=F / stringsAsFactors=F rely on partial argument matching
## and the T/F shorthand; prefer header = FALSE, stringsAsFactors = FALSE.
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.FET,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,15,"FETAL")
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.VENT.R,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,18,"VENT.R")
## Same comparison restricted to 1 kb windows around TSSs.
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.FET.tss_windows_1kb,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,15,"FETAL_1kb_TSS")
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.VENT.R.tss_windows_1kb,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,18,"VENT.R_1kb_TSS")
####Tss CCDS
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.FET.tss.ccds,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,15,"FETAL_TSS_CCDS")
intersect_cmips_encode=read.table(radio$intersect.all.CMIPS.VENT.R.tss.ccds,h=F,sep = "\t",stringsAsFactors=F)
barplot_intersect(intersect_cmips_encode,18,"VENT.R_TSS_CCDS")
### TSS CCDS just looking at the base level
tss_intersect = read.table(radio$intersect.tss,h=F,sep = "\t", stringsAsFactors = FALSE)
## Reshape to wide: one row per V1 value, one column per V2 value.
tss_intersect.dcast = dcast(tss_intersect,V1~V2)
## Coerce the character columns produced by dcast back to factors.
tss_intersect.dcast[sapply(tss_intersect.dcast, is.character)] <- lapply(tss_intersect.dcast[sapply(tss_intersect.dcast, is.character)],
                                                               as.factor)
head(tss_intersect.dcast)
## barplot_intersect() only reads columns 4 ("cmips") and 8 ("encode"), so
## column 1 is duplicated as padding to place the two columns of interest
## at positions 4 and 8 of the expected 8-column layout.
test = tss_intersect.dcast[,c(1,1,1,4,1,1,1,8)]
barplot_intersect(test,18,"VENT.R_TSS_CCDS_1bp")
test = tss_intersect.dcast[,c(1,1,1,4,1,1,1,6)]
barplot_intersect(test,18,"HRT.FET_TSS_CCDS_1bp")
test = tss_intersect.dcast[,c(1,1,1,4,1,1,1,9)]
barplot_intersect(test,18,"IPS_TSS_CCDS_1bp")
## NOTE(review): `test` keeps its original dcast column names in this scope
## (the renaming happens on the function's local copy), so test$cmips is
## presumably NULL and this sum is always 0 -- looks like leftover debug code.
sum(is.na(test$cmips))
|
7eaada12717ed78ed8aba17545d27f338a927b6b | f491d752d4d79cd5f739d74c4b169a88a5eb0f25 | /run_analysis.R | 5af40474757a5c00f85b7da2ca08b52ba8fe187e | [] | no_license | arsenitheunicorn/getting_cleaning_data_R | 53391b9d58db6cb7d288a2d34821c7237794b39c | cff0df17953e7df00afd36ea33501950ab903a3c | refs/heads/main | 2023-04-06T14:16:42.427996 | 2021-04-07T14:57:17 | 2021-04-07T14:57:17 | 355,579,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,208 | r | run_analysis.R | #setwd('../desktop/study/getting_cleaning_data')
# Source archive for the UCI HAR dataset used throughout this script.
zipURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Create a folder for this project (skipped if it already exists).
if (!file.exists("project_data")) {
  dir.create("project_data")
}
# Download the required .zip only if it was not done before.
if (length(list.files("./project_data")) == 0) {
  dest <- "./project_data/dataset.zip"
  download.file(zipURL, dest)
}
# All subsequent paths are relative to project_data.
# NOTE(review): setwd() inside a script is fragile if the script is sourced
# more than once; building paths with file.path() would be more robust.
setwd('./project_data')
# Unpack the .zip only if nothing has been extracted yet.
# Fixed: FALSE spelled out instead of the reassignable shorthand F.
if (length(list.dirs(".", recursive = FALSE)) == 0) {
  unzip("dataset.zip")
}
# Quick look at the extracted directory tree.
for (val in list.dirs('.')) {
  print(val)
  print(list.files(val))
  print("-----")
}
# Normalizes whitespace in the raw fixed-width text so the remaining single
# spaces can serve as field separators for read.csv().
pre_parse <- function(x) {
  # A run of spaces at the very start of the text or right after a newline
  # carries no data; collapse each such run into a single newline.
  cleaned <- gsub("(^|\n) +", "\n", x)
  # NOTE(review): pattern and replacement look identical here, which makes
  # this call a no-op; the header comment suggests it was meant to collapse
  # repeated internal separators -- confirm against the original intent.
  gsub(" ", ' ', cleaned)
}
# Reading all required datasets: X (features), y (activity numbers) and
# subject ids, for both the test and train splits.  The X files have
# variable-width space padding, so each is slurped as one string and
# normalised by pre_parse() before parsing with a single-space separator.
# (header = FALSE spelled out instead of the reassignable shorthand F.)
fileName <- './UCI HAR Dataset/test/X_test.txt'
X_test <- read.csv(text = pre_parse(readChar(fileName, file.info(fileName)$size)), sep = " ", header = FALSE)
fileName <- './UCI HAR Dataset/train/X_train.txt'
X_train <- read.csv(text = pre_parse(readChar(fileName, file.info(fileName)$size)), sep = " ", header = FALSE)
fileName <- "./UCI HAR Dataset/train/y_train.txt"
y_train <- read.csv(fileName, header = FALSE)
fileName <- "./UCI HAR Dataset/test/y_test.txt"
y_test <- read.csv(fileName, header = FALSE)
fileName <- "./UCI HAR Dataset/train/subject_train.txt"
s_train <- read.csv(fileName, header = FALSE)
fileName <- "./UCI HAR Dataset/test/subject_test.txt"
s_test <- read.csv(fileName, header = FALSE)
# Adding y (activity number) to X.
X_test$activity_num <- y_test$V1
X_train$activity_num <- y_train$V1
# Adding subject id to X.
X_test$Subject <- s_test$V1
X_train$Subject <- s_train$V1
# Merging test and train datasets.
df <- rbind(X_test, X_train)
# Reading features that will become column names (one row per feature column).
fileName <- './UCI HAR Dataset/features.txt'
df_features <- read.csv(fileName, sep = " ", header = FALSE)
#head(df_features)
# This will help to label activities properly.
fileName <- "./UCI HAR Dataset/activity_labels.txt"
act_labels <- read.csv(fileName, sep = ' ', header = FALSE)
#act_labels
# Labeling activities: map each activity number to its descriptive label.
df$activity_label <- act_labels$V2[match(df$activity_num, act_labels$V1)]
# Column names for the feature part of the dataset; the count is derived
# from the features file instead of hard-coding 1:561.
colnames(df)[seq_len(nrow(df_features))] <- as.character(df_features[, 2])
#colnames(df)
# Extract only mean and standard deviation features: "-mean"/"-std" followed
# by a non-letter, which excludes names such as "meanFreq".
needed_ixs <- grep("-(std|mean)[^A-Za-z]", colnames(df))
df_stdAndMean <- df[, needed_ixs]
# if you need to print this dataset from task 2:
df_stdAndMean
# Make the required tidy data set: mean of every kept feature per
# (Subject, Activity).
df_stdAndMean$Subject <- df$Subject
df_stdAndMean$Activity <- df$activity_label
library(reshape2)
df_melted <- melt(df_stdAndMean, id = c("Subject", "Activity"))
df_tidy <- dcast(df_melted, Subject + Activity ~ variable, mean)
df_tidy
# Write this tidy dataset into a .txt file.
write.table(df_tidy, "./average_subject_activity_dataset.txt", row.names = FALSE, quote = FALSE)
0f0bc135b8f56ffa99ea7d10fc45ae156979a99e | 54267662dca87e5964e8c9fbaf57167ad6a50817 | /man/calculate_E_blouin.Rd | f45083c5658c614ea106f26cffa73c2f030795aa | [] | no_license | wouterbeukema/ectotemp | 1e219d448c593622adfaddefad7237b9756a4e21 | 540fe25c03751320cb38584eea2275effc985c23 | refs/heads/master | 2021-07-06T07:32:47.455691 | 2020-07-27T13:36:05 | 2020-07-27T13:36:05 | 150,030,342 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,281 | rd | calculate_E_blouin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_E_blouin.R
\name{calculate_E_blouin}
\alias{calculate_E_blouin}
\title{Thermoregulation effectiveness sensu Blouin-Demers & Weatherhead}
\usage{
calculate_E_blouin(te, tb, tset_low, tset_up)
}
\arguments{
\item{te}{A vector containing operative temperatures.}
\item{tb}{A vector containing body temperature measurements.}
\item{tset_low}{Lower boundary of a species or population set-point range
that was determined through thermal preference trials in a temperature
gradient. This may be a named double vector containing the lower boundary
value, or simply the value itself.}
\item{tset_up}{Upper boundary of the set-point range.}
}
\value{
Effectiveness of temperature regulation (E) sensu Blouin-Demers
and Weatherhead (2001).
}
\description{
This function calculates an often-used variant of the original formula to
determine effectiveness of temperature regulation of Hertz et al. (1993).
The variant in question was proposed by Blouin-Demers & Weatherhead (2001),
who argued that interpretation of the formula of Hertz et al. (1993) is
confounded by the fact that different combinations of the mean thermal
quality of the habitat (de) and mean accuracy of temperature regulation (db)
might lead to similar E values. As such, Blouin-Demers & Weatherhead (2001)
proposed use of E = de - db, which quantifies the extent of departure from
perfect thermoconformity. Positive E values indicate active temperature
regulation, negative values represent active avoidance of suitable thermal
habitat, and values around 0 suggest thermoconformity.
The thermal quality of the habitat (de) and accuracy of temperature
regulation (db) are calculated as part of this formula, so it is not
necessary to run \code{\link{calculate_de}} and \code{\link{calculate_db}}
before running this function.
}
\examples{
te <- na.omit(bufbuf[,"te"])
tb <- na.omit(bufbuf[,"tb"])
E <- calculate_E_blouin(te, tb, 19.35, 26.44)
}
\references{
Blouin-Demers, G., & Weatherhead, P. J. (2001). Thermal ecology of black rat
snakes (Elaphe obsoleta) in a thermally challenging environment. Ecology, 82
(11), 3025-3043.
}
\seealso{
\code{\link{calculate_E_hertz}}.
}
|
f4213d7692c0efe953322b10f2e074c810029568 | 586b69ecc99fc3854b5eb5f7d4ad5c400c1612db | /graphs.R | a4585798a8e55b920c0145165b5b4c2ad047d71f | [
"Apache-2.0"
] | permissive | mfarr76/WellTabls | ef971e2734bbe663d972daee577e19b3522ba22d | 08bc09108701acead18d31d199ea886a44b080d7 | refs/heads/master | 2020-03-26T00:21:25.285765 | 2018-08-10T17:52:49 | 2018-08-10T17:52:49 | 144,316,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,773 | r | graphs.R | rm(list = ls())
# Library
library(dygraphs)
library(xts)

## NOTE(review): all series below use runif()/rnorm() without set.seed(),
## so every run produces different plots.

## Band chart: a trend series with min/max envelopes.
trend <- sin(seq(1, 41)) + runif(41)
data <- data.frame(time = seq(from = Sys.Date() - 40, to = Sys.Date(), by = 1),
                   trend = trend,
                   max = trend + abs(rnorm(41)),
                   min = trend - abs(rnorm(41, sd = 1)))
data <- xts(x = data[, -1], order.by = data$time)
# Plot: dySeries(c(lower, value, upper)) shades the band around the trend.
dygraph(data) %>%
  dySeries(c("min", "trend", "max"))

## Candlestick / stem charts (need 4 data points per date stamp).
trend <- sin(seq(1, 41)) + runif(41)
data <- data.frame(time = seq(from = Sys.Date() - 40, to = Sys.Date(), by = 1),
                   value1 = trend,
                   value2 = trend + rnorm(41),
                   value3 = trend + rnorm(41),
                   value4 = trend + rnorm(41))
data <- xts(x = data[, -1], order.by = data$time)
# Plot it
dygraph(data) %>%
  dyCandlestick()
dygraph(data) %>%
  dyOptions(stemPlot = TRUE)

##lollipop===========================================================
# Library
library(tidyverse)

## Two values per category, joined by a lollipop segment.
value1 <- abs(rnorm(26)) * 2
data <- data.frame(x = LETTERS[1:26],
                   value1 = value1,
                   value2 = value1 + 1 + rnorm(26, sd = 1))
## Reorder categories by the mean of the two values so segments stack nicely.
data <- data %>% rowwise() %>%
  mutate(mymean = mean(c(value1, value2))) %>%
  arrange(mymean) %>%
  mutate(x = factor(x, x))
# plot
ggplot(data) +
  geom_segment(aes(x = x, xend = x, y = value1, yend = value2), color = "grey") +
  geom_point(aes(x = x, y = value1), color = rgb(0.2, 0.7, 0.1, 0.5), size = 3) +
  geom_point(aes(x = x, y = value2), color = rgb(0.7, 0.2, 0.1, 0.5), size = 3) +
  coord_flip()

# With a bit more style
ggplot(data) +
  geom_segment(aes(x = x, xend = x, y = value1, yend = value2), color = "grey") +
  geom_point(aes(x = x, y = value1), color = rgb(0.2, 0.7, 0.1, 0.5), size = 3) +
  geom_point(aes(x = x, y = value2), color = rgb(0.7, 0.2, 0.1, 0.5), size = 3) +
  coord_flip() +
  theme_light() +
  ## BUG FIX: removed the trailing comma after element_blank(); it made
  ## theme() receive an empty argument and error at run time.
  theme(
    legend.position = "none",
    panel.border = element_blank()
  ) +
  xlab("") +
  ylab("Value of Y")
|
60141ae75e7a673c73b848dae1ecb036406d9568 | 5df4be881c178d95869a8ee8dcb12c03f52b743c | /R/getMetricData.R | a0dffb1ffbcf3f9ab754c22abe07441a8241e4e0 | [] | no_license | cpanse/MSstatsQC | a6672e2bd04379a683ed6473aad5b96bfaafc736 | 648c909e35b0935bea51177db41bb1da5d05300d | refs/heads/master | 2021-01-22T18:08:31.692764 | 2017-03-10T15:48:26 | 2017-03-10T15:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 662 | r | getMetricData.R | getMetricData <- function(prodata, peptide, L, U, metric, normalization) {
# Subset to the rows of the requested peptide.
precursor.data<-prodata[prodata$Precursor==peptide,] #"Precursor" is one of the columns in data that shows the name of peptides
# NOTE(review): dead initialisation -- metricData is unconditionally
# reassigned below before its first use.
metricData <- 0
if(is.null(metric)){
# No metric requested: nothing to extract.
return(NULL)
}
# Values of the requested metric column for this peptide only.
metricData = precursor.data[,metric]
if(normalization == TRUE) {
# Rows L..U of the peptide's data form the in-control window used to
# estimate the reference mean and spread.
mu=mean(metricData[L:U]) # in-control process mean
sd=sd(metricData[L:U]) # in-control process standard deviation (the original comment said "variance")
# Guard against division by zero when the in-control window is constant.
if(sd == 0) {sd <- 0.0001}
# Standardise the whole series against the in-control estimates; scale()
# returns a one-column matrix.  NOTE(review): metricData[1:length(metricData)]
# is just metricData -- the subset is redundant.
metricData=scale(metricData[1:length(metricData)],mu,sd) # transformation for N(0,1) )
return(metricData)
} else if(normalization == FALSE){
# Raw, unstandardised metric values.
return(metricData)
}
# Implicit invisible NULL for any other `normalization` value.
}
|
59834293a3677adef8ee5c13b54ba729a674a519 | 9a36b5e45fcd42b93365bd7f420b619195b87064 | /man/TsnePlot.Rd | 09ad4c01bf4517214f255368f7d5ddd65116b09a | [] | no_license | uilnauyis/homosapien-DEE2-data | 89adff0de09a7da6527e17b6e69689b8a5595523 | 89e9f5561103154ac8282073cd65523ff4f34fce | refs/heads/master | 2022-12-28T20:01:26.099631 | 2020-10-09T01:00:46 | 2020-10-09T01:00:46 | 260,235,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 483 | rd | TsnePlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TsnePlot.R
\name{TsnePlot}
\alias{TsnePlot}
\title{tsne analysis of the count data}
\usage{
TsnePlot(sExpr, plot = "3d")
}
\arguments{
\item{sExpr}{a SummarizedExperiment object that contains count data along
with gene information and sample metadata.}
\item{plot}{the type of plot for the t-SNE analysis result, which should
be either '2d' or '3d'}
}
\description{
tsne analysis of the count data
}
|
f3fcddddc7d822b4f1832ca803acbc71f91ca3ff | 288b4b6998906714ab368e0ee14c70a4059be4ab | /man/dat.nielweise2007.Rd | 6f7d958d6c5ee5686605d9a1995eca604e133030 | [] | no_license | qsh7950/metadat | f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018 | 5c70fa63d7acfa1f315534fb292950513cb2281e | refs/heads/master | 2021-02-26T06:42:18.937872 | 2019-10-21T21:58:33 | 2019-10-21T21:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,811 | rd | dat.nielweise2007.Rd | \name{dat.nielweise2007}
\docType{data}
\alias{dat.nielweise2007}
\title{Studies on Anti-Infective-Treated Central Venous Catheters for Prevention of Catheter-Related Bloodstream Infections}
\description{Results from 18 studies comparing the risk of catheter-related bloodstream infection when using anti-infective-treated versus standard catheters in the acute care setting.}
\usage{dat.nielweise2007}
\format{The data frame contains the following columns:
\tabular{lll}{
\bold{study} \tab \code{numeric} \tab study number \cr
\bold{author} \tab \code{character} \tab (first) author \cr
\bold{year} \tab \code{numeric} \tab publication year \cr
\bold{ai} \tab \code{numeric} \tab number of CRBSIs in patients receiving an anti-infective catheter \cr
\bold{n1i} \tab \code{numeric} \tab number of patients receiving an anti-infective catheter \cr
\bold{ci} \tab \code{numeric} \tab number of CRBSIs in patients receiving a standard catheter \cr
\bold{n2i} \tab \code{numeric} \tab number of patients receiving a standard catheter
}
}
\details{
The use of a central venous catheter may lead to a catheter-related bloodstream infection (CRBSI), which in turn increases the risk of morbidity and mortality. Anti-infective-treated catheters have been developed that are meant to reduce the risk of CRBSIs. Niel-Weise et al. (2007) conducted a meta-analysis of studies comparing infection risk when using anti-infective-treated versus standard catheters in the acute care setting. The results from 18 such studies are included in this dataset.
The dataset was used in the article by Stijnen et al. (2010) to illustrate various generalized linear mixed-effects models for the meta-analysis of proportions and odds ratios (see \sQuote{References}).
}
\source{
Niel-Weise, B. S., Stijnen, T., & van den Broek, P. J. (2007). Anti-infective-treated central venous catheters: A systematic review of randomized controlled trials. \emph{Intensive Care Medicine}, \bold{33}, 2058--2068.
}
\references{
Stijnen, T., Hamza, T. H., & Ozdemir, P. (2010). Random effects meta-analysis of event outcome in the framework of the generalized linear mixed model with applications in sparse data. \emph{Statistics in Medicine}, \bold{29}, 3046--3067.
}
\examples{
if (require(metafor)) {
### copy data into 'dat' and examine data
dat <- dat.nielweise2007
dat
### standard (inverse-variance) random-effects model
res <- rma(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, drop00=TRUE)
print(res, digits=3)
predict(res, transf=exp, digits=2)
### random-effects conditional logistic model
\dontrun{
res <- rma.glmm(measure="OR", ai=ai, n1i=n1i, ci=ci, n2i=n2i, data=dat, model="CM.EL")
print(res, digits=3)
predict(res, transf=exp, digits=2)}
}
}
\keyword{datasets}
\concept{medicine}
|
99ed84a5b7adf6fcb9152d0bb9ad175404bec029 | c48f2a584ba20fab90aaeab28b575e180a276928 | /R/GFORCE.R | dc4eba27d3a7b4fd7cab10a75ea8dd35da9e2123 | [] | no_license | cran/GFORCE | 3a2fab8e3cf3d07c18427332985d0901c00c17d7 | 1c89579c5ecce1e8aef93bd7d2854986b835454a | refs/heads/master | 2020-03-20T11:15:14.741926 | 2019-04-07T08:42:42 | 2019-04-07T08:42:42 | 137,397,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 811 | r | GFORCE.R | # Package level documentation
# FOR TESTS -- LOADS C SYMBOLS TO USE WITH TESTTHAT
#' @useDynLib GFORCE kmeans_dual_solution_primal_min_R
#' @useDynLib GFORCE kmeans_pp_R
#' @useDynLib GFORCE test_daps
#' @useDynLib GFORCE test_dsmtd
#' @useDynLib GFORCE test_dvexp
#' @useDynLib GFORCE test_dsumv
#' @useDynLib GFORCE test_dtrace
#' @useDynLib GFORCE test_dcsum
#' @useDynLib GFORCE test_dxpyez
#' @useDynLib GFORCE test_clust_to_opt_val
#' @useDynLib GFORCE test_smoothed_objective
#' @useDynLib GFORCE test_project_E
#' @useDynLib GFORCE test_project_C_perpendicular
#' @useDynLib GFORCE test_project_C_perpendicular_nok
#' @useDynLib GFORCE test_smoothed_gradient
#' @useDynLib GFORCE test_smoothed_gradient_X_base
#' @useDynLib GFORCE test_smoothed_gradient_S_base
dummy_load_function <- function(){;} |
eb9ceae5c26d609430f94aeff68adf2f61e036f1 | 023267839ada61c94515f24ae2b782d2b844194e | /midtermExam/createDataset.R | 3b4de8db63516cf1fd36d05c7be81e87172b969f | [] | no_license | DarioBoh/ISDS3105_fall18 | 303399915687750d93e1d850d2fd07eb5870a2bd | 23bc7e464091e1641efec084c4803c612edebf0f | refs/heads/master | 2021-07-12T12:50:06.424984 | 2021-07-04T09:46:36 | 2021-07-04T09:46:36 | 145,449,465 | 0 | 25 | null | 2018-08-24T17:02:20 | 2018-08-20T17:28:20 | null | UTF-8 | R | false | false | 1,756 | r | createDataset.R | library(tidyverse)
library(lubridate)
library(chron)
dt <- read_csv('midtermExam/Baton_Rouge_Crime_Incidents.csv')
names(dt) <- names(dt) %>% tolower() %>% gsub(' ','', .)
select(dt, filenumber, offensedate, offensetime, crime, committed, zip, district, zone, subzone) %>%
mutate_all(tolower) -> dt
dt %>% filter(nchar(offensetime)==4) %>%
mutate(offensedate = mdy(offensedate)) %>%
filter(between(offensedate, ymd('2015-01-01'), ymd('2017-12-31'))) %>%
mutate(offensetime = paste0(str_sub(offensetime,1,2), ':', str_sub(offensetime,3,4), ':00')) -> dt1
dt1 <- filter(dt1, !is.na(offensetime))
# dt1 %>% mutate(daytime = cut(times(offensetime), breaks = times(c('04:30:00', '12:00:00', '17:00:00',
# '22:00:00', '04:29:59')), labels = c('id', 'morning', 'afternoon', 'evening')))
dt1$offensetime_discrete <- cut(times(dt1$offensetime), breaks = times(c('22:00:00', '04:30:00', '12:00:00', '17:00:00',
'04:29:59')), labels = c('night', 'morning', 'afternoon', 'evening'))
dt1$offensetime_discrete[is.na(dt1$offensetime_discrete)] <- 'night'
dt <- dt1
lkt <- data_frame(crime = unique(dt$crime), crimeId = 1:15)
dt$crimeId <- match(dt$crime, lkt$crime)
dt$crime <- NULL
dt <- dt %>% mutate(week = week(offensedate))
dt <- dt %>% mutate(year = as.integer(year(offensedate)), month = as.integer(month(offensedate)), day = as.integer(day(offensedate)))
#dt <- dt %>% mutate_at(vars(year, week, month, day), factor)
dt <- dt %>% mutate(zip = case_when(nchar(zip) == 5 ~ zip, T ~ as.character(NA)))
dt$year[70900] <- NA
max(dt$year)
save(list = c('lkt', 'dt'), file = 'midtermExam/dataset.RData')
load('midtermExam/dataset.RData')
|
86bdeee46a025021897d889c927938b30449c52b | 0284d04bccac0b83ff441b80574cf7874234724c | /R/RcppExports.R | e6f6183593155cef3c90811339cc78f772d68c91 | [] | no_license | kasparmartens/mixtureModels | 9555443bc52f13bc38f6bb369669874804a5fe22 | 1c29d3a50dda68395a173a6018e193909bc59263 | refs/heads/master | 2020-04-05T14:08:29.281121 | 2018-04-04T11:41:20 | 2018-04-04T11:41:20 | 94,789,309 | 8 | 1 | null | null | null | null | UTF-8 | R | false | false | 522 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Cholesky update (compiled routine)
#'
#' Thin wrapper around the registered native routine
#' \code{mixtureModels_chol_update}. NOTE(review): judging by the name,
#' \code{LL} is presumably a Cholesky factor and \code{xx} the update
#' vector, but the actual semantics live in the compiled code -- confirm
#' against the package's C/C++ source.
#' @export
chol_update <- function(LL, xx) {
    .Call('mixtureModels_chol_update', PACKAGE = 'mixtureModels', LL, xx)
}
#' Cholesky downdate (compiled routine)
#'
#' Thin wrapper around the registered native routine
#' \code{mixtureModels_chol_downdate}. NOTE(review): argument semantics
#' (\code{LL} presumably a Cholesky factor, \code{xx} the downdate vector)
#' are defined in the compiled code -- confirm against the C/C++ source.
#' @export
chol_downdate <- function(LL, xx) {
    .Call('mixtureModels_chol_downdate', PACKAGE = 'mixtureModels', LL, xx)
}
# Internal (unexported) wrapper for the registered native routine
# `mixtureModels_calculate_log_V_n`.
# NOTE(review): the meaning of alpha, N and how_many is defined in the
# compiled code -- confirm against the package's C/C++ source.
calculate_log_V_n <- function(alpha, N, how_many) {
    .Call('mixtureModels_calculate_log_V_n', PACKAGE = 'mixtureModels', alpha, N, how_many)
}
|
65f6c01f568c06424d838e957c872b845da2d59c | e54aaf041aef5c27670836bf47376b6fffb23474 | /man/single.boot.Rd | cfa1965a7c15b46f61d266e7568e56014ce74e31 | [] | no_license | cran/iDINGO | 70de597af0ff96a7c802713c0d6b7f07d6c40ffb | f0805797098590fe3e2b5168dafe72a2849c1f46 | refs/heads/master | 2021-01-19T17:49:52.398014 | 2020-07-30T13:00:02 | 2020-07-30T13:00:02 | 101,089,173 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 929 | rd | single.boot.Rd | \name{single.boot}
\alias{single.boot}
\title{
Calculating differential score for a single bootstrap
}
\description{
This function calculates the edge-wise partial correlation difference for a single bootstrap.
}
\usage{
single.boot(i, z, n, tY.org, P, levels.z, w.upper)
}
\arguments{
\item{i}{iteration number. This argument is not used inside the function itself, but it is required by the parSapply call in the scoring.boot.parallel function.
}
\item{z}{a length n vector representing a binary covariate
}
\item{n}{the number of rows in data
}
\item{tY.org}{the transformed standardized data
}
\item{P}{the global correlation component
}
\item{levels.z}{the levels of the covariates
}
\item{w.upper}{the upper triangular of Omega
}
}
\value{
\item{boot.diff}{the difference for this bootstrap
}
}
\author{
Min Jin HA mjha@mdanderson.org, Caleb CLASS caclass@mdanderson.org
}
|
3db107f6014c9d604b164f17760096378b10dbfa | 5d090e950f10f89b2e3df2acfd9a2aad4a5c91b1 | /Mod14/Projeto/07-ScoreModel.R | 5af9ed7866c308958f5df9fa6b8b3511be7724a3 | [] | no_license | mneresc/formacao_r_azure_machine_learning | db7542ed24a9577bfbd6763f92734e38c6066202 | 147237e1fd4ab26d3aa6d5d25ba4392cd79206b7 | refs/heads/main | 2023-05-12T08:35:26.965443 | 2021-05-19T00:25:58 | 2021-05-19T00:25:58 | 334,229,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,460 | r | 07-ScoreModel.R | # Score do modelo preditivo com randomForest
# Este código foi criado para executar tanto no Azure, quanto no RStudio.
# Para executar no Azure, altere o valor da variavel Azure para TRUE.
# Se o valor for FALSE, o codigo será executado no RStudio
# Obs: Caso tenha problemas com a acentuação, consulte este link:
# https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Configurando o diretório de trabalho
# Coloque entre aspas o diretório de trabalho que você está usando no seu computador
# Não use diretórios com espaço no nome
# setwd("C:/FCD/BigDataRAzure/Cap14/Projeto")
# getwd()
# Função para tratar as datas
# Build one POSIXct timestamp per row from a frame's dteday column
# (a numeric value handed to as.POSIXct with a 1970-01-01 origin, i.e.
# interpreted as seconds since the epoch) and its hr column (hour of day).
set.asPOSIXct <- function(inFrame) {
  base_day <- as.POSIXct(
    as.integer(inFrame$dteday),
    origin = "1970-01-01")
  stamp <- paste0(as.character(base_day), " ", as.character(inFrame$hr), ":00:00")
  as.POSIXct(strptime(stamp, "%Y-%m-%d %H:%M:%S"))
}
# Build one POSIXct timestamp per row by gluing a character dteday column
# ("YYYY-MM-DD") to the hr column and parsing "dteday hr:00:00".
char.toPOSIXct <- function(inFrame) {
  stamp <- paste0(inFrame$dteday, " ", as.character(inFrame$hr), ":00:00")
  as.POSIXct(strptime(stamp, "%Y-%m-%d %H:%M:%S"))
}
# Flag controlling where the script runs: TRUE = inside Azure ML (where the
# input table is injected as `dataset`), FALSE = locally in RStudio.
Azure <- FALSE
if(Azure){
  # In Azure ML the input arrives in the implicit `dataset` object; its
  # dteday column is converted to POSIXct with the helper defined above.
  bikes <- dataset
  bikes$dteday <- set.asPOSIXct(bikes)
}else{
  # Locally this branch is a no-op: `bikes` is assumed to already exist in
  # the workspace. NOTE(review): confirm `bikes` is loaded upstream.
  bikes <- bikes
}
# NOTE(review): require() returns FALSE instead of erroring when the
# package is missing; library(randomForest) would fail loudly here.
require(randomForest)
# Score the fitted model (`model`, assumed to exist in the workspace) on
# the full data set, keeping actual vs predicted counts side by side.
scores <- data.frame(actual = bikes$cnt,
                     prediction = predict(model, newdata = bikes))
|
c15575d3480997cb7e237728749fcf72af31f73e | 583726d46d9f0cd8759027daf6fafc9f47f3640f | /ModelForQ7.R | 8cfbd4ed1dd09ce115efc1e7f52f639d80669561 | [] | no_license | agyurov/Gymnasium2 | c825d59b5c6aadbfd45d4901cc5ec371b995bfdf | a9e4b587ed715f3913764a93d509c63c317fb804 | refs/heads/master | 2021-01-20T01:13:29.148727 | 2017-07-07T10:17:05 | 2017-07-07T10:17:05 | 89,238,382 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | ModelForQ7.R | dff = fb
# Work on the Facebook subset `fb` (created upstream); strip everything up
# to and including the first underscore from every column name.
names(dff) = gsub("^.*?_","",names(dff))
# ------------------------------------------
cat("\014")
str(dfu)
# Q7 only for fbook
# binary ------------------------------------------------------------------
# Nested sequence of logistic regressions for the binary outcome `edit`,
# compared pairwise with likelihood-ratio tests. goodness() and glm.pred()
# are project helpers defined elsewhere in the repository.
mf0 = glm(edit ~ 1,
          data = dff, family = binomial())
goodness(mf0)
summary(mf0)
mf1 = glm(edit ~ gender,
          data = dff, family = binomial())
goodness(mf1)
glm.pred(mf1)
summary(mf1)
anova(mf0,mf1,test="Chisq")
# gender > 1
mf2 = glm(edit ~ gender + digedu,
          data = dff, family = binomial())
goodness(mf2)
glm.pred(mf2)
summary(mf2)
anova(mf1,mf2,test="Chisq")
# gender > gender + digedu
mf3 = glm(edit ~ gender + nocare, # BEST
          data = dff, family = binomial())
goodness(mf3)
glm.pred(mf3)
summary(mf3)
anova(mf1,mf3,test="Chisq")
# Best model mf3
# unique obs on common vars -----------------------------------------------
# Stack the per-group data frames in `bindf` and keep five shared columns.
fk = do.call(rbind.data.frame,bindf)
fk = fk[,c(6,8,25,26,27)]
# Build a row fingerprint by concatenating the five columns into one string.
fk$id = apply(fk,1,function(x) paste0(x,collapse=""))
x = duplicated(fk)
fk2 = fk[!x,]
# NOTE(review): `id` is character here, so aggregate(..., sum) should fail
# (sum is not defined for character); this looks like leftover exploration.
x = aggregate(id~.,fk,sum)
y = table(fk$id)
# NOTE(review): names(y) covers every id present in fk, so this filter
# keeps all rows -- likely not what was intended.
z = fk[fk$id %in% names(y),]
# Index of the first occurrence of each distinct fingerprint ...
tmp = unlist(lapply(names(y),function(x,y) which(y == x)[1],y=fk$id))
LOL = fk[tmp,]
# ... with its frequency attached as a count column.
LOL$count = y
|
d96a63192a2b2b12b8b081fca1661fba67b10010 | c3e693beafe67e6fb20afd158e9876770a3d3d3c | /Exhaustive-search/fixed-length/k_node_viz_unibi.r | f588679b613ce37304d5936a8534b1a408d9de2a | [] | no_license | rmuraglia/Schmidler | 8fd307f7393283d454c38600c29cc8f0adce6279 | 3026a84bf56da1477e2bf7f78d20c0f752de82cb | refs/heads/master | 2020-12-25T17:01:08.116539 | 2016-08-11T23:55:41 | 2016-08-11T23:55:41 | 57,866,522 | 0 | 0 | null | 2016-06-15T17:44:25 | 2016-05-02T04:44:07 | R | UTF-8 | R | false | false | 2,837 | r | k_node_viz_unibi.r | # k_node_viz_unibi.r
# import libraries
# Warnings are muted only around package loading, then restored.
options(warn=-1)
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
suppressMessages(library(gridExtra))
options(warn=0)
# set working directory
# Machine-specific project roots, keyed on the OS name from Sys.info().
switch(Sys.info()[['sysname']],
Windows = {setwd('D:/Dropbox/pathsearch-exhaustive/uni-bi-symmetric/')},
Darwin = {setwd('/Users/rmuraglia/Dropbox/pathsearch-exhaustive/uni-bi-symmetric/')},
Linux = {setwd('/dscrhome/rm204/pathsearch-exhaustive/uni-bi-symmetric/')}
)
if (!file.exists('imgout')) { dir.create('imgout') }
###########
# set up info panel
###########
# A blank ggplot used as a canvas for the run-description text panel.
# NOTE(review): mu0, sig0, a, b, c, move_jump, num_draws, max_num_nodes,
# unscaled_cost, target_ind, search_out, path_soln, point_grid and filID
# are all expected to exist in the workspace from an upstream search run.
emptydf<-data.frame(0,0)
colnames(emptydf)<-c('x', 'y')
labelplot<-ggplot(emptydf,aes(x=x,y=y)) + geom_blank() + theme_bw() + labs(x='',y='')
init_string<-paste('mu0=', mu0, ', sigma0=', sig0, sep='')
target_string<-paste('A=', a, ', B=', b, ', C=', c, sep='')
title_string<-paste('Best paths of fixed length \n', 'unimodal: ', init_string, '\nto bimodal: ', target_string, '\n distance metric: asympvarbar \nmove jump coeff: ', move_jump, '\nsamples per node: ', num_draws, sep='')
infopanel<-labelplot + geom_text(x=0, y=0, label=title_string, size=3) + theme(axis.ticks=element_blank(), axis.text.x=element_blank()) + theme(axis.ticks=element_blank(), axis.text.y=element_blank())
########
# individual path overviews
########
# One plot per feasible path length; the info panel occupies slot 1.
plotlist<-list(infopanel)
pcost_vec<-rep(NA, length.out=max_num_nodes)
for (i in 1:max_num_nodes) { # for each possible path length
    if (unscaled_cost[target_ind, i]!=Inf) { # if there exists a solution
        # get scaled path cost
        eff_N<-max_num_nodes*num_draws/i
        path_cost<-unscaled_cost[target_ind, i]/(2*eff_N)
        print_cost<-format(signif(path_cost, digits=5), scientific=T)
        pcost_vec[i]<-path_cost
        # get path as data frame
        path_df<-path_soln(search_out[[3]], i)
        # make filename string (zero-padded path length)
        i_print<-sprintf('%03g', i)
        imgname<-paste('imgout/', filID, '-', i_print, 'node.eps', sep='')
        # make plot
        baseA<-ggplot(data=point_grid, aes(x=lambda, y=temperature))
        layerA1<-geom_path(data=path_df, aes(x=lambda, y=temperature), arrow=arrow(), size=2, alpha=1)
        labelA1<-labs(title=paste(i, ' node path cost: ', print_cost, sep=''))
        plotA<-baseA + layerA1 + labelA1 + geom_point()
        plotlist[[i+1]]<-plotA
        # save to file
        ggsave(file=imgname, width=5, height=5, plot=plotA)
    }
}
##########
# summary figure for best paths
##########
# Pick the five cheapest path lengths (NA costs sort last) and arrange
# them with the info panel in a 2x3 grid.
# NOTE(review): if fewer than 5 finite-cost paths exist, top_ind contains
# NA indices and the plotlist subscripting below would fail.
top_ind<-order(pcost_vec, decreasing=F)[1:5]
summary_plots<-plotlist[c(1, top_ind+1)]
plotB<-arrangeGrob(summary_plots[[1]], summary_plots[[2]], summary_plots[[3]], summary_plots[[4]], summary_plots[[5]], summary_plots[[6]], nrow=2)
summaryname<-paste('imgout/', filID, '-summary.eps', sep='')
ggsave(file=summaryname, width=9, height=6, plot=plotB)
dc31f9c4496090698d174c546fcee45931d4e2b9 | 5f82d1bc22e4ef72a63c58852a2d035e124f1a37 | /R/bupar.R | 5497db8012a7fcc41515eb5faf12797d4e91b950 | [] | no_license | cran/bupaR | 75608804ef045f678821740aaff123991d5d36b5 | ef020af22301e7aa8c82d62e4d01dd5aebaea99e | refs/heads/master | 2023-04-20T17:49:49.645967 | 2023-04-02T21:00:06 | 2023-04-02T21:00:06 | 86,215,725 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,302 | r | bupar.R | #' @title bupaR - Business Process Analysis in R
#'
#' @description Functionalities for process analysis in R. This packages implements an S3-class for event log objects,
#' and related handler functions. Imports related packages for subsetting event data, computation of descriptive statistics,
#' handling of Petri Net objects and visualization of process maps.
#'
#' @docType package
#' @name bupaR
#'
## usethis namespace: start
#' @import dplyr
#' @import shiny
#' @import miniUI
#' @import eventdataR
#' @importFrom glue glue
#' @importFrom forcats fct_collapse fct_expand
#' @importFrom purrr map pmap
#' @importFrom tibble as_tibble
#' @importFrom tidyr nest unnest unite
#' @importFrom stats median na.omit quantile sd
#' @importFrom utils head setTxtProgressBar txtProgressBar data
#' @importFrom lifecycle deprecated
#' @importFrom magrittr %>%
#' @importFrom data.table as.data.table data.table := .I .N .SD
#' @importFrom rlang .data arg_match caller_arg caller_env is_character
#' @importFrom cli cli_abort
#' @importFrom stringi stri_c
#' @importFrom ggplot2 waiver scale_fill_gradient2 scale_fill_gradient scale_color_manual scale_fill_manual scale_color_gradient scale_color_gradient2
## usethis namespace: end
globalVariables(c("."))  # silence R CMD check notes about the "." pronoun used in NSE pipelines
"_PACKAGE"  # roxygen package-documentation sentinel for the block above
NULL  # NOTE(review): redundant after the "_PACKAGE" sentinel; harmless leftover
|
c97d4a5cbfb73654c5c124d6b58f2040b18cfbb8 | fcd0086fde639019405c6cbebc382a03c09eeae9 | /tests/testthat.R | d259a7cdfc9317d527abaa3f56e77fd13a55a789 | [
"MIT"
] | permissive | amarsee/andrewacct | 5068c88e75662ee02cb08af1ff15124f071ae82d | 4158093f1b91ac0f95333336f97914577c27945d | refs/heads/master | 2020-06-03T16:35:46.768882 | 2020-02-18T20:02:15 | 2020-02-18T20:02:15 | 191,651,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | testthat.R | library(testthat)
library(andrewacct)  # the package under test
test_check("andrewacct")  # discover and run all tests under tests/testthat/
|
32a62ebd9b57bf69af71a8f16b55a4057b88a411 | 57f780626d36e07c5a824b9e24e092d6110a12c9 | /tools.R | 7a617a65b45ec69077529cbb9ad105be8a2bcb5d | [] | no_license | phanhung2/rps10_barcode | d173676fd7aed616bdbd3aca1c9bc280c17e759a | 76a5cb1a251df62363202f9593cd731515a5d7a4 | refs/heads/main | 2023-08-18T21:10:26.027377 | 2021-09-23T18:23:06 | 2021-09-23T18:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,249 | r | tools.R |
#' @keywords internal
parse_ft <- function(text) {
text <- gsub(text, pattern = "\n\t\t\t", replacement = "\t", fixed = TRUE)
parts <- strsplit(text, "\n\n", fixed = TRUE)[[1]]
part_data <- lapply(parts, function(x) {
first_line <- sub(x, pattern = "\\n.+$", replacement = "")
acc <- stringr::str_match(first_line, pattern = "\\|(.+)\\|")[,2]
the_rest <- sub(x, pattern = "^>.+?\\n", replacement = "")
# replace extra \t with , when there are multiple features
lines <- strsplit(the_rest, "\n")[[1]]
lines <- purrr::map2(stringr::str_locate_all(lines, "\t"), lines, function(matches, line) {
if (nrow(matches) > 4) {
for (i in matches[3:(nrow(matches) - 2), 1]) {
substr(line, i, i) <- ","
}
}
return(line)
})
the_rest <- paste0(lines, collapse = "\n")
output <- readr::read_tsv(paste0(the_rest, "\n"), col_names = c("start", "end", "feature", "type", "name"), col_types = "ccccc")
output <- tibble::as_tibble(cbind(list(acc = acc), output, stringsAsFactors = FALSE))
})
output <- dplyr::bind_rows(part_data)
output$complete <- ifelse(startsWith(as.character(output$start), "<") | startsWith(as.character(output$end), ">"), FALSE, TRUE)
output$start <- as.integer(gsub(output$start, pattern = "<", replacement = ""))
output$end <- as.integer(gsub(output$end, pattern = ">", replacement = ""))
return(output)
}
#' @keywords internal
parse_seqs <- function(text) {
xml <- xml2::read_xml(text)
tibble::tibble(acc = xml2::xml_text(xml2::xml_find_all(xml, "//TSeq_accver")),
seq = xml2::xml_text(xml2::xml_find_all(xml, "//TSeq_sequence")),
header = xml2::xml_text(xml2::xml_find_all(xml, "//TSeq_defline")),
length = xml2::xml_text(xml2::xml_find_all(xml, "//TSeq_length")))
}
#' Lookup gene sequences from NCBI
#'
#' Look for sequences of a particular gene for a list of species/isolates/genes from the Genbank
#' nucleotide database.
#'
#' @param species The names of species to look up.
#' @param genes The names of the genes to look up.
#' @param isolates The names of isolates to look up. Must be the same length as \code{species} if used.
#' @param extract_features If TRUE, return the sequence for each feature in the sequence annotation
#' instead of the whole sequence.
#' @param gene_name_in_feature If TRUE, only return features that have one of the gene names
#' somewhere in their description. Only has an effect if extract_features is TRUE.
#' @param db The name of the NCBI database to query. Only tested with "nucleotide", but a few others
#' might work.
#' @param pause The number of seconds to pause between each query. This avoids annoying NCBI and
#' having them block you IP address. Should be at least 0.35 seconds if you dont have an NCBI API
#' key and at least 0.1 seconds if you do.
#' @param ... Additional terms to add to the search request for each species/isolate, see NCBI documentation for a complete list: http://www.ncbi.nlm.nih.gov/books/NBK25499/#_chapter4_ESearch_
#'
#' @return A table
#'
#' @examples \dontrun{
#'
#' # Search for the whole seqeunces for with P. infestans Cox I
#' get_isolate_seqs(species = c("Phytophthora infestans"),
#' genes = c("cox I", "cox 1", "cox1", "coxI", "cytochrome oxidase I", "cytochrome oxidase 1"),
#' retmax = 100)
#'
#' # Search for the just the gene sequence for P. infestans Cox I
#' get_isolate_seqs(species = c("Phytophthora infestans"),
#' genes = c("cox I", "cox 1", "cox1", "coxI", "cytochrome oxidase I", "cytochrome oxidase 1"),
#' retmax = 100,
#' extract_features = TRUE)
#'
#' # Search for all the gene sequences in whole sequences that contain P. infestans Cox I
#' get_isolate_seqs(species = c("Phytophthora infestans"),
#' genes = c("cox I", "cox 1", "cox1", "coxI", "cytochrome oxidase I", "cytochrome oxidase 1"),
#' retmax = 100,
#' extract_features = TRUE,
#' gene_name_in_feature = FALSE)
#'
#' # Search for whole sequences for P. infestans Cox I for just some isolates
#' get_isolate_seqs(species = c("Phytophthora infestans", "Phytophthora infestans", "Phytophthora infestans"),
#' isolates = c("44", "580", "180"),
#' genes = c("cox I", "cox 1", "cox1", "coxI", "cytochrome oxidase I", "cytochrome oxidase 1"))
#'
#' # Search for just the gene sequences for P. infestans Cox I for just some isolates
#' get_isolate_seqs(species = c("Phytophthora infestans", "Phytophthora infestans", "Phytophthora infestans"),
#' isolates = c("44", "580", "180"),
#' genes = c("cox I", "cox 1", "cox1", "coxI", "cytochrome oxidase I", "cytochrome oxidase 1"),
#' extract_features = TRUE)
#' }
#'
#' @export
get_isolate_seqs <- function(species, genes, isolates = NULL, extract_features = FALSE, gene_name_in_feature = TRUE, db = "nucleotide", pause = 0.5, ...) {
  # Fetch sequences for a single species (optionally restricted to one
  # isolate/strain). Returns NULL when nothing matches, which bind_rows()
  # silently drops at the end.
  get_one <- function(name, isolate = NULL) {
    # Wait a bit between queries so NCBI does not rate-limit or block us.
    Sys.sleep(pause)
    # Build the Entrez search term; gene names are OR-ed together.
    if (is.null(isolate)) {
      query <- paste0('"', name, '"[Organism] AND (', paste0('"', genes, '"[All Fields]', collapse = " OR "), ')')
    } else {
      query <- paste0('"', name, '"[Organism] AND ("', isolate, '"[Isolate] OR "', isolate, '"[Strain]) AND (', paste0('"', genes, '"[All Fields]', collapse = " OR "), ')')
    }
    search <- rentrez::entrez_search(db, term = query, ...)
    if (length(search$ids) == 0) {
      return(NULL)
    }
    if (extract_features) {
      # Parse the annotation (feature table) for the hits.
      features <- parse_ft(rentrez::entrez_fetch(db, id = search$ids, rettype = "ft", retmode = "text"))
      if (gene_name_in_feature) {
        # Keep only features whose description mentions at least one of
        # the gene names (case-insensitive, fixed-string match).
        # Fixed: use `::` instead of `:::` -- map_lgl() and reduce() are
        # exported purrr functions, and `:::` reaches into the package
        # internals unnecessarily (and triggers R CMD check notes).
        gene_in_feature <- purrr::map_lgl(features$name, function(text) {
          purrr::reduce(lapply(genes, function(gene) grepl(tolower(text), pattern = tolower(gene), fixed = TRUE)), `|`)
        })
        features <- features[gene_in_feature, ]
      }
      if (nrow(features) == 0) {
        return(NULL)
      }
      # Fetch the full sequences for the same hit IDs.
      sequences <- parse_seqs(rentrez::entrez_fetch(db, id = search$ids, rettype = "fasta", retmode = "xml"))
      # Join feature and sequence data by accession.
      output <- dplyr::left_join(features, sequences, by = "acc")
      # Subset each whole sequence down to its feature's coordinates.
      output$seq <- substr(output$seq, output$start, output$end)
      output$length <- nchar(output$seq)
    } else {
      output <- parse_seqs(rentrez::entrez_fetch(db, id = search$ids, rettype = "fasta", retmode = "xml"))
    }
    # Prepend the query metadata columns so every row records its origin.
    if (is.null(isolate)) {
      output <- tibble::as_tibble(cbind(list(species = name, query = query), output, stringsAsFactors = FALSE))
    } else {
      output <- tibble::as_tibble(cbind(list(species = name, isolate = isolate, query = query), output, stringsAsFactors = FALSE))
    }
    return(output)
  }
  # With isolates supplied, species/isolates are paired element-wise;
  # without, each species is queried on its own.
  if (is.null(isolates)) {
    return(dplyr::bind_rows(purrr::pmap(list(species), get_one)))
  } else {
    return(dplyr::bind_rows(purrr::pmap(list(species, isolates), get_one)))
  }
}
|
98d7ba3f1c64b9c622a5e36b5c73ac16833908d8 | 3e522bd2b18d0e9fab5ae19a614ec2a86c91c64a | /R/app_args.R | f0def9082276b6f938c18ec1d327b9ddda3d6cd3 | [
"MIT"
] | permissive | yonicd/reactor | 1cf3f1c66bef851fc85fe74cdd456d85276b38dc | 4aa03f687b80c8b8b9e35cd37eba7c8543b891c7 | refs/heads/master | 2021-08-22T07:03:19.569947 | 2021-01-20T02:36:38 | 2021-01-20T02:36:38 | 219,646,466 | 60 | 2 | NOASSERTION | 2023-01-12T15:15:15 | 2019-11-05T03:06:54 | R | UTF-8 | R | false | false | 4,682 | r | app_args.R | #' @title Arguments to pass to a Shiny App in a child process
#' @description
#'
#' - runApp_args: Arguments that populate a [runApp][shiny::runApp] call
#' that will be run in a child process.
#'
#' - golem_args: Arguments that populate an app run via the golem package.
#'
#' The command is appended predefined commands and sent to a [process][processx::process] object.
#'
#' @param appDir The application to run. Should be one of the following (Default: getwd()):
#' - A directory containing server.R, plus, either ui.R or a www directory
#' that contains the file index.html.
#' - A directory containing app.R.
#' - An .R file containing a Shiny application, ending with an expression that produces a Shiny app object.
#' @param package_name name of the golem package
#' @param test_port integer, port to run the app on. Default: httpuv::randomPort()
#' @param test_ip The IPv4 address that the application should listen on.
#' @param test_path character, Path the child process will have access to on the master, Default: tempdir()
#' @param test_trace logical, turn on the shiny.trace option in the background proccess?. Default: FALSE
#' @return character
#' @examples
#'
#' runApp_args()
#'
#' golem_args()
#'
#' @seealso [runApp][shiny::runApp], [process][processx::process]
#' @rdname app_args
#' @family application
#' @export
#' @importFrom glue glue
#' @import shiny
#' @importFrom httpuv randomPort
runApp_args <- function(
  appDir = getwd(),
  test_port = httpuv::randomPort(),
  test_ip = getOption("shiny.host", "127.0.0.1"),
  test_path = tempdir(),
  test_trace = FALSE){
  # Returns the R commands (as character strings) the child process will
  # evaluate: the shared reactor setup, then the shiny options
  # (port/host/trace), and finally the runApp() call itself.
  c(reactor_args(test_path = test_path),
    glue::glue("options('shiny.port'= {test_port}, shiny.host='{test_ip}', shiny.trace = {test_trace})"),
    glue::glue("shiny::runApp(appDir = '{appDir}')")
  )
}
#' @rdname app_args
#' @family application
#' @export
#' @importFrom glue glue
#' @importFrom httpuv randomPort
# Build the character vector of R commands that launches a golem app in a
# child process: shared reactor setup, shiny options, then run_app().
golem_args <- function(package_name = '',
                       test_port = httpuv::randomPort(),
                       test_ip = getOption('shiny.host','127.0.0.1'),
                       test_path = tempdir(),
                       test_trace = FALSE){
  # Namespace the run_app() call only when a package name was supplied.
  run_call <- if (nzchar(package_name)) {
    paste0(package_name, "::", "run_app()")
  } else {
    "run_app()"
  }
  opts_cmd <- glue::glue("options('shiny.port'= {test_port}, shiny.host='{test_ip}', shiny.trace = {test_trace})")
  c(reactor_args(test_path = test_path), opts_cmd, run_call)
}
reactor_args <- function(test_path = tempdir()){
  # Commands shared by every child app process: load the package under
  # development with pkgload, attach whereami, and point its log at
  # <test_path>/reactor so the parent process can read reactivity hits.
  c("pkgload::load_all()",
    "library(whereami)",
    glue::glue("whereami::set_whereami_log('{file.path(test_path,'reactor')}')")
  )
}
#' @title Attach Commands for Shiny Application to Reactor
#' @description Attach commands for starting shiny application
#' using runApp or golem commands to the reactor object.
#' @param obj reactor object
#' @inheritParams runApp_args
#' @param verbose logical, reactor willn notify the action taken. Default: TRUE
#' @return reactor object
#' @examples
#' \dontrun{
#' if(interactive()){
#' #EXAMPLE1
#' }
#' }
#' @rdname set_app_args
#' @family application
#' @export
#' @importFrom httpuv randomPort
set_runapp_args <- function(
  obj,
  appDir = getwd(),
  test_port = httpuv::randomPort(),
  test_path = tempdir(),
  test_ip = getOption('shiny.host','127.0.0.1'),
  test_trace = FALSE,
  verbose = TRUE){
  # Announce whether settings are being added, updated, or replaced.
  if(verbose){
    reactor_message(names(obj$application),to = 'runApp')
  }
  # Overwrite (not merge) the stored application settings with a runApp
  # specification built from the arguments.
  obj$application <- list(
    runApp = list(
      test_port = test_port,
      test_path = test_path,
      test_ip = test_ip,
      test_trace = test_trace,
      appDir = appDir
    )
  )
  # Return the reactor object invisibly so calls can be piped/chained.
  invisible(obj)
}
#' @rdname set_app_args
#' @family application
#' @export
#' @importFrom httpuv randomPort
set_golem_args <- function(
  obj,
  package_name ='',
  test_port = httpuv::randomPort(),
  test_path = tempdir(),
  test_ip = getOption('shiny.host','127.0.0.1'),
  test_trace = FALSE,
  verbose = TRUE
){
  # Announce whether settings are being added, updated, or replaced.
  if(verbose){
    reactor_message(names(obj$application),to = 'golem')
  }
  # Overwrite (not merge) the stored application settings with a golem
  # specification built from the arguments.
  obj$application <- list(
    golem = list(
      test_port = test_port,
      test_path = test_path,
      test_ip = test_ip,
      test_trace = test_trace,
      package = package_name
    )
  )
  # Return the reactor object invisibly so calls can be piped/chained.
  invisible(obj)
}
#' @importFrom glue glue
# Emit a status message describing how the application settings change:
# nm is the name of the currently-stored spec (NULL when none is set) and
# to is the spec type being written.
reactor_message <- function(nm, to) {
  if (is.null(nm)) {
    msg <- glue::glue('Adding {to} Settings')
  } else if (nm == to) {
    msg <- glue::glue('Updating {nm} Settings')
  } else {
    msg <- glue::glue('Replacing {nm} with {to} Settings')
  }
  message(msg)
}
2364f4d9cf72411924a60311a2ae049b8d432042 | c5dd7d41b2dce010e9d80d8121ce6bf1a884abdf | /src/Analysis.R | 9535b09851125077040baa7c19edab434fa229e9 | [] | no_license | DarioTrujanoOchoa/ArticleCitizenCandidate | 23d6f74523a5ccdde5d486a657fe2d9c1bdbf241 | 47d97c7e709a051212540b55adb2308b28813d3e | refs/heads/master | 2021-04-29T20:28:00.549324 | 2018-03-15T16:37:20 | 2018-03-15T16:37:20 | 121,596,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,157 | r | Analysis.R | rm(list=ls())
#setwd("C:/Users/dtruj/Dropbox/Citizen-Candidate/ArticleCitizenCandidate")
# Load packages via pacman and source the project helpers (QRE
# computation, voting rules, citizen-candidate utilities).
library(pacman)
p_load(dplyr)
p_load(tidyr)
p_load(foreign)
source("src/QRE_functions_2.R")
source("src/VotingRules.R")
source("src/functions_CitCand.R")
#Import raw data ####
## Merge data bases with the games of interest ####
data_first <- read.csv("data/FirstTreatments.csv", sep=";", dec=",")
data_second <- read.csv("data/2x2x2.csv")
# counterbalance of ideal points
# The first data set counterbalances two games (A/B) across the first half
# (periods <= 15) and the rest of the session; relabel each half with the
# treatment it actually implemented (ex70 / ex80).
ex70 <- data_first[(data_first$num_periodo<=15 & data_first$Juego== "A")|(data_first$num_periodo>15 & data_first$Juego== "B"),]
ex70$Juego <- as.factor(rep("ex70", length(ex70$Juego)))
ex80 <- data_first[(data_first$num_periodo>15 & data_first$Juego== "A")|(data_first$num_periodo<=15 & data_first$Juego== "B"),]
ex80$Juego <- as.factor(rep("ex80", length(ex80$Juego)))
# NOTE(review): self-assignment with no effect -- possibly a leftover edit.
ex80$Sesion <- ex80$Sesion
data_1st_by_q <- rbind(ex70, ex80)
## set positions ####
# Map ideal points to ordered Left/Center/Right labels (70 and 80 both
# map to Right in the first data set).
positions1<- factor(c("Left","Center","Right","Right"),levels = c("Left","Center","Right"),ordered=T)
ideal_points1 <- c(30, 50, 70, 80)
data_1st_by_q$Position <- positions1[match(data_1st_by_q$punto_ideal,ideal_points1)]
positions2<- factor(c("Left","Center","Right"),levels = c("Left","Center","Right"),ordered=T)
ideal_points2 <- c(20, 30, 80)
data_second$Position <- positions2[match(data_second$punto_ideal,ideal_points2)]
Code <- read.csv("data/TreatmentsCode.csv")
# Stack both data sets, keep type-"R" subjects with valid ideal points in
# the games of interest, and attach the short treatment name as an
# ordered factor.
raw_data <- rbind(data_1st_by_q,data_second) %>%
  filter(id_tipo == "R",punto_ideal != -1,Juego %in% Code$CodeInDataBase) %>%
  mutate(Treatment = factor(Code$ShortName[match(Juego,table = Code$CodeInDataBase)],ordered = T,
                            levels = Code$ShortName))
# General Characteritics by Sessions ####
# Per treatment/session: number of participants (max user id) and number
# of observations with a negative balance; exported as a LaTeX table.
Gral_Session <- raw_data %>% group_by(Treatment,Sesion) %>%
  do(data.frame( No.Participants=max(.$id_usuario),
                 Bankrupcy=sum(.$balance<0))) %>%
  data.frame()
stargazer::stargazer(Gral_Session,title = "Characteritics by Sessions",
                     header = F,summary = F,out = "results/Tables/Sessions.tex",rownames = F)
# Entry Proportions ####
# Entry counts by treatment and position, in wide format.
Election_game <-raw_data %>% group_by(Treatment,Position) %>%
  do(data.frame(Entry=sum(.$se_postula))) %>%
  spread(Treatment,Entry) %>% data.frame()
# Format a proportion as a parenthesized percentage, e.g. "(12.5)".
parentesis_percentage <- function(x){paste("(",round(x*100,digits = 1),")",sep = "")}
Election_prop <- raw_data %>% group_by(Treatment,Position) %>%
  do(data.frame(Prop=mean(.$se_postula))) %>% mutate(Prop=parentesis_percentage(Prop)) %>%
  spread(Treatment, Prop) %>% data.frame() %>% mutate(Position= rep("%",3))
# Interleave counts (rows 1,3,5) and percentages (rows 2,4,6); the last
# row carries the total number of trials per treatment.
Election_table <- data.frame(matrix(nrow = 7,ncol = 7))
Election_table[c(1,3,5),] <- as.matrix(Election_game)
Election_table[c(2,4,6),] <- as.matrix(Election_prop)
Election_table[c(7),] <- c("Total",table(raw_data$Position,raw_data$Treatment)[1,])
colnames(Election_table) <- colnames(Election_game)
stargazer::stargazer(Election_table,title = "Total number of entries by position and game",
                     header = F,summary = F,out = "results/Tables/Entries.tex",rownames = F,
                     label = "tab:rawentry")
# Reshape Election_game into a positions-by-games matrix plus a Total row
# for the likelihood estimation below.
row.names(Election_game)<-Election_game$Position
Election_game$Position <- NULL
Total <- table(raw_data$Position,raw_data$Treatment)[1,]# total trials
Election_game <- t(cbind(t(Election_game),Total))
colnames(Election_game) <- c("PR_LC_q1", "PR_HC_q1", "RO_LC_q1", "RO_HC_q1","PR_LC_q2_ex70","PR_LC_q3_ex80")
#MLE Estimation ####
##parameters ####
alpha = 0.1 # changes in the valuation of the policy
cost = c(5,20) #LC and HC in the experiment
benefit = 25
D = 40 # penalty if no one enters
p= c(alpha, cost[2], benefit,D)
# what everybody does
s = c(1,0,1) #rep(1,n) # 1 means participate
n = 3 # number of players
q = c(20, 30, 80) # ideal points used in the experiment (they must be put in order)
Q = rbind(q, c(30, 50, 70), c(30, 50, 80))
# Parameter vectors for the low-cost (5) and high-cost (20) conditions.
p_LC <- p; p_LC[2]<-5
p_HC<- p; p_HC[2]<-20
lambda= .1 # the MLE for the first treatments was .078
probs = c(.1, .9, .2)
## Calculations ####
### Global ###
# Fit one lambda jointly across all games; the neg_logL_* objective
# functions come from the sourced src/ files. hessian = TRUE so a standard
# error can be recovered from the observed information below.
MLE_global <- optim(par=.1,f = neg_logL_global_ABRSTU,election_game=Election_game, probs=c(.5, .5, .5), p_HC=p_HC, p_LC=p_LC, Q=Q,hessian = T)
neg_logL_global_ABRSTU(election_game = Election_game,lambda = MLE_global$par, probs = c(0.8, 0.1, 0.8), p_HC = p_HC, p_LC = p_LC, Q = Q)
Global <- c(MLE_global$par,sqrt(diag(solve(MLE_global$hessian))))
### # analysis by game ####
# Refit lambda separately for every game (column of Election_game);
# row 1 holds the estimate, row 2 the standard error.
MLE_game<- data.frame(matrix(nrow = 2,ncol= length(Election_game[1,])))
names(MLE_game) <- colnames(Election_game)
for(g in 1:length(Election_game[1,])){
  optim_game <- optim(par = 0.1,f = neg_logL_ABRSTU,election_game=Election_game, probs=c(.5, .5, .5), p_HC=p_HC, p_LC=p_LC, Q=Q,G= g,hessian = T)
  MLE_game[1,g] <- optim_game$par
  MLE_game[2,g] <- sqrt(diag(solve(optim_game$hessian)))
}
MLE <- cbind(MLE_game,Global)
row.names(MLE) <- c("Lambda","sd")
save(MLE,file = "results/MLE.RData")
### Table ####
load("results/MLE.RData")
stargazer::stargazer(MLE,summary = F,out = "results/Tables/MLE.tex",label = "tab:mle",digits = 4)
#Graphs ####
raw_data %>% group_by(Treatment,punto_ideal) %>% do(data.frame(mean(.$se_postula)))
# NOTE(review): hist() is called with no arguments and would error if this
# script were run top to bottom -- looks like an unfinished plotting stub.
hist()
## QRE Path ####
|
f4cd38b734e39ba4466512fe9190e91385a2672c | 9283b2a903149cc4cf33d4ece839a3aab9a73065 | /man/read_feats.Rd | 5faeab4bedc2a2731cb778c7032cfda5b97e99d3 | [
"MIT"
] | permissive | quanrd/gggenomes | 4193542dc3a8cc416f9dcd2774718ac0c918b49b | 1bfbdfe2a6f5b4c48b59cb3d2298be178fb3e2d1 | refs/heads/master | 2023-03-01T09:31:22.231667 | 2021-02-01T23:00:53 | 2021-02-01T23:00:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,412 | rd | read_feats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_feats.R
\name{read_feats}
\alias{read_feats}
\alias{read_subfeats}
\alias{read_links}
\alias{read_sublinks}
\title{Read features and links from common file formats}
\usage{
read_feats(files, format = NULL, .id = "file_id", ...)
read_subfeats(files, format = NULL, .id = "file_id", ...)
read_links(files, format = NULL, .id = "file_id", ...)
read_sublinks(files, format = NULL, .id = "file_id", ...)
}
\arguments{
\item{files}{files to reads. Should all be of same format.}
\item{format}{If NULL, guess from file extension. Else, any format known to
gggenomes (gff3, gbk, ... see \code{\link[=file_formats]{file_formats()}} for full list) or any suffix
of a known \verb{read_<suffix>} function, e.g. tsv for \code{readr::read_tsv()}.}
\item{.id}{the name of the column storing the file name each record came
from. Defaults to "file_id". Set to "bin_id" if every file represents a
different bin.}
\item{...}{additional arguments passed on to the format-specific read
function called down the line.}
}
\value{
A gggenomes-compatible feature or link tibble
}
\description{
Read features or links from common formats, such as GFF3, Genbank, BED, BLAST
tabular output or PAF files. File formats and the format-specific \verb{read_*()}
function are automatically determined based on file extensions, if possible.
Can read multiple files in the same format into a single table: useful, for
example, to read a folder of gff-files with each containing genes of a
different genome.
}
\section{Functions}{
\itemize{
\item \code{read_feats}: read files as features mapping onto sequences
\item \code{read_subfeats}: read files as subfeatures mapping onto other features
\item \code{read_links}: read files as links connecting sequences
\item \code{read_sublinks}: read files as sublinks connecting features
}}
\examples{
# read a file
read_feats(ex("eden-utr.gff"))
# read all gffs from a directory
read_feats(list.files(ex("emales/"), "*.gff$", full.names=TRUE))
\dontrun{
# read remote files
gbk_phages <- c(
PSSP7 = "ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/858/745/GCF_000858745.1_ViralProj15134/GCF_000858745.1_ViralProj15134_genomic.gff.gz",
PSSP3 = "ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/904/555/GCF_000904555.1_ViralProj195517/GCF_000904555.1_ViralProj195517_genomic.gff.gz")
read_feats(gbk_phages)
}
}
|
45070f7b2d691c12c5752b05c5b593cd44c1a066 | 0002ad451346d510edbeb27d14ce213402f09b34 | /ClimatePackge/R/Hot_years.R | d7bdf39d438faa44fd828a7c83bc9fff2c1ed6ab | [] | no_license | Ebuciobustos/ESM262_Homework5 | fed6f0c946417d22ed77f970e173a064ffd39715 | 34dd970bd3dae07fe438b5140af91c8a47c3e6e8 | refs/heads/master | 2020-06-03T18:22:14.376635 | 2019-06-14T04:36:37 | 2019-06-14T04:36:37 | 191,680,924 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 968 | r | Hot_years.R | #' Summary information about the warmest years in Mexico from 1901 to 2016
#'
#' computes summary information about maximum temperature registered by year
#' @param TempMX_df data frame with columns temperature (celsius), year, month, city
#' @return returns the next information,
#' \describe{
#' \item{Temperature}{Maximum temperature (degrees C) recorded in that year}
#' \item{year}{Year in which that maximum temperature was recorded}
#' }
# For each year in TempMX_df, keep the row(s) carrying that year's maximum
# recorded Temperature. When plot = TRUE, also print a scatter of every
# observation in TempMX_df with a linear trend line before returning.
Warmer_years <- function(TempMX_df, plot = TRUE){
  yearly_maxima <- TempMX_df %>%
    group_by(Year) %>%
    filter(Temperature == max(Temperature)) %>%
    ungroup()
  if (plot == TRUE) {
    # note: the plot shows all observations, not only the per-year maxima
    trend_plot <- ggplot(TempMX_df, aes(x = Year, y = Temperature)) +
      geom_point(size = 1.5) +
      ggtitle("Maximum Temperature in Celsius From Mexico 1901-2016") +
      labs(y = "Anually Maximum Temperature (degrees C)", x = "Year") +
      theme_classic() +
      geom_smooth(method = "lm")
    print(trend_plot)
  }
  yearly_maxima
}
|
8aaed5df8d507c59fee5d696065479a76335cd75 | 23e3dc95a3a8be963267e3d7b65c1f61bc88d039 | /code/functions/fcn_implied_rho_phiphihat.R | d3980b617af034f89988fe594110f4aaf26b5cff | [] | no_license | jamesyae/LTY-JFE-2016 | 50f1bbcdc558f5bd8eba8c82cdb382dfb8b392d8 | 24d33babd663826971a9853b9f9e5a260ad37937 | refs/heads/master | 2022-12-06T06:31:04.755912 | 2020-08-12T18:52:01 | 2020-08-12T18:52:01 | 287,076,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,708 | r | fcn_implied_rho_phiphihat.R | fcn_implied_rho_phiphihat<- function(theta) {
# Global variables.
# true.sigma.a
# true.sigma.e
# true.sigma.n
# NOTE(review): this is the body of fcn_implied_rho_phiphihat(theta). It maps
# (true.phi, est.phi, est.Kx) to model-implied forecast-error moments, reading
# the variances true.sigma.a/e/n from the global environment -- they must be
# defined before this function is called.
true.phi = theta[1]
est.phi = theta[2]
est.Kx = theta[3]
## computation
# precision weight on the noisy signal, and the combined posterior variance
true.w = 1/true.sigma.n^2/(1/true.sigma.n^2+1/true.sigma.e^2)
est.w = true.w
true.pis2 = 1/(1/true.sigma.e^2+1/true.sigma.n^2)
### b/c pospos_varE = 1/(1/pos_varE + 1/(true.sigma.a^2 + pospos_varE*true.phi^2) );
#a = 1/true.sigma.a^2
#b = 1-true.phi^2 - true.pis2/true.sigma.a^2
#c = -true.pis2
# coefficients of the steady-state (Riccati) quadratic; this is a rescaled
# version of the commented-out parameterization above
a = 1
b = (1-true.phi^2)*true.sigma.a^2 - true.pis2
c = -true.pis2*true.sigma.a^2
### a x^2 + bx + c = 0
# positive root = steady-state filtering variance; implied Kalman gain follows
true.pit1t2 = ( -b + sqrt( b^2 - 4*a*c ) ) /a / 2 ;
true.Kx = true.pit1t2/(true.sigma.a^2 + true.pit1t2 ) ;
# stationary variance of Y and the Y / Y-hat (co)variance structure.
# NOTE(review): formulas kept exactly as written -- verify against the model
# appendix before editing; they are order-sensitive.
varY = ( (1+true.phi^2)*true.sigma.a^2 + true.sigma.e^2 - 2*true.phi^2*true.sigma.a^2 )/(1-true.phi^2)
covYYh = (true.phi*est.phi*est.Kx*(varY - true.sigma.a^2) + est.w*true.sigma.e^2 )/
(1-true.phi*est.phi*(1-est.Kx));
varYh = ( est.phi^2*est.Kx^2*varY + est.w^2*(true.sigma.e^2+true.sigma.n^2) + 2*est.phi^2*(1-est.Kx)*est.Kx*covYYh )/
(1-est.phi^2*(1-est.Kx)^2);
covYY1 = true.phi*( varY - true.sigma.a^2);
covYhYh1 = est.phi*( (1-est.Kx)*varYh + est.Kx*covYYh );
covY1Yh = true.phi*covYYh;
covYh1Y = est.phi*( (1-est.Kx)*covYYh + est.Kx*varY );
# forecast-error autocovariances and implied first-order autocorrelation
ACF0 = varY + varYh - 2*covYYh;
ACF1 = covYY1 + covYhYh1 - covY1Yh - covYh1Y;
rho1 = ACF1/ACF0 ;
# implied AR regression coefficients
phiphihat_ar_est=(covYY1-covYh1Y)/varY
phi_ar_est=(covYY1)/varY
# return the implied moments as a named list
Imp = list(FE_rho = rho1, phi_ar_est=phi_ar_est, phiphihat_ar_est=phiphihat_ar_est)
return(Imp)
}
3f0a2dcec5769d554c7d5dcc1c91c4f74cce5cb2 | f70e46bce9bb8a5dd5c1701456f5478259cf2a36 | /R/doji.R | ebf7b645e89c43c95d6c29fac971ff4d51ce585f | [] | no_license | Roshan2540/CandleStickPattern | 9a038d928d6b41ae08d4c1aaec4142127cec2668 | f4211077891b9231d2d34dcf0118676825fb7c00 | refs/heads/master | 2023-06-25T05:13:26.809779 | 2019-04-19T14:26:27 | 2019-04-19T14:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 357 | r | doji.R | #' Determine Doji Pattern using a OHLC price series
#'
#' @param x OHLC prices.
#' @param delta sensitivity parameter
#' @return TRUE if Doji pattern detected
#' @export
doji <- function(x, delta = 0.1) {
  # A doji candle has a real body that is small relative to the full
  # high-low range: body <= delta * range.
  full_range <- CandleStickLength(x)
  real_body <- CandleBodyLength(x)
  flagged <- xts::reclass(real_body <= delta * full_range, x)
  colnames(flagged) <- "doji"
  return(flagged)
}
|
de7808c9c1afc2ea90fd5e8936393f4601146203 | 05c7279b7779f67294adf57caecdc5a4e69846c8 | /cachematrix.R | b82ba4645fbbc74065ae45a4c840aab0a405e4ec | [] | no_license | UHWesley/ProgrammingAssignment2 | 634c01a019af8d32ffad6f9d917ae541d86280ff | 1d607e9ccf3b415f8b715c9b5dfbf44112c0eae4 | refs/heads/master | 2021-01-14T14:29:00.930350 | 2014-11-23T09:33:34 | 2014-11-23T09:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,422 | r | cachematrix.R |
# Cache container for a matrix and (optionally) its inverse.
# Returns a list of four closures: set()/get() for the matrix itself and
# setInverse()/getInverse() for the cached inverse. Pure storage -- no
# validation and no computation happen here; cacheSolve() does the inverting.
makeCacheMatrix <- function(m = matrix()) {
  stored <- m
  inverse_cache <- NULL
  # Replacing the matrix invalidates any previously cached inverse.
  set <- function(new_value) {
    stored <<- new_value
    inverse_cache <<- NULL
  }
  get <- function() stored
  setInverse <- function(solvedMatrix) inverse_cache <<- solvedMatrix
  getInverse <- function() inverse_cache
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
# Return the inverse of a cache-matrix object x (built by makeCacheMatrix).
# On the first call the inverse is computed with solve() and written back into
# x; subsequent calls short-circuit and reuse the cached value, announcing the
# cache hit with a message. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached inverse")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
## These 5 rows test that I get the expected result.
#testmatris <- matrix(2:10, nrow=3, byrow=FALSE) ##Making a matrix to test the function on
#testmatris[3,3] <- 12
#tm <- makeCacheMatrix(testmatris)
#cacheSolve(tm)
#tm$get() |
bef46d6c0f92a66e4b7e68b306c0db665583924f | d896dd61719b72892fefa9d0bc5d8a26ab692123 | /cachematrix.R | 1a1f9c4cb6c0cbbc882b352b5df378bba2127c2b | [] | no_license | joycecc/ProgrammingAssignment2 | 55abdb5de2511028291636bf5bb84d86df2ecb69 | dd6b932c6c142668400ba60cad5821c014a00fe4 | refs/heads/master | 2021-01-18T02:26:42.773551 | 2015-09-27T07:08:07 | 2015-09-27T07:08:07 | 43,235,315 | 0 | 0 | null | 2015-09-27T05:13:47 | 2015-09-27T05:13:47 | null | UTF-8 | R | false | false | 1,859 | r | cachematrix.R | ## makeCacheMatrix creates a list of 4 functions, set(),get(),setinv(),getinv(),
##cacheSolve() can call the functions created by makeCacheMatrix()
## Test Example:
## > x <- makeCacheMatrix(matrix(c(1,2,3,4), nrow=2, ncol=2))
## > cacheSolve(x)
## .. calculate inverse and output
## > cacheSolve(x)
## .. output inverse matrix from cache
## Build a cache-aware matrix container. The returned list exposes four
## closures: set()/get() for the matrix and setinv()/getinv() for the cached
## inverse. The inverse starts as NULL and is reset whenever a new matrix is
## stored, so cacheSolve() can never serve a stale inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL  # new matrix invalidates the cache
  }
  get <- function() {
    x
  }
  setinv <- function(invs) {
    cached_inv <<- invs
  }
  getinv <- function() {
    cached_inv  # NULL until cacheSolve() stores a value
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the special "matrix" object x created by
## makeCacheMatrix(). A cached inverse is returned directly (with a message);
## otherwise the inverse is computed with solve(), stored back into x via
## x$setinv() so later calls can reuse it, and returned.
## Fix: extra arguments in `...` are now forwarded to solve(); previously the
## signature accepted them but silently dropped them (the companion
## implementation earlier in this file already forwards them).
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")  # cache hit: skip recomputation
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # compute on first use
  x$setinv(inv)            # cache for subsequent calls
  inv
}
|
b551c93457d6bd79194a335c8f8d176102e33d6c | e060df826f64d3f850b113182bfd254eab4bdfa5 | /docs/Notes.R | 7e226433d52775b2f1b8db5182e64f365903bee4 | [] | no_license | UCSBaporter/geog-176A-labs | 6afaa19e1bc0187170bb942ae338677daf24eae2 | defa2a21af3fa6a92ed8a5cbfea713e45ebe5a6d | refs/heads/master | 2022-12-06T20:14:36.849686 | 2020-08-25T18:49:41 | 2020-08-25T18:49:41 | 287,131,235 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,526 | r | Notes.R | pop = readxl:::read_excel("data/populationEstimates.xls", skip = 2) %>%
  select(pop2019 = POP_ESTIMATE_2019, State = State, fips = FIPStxt)
# NOTE(review): scratch/notes file. Several objects below (covid, region,
# covid_region) are not defined here and must come from the lab session
# environment -- this script is not runnable standalone.
pop_ca2019 = pop %>%
  filter(State == "CA") %>%
  slice_max(pop2019, n = 1)
# per-county daily new cases for California, keeping only the latest date
cases = covid %>%
  filter(state %in% c("California")) %>%
  group_by(county) %>%
  mutate(newCases = cases - lag(cases)) %>%
  ungroup() %>%
  filter(date == max(date))
most_cases = cases %>%
  slice_max(cases, n = 5) %>%
  select(county, cases)
knitr::kable(most_cases,
             caption = "Most Cases California Counties",
             col.names = c("County", "Cases"))
most_new_cases = cases %>%
  slice_max(newCases, n = 5) %>%
  select(county, newCases)
knitr::kable(most_new_cases,
             caption = "Most New Cases California Counties",
             col.names = c("County", "New Cases"))
# per-capita rates: cases per 100,000 residents (2019 population estimates)
pop_data1 = right_join(pop, cases, by = "fips") %>%
  mutate(cases_percapita = (cases / pop2019) * 100000,
         newCases_percapita = (newCases / pop2019) * 100000)
most_cases_percapita = pop_data1 %>%
  slice_max(cases_percapita, n = 5) %>%
  select(county, cases_percapita)
knitr::kable(most_cases_percapita,
             caption = "Most Cumulative Cases Per Capita",
             col.names = c("County", "Cases"))
##Notes from labs
# NOTE(review): the snippets below are lab leftovers; `filter(state ('CA'))`
# on the next line is not valid dplyr and was presumably never run.
newData = filter(state ('CA')) %>% group_by(county) %>% mutate(newCase = cases - lag(cases))
regions_cases_deaths = covid %>%
  right_join(region, by = 'state') %>%
  group_by(region, date) %>%
  summarize(cases = sum(cases), deaths = sum(deaths)) %>%
  ungroup() %>%
  pivot_longer(cols = c('cases', 'deaths')) #pivot the frame to long
p1 = ggplot(data=regions_cases_deaths, aes(x = date, y = value)) +
  geom_line(aes(col = region)) +
  facet_grid(name~region, scale = "free_y") +
  ggthemes::theme_few() +
  labs(title = "Cummulative Cases and Deaths: Region",
       subtitle = "Data Source: NY-Times",
       x = "Date",
       y = "Daily Cummulative Count",
       caption = "Daily Exercise 07: Abigail Porter")
ggplot(covid_region)
regions_cases_deaths %>%
  pivot_wider(names_from = "name", values_from = "value") %>%
  ggplot(aes(x=date)) +
  geom_line(aes(y = cases), col = "blue") +
  geom_line(aes(y = deaths), col = "red") +
  facet_grid(~region)
covid %>%
  filter(county == "Unknown", state == "Florida", date == max(date)) %>%
  arrange(-deaths)
homes = read_csv('data/landdata-states.csv')
pop = readxl::read_excel("data/PopulationEstimates.xls", skip = 2) %>% select(pop2019 = POP_ESTIMATE_2019, fips = FIPStxt)
|
a876ddf9c4c285d85fa7a01bc82e83331dbc6098 | 534391940f301b49ea1f0ec4607ede26864a8ceb | /Shiny Dashboard/R/monetizationData.R | f3231064d200932bb157ea56114ce3e6b9bb0f69 | [] | no_license | dmullen17/work-samples | d2a7ff211d7d56f5090ea9b11852cc4638d90241 | fff28bf0ff9c628e3119e8e3a5ffe361097aefef | refs/heads/master | 2021-04-03T09:13:20.742952 | 2018-06-27T22:38:54 | 2018-06-27T22:38:54 | 124,466,426 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,944 | r | monetizationData.R | ## Dominic Mullen
## 7/14/2016
#Monetization Data and SQL queries
# library(data.table)
# library (RPostgreSQL)
# library(plyr)
# Connection handle used by every dbGetQuery() call in this file.
driver <- dbDriver("PostgreSQL")
# SECURITY(review): Redshift host and credentials are hard-coded in source
# control. Rotate this password and load credentials from environment
# variables or an untracked config file instead.
conn <- dbConnect(driver, host="superevilmegacorp.redshift.amplitude.com",
                  port="5439",
                  dbname="superevilmegacorp",
                  user="superevilmegacorp",
                  password="GQdPSadICW2lL5qCkS20U9Jk")
##============================================
## Deployment revenue metrics (multiple graphs)
## (g_revenue_period_spender) (g_arppu_deployment) ()
##============================================
# Revenue / ARPPU / ARPDAU by deployment: worldwide rows UNIONed with
# per-country rows (both source tables share the same column layout).
sql_deployment_revenue <- "Select
country
,deployment
,Revenue
,Net_Revenue
,Spender_Count
,Net_ARPPU
,mau_count
,Net_ARPDAU
from deployment_revenue
Where deployment > 1.15
Union
Select
country
,deployment
,Revenue
,Net_Revenue
,Spender_Count
,Net_ARPPU
,mau_count
,Net_ARPDAU
from deployment_revenue_country
Where deployment > 1.15"
# search_path scopes the query to this app's Amplitude schema.
deployment_revenue <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_deployment_revenue, sep = ""))
# data table
deployment_revenue <- data.table(deployment_revenue)
# deployment factor
deployment_revenue$deployment <- as.factor(deployment_revenue$deployment)
#correct the '1.2' labeling to '1.20' to selector works
# NOTE(review): deployment was just converted to a factor, and this := assigns
# a level ('1.20') that does not exist yet. Recent data.table versions add the
# new level automatically; older ones emit a warning / NA -- confirm the
# data.table version used in deployment.
deployment_revenue[deployment == '1.2', deployment := '1.20']
#order deployment factor levels
deployment_revenue$deployment <- factor(
deployment_revenue$deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
##============================================
## Spender percent by acquisition period and spend period (g_spender_percent)
##============================================
sql_spender_percent <- "Select
a.country
,a.acquired_28_day_bucket
,b.deployment as acquisition_deployment
,a.spent_28_day_bucket
,c.deployment as spend_deployment
,a.engagement_status_total
,a.period_spender_count
,a.acquired_user_count
,a.spender_percent
From acquired_spenders_in_period_lifetime_28 a
Left Join deployments b
on a.acquired_28_day_bucket = b.trailing_28_day_bucket
Left Join deployments c
on a.spent_28_day_bucket = c.trailing_28_day_bucket
Group By 1,2,3,4,5,6,7,8,9
Union
Select
a.country
,a.acquired_28_day_bucket
,b.deployment as acquisition_deployment
,a.spent_28_day_bucket
,c.deployment as spend_deployment
,a.engagement_status_total
,a.period_spender_count
,a.acquired_user_count
,a.spender_percent
From acquired_spenders_in_period_lifetime_28_country a
Left Join deployments b
on a.acquired_28_day_bucket = b.trailing_28_day_bucket
Left Join deployments c
on a.spent_28_day_bucket = c.trailing_28_day_bucket
Group By 1,2,3,4,5,6,7,8,9
"
spender_percent <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_spender_percent, sep = ""))
# status factor
spender_percent$engagement_status_total_f <-
factor(spender_percent$engagement_status_total, levels = c("Installed","Active30",
"Active200","Active2000"))
# data table
spender_percent <- data.table(spender_percent)
# acquisition_deployment factor
spender_percent$acquisition_deployment <- as.factor(spender_percent$acquisition_deployment)
#correct the '1.2' labeling to '1.20' to selector works
spender_percent[acquisition_deployment == '1.2', acquisition_deployment := '1.20']
#order deployment factor levels
spender_percent$acquisition_deployment <- factor(
spender_percent$acquisition_deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
# spend_deployment factor
spender_percent$spend_deployment <- as.factor(spender_percent$spend_deployment)
#correct the '1.2' labeling to '1.20' to selector works
spender_percent[spend_deployment == '1.2', spend_deployment := '1.20']
#order deployment factor levels
spender_percent$spend_deployment <- factor(
spender_percent$spend_deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
# spent period factor
# spender_percent$spent_28_day_bucket_f <- factor(spender_percent$spent_28_day_bucket,
# levels = c("6", "5", "4", "3", "2", "1", "0"))
##============================================
## percent of active users that have ever spent (g_percent_ever_spent)
##============================================
sql_percent_ever_spent <- "Select
country
,trailing_28_day_period
,lifetime_28_day_engagement_status
,deployment
,Spender_Count
,Non_Spender_Count
,LTD_Spender_Percent
from active_user_LTD_spender_status_main
Group By 1,2,3,4,5,6,7"
percent_ever_spent <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_percent_ever_spent, sep = ""))
percent_ever_spent$lifetime_28_day_engagement_status_f <-
factor(percent_ever_spent$lifetime_28_day_engagement_status, levels = c("Installed","Active30",
"Active200","Active2000"))
# data table
percent_ever_spent <- data.table(percent_ever_spent)
#correct the '1.2' labeling to '1.20' to selector works
# deployment factor
percent_ever_spent$deployment <- as.factor(percent_ever_spent$deployment)
percent_ever_spent[deployment == '1.2', deployment := '1.20']
#order deployment factor levels
percent_ever_spent$deployment <- factor(
percent_ever_spent$deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
##============================================
## DAU Spender Percent by Country (g_dau_spender_percent)
##============================================
sql_dau_spender_percent <- "Select
country
,event_date
,spender_percent
From DAU_spender_percent
Group By 1,2,3
Union
Select
country
,event_date
,spender_percent
From DAU_spender_percent_country
Group By 1,2,3"
dau_spender_percent <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_dau_spender_percent, sep = ""))
dau_spender_percent <- data.table(dau_spender_percent)
##============================================
## Days to first spend (g_days_first_spend)
##============================================
sql_days_first_spend <- "Select
country
,days_to_first_spend
,User_Count
From days_to_first_spend
Group By 1,2,3
Union
Select
country
,days_to_first_spend
,User_Count
From days_to_first_spend_country
Group By 1,2,3"
days_first_spend <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_days_first_spend, sep = ""))
days_first_spend <- data.table(days_first_spend)
##============================================
## Days to first spend (by Percent) (g_days_first_spend_percent)
##============================================
# calculate percentage by country
days_first_spend_percent <- ddply(days_first_spend,
.(country),
transform,
percent = user_count/sum(user_count))
# data table
days_first_spend_percent <- data.table(days_first_spend_percent)
##============================================
## Days to first spend by IAP (by Percent) (g_days_first_spend_iap_percent & g_days_first_spend_iap_2
##============================================
sql_days_first_spend_iap <-
"Select
country
,productid
,days_to_first_spend
,User_Count
From days_to_first_spend_iap
Group By 1,2,3,4
Union
Select
country
,productid
,days_to_first_spend
,User_Count
From days_to_first_spend_country_iap
Group By 1,2,3,4"
days_first_spend_iap <- dbGetQuery(conn, paste("SET search_path = app139203;",
sql_days_first_spend_iap, sep = ""))
days_first_spend_iap <- data.table(days_first_spend_iap)
#make productid a factor and order productid factor levels
days_first_spend_iap$productid <- factor(
days_first_spend_iap$productid, levels = c('gold.mini',
'bundle.mini.ice_and_key',
'gold.small',
'bundle.small',
'gold.medium',
'gold.large',
'gold.xlarge',
'gold.2xlarge',
'other'))
# calculate percentage by country
days_first_spend_iap_percent <- ddply(days_first_spend_iap,
.(country, days_to_first_spend),
transform,
percent = user_count/sum(user_count))
days_first_spend_iap_percent <- arrange(days_first_spend_iap_percent,
country,
productid,
-days_to_first_spend
)
sql_days_first_spend_iap_2 <-
"Select
country
,deployment
,productid
,days_to_first_spend
,User_Count
From days_to_first_spend_iap_2
Group By 1,2,3,4,5"
days_first_spend_iap_2 <- dbGetQuery(conn, paste("SET search_path = app139203;",
sql_days_first_spend_iap_2, sep = ""))
days_first_spend_iap_2 <- data.table(days_first_spend_iap_2)
#make productid a factor and order productid factor levels
days_first_spend_iap_2$productid <- factor(
days_first_spend_iap_2$productid, levels = c('gold.mini',
'bundle.mini.ice_and_key',
'gold.small',
'bundle.small',
'gold.medium',
'gold.large',
'gold.xlarge',
'gold.2xlarge',
'other'))
# calculate percentage by country
days_first_spend_iap_2_percent <- ddply(days_first_spend_iap_2,
.(deployment, country, days_to_first_spend),
transform,
percent = user_count/sum(user_count))
days_first_spend_iap_2_percent <- arrange(days_first_spend_iap_2_percent,
deployment,
country,
productid,
-days_to_first_spend)
##============================================
## percent of active users that have ever spent (g_percent_ever_spent)
##============================================
# NOTE(review): this section is a repeat of the earlier "percent of active
# users that have ever spent (g_percent_ever_spent)" section -- same query,
# same post-processing, overwriting the same objects. It looks like a
# copy/paste leftover; one of the two copies can likely be removed (verify
# nothing in between mutates percent_ever_spent in a way that needs redoing).
sql_percent_ever_spent <- "Select
country
,trailing_28_day_period
,lifetime_28_day_engagement_status
,deployment
,Spender_Count
,Non_Spender_Count
,LTD_Spender_Percent
from active_user_LTD_spender_status_main
Group By 1,2,3,4,5,6,7"
percent_ever_spent <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_percent_ever_spent, sep = ""))
percent_ever_spent$lifetime_28_day_engagement_status_f <-
factor(percent_ever_spent$lifetime_28_day_engagement_status, levels = c("Installed","Active30",
"Active200","Active2000"))
# data table
percent_ever_spent <- data.table(percent_ever_spent)
#correct the '1.2' labeling to '1.20' to selector works
# deployment factor
percent_ever_spent$deployment <- as.factor(percent_ever_spent$deployment)
percent_ever_spent[deployment == '1.2', deployment := '1.20']
#order deployment factor levels
percent_ever_spent$deployment <- factor(
percent_ever_spent$deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
##============================================
## ARPPU / ARPDAU by trailing 30 days (g_arppu_trailing30)
## (g_arpdau_trailing30)
##============================================
sql_arppu_arpdau_trailing30 <- "Select
country
,event_date
,revenue
,spender_count
,active_user_count
,arppu
,arpdau
From arppu_arpdau
Where event_date > current_date - 31
and event_date <= current_date - 1
Group By 1,2,3,4,5,6,7
Union
Select
country
,event_date
,revenue
,spender_count
,active_user_count
,arppu
,arpdau
From arppu_arpdau_country
Where event_date > current_date - 31
and event_date <= current_date - 1
Group By 1,2,3,4,5,6,7"
arppu_arpdau_trailing30 <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_arppu_arpdau_trailing30, sep = ""))
arppu_arpdau_trailing30 <- data.table(arppu_arpdau_trailing30)
# Create arpdau data set
arppu_arpdau_trailing30.2 <- arppu_arpdau_trailing30
##============================================
## SPENDERS by Active Status (g_spenders_active_status)
##============================================
sql_spenders_active_status <- "Select
a.country
,a.lifetime_28_day_bucket
,b.deployment
,a.lifetime_28_day_engagement_status
,a.Spender_Count
,a.ARPPU
From spenders_active_status a
Left Join deployments b
on a.lifetime_28_day_bucket = b.trailing_28_day_bucket
Group By 1,2,3,4,5,6
Union
Select
a.country
,a.lifetime_28_day_bucket
,b.deployment
,a.lifetime_28_day_engagement_status
,a.Spender_Count
,a.ARPPU
From spenders_active_status_country a
Left Join deployments b
on a.lifetime_28_day_bucket = b.trailing_28_day_bucket
Group By 1,2,3,4,5,6"
spenders_active_status <- dbGetQuery(conn, paste("SET search_path = app139203;", sql_spenders_active_status, sep = ""))
# data table
spenders_active_status <- data.table(spenders_active_status)
# engagement status factors
spenders_active_status$lifetime_28_day_engagement_status_f <- factor(
spenders_active_status$lifetime_28_day_engagement_status, levels = c("Installed",
"Active30",
"Active200",
"Active2000"))
# deployment factor
spenders_active_status$deployment <- as.factor(spenders_active_status$deployment)
#correct the '1.2' labeling to '1.20' to selector works
spenders_active_status[deployment == '1.2', deployment := '1.20']
#order deployment factor levels
spenders_active_status$deployment <- factor(
spenders_active_status$deployment, levels = c("1.16",
"1.17",
"1.18",
"1.19",
"1.20",
"1.21",
"1.22",
"1.23"
))
## Disconnect from server
dbDisconnect(conn)
|
b008f208773269422115ae08bc5ee9672f774df9 | bf1d8cc5cda83091f948e5e02041abb24b471955 | /shiny_OSM_AusTiles/global.R | 0532aa8fe458fc4d618332510a5c131eccc9ea3f | [] | no_license | Lakminikw/aus_cloropleth | b43ce979f56a2f420b49f846bc669fe49bbcf536 | bacd0ccc1a40f5d6b6cc99d9a33ea4e18ea939ce | refs/heads/master | 2021-06-01T00:58:44.750173 | 2016-06-16T04:38:06 | 2016-06-16T04:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 480 | r | global.R | library(shiny) # web framework
library(leaflet) # map interactivity
library(rgdal) # spatial data processing
library(jsonlite) # read json files
# read geojson
gdal.states <- readOGR("maps/au-states-density.geojson", "OGRGeoJSON")
# cloropleth aesthetics
colors <- c("#FFEDA0", "#FED976", "#FEB24C", "#FD8D3C", "#FC4E2A", "#E31A1C", "#BD0026", "#800026")
bins <- c(-Inf, 10, 20, 50, 100, 200, 500, 1000, Inf) + 0.00000001
pal <- colorBin(colors, NULL, bins) |
ab5aeaef2fb293ad5be199da0d05d53144b29f4f | 0574647f4c0678d6073d64511ce04e5fcecb28b4 | /ML_Pipeline/Generate_Results_CSV.R | dc1d02c1aa1c8ee341a6e74daa0425b1ead626db | [] | no_license | atifghulamnabi/frAmework-clouD-bAsed-buG-predIctiOn | 516502a3dda6d4875b83df64e5e235a3b5e589c1 | bd18ce5e3632dd76ba4147af296f9db40ee7f5e2 | refs/heads/master | 2023-04-19T23:05:27.440786 | 2020-09-03T13:52:19 | 2020-09-03T13:52:19 | 230,427,835 | 1 | 2 | null | 2021-04-26T20:34:59 | 2019-12-27T10:57:29 | Java | UTF-8 | R | false | false | 1,878 | r | Generate_Results_CSV.R |
### Generate CSV files for each dataframe (J48 and Logistic models) also add average Precision and Recall values(Excluding cases: yes-yes=0)
generateResultsCSV<-function(releasenames, J48Results.df, logisticResults.df, CKMetricsFilesList, j48ResultsFileName, logisticResultsFileName){
  # Appends an "Average" row to each model's results frame -- mean Precision
  # and Recall over releases with at least one true positive (yes_yes > 0) --
  # then writes both frames to CSV. The average row is shaped by
  # generatePredictionResults() (defined elsewhere in this project).
  ## Calculate Average of Precision and Recall values (J48 and Logistic model)
  # avgPrecisionJ48<-mean(J48Results.df$Precision)
  # avgPrecisionLogistic<-mean(logisticResults.df$Precision)
  avgPrecisionJ48<-mean(subset(J48Results.df$Precision, J48Results.df$yes_yes>0 ))
  avgPrecisionLogistic<-mean(subset(logisticResults.df$Precision, logisticResults.df$yes_yes>0 ))
  # avgRecallJ48<-mean(J48Results.df$Recall)
  # avgRecallLogistic<-mean(logisticResults.df$Recall)
  avgRecallJ48<-mean(subset(J48Results.df$Recall, J48Results.df$yes_yes>0 ))
  avgRecallLogistic<-mean(subset(logisticResults.df$Recall, logisticResults.df$yes_yes>0 ))
  # Placeholder rows: only precision/recall carry the averages; the remaining
  # columns are zero-filled.
  j48Row<-data.frame("precision"=avgPrecisionJ48, "recall"=avgRecallJ48, "accuracy"=0, "false_positive_rate" =0, "yes_yes"=0, "no_no" =0, "yes_no"=0, "no_yes"=0)
  logisticRow<-data.frame("precision"=avgPrecisionLogistic, "recall"=avgRecallLogistic, "accuracy"=0, "false_positive_rate" =0, "yes_yes"=0, "no_no" =0, "yes_no"=0, "no_yes"=0)
  J48Results.df<-rbind(J48Results.df, data.frame(generatePredictionResults(releasenames[1:length(CKMetricsFilesList)],length(CKMetricsFilesList), 0, "Average","-", j48Row, j48ResultsFileName)))
  logisticResults.df<-rbind(logisticResults.df, data.frame(generatePredictionResults(releasenames[1:length(CKMetricsFilesList)],length(CKMetricsFilesList),0, "Average","-", logisticRow, logisticResultsFileName)))
  ## Generate CSV files for prediction results of J48 and Logistic models
  write.csv(J48Results.df, j48ResultsFileName)
  write.csv(logisticResults.df, logisticResultsFileName)
}
56eca11d5f869d964509e187158980e13243ce09 | 2517225bcb19eeaf579aff46017b99de8d760546 | /R/3-pegar_dados.R | 21791e0cf7f62999a9a0316202e08616fd51ba61 | [] | no_license | cadutargino/ControledoControle | e25db594a5bc6bd8c358ce985d04621198f79cda | 0ed98b870089f30da0584c48ca79b869a6774255 | refs/heads/master | 2023-04-09T11:24:35.388026 | 2021-04-17T13:02:21 | 2021-04-17T13:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,555 | r | 3-pegar_dados.R | ##### Monitor do Controle Concentrado no STF ######
### Rodrigo Dornelles
### dezembro/2020
library(magrittr)
## As funções aqui vão servir para baixar os dados das partes e os andamentos
# e para ler os arquivos baixados
# garantir que existem as pastas
fs::dir_create("data-raw/partes/")
fs::dir_create("data-raw/andamentos/")
#### baixar_dados_processo ####
# Função que recebe o número do incidente e baixa do STF as partes e os
# andamentos
# Já colocada de forma a facilitar a iteração
baixar_dados_processo <- function (incidente, dormir = 0, naMarra = F, prog) {
# barra de progresso, para quando for iterar
if (!missing(prog)) {
prog()
}
# sleep para não causar
Sys.sleep(dormir)
# preparar a querry colocando o incidente
q_incidente <- list("incidente" = incidente)
# urls que serão buscadas
u_partes <- "http://portal.stf.jus.br/processos/abaPartes.asp"
u_andamentos <- "http://portal.stf.jus.br/processos/abaAndamentos.asp"
# nomes dos futuros arquivos
caminho_partes <- paste0("data-raw/partes/Partes-", incidente, ".html")
caminho_andamentos <- paste0("data-raw/andamentos/Andamentos-", incidente,
".html")
# baixar partes se não existir e se não precisar forçar
if(!file.exists(caminho_partes) | naMarra) {
httr::GET(url = u_partes, query = q_incidente,
httr::write_disk(caminho_partes, overwrite = TRUE),
httr::user_agent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36"))
}
# baixar andamentos se não existir e se não precisar forçar
if(!file.exists(caminho_andamentos) | naMarra) {
httr::GET(url = u_andamentos, query = q_incidente,
httr::write_disk(caminho_andamentos, overwrite = TRUE),
httr::user_agent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36"))
}
}
#### ler_aba_partes ####
# Parse the downloaded "partes" (parties) page of a case into tidy form:
# one tibble row per party entry, ready to be stacked across cases.
# Emits a message and returns NULL when the page was never downloaded.
ler_aba_partes <- function (incidente, naMarra = F, prog) {
  # tick the caller-supplied progress bar, if any
  if (!missing(prog)) {
    prog()
  }
  arquivo <- paste0("data-raw/partes/Partes-", incidente, ".html")
  if (!fs::file_exists(arquivo)) {
    message(paste("Arquivo inexistente - Partes do incidente", incidente))
    return()
  }
  pagina <- xml2::read_html(arquivo, encoding = "UTF-8")
  # role of each entry (active/passive party, lawyer, prosecutor, ...):
  # the leading run of uppercase letters in the "detalhe-parte" div
  tipos <- pagina %>%
    xml2::xml_find_all("//div[@class='detalhe-parte']") %>%
    xml2::xml_text() %>%
    stringr::str_extract("(?>[A-Z]*)")
  # the party name itself, with stray space characters normalized
  # (future work: split out OAB numbers, group lawyers of the same party)
  nomes <- pagina %>%
    xml2::xml_find_all("//div[@class='nome-parte']") %>%
    xml2::xml_text() %>%
    stringr::str_replace_all(" ", " ") %>%
    stringr::str_squish()
  tibble::tibble(incidente = incidente,
                 tipo = tipos,
                 nome = nomes)
}
#### ler_aba_andamentos ####
# Parse the downloaded "andamentos" (docket) page of a case into tidy form.
# Returns a tibble with one row per docket entry, or NULL (with a message)
# when the page has not been downloaded yet.
ler_aba_andamento <- function (incidente, naMarra = F, prog) {
  # tick the caller-supplied progress bar, if any
  if (!missing(prog)) {
    prog()
  }
  arquivo <- paste0("data-raw/andamentos/Andamentos-", incidente,
                    ".html")
  if (!fs::file_exists(arquivo)) {
    message(paste("Arquivo inexistente - Andamentos do incidente", incidente))
    return()
  }
  pagina <- xml2::read_html(arquivo, encoding = "UTF-8")
  # entry dates, parsed as day-month-year
  datas <- pagina %>%
    xml2::xml_find_all("//*[contains(@class, 'andamento-data')]") %>%
    xml2::xml_text() %>%
    lubridate::dmy()
  # the "title" of each docket entry, whitespace-squished
  titulos <- pagina %>%
    xml2::xml_find_all("//*[contains(@class, 'andamento-nome')]") %>%
    xml2::xml_text() %>%
    stringr::str_squish()
  tibble::tibble(incidente = incidente,
                 data = datas,
                 andamento = titulos)
}
|
0a6686d8a3eeb475d52c57e18d5506609ef0bef2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/iFad/examples/tau_g_chain.Rd.R | 9e2d29806e0c80235e3ec9f0fc85927842ef0c0f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 184 | r | tau_g_chain.Rd.R | library(iFad)
### Name: tau_g_chain
### Title: The updated tau_g in the Gibbs sampling process
### Aliases: tau_g_chain
### Keywords: datasets
### ** Examples
data(tau_g_chain)
|
ec4cb43d80676d2e8ff8ffc64be8e92d05b48ffc | 7cf3e05301d18dcea90c275f5afd05250c4f4fa4 | /plot2.R | 188086624e92040e846457d40357d94bc8739f36 | [] | no_license | jcassiojr/ExData_Plotting1 | cbe30440438b47c5bb1c723b25ef872aa5b4c559 | 56515debbb77c2a9131893215102ad7d9ed0ad5a | refs/heads/master | 2021-01-20T15:51:23.976143 | 2015-02-05T19:23:16 | 2015-02-05T19:23:16 | 30,312,939 | 0 | 0 | null | 2015-02-04T18:00:43 | 2015-02-04T18:00:43 | null | UTF-8 | R | false | false | 1,120 | r | plot2.R | ##########################################################
# PLOT 2
## Exploratory Data Analysys
### Project 1
### Author: Cassio
##########################################################
### Loading libraries
library(lubridate)
library(dplyr)
### Loading the Data (avoiding factors)
## put the file on a subfolder named 'data' under your working directory
epcData <- read.table("./data/household_power_consumption.txt", sep = ";",
header = TRUE, stringsAsFactors=FALSE)
### filtering data
epcData_ft <-
epcData %>%
## creating column DateTime
mutate(DateTime = paste(Date,Time)) %>%
## changing Date and Time
mutate(Date = dmy(Date)) %>%
## filtering from 2007/02/01 to 2007/02/01
filter(year(Date) == 2007 & month(Date) == 2 & (day(Date) == 1 | day(Date) == 2))
### plotting data
plot(strptime(epcData_ft$DateTime,'%d/%m/%Y %H:%M:%S'), as.numeric(epcData_ft$Global_active_power),
ylab= "Global Active Power (kilowatts)", xlab = "", type='l', col='black')
## copying plot to PNG
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
27c261189d5b584717eb64f2a8628b7bc0be5b91 | 820f8aca9a690688cd5a48caa9038fbeff6ba971 | /man/write_las.Rd | 42605903485e518e97deca1ec9e3395f47c91f29 | [] | no_license | jkennel/transducer | 07374d4967498762cb692e71068bf89b6f026bc3 | 881ae6eb2570a15c6dc6aa91a69308183c3023f2 | refs/heads/master | 2021-06-11T18:14:03.474803 | 2021-06-04T12:22:17 | 2021-06-04T12:22:17 | 195,269,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 281 | rd | write_las.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/las_header.R
\name{write_las}
\alias{write_las}
\title{write_las}
\usage{
write_las(dat, fn_las, gravity = 9.80665, density = 0.9989)
}
\arguments{
\item{fn_las}{}
}
\value{
}
\description{
write_las
}
|
03e4b84942752d1b157e2c49d40bed0eb7afb0fb | 9e107db0f73b03c91354550e5b1d6d77fef90775 | /api-json.R | cfc2c83dd3017405bac313a40434e410ae4f4024 | [] | no_license | ignputraa/data-acquistion | 411d8aec9999891205d838b7a0b33796b1291c92 | 706b94c0e9fba5f32606f924f0e1f12587bdffb4 | refs/heads/master | 2021-01-16T03:15:42.175899 | 2020-02-24T12:26:36 | 2020-02-24T12:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 291 | r | api-json.R | #httr
#jsonlite
urlapi <- "https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=2020-02-22&endtime=2020-02-24"
getreq <- httr::GET(urlapi)
getcontent <- httr::content(getreq,"text")
datajs <- jsonlite::fromJSON(getcontent,flatten = TRUE)
dfgempa <- datajs$features
|
7a553bdd96784ef9cd80b0c36dbff816033cb183 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /wk/tests/testthat/test-wksxp-translate.R | 0d58a6a78c5d4537d6c0ef0a12f0db56f8b59b08 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 13,869 | r | test-wksxp-translate.R |
# Round trip WKT -> WKB -> wksxp: a point should come back as a classed
# one-row coordinate matrix wrapped in a list.
test_that("basic wksxp translation works to WKB", {
  expect_identical(
    wkb_translate_wksxp(wkt_translate_wkb("POINT (30 10)")),
    list(
      structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
    )
  )
})
# WKT -> wksxp for every simple-feature type with plain XY coordinates.
# The wksxp form is: points/linestrings as classed matrices, (multi)polygons
# as nested lists of ring matrices, collections as classed lists.
test_that("basic wksxp translation works on non-empty 2D geoms", {
  expect_identical(
    wkt_translate_wksxp("POINT (30 10)"),
    list(
      structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
    )
  )
  expect_identical(
    wkt_translate_wksxp("LINESTRING (30 10, 0 0)"),
    list(
      structure(matrix(c(30, 10, 0, 0), ncol = 2, byrow = TRUE), class = "wk_linestring")
    )
  )
  # polygon: list of ring matrices
  expect_identical(
    wkt_translate_wksxp("POLYGON ((30 10, 0 0, 10 10, 30 10))"),
    list(
      structure(
        list(
          matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
        ),
        class = "wk_polygon"
      )
    )
  )
  # multipoint: list of single-row matrices
  expect_identical(
    wkt_translate_wksxp("MULTIPOINT ((30 10), (0 0))"),
    list(
      structure(
        list(
          matrix(c(30, 10), ncol = 2),
          matrix(c(0, 0), ncol = 2)
        ),
        class = "wk_multipoint"
      )
    )
  )
  expect_identical(
    wkt_translate_wksxp("MULTILINESTRING ((30 10, 0 0), (20 20, 0 0))"),
    list(
      structure(
        list(
          matrix(c(30, 10, 0, 0), ncol = 2, byrow = TRUE),
          matrix(c(20, 20, 0, 0), ncol = 2, byrow = TRUE)
        ),
        class = "wk_multilinestring"
      )
    )
  )
  # multipolygon: list (polygons) of lists (rings) of matrices
  expect_identical(
    wkt_translate_wksxp("MULTIPOLYGON (((30 10, 0 0, 10 10, 30 10)), ((30 10, 0 0, 10 10, 30 10)))"),
    list(
      structure(
        list(
          list(
            matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
          ),
          list(
            matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
          )
        ),
        class = "wk_multipolygon"
      )
    )
  )
  # nested collections are represented recursively
  expect_identical(
    wkt_translate_wksxp(
      "GEOMETRYCOLLECTION (POINT (30 10), GEOMETRYCOLLECTION (POINT (12 6)), LINESTRING (1 2, 3 4))"
    ),
    list(
      structure(
        list(
          structure(matrix(c(30, 10), ncol = 2), class = "wk_point"),
          structure(
            list(
              structure(
                matrix(c(12, 6), ncol = 2),
                class = "wk_point"
              )
            ),
            class = "wk_geometrycollection"
          ),
          structure(
            matrix(c(1, 2, 3, 4), ncol = 2, byrow = TRUE),
            class = "wk_linestring"
          )
        ),
        class = "wk_geometrycollection"
      )
    )
  )
})
# WKT -> wksxp for XYZ geometries: the third coordinate column is flagged
# with a has_z = TRUE attribute on each classed object.
test_that("basic wksxp translation works on non-empty Z geoms", {
  expect_identical(
    wkt_translate_wksxp("POINT Z (30 10 2)"),
    list(
      structure(matrix(c(30, 10, 2), ncol = 3), has_z = TRUE, class = "wk_point")
    )
  )
  expect_identical(
    wkt_translate_wksxp("MULTIPOINT Z ((30 10 5), (0 0 1))"),
    list(
      structure(
        list(
          matrix(c(30, 10, 5), ncol = 3),
          matrix(c(0, 0, 1), ncol = 3)
        ),
        has_z = TRUE,
        class = "wk_multipoint"
      )
    )
  )
  # has_z is carried on the child geometries, not on the collection itself
  expect_identical(
    wkt_translate_wksxp(
      "GEOMETRYCOLLECTION (
      POINT Z (30 10 99),
      GEOMETRYCOLLECTION (POINT Z (12 6 10)),
      LINESTRING Z (1 2 3, 3 4 5)
    )"
    ),
    list(
      structure(
        list(
          structure(matrix(c(30, 10, 99), ncol = 3), has_z = TRUE, class = "wk_point"),
          structure(
            list(
              structure(
                matrix(c(12, 6, 10), ncol = 3),
                has_z = TRUE,
                class = "wk_point"
              )
            ),
            class = "wk_geometrycollection"
          ),
          structure(
            matrix(c(1, 2, 3, 3, 4, 5), ncol = 3, byrow = TRUE),
            has_z = TRUE,
            class = "wk_linestring"
          )
        ),
        class = "wk_geometrycollection"
      )
    )
  )
})
# WKT -> wksxp for XYM geometries: mirrors the Z tests, but the extra
# (measure) column is flagged with has_m = TRUE instead of has_z.
test_that("basic wksxp translation works on non-empty M geoms", {
  expect_identical(
    wkt_translate_wksxp("POINT M (30 10 2)"),
    list(
      structure(matrix(c(30, 10, 2), ncol = 3), has_m = TRUE, class = "wk_point")
    )
  )
  expect_identical(
    wkt_translate_wksxp("MULTIPOINT M ((30 10 5), (0 0 1))"),
    list(
      structure(
        list(
          matrix(c(30, 10, 5), ncol = 3),
          matrix(c(0, 0, 1), ncol = 3)
        ),
        has_m = TRUE,
        class = "wk_multipoint"
      )
    )
  )
  # has_m is carried on the child geometries, not on the collection itself
  expect_identical(
    wkt_translate_wksxp(
      "GEOMETRYCOLLECTION (
      POINT M (30 10 99),
      GEOMETRYCOLLECTION (POINT M (12 6 10)),
      LINESTRING M (1 2 3, 3 4 5)
    )"
    ),
    list(
      structure(
        list(
          structure(matrix(c(30, 10, 99), ncol = 3), has_m = TRUE, class = "wk_point"),
          structure(
            list(
              structure(
                matrix(c(12, 6, 10), ncol = 3),
                has_m = TRUE,
                class = "wk_point"
              )
            ),
            class = "wk_geometrycollection"
          ),
          structure(
            matrix(c(1, 2, 3, 3, 4, 5), ncol = 3, byrow = TRUE),
            has_m = TRUE,
            class = "wk_linestring"
          )
        ),
        class = "wk_geometrycollection"
      )
    )
  )
})
# WKT -> wksxp for XYZM geometries: four coordinate columns, flagged with
# both has_z = TRUE and has_m = TRUE.
# Fix: the test description said "3D geoms" although it exercises ZM input;
# renamed for accuracy and for consistency with the reverse-translation test
# "basic reverse wksxp translation works on non-empty ZM geoms" below.
test_that("basic wksxp translation works on non-empty ZM geoms", {
  expect_identical(
    wkt_translate_wksxp("POINT ZM (30 10 2 13)"),
    list(
      structure(
        matrix(c(30, 10, 2, 13), ncol = 4),
        has_z = TRUE, has_m = TRUE, class = "wk_point"
      )
    )
  )
  expect_identical(
    wkt_translate_wksxp("MULTIPOINT ZM ((30 10 5 1), (0 0 1 6))"),
    list(
      structure(
        list(
          matrix(c(30, 10, 5, 1), ncol = 4),
          matrix(c(0, 0, 1, 6), ncol = 4)
        ),
        has_z = TRUE,
        has_m = TRUE,
        class = "wk_multipoint"
      )
    )
  )
  # has_z/has_m are carried on the child geometries, not on the collection
  expect_identical(
    wkt_translate_wksxp(
      "GEOMETRYCOLLECTION (
      POINT ZM (30 10 99 2),
      GEOMETRYCOLLECTION (POINT ZM (12 6 10 9)),
      LINESTRING ZM (1 2 3 4, 3 4 5 6)
    )"
    ),
    list(
      structure(
        list(
          structure(matrix(c(30, 10, 99, 2), ncol = 4), has_z = TRUE, has_m = TRUE, class = "wk_point"),
          structure(
            list(
              structure(
                matrix(c(12, 6, 10, 9), ncol = 4),
                has_z = TRUE,
                has_m = TRUE,
                class = "wk_point"
              )
            ),
            class = "wk_geometrycollection"
          ),
          structure(
            matrix(c(1, 2, 3, 4, 3, 4, 5, 6), ncol = 4, byrow = TRUE),
            has_z = TRUE,
            has_m = TRUE,
            class = "wk_linestring"
          )
        ),
        class = "wk_geometrycollection"
      )
    )
  )
})
# WKT -> wksxp for EWKT input with an SRID prefix: the SRID becomes a numeric
# `srid` attribute on the top-level geometry only.
# Fix: the test description was a copy-paste duplicate of the 2D test's name
# ("...works on non-empty 2D geoms") although this test exercises SRID
# handling; renamed so test reports are unambiguous, matching the reverse
# test "basic reverse wksxp translation works with SRID" below.
test_that("basic wksxp translation works with SRID", {
  expect_identical(
    wkt_translate_wksxp("SRID=837;POINT (30 10)"),
    list(
      structure(matrix(c(30, 10), ncol = 2), srid = 837, class = "wk_point")
    )
  )
  expect_identical(
    wkt_translate_wksxp("SRID=12;MULTIPOINT ((30 10), (0 0))"),
    list(
      structure(
        list(
          matrix(c(30, 10), ncol = 2),
          matrix(c(0, 0), ncol = 2)
        ),
        srid=12,
        class = "wk_multipoint"
      )
    )
  )
  # the srid attribute lands on the outer collection, not on its children
  expect_identical(
    wkt_translate_wksxp(
      "SRID=89;GEOMETRYCOLLECTION (
      POINT (30 10),
      GEOMETRYCOLLECTION (POINT (12 6)),
      LINESTRING (1 2, 3 4)
    )"
    ),
    list(
      structure(
        list(
          structure(matrix(c(30, 10), ncol = 2), class = "wk_point"),
          structure(
            list(
              structure(
                matrix(c(12, 6), ncol = 2),
                class = "wk_point"
              )
            ),
            class = "wk_geometrycollection"
          ),
          structure(
            matrix(c(1, 2, 3, 4), ncol = 2, byrow = TRUE),
            class = "wk_linestring"
          )
        ),
        srid = 89,
        class = "wk_geometrycollection"
      )
    )
  )
})
# Reverse direction: wksxp -> WKT for every simple-feature type with plain
# XY coordinates. Inputs mirror the expected outputs of the forward 2D test.
test_that("basic reverse wksxp translation works on non-empty 2D geoms", {
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
      )
    ),
    "POINT (30 10)"
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(matrix(c(30, 10, 0, 0), ncol = 2, byrow = TRUE), class = "wk_linestring")
      )
    ),
    "LINESTRING (30 10, 0 0)"
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
          ),
          class = "wk_polygon"
        )
      )
    ),
    "POLYGON ((30 10, 0 0, 10 10, 30 10))"
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10), ncol = 2),
            matrix(c(0, 0), ncol = 2)
          ),
          class = "wk_multipoint"
        )
      )
    ),
    "MULTIPOINT ((30 10), (0 0))"
  )
  # NOTE(review): stray trailing comma after the expected string below
  # (harmless empty argument in ...; could be removed).
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10, 0, 0), ncol = 2, byrow = TRUE),
            matrix(c(20, 20, 0, 0), ncol = 2, byrow = TRUE)
          ),
          class = "wk_multilinestring"
        )
      )
    ),
    "MULTILINESTRING ((30 10, 0 0), (20 20, 0 0))",
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            list(
              matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
            ),
            list(
              matrix(c(30, 10, 0, 0, 10, 10, 30, 10), ncol = 2, byrow = TRUE)
            )
          ),
          class = "wk_multipolygon"
        )
      )
    ),
    "MULTIPOLYGON (((30 10, 0 0, 10 10, 30 10)), ((30 10, 0 0, 10 10, 30 10)))"
  )
  # nested collections serialize recursively
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            structure(matrix(c(30, 10), ncol = 2), class = "wk_point"),
            structure(
              list(
                structure(
                  matrix(c(12, 6), ncol = 2),
                  class = "wk_point"
                )
              ),
              class = "wk_geometrycollection"
            ),
            structure(
              matrix(c(1, 2, 3, 4), ncol = 2, byrow = TRUE),
              class = "wk_linestring"
            )
          ),
          class = "wk_geometrycollection"
        )
      )
    ),
    "GEOMETRYCOLLECTION (POINT (30 10), GEOMETRYCOLLECTION (POINT (12 6)), LINESTRING (1 2, 3 4))"
  )
})
# Reverse direction: wksxp -> WKT for XYZM geometries; has_z/has_m attributes
# on the input select the "ZM" WKT dimension tag.
test_that("basic reverse wksxp translation works on non-empty ZM geoms", {
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          matrix(c(30, 10, 1, 2), ncol = 4),
          has_z = TRUE, has_m = TRUE, class = "wk_point"
        )
      )
    ),
    "POINT ZM (30 10 1 2)"
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10, 1, 2, 0, 0, 1, 2, 10, 10, 1, 2, 30, 10, 1, 2), ncol = 4, byrow = TRUE)
          ),
          has_z = TRUE,
          has_m = TRUE,
          class = "wk_polygon"
        )
      )
    ),
    "POLYGON ZM ((30 10 1 2, 0 0 1 2, 10 10 1 2, 30 10 1 2))"
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10, 1, 2), ncol = 4),
            matrix(c(0, 0, 1, 2), ncol = 4)
          ),
          has_z = TRUE,
          has_m = TRUE,
          class = "wk_multipoint"
        )
      )
    ),
    "MULTIPOINT ZM ((30 10 1 2), (0 0 1 2))"
  )
  # NOTE(review): stray trailing comma after the expected string below
  # (harmless empty argument in ...; could be removed).
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            matrix(c(30, 10, 1, 2, 0, 0, 1, 2), ncol = 4, byrow = TRUE),
            matrix(c(20, 20, 1, 2, 0, 0, 1, 2), ncol = 4, byrow = TRUE)
          ),
          has_z = TRUE,
          has_m = TRUE,
          class = "wk_multilinestring"
        )
      )
    ),
    "MULTILINESTRING ZM ((30 10 1 2, 0 0 1 2), (20 20 1 2, 0 0 1 2))",
  )
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          list(
            list(
              matrix(c(30, 10, 1, 2, 0, 0, 1, 2, 10, 10, 1, 2, 30, 10, 1, 2), ncol = 4, byrow = TRUE)
            ),
            list(
              matrix(c(30, 10, 2, 3, 0, 0, 2, 3, 10, 10, 2, 3, 30, 10, 2, 3), ncol = 4, byrow = TRUE)
            )
          ),
          has_z = TRUE,
          has_m = TRUE,
          class = "wk_multipolygon"
        )
      )
    ),
    "MULTIPOLYGON ZM (((30 10 1 2, 0 0 1 2, 10 10 1 2, 30 10 1 2)), ((30 10 2 3, 0 0 2 3, 10 10 2 3, 30 10 2 3)))"
  )
})
# Reverse direction: a numeric srid attribute serializes as an EWKT
# "SRID=<n>;" prefix.
test_that("basic reverse wksxp translation works with SRID", {
  expect_identical(
    wksxp_translate_wkt(
      list(
        structure(
          matrix(c(30, 10), ncol = 2),
          srid = 43, class = "wk_point"
        )
      )
    ),
    "SRID=43;POINT (30 10)"
  )
})
# A NULL list element (missing geometry) serializes as NA, not an error.
test_that("basic reverse wksxp translation works with NULL", {
  expect_identical(wksxp_translate_wkt(list(NULL)), NA_character_)
})
# wksxp -> wksxp is the identity: the same classed structure comes back out.
test_that("identity wksxp translation works", {
  expect_identical(
    wksxp_translate_wksxp(
      list(
        structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
      )
    ),
    list(
      structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
    )
  )
})
# wksxp -> WKB must agree byte-for-byte with the WKT -> WKB path.
test_that("wksxp to wkb works", {
  expect_identical(
    wksxp_translate_wkb(
      list(
        structure(matrix(c(30, 10), ncol = 2), class = "wk_point")
      )
    ),
    wkt_translate_wkb("POINT (30 10)")
  )
})
# Non-wksxp inputs (bare WKT strings, wk_wkb vectors) must raise a classed
# parse error rather than crashing at the C++ level.
test_that("wksxp_translate_* doesn't segfault on other inputs", {
  expect_error(wksxp_translate_wkt("POINT (30 10)"), class = "WKParseException")
  expect_error(wksxp_translate_wkt(as_wkb("POINT (30 10)")), class = "WKParseException")
})
|
77cd78de6c58c321d636280ad7c56cbbecd7e21a | 5de182245498aa1e32d8db7d1c4664b8d6e05ae8 | /hangboard_timer/app.R | 8424d5e6af65bd348342b7f0a6302c6997229ba8 | [] | no_license | danielward27/climbing_app | 063a7f4c479b161a7fbdb887fbd76006eaaaa55f | cc88518c08f1cdcbe9aaa91135cbd101c035a079 | refs/heads/master | 2022-07-17T08:00:25.696135 | 2020-05-22T11:02:16 | 2020-05-22T11:02:16 | 246,267,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,870 | r | app.R | #
# This is a Shiny web application for training climbing using a hangboard
#### Imports ####
library(shiny)
library(lubridate)
library(beepr)
library(tidyverse)
library(googlesheets4)
library(googledrive)
# Global plotting theme for the results graphs.
theme_set(
  theme_bw(base_size = 25)
)
# Google Sheet used as the persistent store for recorded workout results.
sheet_URL <- "https://docs.google.com/spreadsheets/d/17qnd_BELvIaU-FIFs8tZSxY8Th0irG4Bw_CJk1Atxo8/edit?usp=sharing"
# Ordered list of the exercises making up one workout session.
exercises <- c("Warm Up", rep(c("Front Lever", "Front Three", "Front Three",
                                "Half Crimp", "Half Crimp", "Pinch Block",
                                "Pinch Block", "Half Crimp", "Half Crimp"), 2),
               "Face Pulls", "External Rotations", "Finished")
# Exercises we want to record data for.
# Fix: the original `tracked_exercises = choices = c(...)` also created an
# unused global `choices` (leftover from a selectInput() call); the alias
# has been dropped.
tracked_exercises <- c("Front Lever", "Front Three",
                       "Front Three", "Half Crimp",
                       "Half Crimp", "Pinch Block")
bh <- 30  # Time (seconds) between left- and right-hand hangs
# Which hand(s) each exercise uses, aligned element-for-element with `exercises`.
hand <- c("B", "B", rep(c("R", "L", "L", "R"), 2), "B", rep(c("L", "R", "R", "L"), 2), "B", "B", "B")
hand[hand == "B"] <- "Both"
hand[hand == "L"] <- "Left"
hand[hand == "R"] <- "Right"
#### UI ####
# ---- UI ---------------------------------------------------------------
# Left: live workout timer controls. Right: tabs for recording a result,
# plotting results, browsing the raw table, and adjusting the rest interval.

# Timer display and control buttons
timer_panel <- sidebarPanel(
  width = 4,
  h3("Next exercise:"),
  textOutput("next_ex"),
  textOutput("time_to_ex"),
  actionButton("start", "Start"),
  actionButton("stop", "Stop"),
  actionButton("reset", "Reset"),
  actionButton("skip", "Skip"),
  textOutput("completed")
)

# Form for entering one result row
record_tab <- tabPanel(
  "Record results",
  br(),
  numericInput("bodyweight", "Bodyweight / kg", value = NA, min = 0, max = 200, step = 0.5),
  selectInput("record_ex", label = "Exercise",
              choices = tracked_exercises, selected = "Front Lever"),
  radioButtons("record_hand", label = "Hand",
               choices = c("Left", "Right", "Both"), selected = "Both",
               inline = TRUE),
  numericInput("record_time", "Time / s:",
               value = NA, min = 0, step = 0.5),
  numericInput("record_weight", "Weight / kg:",
               value = NA, step = 0.5),
  actionButton("submit", "Submit")
)

# Exercise/metric pickers plus the results plot
graphs_tab <- tabPanel(
  "Graphs",
  br(),
  selectInput("ex_choice", label = "Exercise",
              choices = tracked_exercises),
  selectInput("metric_choice", label = "Metric",
              choices = c("Time", "Weight")),
  plotOutput("plot")
)

# Raw results table
table_tab <- tabPanel(
  "Table",
  br(),
  p("Table of your results so far:"),
  DT::dataTableOutput("results_df")
)

# Rest-interval slider
settings_tab <- tabPanel(
  "Settings",
  br(),
  sliderInput("interval", label = "Time between exercises / s:",
              round = TRUE, value = 240, min = 40, max = 440, step = 10)
)

ui <- fluidPage(
  br(),
  includeCSS("styles.css"),
  sidebarLayout(
    timer_panel,
    mainPanel(
      tabsetPanel(type = "tabs", record_tab, graphs_tab, table_tab, settings_tab)
    )
  )
)
# ---- Server -----------------------------------------------------------
server <- function(input, output, session) {
  # Reactive state: position in the exercise list, seconds remaining on the
  # countdown, and whether the countdown is running.
  counter <- reactiveVal(1)
  timer <- reactiveVal(10)
  active <- reactiveVal(FALSE)

  # Control buttons
  observeEvent(input$start, active(TRUE))
  observeEvent(input$stop, active(FALSE))
  observeEvent(input$reset, {
    timer(10)
    counter(1)
  })
  observeEvent(input$skip, timer(1))

  # Workout plan: one row per step (exercise, hand(s), today's date, duration)
  rv <- reactiveValues(
    ex_df = tibble(
      "exercise" = exercises,
      "hand" = hand,
      "date" = as.character(Sys.Date()),
      "timings" = NA
    )
  )

  # Recompute step durations whenever the rest-interval slider changes
  observe({
    rest <- input$interval
    rv$ex_df$timings <- c(10, rest, rest, rep(c(bh, rest - bh), 4),
                          rest, rep(c(bh, rest - bh), 4), rest, 10)
  })

  # ---- Results ----
  # Initial render of the results table from the Google Sheet
  output$results_df <- DT::renderDataTable({
    read_sheet(sheet_URL)
  })

  observeEvent(input$submit, {
    # Append the submitted row to the Google Sheet
    new_row <- data.frame(
      "exercise" = input$record_ex,
      "hand" = input$record_hand,
      "time" = input$record_time,
      "weight" = input$record_weight,
      "bodyweight" = input$bodyweight,
      "date" = as.character(Sys.Date())
    )
    sheet_append(sheet_URL, new_row)
    # Pre-fill the form with the upcoming exercise as the default
    updateSelectInput(session, "record_ex",
                      selected = rv$ex_df$exercise[counter()])
    updateRadioButtons(session, "record_hand",
                       selected = rv$ex_df$hand[counter()])
    # Re-render the table so the new row shows up
    output$results_df <- DT::renderDataTable({
      read_sheet(sheet_URL)
    })
  })

  # ---- Plot results ----
  output$plot <- renderPlot({
    results <- read_sheet(sheet_URL)
    results$date <- ymd(results$date)
    results$hand <- as.factor(results$hand)
    palette_cols <- c("black", "goldenrod3", "darkslategray4")
    if (input$metric_choice == "Time") {
      plt <- results %>%
        filter(exercise == input$ex_choice) %>%
        ggplot(aes(date, time, color = hand, size = weight)) +
        geom_point(alpha = 0.6) +
        scale_color_manual(values = palette_cols) +
        scale_size("weight", range = c(1, 5))
    }
    if (input$metric_choice == "Weight") {
      plt <- results %>%
        filter(exercise == input$ex_choice) %>%
        ggplot(aes(date, weight, color = hand, size = time)) +
        geom_point(alpha = 0.6) +
        scale_color_manual(values = palette_cols) +
        scale_size("time", range = c(1, 5))
    }
    plt
  })

  # ---- Status text ----
  # Name of the upcoming exercise, with the hand(s) it uses
  output$next_ex <- renderText({
    hand_word <- if (rv$ex_df$hand[counter()] == "Both") "hands" else "hand"
    sprintf("%s (%s %s)", rv$ex_df$exercise[counter()],
            rv$ex_df$hand[counter()], hand_word)
  })
  # Time remaining until the next exercise
  output$time_to_ex <- renderText({
    paste(seconds_to_period(timer()))
  })
  # Proportion of exercises completed so far
  output$completed <- renderText(
    sprintf("Completed %s/%s", counter() - 1, length(exercises) - 1)
  )

  # ---- Timer: observer that invalidates every second ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (active()) {
        timer(timer() - 1)
        if (timer() <= 5 && timer() > 0) {
          # five short beeps counting down to the next hang
          beep(sound = 10)
        }
        if (timer() == 0) {
          # step finished: advance to the next exercise and reload the clock
          beep(sound = 1)
          counter(counter() + 1)
          timer(rv$ex_df$timings[counter()])
          if (counter() == length(exercises)) {
            # workout finished: play the fanfare and stop the clock
            beep(sound = 4)
            Sys.sleep(1)
            beep(sound = 3)
            active(FALSE)
            showModal(modalDialog(
              title = "Workout Completed!",
              "Workout completed!"))
          }
        } else if (counter() > 1 && timer() > (rv$ex_df$timings[counter()] - 11)) {
          # beeps marking each second of the active hang
          beep(sound = 2)
        }
      }
    })
  })
}

shinyApp(ui, server)
|
b3650a0ba631ebeb40993e39cbe895ee80f19219 | fb079c2261f5d9e886a2b069ea45f66271f6abd2 | /scripts/r/analysis_tech.r | 3d193033f6022f58544d841d814a99bd975dbcb1 | [] | no_license | wdoyle42/diagnosis | 8de29ec2062f829f2fd085780872b3567601816b | 4e9a767f61695196117d65c2b87369a37ce51125 | refs/heads/master | 2020-04-06T03:48:32.983189 | 2018-02-16T21:11:51 | 2018-02-16T21:11:51 | 56,604,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,976 | r | analysis_tech.r | ################################################################################
##
## <PROJ> National Affordability Report: 50 States
## <FILE> analysis.r
## <AUTH> Will Doyle and Benjamin Skinner
## <INIT> 1 8 2016
##
################################################################################
## PURPOSE
## This file compares net price with family income in each state within each sector
## and outputs the results. It also create a data file with the aggregate levels
## of net price for each group in each state. This is just for three states with technical college systems: TN, OK, GA
## CODE
# NOTE(review): rm(list = ls()) clears the calling environment; kept to match
# the original script's behaviour, but prefer running in a fresh R session.
rm(list=ls())

## Libraries
library(dplyr)
library(tidyr)

## Options: keep strings as character when reading data
options(stringsAsFactors = FALSE)

## Data directories
cddir <- '../../data/acs/'
rddir <- '../../data/ipeds/'
mddir <- '../../data/misc/'
addir <- '../../data/analysis/'

## Constants: quintile star labels and the three states with technical
## college systems covered by this script
my.labels <- c("*****", "****", "***", "**", "*")
statelist <- c("GA", "OK", "TN")

## Institution-level data, limited to the three states of interest
inst <- read.csv('../../data/analysis/institutions.csv', stringsAsFactors = FALSE)
inst <- dplyr::filter(inst, stabbr %in% statelist)

## Flag SREB technical-college sectors (codes 61-69) and attach to inst;
## flagged institutions are assigned to group 6
sreb <- read.csv("../../data/misc/srebsectoripeds.csv")
sreb$sreb_group <- 0
sreb$sreb_group[sreb$sector %in% 61:69] <- 1
sreb <- dplyr::select(sreb, unitid, sreb_group)
inst <- left_join(inst, sreb, by = "unitid")
inst$group[inst$sreb_group == 1] <- 6

## Aggregate: FTE-weighted average net price per year/state/sector/income band
inst <- inst %>%
  group_by(year, stabbr, group, faminccat) %>%
  summarize(avecost = round(weighted.mean(x = netprice, w = fteug, na.rm = TRUE)))

## Merge with ACS income data, then express net price as a share of income
acs <- read.csv(paste0(addir, "states.csv"))
afford <- left_join(inst, acs, by = c("stabbr", "year", "faminccat"))
afford$percent <- (afford$avecost / afford$aveinc) * 100

## Family-income categories, ordered so that "< 30k" comes first
levels <- c('< 30k', '30k to 48k', '48k to 75k', '75k to 110k', '> 110k')
afford$faminccat <- factor(afford$faminccat, levels = levels)

## Star ratings: quintiles of percent-of-income within year/sector/income band
afford <- afford %>%
  group_by(year, group, faminccat) %>%
  mutate(quant = cut(percent,
                     breaks = quantile(percent, probs = seq(0, 1, by = .2), na.rm = TRUE),
                     labels = my.labels,
                     include.lowest = TRUE))

## Output per-sector results
write.csv(afford, file = paste0(addir, "afford_tech.csv"), row.names = FALSE)

## Headcount-weighted average across sectors
headcount <- read.csv(paste0(addir, "headcount.csv"))
afford_total <- left_join(afford, headcount, by = c("stabbr", "group", "year"))

## Weighted net price by income level
afford_total <- afford_total %>%
  group_by(year, stabbr, faminccat) %>%
  summarize(net_price_ave = weighted.mean(x = avecost, w = sector_total_ug, na.rm = TRUE),
            income = max(aveinc),
            inc_pct_pop = max(inc_pct_pop))

## As a percent of income
afford_total$percent <- (afford_total$net_price_ave / afford_total$income) * 100

## Output the aggregated results
write.csv(afford_total, paste0(addir, "afford_total_data_tech.csv"), row.names = FALSE)
|
546f4d7125bc86f24251a5cc4c313311a12bcfb1 | f1ad76fa058a2235d3adb05ccefc6b262570478e | /man/cut_seasonyear.fun.Rd | a260f835f9d03d052c0057ee74d1f66c0795962a | [
"CC-BY-3.0",
"MIT"
] | permissive | Ostluft/rOstluft.plot | 863f733b949dd37e5eaf1d8c1e197596242ef072 | fbed7ce639ae6778e24c13773b73344942ca7dc2 | refs/heads/master | 2022-11-16T12:56:44.199402 | 2020-03-23T11:12:02 | 2020-03-23T11:12:02 | 180,803,285 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 789 | rd | cut_seasonyear.fun.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cutfuns.R
\name{cut_seasonyear.fun}
\alias{cut_seasonyear.fun}
\title{Partial function constructor for cut_season}
\usage{
cut_seasonyear.fun(label = c("yearseason", "year"), labels = NULL)
}
\arguments{
\item{label}{choice between \code{c("yearseason", "year")}. \code{"yearseason"} will combine
the year and the output from \code{\link[=cut_season]{cut_season()}}, \code{"year"} will return only the
adjusted year.}
\item{labels}{forwarded to \code{\link[=cut_season]{cut_season()}}}
}
\value{
Partial function of \code{\link[=cut_seasonyear]{cut_seasonyear()}} with x as sole argument
}
\description{
Partial function constructor for cut_season
}
\seealso{
\code{\link[=cut_seasonyear]{cut_seasonyear()}}
}
|
e68c89ffcb8d49a2f4518519991f0bf00227afc6 | 7a0e657bbb60cc1bea50a1cb9c9222adc5c6aa4f | /mapped/R_filter_by_genome_location.R | d12c169af8fe677d89dccbaf8552f1854ef71a37 | [] | no_license | twpierson/nres721_genome | 7d6ebd7bc4fce044a1403a244275eccc9d17db94 | 179d756751343b2bd3e9064377d7dd02eb94324a | refs/heads/master | 2020-07-13T22:32:54.163749 | 2019-10-30T18:41:36 | 2019-10-30T18:41:36 | 205,169,158 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,343 | r | R_filter_by_genome_location.R | # navigate to directory
setwd("/Volumes/G-DRIVE/Dropbox/Teaching/2019_NRES_721/nres721_genome/mapped")
# read in BAM files
Brid_R1R2_BAM <- readLines("Ubrucei.txt")
Cala_R1R2_BAM <- readLines("../mapped/Cala_R1R2_mapped.txt")
Cala_male_R1R2_BAM <- readLines("../mapped/Cala_male_R1R2_mapped.txt")
# mine useful information
Brid_R1R2_seqs <- matrix(nrow=length(Brid_R1R2_BAM),ncol=4)
for(i in 1:length(Brid_R1R2_BAM)){
X <- strsplit(Brid_R1R2_BAM[i],"\t")[[1]]
Xn <- nchar(X[10])
Brid_R1R2_seqs[i,] <- c(X[c(1,3,10)],Xn)
}
Cala_R1R2_seqs <- matrix(nrow=length(Cala_R1R2_BAM),ncol=4)
for(i in 1:length(Cala_R1R2_BAM)){
X <- strsplit(Cala_R1R2_BAM[i],"\t")[[1]]
Xn <- nchar(X[10])
Cala_R1R2_seqs[i,] <- c(X[c(1,3,10)],Xn)
}
Cala_male_R1R2_seqs <- matrix(nrow=length(Cala_male_R1R2_BAM),ncol=4)
for(i in 1:length(Cala_male_R1R2_BAM)){
X <- strsplit(Cala_male_R1R2_BAM[i],"\t")[[1]]
Xn <- nchar(X[10])
Cala_male_R1R2_seqs[i,] <- c(X[c(1,3,10)],Xn)
}
# keep only loci that mapped
Brid_R1R2_seqMatching <- Brid_R1R2_seqs[Brid_R1R2_seqs[,2]!="*",]
Xn <- as.numeric(Brid_R1R2_seqMatching[,4])
Brid_R1R2_seqMatching <- as.data.frame(Brid_R1R2_seqMatching[,-4])
Brid_R1R2_seqMatching <- data.frame(Brid_R1R2_seqMatching,Xn)
names(Brid_R1R2_seqMatching) <- c("locus","genome","sequence","length")
Cala_R1R2_seqMatching <- Cala_R1R2_seqs[Cala_R1R2_seqs[,2]!="*",]
Xn <- as.numeric(Cala_R1R2_seqMatching[,4])
Cala_R1R2_seqMatching <- as.data.frame(Cala_R1R2_seqMatching[,-4])
Cala_R1R2_seqMatching <- data.frame(Cala_R1R2_seqMatching,Xn)
names(Cala_R1R2_seqMatching) <- c("locus","genome","sequence","length")
Cala_male_R1R2_seqMatching <- Cala_male_R1R2_seqs[Cala_male_R1R2_seqs[,2]!="*",]
Xn <- as.numeric(Cala_male_R1R2_seqMatching[,4])
Cala_male_R1R2_seqMatching <- as.data.frame(Cala_male_R1R2_seqMatching[,-4])
Cala_male_R1R2_seqMatching <- data.frame(Cala_male_R1R2_seqMatching,Xn)
names(Cala_male_R1R2_seqMatching) <- c("locus","genome","sequence","length")
dim(Brid_R1R2_seqMatching)[1]/dim(Brid_R1R2_seqs)[1] # >98% (of matches kept; not of loci)
dim(Cala_R1R2_seqMatching)[1]/dim(Cala_R1R2_seqs)[1] # >99% (of matches kept; not of loci)
dim(Cala_male_R1R2_seqMatching)[1]/dim(Cala_male_R1R2_seqs)[1] # >99% (of matches kept; not of loci)
# remove loci that map to more than one contig
# first, remove duplicate rows where a locus maps to the same contig twice
Brid_R1R2_seqMatching_nodups <- Brid_R1R2_seqMatching[!duplicated(Brid_R1R2_seqMatching[,1:2]),]
dim(Brid_R1R2_seqMatching_nodups)[1]/dim(Brid_R1R2_seqMatching)[1] # got rid of 62% of rows
# then, remove rows with duplicate locus names
# (because now, duplicate locus names occur only when a locus mapped to more than one contig)
Brid_R1R2_seqMatching_nodupsnodups <- Brid_R1R2_seqMatching_nodups[!(duplicated(Brid_R1R2_seqMatching_nodups[,1]) | duplicated(Brid_R1R2_seqMatching_nodups[,1], fromLast = TRUE)), ]
dim(Brid_R1R2_seqMatching_nodupsnodups)[1]/dim(Brid_R1R2_seqMatching_nodups)[1] # got rid of ~15% of loci
# remove loci that map to more than one contig
# first, remove duplicate rows where a locus maps to the same contig twice
Cala_R1R2_seqMatching_nodups <- Cala_R1R2_seqMatching[!duplicated(Cala_R1R2_seqMatching[,1:2]),]
dim(Cala_R1R2_seqMatching_nodups)[1]/dim(Cala_R1R2_seqMatching)[1] # got rid of 58% of rows
# then, remove rows with duplicate locus names
# (because now, duplicate locus names occur only when a locus mapped to more than one contig)
Cala_R1R2_seqMatching_nodupsnodups <- Cala_R1R2_seqMatching_nodups[!(duplicated(Cala_R1R2_seqMatching_nodups[,1]) | duplicated(Cala_R1R2_seqMatching_nodups[,1], fromLast = TRUE)), ]
dim(Cala_R1R2_seqMatching_nodupsnodups)[1]/dim(Cala_R1R2_seqMatching_nodups)[1] # got rid of ~1% of loci
# remove loci that map to more than one contig
# first, remove duplicate rows where a locus maps to the same contig twice
Cala_male_R1R2_seqMatching_nodups <- Cala_male_R1R2_seqMatching[!duplicated(Cala_male_R1R2_seqMatching[,1:2]),]
dim(Cala_male_R1R2_seqMatching_nodups)[1]/dim(Cala_male_R1R2_seqMatching)[1] # got rid of 58% of rows
# then, remove rows with duplicate locus names
# (because now, duplicate locus names occur only when a locus mapped to more than one contig)
Cala_male_R1R2_seqMatching_nodupsnodups <- Cala_male_R1R2_seqMatching_nodups[!(duplicated(Cala_male_R1R2_seqMatching_nodups[,1]) | duplicated(Cala_male_R1R2_seqMatching_nodups[,1], fromLast = TRUE)), ]
dim(Cala_male_R1R2_seqMatching_nodupsnodups)[1]/dim(Cala_male_R1R2_seqMatching_nodups)[1] # got rid of ~1% of loci
# --- Count unique contigs hit per species/assembly and eyeball distributions ---
# get number of unique contigs matched
numUniqContigs_Brid <- length(unique(Brid_R1R2_seqMatching_nodupsnodups$genome)) #766
UniqContigs_Brid <- unique(Brid_R1R2_seqMatching_nodupsnodups$genome)
# plot all (loci per contig/scaffold, pygmy rabbit)
barplot(table(Brid_R1R2_seqMatching_nodupsnodups$genome))
# plot only chromosomes
# ("NC_" = RefSeq complete-molecule accession, i.e. an assembled chromosome --
#  see the NCBI reference cited further down)
barplot(table(as.character(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)])))
# same counts for the (female) coyote assembly
numUniqContigs_Cala <- length(unique(Cala_R1R2_seqMatching_nodupsnodups$genome)) #182
UniqContigs_Cala <- unique(Cala_R1R2_seqMatching_nodupsnodups$genome)
# plot all
barplot(table(Cala_R1R2_seqMatching_nodupsnodups$genome))
# plot only chromosomes
barplot(table(as.character(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)])))
# how many map to x chromosome? (NC_006621.3 -- presumably the X accession in
# this assembly; TODO confirm against the assembly report)
length(Cala_R1R2_seqMatching_nodupsnodups$genome[Cala_R1R2_seqMatching_nodupsnodups$genome=="NC_006621.3"])
# how many map to a chromosome? (male assembly uses "CM" GenBank-style
# chromosome accessions rather than "NC_" -- presumably; TODO confirm)
length(Cala_male_R1R2_seqMatching_nodupsnodups$locus[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])
# same counts for the male coyote assembly
numUniqContigs_Cala_male <- length(unique(Cala_male_R1R2_seqMatching_nodupsnodups$genome)) #182
UniqContigs_Cala_male <- unique(Cala_male_R1R2_seqMatching_nodupsnodups$genome)
# plot all
barplot(table(Cala_male_R1R2_seqMatching_nodupsnodups$genome))
# plot only chromosomes
barplot(table(as.character(Cala_male_R1R2_seqMatching_nodupsnodups$genome[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])))
# how many map to x chromosome?
length(Cala_male_R1R2_seqMatching_nodupsnodups$genome[Cala_male_R1R2_seqMatching_nodupsnodups$genome=="CM016469.1"]) # 873
# how many map to y chromosome?
length(Cala_male_R1R2_seqMatching_nodupsnodups$genome[Cala_male_R1R2_seqMatching_nodupsnodups$genome=="CM016470.1"]) # 49
# --- Build a ~20K-locus subset for Cala: keep ALL X/Y loci, fill the rest with
# --- a random draw of autosomal chromosome-mapped loci ---
# export subset of loci for Cala
Cala_X_Chrom <- as.character(Cala_male_R1R2_seqMatching_nodupsnodups$locus[grep("CM016469.1", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])
Cala_Y_Chrom <- as.character(Cala_male_R1R2_seqMatching_nodupsnodups$locus[grep("CM016470.1", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])
Cala_All_Chrom <- as.character(Cala_male_R1R2_seqMatching_nodupsnodups$locus[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])
# autosomal loci = chromosome-mapped loci minus the sex-chromosome loci
Cala_Non_Sex_Chrom <- setdiff(Cala_All_Chrom, c(Cala_X_Chrom,Cala_Y_Chrom))
# NOTE(review): sample() is used without set.seed(), so this subset is not
# reproducible across runs -- consider seeding if the locus list must be stable
Cala_Non_Sex_Chrom_Subset <- sample(Cala_Non_Sex_Chrom, size = 20000 - length(c(Cala_X_Chrom,Cala_Y_Chrom)), replace = FALSE)
Cala_Subset <- c(Cala_X_Chrom, Cala_Y_Chrom, Cala_Non_Sex_Chrom_Subset)
# sanity check: should be exactly 20000
length(Cala_Subset)
# --- Export kept-locus lists (one locus name per line, no header/quotes) ---
# export subset of loci for Brid
# Brid has too few chromosome-mapped loci to subsample, so keep them all
Brid_All_Chrom <- Brid_R1R2_seqMatching_nodupsnodups$locus[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)]
# Brid_Subset <- sample(Brid_All_Chrom, size = 20000, replace = FALSE) # too few
# write loci kept
write.table(Cala_Subset, "Cala_20K_Loci.txt",
row.names = FALSE, col.names = FALSE, quote = FALSE)
write.table(Brid_All_Chrom, "Brid_15K_Loci.txt",
row.names = FALSE, col.names = FALSE, quote = FALSE)
# --- Figure: loci per chromosome, 3 stacked panels (coyote female assembly,
# --- coyote male assembly, pygmy rabbit) ---
# from: https://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.refseq_accession_numbers_and_mole/
# "NC" = Complete genomic molecule, usually reference assembly
# "NW" = Contig or scaffold, primarily WGSa
pdf(file = "3RAD_loci_per_chromosome.pdf", height = 12)
par(mfrow = c(3,1), mar = c(7,4,4,2) + 0.1)
# NOTE(review): barplot(table(...)) orders bars by sorted table names, while the
# axis() calls below label ticks with unique(...) (first-appearance order).
# If those orders ever differ the tick labels will not match the bars -- verify.
# Panel 1: female coyote assembly; xlim = c(0,40) is hard-coded to fit the bars
barplot(table(as.character(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)])),
col = "cornflowerblue", xlab = "", ylab = "",
main = "coyotes", xaxt = 'n', space = 0, xlim = c(0,40))
# manual x-axis: one tick per chromosome, centered on each bar (space = 0)
axis (side = 1, labels = unique(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)]),
las = 2, at = 0.5+c(0:c(length(unique(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)]))-1)),
cex.axis = 0.5)
mtext(side = 1, text = "Chromosome", line = 5)
mtext(side = 2, text = "Number of Loci", line = 2.5)
# Panel 2: male coyote assembly (CM accessions)
# NOTE(review): this panel is also titled "coyotes" -- consider distinguishing
# it from panel 1 (e.g. "coyotes (male assembly)")
barplot(table(as.character(Cala_male_R1R2_seqMatching_nodupsnodups$genome[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)])),
col = "cornflowerblue", xlab = "", ylab = "",
main = "coyotes", xaxt = 'n', space = 0)
axis (side = 1, labels = unique(Cala_male_R1R2_seqMatching_nodupsnodups$genome[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)]),
las = 2, at = 0.5+c(0:c(length(unique(Cala_male_R1R2_seqMatching_nodupsnodups$genome[grep("CM", Cala_male_R1R2_seqMatching_nodupsnodups$genome)]))-1)),
cex.axis = 0.5)
mtext(side = 1, text = "Chromosome", line = 5)
mtext(side = 2, text = "Number of Loci", line = 2.5)
# Panel 3: pygmy rabbit (NC accessions)
barplot(table(as.character(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)])),
col = "orange", xlab = "", ylab = "",
main = "pygmy rabbits", xaxt = 'n', space = 0)
axis (side = 1, labels = unique(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)]),
las = 2, at = 0.5+c(0:c(length(unique(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)]))-1)),
cex.axis = 0.5)
mtext(side = 1, text = "Chromosome", line = 5)
mtext(side = 2, text = "Number of Loci", line = 2.5)
dev.off()
# --- Figure: loci per contig/scaffold (ALL contigs), 2 panels, with a bracket
# --- on the x-axis marking the span occupied by the chromosome-level bars ---
pdf(file = "3RAD_loci_per_contig.pdf")
par(mfrow = c(2,1))
# Panel 1: coyote (female assembly), every contig/scaffold
barplot(table(as.character(Cala_R1R2_seqMatching_nodupsnodups$genome)),
col = "cornflowerblue", xlab = "", ylab = "",
main = "coyotes", xaxt = 'n', space = 0)
# two blank-labeled ticks bracketing the chromosome ("NC_") bars
# (assumes NC_ accessions sort before the scaffold accessions in table() order,
# so the chromosome bars occupy the left-most positions -- TODO confirm)
axis (side = 1, labels = c("",""),
las = 2, at = 0.5+c(0,c(length(unique(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)]))-1)),
cex.axis = 0.5, tick = TRUE)
# tick-less "chrom." label centered between the two bracket ticks
axis (side = 1, labels = c("chrom."),
las = 1, at = mean(c(0,c(length(unique(Cala_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Cala_R1R2_seqMatching_nodupsnodups$genome)]))-1))),
cex.axis = 1, tick = FALSE)
mtext(side = 1, text = "Contig/Scaffold", line = 2)
mtext(side = 2, text = "Number of Loci", line = 2.5)
# Panel 2: pygmy rabbit, same layout
barplot(table(as.character(Brid_R1R2_seqMatching_nodupsnodups$genome)),
col = "orange", xlab = "", ylab = "",
main = "pygmy rabbits", xaxt = 'n', space = 0)
axis (side = 1, labels = c("",""),
las = 2, at = 0.5+c(0,c(length(unique(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)]))-1)),
cex.axis = 0.5, tick = TRUE)
axis (side = 1, labels = c("chrom."),
las = 1, at = mean(c(0,c(length(unique(Brid_R1R2_seqMatching_nodupsnodups$genome[grep("NC_", Brid_R1R2_seqMatching_nodupsnodups$genome)]))-1))),
cex.axis = 1, tick = FALSE)
mtext(side = 1, text = "Contig/Scaffold", line = 2)
mtext(side = 2, text = "Number of Loci", line = 2.5)
dev.off()
a70bf11cc1cfa4605422fc4138afb65b91cfd953 | 6fd5933e6ebe6240d05d37d45bb12f1e6ce7dc24 | /scripts/make_node_table.R | 77c45628686bbc146e72b16b2679f58e709a2ea8 | [] | no_license | mjfritz/TilS_RNAseq | cf3c637dd7e17901e8bd77ab41ebee1395cfe849 | 3bec75757ce3f49070c518d6d6659ea0810f8bf4 | refs/heads/master | 2022-11-10T05:21:53.249176 | 2020-06-17T21:19:27 | 2020-06-17T21:19:27 | 271,975,274 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 608 | r | make_node_table.R | library(igraph)
make_node_table <- function(edgetable) {
  # Build a node table from an edge list and annotate it with igraph
  # centrality statistics.
  #
  # Args:
  #   edgetable: data.frame with columns 'source' and 'target', one row per
  #              edge. Assumed to hold character vertex names -- if the
  #              columns were factors, c() / as.matrix() behavior differs;
  #              TODO confirm upstream.
  #
  # Returns:
  #   data.frame with columns label, id, Degree, Betweenness,
  #   Eigencentrality (one row per unique vertex, first-appearance order).

  # Every vertex appearing as either endpoint, in first-appearance order.
  # seq_along() (not 1:length()) stays correct for a zero-row edge table.
  nodes <- unique(c(edgetable$source, edgetable$target))
  nodetable <- data.frame(
    label = nodes,
    id = seq_along(nodes),
    stringsAsFactors = FALSE  # keep labels as character on R < 4.0 too
  )

  # Build the graph; graph_from_edgelist() takes a two-column matrix of
  # endpoint names.
  mynet <- graph_from_edgelist(as.matrix(edgetable[, c("source", "target")]))

  # Centrality measures, requested by vertex name so values line up with
  # nodetable rows regardless of igraph's internal vertex order.
  nodetable$Degree <- degree(mynet, v = nodetable$label)
  nodetable$Betweenness <- betweenness(mynet, v = nodetable$label)
  eigencen <- eigen_centrality(mynet)$vector
  nodetable$Eigencentrality <- eigencen[match(nodetable$label, names(eigencen))]

  nodetable
}
|
ca56c8fea8eb2505f4d17c258d2d0c6006eeb227 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/QRM/examples/Credit.Rd.R | 1219713eb6f571711890ce2e5d439aa7b3bbfedf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,808 | r | Credit.Rd.R | library(QRM)
### Name: Credit
### Title: Credit Risk Modelling
### Aliases: Credit cal.beta cal.claytonmix cal.probitnorm dclaytonmix
### pclaytonmix rclaytonmix dprobitnorm pprobitnorm rprobitnorm
### rlogitnorm rtcopulamix fit.binomial fit.binomialBeta
### fit.binomialLogitnorm fit.binomialProbitnorm momest rbinomial.mixture
### Keywords: models
### ** Examples
## calibrating models
## Fit three exchangeable mixing distributions (probit-normal, beta,
## Clayton-copula mixture) so that each matches the same two moments:
## pi.B (default probability) and pi2.B (joint default probability) --
## per the QRM package's cal.* calibration helpers.
pi.B <- 0.2
pi2.B <- 0.05
probitnorm.pars <- cal.probitnorm(pi.B, pi2.B)
probitnorm.pars
beta.pars <- cal.beta(pi.B, pi2.B)
beta.pars
claytonmix.pars <- cal.claytonmix(pi.B, pi2.B)
claytonmix.pars
## evaluation grid on (0, 0.25) for the mixing-variable CDFs
q <- (1:1000) / 1001
q <- q[q < 0.25]
p.probitnorm <- pprobitnorm(q, probitnorm.pars[1],
probitnorm.pars[2])
p.beta <- pbeta(q, beta.pars[1], beta.pars[2])
p.claytonmix <- pclaytonmix(q, claytonmix.pars[1],
claytonmix.pars[2])
## compare tail probabilities P(Q > q) of the three calibrated models
## on a log scale; common y-range so the curves are directly comparable
scale <- range((1 - p.probitnorm), (1 - p.beta), (1 - p.claytonmix))
plot(q, (1 - p.probitnorm), type = "l", log = "y", xlab = "q",
ylab = "P(Q > q)",ylim=scale)
lines(q, (1 - p.beta), col = 2)
lines(q, (1 - p.claytonmix), col = 3)
legend("topright", c("Probit-normal", "Beta", "Clayton-Mixture"),
lty=rep(1,3),col = (1:3))
## Clayton Mix
## Calibrate the Clayton mixture to a much smaller default probability and
## evaluate its density on the same (0, 0.25) grid as above.
pi.B <- 0.0489603
pi2.B <- 0.003126529
claytonmix.pars <- cal.claytonmix(pi.B, pi2.B)
claytonmix.pars
q <- (1:1000) / 1001
q <- q[q < 0.25]
d.claytonmix <- dclaytonmix(q, claytonmix.pars[1], claytonmix.pars[2])
head(d.claytonmix)
## SP Data
## Fit the QRM binomial(-mixture) models to the bundled S&P default counts.
data(spdata.raw)
## NOTE(review): attach() is generally discouraged (it modifies the search
## path); with(spdata.raw, ...) or spdata.raw$... would be safer. Kept as-is
## since this mirrors the package example.
attach(spdata.raw)
## observed B-rated default rate per period
BdefaultRate <- Bdefaults / Bobligors
## Binomial Model
mod1a <- fit.binomial(Bdefaults, Bobligors)
## Binomial Logitnorm Model
mod1b <- fit.binomialLogitnorm(Bdefaults, Bobligors)
## Binomial Probitnorm Model
mod1c <- fit.binomialProbitnorm(Bdefaults, Bobligors)
## Binomial Beta Model
mod1d <- fit.binomialBeta(Bdefaults, Bobligors);
## Moment estimates for default probabilities
momest(Bdefaults, Bobligors)
## first two moment estimates: default probability and joint default probability
pi.B <- momest(Bdefaults, Bobligors)[1]
pi2.B <- momest(Bdefaults, Bobligors)[2]
## Probitnorm
## calibrate a probit-normal mixing distribution to those moments and
## evaluate its density on (0, 0.25)
probitnorm.pars <- cal.probitnorm(pi.B, pi2.B)
q <- (1:1000)/1001
q <- q[ q < 0.25]
d.probitnorm <- dprobitnorm(q, probitnorm.pars[1], probitnorm.pars[2])
## Student-t VaR at a ladder of confidence levels; sigma scales a 20%
## annual volatility to a daily figure on a 10000 position (250 trading days)
p <- c(0.90,0.95,0.975,0.99,0.995,0.999,0.9999,0.99999,0.999999)
sigma <- 0.2 * 10000 / sqrt(250)
VaR.t4 <- qst(p, df = 4, sd = sigma, scale = TRUE)
VaR.t4
detach(spdata.raw)
## Binomial Mixture Models
## Simulate default counts from beta- and probitnorm-mixed binomials
## calibrated to the same two moments.
## NOTE(review): `pi` here masks base R's constant pi for the rest of the
## session/example -- consider a different name (e.g. pi1).
pi <- 0.04896
pi2 <- 0.00321
beta.pars <- cal.beta(pi, pi2)
probitnorm.pars <- cal.probitnorm(pi, pi2)
## n = number of simulated portfolios, m = 500 obligors in each
n <- 1000
m <- rep(500, n)
mod2a <- rbinomial.mixture(n, m, "beta", shape1 = beta.pars[1],
shape2 = beta.pars[2])
mod2b <- rbinomial.mixture(n, m, "probitnorm",
mu = probitnorm.pars[1],
sigma = probitnorm.pars[2])
|
c32a1d9357a7cbd68bbbdcdf0416d07f5aadb540 | 5202f628f698231d18bf70fc5fcb148842551398 | /RDataTools/dplyr/tutorial/Joins.R | 9798f180ba799cf7017211046bc45d5a0852d4ac | [] | no_license | statisticallyfit/R | 16d956c64b8abf0f4ef0840e97d13e772f70cbb3 | 06cd723eaf374af1b6846f10abd3440100607671 | refs/heads/master | 2020-12-14T18:11:58.430436 | 2018-11-23T08:01:24 | 2018-11-23T08:01:24 | 39,065,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | Joins.R | # https://cran.r-project.org/web/packages/dplyr/vignettes/two-table.html
library(dplyr)
library(nycflights13)
# Keep an untouched copy of the full flights table before trimming columns.
flightsOriginal <- flights

# Drop unimportant variables: keep only the columns used in the join examples.
flights <- select(flights, year:day, hour, origin, dest, tailnum, carrier)

# Mutating join: add airline names, matching on the shared "carrier" key
# (left_join() picks the common column automatically when `by` is omitted).
left_join(flights, airlines)

# Compare the trimmed and original column sets.
names(flights)
names(flightsOriginal)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.