blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2899c0629e0e0f84786fb88bb152b67d9b2e979b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nutshell/examples/consumption.Rd.R
|
5ff28b60b15c1a135e5031c2b18828fffe4b4e03
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 309
|
r
|
consumption.Rd.R
|
# Example script for the 'consumption' dataset (Per capita US Food
# Consumption 1980-2005) shipped with the 'nutshell' package.
library(nutshell)
library(lattice)

# Load the example data set into the workspace.
data(consumption)

# One dot-plot panel per food category; aspect = "xy" picks a banking-to-45
# aspect ratio, and relation = "sliced" gives every panel an equal-length
# value range so slopes are comparable across foods.
dotplot(Amount ~ Year | Food,
        data = consumption,
        aspect = "xy",
        scales = list(relation = "sliced", cex = 0.4))
|
39540eaae3f621003e6c120df78868927f34755a
|
ecd850028010252cd24d7d67aa3ef3b8065bf109
|
/man/Shannon.wmppp.Rd
|
46f0b0532e985663342cc062dc47e2c756fc6c8d
|
[] |
no_license
|
EricMarcon/SpatDiv
|
de98877555e9d92ec7859d4ce18bf5dbb47bf2a1
|
c0346e1105130d18dc8b978a415569f653ae0cf7
|
refs/heads/master
| 2023-05-26T18:42:38.506723
| 2023-05-21T08:42:49
| 2023-05-21T08:42:49
| 111,210,097
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,204
|
rd
|
Shannon.wmppp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/entropart.R
\name{Shannon.wmppp}
\alias{Shannon.wmppp}
\title{Shannon Entropy of a spatialized community}
\usage{
\method{Shannon}{wmppp}(NorP, Correction = "Best", ..., CheckArguments = TRUE)
}
\arguments{
\item{NorP}{A \link{wmppp.object}, with \code{PointType} values as species names.}
\item{Correction}{A string containing one of the possible corrections: see \link{Shannon}.}
\item{...}{Further arguments passed to \link[entropart:Tsallis]{entropart::Tsallis}, \link[entropart:Diversity]{entropart::Diversity}, \link[entropart:Richness]{entropart::Richness}, \link[entropart:Shannon]{entropart::Shannon} or \link[entropart:Simpson]{entropart::Simpson} (S3 methods for class 'AbdVector' or 'numeric')}
\item{CheckArguments}{If \code{TRUE} (default), the function arguments are verified.
Should be set to \code{FALSE} to save time in simulations for example, when the arguments have been checked elsewhere.}
}
\value{
A named number equal to the calculated entropy. The name is that of the bias correction used.
}
\description{
Calculates the Shannon entropy of a probability vector.
}
\examples{
Shannon(Paracou6)
}
|
2b51b5d9484b4e1974c93370e96473838667f21c
|
7eb6dc6f827d37662b3b980649e2961540ef57c6
|
/testcase.r
|
0337a363421a843d4eb060a2dfb1f48ca8beb359
|
[] |
no_license
|
baile2nm/ProgrammingAssignment2
|
2bbaaa4aa895608f622396e7d36d9031cc8b5651
|
3da666041803413f8b2a27e43fef6548630bc7db
|
refs/heads/master
| 2021-07-29T19:26:25.515814
| 2021-07-19T06:51:54
| 2021-07-21T01:36:17
| 25,451,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
testcase.r
|
## Simple test case for functions in cachematrix.r
source('cachematrix.r')

## Make an invertible cached matrix.
## (Named 'cm' rather than 't' so that base::t() is not shadowed, and
## assigned with '<-' per R convention.)
cm <- makeCacheMatrix()
cm$set(matrix(c(2, 0, 2, 0, 1, 2, 0, 0, 2), 3, 3))

## Verify matrix is set up correctly.
cm$get()

## For the first call, the inverse must be computed; multiplying it by the
## original matrix should yield the 3x3 identity.
inv_first <- cacheSolve(cm)
inv_first %*% cm$get()

## On the second call, the cached value should be used and the product
## should again be the identity.
inv_cached <- cacheSolve(cm)
inv_cached %*% cm$get()
|
1b1c48e732753ad794c042b293757502c90a2533
|
f2c739195928f207ab848063feefd0e69defd2cc
|
/R Studio House Prediction - RZ.R
|
2a2f171623fa652610f00f07efad28cefd855c24
|
[] |
no_license
|
OlegRyzhkov2020/house_price_prediction
|
fdcc2e37a26350de7897112826e01e2adbf217b6
|
491be6f9dc1a5d8116045e3a71f798e8aa91ddf8
|
refs/heads/master
| 2023-01-24T21:03:15.885676
| 2020-12-05T03:32:03
| 2020-12-05T03:32:03
| 310,643,052
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
R Studio House Prediction - RZ.R
|
# Quick exploratory model: sale price on zoning, street type and lot area
# (result auto-printed when run at top level).
lm(data$SalePrice ~ data$MSZoning + data$Street + data$LotArea)

# Full model: regress SalePrice on every other column of 'data'.
full.model <- lm(SalePrice ~ ., data = data)

library(MASS)

# Forward selection must start from the empty model and be given an upper
# scope; the original stepAIC(full.model, direction = "forward") was a
# no-op because a full model has no terms left to add.
null.model <- lm(SalePrice ~ 1, data = data)
fwd.step.model <- stepAIC(null.model, direction = "forward",
                          scope = list(lower = null.model,
                                       upper = full.model),
                          trace = FALSE)
summary(fwd.step.model)

# BIC of the selected model: extractAIC with k = log(n) applies the BIC
# penalty. ('n', the number of observations, was previously undefined.)
n <- nrow(data)
AIC.base <- extractAIC(fwd.step.model, k = log(n))
signif(AIC.base, digits = 6)

# Backward elimination correctly starts from the full model.
bwd.step.model <- stepAIC(full.model, direction = "backward", trace = FALSE)
summary(bwd.step.model)
|
7e9d49e18517e128e1b9ab9c7126db2f21d9801f
|
87a0961c529b257c89b9e91ca1300068f4918dc3
|
/scripts/analysis.R
|
02d295876fb6c27228a0458873d9375900532aef
|
[] |
no_license
|
g-tierney/polarization_migration_maps
|
a6a41081d457a0636ae1ddbac169bbac9d364b77
|
2c096a3ac4d9b180859d0baeef9404787c706bbe
|
refs/heads/master
| 2021-01-21T21:19:52.528438
| 2018-01-24T18:17:14
| 2018-01-24T18:17:14
| 94,823,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,092
|
r
|
analysis.R
|
# Script to load and format election data
#   1) Load data
#   2) Shape into panel of county-year level data
# (library() is used instead of require() so a missing package fails loudly;
# the duplicate classInt load has been removed.)
library(openxlsx)
library(dplyr)
library(maptools)      # loads sp library too
library(RColorBrewer)  # creates nice color schemes
library(classInt)      # finds class intervals for continuous variables
library(rgeos)
source("scripts/functions.R")

#################
### Load Data ###
#################

# Load vote data.
election_data <- read.xlsx("election_data/county_presidential_2004_2012.xlsx",
                           sheet = "us-presidential-election-county",
                           colNames = TRUE)
# AK has weird fips codes that become NA on numeric coercion, so Alaska is
# dropped below.
# election_data$fips[election_data$state == "AK"] <- gsub("AKL","2")
election_data$fips <- as.numeric(election_data$fips)
election_data <- filter(election_data, state != "AK")

# Check correlation between county Dem vote % across years (x=2008, y=2004).
election_wide <- merge(election_data[election_data$year == 2008, ],
                       election_data[election_data$year == 2004, ],
                       by = "fips", all.x = TRUE)
cor(x = election_wide$pct_dem.x, y = election_wide$pct_dem.y)
summary(lm(pct_dem.x ~ pct_dem.y, data = election_wide))

# Seems to be a difference here (x=2008, y=2012).
election_wide <- merge(election_data[election_data$year == 2008, ],
                       election_data[election_data$year == 2012, ],
                       by = "fips", all.x = TRUE)
cor(x = election_wide$pct_dem.x, y = election_wide$pct_dem.y)
summary(lm(pct_dem.x ~ pct_dem.y, data = election_wide))

# Load full migration data.
full_migration <- stack_migration(
  "migration_data/county-to-county-2008-2012-ins-outs-nets-gross.xlsx")

###########################
### Transition Matrices ###
###########################
quintile_mat <- make_trans_mat(full_migration, election_data,
                               5, "quantile", "flow_from_A_to_B_est",
                               election_year = 2008)
quintile_mat
equal_length_mat <- make_trans_mat(full_migration, election_data,
                                   8, "length", "flow_from_A_to_B_est",
                                   election_year = 2008)
equal_length_mat
write.csv(equal_length_mat, file = "output/trans_matrix_equal_length.csv")

#########################
### Weighted Averages ###
#########################
merged_mig_votes <- merge_mig_elec(full_migration, election_data, 2008,
                                   c("fips", "pct_dem"))
merged_mig_votes <- filter(merged_mig_votes,
                           pct_dem_A >= 0 & pct_dem_B >= 0 &
                             !is.na(pct_dem_A) & !is.na(pct_dem_B))
county_state <- select(merged_mig_votes, state_name_A, county_name_A,
                       state_county_code_A, pct_dem_A)
county_state <- unique(county_state)

# Flow-weighted average destination / origin Democratic vote percentage
# across all counties.
sum(merged_mig_votes$pct_dem_A * merged_mig_votes$flow_from_B_to_A_est /
      sum(merged_mig_votes$flow_from_B_to_A_est, na.rm = TRUE), na.rm = TRUE)
sum(merged_mig_votes$pct_dem_B * merged_mig_votes$flow_from_A_to_B_est /
      sum(merged_mig_votes$flow_from_A_to_B_est, na.rm = TRUE), na.rm = TRUE)

# Flow-weighted average difference (destination minus origin) across
# all counties.
merged_mig_votes$dif_pct_dem_B_less_A <-
  merged_mig_votes$pct_dem_B - merged_mig_votes$pct_dem_A
sum(merged_mig_votes$dif_pct_dem_B_less_A *
      merged_mig_votes$flow_from_A_to_B_est /
      sum(merged_mig_votes$flow_from_A_to_B_est, na.rm = TRUE), na.rm = TRUE)

# Number moving to a more / less Democratic county.
num_more_dem <- sum(merged_mig_votes$flow_from_A_to_B_est[
  merged_mig_votes$dif_pct_dem_B_less_A > 0])
num_less_dem <- sum(merged_mig_votes$flow_from_A_to_B_est[
  merged_mig_votes$dif_pct_dem_B_less_A < 0])
c(num_more_dem, num_less_dem)
num_more_dem / (num_less_dem + num_more_dem) * 100

# Find average destination pct_dem (A to B).
county_state$average_to_pct_dem <- sapply(
  county_state$state_county_code_A, FUN = average_to_value,
  mean_var = "pct_dem_B", data = merged_mig_votes,
  weight_var = "flow_from_A_to_B_est")

# Find average difference between destination and origination pct_dem
# (A to B).
county_state$dif_dest_home <- sapply(
  county_state$state_county_code_A, FUN = average_to_value,
  mean_var = "dif_pct_dem_B_less_A", data = merged_mig_votes,
  weight_var = "flow_from_A_to_B_est")

####################
### Plot Results ###
####################

# Regression of average destination share on origin share.
# (Stored as 'fit' rather than 'lm' so base::lm is not shadowed.)
fit <- lm(average_to_pct_dem ~ pct_dem_A, data = county_state)
summary(fit)
slope <- round(coef(fit)[2], 2)

# Scatter plot of average destination share with best-fit and unity lines;
# defined once and reused for both the pdf and jpeg devices.
draw_scatter <- function() {
  plot(county_state$pct_dem_A, county_state$average_to_pct_dem,
       main = "Figure 1\nDestination and Origin Democratic Vote Percentages \nCounty Level",
       xlab = "Origin Democratic Vote Share",
       ylab = "Average Destination Democratic Vote Share")
  abline(fit, col = "red", lwd = 2)
  abline(a = 0, b = 1, col = "blue", lwd = 2)
  legend("bottomright",
         legend = c(paste0("Regression (slope of ", slope, ")"),
                    "Unity (slope of 1)"),
         col = c("red", "blue"), lwd = 2)
}
pdf("output/scatter_plot.pdf")
draw_scatter()
dev.off()
jpeg("output/scatter_plot.jpeg")
draw_scatter()
dev.off()

# Map results.
# NOTE(review): readShapePoly() is deprecated in maptools; consider
# migrating to sf::st_read() when the tool chain is updated.
county.shp <- readShapePoly("county_shapefile/cb_2013_us_county_500k.shp")
county.shp$fips <- as.numeric(paste0(county.shp$STATEFP, county.shp$COUNTYFP))
county.shp <- merge(county.shp, county_state,
                    by.y = c("state_county_code_A"),
                    by.x = c("fips"), all.x = TRUE)

# County map figures (variable, title, fixed median, subtitle), used for
# both the combined pdf and the individual jpegs.
figure_specs <- list(
  list(var = "pct_dem_A",
       title = "Figure 2\nDemocratic Vote Share of County",
       med = 50,
       sub = "Data are from 2008 presidential election."),
  list(var = "average_to_pct_dem",
       title = "Figure 3\nAverage Destination Democratic Vote Share for Out-Migration",
       med = 50,
       sub = "Data are from 2008 presidential election and 2008-2012 migration."),
  list(var = "dif_dest_home",
       title = "Figure 4\nDifference Between Destination and Home Democratic Vote Share",
       med = 0,
       sub = "Data are from 2008 presidential election and 2008-2012 migration.")
)

# All three county maps in one pdf.
pdf("output/maps.pdf")
for (spec in figure_specs) {
  plot_county_results(spec$var, spec$title, fixed_median = spec$med,
                      sub_title = spec$sub)
}
dev.off()

# One jpeg per map: output/figure2.jpeg .. output/figure4.jpeg.
for (k in seq_along(figure_specs)) {
  spec <- figure_specs[[k]]
  jpeg(paste0("output/figure", k + 1, ".jpeg"))
  plot_county_results(spec$var, spec$title, fixed_median = spec$med,
                      sub_title = spec$sub)
  dev.off()
}
|
4cae698d4debdc40366336df29b0b18b4561aed0
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#122.A#48.c#.w#9.s#52.asp/ctrl.e#1.a#3.E#122.A#48.c#.w#9.s#52.asp.R
|
9ec57dcea7921f6464ebaad1d7d2eed162f1d81b
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#122.A#48.c#.w#9.s#52.asp.R
|
bdb0a73eb60314dfb69a0983f15e6714 ctrl.e#1.a#3.E#122.A#48.c#.w#9.s#52.asp.qdimacs 8953 26350
|
bcd2c3aedfe3f0d7780c360a18ea5ebc74871fbc
|
1ab7fa0da198f73b1594162cb3e910fd5af42355
|
/Fig1/Data_preprocessing/Data_formatting_fromRaw.R
|
e6339706afc1ae4909e161583d7c20615ffa29d1
|
[] |
no_license
|
mengysun/False_positive_ParTi
|
324d7bb3e7f725fb52f3db0a25f061b9f216f05b
|
fe49d470de08f6c9f14d3a4257c475294733f9f2
|
refs/heads/main
| 2023-02-08T19:16:40.606740
| 2021-01-03T05:08:05
| 2021-01-03T05:08:05
| 301,218,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
Data_formatting_fromRaw.R
|
library(dplyr)

# Read the raw deleteome expression table (tab-separated, with header row).
raw <- read.table(file = "deleteome_all_mutants_controls.txt", sep = "\t",
                  header = TRUE, quote = "", stringsAsFactors = FALSE)

# Columns 1..4455 hold the deletion-line data; the first data row labels
# each column as an "M" or "A" measurement.
del_cols <- raw[, 1:4455]
ma_label <- as.character(del_cols[1, ])

# Split the value rows (2..6183) into the A-value and M-value tables,
# then transpose so each row corresponds to one deletion line.
a_vals <- as.data.frame(t(del_cols[2:6183, ma_label == "A"]))
m_vals <- as.data.frame(t(del_cols[2:6183, ma_label == "M"]))

# Write both tables out as headerless, unquoted CSV files.
write.table(a_vals, file = "dat_all_exp1.csv", sep = ",",
            col.names = FALSE, row.names = FALSE, quote = FALSE)
write.table(m_vals, file = "dat_all_exp2.csv", sep = ",",
            col.names = FALSE, row.names = FALSE, quote = FALSE)
|
269816315258b1b0ed60cdddf77b7717aa05e6f2
|
33b424627a5b4d7f7cc045043682b6e1955bbfc7
|
/snp2sim_analysis/vinaAnalysis/weighted_app.R
|
550860d5b5de60dd687ef8a2ef53cad669d53018
|
[] |
no_license
|
mccoymd/snp2sim
|
e24b198fa192d1eec5e4ed539d6fc88a66e1e236
|
f5a48b46a829cfc7c8de0d7528d9875cee9c7dd5
|
refs/heads/master
| 2021-08-09T01:02:20.340214
| 2021-06-16T20:43:57
| 2021-06-16T20:43:57
| 137,887,506
| 2
| 4
| null | 2019-09-11T13:57:22
| 2018-06-19T12:19:08
|
Python
|
UTF-8
|
R
| false
| false
| 8,128
|
r
|
weighted_app.R
|
# Shiny app for exploring snp2sim weighted drug-binding results.
library(shiny)
library(ggplot2)
library(highcharter)
library(viridis)
library(plotly)

# Command line: args[1] = path of the docking results table,
# args[2] = "True"/"False" flag selecting the per-trial weighting mode.
args <- commandArgs(trailingOnly = TRUE)

# Whitespace-separated results table with a header row.
table <- read.table(args[1], header = TRUE, sep = "")

# Distinct scaffolds, ligands and variants present in the results.
pdbSet <- unique(table$scaffold)
ligSet <- unique(table$ligand)
varSet <- unique(table$variant)

data <- table
# Keep only the top-ranked pose of every docking run.
fulldata <- data[data$rank == 1, ]
# Branch on the command-line flag: "True" keeps per-trial estimates and
# computes mean/sd error statistics; otherwise weighted affinities are
# summed directly per ligand/variant.
if(args[2] == "True"){
  # Weight each pose's affinity by its scaffold weight, then sum the
  # contributions within each ligand/trial/variant combination.
  fulldata$part_weighted_affinity <- fulldata$affinity * fulldata$weight
  affinities <- aggregate(part_weighted_affinity ~ ligand + trial + variant, fulldata, sum)
  colnames(affinities)[colnames(affinities) == "part_weighted_affinity"] <-
    "weighted_affinity"
  fulldata <- merge(fulldata, affinities, by = c("ligand", "trial", "variant"))
  # Wild-type summary: mean and sd of the weighted affinity per ligand.
  # NOTE(review): 'wtdata' is read here before anything in this branch
  # creates it (only the else-branch builds it from fulldata) -- this path
  # appears to depend on a pre-existing 'wtdata'; verify before relying on
  # the args[2] == "True" mode.
  wtdata.mean <- aggregate(weighted_affinity ~ ligand, wtdata, mean)
  wtdata <- aggregate(weighted_affinity ~ ligand, wtdata, sd)
  wtdata <- merge(wtdata, wtdata.mean, by="ligand")
  # NOTE(review): after the merge the duplicated columns are suffixed
  # "weighted_affinity.x"/"weighted_affinity.y"; these renames look for
  # "affinity.x"/"affinity.y" and so may never match -- confirm the
  # intended column names.
  colnames(wtdata)[colnames(wtdata) == "affinity.x"] <-
    "wtstd"
  colnames(wtdata)[colnames(wtdata) == "affinity.y"] <-
    "wtmeanAffinity"
  fulldata <- merge(fulldata, wtdata, by = "ligand")
  colnames(fulldata)[colnames(fulldata) == "weighted_affinity"] <-
    "absAffinity"
  colnames(fulldata)[colnames(fulldata) == "wtmeanAffinity"] <-
    "wtAffinity"
  # for distribution plots
  fulldata$trialRelEnergy <- fulldata$absAffinity-fulldata$wtAffinity
  distdata <- fulldata
  # Per ligand/variant spread (sd) and mean of the absolute affinity.
  error <- aggregate(absAffinity ~ ligand + variant, fulldata, sd)
  colnames(error)[colnames(error) == "absAffinity"] <- "std"
  fulldata <- merge(fulldata, error, by = c("ligand", "variant"))
  meanval <- aggregate(absAffinity ~ ligand + variant, fulldata, mean)
  colnames(meanval)[colnames(meanval) == "absAffinity"] <- "meanAffinity"
  fulldata <- merge(fulldata, meanval, by = c("ligand", "variant"))
  # Collapse to one row per ligand/variant combination.
  fulldata <- fulldata[!duplicated(fulldata[,c("ligand", "variant", "meanAffinity")]),]
  # Energies relative to wild type, combined error, and percent forms.
  fulldata$relEnergy <- fulldata$meanAffinity-fulldata$wtAffinity
  fulldata$varStd <- fulldata$std
  fulldata$std <- fulldata$wtstd +fulldata$std
  fulldata$perChange <- (fulldata$relEnergy / abs(fulldata$wtAffinity)) * 100
  fulldata$perSD <- (fulldata$std / abs(fulldata$wtAffinity)) * 100
  fulldata <- subset(fulldata, select = -c(rank, rmsd_ub, rmsd_lb, absAffinity, scaffold, trial))
} else {
  # Weight each pose's affinity, split off the wild-type rows, then sum the
  # weighted affinities per ligand (wt) and per ligand/variant (mutants).
  fulldata$weighted_affinity <- fulldata$affinity * fulldata$weight
  wtdata <- subset(fulldata, fulldata$variant == "wt")
  fulldata <- fulldata[fulldata$variant != "wt",]
  #wtdata <- aggregate(affinity ~ ligand, wtdata, min)
  wtdata <- aggregate(weighted_affinity ~ ligand, wtdata, sum)
  colnames(wtdata)[colnames(wtdata) == "weighted_affinity"] <-
    "wtAffinity"
  weighted_avgs <- aggregate(weighted_affinity ~ ligand + variant, fulldata, sum)
  fulldata <- subset(fulldata, select = -c(rank, rmsd_ub, rmsd_lb, weighted_affinity, scaffold, affinity))
  fulldata <- merge(fulldata, wtdata, by = "ligand")
  fulldata <- merge(fulldata, weighted_avgs, by = c("ligand","variant"))
  fulldata <- unique(fulldata)
  # Relative energy and percent change versus wild type.
  fulldata$relEnergy <- fulldata$weighted_affinity - fulldata$wtAffinity
  fulldata$perChange <-
    (fulldata$relEnergy / abs(fulldata$wtAffinity)) * 100
}
# Define UI: a sidebar of plot controls plus a tabset with barchart and
# heatmap views of the weighted binding results.
ui <- fluidPage(
  # Application title
  titlePanel("snp2sim Drug Binding Analysis"),
  sidebarLayout(
    sidebarPanel(
      # Which column maps to the barchart x axis.
      selectInput('x', label = "X Data",
        choices = c("ligand", "variant"),
        selected = "variant"
      ),
      # Which computed column maps to the y axis.
      selectInput('y', label = "Y Data",
        choices = c("relEnergy", "perChange"),
        selected = "relEnergy"
      ),
      # Bar fill grouping; "None" disables the fill aesthetic.
      selectInput('fill', label = "Fill",
        choices = c("None", "ligand", "variant", "library"),
        selected = "ligand"
      ),
      # Facet (small-multiple) grouping; "None" disables faceting.
      selectInput('facety', label = "Group",
        choices = c("None", "ligand", "variant", "library"),
        selected = ""
      ),
      # Multi-select filters for which variants/ligands are displayed.
      selectInput('bargene', "Variant:",
        choices = unique(fulldata$variant),
        multiple=TRUE,
        selectize=TRUE,
        selected =unique(fulldata$variant)),
      selectInput('barlig', "Ligand:",
        choices = unique(fulldata$ligand),
        multiple=TRUE,
        selectize=TRUE,
        selected =unique(fulldata$ligand)),
      # Facet-column slider rendered server-side (see output$numgraphsrow).
      uiOutput('numgraphsrow'),
      # NOTE(review): conditionalPanel conditions are JavaScript evaluated
      # in the browser, where the R object 'args' does not exist -- this
      # panel likely never shows; verify the intended condition.
      conditionalPanel(
        "args[2] == True",
        checkboxInput('errorBars', label = "Show error bars?",
          value = FALSE)
      ),
      downloadButton('download', 'Download figure')
    ),
    # Main panel: interactive barchart (plotly) and heatmap (highcharter).
    mainPanel(
      tabsetPanel(
        id = "drugbinding_plots",
        tabPanel("Barchart",plotlyOutput("plot")),
        tabPanel("Heatmap",highchartOutput("heat"))
      )
    )
  )
)
# Define server logic: renders the facet-column slider, the heatmap, the
# barchart, and the figure download handler.
server <- function(input, output) {
  # Holds the most recently rendered ggplot so the download handler can
  # re-save it with ggsave().
  plots <- reactiveValues()

  # Slider for the number of facet columns, bounded by how many levels of
  # the chosen facet variable are currently selected.
  output$numgraphsrow <- renderUI({
    if(input$facety != "None") {
      if (input$facety == "ligand"){
        sliderInput('numgraphsrow', label = "Number of charts per row",
          min = 1, max = length(input$barlig),
          value = length(unique(fulldata[,input$facety])),
          step = 1)
      } else if(input$facety == "variant"){
        sliderInput('numgraphsrow', label = "Number of charts per row",
          min = 1, max = length(input$bargene),
          value = length(unique(fulldata[,input$facety])),
          step = 1)
      } else {
        sliderInput('numgraphsrow', label = "Number of charts per row",
          min = 1, max = length(unique(fulldata[,input$facety])),
          value = length(unique(fulldata[,input$facety])),
          step = 1)
      }
    }
  })

  # Heatmap of percent change, with a color axis symmetric around zero so
  # equal gains/losses get equal color intensity.
  output$heat <- renderHighchart({
    part <- subset(fulldata, fulldata$variant %in% input$bargene & fulldata$ligand %in% input$barlig)
    minval <- min(part$perChange)
    maxval <- max(part$perChange)
    maxthresh <- max(c(abs(minval), abs(maxval)))
    hchart(part, "heatmap", hcaes(x = variant, y = ligand, value = perChange)) %>%
      hc_colorAxis(stops = color_stops(4, inferno(4)), min = -1*maxthresh, max = maxthresh) %>%
      hc_title(text = paste0("Binding energy of small molecules in ",unique(fulldata$protein)[1]," variants"))
  })

  # Barchart built from the user's x/y/fill/facet selections.
  output$plot <- renderPlotly({
    part <- subset(fulldata, fulldata$variant %in% input$bargene & fulldata$ligand %in% input$barlig)
    if (input$fill != "None"){
      plot <- ggplot(part, aes_string(input$x, input$y, fill = input$fill)) +
        theme_light() +
        theme(text=element_text(size=15)) +
        geom_bar(stat="identity",position=position_dodge()) +
        theme(axis.text.x = element_text(angle = 90))
    }
    else{
      plot <- ggplot(part, aes_string(input$x, input$y)) +
        theme_light() +
        theme(text=element_text(size=15)) +
        geom_bar(stat="identity",position=position_dodge()) +
        labs(y="Binding Affinity Relative to WT (kcal/mol)") +
        theme(axis.text.x = element_text(angle = 90))
    }
    if (input$facety != "None"){
      plot <- plot + facet_wrap(. ~ get(input$facety), ncol = input$numgraphsrow, scales="free_x")
    }
    # Axis label tracks the selected y variable.
    if (input$y == "relEnergy") {
      plot = plot + labs(y="Binding Energy Relative to WT (kcal/mol)")
    }
    else if (input$y == "perChange") {
      plot = plot + labs(y="Percent Change in Binding Energy (%)")
    }
    if (args[2] == "True") {
      if(input$errorBars){
        # NOTE(review): the data built above has columns 'std'/'varStd',
        # not 'std_dev' -- this aes() mapping looks like it references a
        # non-existent column; verify before enabling error bars.
        plot <- plot + geom_errorbar(aes(ymin=relEnergy-std_dev, ymax=relEnergy+std_dev), position = position_dodge())
      }
    }
    # Stash the plot for the download handler, then return it.
    plots$plot <- plot
    plots$plot
  })

  # Save the last rendered barchart as a png.
  output$download <- downloadHandler(
    filename = function() {"figure.png"},
    content = function(file) {
      ggsave(file, plot = plots$plot, device = "png")
    }
  )
}
# Run the application, opening it in the default browser (the script is
# intended to be launched from the command line with the args read above).
runApp(list(ui = ui, server = server), launch.browser = TRUE)
|
47568b59581d7afde944b6979c82895dbd013aa1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RSDA/R/centers.interval.R
|
3e2037d98e3972fbbfcd42aaf9528b1b70a8683a
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
r
|
centers.interval.R
|
centers.interval <-
function(sym.data) {
  # Compute the midpoint (center) of every interval variable for every
  # symbolic object: (lower bound + upper bound) / 2.
  #
  # Args:
  #   sym.data: a symbolic data structure with elements sym.var.types,
  #     N (objects), M (variables), sym.obj.names, sym.var.names, and
  #     accessible per-variable data via sym.var().
  # Returns:
  #   An N x M data frame of interval centers, with object names as row
  #   names and variable names as column names.
  idn <- all(sym.data$sym.var.types == sym.data$sym.var.types[1])
  if (!idn) {
    stop("All variables have to be of the same type")
  }
  # "$I" marks interval-typed variables; anything else is rejected.
  # (Message text kept identical to the original, typo included, for
  # backward compatibility with callers matching on it.)
  if (sym.data$sym.var.types[1] != "$I") {
    stop("Variables have to be continuos or Interval")
  }
  nn <- sym.data$N
  mm <- sym.data$M
  centers <- as.data.frame(matrix(0, nn, mm))
  rownames(centers) <- sym.data$sym.obj.names
  colnames(centers) <- sym.data$sym.var.names
  # Fetch each variable's interval matrix once (it does not depend on the
  # row index) and compute all row midpoints in one vectorized step.
  for (j in seq_len(mm)) {
    vdata <- sym.var(sym.data, j)$var.data.vector
    centers[, j] <- (vdata[seq_len(nn), 1] + vdata[seq_len(nn), 2]) / 2
  }
  centers
}
|
3aa0977ae8fb8e18ceca84729be90f0c0e079f19
|
88147e2bddc2add4f51b507dbf1eed86de849495
|
/bivcoptests/kldiv-test.r
|
2829e675b8e386c5c885d5c716f8988d91c5bc62
|
[] |
no_license
|
hoanguc3m/CopulaModel
|
39906379ed88d56f579851d45f157733c42bf926
|
1522b9a6476c5550736f652f902c61b3ac5e8fd3
|
refs/heads/master
| 2020-03-27T14:29:49.345584
| 2019-11-03T22:18:14
| 2019-11-03T22:18:14
| 146,665,628
| 2
| 0
| null | 2018-08-29T22:25:56
| 2018-08-29T22:25:56
| null |
UTF-8
|
R
| false
| false
| 558
|
r
|
kldiv-test.r
|
# KL divergence comparisons between bivariate copula families (dcop1 vs
# dcop2), all calibrated to the same Kendall's tau.
library(CopulaModel)
library(cubature)

# Common dependence level.
tau <- 0.5

# Convert Kendall's tau to each family's copula parameter.
th.bvn <- bvn.b2cpar(tau)
th.pla <- depmeas2cpar(tau, "tau", "plackett")  # 11.40486
th.frk <- depmeas2cpar(tau, "tau", "frank")     # 5.736287

# BVN against Plackett, BVN against Frank, then Plackett against Frank.
cat("\nBVN vs Plackett, tau=0.5\n")
KLcopvsbvn(rh = th.bvn, dcop2 = dpla, param2 = th.pla,
           copname2 = "Plackett", UB = 7, iprint = TRUE)
cat("\nBVN vs Frank, tau=0.5\n")
KLcopvsbvn(rh = th.bvn, dcop2 = dfrk, param2 = th.frk,
           copname2 = "Frank", UB = 7, iprint = TRUE)
cat("\nPlackett vs Frank, tau=0.5\n")
KLcopvscop("Plackett", th.pla, dcop1 = dpla, "Frank", th.frk,
           dcop2 = dfrk, UB = 7, iprint = TRUE)
|
b871e634068fd9b6fd27f4bd6845eaf6488087da
|
7ff4354b5ce73a35811f2d53b4e9dc1a1bae6f89
|
/man/imports.most.Rd
|
85929e32e1c471871a0b69cb1c9fde1b59533deb
|
[] |
no_license
|
yikeshu0611/packagomatrics
|
e3183aace895f6ea868911b0a54b6ddf96be445b
|
6057f71760fb9b62d25fd8126d09e927f121dc38
|
refs/heads/master
| 2022-11-22T06:19:08.517311
| 2020-07-23T02:52:14
| 2020-07-23T02:52:14
| 281,833,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,279
|
rd
|
imports.most.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imports.most.R
\name{imports.most}
\alias{imports.most}
\title{Mostly imported packages}
\usage{
imports.most(imports, n = 10, year = FALSE)
}
\arguments{
\item{imports}{results of function imports()}
\item{n}{the most frequency number}
\item{year}{logical, default is FALSE. Whether to include years}
}
\value{
a data.frame contains package, frequency or year
}
\description{
Mostly imported packages
}
\examples{
\donttest{
d <- loadData()
i <- imports(d)
imt <- imports.most(imports = i,20)
library(ggplot2)
ggplot(imt,aes(Imports,Freq))+
geom_col()+
coord_flip()+
theme(axis.text = element_text(size=16))
imt <- imports.most(i,10,T)
imt <- imt[imt$year >= 2011 & imt$year <= 2020,]
# the latest 10 years
library(ggplot2)
library(tidytext)
ggplot(imt,aes(reorder_within(Imports,Freq,year),Freq))+
geom_col()+
scale_x_reordered() +
facet_wrap(~year,scales="free")+
coord_flip()+
theme(
axis.text = element_text(size=16),
strip.text.x = element_text(size = 18,
colour = "red")
)+
xlab(NULL)+ylab(NULL)
}
}
|
0cdff8e9297b4e31849ef53615528a6d6e4bd619
|
b72c5fef88aad2e52c8f1221f0fa6d5fc165748c
|
/R/AIreport.R
|
0a57fad99562284876365a7cabebacc200d7ea5d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JVAdams/GLFC
|
62c6915efb6f5c4193ef0d580eb68ea8e8838692
|
82ea12b3d7e40465207728439d34f77068980c78
|
refs/heads/master
| 2023-01-05T07:51:52.885062
| 2022-12-20T21:45:29
| 2022-12-20T21:45:29
| 32,460,710
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,687
|
r
|
AIreport.R
|
#' Adult Index Report
#'
#' Create a draft template-style report of the Adult Index estimates of
#' sea lamprey in the Great Lakes.
#' @param streamPEs
#' A data frame of "complete" stream mark-recapture estimates
#' (meaning all contributions to the Adult Indices have
#' already been calculated). The data frame must
#' include: \code{year},
#' \code{lake}, lake-stream ID \code{lscode}
#' (see details), population estimate
#' \code{PEmr}, coefficient of variation \code{CVmr}
#' (100\% * sqrt(variance(PEmr)) / PEmr), \code{index}, a logical
#' identifying the index streams; \code{maintain} a logical identifying the
#' streams that will continue to have ongoing trapping even if not part of
#' the Adult Index; \code{indexContrib} a numeric, the stream population
#' estimate that will be used in the Adult Index (NA for new);
#' \code{indexContribCV} a numeric, the stream CV that will be used to
#' generate 95\% confidence intervals for the Adult Index (NA for new); and
#' \code{complete} a logical identifying streams and years for which the
#' Adult Index has already been estimated (should be all TRUE).
#' @param lakeIPEs
#' A data frame of annual lake-wide Adult Indices with 8 columns:
#' \code{lake}, \code{year}, the Adult Index \code{index}, its associated
#' lower and upper 95\% confidence interval \code{ilo} and \code{ihi},
#' and the corresponding expansion to a supposed population estimate,
#' \code{pe}, \code{pelo} and \code{pehi}.
#' The data frame may contain variables other than those required.
#' @param targets
#' A data frame with the calculated targets for the Adult Index and
#' expanded PE of each Great Lake, with 5 rows (Superior, Michigan, Huron,
#' Erie, Ontario) and 2 columns: \code{lake} and
#' \code{targInd}, typically the output from \code{\link{AItarget}}.
#' @param csvDir
#' A character scalar identifying the path where the rtf file will be
#' stored, e.g., \code{csvDir = "C:\\temp\\mydir"}.
#' @param outFile
#' Name of the output rtf file, default NULL, in which case the file will be
#' named "YYYY Adult Index - draft report.doc" where YYYY
#' is the latest year represented in \code{streamPEs}.
#' @param proptargets
#' A data frame with any proposed targets for the Adult Index,
#' with 2 columns \code{lake} and \code{targInd}, default NULL.
#' May have from zero to several rows for a single Great Lake.
#' @details
#' Lake-stream IDs are combination of lake ID and stream ID
#' e.g., 1.064 = lake ID + (stream ID)/1000.
#' @return
#' A draft report document as an rtf file (with the file type *.doc,
#' so that MS Word will open it automatically).
#' @importFrom tidyr complete
#' @import maps
#' @export
#'
AIreport <- function(streamPEs, lakeIPEs, targets, csvDir, outFile=NULL,
proptargets=NULL) {
# library(GLFC)
# library(maps)
# library(tidyr)
# library(dplyr)
# streamPEs=streamPE
# lakeIPEs=lakeIndPE
# targets=oldtarg
# csvDir=DIRECTORY
# outFile="TestReport"
# proptargets=NULL
YEAR <- max(streamPEs$year)
if (is.null(outFile)) {
outFile <- paste(YEAR, "Adult Index - draft report.doc")
}
# calculate three-year running mean (moving average) for adult index
look <- tidyr::complete(lakeIPEs, lake, year) %>%
group_by(lake) %>%
arrange(lake, year) %>%
mutate(
lag1=lag(index, 1),
lag2=lag(index, 2),
nrun= (!is.na(index)) + (!is.na(lag1)) + (!is.na(lag2))
)
fullrun3 <- rowMeans(look[, c("index", "lag1", "lag2")], na.rm=TRUE)
look$index.3mn <- ifelse(look$nrun<2, NA, fullrun3)
lakeIPEs <- look
# create nice looking table with latest year of estimates and targets
targ2 <- lakeIPEs %>%
filter(year==YEAR) %>%
full_join(targets) %>%
mutate(
Lake = Lakenames[lake],
Status = ifelse(index.3mn <= targInd, "Less than", "Greater than")
) %>%
ungroup() %>%
select(Lake, index, index.3mn, Target=targInd, Status)
TAB.targs <- prettytable(as.data.frame(targ2), 0)
# plot lake-wide totals w/ confidence intervals on different scales
FIG.lakeCI <- function(lakeids=1:5, k=index2pe) {
with(lakeIPEs, {
par(mfrow=c(3, 2), mar=c(3, 3, 2, 3), oma=c(2, 2, 0, 2), cex=1)
for(i in seq(lakeids)) {
j <- lakeids[i]
sel <- lake==j
mymax <- max(ihi[sel & year>=1985], na.rm=TRUE)/1000
plot(1, 1, type="n", xlim=range(year), ylim=c(0, mymax),
xlab="", ylab="", main=Lakenames[i], las=1)
abline(h=targets$targInd[j]/1000, lty=2)
if(!is.null(proptargets)) {
abline(h=proptargets$targInd[proptargets$lake==j]/1000,
col="darkgray", lwd=2, lty=2)
}
lines(year[sel], index.3mn[sel]/1000, col="#fb8072", lwd=2)
arrows(year[sel], ilo[sel]/1000, year[sel], ihi[sel]/1000, length=0.03,
angle=90, code=3, col="darkgray")
points(year[sel], index[sel]/1000)
p4 <- pretty(k[i]*c(0, mymax))
axis(4, at=p4/k[i], labels=p4, las=1)
if (i==1) {
frame()
}
}
mtext("Year", outer=TRUE, side=1, cex=1.4)
mtext("Adult index (thousands)", outer=TRUE, side=2, cex=1.4)
mtext("Lake-wide adult abundance (thousands)", outer=TRUE, side=4, cex=1.4)
})
}
# Classify each stream PE by how it contributes to the index:
# index streams with a mark-recapture estimate, index streams whose PE
# was modeled (no mark-recapture), and non-index streams.
streamPEs$categ <- "Non-index"
streamPEs$categ[with(streamPEs, index & !is.na(PEmr))] <-
  "Index w/ mark-recap"
streamPEs$categ[with(streamPEs, index & is.na(PEmr))] <-
  "Index w/o mark-recap"
# Compact label "CO - LK - estr": country initials, lake abbreviation,
# and `estr` (presumably an estimate descriptor — confirm upstream).
streamPEs$cle <- with(streamPEs,
  paste(casefold(substring(country, 1, 2), upper=TRUE),
  Lakeabbs[lake], estr, sep=" - "))
# Two-line bubble-plot label: "<cle> <stream name>" over the formatted
# index contribution.
streamPEs$cleplus <- with(streamPEs, paste(
  paste(cle, strname, sep=" "),
  paste(" ", format(round(indexContrib), big.mark=",")), sep="\n"))
# Leftover interactive-debugging defaults for FIG.bubble1 (kept for
# reference):
# df=streamPEs[streamPEs$year==YEAR, ]
# group="categ"
# var="indexContrib"
# lab="cleplus"
# sug=c("Index w/ mark-recap", "Index w/o mark-recap", "Non-index")
# cols=blindcolz[1+(1:length(sug))]
# legat="topright"
# leginset=c(0, 0)
# dr=range(sqrt(df[, var]), na.rm=TRUE)
# cr=c(0.04, 0.5)
# ox=-44
# oy=64
# FIG.bubble1: bubble map of stream estimates on a Great Lakes basemap.
#   df     - data frame with columns long, lat, plus `group`, `var`, `lab`
#   group  - name of the grouping column (one color per group)
#   var    - name of the numeric column; circle radius is sqrt(var)
#   lab    - name of the column holding the text label for each point
#   sug    - group levels to draw, in legend order
#   cols   - one color per level of `sug`
#   lonR/latR - map window (defaults cover the Great Lakes)
#   dr/cr  - data range and circle-size range passed to circles()
#   ox/oy  - "outside" coordinates forwarded to circles() — semantics
#            defined by that helper; confirm there
# Labels are spread evenly across the bottom margin (ordered by longitude)
# and connected to their points with dashed segments.
FIG.bubble1 <- function(df, group, var, lab, sug,
  cols=blindcolz[1:length(sug)], lonR=-c(92.14, 75.97),
  latR=c(41.36, 49.02), legat="topright", leginset=c(0, 0),
  dr=range(sqrt(df[, var]), na.rm=TRUE), cr=c(0.04, 0.25), ox=-44, oy=64) {
  g <- df[, group]
  v <- df[, var]
  n <- length(g)
  xr <- lonR
  yr <- latR
  xrw <- diff(xr)
  yrw <- diff(yr)
  # small buffers around the map window
  bufx <- xrw/40
  bufy <- yrw/40
  # extra space (in bufy units) reserved below the map for the labels
  magic <- 20
  par(mar=c(0, 0, 0, 0))
  maps::map("world", type="n", xlim=xr + c(-1, 1)*bufx,
    ylim=yr + c(-magic, 1)*bufy, mar=c(0, 0, 0, 0))
  maps::map("lakes", col="cyan", add=TRUE)
  pusr <- par("usr")
  with(df, {
    textx <- rep(NA, dim(df)[1])
    # evenly spaced label x-positions, ranked by point longitude
    textx <- seq(pusr[1], pusr[2],
      length=n+2)[-c(1, n+2)][rank(long, ties.method="first")]
    for(i in seq_along(sug)) {
      sel <- g==sug[i]
      if (sum(sel)>0) {
        circles(long[sel], lat[sel], sqrt(v)[sel], data.range=dr,
          circle.size.range=cr, outx=ox, outy=oy, add=TRUE, fg=cols[i], lwd=3)
        # vertical labels in the bottom margin
        text(textx[sel], yr[1] - (magic-1)*bufy, df[sel, lab],
          adj=0, srt=90, col=cols[i], cex=0.8)
        # leader lines from label to bubble
        segments(textx[sel], yr[1] - 2*bufy, long[sel], lat[sel], col=cols[i],
          lty=2)
      }
    }
  })
  par(xpd=NA)
  legend(legat, sug, col=cols, lwd=3, bty="n", inset=leginset, cex=1.4)
}
# Bubble map of the current year's index-stream estimates, split by
# whether the PE came from mark-recapture or was modeled (non-index
# streams are excluded).
FIG.bubble2 <- function() {
  keep <- streamPEs$year == YEAR & streamPEs$categ != "Non-index"
  categories <- c("Index w/ mark-recap", "Index w/o mark-recap")
  FIG.bubble1(df = streamPEs[keep, ], group = "categ",
    var = "indexContrib", lab = "cleplus", sug = categories)
}
### bar plot of individual index stream PEs
outcex <- 1.2   # cex for the outer-margin axis titles in FIG.bar
YEARb <- 1995   # first year shown in the stacked bar plots
# qualitative 7-color palette for up to 7 streams per lake
col7 <- c("#8dd3c7", "#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462",
  "#b3de69")
# FIG.bar: per-lake stacked bar plots (one bar per year since YEARb) of the
# index contribution from each index stream; streams are stacked in order
# of decreasing median contribution.  Horizontal lines mark the accepted
# (solid) and, if supplied, proposed (dashed) targets.
# Relies on globals: streamPEs, YEARb, col7, Lakenames, targets,
# proptargets, outcex.
FIG.bar <- function() {
  par(mar=c(2.5, 2.5, 1, 1), mfrow=c(3, 2), yaxs="i", oma=c(1.5, 1.5, 0, 0),
    cex=1.2)
  for(i in 1:5) {
    mystreamdf <- with(streamPEs,
      streamPEs[lake==i & index==TRUE & year >= YEARb, ])
    # year x stream matrix of mean index contributions (stream names
    # truncated to 10 characters)
    p <- with(mystreamdf,
      tapply(indexContrib, list(year, substring(strname, 1, 10)), mean))
    # order stream columns by decreasing median contribution
    p <- p[, rev(order(apply(p, 2, median, na.rm=TRUE)))]
    yrz <- as.numeric(dimnames(p)[[1]])
    pyrz <- pretty(yrz)
    a <- barplot(t(p)/1000, las=1, col=col7, axes=FALSE,
      names.arg=rep("", dim(p)[1]),
      ylim=1.03*c(0, max(apply(p, 1, sum, na.rm=TRUE)))/1000,
      xlab="", ylab="", main=Lakenames[i], border=NA)
    # accepted target (solid) and proposed target (dashed), in thousands
    abline(h=targets$targInd[i]/1000)
    if(!is.null(proptargets)) {
      abline(h=proptargets$targInd[proptargets$lake==i]/1000, lty=2)
    }
    axis(1, at=a[match(pyrz, yrz)], pyrz)
    axis(2, las=1)
    box()
    legend("topleft", rev(colnames(p)), fill=rev(col7[1:dim(p)[2]]), cex=0.5,
      bty="n", border=NA)
    if (i==1) {
      # leave the top-right cell of the 3x2 layout blank
      frame()
    }
  }
  mtext("Year", outer=TRUE, side=1, cex=outcex)
  mtext("Adult Index (thousands)", outer=TRUE, side=2, cex=outcex)
}
# ---- Assemble the draft RTF report ----
# create a file for the draft report
# (doc is assigned globally with <<- so the rtf helper functions
# heading()/para()/tabl()/figu() can write to it)
doc <<- startrtf(file=outFile, dir=csvDir)
heading("D R A F T")
heading(paste0(YEAR, " Lake-Wide Adult Sea Lamprey Index"))
para("Authors ...")
para(format(Sys.time(), "%B %d, %Y"))
para("<<< This is a rough draft to be used as a starting point in creating the final report. First, save the document as a *.docx Word file (even though it has a *.doc file extension already, it's really just an *.rtf file). Then, select all text in the document (Ctrl-a) and increase the font size to 12. Finally, delete this paragraph, add author names, edit text and insert/delete page breaks as needed. >>>")
# merge this year and last years' estimates
both <- merge(lakeIPEs[lakeIPEs$year==YEAR-1, ],
  lakeIPEs[lakeIPEs$year==YEAR, ],
  by="lake", suffixes = c(".last",".this"), all=TRUE)
# NOTE(review): thyr/layr/phrase/delta are computed but not referenced in
# the paragraphs below — possibly leftovers from earlier report text.
thyr <- round(both$index.this)
layr <- round(both$index.last)
# a lake changed significantly if this year's CI lies entirely above
# (hier) or below (loer) last year's CI
hier <- with(both, ilo.this > ihi.last)
loer <- with(both, ihi.this < ilo.last)
phrase <- rep("not significantly different from", 5)
delta <- with(both, round(100*abs(index.this - index.last) / index.last))
phrase[is.na(hier)] <- "not comparable to"
phrase[!is.na(hier) & hier] <-
  paste0(delta[!is.na(hier) & hier], "% higher than")
phrase[!is.na(hier) & loer] <-
  paste0(delta[!is.na(hier) & loer], "% lower than")
# lists of lakes above/below target for the opening paragraph
abta <- sum(targ2$Status=="Greater than")
beta <- sum(targ2$Status=="Less than")
insert1 <- ""
insert2 <- ""
if (abta>0) {
  insert1 <-
    paste(targ2$Lake[targ2$Status=="Greater than"], collapse=", ")
}
if (beta>0) {
  insert2 <-
    paste(targ2$Lake[targ2$Status=="Less than"], collapse=", ")
}
para("The index of adult sea lamprey abundance is estimated annually for each Great Lake. Based on the mean over the last 3 years (", YEAR-2, "-", YEAR, "), lakes ", insert2, " were less than the targets and lakes ", insert1, " were greater than the targets (Table 1, Figure 1). Index targets were determined for each lake as average abundance observed during a 5-year period when wounding rates were at an acceptable level. Adult sea lamprey indices and lake-wide abundances from 1985 to ", YEAR, " are reported in Tables 2 and 3.")
if(!is.null(proptargets)) {
  # list proposed targets per lake as "Lake: t1, t2; Lake: t1; ..."
  ptl <- proptargets
  ptl$targInd <- format(round(ptl$targInd), big.mark=",")
  ptl <- split(ptl, ptl$lake)
  pttext <- paste0(Lakenames[as.numeric(names(ptl))], ": ",
    lapply(ptl, function(df) paste(df$targInd, collapse=", ")), collapse="; ")
  para("In addition to the accepted targets, there are also the following proposed targets, ", pttext, ". <<< Explain further. >>>")
}
# lakes that decreased / stayed the same / increased vs last year
insert1 <- ""
insert2 <- ""
insert3 <- ""
sel1 <- !is.na(hier) & loer
sel2 <- !is.na(hier) & !hier & !loer
sel3 <- !is.na(hier) & hier
if (sum(sel1)>0) {
  insert1 <- paste(Lakenames[both$lake[sel1]], collapse=", ")
}
if (sum(sel2)>0) {
  insert2 <- paste(Lakenames[both$lake[sel2]], collapse=", ")
}
if (sum(sel3)>0) {
  insert3 <- paste(Lakenames[both$lake[sel3]], collapse=", ")
}
para("Comparing the 95% confidence intervals of the single year ", YEAR, " estimates with those in ", YEAR-1, ", the number of adults significantly decreased in lakes ", insert1, "; remained the same in lakes ", insert2, "; and significantly increased in lakes ", insert3, " (Figure 1).")
para("The contribution from individual streams to the adult index is shown in Figure 2.")
# count index streams with / without mark-recapture estimates this year
misspe <- sum(with(streamPEs, index==TRUE & year==YEAR &
  (is.na(PEmr) | is.na(CVmr))))
allstr <- sum(with(streamPEs, index==TRUE & year==YEAR))
para("The distribution of the ", YEAR, " stream estimates around the Great Lakes is shown in Figure 3. Mark-recapture estimates of adult sea lamprey abundance were available for ", allstr-misspe, " of the ", allstr, " index streams.")
# Table 1: target status
prettyTAB.targs <- TAB.targs
names(prettyTAB.targs) <- c(
  "Lake",
  paste0(YEAR, "\nindex"),
  paste0(YEAR-2, "-", YEAR, "\n mean index"),
  "Target",
  "Status")
tabl("The judgement of whether a lake is above target is based on the mean adult index over the last 3 years.",
  TAB=prettyTAB.targs)
extraphrase <- ""
if(!is.null(proptargets)) {
  extraphrase <- " Dashed horizontal lines represent proposed targets."
}
# Figure 1: per-lake index series with CIs
figu("Adult index values for each Great Lake through ", YEAR, ", with 3-year averages shown as red lines. Individual estimates with 95% confidence intervals are shown in gray. Targets are represented by the horizontal lines.", extraphrase,
  FIG=FIG.lakeCI, newpage="port")#, w=6.5, h=7.5)
# Table 2: adult indices by lake and year
TAB.lakewide1 <- with(lakeIPEs, tapply(index, list(year, lake), mean))
colnames(TAB.lakewide1) <- Lakenames
tabl("Adult Indices, 1985-", YEAR, ".",
  TAB=prettytable(TAB.lakewide1, 0))
# Table 3: lake-wide abundances (index * conversion factor)
TAB.lakewide2 <- with(lakeIPEs, tapply(pe, list(year, lake), mean))
colnames(TAB.lakewide2) <- Lakenames
tabl("Lake-wide adult sea lamprey abundances, 1985-", YEAR, ", which are based on the adult index estimates multiplied by lake-specific conversion factors (", paste(names(index2pe), as.numeric(index2pe), collapse=", "), ").",
  TAB=prettytable(TAB.lakewide2, -3), newpage="port")
# Figures 2 and 3: stream-level bar plots and bubble map
figu("Adult sea lamprey abundance estimates for index streams. Targets are represented by the horizontal lines.", extraphrase,
  FIG=FIG.bar, newpage="port", w=6, h=7.5)
figu("Relative size of adult sea lamprey population estimates (PEs) in Great Lakes index streams, ", YEAR, ". Circle size represents size of PE, circle color represents the source of PE.",
  FIG=FIG.bubble2, newpage="land", h=5.7)
endrtf()
}
|
8325412c0493a774fe7ec2349be044d7747058f9
|
c17df37f05205cbd428b37fa5b942020e6752e8c
|
/_render.r
|
88134fdbeee2b60b41d0a4c5248ed408c56a6b31
|
[] |
no_license
|
Kan-Pang/2020EconometricsTA
|
624644225021467486cddb4d9d0d1ee506e42ebc
|
e96fc031ac781d3fec1331367ef86304f9160fe6
|
refs/heads/main
| 2023-02-22T14:37:03.278603
| 2021-01-29T08:52:51
| 2021-01-29T08:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
_render.r
|
# Render ----
# Renders the TA-session handout (rmarkdown), the whole course book
# (bookdown), and extracts the handout's R code into a plain script.

#+ library and wd
library(rmarkdown)
library(bookdown)

# Machine-specific pandoc install; update this path on other machines.
Sys.setenv(RSTUDIO_PANDOC = "C:/Users/katoo/AppData/Local/Pandoc")
# BUG FIX: the CRAN-mirror option is named "repos"; the original set
# "repo", which R silently ignores.
options(repos = "https://cran.rstudio.com/")

## ---- render
# pdf (rmarkdown): single handout
rmarkdown::render(
  input = "II/TAsession_13/handout.rmd",
  output_file = "handout.pdf",
  output_dir = "II/TAsession_13", # do not change
  clean = TRUE,                   # do not change
  encoding = "utf8"               # do not change
)

# pdf (bookdown): full course book
bookdown::render_book(
  input = "index.rmd",
  output_format = "bookdown::pdf_book",
  output_dir = ".",
  output_file = "Econometrics2TA.pdf",
  clean = TRUE,
  encoding = "utf8"
)

#' from Rmd to R: extract the code chunks from the handout
knitr::purl(
  input = "II/TAsession_13/handout.rmd",
  output = "R/script/TASession_13.r",
  documentation = 1
)
|
40e1f828f3dae098774270c333b50cf5372a4b8e
|
44803ba65f6fb45e837e40e9a422f0653d36883d
|
/SSMs_Exploration/Mutations_ChisqTEST.R
|
b51af0da38dca917600c3b6399b6247567189985
|
[] |
no_license
|
MMaqueda/MutSig_WGSvsWES
|
b5a265cc20dd1d9c4b4fbd57894c385c4a679f5a
|
bdf92a8eb696a9f6680c887261f6f88e964179fd
|
refs/heads/master
| 2022-09-07T04:03:03.630288
| 2022-07-26T12:02:04
| 2022-07-26T12:02:04
| 141,121,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,782
|
r
|
Mutations_ChisqTEST.R
|
#' This script computes a Chi-square test in order to test if the number of mutations in a specific region
#' of the genome is different from the rest (WGS-that specific region). We will compute the test with median
#' number of SSMs per tumor type and then per sample (and determine % samples with significant p-value)

# Load the needed data. CHANGE THE PATH IF FILES ARE STORED IN A DIFFERENT PLACE
# (provides the data frame Mutation_density_ratios)
load("/project/devel/PCAWG/mmaqueda/Data/Comparison_SSM96_WGS_WES/SSMs_Depletion_Enrichment_RoIs/MutationDensityRatios.RData")

# Test for median values per cancer type ----------------------------------

# The table should include following data (nrow = TumourTypes):
# N samples per tumor type, median SSMs in CDS, median SSMs in Exon,
# median SSMs in WGS-CDS, median SSMs in WGS-Exon,
# pval Chisq - CDS (median)
# pval Chisq - Exon (median)
# adj pval Chisq - CDS (median) - Corrected per number of cancer types
# adj pval Chisq - Exon (median) - Corrected per number of cancer types

# Ensure the SSM count columns are numeric before computing medians.
Mutation_density_ratios$SSMs_CDS <- as.numeric(Mutation_density_ratios$SSMs_CDS)
Mutation_density_ratios$SSMs_Exon <- as.numeric(Mutation_density_ratios$SSMs_Exon)
Mutation_density_ratios$SSMs_WGSwoCDS <- as.numeric(Mutation_density_ratios$SSMs_WGSwoCDS)
Mutation_density_ratios$SSMs_WGSwoExon <- as.numeric(Mutation_density_ratios$SSMs_WGSwoExon)
# Per-group summary used with tapply(): the median, rounded to a whole number.
stats <- function(x) {
  round(median(x), digits = 0)
}
# Result table: one row per tumour type, medians of mutated / non-mutated
# counts per region plus the chi-square p-values computed below.
Mutations_Chisq <- as.data.frame(matrix(ncol=8,
                                        nrow=length(unique(Mutation_density_ratios$Tumour_Type))))
colnames(Mutations_Chisq) <- c("MedianSSMs_CDS","MedianSSMs_Exon","MedianSSMs_WGSwoCDS","MedianSSMs_WGSwoExon",
                               "pvalChisq_CDS", "pvalChisq_Exon","adjpvalChisq_CDS","adjpvalChisq_Exon")

# Median SSM counts per tumour type for each of the four regions.
for(i in 1:4)
{
  # En el df de density ratios están en el mismo orden pero empiezan en la columna 3 (en vez de 1)
  # (in the density-ratio df the columns are in the same order but start at column 3 instead of 1)
  Mutations_Chisq[,i] <- as.data.frame(tapply(Mutation_density_ratios[,i+2], Mutation_density_ratios$Tumour_Type, stats))
}

# Prepend sample counts and tumour-type labels.
Mutations_Chisq <- cbind(N=plyr::count(Mutation_density_ratios,'Tumour_Type')$freq,Mutations_Chisq)
Mutations_Chisq <- cbind(Tumour_Type = levels(Mutation_density_ratios$Tumour_Type),Mutations_Chisq)

# We are missing the median of non-mutations in each case. We should compute the values for all samples,
# and then compute the median
# Region sizes in bp; 2861327131 is the total WGS size used here.
CDSregion <- 35194324
Exonregion <- 121892536
WGSwoCDSregion <- 2861327131 - CDSregion
WGSwoExonregion <- 2861327131 - Exonregion

# Per-sample non-mutated base counts: region size minus observed SSMs.
NonMuts <- Mutation_density_ratios[,1:2]
NonMuts$CDSnonmuts <- rep(CDSregion,dim(Mutation_density_ratios)[1]) - Mutation_density_ratios[,3]
NonMuts$Exonnonmuts <- rep(Exonregion,dim(Mutation_density_ratios)[1]) - Mutation_density_ratios[,4]
NonMuts$WGSwoCDSnonm <- rep(WGSwoCDSregion,dim(Mutation_density_ratios)[1]) - Mutation_density_ratios[,5]
NonMuts$WGSwoExonnonm <- rep(WGSwoExonregion,dim(Mutation_density_ratios)[1]) - Mutation_density_ratios[,6]

# Median values for these non-muts
NonMuts_Median <- as.data.frame(matrix(ncol=4, nrow=length(unique(NonMuts$Tumour_Type))))
for(i in 1:4)
{NonMuts_Median[,i] <- as.data.frame(tapply(NonMuts[,i+2], NonMuts$Tumour_Type, stats))}
colnames(NonMuts_Median) <- paste0("Median",colnames(NonMuts)[3:dim(NonMuts)[2]])
NonMuts_Median <- cbind(Tumour_Type = levels(NonMuts$Tumour_Type),NonMuts_Median)

# Compute Chisq test for each tumour type based on the median SSMs values
# This is for testing association betwen muts vs non muts with region
# (2x2 table: mutated / non-mutated x region / rest-of-WGS)
for(i in 1:dim(Mutations_Chisq)[1])
{
  Mutations_Chisq$pvalChisq_CDS[i] <- chisq.test(matrix(c(Mutations_Chisq$MedianSSMs_CDS[i],
                                                          Mutations_Chisq$MedianSSMs_WGSwoCDS[i],
                                                          NonMuts_Median$MedianCDSnonmuts[i],
                                                          NonMuts_Median$MedianWGSwoCDSnonm[i]),
                                                        ncol = 2))$p.value
  Mutations_Chisq$pvalChisq_Exon[i] <- chisq.test(matrix(c(Mutations_Chisq$MedianSSMs_Exon[i],
                                                           Mutations_Chisq$MedianSSMs_WGSwoExon[i],
                                                           NonMuts_Median$MedianExonnonmuts[i],
                                                           NonMuts_Median$MedianWGSwoExonnonm[i]),
                                                         ncol = 2))$p.value
}

# Get the adjusted p-values (FDR across tumour types)
Mutations_Chisq$adjpvalChisq_CDS <- p.adjust(Mutations_Chisq$pvalChisq_CDS, method="fdr")
Mutations_Chisq$adjpvalChisq_Exon <- p.adjust(Mutations_Chisq$pvalChisq_Exon, method="fdr")

# Fix Table results (adj p-val) for representation ------------------------
Mutations_Chisq_xPublish <- Mutations_Chisq
# Map an adjusted p-value to a display label for the publication table.
# BUG FIX: the original tested `pval < 0.05` before `pval < 0.01`, making
# the "<.01" branch unreachable; thresholds must be checked in ascending
# order.
fix_adjpval <- function(pval)
{
  if (pval < 0.001) {
    "<.001"
  } else if (pval < 0.01) {
    "<.01"
  } else if (pval < 0.05) {
    "<.05"
  } else {
    "NS"
  }
}
# Replace numeric adjusted p-values with display labels in the publish copy.
Mutations_Chisq_xPublish$adjpvalChisq_CDS <- sapply(Mutations_Chisq_xPublish$adjpvalChisq_CDS, function(pval) fix_adjpval(pval))
Mutations_Chisq_xPublish$adjpvalChisq_Exon <- sapply(Mutations_Chisq_xPublish$adjpvalChisq_Exon, function(pval) fix_adjpval(pval))

# Test for values per sample ----------------------------------------------
# The objective is to apply a Chisq test per sample and then get the % of samples with
# adj pval <.05 JUST FOR CDS region

# NOTE: the data frame reuses NonMuts' shape as storage; the four value
# columns are overwritten with p-values in the loop below.
Mutations_Chisq_xSample <- NonMuts
colnames(Mutations_Chisq_xSample) <- c("Sample_ID","Tumour_Type",
                                       "pvalCDS","pvalExon",
                                       "adjpvalCDS","adjpvalExon")

# One 2x2 chi-square test per sample, for CDS and Exon regions.
for(i in 1:dim(Mutations_Chisq_xSample)[1])
{
  Mutations_Chisq_xSample$pvalCDS[i] <- chisq.test(matrix(c(Mutation_density_ratios$SSMs_CDS[i],
                                                            Mutation_density_ratios$SSMs_WGSwoCDS[i],
                                                            NonMuts$CDSnonmuts[i],
                                                            NonMuts$WGSwoCDSnonm[i]),
                                                          ncol = 2))$p.value
  Mutations_Chisq_xSample$pvalExon[i] <- chisq.test(matrix(c(Mutation_density_ratios$SSMs_Exon[i],
                                                             Mutation_density_ratios$SSMs_WGSwoExon[i],
                                                             NonMuts$Exonnonmuts[i],
                                                             NonMuts$WGSwoExonnonm[i]),
                                                           ncol = 2))$p.value
}

# Get the adjusted p-values (FDR across samples)
Mutations_Chisq_xSample$adjpvalCDS <- p.adjust(Mutations_Chisq_xSample$pvalCDS, method="fdr")
Mutations_Chisq_xSample$adjpvalExon <- p.adjust(Mutations_Chisq_xSample$pvalExon, method="fdr")

# And now let's get the percentage of significant samples per Tumour Type
# Share (in percent) of values in `x` strictly below `cutoff`.  NA values
# count toward the denominator but never the numerator, matching the
# original length(which(...)) implementation.
percentage <- function(x, cutoff) {
  100 * sum(x < cutoff, na.rm = TRUE) / length(x)
}
# Percentage of samples per tumour type with adjusted p-value below a cutoff.
# % < .001
#tapply(Mutations_Chisq_xSample[,5], Mutations_Chisq_xSample$Tumour_Type,
#       function(x) round(percentage(x,0.001),digits=1))
# % < .05
#tapply(Mutations_Chisq_xSample[,5], Mutations_Chisq_xSample$Tumour_Type,
#       function(x) round(percentage(x,0.05),digits=1))

# Let's add this info to "Mutations_Chisq_xPublish"
Mutations_Chisq_xPublish$CDS_PercSignSamples001 <- paste(tapply(Mutations_Chisq_xSample[,5], Mutations_Chisq_xSample$Tumour_Type,
                                                                function(x) round(percentage(x,0.001),digits=1)), "%",sep="")
# BUG FIX: this column is also named "...001" and the CDS counterpart uses
# a 0.001 cutoff, but the original passed 0.05 here; use 0.001 so the
# column name and cutoff agree.
Mutations_Chisq_xPublish$Exon_PercSignSamples001 <- paste(tapply(Mutations_Chisq_xSample[,6], Mutations_Chisq_xSample$Tumour_Type,
                                                                 function(x) round(percentage(x,0.001),digits=1)), "%",sep="")
|
7d6a257c62c71448ec624109e87cd78df1eda16e
|
077fd87077da8a399d51196ba7c81e970e752639
|
/man/works.Rd
|
82f3a9eb80edfb444c3688191f4f8d95ddaf3e99
|
[
"MIT"
] |
permissive
|
bibliometrics/rorcid
|
6c5cc58ae977204cfcabc4831020c02c3b891a1e
|
a4c2c24c292ac4812115b025ce469e39f7572cf4
|
refs/heads/master
| 2021-06-01T19:23:37.397645
| 2016-09-22T02:56:21
| 2016-09-22T02:56:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,113
|
rd
|
works.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/works.R
\name{works}
\alias{works}
\title{Get works data}
\usage{
works(x)
}
\arguments{
\item{x}{Input from a call to \code{\link{orcid_id}} or
\code{\link{as.orcid}}}
\item{...}{Ignored.}
}
\value{
An S3 object of class \code{works}
}
\description{
Get works data
}
\details{
The goal of this function is to get a pretty printed quick sense
of the works for 1 or more ORCID's. You can also access the complete
data.frame of results. If an ORCID has works, this function prints the
titles of the first 10.
}
\examples{
\dontrun{
out <- works(orcid_id("0000-0002-9341-7985"))
out
out$data
#works( orcid_id("0000-0003-1620-1408") )
works( orcid_id("0000-0002-1642-628X") )
works( orcid_id("0000-0003-1444-9135") )
works( orcid_id("0000-0003-1419-2405") )
out <- orcid(query="keyword:ecology")
works(orcid_id(out$`orcid-identifier.path`[7]))
works(orcid_id(out$`orcid-identifier.path`[8]))
works(orcid_id(out$`orcid-identifier.path`[9]))
works(orcid_id(out$`orcid-identifier.path`[10]))
works(as.orcid("0000-0002-1642-628X"))
}
}
|
6cf9da14d774f6cf3ecf3751e2a3470e038100d4
|
1b637d140ccad852fc1431ce48ae4da50629d0e6
|
/R/sumstat_omega.R
|
776b1f318904b599fa09b10be0e3c5398485333d
|
[] |
no_license
|
ijwilson/coala
|
aaa671daf9763057ea5a34efc90c98ba8dfbacf5
|
909e6f237d42e3972e3e54384473efde9d8841e7
|
refs/heads/master
| 2021-03-16T06:04:40.515782
| 2017-04-01T09:49:28
| 2017-04-01T09:49:28
| 91,571,290
| 0
| 0
| null | 2017-05-17T11:57:55
| 2017-05-17T11:57:55
| null |
UTF-8
|
R
| false
| false
| 5,870
|
r
|
sumstat_omega.R
|
#' @importFrom assertthat assert_that is.number
# R6 summary-statistic class wrapping the external OmegaPlus program.
# Writes each locus to a temporary ms-format file, runs OmegaPlus on it,
# and parses the resulting report into a data frame of omega values.
stat_omega_class <- R6Class("stat_omega", inherit = sumstat_class,
  private = list(
    req_segsites = TRUE,
    binary = NULL,   # path to the OmegaPlus executable
    min_win = NULL,  # minimum SNP distance from a grid point
    max_win = NULL,  # maximum SNP distance from a grid point
    grid = NULL,     # number of grid points per locus
    # Fallback result when omega cannot be computed for a locus (too few
    # SNPs or no usable grid): a single zero at the locus midpoint.
    create_empty_result = function(locus, locus_length) {
      data.frame(locus = locus, pos = locus_length / 2, omega = 0)
    }
  ),
  public = list(
    initialize = function(name, min_win, max_win,
                          grid, binary, transformation) {
      assert_that(is.number(min_win))
      private$min_win <- min_win
      assert_that(is.number(max_win))
      assert_that(min_win < max_win)
      private$max_win <- max_win
      assert_that(is.number(grid))
      private$grid <- grid
      if (identical(binary, "automatic")) {
        # Look up the executable via PATH / the OMEGAPLUS env variable
        binary <- search_executable("OmegaPlus", envir_var = "OMEGAPLUS")
        if (is.null(binary)) stop("No binary for OmegaPlus found.")
      } else {
        assert_that(length(binary) == 1)
        assert_that(is.character(binary))
        assert_that(file.exists(binary))
      }
      private$binary <- binary
      super$initialize(name, transformation)
    },
    # Validate that the statistic can be computed for the given model.
    check = function(model) {
      if (has_trios(model)) {
        stop("OmegaPlus can not be calculated from locus trios")
      }
      if (any(get_locus_length(model, total = TRUE) < self$get_grid())) {
        stop("Grid value in stat_omega can not be larger than the locus length")
      }
      invisible(TRUE)
    },
    # Run OmegaPlus once per locus and combine the parsed reports.
    calculate = function(seg_sites, trees, files, model, sim_tasks = NULL) {
      cur_wd <- getwd()
      tmp_dir <- tempfile("omegaprime")
      dir.create(tmp_dir)
      setwd(tmp_dir)
      # BUG FIX: restore the working directory and remove the temporary
      # files even when OmegaPlus or the report parsing fails; the original
      # leaked both on error.
      on.exit({
        setwd(cur_wd)
        unlink(tmp_dir, recursive = TRUE)
      }, add = TRUE)

      grid <- self$get_grid()
      op_list <- lapply(seq(along = seg_sites), function(i) {
        locus_length <- get_locus_length(model, locus = i)

        # Return 0 if there are few SNPs
        if (ncol(seg_sites[[i]]) <= 2) {
          return(private$create_empty_result(i, locus_length))
        }

        # Adjust number of grid points if needed: the grid must fit inside
        # the span of observed SNP positions.
        pos_rel <- get_positions(seg_sites[[i]])
        max_grid <- floor(diff(pos_rel[c(1, length(pos_rel))] * locus_length))
        if (grid > max_grid) grid <- max_grid - 2
        if (grid < 1) return(private$create_empty_result(i, locus_length))

        # Create an ms-format input file for OmegaPlus
        tmp_file <- tempfile("omega")
        cat(c("ms 10 1 -t 5",
              "3579 27011 59243",
              "",
              "//",
              conv_to_ms_output(seg_sites[[i]])),
            "", sep = "\n", file = tmp_file)

        # Execute OmegaPlus
        system2(private$binary,
                args = c("-name", i,
                         "-minwin", self$get_min_win(),
                         "-maxwin", self$get_max_win(),
                         "-grid", grid,
                         "-length", locus_length,
                         "-input", tmp_file),
                stdout = TRUE)
        unlink(tmp_file)

        # Parse the results
        self$parse_report(tmp_dir, grid, i)
      })

      do.call(rbind, op_list)
    },
    # Read one OmegaPlus report file into a (locus, pos, omega) data frame.
    parse_report = function(dir, n_grid, locus) {
      op_file <- file.path(dir, paste0("OmegaPlus_Report.", locus))
      if (!file.exists(op_file)) stop("Calculation of omega failed.")
      values <- read.delim(op_file, header = FALSE, comment.char = "/")
      colnames(values) <- c("pos", "omega")
      assert_that(nrow(values) %% n_grid == 0)
      data.frame(locus = locus, values)
    },
    get_grid = function() private$grid,
    get_min_win = function() private$min_win,
    get_max_win = function() private$max_win
  )
)
#' Summary Statistic: Omega
#'
#' Calculates the Omega Statistic introduced by
#' Kim & Nielsen (2004) from the simulated data. The statistic is sensitive for
#' hard selective sweeps. To calculate
#' the statistic, coala relies on the command line program
#' \href{http://sco.h-its.org/exelixis/web/software/omegaplus/index.html}{OmegaPlus},
#' which needs to be downloaded and compiled manually in order to use the
#' statistic.
#'
#' @references
#' Linkage disequilibrium as a signature of selective sweeps.
#' Y. Kim and R. Nielsen (2004). Genetics, 167, 1513-1524.
#'
#' OmegaPlus: a scalable tool for rapid detection of selective
#' sweeps in whole-genome datasets.
#' N. Alachiotis, A. Stamatakis and P. Pavlidis (2012).
#' Bioinformatics Vol. 28 no. 17 2012, pages 2274-2275
#' doi:10.1093/bioinformatics/bts419
#'
#' @inheritParams sumstat_four_gamete
#' @param min_win The minimum distance from the grid point that a SNP must have
#' to be included in the calculation of omega.
#' @param max_win The maximum distance from the grid point that a SNP must have
#' to be included in the calculation of omega.
#' @param grid The number of points for which omega is calculated on each
#' locus. Should be significantly lower than the locus length.
#' @param binary The path of the binary for OmegaPlus. If set to "automatic",
#' coala will try to find a binary called "OmegaPlus" using the PATH
#' environment variable.
#' @return A data frame listing of locus, genetic position and the
#' calculated omega value.
#' @export
#' @template summary_statistics
#' @examples
#' \dontrun{
#' model <- coal_model(20, 1, 50000) +
#' feat_recombination(50) +
#' feat_mutation(1000) +
#' feat_selection(strength_A = 1000, time = 0.03) +
#' sumstat_omega()
#' stats <- simulate(model)
#' plot(stats$omega$omega, type = "l")}
sumstat_omega <- function(name = "omega", min_win = 100, max_win = 1000,
                          grid = 1000, binary = "automatic",
                          transformation = identity) {
  # Thin constructor wrapper around the R6 statistic class; arguments are
  # forwarded unchanged (named for clarity).
  stat_omega_class$new(
    name = name,
    min_win = min_win,
    max_win = max_win,
    grid = grid,
    binary = binary,
    transformation = transformation
  )
}
# TRUE when an OmegaPlus executable can be located (PATH or OMEGAPLUS).
has_omega <- function() {
  binary <- search_executable("OmegaPlus", envir_var = "OMEGAPLUS")
  !is.null(binary)
}
|
e23e1bcc46e22751f3d8b22649ba47fe831ea6f9
|
6929d12941949f290d98379913048ee93143e491
|
/R/fct_get_filtered_stocks.R
|
3702ba9f0fde988ffd43a6125e5a48b3d07498c4
|
[
"MIT"
] |
permissive
|
srmatth/value.investing
|
9aeaaa476e7731a6cf5a6012f4c0ae38f016dc42
|
720dbd7ba4f38c2ca896515d72cb2bfa4009582d
|
refs/heads/master
| 2023-07-19T04:06:13.679639
| 2021-09-18T16:46:33
| 2021-09-18T16:46:33
| 275,306,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,443
|
r
|
fct_get_filtered_stocks.R
|
#' Get Filtered Stocks
#'
#' Scrapes the FinViz.com stock screener and returns the stocks matching
#' the given filters. Pages through the screener 20 rows at a time and
#' stops when a page repeats the previous page's last ticker (i.e. no new
#' rows remain).
#'
#' @param geo FinViz geography filter code (e.g. "geo_usa").
#' @param ind FinViz industry filter code.
#' @param sec FinViz sector filter code.
#' @param exchange FinViz exchange filter code (e.g. "exch_any").
#' @param cap FinViz market-capitalization filter code.
#' @param fa_div Dividend-yield filter value (appended to "fa_div_").
#' @param fa_pe Price/earnings filter value.
#' @param fa_peg PEG-ratio filter value.
#' @param fa_ps Price/sales filter value.
#' @param fa_pb Price/book filter value.
#' @param fa_pc Price/cash-flow filter value.
#' @param fa_eps5years 5-year EPS growth filter value.
#' @param fa_sales5years 5-year sales growth filter value.
#' @param fa_debteq Debt/equity filter value.
#' @param fa_payoutratio Payout-ratio filter value.
#' @param all_data If TRUE, request all screener columns (0:70) and convert
#'   the numeric ones; if FALSE, request only ticker, company and price
#'   (columns "1,2,65").
#'
#' @return A data.frame of screener rows, one per matching stock.
#' @export
get_filtered_stocks <- function(geo = "geo_usa",
                                ind = "ind_any",
                                sec = "sec_any",
                                exchange = "exch_any",
                                cap = "cap_any",
                                fa_div = "any",
                                fa_pe = "any",
                                fa_peg = "any",
                                fa_ps = "any",
                                fa_pb = "any",
                                fa_pc = "any",
                                fa_eps5years = "any",
                                fa_sales5years = "any",
                                fa_debteq = "any",
                                fa_payoutratio = "any",
                                all_data = TRUE) {
  # create the data frame that will be populated
  stock_info <- data.frame(stringsAsFactors = FALSE)
  # 7531 is presumably an upper bound on the screener's row count at the
  # time of writing — TODO confirm; the duplicate-ticker check below is
  # what actually terminates the loop.
  for (i in seq(1, 7531, by = 20)) {
    tryCatch({
      logger::log_info("Reading entries {i} through {i + 19}")
      # Build the screener URL: filter codes joined with commas, `r` is the
      # first row of the page, `c` the requested column ids.
      page <- stringr::str_c(
        "https://www.finviz.com/screener.ashx?v=152&f=",
        exchange,
        ",",
        cap,
        ",",
        geo,
        ",",
        ind,
        ",",
        sec,
        ",fa_div_", fa_div,
        ",fa_pe_", fa_pe,
        ",fa_peg_", fa_peg,
        ",fa_ps_", fa_ps,
        ",fa_pb_", fa_pb,
        ",fa_pc_", fa_pc,
        ",fa_eps5years_", fa_eps5years,
        ",fa_sales5years_", fa_sales5years,
        ",fa_debteq_", fa_debteq,
        ",fa_payoutratio_", fa_payoutratio,
        "&r=",
        i,
        "&c=",
        dplyr::if_else(
          all_data,
          stringr::str_c(0:70, collapse = ","),
          "1,2,65"
        )
      )
      # The results table is the 10th <table> on the page (layout-dependent;
      # will break if FinViz changes its markup).
      new_data <- xml2::read_html(page) %>%
        rvest::html_nodes("table") %>%
        `[[`(10) %>%
        rvest::html_table(header = TRUE) %>%
        magrittr::set_colnames(snakecase::to_snake_case(colnames(.)))
      # check to see if the data has already been saved, if so jump out of the loop
      if (i == 1) {
        stock_info <- rbind(stock_info, new_data)
      } else if (new_data %>% dplyr::slice(dplyr::n()) %>% dplyr::pull(ticker) ==
                 stock_info %>% dplyr::slice(dplyr::n()) %>% dplyr::pull(ticker)) {
        logger::log_success("Finished retrieving data, the filters you set returned {nrow(stock_info)} stocks")
        break
      } else {
        stock_info <- rbind(stock_info, new_data)
      }
      # brief pause between requests to avoid hammering the site
      Sys.sleep(.5)
    },
    error = function(e) {
      # log and continue with the next page rather than aborting the scrape
      logger::log_error("There was an error with this page: {e}")
    })
  }
  if (all_data) {
    logger::log_info("Cleaning data...")
    # Convert the numeric screener columns (market_cap through volume) with
    # the project helper string_to_numeric, keeping the identifier columns
    # (ticker through country) as-is.
    subset <- stock_info %>%
      dplyr::select(market_cap:volume) %>%
      purrr::map(string_to_numeric) %>%
      tibble::as_tibble()
    stock_info <- stock_info %>%
      dplyr::select(ticker:country) %>%
      cbind(subset)
    logger::log_success("Done!")
  }
  return(stock_info)
}
|
16972366234e2c5e485d7b44c6a8de6536ac5841
|
cd0418e481f24acd9ef0ddd76fc454bf8c7509ec
|
/man/expectedDist.Rd
|
992f64360ba4ba6de373aca8be1a1290bf585a9d
|
[] |
no_license
|
MarioniLab/sarlacc
|
7dbacb52a2d7fe311262eb1344a8ee75138dea8a
|
712fdbd11e47205f0714df446cc60bd56354b64c
|
refs/heads/master
| 2021-07-14T22:39:52.159291
| 2019-02-27T15:22:35
| 2019-02-27T15:22:35
| 108,133,692
| 12
| 6
| null | 2018-07-06T12:18:25
| 2017-10-24T13:51:07
|
R
|
UTF-8
|
R
| false
| false
| 1,842
|
rd
|
expectedDist.Rd
|
\name{expectedDist}
\alias{expectedDist}
\title{Calculate expected distances}
\description{Calculate expected distances between subsequences of the adaptor that should be identical across reads.}
\usage{
expectedDist(sequences, max.err=NA)
}
\arguments{
\item{sequences}{A \linkS4class{QualityScaledDNAStringSet} of read subsequences corresponding to constant regions of the adaptor.}
\item{max.err}{A numeric scalar specifying the maximum error probability above which bases will be masked.}
}
\details{
The aim is to provide an expectation for the distance for identical subsequences, given that all reads should originate from molecules with the same adaptor.
In this manner, we can obtain an appropriate threshold for \code{\link{umiGroup}} that accounts for sequencing and amplification errors.
We suggest extracting a subsequence from the interval next to the UMI region.
This ensures that the error rate in the extracted subsequence is as similar as possible to the UMI at that position on the read.
Pairwise Levenshtein distances are computed between all extracted sequences.
This is quite computationally expensive, so we only process a random subset of these sequences by setting \code{number}.
If \code{align.stats} contains quality scores, bases with error probabilities above \code{max.qual} are replaced with \code{N}s.
Any \code{N}s are treated as missing and will contribute a mismatch score of 0.5, even for matches to other \code{N}s.
}
\value{
A numeric vector of pairwise distances between sequences that \emph{should} be identical.
}
\author{
Florian Bieberich,
with modifications by Aaron Lun
}
\seealso{
\code{\link{extractSubseq}} to extract a subsequence.
}
\examples{
constants <- c("ACTAGGAGA",
"ACTACGACCA",
"ACTACGATA",
"ACACGACA")
expectedDist(constants)
}
|
a104fda89d089fbf923dea12de88c40e860f6974
|
152c16bd35fe3536441db22f5c1634cf7660fe36
|
/scripts/evaluate_all_methods.R
|
f1cf3cf38c33b731c306d91b45ed36f0686c4632
|
[] |
no_license
|
computbiolgeek/kcnq1_predictive_modeling
|
970e58747fffd862ae1c6056708b8c0b36f15a42
|
9d3d78c6be164b9c0a083e77720dcd66f48d03d7
|
refs/heads/master
| 2020-06-25T08:28:18.840223
| 2017-10-26T23:00:38
| 2017-10-26T23:00:38
| 94,237,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,145
|
r
|
evaluate_all_methods.R
|
#!/usr/bin/env Rscript
# cross-validate optimal cutoff of SIFT
library(boot)
library(ROCR)
# load the input file (one prediction column per method plus a `label`
# column — assumed 0/1; confirm against the CSV header)
data <- read.csv(file = "dataset_all_method_evaluation.csv", header = TRUE, sep = ",")
# split the dataset into negatives and positives, drop levels that do not occur
subsets <- split(x = data, f = as.factor(data$label), drop = TRUE)
negatives <- subsets[[1]]  # first (sorted) label level
positives <- subsets[[2]]  # second (sorted) label level
# Evaluate one prediction method: choose the cutoff that maximizes the
# Matthews correlation coefficient (MCC) on the training set, binarize the
# test-set predictions at that cutoff, and compute performance measures.
#
# train, test: data frames with a numeric `prediction` column and a 0/1
#              `label` column.
# Returns c(auc, mcc, ppv, npv, tpr, tnr, acc); mcc is NA when it is
# missing or negative.
compute_measures <- function(train, test) {
  # find the optimimal cut-off value from the training set
  rocr <- prediction(predictions = train$prediction, labels = train$label)
  rocr.perf <- performance(prediction.obj = rocr, measure = "mat", x.measure = "cutoff")
  optimal.cutoff.index <- which.max(unlist(rocr.perf@y.values))
  optimal.cutoff <- unlist(rocr.perf@x.values)[optimal.cutoff.index]
  # compute the Matthews correlation coefficient on the test set
  test$prediction <- ifelse(test$prediction <= optimal.cutoff, 0, 1)
  rocr.test <- prediction(predictions = test$prediction, labels = test$label)
  rocr.test.perf <- performance(prediction.obj = rocr.test, measure = "mat", x.measure = "cutoff")
  # mcc.unlisted is a vector with all but one elements is NaN
  mcc.unlisted <- unlist(rocr.test.perf@y.values)
  mcc <- mcc.unlisted[2]
  # BUG FIX: the original line
  #   ifelse(length(mcc) == 0 || mcc < 0, mccs[i, j] <- NA, mccs[i, j] <- mcc)
  # assigned into the undefined globals mccs/i/j (a runtime error) and never
  # updated the local `mcc`.  Treat a missing, NaN, or negative MCC as NA.
  if (length(mcc) == 0 || is.na(mcc) || mcc < 0) {
    mcc <- NA
  }
  # add tpr
  tpr.unlisted <- unlist(performance(prediction.obj = rocr.test, measure = "tpr")@y.values)
  tpr <- tpr.unlisted[2]
  # add tnr
  tnr.unlisted <- unlist(performance(prediction.obj = rocr.test, measure = "tnr")@y.values)
  tnr <- tnr.unlisted[2]
  # add ppv
  ppv.unlisted <- unlist(performance(prediction.obj = rocr.test, measure = "ppv")@y.values)
  ppv <- ppv.unlisted[2]
  # add npv
  npv.unlisted <- unlist(performance(prediction.obj = rocr.test, measure = "npv")@y.values)
  npv <- npv.unlisted[2]
  # add accuracy
  acc.unlisted <- unlist(performance(prediction.obj = rocr.test, measure = "acc")@y.values)
  acc <- acc.unlisted[2]
  # compute auc and store it in the numeric vector aucs
  auc <- unlist(performance(prediction.obj = rocr.test, measure = "auc")@y.values)
  # return a vector of all measures
  return(c(auc, mcc, ppv, npv, tpr, tnr, acc))
}
# Loop over every method column in `train` (all columns except the last,
# which is assumed to be `label`) and pair it with the label column.
# NOTE(review): this is scaffolding — the call to compute_measures() is
# commented out and `head(train.cur)` is evaluated but discarded, so the
# function currently has no observable effect.
compute_measures_all <- function(train, test) {
  k <- ncol(train) - 1
  for(i in 1:k) {
    # one method's predictions plus the shared labels
    train.cur <- cbind(train[[i]], train[[k+1]])
    head(train.cur)
    colnames(train.cur) <- c("prediction", "label")
    #measures <- compute_measures(train = train.cur, test = test)
    #print(measures)
  }
}
# Repeated (2x) 3-fold cross-validation, stratified by class: the global
# `negatives` and `positives` frames are shuffled and split into folds
# separately so every fold keeps the original class balance.
# set random seed for reproducible shuffles
set.seed(5255599)
for(i in 1:2) {
  # shuffle each subset
  negatives <- negatives[sample(x = 1:nrow(negatives)),]
  positives <- positives[sample(x = 1:nrow(positives)),]
  # create folds: cut() assigns each row an integer fold id in 1..3
  neg.folds <- cut(x = 1:nrow(negatives), breaks = 3, labels = FALSE)
  pos.folds <- cut(x = 1:nrow(positives), breaks = 3, labels = FALSE)
  for(j in 1:3) {
    # create test set and training set: fold j is held out for testing,
    # the remaining folds form the training set
    test <- rbind(negatives[neg.folds == j,], positives[pos.folds == j,])
    train <- rbind(negatives[neg.folds != j,], positives[pos.folds != j,])
    compute_measures_all(train, test)
  }
}
|
d5391c934164e0b5c29a8e7ddaf0cff9aad7e98c
|
93145f2c98ab2a3226cf9eec8ce81b22bf76ec6a
|
/NEP-PEP.pattern.R
|
afc360edf8be6eef8bd7ee69d4919f4b8d201c62
|
[] |
no_license
|
yanjunzan/expression_R
|
a4a334804043f2a07d7cafca2be4a82608b6de2e
|
c60672b30a25fe61fabd6d4300cf1aa77e48368d
|
refs/heads/master
| 2018-01-12T08:00:20.461355
| 2018-01-11T13:42:24
| 2018-01-11T13:42:24
| 43,816,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,041
|
r
|
NEP-PEP.pattern.R
|
# Read the expression matrix: one character id column followed by 24 numeric
# columns — presumably three replicate blocks of 8 time points each (the
# replicate columns for time point j are at offsets 2, 10 and 18) — TODO
# confirm against the source data.
c <- read.table("~/Desktop/C_matrix.txt",header=T,sep = "\t",stringsAsFactors = F,colClasses = c("character",rep("numeric",24)))
# out: 88 genes x 8 time points (0,1,2,4,5,7,10,14 days), filled with the
# per-time-point mean over the three replicate columns.
out <- array(NA,c(88,8))
c[,1] -> rownames(out)
colnames(out)<- c(0,1,2,4,5,7,10,14)
for( i in 1:nrow(c)){
  for (j in 0:7){
    out[i,j+1]<- mean(as.numeric(c[i,c(2,10,18)+j]),na.rm=T)
  }
}
write.table(out,file="~/Desktop/mean.txt",sep = "\t",row.names = T,quote = F)
# keep only the first six time points (days 0..7)
out <- out[,c(1:6)]
#id <- which(out[,1]< out[,2] & out[,1]< out[,3] & out[,3]>out[,4] & out[,2]>out[,4])
# p1 (NEP profile): expression rises after day 0 and drops again by day 4
id <- which(out[,1]< out[,2] & out[,1] < out[,3] & out[,4]< out[,3] & out[,2]>out[,4])
out.p <- out[id,]
p1 <- rownames(out)[id]
# p2 (PEP profile): expression keeps rising towards the later time points
id2 <- which(out[,4]< out[,6] & out[,1]< out[,4] )
p2 <- rownames(out)[id2]
# dir.create("~/Desktop/jy/")
#
# for(i in 1:nrow(out.p)){
#   name<- paste("~/Desktop/jy/",rownames(out.p)[i],"pattern1.png",sep="")
#   png(name)
#   plot(out.p[i,],type="l",col="red",main=rownames(out.p)[i],xlab = "",ylab = "",frame.plot = F)
#   dev.off()
# }
# write.table(rownames(out.p),file="~/Desktop/list1.txt",sep = "\t",row.names = T,quote = F)
# write.table(p1,file="~/Desktop/jy/p1.txt",sep = "\t",row.names = T,quote = F)
# write.table(p2,file="~/Desktop/jy/p2.txt",sep = "\t",row.names = T,quote = F)
#
# ##
#pdf(file = "~/Desktop/jy/test.pdf",width = 6,height = 6)
# Rank-profile plot: one jittered rank trajectory per gene; NEP genes in red,
# PEP genes in green, all other genes in translucent grey.
par(xpd=T,mar=c( 5.1,4.1,4.1,2.1))
x <- seq(0.5,5.5)
for(i in 1:nrow(out)){
  y<- jitter(rank(out[i,]))
  if(i ==1){
    # the first gene also sets up the (unlabelled) plotting frame
    #plot(0,frame.plot = F,type = "n",col=rgb(0,0,0,0.14),ylim =c(0,8),xlab = 1:7,ylab = 1:8,main = "")
    plot(x,y,frame.plot = F,type = "l",col=rgb(0,0,0,0.14),xlim = c(0,7),ylim =c(0,7),xlab = "",ylab = "",main = "",xaxt='n',yaxt='n',xaxs = "i",yaxs = "i")
  }else if(rownames(out)[i] %in% p1 ){
    points(x,y,type = "l",col=rgb(1,0,0,0.5),lwd=1)
  }else if(rownames(out)[i] %in% p2 ){
    points(x,y,type = "l",col=rgb(0,1,0,0.5),lwd=1)
  }else{
    points(x,y,type = "l",col=rgb(0,0,0,0.14),lwd=0.33)
  }
}
# custom axes: tick marks drawn by axis(), labels placed manually with text()
axis(1,at =0:6,labels = F,tck=-0.00,lwd = 0.33)
text(x=seq(0.5,5.5,1),labels=as.numeric(colnames(out)),y=rep(-0.5,8),srt=0,cex=0.8)
text(x=2.5,labels="Time (days)",y=-1,srt=0,cex=1)
par(xpd=T)
axis(2,at = 0:7,labels = F,tck=0.01,lwd=0.33)
text(x=-0.3,y=0:7,srt=90,labels = 0:7,cex=0.8)
text(x=-0.6,4,srt=90,labels = "Expression rank",cex=1)
legend(5.5,4.5,legend = c("NEP profile","PEP profile"),fill = c("red","green"),border="white",box.lwd = 0,box.col = "white",bg = "white")
#dev.off()
#
# id <- which(out[,1]< out[,2] & out[,2]< out[,3] & out[,3]<out[,4] & out[,4]<out[,5])
# out.p <- out[id,]
#
# for(i in 1:nrow(out.p)){
#   name<- paste("~/Desktop/jy/",rownames(out.p)[i],"pattern2.png",sep="")
#   png(name)
#   plot(out.p[i,],type="l",col="red",main=rownames(out.p)[i],xlab = "",ylab = "",frame.plot = F)
#   dev.off()
# }
#
# ####all
# dir.create("~/Desktop/jy/all/")
# for(i in 1:nrow(c)){
#   name<- paste("~/Desktop/jy/","all/",rownames(out)[i],"all.png",sep="")
#   png(name)
#   plot(out[i,],type="l",col="red",main=rownames(out)[i],xlab = "",ylab = "",frame.plot = F)
#   dev.off()
# }
|
c64146054b7cfe42c7fc2f5f0787ad8bf039d987
|
90b053ff36d4abb9df860c667efdeed69391e597
|
/usefulCodeSnippets.R
|
8c69acf078d60ddc0f54b2527cc608eae9d823ea
|
[] |
no_license
|
hf-thompson-lab/LANDIS_Klamath_REU2018
|
27cd020b60587276309641f09afac8d2140fc565
|
3ff7efb14222e8d3ae7776cc2da63bca6eca3125
|
refs/heads/master
| 2023-08-16T12:45:25.757349
| 2018-08-01T15:29:38
| 2018-08-01T15:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,387
|
r
|
usefulCodeSnippets.R
|
#Evan Waldmann
#6/28/18
# Useful code with descriptions
## Section - ploting with pauses ----
sim <- function(path, n=100, pause=0.25) {
  # Animate a sequence of raster images: plots paste0(path, i, ".img") for
  # i = 1..n, pausing `pause` seconds between frames.
  #
  # path  - filename prefix; frame i is read from paste0(path, i, ".img")
  # n     - number of frames to display
  # pause - delay between frames, in seconds
  #
  # Returns the last raster, invisibly.
  for (i in seq_len(n)) {  # seq_len() is safe when n == 0 (1:n is not)
    x <- raster(paste0(path, i, ".img"))
    plot(x, legend = TRUE, asp = NA, main = i)
    dev.flush()
    Sys.sleep(pause)
  }
  invisible(x)
}
# Example invocations of sim() on LANDIS output directories.
# NOTE(review): hard-coded Windows paths and setwd() make this machine-specific.
setwd("C:/Users/hfintern/Desktop/Klamath_ForestXSiskiyouCounty/Saved Output/PnET+Fire+Fuel+NoClimate 50Years/")
setwd("C:/Users/hfintern/Desktop/Klamath_ForestXSiskiyouCounty/7-10 test/land-use-maps/")
sim(path = "land-use-", n=91, pause=1)
###########################################################
# Changing initial map codes to exclude all but 5 species #----
###########################################################
# Keep only lines of the initial-communities file that mention one of the
# retained species (or the LandisData/MapCode structural keywords); every
# other line is blanked out.
dir<- "C:/Users/hfintern/Desktop/movedFiles/BASE_historicalClimate/ICsimplified_E4_v3_270M.txt"
writedir<- "C:/Users/hfintern/Desktop/ICsimplified_E4_v4_270M_5Species.txt"
dir
lines<- readLines(dir)
lines
speciesToKeep<- c("LandisData", "MapCode","ABGRC","ACMA3","ALRU2","PSME","PIMO3")
for (i in 1: length(lines))
{
  flag <- FALSE
  for (j in 1:length(speciesToKeep))
  {
    if (grepl(speciesToKeep[j], lines[i])) #looks for all the species and changes flag if we should keep the line
    {
      flag<- TRUE
    }
  }
  if (!flag) #if line was not flagged then delete it.
  {
    lines[i] <- ""
  }
}
writeLines(lines, writedir)
# masking the empty inital communites out
getwd()
root.path <- "C:/Users/hfintern/Desktop/"
initialCommFilePath<- "ICsimplified_E4_v4_270M_5Species.txt"
setwd(root.path)
lines<- readLines(initialCommFilePath)
lines
newLines <- lines[ lines!="" ]
newLines
# Two consecutive "MapCode" lines indicate a map code with no species left
# under it; collect those codes so the corresponding cells can be masked.
boolVect <- grepl("MapCode" , newLines)
newVect <- rep(F, length(boolVect))
for (i in 1:(length(boolVect)-1))
{
  if (boolVect[i] ==T & boolVect[i+1]==T)
  {
    newVect[i] <- T
  }
}
codes <- newLines[newVect]
# the numeric code starts at character 9 of a "MapCode " line
codes <- as.numeric(substring(codes,9))
initCommRaster <- raster("initComClipped16S.img")
plot(initCommRaster)
# mark every cell whose map code became empty with the sentinel 5000
values(initCommRaster)[which(values(initCommRaster) %in% codes)] <- 5000
ecoReg <- raster("ecoregclip.tif")
plot(ecoReg)
freq(ecoReg)
# mask the same cells out of the ecoregion raster
values(ecoReg)[which(values(initCommRaster) ==5000)] <- NA
writeRaster(ecoReg, "ecoregclipMasked", overwrite =T, format="HFA", datatype="INT2S", NAvalue=0)
# i then had to drag map into arc map and use copy raster to get landis to read it properly
#masking empty initial communitites END
|
2284f68f302063203097a38f1e173e004e920ded
|
7ccb8a44b40362b231ed81306fd4783374efdd6a
|
/cachematrix.R
|
77b792db336d80efa8e6e6331b12c0011de643b6
|
[] |
no_license
|
valerielim/JohnHopkins-RProgramming
|
bdf68dcfa5f3dd78499f5d9e03742d040889087e
|
416f8309b9e1ac87278ecaae65094d20cef2f7a1
|
refs/heads/master
| 2021-08-07T12:42:48.493537
| 2017-11-08T05:55:14
| 2017-11-08T05:55:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
cachematrix.R
|
# --------------------------------------------------------------------- #
# Title: Programming Assignment 2
# Date: 4 Nov 2017
# By: Valerie Lim (for Coursera)
# --------------------------------------------------------------------- #
## This script allows user to store a cached Matrix, and it's find its
## Inversed Matrix.
## Usage - initialize a Matrix, than send it to the makeCacheMatrix and
## save the returned list. Now you can use the list$get() to get the
## original matrix, and list$getinverse() to get the Inversed.
# (1)
## For a square matrix, caches it, and create the list to enable
## cached retrival of the Matrix and it's inverse at a later stage.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  #
  # Returns a list of accessors:
  #   set(y)          - replace the stored matrix (invalidates the cache)
  #   get()           - return the stored matrix
  #   setinverse(inv) - store a computed inverse in the cache
  #   getinverse()    - return the cached inverse, or NULL if not yet set
  inv <- NULL
  set <- function(y){
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  # parameter renamed from `solve` (which shadowed base::solve) to `inverse`
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse=getinverse)
}
# (2)
## Gets the cach object (the list that is created in makeCacheMatrix),
## calculates the Inverse matrix and fills that in the list.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  ## object).  The inverse is computed at most once: a cached result is
  ## reused; otherwise solve() is called and the result stored back.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached inverse matrix")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
# --------------------------------------------------------------------- #
|
366361713ac4c9998f279d85ec55b6f2b013f8f0
|
497e3dbc7b40c28246576b8f474451e3e44e69a4
|
/Project/utilities_optim.R
|
8130fe135d34f6929871fd09251eec3807690cb1
|
[] |
no_license
|
ibrahim00017/Optimisation_AI_Cotonou2021
|
a822d387cc302d629a31eb377eed159a77eb91d4
|
b2b46a3bdc0368aa4bec47ce784b2de800615824
|
refs/heads/main
| 2023-06-26T11:27:46.066406
| 2021-07-28T20:42:58
| 2021-07-28T20:42:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,453
|
r
|
utilities_optim.R
|
# set of utilities for simple optimization program
#
# Rodolphe Le Riche, CNRS LIMOS, july 2021
#
# calculate f and gradient of f by forward finite difference
f.gradf <- function(x,f,h=1.e-8){
  # Evaluate f at x and approximate its gradient by forward finite
  # differences with step h.
  #
  # x - numeric vector, the point of evaluation
  # f - scalar-valued function of a numeric vector
  # h - finite-difference step size
  #
  # Returns list(fofx = f(x), gradf = forward-difference gradient of f at x).
  d<-length(x)
  res <- list()
  res$gradf <- rep(NA,d)
  res$fofx <- f(x)
  for (i in seq_len(d)){  # seq_len() handles the degenerate d == 0 case
    xp <- x
    xp[i] <- x[i]+h
    res$gradf[i] <- (f(xp)-res$fofx)/h
  }
  return(res)
}
# record points online
updateRec <- function(arec,x,f,t){
  # Append one optimisation iterate to the record `arec`:
  # X collects the points (row-bound), F the objective values, Time the
  # timestamps.  Returns the updated record.
  arec$X <- if (is.null(arec$X)) x else rbind(arec$X, x)
  arec$F <- if (is.null(arec$F)) f else c(arec$F, f)
  arec$Time <- if (is.null(arec$Time)) t else c(arec$Time, t)
  arec
}
# L2 norm
l2norm <- function(x){
  # Euclidean (L2) norm of a numeric vector.
  sqrt(sum(x * x))
}
# plot contour of function when d==2
plot_contour <- function(LB,UB,f){
  # Draw a 20-level contour plot of f over the rectangle [LB, UB] (2-D only),
  # evaluating f on a 100 x 100 regular grid.
  n.pts <- 100
  ax1 <- seq(LB[1], UB[1], length.out = n.pts)
  ax2 <- seq(LB[2], UB[2], length.out = n.pts)
  grid.xy <- expand.grid(ax1, ax2)
  zvals <- matrix(apply(grid.xy, 1, f), n.pts)
  contour(ax1, ax2, zvals, nlevels = 20, xlab = "x1", ylab = "x2")
}
# increasing sequence: useful to plot first points and then fewer and fewer
inc.geom.seq <- function(from=1,to=1000,coef=1.4)
{
  # Build an increasing, duplicate-free sequence of rounded geometric steps
  # starting at round(from): each candidate is the previous raw value times
  # coef, rounded; a candidate is kept only when it differs from the last
  # kept value.  Generation stops once a kept candidate exceeds `to`, and
  # that overshooting value is dropped before returning.
  seq_out <- round(from)
  cur <- from
  last <- seq_out
  repeat {
    cur <- cur * coef
    candidate <- round(cur)
    if (candidate != last) {
      seq_out <- c(seq_out, candidate)
      last <- candidate
      if (candidate > to) break
    }
  }
  seq_out[-length(seq_out)]
}
|
8db412ca1ac0bdad2660ed9a774eaf05e0c99124
|
b0853612d9c52495cd86ff867295cb3c98320081
|
/K-NN알고리즘.R
|
878e1c08bb40fd03eee8ed702cd886cc9a775dc5
|
[] |
no_license
|
OHSEHEYON/machine-learning
|
0519bcc442054885a2ea44e24e75d987278afac2
|
ff4f3fcb19c0a8a22860be58180615edd2898f75
|
refs/heads/master
| 2022-11-12T19:41:00.418452
| 2020-07-07T06:39:16
| 2020-07-07T06:39:16
| 276,387,566
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 1,587
|
r
|
K-NN알고리즘.R
|
# K-NN classification of the Wisconsin breast-cancer data set.
wbcd <- read.csv("C:\\R2\\dataset\\wisc_bc_data.csv")
str(wbcd)
# The first variable is the id; it carries no useful information, so drop it.
wbcd <- wbcd[-1]
str(wbcd)
# The labels B and M are not intuitive; relabel the factor levels with
# mapvalues().  Then check the class proportions with prop.table().
library(plyr)
wbcd$diagnosis <- mapvalues(wbcd$diagnosis, from=c('M','B'), to=c('악성','양성'))
prop.table(table(wbcd$diagnosis))
# Preprocessing - min-max normalisation (values between 0 and 1)
# Apply the normalisation function to columns 2-31 of wbcd with lapply()
# and collect the result into a data frame.
normalize <- function(x){
  return((x-min(x)) / (max(x)-min(x)))
}
wbcd_n <- as.data.frame(lapply(wbcd[2:31], normalize))
summary(wbcd_n)
# Preprocessing - create training and test data sets:
# first 469 rows for training, remaining 100 for testing.
# Labels are split into matching training and test label vectors.
wbcd_train <- wbcd_n[1:469,]
wbcd_test <- wbcd_n[470:569,]
train_labels <- wbcd[1:469,1]
test_labels <- wbcd[470:569,1]
# Run K-NN with knn(); k = 21, close to the square root of the
# training-set size (469).
library(class)
fit <- knn(train=wbcd_train, test=wbcd_test, cl=train_labels, k=21)
summary(fit)
# Model evaluation: check how well the predicted classes in `fit` agree
# with the known values in `test_labels`.  Reported model accuracy is 0.98.
library(gmodels)
CrossTable(x=test_labels, y=fit, prop.chisq=FALSE)
|
670474453df6fde7d87e413c53c0c86b9f7d3658
|
c686ceafe8628946f1fdb60a7e9513a273747b3f
|
/R/PCA.R
|
4ac12ab9df59e6e926042f0b7ef071635787582b
|
[] |
no_license
|
cran/MVar.pt
|
a9edfb289984c39b144fad4f17629d586ce7b5c0
|
663e5e1cd34e3f0a00a31ca2597807c5b49c3f80
|
refs/heads/master
| 2023-08-31T09:10:37.861275
| 2023-08-19T15:02:31
| 2023-08-19T15:30:37
| 28,622,302
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,881
|
r
|
PCA.R
|
PCA <- function(data, type = 1) {
  # Performs Principal Component Analysis (PCA).
  # Developed by Paulo Cesar Ossani, 07/2013.
  # Input:
  #   data - data to be analysed (data frame or matrix)
  #   type - 1 for analysis using the covariance matrix (default)
  #          2 for analysis using the correlation matrix
  # Returns a list with:
  #   mtxC      - covariance or correlation matrix, according to `type`
  #   mtxAutvlr - eigenvalues (variances) with proportions and cumulative proportions
  #   mtxAutvec - eigenvectors - the principal components
  #   mtxVCP    - covariances of the principal components with the original variables
  #   mtxCCP    - correlations of the principal components with the original variables
  #   mtxscores - matrix of principal-component scores

  if (!is.data.frame(data) && !is.matrix(data))
     stop("Entrada 'data' esta incorreta, deve ser do tipo dataframe ou matriz. Verifique!")

  if (type!=1 && type!=2)
     stop("Entrada para 'type' esta incorreta, deve ser numerica, sendo 1 ou 2. Verifique!")

  # type 2: standardise the data, so the covariance of the scaled data is
  # the correlation matrix of the original data
  if (type == 2) data <- scale(data) # normalise the data

  MC <- cov(data) # covariance matrix

  num.comp <- min(dim(data)) # number of components

  ## Spectral decomposition of the covariance matrix
  MAV <- eigen(MC) # eigenvalues and eigenvectors
  MAutoVlr <- MAV$values[1:num.comp] # eigenvalues - variances
  MAutoVec <- MAV$vectors # eigenvectors - principal components

  ## Table of variances (eigenvalue, % variance, cumulative % variance)
  MEigen <- as.data.frame(matrix(NA, length(MAutoVlr), 3))
  rownames(MEigen) <- paste("Comp", 1:length(MAutoVlr))
  colnames(MEigen) <- c("Autovalor", "% da variancia","% acumulada da variancia")
  MEigen[, "Autovalor"] <- MAutoVlr
  MEigen[, "% da variancia"] <- (MAutoVlr/sum(MAutoVlr)) * 100
  MEigen[, "% acumulada da variancia"] <- cumsum(MEigen[,"% da variancia"])

  ## Label the eigenvector matrix (the principal components)
  colnames(MAutoVec) <- paste("Comp.", 1:nrow(MC), sep = " ")
  rownames(MAutoVec) <- colnames(data)

  ## Covariances of the principal components with the original variables
  VCP <- diag(MAutoVlr,nrow(MC),ncol(MC))%*%t(MAutoVec)
  rownames(VCP) <- paste("Comp", 1:nrow(MC))

  ## Correlations of the principal components with the original variables
  CCP <- diag(sqrt(MAutoVlr),nrow(MC),ncol(MC))%*%t(MAutoVec)%*%diag(1/sqrt(diag(MC)),nrow(MC),ncol(MC))
  colnames(CCP) <- colnames(data) # name the columns
  rownames(CCP) <- paste("Comp", 1:nrow(MC))

  Esc = as.matrix(data)%*%MAutoVec # principal-component scores
  rownames(Esc) <- rownames(data)

  Lista <- list(mtxC = MC, mtxAutvlr = MEigen,
                mtxAutvec = MAutoVec, mtxVCP = VCP,
                mtxCCP = CCP, mtxscores = Esc)

  return(Lista)
}
|
f3e8033a63920a12555a9b2d1c9ae39958033746
|
60a346e86bc9c39ef069d13f78f0f06400d0045f
|
/server.R
|
32ac7e1ecbeb301abdf9203dcd5220bfd31bb90d
|
[] |
no_license
|
ZainebYahya/EV_Shiny
|
490cf8a753e30587a1151639896525b4435dcd4d
|
5ffee2f8eb1ca99a37f9885fedfb8029c58f4a2e
|
refs/heads/main
| 2023-02-24T06:36:36.451631
| 2021-02-01T04:41:50
| 2021-02-01T04:41:50
| 334,832,853
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
server.R
|
# Shiny server: renders the EV-trip dashboard outputs from the global data
# frames `df`, `corr` and `df_map` (defined elsewhere, e.g. in global.R —
# TODO confirm).
server <- shinyServer(function(input, output, session) {
  # NOTE(review): this reactable() result is discarded — shiny outputs must
  # be assigned to output$<id>; as written this block has no effect.
  reactable(df,
            defaultPageSize = 5,
            bordered = TRUE,
            defaultColDef = colDef(footer = function(values) {
              if (!is.numeric(values)) return()
              sparkline(values, type = "box", width = 100, height = 30)
            })
  )
  # Box plot of battery-charging status by passenger count.
  output$boxplot1 <- renderPlot({
    ggplot(df, aes(x = no_passengers, y = battery_charg)) +
      geom_boxplot(fill = semantic_palette[["green"]]) +
      xlab("No of passenger") + ylab("Battery Charging Status")
  })
  # Correlogram of the precomputed correlation matrix `corr`.
  output$corplotall <- renderPlot({
    ggcorrplot(
      corr,
      hc.order = TRUE,
      type = "lower",
      lab = TRUE,
      lab_size = 3,
      method = "circle",
      colors = c("blue", "white", "red"),
      outline.color = "gray",
      show.legend = TRUE,
      show.diag = FALSE,
      title="Correlogram of Efficiency variables"
    )
  })
  # Scatter of charging occurrences vs full-charge efficiency, with
  # marginal histograms.
  output$dotplot2 <- renderPlot({
    p <- df %>%
      filter(fullcharge_efficiency>=0 & fullcharge_efficiency <=750) %>%
      ggplot( aes(x=fullcharge_efficiency, y=charging_occurrences, group=1)) +
      geom_point(color="#69b3a2", size=2, alpha=0.01) +
      theme(
        legend.position="none"
      )
    # add marginal histograms
    ggExtra::ggMarginal(p, type = "histogram")
  })
  # NOTE(review): `data` is not defined in this file as shown — this with()
  # call likely errors or captures base::data; and the plot_ly pipeline
  # below it is not assigned to any output, so its result is discarded.
  # Compute kde2d
  kd <- with(data, MASS::kde2d(x, y, n = 50))
  # Plot with plotly
  plot_ly(x = kd$x, y = kd$y, z = kd$z) %>% add_surface()
  # Heatmap of the `df_map` matrix.
  output$dotplot1 <- renderPlotly({
    plot_ly(x=colnames(df_map), y=rownames(df_map), z = df_map, type = "heatmap") %>%
      layout(margin = list(l=120))
  })
  # Trip distance vs battery charge, coloured by passenger count, with a
  # linear-model smoother.
  output$dotplot <- renderPlotly({
    ggplotly(
      ggplot(df, aes(x = trip_distance, y = battery_charg))
      + geom_point(aes(
        colour = factor(no_passengers)
      ))
      + geom_smooth(method = "lm") +
        coord_cartesian(ylim = c(-12, 12))
    )
  })
  output$searchtable <- renderDataTable(df)
  output$evtable <- renderReactable({reactable(df)})
})
|
234ce66c3340b4369a125ce1557180ead1a8afc7
|
7ad110193d4538f285b50051ebafe492cdc284bb
|
/tests/testthat/test-instant-metrics-format.R
|
798a40df92086286cddf8c045df08987e9e4218d
|
[
"MIT"
] |
permissive
|
glenn-m/promR
|
51e05d332ab7b6708a3fab5ef9fdf55e8d682ed7
|
111cf41791418fd7e4f208bc17d6d21f816d71c6
|
refs/heads/master
| 2023-04-04T18:20:20.373807
| 2023-03-16T00:44:42
| 2023-03-16T00:44:42
| 155,071,735
| 3
| 3
|
MIT
| 2020-09-19T04:16:04
| 2018-10-28T13:02:42
|
R
|
UTF-8
|
R
| false
| false
| 476
|
r
|
test-instant-metrics-format.R
|
# Shape tests for `metrics_current`, which is created by setup/helper code
# elsewhere in the test suite — these check structure, not values.
context("Instant metrics data is formatted correctly")

test_that(desc = "Instant metrics results are data frame",
          code = expect_is(object = metrics_current,
                           class = "data.frame"))

test_that(desc = "Instant metrics are named",
          code = expect_named(object = metrics_current))

# at least two rows are expected in the fixture
test_that(desc = "Instant metrics are not empty",
          code = expect_gte(object = nrow(metrics_current),
                            expected = 2))
|
a21b0a3d2aca1d74f8379e7b76383fdf456cdcdc
|
f932835f7a32fed4801c3593399ccb64846412a5
|
/code/day2/challenges_and_answers.R
|
0e65a960d84226d8c6d4760915936cfa21501c3b
|
[] |
no_license
|
ocean-tracking-network/2021-03-30-glatos-workshop
|
411b72ccbe5410e50660f430a5eec14d17d3182c
|
eb3b03357b8077e84435f9b5dd4ccc16d9ef894c
|
refs/heads/master
| 2023-05-08T11:19:24.625685
| 2021-04-06T21:13:41
| 2021-04-06T21:13:41
| 347,158,565
| 0
| 0
| null | 2021-04-06T21:13:41
| 2021-03-12T18:21:15
|
R
|
UTF-8
|
R
| false
| false
| 1,188
|
r
|
challenges_and_answers.R
|
# Basic Visualization and Plotting: CHALLENGES ---------------------------------
# GLATOS workshop 2021-03-31
# Instructor: Ryan Gosse

# Challenge 1 ----
# Create a bubble plot of the station in Lake Erie only. Set the bounding box using the provided nw + se cordinates and
# resize the points. As a bonus, add points for the other receivers in Lake Erie.
# Hint: ?detection_bubble_plot will help a lot
# Expects `detections_filtered` and `receivers` to exist from earlier steps.

erie_arrays <-c("DRF", "DRL", "DRU", "MAU", "RAR", "SCL", "SCM", "TSR") # Given
nw <- c(43, -83.75) # Given - northwest corner (lat, lon)
se <- c(41.25, -82) # Given - southeast corner (lat, lon)

# subset detections and receivers to the Lake Erie arrays
erie_detections <- detections_filtered %>% filter(glatos_array %in% erie_arrays)
erie_rcvrs <- receivers %>% filter(glatos_array %in% erie_arrays) # For bonus

erie_bubble <- detection_bubble_plot(erie_detections,
                                     receiver_locs = erie_rcvrs, # For bonus
                                     location_col = 'station',
                                     background_ylim = c(se[1], nw[1]),
                                     background_xlim = c(nw[2], se[2]),
                                     symbol_radius = 0.75,
                                     out_file = 'erie_bubbles_by_stations.png')
|
a11a2e591a755463fb96e3ff8df9222cdbff3ada
|
e9f08e6597d447cd57df5647f109795d14c7c152
|
/Bioinformatics/PCAcovariates.r
|
dbd73b014de5578f5d8fcf667324bbc2021e4e6b
|
[] |
no_license
|
Liuy12/SomeUsefulScripts
|
81634b7a7f524d06bd41d1874109544d99505cc6
|
8b638e0ea8e267e18588021cf65499425b884f3c
|
refs/heads/master
| 2023-01-28T12:23:33.863612
| 2023-01-04T22:19:45
| 2023-01-04T22:19:45
| 27,837,879
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,350
|
r
|
PCAcovariates.r
|
PCAcovariate <- function(normdata, sampleinfo, output, npcs){
  # For each of the first `npcs` principal components of `normdata` and each
  # covariate column of `sampleinfo`, plot PC score vs covariate and annotate
  # the association: Spearman correlation for numeric covariates, ANOVA
  # p-value with eta-squared effect size for categorical ones.  Writes a
  # npcs x ncol(sampleinfo) grid of panels to PDF and PNG under `output`.
  #
  # normdata   - expression matrix passed to Principalstats() (genes x samples)
  # sampleinfo - data frame of per-sample covariates, one column per covariate
  # output     - directory for PCAcovariate.pdf / PCAcovariate.png
  # npcs       - number of principal components to examine
  library(ggplot2)
  library(heplots)
  library(gridExtra)
  pcares <- Principalstats(normdata, method = 'pca', cvCutoff = 0.1, npcs=npcs)
  gps <- vector(mode = "list", length = npcs*ncol(sampleinfo))
  for(i in 1:npcs){
    for(j in 1:ncol(sampleinfo)){
      # numeric covariate: Spearman correlation test;
      # categorical: one-way ANOVA, with sqrt(eta^2) as the effect size
      if(is.numeric(sampleinfo[[j]])) cor1 <- cor.test(pcares[[1]][,i],sampleinfo[[j]],method = 's') else {
        df <- data.frame(y=pcares[[1]][,i],x=sampleinfo[[j]])
        mod1 <- aov(y~x,data=df)
        cor1 <- data.frame(p.value= summary(mod1)[[1]][1,5],estimate=sqrt(etasq(mod1,partial = F)[1,1]))
      }
      # highlight significant associations in red
      if(round(cor1$p.value,3) <=0.01) labcol <- "red" else labcol <- 'black'
      df <- data.frame(x=sampleinfo[[j]], y = pcares[[1]][,i],stringsAsFactors = F)
      gp <- ggplot() + geom_point(aes(x=x,y=y),data=df) +
        theme_classic() + labs(x=colnames(sampleinfo)[j],y=paste0('PC',i,' (',pcares[[2]][i],'%)')) +
        theme(axis.title = element_text(face = 'bold',size=15),axis.text = element_text(size=10)) +
        annotate("text", x=Inf, y = Inf,color=labcol,size=10,label = paste0('cor: ',formatC(cor1$estimate,digits = 2, format = 'e'), '; pval: ',formatC(cor1$p.value,digits = 2, format = 'e')), vjust=1, hjust=1)
      # panels are laid out row-major: row = PC, column = covariate
      gps[[(ncol(sampleinfo)*(i-1) + j)]] <- gp
    }
  }
  pdf(paste0(output, "/PCAcovariate.pdf"),useDingbats = F,height = npcs*6,width = ncol(sampleinfo)*6)
  grid.arrange(grobs=gps,nrow=npcs,ncol=ncol(sampleinfo))
  dev.off()
  png(paste0(output, "/PCAcovariate.png"),height = npcs*6,width = ncol(sampleinfo)*6,res = 300, units = 'in')
  grid.arrange(grobs=gps,nrow=npcs,ncol=ncol(sampleinfo))
  dev.off()
}
Principalstats <- function(dataMat, method, cvCutoff, npcs){
  # Ordination of samples (columns of dataMat) after log2 transform,
  # filtering of low-variability rows and column standardisation.
  #
  # dataMat  - numeric matrix, features in rows and samples in columns
  # method   - 'mds' for classical MDS; anything else runs PCA via prcomp
  # cvCutoff - rows with coefficient of variation <= cvCutoff are dropped
  # npcs     - number of components/dimensions to keep
  #
  # Returns list(coordinates data.frame, percent variance per component).
  dataMat <- log2(dataMat)
  row_cv <- apply(dataMat, 1, function(v) sd(v) / mean(v))
  dataMat <- scale(dataMat[which(row_cv > cvCutoff), ])
  if (method == 'mds') {
    fit <- cmdscale(dist(t(dataMat)), k = npcs, eig = TRUE)
    coords <- fit$points
    percent <- round(fit$eig / sum(fit$eig) * 100, 1)
  } else {
    fit <- prcomp(t(dataMat))
    coords <- fit$x[, 1:npcs]
    percent <- round((fit$sdev^2 / sum(fit$sdev^2)) * 100, 1)
  }
  list(as.data.frame(coords), percent)
}
|
3963918c6ec6291fa3bdc4272847e6bf1dd6c3f1
|
1f972b88bab3c8f930949d3db86747e1f0a4d517
|
/R/01_r_ps_run_msa_coverage_progress.R
|
15e57cfc52be05ed01abbb647390a013b039e739
|
[] |
no_license
|
achalak/kobohrtoolbox
|
0530ac5c694fa750d4f46aead7b9585fa20f6822
|
0e985beaa2e7d737fc5d7f804dd7312c08c5a868
|
refs/heads/master
| 2021-01-20T02:02:42.436576
| 2017-08-24T14:50:30
| 2017-08-24T14:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
r
|
01_r_ps_run_msa_coverage_progress.R
|
# Entry script: loads the packages and helper sources needed to run the MSA
# coverage/progress export pipeline, then sources the export steps.
#load libraries
library(httr)
library(jsonlite)
library(lubridate)
library(tidyverse)
library(stringr)
library(readxl) #read excel file
library(dplyr)
library(ggplot2)
library(rgdal)
library(openxlsx) #'write xlsx'

#load file r_kobo_utils.R file first
options(stringsAsFactors = FALSE)

#language setting - Arabic locale for text handling in the source data
Sys.setlocale(category = "LC_ALL",locale = "arabic")

# helper sources: authentication and utility functions
source("./R/r_ps_kobo_authenticate.R")
source("./R/r_func_ps_kobo_utils.R")
source("./R/r_func_ps_utils.R")

#--for progress data export-----
source("./R/r_ps_kobo_export_data_csv_selected_fields.R")
source("./R/r_ps_msa_data_coverage_summary.R")
|
95fb01e5f17a101c624e6ddc99467ba6a78fc70e
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/misc/gaussian-process/gp-predict.R
|
329b22a95671817715bf0e44a976d3d4237f89f0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 235
|
r
|
gp-predict.R
|
# Fit the Gaussian-process prediction model with Stan: read the R data dump,
# run 3 short chains, and print the covariance hyper-parameters.
library(rstan)
stan_dat <- read_rdump('gp-predict.data.R')
fit_predict <- stan(file="gp-predict.stan",
                    data=stan_dat,
                    iter=200, chains=3);
print(fit_predict, pars = c('rho','alpha','sigma'))
|
38a4f71945fd48908b02d8ab6b1e64a5e3f17dcf
|
3f1d1377ab5bcc2777db0e8b68209374f92bf6f8
|
/R/curlAuthConstants.R
|
b7502e51d2ee357aa40c7b563016b2b1c51c6fe3
|
[
"curl"
] |
permissive
|
omegahat/RCurl
|
5dc3299c75dce0d0ab2481902125aebaa9cb7c9e
|
e07c076963fc6436e0b05db04bb0b3a20ba378e9
|
refs/heads/master
| 2022-06-25T08:55:39.848816
| 2022-06-07T02:34:35
| 2022-06-07T02:34:35
| 4,004,831
| 20
| 18
|
NOASSERTION
| 2022-06-07T02:34:35
| 2012-04-12T13:17:38
|
Turing
|
UTF-8
|
R
| false
| false
| 1,218
|
r
|
curlAuthConstants.R
|
# libcurl HTTP authentication scheme flags, exported both with and without
# the CURL prefix.  They are bit flags and combine with bitwise OR, e.g.
# RCurl:::AUTH_BASIC | RCurl:::AUTH_DIGEST | RCurl:::AUTH_NTLM
CURLAUTHValues = structure(c(-.Machine$integer.max, -17, 8, -18, 32, 16, 1, 2, 4, 0), .Names = c("CURLAUTH_ONLY",
"CURLAUTH_ANY", "CURLAUTH_NTLM", "CURLAUTH_ANYSAFE", "CURLAUTH_NTLM_WB",
"CURLAUTH_DIGEST_IE", "CURLAUTH_BASIC", "CURLAUTH_DIGEST", "CURLAUTH_GSSNEGOTIATE",
"CURLAUTH_NONE"))

# Each constant is wrapped as a BitwiseValue of class 'CURLAuth' (BitwiseValue
# is a package-internal constructor) so the flags combine correctly under
# bitwise operations.
# BitwiseValue( )
AUTH_ONLY <- CURLAUTH_ONLY <- BitwiseValue(-.Machine$integer.max, 'CURLAUTH_ONLY', 'CURLAuth')
AUTH_ANY <- CURLAUTH_ANY <- BitwiseValue(-17, 'CURLAUTH_ANY', 'CURLAuth')
AUTH_NTLM <- CURLAUTH_NTLM <- BitwiseValue(8, 'CURLAUTH_NTLM', 'CURLAuth')
AUTH_ANYSAFE <- CURLAUTH_ANYSAFE <- BitwiseValue(-18, 'CURLAUTH_ANYSAFE', 'CURLAuth')
AUTH_NTLM_WB <- CURLAUTH_NTLM_WB <- BitwiseValue(32, 'CURLAUTH_NTLM_WB', 'CURLAuth')
AUTH_DIGEST_IE <- CURLAUTH_DIGEST_IE <- BitwiseValue(16, 'CURLAUTH_DIGEST_IE', 'CURLAuth')
AUTH_BASIC <- CURLAUTH_BASIC <- BitwiseValue(1, 'CURLAUTH_BASIC', 'CURLAuth')
AUTH_DIGEST <- CURLAUTH_DIGEST <- BitwiseValue(2, 'CURLAUTH_DIGEST', 'CURLAuth')
AUTH_GSSNEGOTIATE <- CURLAUTH_GSSNEGOTIATE <- BitwiseValue(4, 'CURLAUTH_GSSNEGOTIATE', 'CURLAuth')
AUTH_NONE <- CURLAUTH_NONE <- BitwiseValue(0, 'CURLAUTH_NONE', 'CURLAuth')
|
917b11184ca54f6f3029ab9ff132bf4abebea705
|
f995139d5c4ff0c6dc2785edfebb84df43a6bb3f
|
/R/0282-extract-bcfishpass2-crossing-corrections.R
|
e8f301e39f914240143462ea85b0e2af91740e18
|
[
"Apache-2.0"
] |
permissive
|
Mateo9569/fish_passage_bulkley_2020_reporting
|
4633713f012edc331552f2ef367b2f3456f14bae
|
49e25c50cef023c3f93654816618f714431da94a
|
refs/heads/master
| 2023-04-12T20:39:05.124435
| 2021-05-04T21:43:43
| 2021-05-04T21:43:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,193
|
r
|
0282-extract-bcfishpass2-crossing-corrections.R
|
##all the data is in bcfishpass.crossings now so we will redo this and use it for analysis going forward
##in here we also have workflows to match our crossings to modelled crossings in case they are not already.
# ---------------------------------------------------------------------------
# Purpose: pull crossing records from the bcfishpass PostgreSQL database,
# match field-surveyed PSCIS crossings to their nearest modelled crossing,
# snapshot the BULK/MORR crossings into a local SQLite database, and update
# the pscis_modelledcrossings_streams_xref.csv lookup in the bcfishpass repo.
# Side effects: writes a "misc" table to the "ali" schema in Postgres,
# archives/rewrites tables in data/bcfishpass.sqlite, and overwrites a CSV
# under C:/scripts/bcfishpass (hard-coded Windows paths).
# ---------------------------------------------------------------------------
# Load project helpers; private_info.R supplies the *_wsl connection
# credentials, and 0255-load-pscis.R supplies the pscis_all data frame.
source('R/packages.R')
source('R/functions.R')
source('R/private_info.R')
source('R/0255-load-pscis.R')
# Open the connection to the bcfishpass PostgreSQL database
conn <- DBI::dbConnect(
RPostgres::Postgres(),
dbname = dbname_wsl,
host = host_wsl,
port = port_wsl,
user = user_wsl,
password = password_wsl
)
#
# ##listthe schemas in the database
# dbGetQuery(conn,
# "SELECT schema_name
# FROM information_schema.schemata")
# #
# #
# # # ##list tables in a schema
# Exploratory queries: list tables/columns and spot-check a few crossings.
# Results are printed to the console only; nothing below depends on them.
dbGetQuery(conn,
"SELECT table_name
FROM information_schema.tables
WHERE table_schema='bcfishpass'")
# # # # #
# # # # # ##list column names in a table
dbGetQuery(conn,
"SELECT column_name,data_type
FROM information_schema.columns
WHERE table_name='fiss_fish_obsrvtn_pnt_sp'")
dbGetQuery(conn,
"SELECT a.total_lakereservoir_ha
FROM bcfishpass.crossings a
WHERE stream_crossing_id IN (58159,58161,123446)")
# Summarise the observation-date range per point type for FISS observations,
# restricted to plausible dates (filters out sentinel/garbage dates).
dbGetQuery(conn,
"SELECT o.observation_date, o.point_type_code FROM whse_fish.fiss_fish_obsrvtn_pnt_sp o;") %>%
filter(observation_date > '1900-01-01' &
observation_date < '2021-02-01') %>%
group_by(point_type_code) %>%
summarise(min = min(observation_date, na.rm = T),
max = max(observation_date, na.rm = T))
#first thing we want to do is match up our pha
# Build an sf object from the PSCIS field submissions.
# NOTE(review): EPSG 26909 is NAD83 / UTM zone 9N — assumes all field
# eastings/northings were collected in zone 9; confirm for new data.
# 3005 is BC Albers, matching the bcfishpass geometry.
dat <- pscis_all %>%
sf::st_as_sf(coords = c("easting", "northing"),
crs = 26909, remove = F) %>% ##don't forget to put it in the right crs buds
sf::st_transform(crs = 3005) ##get the crs same as the layers we want to hit up
# add a unique id - we could just use the reference number
dat$misc_point_id <- seq.int(nrow(dat))
# dbSendQuery(conn, paste0("CREATE SCHEMA IF NOT EXISTS ", "test_hack",";"))
# load to database
# Push the points to Postgres so the nearest-neighbour join below can run
# server-side. NOTE(review): st_write will fail if ali.misc already exists.
sf::st_write(obj = dat, dsn = conn, Id(schema= "ali", table = "misc"))
# sf doesn't automagically create a spatial index or a primary key
res <- dbSendQuery(conn, "CREATE INDEX ON ali.misc USING GIST (geometry)")
dbClearResult(res)
res <- dbSendQuery(conn, "ALTER TABLE ali.misc ADD PRIMARY KEY (misc_point_id)")
dbClearResult(res)
# For each uploaded point, grab the single nearest bcfishpass crossing
# (KNN via the <-> operator) plus the separation distance in metres.
dat_info <- dbGetQuery(conn, "SELECT
a.misc_point_id,
b.*,
ST_Distance(ST_Transform(a.geometry,3005), b.geom) AS distance
FROM
ali.misc AS a
CROSS JOIN LATERAL
(SELECT *
FROM bcfishpass.crossings
ORDER BY
a.geometry <-> geom
LIMIT 1) AS b")
##get all the data and save it as an sqlite database as a snapshot of what is happening. we can always hopefully update it
query <- "SELECT *
FROM bcfishpass.crossings
WHERE watershed_group_code IN ('BULK','MORR')"
##import and grab the coordinates - this is already done
bcfishpass_morr_bulk <- st_read(conn, query = query) %>%
# st_transform(crs = 26909) %>% ##simon does this now on his end.
# mutate(utm_zone = 9,
# easting = sf::st_coordinates(.)[,1],
# northing = sf::st_coordinates(.)[,2]) %>%
st_drop_geometry()
# porphyryr <- st_read(conn, query =
# "SELECT * FROM bcfishpass.crossings
# WHERE stream_crossing_id = '124487'")
# Done with Postgres from here on
dbDisconnect(conn = conn)
##join the modelled road data to our pscis submission info
dat_joined <- left_join(
dat,
dat_info,
# select(dat_info,misc_point_id:fcode_label, distance, crossing_id), ##geom keep only the road info and the distance to nearest point from here
by = "misc_point_id"
)
##lets simiplify dat_joined to have a look up
my_pscis_modelledcrossings_streams_xref <- dat_joined %>%
select(pscis_crossing_id, stream_crossing_id, modelled_crossing_id, source) %>%
st_drop_geometry()
##this is how we update our local db.
##my time format format(Sys.time(), "%Y%m%d-%H%M%S")
# mydb <- DBI::dbConnect(RSQLite::SQLite(), "data/bcfishpass.sqlite")
# Snapshot the fresh BULK/MORR pull into the local SQLite db, keeping a
# timestamped archive of the previous version first.
conn <- rws_connect("data/bcfishpass.sqlite")
rws_list_tables(conn)
##archive the last version for now
bcfishpass_archive <- readwritesqlite::rws_read_table("bcfishpass_morr_bulk", conn = conn)
# rws_drop_table("bcfishpass_archive", conn = conn) ##if it exists get rid of it - might be able to just change exists to T in next line
# NOTE(review): "%Y-%m-%d-%H%m" uses %m (month) where %M (minute) was
# likely intended — archive names may collide within the same hour.
rws_write(bcfishpass_archive, exists = F, delete = TRUE,
conn = conn, x_name = paste0("bcfishpass_morr_bulk_archive_", "_", format(Sys.time(), "%Y-%m-%d-%H%m")))
rws_drop_table("bcfishpass_morr_bulk", conn = conn) ##now drop the table so you can replace it
rws_write(bcfishpass_morr_bulk, exists = F, delete = TRUE,
conn = conn, x_name = "bcfishpass_morr_bulk")
# rws_drop_table("my_pscis_modelledcrossings_streams_xref", conn = conn)
# rws_write(my_pscis_modelledcrossings_streams_xref, exists = FALSE, delete = TRUE,
# conn = conn, x_name = "my_pscis_modelledcrossings_streams_xref")
rws_list_tables(conn)
rws_disconnect(conn)
##make a dataframe with our crossings that need a match
# Crossings we surveyed (pscis_crossing_id present) that bcfishpass has not
# yet tied to a stream_crossing_id — these are the rows to contribute back.
match_this <- dat_joined %>%
st_drop_geometry() %>%
select(pscis_crossing_id, stream_crossing_id, modelled_crossing_id, linear_feature_id, watershed_group_code) %>%
mutate(reviewer = 'AI',
notes = "Matched to closest stream model") %>%
filter(!is.na(pscis_crossing_id) &
is.na(stream_crossing_id))
# Reshape to the xref schema: our PSCIS id becomes stream_crossing_id and
# linear_feature_id is blanked (unknown at this stage).
match_this_to_join <- match_this %>%
select(-stream_crossing_id) %>%
mutate(linear_feature_id = NA_integer_) %>%
rename(stream_crossing_id = pscis_crossing_id) %>%
mutate(across(c(stream_crossing_id:linear_feature_id), as.numeric))
##test to see if the match_this hits are already assigned in crossings
bcfishpass_morr_bulk %>%
filter(stream_crossing_id %in% (match_this %>% pull(pscis_crossing_id)))
##need to learn to move from the other fork for now rename and grab from there
file.copy(from = "C:/scripts/bcfishpass/01_prep/02_pscis/data/pscis_modelledcrossings_streams_xref.csv",
to = "C:/scripts/pscis_modelledcrossings_streams_xref.csv",
overwrite = T)
##get the crossing data from bcfishpass
pscis_modelledcrossings_streams_xref <- readr::read_csv("C:/scripts/pscis_modelledcrossings_streams_xref.csv")
##check to make sure your match_this crossings aren't already assigned somehow
pscis_modelledcrossings_streams_xref %>%
filter(stream_crossing_id %in% (match_this %>% pull(pscis_crossing_id)))
##because the crossings are already there we will need to pull them out and then sub them back in
pscis_modelledcrossings_streams_xref_to_join <- pscis_modelledcrossings_streams_xref %>%
filter(!stream_crossing_id %in% (match_this %>% pull(pscis_crossing_id)))
# Drop stale rows for our crossings, append the refreshed matches, and keep
# the file sorted by stream_crossing_id as bcfishpass expects.
pscis_modelledcrossings_streams_xref_joined <- bind_rows(
pscis_modelledcrossings_streams_xref_to_join,
match_this_to_join) %>%
# mutate(stream_crossing_id = as.integer(stream_crossing_id)) %>% ##i can't figure out why this needs to be an integer. it should sort as is (numeric)
dplyr::arrange(stream_crossing_id)
##now burn it back to bcfishpass ready for a pull request
readr::write_csv(pscis_modelledcrossings_streams_xref_joined, "C:/scripts/bcfishpass/01_prep/02_pscis/data/pscis_modelledcrossings_streams_xref.csv",
na = "")
|
02dd086b24002e47464f82e62c35bfaae9e9ea32
|
9bee3fb579661c4387db895569fc2e36be118b5e
|
/R/calmet_define_geophys.R
|
a073788956ba328fa1ba113f533f3884b1c1d4b2
|
[
"MIT"
] |
permissive
|
timelyportfolio/PuffR
|
717257618606b5b40c7247061d9a7018a2f47a07
|
04c6f301958b9eca4fe60d68ce524b357091d5f7
|
refs/heads/master
| 2021-01-20T16:27:48.290257
| 2014-09-08T07:03:24
| 2014-09-08T07:03:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,768
|
r
|
calmet_define_geophys.R
|
#' Define the CALMET domain and generate a geophysical input file
#' @description Define the CALMET domain and determine the best gridded values for land use, terrain heights, and micrometeorological parameters for creation of a geophysical input file.
#' @param lat_dec_deg the latitude of the CALMET domain in decimal degrees. The location of this point is defined in the lat_lon_grid_loc argument.
#' @param lon_dec_deg the longitude of the CALMET domain in decimal degrees. The location of this point is defined in the lat_lon_grid_loc argument.
#' @param lat_lon_grid_loc the location of the lat/long inputs in relation to the domain. Choices are: 1 (center), 2 (lower left), 3 (lower right), 4 (upper left), 5 (upper right).
#' @param domain_width_m the desired width of the meteorological domain in meters.
#' @param domain_height_m the desired height of the meteorological domain in meters.
#' @param download_SRTM a choice of whether to download the SRTM GeoTIFF height data from a server or read the identical files from a local folder.
#' @param SRTM_file_path path to a folder containing a collection of SRTM V4 zip archive files.
#' @return Invisibly returns \code{NULL}; called for its side effects. Writes
#'   "terrain.pdf", "landuse.pdf", and the seasonal "winter_geo.txt",
#'   "spring_geo.txt", "summer_geo.txt", and "fall_geo.txt" files to the
#'   working directory.
#' @export calmet_define_geophys
#' @examples
#' \dontrun{
#' # Create a CALMET domain of 8 by 8 km in the Los Angeles area.
#' # Chosen lat/lon coordinates are for the center of the domain.
#' calmet_define_geophys(lat_dec_deg = 34.050184,
#'                       lon_dec_deg = -118.253959,
#'                       lat_lon_grid_loc = 1,
#'                       domain_width_m = 8000,
#'                       domain_height_m = 8000,
#'                       download_SRTM = TRUE)
#'}
calmet_define_geophys <- function(lat_dec_deg = NULL,
                                  lon_dec_deg = NULL,
                                  lat_lon_grid_loc = 1,
                                  domain_width_m = NULL,
                                  domain_height_m = NULL,
                                  download_SRTM = TRUE,
                                  SRTM_file_path = NULL){

  # Attach required packages; 'library' (unlike 'require') fails loudly
  # when a dependency is missing instead of returning FALSE
  library(rgdal)
  library(plyr)
  library(sp)
  library(raster)
  library(ggplot2)
  library(stringr)
  library(MODISTools)

  # Define the cell resolution (square cells) as 250 m
  cell_resolution_m <- 250

  # Round the provided width and the height of the met domain to the resolution of the cell
  domain_width_m <- round_any(domain_width_m, cell_resolution_m, round)
  domain_height_m <- round_any(domain_height_m, cell_resolution_m, round)

  # Get matrix of longitude and latitude for chosen point
  lat_lon_dec_deg <- cbind(lon_dec_deg, lat_dec_deg)

  # Determine the UTM zone
  UTM_zone <- (floor((lon_dec_deg + 180)/6) %% 60) + 1

  # Determine whether domain is in Northern Hemisphere or Southern Hemisphere
  UTM_hemisphere <- ifelse(lat_dec_deg >= 0, "N", "S")

  # Define a PROJ.4 projection string for a lat/lon projection
  proj_string_longlat <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"

  # Define a PROJ.4 projection string for a UTM projection
  proj_string_UTM <- paste("+proj=utm +zone=",
                           UTM_zone,
                           " +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
                           sep = '')

  # Project as UTM coordinates from the determined UTM zone, round to nearest 250 m using the
  # 'round_any' function from the 'plyr' package
  UTM_location <- project(lat_lon_dec_deg, proj_string_UTM)
  UTM_location <- round_any(UTM_location, cell_resolution_m, round)

  # Do these length and width values accomodate an integer number of cells of the specified
  # resolution? After the rounding above these checks should always pass.
  is_number_cells_across_x_an_int <- ifelse(domain_width_m %% cell_resolution_m != 0, FALSE, TRUE)
  is_number_cells_across_y_an_int <- ifelse(domain_height_m %% cell_resolution_m != 0, FALSE, TRUE)

  # Get the number of cells in the x direction
  number_cells_across_x <- ifelse(is_number_cells_across_x_an_int == TRUE,
                                  domain_width_m/cell_resolution_m, NULL)

  # Get the number of cells in the y direction
  number_cells_across_y <- ifelse(is_number_cells_across_y_an_int == TRUE,
                                  domain_height_m/cell_resolution_m, NULL)

  # Get extents of UTM grid (left, right, bottom, top) in meters
  left_UTM <- get_grid_extents_UTM(side = "left",
                                   lat_lon_grid_loc = lat_lon_grid_loc,
                                   UTM_location = UTM_location,
                                   domain_width_m = domain_width_m,
                                   domain_height_m = domain_height_m)

  right_UTM <- get_grid_extents_UTM(side = "right",
                                    lat_lon_grid_loc = lat_lon_grid_loc,
                                    UTM_location = UTM_location,
                                    domain_width_m = domain_width_m,
                                    domain_height_m = domain_height_m)

  bottom_UTM <- get_grid_extents_UTM(side = "bottom",
                                     lat_lon_grid_loc = lat_lon_grid_loc,
                                     UTM_location = UTM_location,
                                     domain_width_m = domain_width_m,
                                     domain_height_m = domain_height_m)

  top_UTM <- get_grid_extents_UTM(side = "top",
                                  lat_lon_grid_loc = lat_lon_grid_loc,
                                  UTM_location = UTM_location,
                                  domain_width_m = domain_width_m,
                                  domain_height_m = domain_height_m)

  # Create a data frame object for UTM values of LL, LR, UL, and UR
  LL_LR_UL_UR_UTM_m_DF <- data.frame("x" = c(left_UTM, right_UTM, left_UTM, right_UTM),
                                     "y" = c(bottom_UTM, bottom_UTM, top_UTM, top_UTM))

  # Create a SpatialPoints object for UTM values of LL, LR, UL, and UR
  LL_LR_UL_UR_UTM_m_SP <- SpatialPoints(as.matrix(LL_LR_UL_UR_UTM_m_DF),
                                        proj4string = CRS(proj_string_UTM))

  # Generate Extent object in UTM
  bbox_UTM <- extent(LL_LR_UL_UR_UTM_m_SP)

  # Create a RasterLayer object for UTM values
  LL_LR_UL_UR_UTM_m_RL <- raster(nrows = number_cells_across_x,
                                 ncols = number_cells_across_x,
                                 ext = bbox_UTM,
                                 crs = proj_string_UTM)

  # Create a SpatialPoints object for lat/lon values of LL, LR, UL, and UR through a
  # spatial transform
  LL_LR_UL_UR_longlat_SP <- spTransform(LL_LR_UL_UR_UTM_m_SP, CRS("+proj=longlat +ellps=GRS80"))

  # Obtain DEM data projected as long/lat for the domain as a RasterLayer object
  srtm <- download_SRTMV4_GeoTIFF(lon = floor(lon_dec_deg),
                                  lat = floor(lat_dec_deg),
                                  download = download_SRTM,
                                  SRTM_file_path = SRTM_file_path)

  # Generate Extents object in long/lat projection for cropping
  bbox_longlat <- extent(LL_LR_UL_UR_longlat_SP)

  # Crop DEM data using 'bbox' Extent object in lat/lon projection
  srtm_cropped <- crop(srtm, bbox_longlat)

  # Reproject cropped RasterLayer object from lat/lon to UTM
  srtm_UTM <- projectRaster(srtm_cropped,
                            crs = paste("+proj=utm +zone=",
                                        UTM_zone,
                                        " +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
                                        sep = ''))

  # Resample the reprojected DEM onto the regular 250-m CALMET grid
  srtm_UTM_resampled <- resample(srtm_UTM, LL_LR_UL_UR_UTM_m_RL)

  # Create a SpatialPixelsDataFrame from the resampled data
  srtm_UTM_resampled.SPDF <- as(srtm_UTM_resampled, "SpatialPixelsDataFrame")

  # Create a copy of the RasterLayer object for subsituting NA values with 0
  srtm_UTM_resampled_no_NA <- srtm_UTM_resampled

  # Substitute NA values with 0 values in RasterLayer copy (NA cells are
  # treated as sea level / water later on)
  srtm_UTM_resampled_no_NA@data@values[is.na(srtm_UTM_resampled_no_NA@data@values)] <- 0

  # Create a SpatialPixelsDataFrame from the resampled data with no NA values in the data/values slot
  srtm_UTM_resampled_no_NA.SPDF <- as(srtm_UTM_resampled_no_NA, "SpatialPixelsDataFrame")

  # Create a data frame for plotting in ggplot
  srtm_UTM_resampled.df <- as.data.frame(srtm_UTM_resampled.SPDF)

  # Change the column names to a standard set of labels
  colnames(srtm_UTM_resampled.df) <- c("z", "x", "y")

  # Plot the grid of heights using ggplot
  g <- ggplot(srtm_UTM_resampled.df, aes(x = x/1000, y = y/1000, fill = z)) +
    geom_tile(aes(fill = z)) +
    scale_fill_gradient(low = "green", high = "red",
                        guide = guide_legend(title = "Heights")) +
    coord_equal() +
    theme_bw(base_size = 12, base_family = "") +
    labs(x = paste("UTM (Zone ", UTM_zone, UTM_hemisphere, ") Easting, km", sep = '')) +
    labs(y = paste("UTM (Zone ", UTM_zone, UTM_hemisphere, ") Northing, km", sep = '')) +
    theme(axis.text = element_text(size = rel(1.2)),
          axis.title = element_text(size = rel(1.2)),
          legend.title = element_text(size = rel(1.2)))

  # Save terrain plot as a pdf file; 'plot = g' is required because 'g' was
  # never printed, so ggsave's default of last_plot() would not pick it up
  ggsave(filename = "terrain.pdf", plot = g,
         width = 8, height = 8, units = "in")

  # Extract heights from the resampled DEM in UTM
  gridded_heights_UTM_m_vector <- srtm_UTM_resampled@data@values

  # Create a data frame for the extracted heights in UTM, in row-major order
  gridded_heights_UTM_m_df <- as.data.frame(t(matrix(gridded_heights_UTM_m_vector,
                                                     ncol = number_cells_across_y)))

  # Replace NA values with 0 values
  gridded_heights_UTM_m_df[is.na(gridded_heights_UTM_m_df)] <- 0

  # Create file header for GEO.DAT file
  geo_dat_h <- vector(mode = "character", length = 9)
  geo_dat_h[1] <- "GEO.DAT 2.0 Header structure with coordinate parameters"
  geo_dat_h[2] <- "2"
  geo_dat_h[3] <- "Produced using PuffR"
  geo_dat_h[4] <- "Project Name - Time Period"
  geo_dat_h[5] <- "UTM"
  geo_dat_h[6] <- paste(" ", UTM_zone, UTM_hemisphere, sep = '')
  geo_dat_h[7] <- "WGS-84 02-21-2003"
  geo_dat_h[8] <- paste(" ", number_cells_across_x,
                        " ", number_cells_across_y,
                        " ", round(left_UTM/1000, digits = 3),
                        " ", round(bottom_UTM/1000, digits = 3),
                        " ", round(cell_resolution_m/1000, digits = 3),
                        " ", round(cell_resolution_m/1000, digits = 3),
                        sep = '')
  geo_dat_h[9] <- "KM M"

  # Generate a vector of comma-delimited strings containing heights of every row of cells;
  # this is for writing to a file and eventual inclusion in the GEO.DAT file
  gridded_heights_UTM_m_row_major_strings <- vector(mode = "character", length = 0)
  for (i in seq_len(nrow(gridded_heights_UTM_m_df))){
    string <- paste(round(gridded_heights_UTM_m_df[i, ], digits = 2), collapse = ", ")
    gridded_heights_UTM_m_row_major_strings <- c(gridded_heights_UTM_m_row_major_strings, string)
  }

  # Subheader line announcing the terrain-heights section of GEO.DAT
  geo_dat_h_heights <- " 1.0000 - TERRAIN heights - HTFAC (Conversion to meters)"

  # Create data frame mapping MODIS IGBP Type 1 land-cover codes to CALMET
  # land-use categories
  IGBP_Type_1_class_no <- c(seq(0, 16, 1), 254, 255)

  IGBP_Type_1_class_name <- c("Water", "Evergreen needleleaf forest", "Evergreen broadleaf forest",
                              "Deciduous needleleaf forest", "Deciduous broadleaf forest",
                              "Mixed forest", "Closed shrublands", "Open shrublands",
                              "Woody savannas", "Savannas", "Grasslands", "Permanent wetlands",
                              "Croplands", "Urban and built-up", "Cropland/Natural vegetation mosaic",
                              "Snow and ice", "Barren or sparsely vegetated", "Unclassified",
                              "Fill value")

  CALMET_categories <- c(50, 40, 40, 40, 40, 40, 40, 40, 30, 30,
                         30, 60, 20, 10, 20, 90, 70, NA, NA)

  LU_classification <- data.frame(IGBP_Type_1_class_no, IGBP_Type_1_class_name, CALMET_categories,
                                  stringsAsFactors = FALSE)

  # Create a RasterLayer object with lat/lon coordinates for grid cells
  srtm_latlon_RL <- raster(bbox_longlat,
                           nrows = number_cells_across_y,
                           ncols = number_cells_across_x,
                           crs = proj_string_longlat)

  # Create a SpatialPixels object from the generated RasterLayer object
  srtm_latlon_SP <- as(srtm_latlon_RL, "SpatialPixels")

  # Extract lat/lon coordinates from 'srtm_latlon_SP'
  modis_coordinates <- as.data.frame(srtm_latlon_SP@coords)
  colnames(modis_coordinates) <- c("long", "lat")

  # Create vectors of starting and ending dates for the land cover data
  # (a single year, 2008, is requested per grid cell)
  start.date <- rep(2008, nrow(modis_coordinates))
  end.date <- rep(2008, nrow(modis_coordinates))

  # Column-bind the 'start.date' and 'end.date' vectors with the coordinates data frame
  modis_coordinates <- cbind(modis_coordinates, start.date)
  modis_coordinates <- cbind(modis_coordinates, end.date)

  # Acquire subsets of the landcover Type 1 codes from the MODIS MCD12Q1 product;
  # this writes one .asc file per coordinate into the working directory
  MODISSubsets(LoadDat = modis_coordinates, Products = "MCD12Q1",
               Bands = c("Land_Cover_Type_1"),
               Size = c(0,0), TimeSeriesLength = 1)

  # Generate a file list of acquired MODIS data for each set of coordinates
  file_list <- list.files(pattern = ".*_MCD12Q1.asc")

  # Extract the land use code (last comma-separated field of the first line)
  # from each acquired data file
  IGBP_Type_1_class_no <- vector(mode = "numeric", length = 0)
  for (i in seq_along(file_list)){
    # Read the first line once rather than three times
    first_line <- readLines(con = file_list[i])[1]
    line_fields <- unlist(str_split(first_line, pattern = ","))
    class_no <- as.numeric(line_fields[length(line_fields)])
    IGBP_Type_1_class_no <- c(IGBP_Type_1_class_no, class_no)
  }

  # Delete the .asc files from the working folder
  # file.remove(file_list)

  # Delete the summary CSV file from the working folder
  # file.remove(list.files(pattern = "Subset Download.*.csv"))

  # Get the corresponding CALMET category from the IGBP Type 1 class data
  CALMET_categories <- join(as.data.frame(IGBP_Type_1_class_no), LU_classification)[,3]

  # Create a data frame for the LU categories, in row-major order
  gridded_CALMET_categories <- as.data.frame(t(matrix(CALMET_categories,
                                                      ncol = number_cells_across_y)))

  # Generate a vector of comma-delimited strings containing LU categories of every row of cells;
  # this is for writing to a file and eventual inclusion in the GEO.DAT file
  gridded_CALMET_categories_strings <- vector(mode = "character", length = 0)
  for (i in seq_len(nrow(gridded_CALMET_categories))){
    string <- paste(gridded_CALMET_categories[i, ], collapse = ", ")
    gridded_CALMET_categories_strings <- c(gridded_CALMET_categories_strings, string)
  }

  # Subheader line announcing the land-use section of GEO.DAT
  geo_dat_h_LU <- "0 --- LAND USE CATEGORIES 0 - DEFAULT CATEGORIES 1 - NEW CATEGORIES"

  # Create new data frame object 'UTM_gridded_values' that contains gridded heights and
  # LU categories
  UTM_gridded_values <- cbind(srtm_UTM_resampled_no_NA.SPDF, as.data.frame(CALMET_categories))

  # Force values in the 'CALMET_categories' column of the 'UTM_gridded_values' data frame to
  # be 50 (water) if height is 0
  for (i in seq_len(nrow(UTM_gridded_values))){
    if (UTM_gridded_values[i,1] == 0.00000) UTM_gridded_values[i,4] <- 50
  }

  # Replace 'CALMET_categories' vector with revised values
  CALMET_categories <- UTM_gridded_values$CALMET_categories

  # Define the colours for each of the CALMET land use categories using a named vector
  cols <- c("10" = "gold2",
            "20" = "olivedrab2",
            "30" = "springgreen",
            "40" = "forestgreen",
            "50" = "deepskyblue2",
            "60" = "orchid",
            "70" = "lightsalmon",
            "80" = "moccasin",
            "90" = "honeydew")

  # Reclass 'CALMET_categories' as a factor for the purpose of generating a ggplot object
  UTM_gridded_values$CALMET_categories <- as.factor(UTM_gridded_values$CALMET_categories)

  # Plot the grid of land use categories using ggplot
  h <- ggplot(UTM_gridded_values, aes(x = x/1000, y = y/1000,
                                      fill = CALMET_categories)) +
    geom_tile() +
    scale_fill_manual(values = cols,
                      breaks = c(as.numeric(names(cols)), 100),
                      name = "Land Use\nCategories") +
    coord_equal() +
    theme_bw(base_size = 12, base_family = "") +
    labs(x = paste("UTM (Zone ", UTM_zone, UTM_hemisphere, ") Easting, km", sep = '')) +
    labs(y = paste("UTM (Zone ", UTM_zone, UTM_hemisphere, ") Northing, km", sep = '')) +
    theme(axis.text = element_text(size = rel(1.2)),
          axis.title = element_text(size = rel(1.2)),
          legend.title = element_text(size = rel(1.2)))

  # Save land use plot as a pdf file; 'plot = h' again required because 'h'
  # was never printed
  ggsave(filename = "landuse.pdf", plot = h, device = pdf,
         width = 8, height = 8, units = "in")

  # Back to numeric for writing the micrometeorological fields
  UTM_gridded_values$CALMET_categories <-
    as.numeric(as.character(UTM_gridded_values$CALMET_categories))

  # Get data frame containing micrometeorological parameters by land use category by season
  mmet_seasons <- calmet_seasonal_micrometeorology()

  # Create vector of short descriptions for each micrometeorological parameter
  mmet_descriptions <- c("gridded z0 field",
                         "gridded albedo field",
                         "gridded Bowen ratio field",
                         "gridded soil heat flux parameters",
                         "gridded anthropogenic heat flux field",
                         "gridded leaf area index field")

  # Get the corresponding micrometeorological parameters by gridded CALMET category by season
  mmet_winter <- join(data.frame(CALMET_categories = CALMET_categories),
                      subset(mmet_seasons, season == "Winter"))

  mmet_spring <- join(data.frame(CALMET_categories = CALMET_categories),
                      subset(mmet_seasons, season == "Spring"))

  mmet_summer <- join(data.frame(CALMET_categories = CALMET_categories),
                      subset(mmet_seasons, season == "Summer"))

  mmet_fall <- join(data.frame(CALMET_categories = CALMET_categories),
                    subset(mmet_seasons, season == "Fall"))

  # Helper: write one seasonal GEO.DAT-format text file. Emits the header,
  # the land-use section, the terrain-heights section, then the six gridded
  # micrometeorological fields (columns 2-7 of the seasonal data frame).
  # Relies (via closure) on the header/strings objects built above.
  write_season_geo_file <- function(file_name, mmet_season) {
    cat(geo_dat_h, file = file_name, sep = "\n", append = FALSE)
    cat(geo_dat_h_LU, file = file_name, sep = "\n", append = TRUE)
    cat(gridded_CALMET_categories_strings, file = file_name, sep = "\n", append = TRUE)
    cat(geo_dat_h_heights, file = file_name, sep = "\n", append = TRUE)
    cat(gridded_heights_UTM_m_row_major_strings, file = file_name, sep = "\n", append = TRUE)
    for (i in 2:7) {
      cat(paste(" 2 - ", mmet_descriptions[i - 1], sep = ''),
          file = file_name, sep = "\n", append = TRUE)
      cat(vector_values_to_row_major_strings(values_vector = mmet_season[, i],
                                             number_cells_across_y = number_cells_across_y),
          file = file_name, sep = "\n", append = TRUE)
    }
  }

  # Create the four seasonal geophysical input files
  write_season_geo_file("winter_geo.txt", mmet_winter)
  write_season_geo_file("spring_geo.txt", mmet_spring)
  write_season_geo_file("summer_geo.txt", mmet_summer)
  write_season_geo_file("fall_geo.txt", mmet_fall)

  invisible(NULL)
}
|
6367d45d22fef13b11a57a47d0ba6d597c060444
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_nreachq_1344/query60_nreachq_1344.R
|
64819f33d5ff4199756c4095d47869bfb2c87cef
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 717
|
r
|
query60_nreachq_1344.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10479
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10479
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_nreachq_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2879
c no.of clauses 10479
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10479
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query60_nreachq_1344.qdimacs 2879 10479 E1 [] 0 20 2856 10479 NONE
|
fb2c29e190cf1b12f53373e8af2ed415b8d1717b
|
df6cd74dfc3474a4c93a3bf1492b23bcb0cded0e
|
/server.R
|
f0e2ab791ac3f7fb209ef335cb6877a3877a535b
|
[] |
no_license
|
drbinzhao/Energy-Analytics-with-ShinyR
|
62ec8bd9ff674b2f2fc768afbf9fd63c9d3ac15b
|
16653861ad117e1c416b3315ac38d1846ed9ed8c
|
refs/heads/master
| 2021-01-23T17:42:12.499825
| 2016-12-29T21:38:41
| 2016-12-29T21:38:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,255
|
r
|
server.R
|
shinyServer(function(input,output){
# Date formatting
fulldata$date<-paste(substr(fulldata$date, 1, 4), "-", substr(fulldata$date,5, nchar(fulldata$date)), sep = "")
fulldata$date<-as.Date(as.yearmon(fulldata$date))
###########################################
# Overview Tab
###########################################
#Values for Boxes on top of overview screen
totaldis<-sum(fulldata$total_distribution)
totalengery<-sum(fulldata$total_energy)
totalbi<-sum(fulldata$present_bill)
totalgroup<-length(unique(fulldata$group_code))
#Value Boxes in Overview
# Box 1
output$totalBill<-renderInfoBox({
infoBox(
"Total Present Bill",totalbi,icon=icon("dollar"),color="yellow",fill=TRUE)
})
# Box 2
output$totalEnergy<-renderInfoBox({
infoBox(
"Total Energy Used",totalengery,icon=icon("calculator"),color="blue",fill=TRUE)
})
# Box 3
output$totalGroup<-renderInfoBox({
infoBox(
"Total Groups",totalgroup,icon=icon("users"),color="purple",fill=TRUE)
})
#Time Series graph for overview panel
#dygraphs package makes the graph, reshape2 (melt function) is needed to munge the data
#xts package changes to a time series object
output$usageplot<-renderDygraph({
rate_class_data<-melt(fulldata,id=c("date","rate_class"),measure.vars="total_energy")
rate_class_data2<-cast(rate_class_data, date~ rate_class,sum)
xtsdata<-data.frame(xts(rate_class_data2[,c(2:ncol(rate_class_data2))],rate_class_data2[,1]))
colnames(xtsdata)<- unique(as.character(fulldata$rate_class))
dygraph(xtsdata,
main="Overall Usage Trend for All Rate Classes") %>%
dyAxis("y", label = "Total Energy Used (kWh)", valueRange =c(0,NULL)) %>%
dyRangeSelector(dateWindow = c("2014-01-01", "2014-12-01")) %>%
dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),highlightCircleSize = 5,
highlightSeriesBackgroundAlpha = 0.2) %>%
dyOptions(colors=RColorBrewer::brewer.pal(ncol(rate_class_data2), "Set2"))
})
#Top 10 usage groups graph
output$toptable <- renderChart2({
nn <- nPlot( total_energy ~ group_name, group = "year", data = fulldata, type = "multiBarHorizontalChart")
nn$chart(
color=c("purple","blue"),
margin=list( left=170),
showControls=FALSE,
width = 300
)
return(nn)
})
###############################################################
#Account Lookup Tab
###############################################################
#subset data for searching by group code/group name/acct code
reactdata<-reactive({switch(input$searchby,
"accnum"=subset(fulldata,fulldata$acc_num == input$accid & fulldata$year==input$year),
"gcode"=subset(fulldata,fulldata$group_code==input$group_code & fulldata$year==input$year),
"gname"=subset(fulldata,fulldata$group_name==input$group_name & fulldata$year==input$year))
})
# Raw data output for lookup tab
output$table<-renderDataTable({
if (input$searchby=="accnum") {
rawtable1<-aggregate(cbind(total_energy,total_distribution,present_bill,proposed_bill) ~ date, data=reactdata(), sum)
}
if (input$searchby=="gcode" | input$searchby=='gname') {
rawtable1<-aggregate(cbind(total_energy,total_distribution,present_bill,proposed_bill) ~ date, data=reactdata(), sum)
}
return(rawtable1)
},options = list(width=300,lengthMenu = c(12, 24), pageLength = 8))
# Plot One -Dygraph (time series)
output$plot<-renderDygraph({
if (length(input$accid) <1 && length(input$group_name)<1 && length(input$group_code)<1) {
return(NULL)}
else {
plotdata<-as.data.frame(aggregate(get(input$usage) ~ date,data=reactdata(),sum))
dygraph(xts(plotdata[,2],plotdata[,1]),main="Usage Trend") %>%
dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),highlightCircleSize = 5, highlightSeriesBackgroundAlpha = 0.2) %>%
dyOptions(colors=RColorBrewer::brewer.pal(3, "Set2"))}
})
# Plot Two - GGPlot of usage over year
output$plot2<-renderPlot({
if(is.null(input$billing)) {plot<-NULL}
else if (length(input$billing)>1) {
data1=melt(reactdata(), id.vars =c("group_code","group_name","cust_name","acc_num","month"), measure.vars = c("present_bill", "proposed_bill"))
plot<-ggplot(data1, aes(month, y=value,fill=variable)) + geom_bar(stat="identity", position=position_dodge())+theme(legend.position="bottom")+ ggtitle("Present and Future Bill Comparison")+scale_y_continuous(expand=c(0.15,0)) }
else {
plot<-ggplot(data = reactdata(), aes_string('month', y=input$billing))+ ggtitle("12-Month Billing Trend") +stat_summary(fun.y = sum, geom="bar",fill="#33CCCC")+scale_y_continuous(expand=c(0.15,0))
}
print(plot)
})
#Output for summary statistics tab
output$summary<-renderTable({
data<-select(reactdata(),total_distribution,total_energy,present_bill,proposed_bill)
summary(data)
})
#########################################
## Rate Class Profile Tab
#########################################
#HD Tab
output$usage_var<-renderUI({selectInput("hd_var1","Select variable",
choices=c("Distribution Demand"="total_distribution","Energy"="total_energy","Peak Demand"="peak_max","Non-Peak Demand"="offpeak_max"))
})
output$bill_var<-renderUI({
selectInput("hd_var2","Select variable", choices=c("Present Bills"="present_bill","Proposed Bill"="proposed_bill"))
})
reactive_hd1<-reactive({
data<-fulldata[fulldata$rate_class=="HD",]
energy_data<-melt(data,id=c("date","group_name"),measure.vars=input$hd_var1)
energy_data2<-cast(energy_data, date~ group_name,sum)
xtsdata<-data.frame(xts(energy_data2[,2:ncol(energy_data2)],energy_data2[,1]))
colnames(xtsdata)<-c(unique(as.character(data$group_name)))
return(xtsdata)
})
reactive_hd2<-reactive({
data<-fulldata[fulldata$rate_class=="HD",]
energy_data<-melt(data,id=c("date","group_name"),measure.vars=input$hd_var2)
energy_data2<-cast(energy_data, date~ group_name,sum)
xtsdata<-data.frame(xts(energy_data2[,2:ncol(energy_data2)],energy_data2[,1]))
colnames(xtsdata)<-c(unique(as.character(data$group_name)))
return(xtsdata)
})
output$profile_usage1<-renderDygraph({
dygraph(reactive_hd1()) %>%
dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),highlightCircleSize = 5, highlightSeriesBackgroundAlpha = 0.2) %>%
dyOptions(colors="purple")
})
output$profile_bill1<-renderDygraph({
dygraph(reactive_hd2()) %>%
dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),highlightCircleSize = 5, highlightSeriesBackgroundAlpha = 0.2) %>%
dyOptions(colors="blue")
})
# PD tab: same layout as the HD tab, restricted to the PD rate class.
output$usage_var2 <- renderUI({
  selectInput("pd_var1", "Select variable",
              choices = c("Distribution Demand" = "total_distribution",
                          "Energy" = "total_energy",
                          "Peak Demand" = "peak_max",
                          "Non-Peak Demand" = "offpeak_max"))
})
output$bill_var2 <- renderUI({
  selectInput("pd_var2", "Select variable",
              choices = c("Present Bills" = "present_bill",
                          "Proposed Bill" = "proposed_bill"))
})
# Monthly totals of the chosen usage variable, one xts-backed column per group.
reactive_pd1 <- reactive({
  pd <- fulldata[fulldata$rate_class == "PD", ]
  long <- melt(pd, id = c("date", "group_name"), measure.vars = input$pd_var1)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  # NOTE(review): assumes unique()'s first-appearance order matches cast()'s
  # column order -- verify for unsorted group names.
  colnames(series) <- unique(as.character(pd$group_name))
  series
})
# Same shape for the chosen billing variable.
reactive_pd2 <- reactive({
  pd <- fulldata[fulldata$rate_class == "PD", ]
  long <- melt(pd, id = c("date", "group_name"), measure.vars = input$pd_var2)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  colnames(series) <- unique(as.character(pd$group_name))
  series
})
output$profile_usage2 <- renderDygraph({
  dygraph(reactive_pd1()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "purple")
})
output$profile_bill2 <- renderDygraph({
  dygraph(reactive_pd2()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "blue")
})
# GS 100-500 kW tab: same layout as the HD tab, restricted to that rate class.
output$usage_var3 <- renderUI({
  selectInput("gs1_var1", "Select variable",
              choices = c("Distribution Demand" = "total_distribution",
                          "Energy" = "total_energy",
                          "Peak Demand" = "peak_max",
                          "Non-Peak Demand" = "offpeak_max"))
})
output$bill_var3 <- renderUI({
  selectInput("gs1_var2", "Select variable",
              choices = c("Present Bills" = "present_bill",
                          "Proposed Bill" = "proposed_bill"))
})
# Monthly totals of the chosen usage variable, one xts-backed column per group.
reactive_gs11 <- reactive({
  gs <- fulldata[fulldata$rate_class == "GS 100-500 kW", ]
  long <- melt(gs, id = c("date", "group_name"), measure.vars = input$gs1_var1)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  # NOTE(review): assumes unique()'s first-appearance order matches cast()'s
  # column order -- verify for unsorted group names.
  colnames(series) <- unique(as.character(gs$group_name))
  series
})
# Same shape for the chosen billing variable.
reactive_gs12 <- reactive({
  gs <- fulldata[fulldata$rate_class == "GS 100-500 kW", ]
  long <- melt(gs, id = c("date", "group_name"), measure.vars = input$gs1_var2)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  colnames(series) <- unique(as.character(gs$group_name))
  series
})
output$profile_usage3 <- renderDygraph({
  dygraph(reactive_gs11()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "purple")
})
output$profile_bill3 <- renderDygraph({
  dygraph(reactive_gs12()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "blue")
})
# GS >500 kW tab: same layout as the HD tab, restricted to that rate class.
output$usage_var4 <- renderUI({
  selectInput("gs2_var1", "Select variable",
              choices = c("Distribution Demand" = "total_distribution",
                          "Energy" = "total_energy",
                          "Peak Demand" = "peak_max",
                          "Non-Peak Demand" = "offpeak_max"))
})
output$bill_var4 <- renderUI({
  selectInput("gs2_var2", "Select variable",
              choices = c("Present Bills" = "present_bill",
                          "Proposed Bill" = "proposed_bill"))
})
# Monthly totals of the chosen usage variable, one xts-backed column per group.
reactive_gs21 <- reactive({
  gs <- fulldata[fulldata$rate_class == "GS >500 kW", ]
  long <- melt(gs, id = c("date", "group_name"), measure.vars = input$gs2_var1)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  # NOTE(review): assumes unique()'s first-appearance order matches cast()'s
  # column order -- verify for unsorted group names.
  colnames(series) <- unique(as.character(gs$group_name))
  series
})
# Same shape for the chosen billing variable.
reactive_gs22 <- reactive({
  gs <- fulldata[fulldata$rate_class == "GS >500 kW", ]
  long <- melt(gs, id = c("date", "group_name"), measure.vars = input$gs2_var2)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  colnames(series) <- unique(as.character(gs$group_name))
  series
})
output$profile_usage4 <- renderDygraph({
  dygraph(reactive_gs21()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "purple")
})
output$profile_bill4 <- renderDygraph({
  dygraph(reactive_gs22()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "blue")
})
# TLC tab: same layout as the HD tab, restricted to the TLC rate class.
output$usage_var5 <- renderUI({
  selectInput("tlc_var1", "Select variable",
              choices = c("Distribution Demand" = "total_distribution",
                          "Energy" = "total_energy",
                          "Peak Demand" = "peak_max",
                          "Non-Peak Demand" = "offpeak_max"))
})
output$bill_var5 <- renderUI({
  selectInput("tlc_var2", "Select variable",
              choices = c("Present Bills" = "present_bill",
                          "Proposed Bill" = "proposed_bill"))
})
# Monthly totals of the chosen usage variable, one xts-backed column per group.
reactive_tlc1 <- reactive({
  tlc <- fulldata[fulldata$rate_class == "TLC", ]
  long <- melt(tlc, id = c("date", "group_name"), measure.vars = input$tlc_var1)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  # NOTE(review): assumes unique()'s first-appearance order matches cast()'s
  # column order -- verify for unsorted group names.
  colnames(series) <- unique(as.character(tlc$group_name))
  series
})
# Same shape for the chosen billing variable.
reactive_tlc2 <- reactive({
  tlc <- fulldata[fulldata$rate_class == "TLC", ]
  long <- melt(tlc, id = c("date", "group_name"), measure.vars = input$tlc_var2)
  wide <- cast(long, date ~ group_name, sum)
  series <- data.frame(xts(wide[, 2:ncol(wide)], wide[, 1]))
  colnames(series) <- unique(as.character(tlc$group_name))
  series
})
output$profile_usage5 <- renderDygraph({
  dygraph(reactive_tlc1()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "purple")
})
output$profile_bill5 <- renderDygraph({
  dygraph(reactive_tlc2()) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                highlightCircleSize = 5,
                highlightSeriesBackgroundAlpha = 0.2) %>%
    dyOptions(colors = "blue")
})
#######################################
# Forecast Tab
#######################################
# Rows of fulldata for the chosen selector (account number, group code, or
# group name), restricted to the chosen year.
reactdata2 <- reactive({
  switch(input$forecast_select1,
         "accnum2" = subset(fulldata, fulldata$acc_num == input$accid2 & fulldata$year == input$year2),
         "gcode2"  = subset(fulldata, fulldata$group_code == input$group_code2 & fulldata$year == input$year2),
         "gname2"  = subset(fulldata, fulldata$group_name == input$group_name2 & fulldata$year == input$year2))
})
# Forecasting based on selected range
#
# Rebuilds the billing table under the user-supplied rate-design inputs
# (annual fixed charge plus per-kWh and per-kW riders). Branches on whether
# the user selected by group name/code (rows aggregated by billing date) or
# by account number (rows used as-is).
output$forecastoutput<- renderDataTable({
if (input$forecast_select1=="gname2" | input$forecast_select1=="gcode2"){
# Collapse the group's accounts to one row per billing date.
newdata<-aggregate(cbind(total_energy,total_distribution,present_bill) ~ date, data=reactdata2(), sum)
# Proposed bill from the rate inputs applied to the aggregated usage.
proposed_bill<-
input$fixed_charge*12+input$vdc_kwh*newdata$total_energy+input$vdc_kw*newdata$total_distribution+input$eec*newdata$total_energy+input$genc*newdata$total_energy+input$tsc*newdata$total_distribution
# NOTE(review): the differences below use the *stored* proposed_bill column of
# the unaggregated reactdata2() rows, not the proposed_bill just computed, and
# their length may not equal nrow(newdata) -- verify this cbind is intended.
dollar_diff<- (reactdata2()$proposed_bill-reactdata2()$present_bill)
percent_diff<- (reactdata2()$proposed_bill-reactdata2()$present_bill)/reactdata2()$present_bill
forecast.data<-cbind(newdata,data.frame(proposed_bill),data.frame(dollar_diff),data.frame(percent_diff))
colnames(forecast.data)<-c("Date", "Total Energy","Total Distribution Demand","Present Bill","Proposed Bill","$ Difference","% Difference")
}
if (input$forecast_select1=="accnum2")
{
# Single account: keep one row per bill.
newdata<-select(reactdata2(),date,total_energy,total_distribution,present_bill)
proposed_bill<-
input$fixed_charge*12+input$vdc_kwh*reactdata2()$total_energy+input$vdc_kw*reactdata2()$total_distribution+input$eec*reactdata2()$total_energy+input$genc*reactdata2()$total_energy+input$tsc*reactdata2()$total_distribution
dollar_diff<- (reactdata2()$proposed_bill-reactdata2()$present_bill)
percent_diff<- (reactdata2()$proposed_bill-reactdata2()$present_bill)/reactdata2()$present_bill
forecast.data<-cbind(newdata,data.frame(proposed_bill),data.frame(dollar_diff),data.frame(percent_diff))}
# Relabelled here so the accnum2 branch gets display names too (redundant for
# the group branches, which already set them above).
colnames(forecast.data)<-c("Date", "Total Energy","Total Distribution Demand","Present Bill","Proposed Bill","$ Difference","% Difference")
# Blank the table until all three selectors have been initialised.
# NOTE(review): scalar `||` would be the idiomatic operator here; `|` works on
# these length-1 values but does not short-circuit.
if (is.null(input$group_name2) | is.null(input$group_code2) | is.null(input$accid2)) {
forecast.data<-NULL
}
print(forecast.data)
}
,options = list(width=300,lengthMenu = c(12, 24), pageLength = 10)
)
####################################################
#map tab
####################################################
#define color pallete
# Continuous yellow-green-blue palette scaled to the usage column.
# NOTE(review): the domain comes from `fulldata` while the markers are drawn
# from `fulldata_map` -- confirm both share the same max_usage range.
pal <- colorNumeric(
palette = "YlGnBu",
domain = fulldata$max_usage)
#marker popup code
# HTML popup body per marker: customer, account, peak usage, and the
# present/proposed bill comparison (one element per row of fulldata_map).
p2 <- paste("<b>", as.character(fulldata_map$cust_name), "</b><br>",
"<b>Acct:</b>", as.character(fulldata_map$acc_num), "<br>",
"<b>Max On Peak Usage:</b>", as.character(fulldata_map$max_usage), "<br>",
"<b>Current Bill:</b>", as.character(sprintf("$ %3.2f", fulldata_map$present_bill)), "<br>",
"<b>Proposed Bill:</b>", as.character(sprintf("$ %3.2f", fulldata_map$proposed_bill)), "<br>",
"<b>Percent Diff:</b>", as.character(sprintf("%.1f %%", 100*fulldata_map$percent_diff)), "<br>"
)
#create map with all locations
# addCircles command to create heatmap has bug; would like to include if resolved
# Base leaflet map centred on Philadelphia with one marker per metered account.
# The commented-out addCircles/addLegend heatmap is retained for when the
# upstream bug is resolved. `lightbulb` is a marker icon defined elsewhere in
# the app (not visible in this file section).
map <- leaflet(fulldata_map) %>%
addTiles() %>%
setView(lng = -75.1626236, lat = 39.9600265, zoom = 12) %>%
#addCircles(lng = ~ meterlongitude, lat = ~meterlatitude, radius = ~max_usage, weight = 1, color = "#777777",
# fillColor = ~pal(max_usage), fillOpacity = 0.7) %>%
#addLegend(position = "bottomleft", pal = pal, values = ~max_usage, opacity = 1)
addMarkers(lng = ~ meterlongitude, lat = ~meterlatitude, popup = p2, icon = lightbulb)
output$map <- renderLeaflet({map})
# Subset the data according to the group chosen in the dropdown; an empty
# selection means "all groups". Two parallel reactives: one for the tabular
# data, one for the geocoded marker data.
filteredData <- reactive({
  show_all <- identical(input$group, "")
  if (show_all) fulldata else fulldata[fulldata$group_name == input$group, ]
})
filteredData_map <- reactive({
  show_all <- identical(input$group, "")
  if (show_all) fulldata_map else fulldata_map[fulldata_map$group_name == input$group, ]
})
#re-plot markers based upon user inputs
# Redraws the markers whenever the filtered data changes; leafletProxy mutates
# the already-rendered "map" widget instead of rebuilding it.
# NOTE(review): `popup = p2` is the full-length popup vector built from
# fulldata_map, while the marker data here is the *filtered* subset -- when a
# group is selected the popups may not line up with the markers; verify.
observe ({
leafletProxy("map", data = filteredData_map()) %>%
#clearShapes() %>%
#addCircles(lng = ~ meterlongitude, lat = ~meterlatitude, radius = ~max_usage, weight = 1, color = "#777777",
#fillColor = ~pal(max_usage), fillOpacity = 0.7, popup = p2)
clearMarkers() %>%
addMarkers(lng = ~ meterlongitude, lat = ~meterlatitude, popup = p2, icon = lightbulb)
})
# Side-panel plots of the filtered group's usage: monthly bars, one fill
# colour per year, for on-peak and off-peak demand respectively.
output$OnPeakPlot <- renderPlot({
  month_order <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                   "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
  p <- ggplot(filteredData(), aes(x = month, y = peak_max)) +
    geom_bar(position = "dodge", stat = "identity", aes(fill = factor(year))) +
    scale_x_discrete("Month", limits = month_order) +
    ylab("On Peak Energy Usage")
  print(p)
})
output$OffPeakPlot <- renderPlot({
  month_order <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                   "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
  p <- ggplot(filteredData(), aes(x = month, y = offpeak_max)) +
    geom_bar(position = "dodge", stat = "identity", aes(fill = factor(year))) +
    scale_x_discrete("Month", limits = month_order) +
    ylab("Off Peak Energy Usage")
  print(p)
})
#close server function code
})
|
b74724a4f61dd9f3a0e5ffee9d864a4c32bbad58
|
50cd6fe561f74ff2afc85a222c11df4ba27ceda9
|
/Fig 1(geotimescale).R
|
1e29c6c2abbfdcdbc10429fb1ab77f428868fa5a
|
[] |
no_license
|
larijer/Masters_insect_work
|
42daf07305e59ac4fda8073612bcd7e1640de114
|
5a1d609861d2f98afe931a8e80106078c791957a
|
refs/heads/master
| 2021-01-25T04:08:32.721069
| 2015-02-20T01:34:22
| 2015-02-20T01:34:22
| 31,046,206
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,512
|
r
|
Fig 1(geotimescale).R
|
###JERED KARR
####CODE FOR BUBBLE PLOTS
####AUGUST 29 2013
###NEEDS to install gts and MLR and ratio function as well as line and text functions
# Load the geologic timescale lookup table used to draw the time-axis boxes.
gts <-read.csv("Geologic timescale.2013.csv") #geologic scale
# NOTE(review): this second read overwrites the one above with a
# machine-specific absolute path -- only one of the two should be kept.
gts <-read.csv("~/Desktop/untitled folder/Geologic timescale.2013.csv")
gts$Stage <- as.character(gts$Stage)
gts$Stage[gts$Stage=="Norian"]<-"Nor" #Norian is large Stage with short name better is abbrievated Nor
#functions
# Fraction of observations recorded as "exoskeleton". NA values count toward
# the denominator but never the numerator (matching subset()'s NA dropping);
# an empty input yields NaN (0/0).
Ratio.function <- function(x) {
  sum(x == "exoskeleton", na.rm = TRUE) / length(x)
}
#Functions for adding timescale to bottom of plot
# Draw a boxed band between heights z (bottom) and a (top): one vertical tick
# at each of the first y boundary positions in x, plus the two horizontal
# rails spanning x[1]..x[y].
#   x: vector of boundary x-positions; y: number of boundaries to draw.
add.lines.fun <- function(x, y, z, a) {
  # seq_len() is safe when y == 0, where 1:y would wrongly yield c(1, 0).
  for (i in seq_len(y)) {
    lines(c(x[i], x[i]), c(z, a))
  }
  lines(c(x[1], x[y]), c(z, z))
  lines(c(x[1], x[y]), c(a, a))
}
# Write interval names centred between consecutive boundaries, truncating each
# label (strtrim) so it fits inside its box.
#   labs: interval names; xl: boundary positions (length(labs) + 1);
#   yl: text height; z: cex scaling, which also shrinks the allowed width.
add.text.fun <-function(labs,xl,yl,z){ #adds names, trimmed to fit into boxes of time lengths
# One label per interval between consecutive boundary positions.
for(i in 1:(length(xl)-1))
text(x=(xl[i]+xl[i+1])/2,y=yl,labels= strtrim(labs[i],width=(xl[i]-xl[i+1])/(((max(xl)-min(xl))/80)*z)),cex=z) #80 can be adjusted based on preference 50 gives larger space between names and boxes....
}
#add second plot for timescale
# Two stacked panels: the bubble plot on top, the geologic timescale below.
# NOTE(review): MLR and MR (the specimen data frames) are not defined in this
# script -- they must be loaded beforehand in the session.
bf <- layout(matrix(c(1,1,1,2),ncol=1,4,byrow=TRUE), 2, TRUE)
layout.show(bf)
par(mar=c(0,5,3,4.2))
#plot
# Empty frame first (type='n'); x axis is reversed age in Ma. Bubble size
# scales with sample count via n/(n+500).
plot(levels((factor(MLR$age))),by(MLR$type.body.part,MLR$age,Ratio.function),cex=10*by(MLR$type.body.part,MLR$age,length)/(by(MLR$type.body.part,MLR$age,length)+500),bg="light grey",pch=21,ylab="Proportion Articulated",xlim=c(330.9,0), xaxt="n",yaxs="r",xaxs="i",type='n',cex.lab=2,font.lab=1)
abline(v=gts$S.age[29],lty=2)
points(levels(factor(MLR$age)),by(MLR$type.body.part,MLR$age,Ratio.function),cex=10*by(MLR$type.body.part,MLR$age,length)/(by(MLR$type.body.part,MLR$age,length)+500),bg="light grey",pch=21)
points(levels(factor(MR$age)),by(MR$type.body.part,MR$age,Ratio.function),cex=10*by(MR$type.body.part,MR$age,length)/(by(MR$type.body.part,MR$age,length)+500),bg="pink",pch=21)
legend(300,.8,c("Wings","All"), pt.bg=c("pink",'light grey'),pch=21,pt.cex=2)
par(mar=c(5,5,0,4.2))
# Bottom panel: invisible plot establishing the coordinate system, then the
# Era / Period / Stage bands drawn with the helper functions above.
plot(MLR$age,jitter(MLR$type.body1,.2), xlim=c(330.9,0),ylim=c(0,1),type="n",yaxt='n',ylab='',xlab="Age (Ma)",yaxs="i",xaxs='i',cex.lab=2,font.lab=1)
add.lines.fun(gts$P.age,length(na.omit(gts$P.age)),.4,.8)
add.lines.fun(gts$S.age,length(na.omit(gts$S.age)),.8,1)
add.lines.fun(gts$E.age,4,0,0.4)
# NOTE(review): this na.omit() result is discarded -- dead expression.
na.omit(gts)
add.text.fun(na.omit(gts$Era),na.omit(gts$E.age),.2,2)
add.text.fun(na.omit(gts$Period),na.omit(gts$P.age),.6,1.5)
add.text.fun(na.omit(gts$Stage),na.omit(gts$S.age),.9,.9)
|
f6996b9354767f600d0b27d9f212f33e91c1493e
|
42ac78fed8e8494cc54a533e6cb9b4c18ca51369
|
/branches/trunk-lme4/R/AllClass.R
|
0873350974c1675f09512f9be2797506dad5d0a5
|
[] |
no_license
|
LTLA/Matrix
|
8a79cac905cdb820f95190e99352cd9d8f267558
|
2b80087cfebc9f673e345000aeaf2170fc15b506
|
refs/heads/master
| 2020-08-07T20:22:12.075155
| 2019-09-28T21:21:10
| 2019-09-28T21:21:10
| 213,576,484
| 0
| 1
| null | 2019-10-13T00:56:38
| 2019-10-08T07:30:49
|
C
|
UTF-8
|
R
| false
| false
| 7,543
|
r
|
AllClass.R
|
## Class definitions for the package
## "lmList": a list of per-group lm fits, carrying the originating call and
## whether a pooled standard deviation is used across groups.
setClass("lmList",
representation(call = "call",
pool = "logical"),
contains = "list")
## Confidence intervals computed from an "lmList", stored as an array.
setClass("lmList.confint", contains = "array")
## -------------------- lmer-related Classes --------------------------------
## Register these S3 classes with S4 so they can appear as slot types and in
## method signatures below.
setOldClass("data.frame")
setOldClass("family")
setOldClass("logLik")
setOldClass("terms")
## mixed effects representation
## Core mixed-effects model representation: the original data, cached
## cross-products, and the quantities updated during optimization. Slot
## validity is checked in compiled code (mer_validate).
setClass("mer",
representation(## original data
flist = "list", # list of grouping factors
Zt = "dgCMatrix", # sparse representation of Z'
X = "matrix", # X
y = "numeric", # y
wts = "numeric", # weights
## do we need this for mer?
wrkres = "numeric",# working residuals (copy of y for LMMs)
## invariants derived from data structure
cnames = "list", # column names of model matrices
nc = "integer", # dimensions of blocks in Omega
Gp = "integer", # Pointers to groups of rows in Zt
## quantities that vary when Z, X or y are updated
XtX = "dpoMatrix", # X'X
ZtZ = "dsCMatrix", # Z'Z
ZtX = "dgeMatrix", # Z'X
Zty = "numeric", # Z'y
Xty = "numeric", # X'y
## primary slots that vary during the optimization
## When Omega is updated, these are updated
Omega = "list", # list of relative precision matrices
## Cholesky factor of inflated [Z:X:y]'[Z:X:y]
L = "dCHMsuper", # sparse Cholesky factor of Z'Z + Omega
RZX = "dgeMatrix",
RXX = "dtrMatrix",
rZy = "numeric",
rXy = "numeric",
devComp = "numeric", # Components of deviance
deviance = "numeric", # Current deviance (ML and REML)
## Secondary slots only evaluated when requested.
fixef = "numeric",
ranef = "numeric",
RZXinv = "dgeMatrix",
bVar = "list",
gradComp = "list",
## status indicator
status = "integer"
),
validity = function(object) .Call(mer_validate, object)
)
## Representation of linear and generalized linear mixed effects model
## "lmer" extends "mer" with the model frame and the fixed-effects terms;
## "glmer" further adds the glm family and prior weights.
setClass("lmer",
representation(frame = "data.frame",
call = "call", # call to model-fitting function
terms = "terms"), # terms for fixed-effects
contains = "mer")
setClass("glmer",
representation(family = "family", # glm family
weights = "numeric"),
contains = "lmer")
## Representation of linear and generalized linear mixed effects model
## Second-generation representation based on a single sparse [Z;X;-y]' matrix
## and a TSST' factorization of the relative variance matrices; validity is
## checked in compiled code (lmer2_validate).
setClass("lmer2",
representation(## original data
frame = "data.frame", # model frame or empty frame
call = "call", # matched call to model-fitting function
terms = "terms", # terms for fixed-effects
flist = "list", # list of grouping factors
ZXyt = "dgCMatrix", # sparse form of [Z;X;-y]'
weights = "numeric",# can be of length 0 for constant wts
offset = "numeric", # can be of length 0 for 0 offset
cnames = "list", # column names of model matrices
Gp = "integer", # pointers to groups of rows in ZXyt
dims = "integer", # dimensions and indicators
## quantities that vary with Z, X, y, weights or offset
A = "dsCMatrix", # tcrossprod(ZXyt) (w. wts and offset)
## slots that vary during the optimization
ST = "list", # list of TSST' rep of rel. var. mats
L = "CHMfactor", # sparse Cholesky factor of A*
deviance = "numeric", # ML and REML deviance and components
## Secondary slots only evaluated when requested.
fixef = "numeric",
ranef = "numeric"
),
validity = function(object) .Call(lmer2_validate, object)
)
setClass("glmer2",
representation(family = "family",
X = "matrix", # model matrix for fixed effects
eta = "numeric", # linear predictor
mu = "numeric", # inverse link of linear predictor
moff = "numeric", # model offset, if any
pwts = "numeric", # prior weights, if any
y = "numeric"), # response
contains = "lmer2")
setClass("nlmer",
representation(## original data
env = "environment", # evaluation environment for model
model = "call", # model function as a function call
frame = "data.frame", # model frame or empty frame
pnames = "character", # parameter names for nonlinear model
call = "call", # matched call to model-fitting function
flist = "list", # list of grouping factors
Xt = "dgCMatrix", # sparse form of X'
Zt = "dgCMatrix", # sparse form of Z'
y = "numeric", # response
weights = "numeric",# can be of length 0 for constant wts
cnames = "list", # column names of model matrices
Gp = "integer", # pointers to groups of columns in Z
dims = "integer", # dimensions and indicators
## slots that vary during the optimization
ST = "list", # list of TSST' rep of rel. var. mats
Vt = "dgCMatrix", # sparse form of V'=(ZTS)'
L = "CHMfactor", # sparse Cholesky factor of V'V + I
mu = "numeric", # fitted values at current values of beta and b
Mt = "dgCMatrix", # transpose of gradient matrix d mu/d u
deviance = "numeric", # ML and REML deviance and components
fixef = "numeric", # the fixed effects, beta
ranef = "numeric", # the random effects, b
uvec = "numeric" # orthogonal random effects, u, s.t. b=TSu
),
validity = function(object) .Call(nlmer_validate, object)
)
setClass("summary.mer", # the "mer" result ``enhanced'' :
representation(
isG = "logical",
methTitle = "character",
logLik= "logLik",
ngrps = "integer",
sigma = "numeric", # scale, non-negative number
coefs = "matrix",
vcov = "dpoMatrix",
REmat = "matrix",
AICtab= "data.frame"
),
contains = "mer")
setClass("summary.lmer2", # the "lmer2" result ``enhanced'' :
representation(
isG = "logical",
methTitle = "character",
logLik= "logLik",
ngrps = "integer",
sigma = "numeric", # scale, non-negative number
coefs = "matrix",
vcov = "dpoMatrix",
REmat = "matrix",
AICtab= "data.frame"
),
contains = "lmer2")
setClass("summary.lmer", contains = c("summary.mer", "lmer"))
setClass("summary.glmer", contains = c("summary.mer", "glmer"))
setClass("ranef.lmer", contains = "list")
setClass("coef.lmer", contains = "list")
setClass("pedigree", representation =
list(sire = "integer", dam = "integer", label = "character"),
validity = function(object) {
n <- length(sire <- object@sire)
if (length(dam <- object@dam) != n)
return("sire and dam slots must be the same length")
if (length(object@label) != n)
return("'label' slot must have the same length as 'sire' and 'dam'")
if(n == 0) return(TRUE)
animal <- 1:n
snmiss <- !is.na(sire)
dnmiss <- !is.na(dam)
if (any(sire[snmiss] >= animal[snmiss]) ||
any(dam[dnmiss] >= animal[dnmiss]))
return("the sire and dam must precede the offspring")
if (any(sire[snmiss] < 1 | sire[snmiss] > n) |
any(dam[dnmiss] < 1 | dam[dnmiss] > n))
return(paste("Non-missing sire or dam must be in [1,",
n, "]", sep = ''))
TRUE
})
|
50486609acd9dbe13eb32b8dbeb29f25f352afe2
|
9c9822731b8d63c9508d5e22f6623babb9f1b405
|
/run_analysis.R
|
948f1603c39ea02d691551e99bf9c2fc8af67448
|
[] |
no_license
|
maheshwickramarachchi/DataScienceCourse03
|
42c680eb67007d7f87a9a73845bdd4239883e780
|
7a9f908ffa9fbdca8bf9ce4976499cf94164409e
|
refs/heads/master
| 2021-01-18T22:13:52.441245
| 2016-06-11T16:14:39
| 2016-06-11T16:14:39
| 60,907,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,074
|
r
|
run_analysis.R
|
# run_analysis.R
# Downloads the UCI HAR smartphone dataset, merges the train/test splits,
# keeps only the mean/std measurements, labels the activities, and writes a
# tidy dataset of per-subject, per-activity averages to "tidyData.txt".

# --- Download data (skipped when the archive is already present) ---
folderName <- "UCI_HAR_Dataset.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists(folderName)) {
  download.file(fileURL, folderName)
}
unzip(folderName)

# --- Load metadata: feature names and activity labels ---
features <- read.table("./UCI HAR Dataset/features.txt", stringsAsFactors = TRUE)
features[, 2] <- as.character(features[, 2])
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt", stringsAsFactors = TRUE)
activity_labels[, 2] <- as.character(activity_labels[, 2])

# Keep only the mean/std features and build clean column names.
featuresRequired <- grep(".*mean.*|.*std.*", features[, 2])
featuresRequiredNames <- features[featuresRequired, 2]
featuresRequiredNames <- gsub("-mean", "Mean", featuresRequiredNames)
featuresRequiredNames <- gsub("-std", "Std", featuresRequiredNames)
featuresRequiredNames <- gsub("[-()]", "", featuresRequiredNames)

# --- Load measurements (selected columns only) plus labels/subjects ---
train <- read.table("./UCI HAR Dataset/train/X_train.txt", stringsAsFactors = TRUE)[featuresRequired]
trainActivity <- read.table("./UCI HAR Dataset/train/y_train.txt", stringsAsFactors = TRUE)
trainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt", stringsAsFactors = TRUE)
test <- read.table("./UCI HAR Dataset/test/X_test.txt", stringsAsFactors = TRUE)[featuresRequired]
testActivity <- read.table("./UCI HAR Dataset/test/y_test.txt", stringsAsFactors = TRUE)
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt", stringsAsFactors = TRUE)

# --- Merge the splits and attach labels ---
trainData <- cbind(trainSubject, trainActivity, train)
testData <- cbind(testSubject, testActivity, test)
allData <- rbind(trainData, testData)
names(allData) <- c("Subject", "Activity", featuresRequiredNames)
allData$Activity <- factor(allData$Activity, levels = activity_labels[, 1], labels = activity_labels[, 2])

# --- Average of each variable for each activity and each subject ---
library(reshape2)
meltData <- melt(allData, id = c("Subject", "Activity"), measure.vars = featuresRequiredNames)
finalTable <- dcast(meltData, Activity + Subject ~ variable, mean)
# View() errors under Rscript, so only call it interactively.
if (interactive()) View(finalTable)
write.table(finalTable, file = "tidyData.txt", row.names = FALSE)
|
3837a8a9d1727974c4d02f326ab703532035d455
|
f15b3be9ba52ba4b378590c73c23df6b590ad776
|
/R/hullMuirMaxDistance.R
|
98101989fdd2755d2fc1f579166a99f05b05dbf6
|
[] |
no_license
|
cran/windAC
|
76fc8747c2465b50d8d926e364147cf82d3d337b
|
ec65551fe0edf90d71291c188c7bceeb2f3291f7
|
refs/heads/master
| 2023-04-06T03:21:29.992300
| 2023-03-22T18:20:04
| 2023-03-22T18:20:04
| 218,116,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,924
|
r
|
hullMuirMaxDistance.R
|
#' @name hullMuirMaxDistance
#'
#' @title Calculate the Hull and Muir (2010) maximum distance
#'
#' @description Calculate the maximum fall distance from a turbine using the
#' regression model from Hull and Muir (2010).
#'
#' @param hubHeight Numeric vector of turbine hub heights.
#' @param bladeRadius Numeric vector of turbine blade radii, the same length
#'   (and units) as \code{hubHeight}.
#' @param ... Currently ignored.
#'
#' @details Using the linear regression coefficients from Hull and Muir (2010), a
#' maximum distance is calculated. This is done for three size classes (bats,
#' small birds (SB), and large birds (LB)) separately.
#'
#' It is assumed that \code{hubHeight} and \code{bladeRadius} have the same units.
#'
#' Note: Hull and Muir (2010) used the range of 65 m < \code{hubHeight} < 94 m and 33 m < \code{bladeRadius} < 55 m.
#' Anything outside of this range is extrapolation and should only be done with care.
#'
#' @return data frame with one row per turbine: columns \code{BAT}, \code{SB},
#'   \code{LB} (maximum distances, same units as the inputs) plus the
#'   \code{hubHeight} and \code{bladeRadius} used.
#'
#' @export hullMuirMaxDistance
#'
#' @references Hull, C. L., & Muir, S. (2010).
#' Search areas for monitoring bird and bat carcasses at wind farms using a Monte-Carlo model.
#' Australasian Journal of Environmental Management, 17(2), 77-87.
#'
#' @examples
#'
#' hubHeights <- rnorm(10, mean = 87.5, sd = 10)
#' bladeRadii <- rnorm(10, mean = 62.5, sd = 10)
#'
#' hullMuirMaxDistance(hubHeight = hubHeights, bladeRadius = bladeRadii)
hullMuirMaxDistance <- function(hubHeight, bladeRadius, ...){
  ## Regression coefficients taken directly from Hull and Muir (2010).
  ## Rows: intercept, hubHeight slope, bladeRadius slope. Columns: size class.
  HMcoef <- data.frame(BAT = c(constant = 15.9, hub = 0.672, rad = 0.046),
                       SB  = c(constant = 31.6, hub = 0.637, rad = 0.097),
                       LB  = c(constant = 70.6, hub = 0.581, rad = 0.176))
  ## Check arguments.
  if (length(hubHeight) != length(bladeRadius)) {
    stop('The length of hubHeight and bladeRadius must be the same.')
  }
  input <- c(hubHeight, bladeRadius)
  ## `||` short-circuits so the numeric comparisons only run on numeric input;
  ## anyNA() turns an NA into this clear error rather than the cryptic
  ## "missing value where TRUE/FALSE needed".
  if (!is.numeric(input) || anyNA(input) || any(input < 0)) {
    stop('hubHeight and bladeRadius must each be a nonnegative value.')
  }
  ## One row per turbine: [1, hub, rad] %*% coefficients -> (BAT, SB, LB).
  maxDist <- as.data.frame(cbind(1, hubHeight, bladeRadius) %*% as.matrix(HMcoef))
  maxDist[, 'hubHeight'] <- hubHeight
  maxDist[, 'bladeRadius'] <- bladeRadius
  maxDist
}
|
0bf98833f9ed1c352223be2644b93d6a8a93e61e
|
f557e6f075e60798cff69f66f22dfaad93b22f8e
|
/noaa.R
|
9d6b3bcd085ac5868fa953b129e6b8c605d09118
|
[] |
no_license
|
eugenechoGH/mercuryGH
|
6b9d4bd176cb83747a989e0731827c3cd1505a84
|
580e366581fbf0bb94d917741896a2458981dd2f
|
refs/heads/master
| 2020-04-07T05:55:49.003540
| 2018-11-18T18:56:45
| 2018-11-18T18:56:45
| 158,115,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
noaa.R
|
library(rnoaa)
library(data.table)
library(lubridate)
library(devtools)
library(stringr)
# token: WMfLumOaZKcCSkuyMiPzdssqTTSnnkaR
# SECURITY NOTE(review): an NCDC API token is committed in this file (comment
# above and the calls below); it should be moved to an environment variable
# and the committed token revoked.
# Exploratory NCDC queries (network calls; results are printed, not stored).
ncdc(datasetid = "PRECIP_HLY", locationid = "ZIP:28801", datatypeid = "HPCP",
limit = 5, token = "WMfLumOaZKcCSkuyMiPzdssqTTSnnkaR")
ncdc_stations(token="WMfLumOaZKcCSkuyMiPzdssqTTSnnkaR")$data
# Read the GHCND stations file in one gulp and split on CRLF line endings.
str.station<-'Mercury Challenge/code and data/Data/ghcnd-stations.txt'
vec.station<-unlist(str_split(readChar(str.station,nchars=file.info(str.station)$size),pattern = '\r\n'))
# NOTE(review): this empty data.table is immediately overwritten below -- dead.
dt.station<-data.table()
# Fixed-width parse of the station records; the column positions appear to
# follow the GHCND stations format -- verify against the format definition.
dt.station<-
data.table(ID=trimws(substr(vec.station,start=1,stop = 11)),
Latitude=as.numeric(substr(vec.station,start=13,stop = 20)),
Longitude=as.numeric(substr(vec.station,start=22,stop = 30)),
ELEVATION=as.numeric(substr(vec.station,start=32,stop = 37)),
STATE=trimws(substr(vec.station,start=39,stop = 40)),
NAME=trimws(substr(vec.station,start=42,stop = 71)),
`GSN FLAG`=trimws(substr(vec.station,start=73,stop = 75)),
`HCN/CRN FLAG`=trimws(substr(vec.station,start=77,stop = 79)),
`WMO ID`=trimws(substr(vec.station,start=81,stop = 85)))
fwrite(dt.station,'Mercury Challenge/code and data/Data/ghcnd-stations.csv')
# EGYPT stations
# Station IDs beginning "EG" are Egyptian; fetch data for the first one.
vec.egy_stations<-dt.station[substr(ID,start = 1,stop = 2)=='EG']$ID
ghcnd(stationid = vec.egy_stations[1])
|
b2776dbd304c38d2253305b2e4fe54d3d2fea931
|
13e8bd394fc0b606ceb0cf73d0be8ef720910758
|
/man/BadLM.Rd
|
5d142425d60fde55eb699e927352929d4ab972b3
|
[] |
no_license
|
cran/lmviz
|
7e6634a084a5469cf56505dae9c4577f0aba62b5
|
ed9121c3f19c7112815107bcab1189c41f46db00
|
refs/heads/master
| 2021-07-04T08:41:16.480716
| 2020-08-24T19:40:02
| 2020-08-24T19:40:02
| 158,114,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,040
|
rd
|
BadLM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BadLM.r
\name{BadLM}
\alias{BadLM}
\title{BadLM shiny app}
\usage{
BadLM(dist.custom = NULL, dist.custom.veravar = NULL, dist.custom.param = NULL)
}
\arguments{
\item{dist.custom}{custom generator for Y, see examples below}
\item{dist.custom.veravar}{variance function for dist.custom, see examples below}
\item{dist.custom.param}{parameters for dist.custom, see examples below}
}
\value{
None
}
\description{
Launches the BadLM shiny app, a tool to explore the consequences of the violation of homoscedasticity and/or normality assumptions in a linear model
}
\details{
Allows to set a data generating mechanism for a response variable \eqn{Y} and an explanatory variable \eqn{x} such that \eqn{E(Y|X=x)=\beta_1+\beta_2 x}, various possible distributions for \eqn{Y} are available, depending on the selected distributional assumptions the variance may also be set as a function of \eqn{x}. The program performs a number of simulations from the fit and visualizes the simulated sampling distributions of the estimators.
The user can also decide the distribution of the explanatory variable \eqn{x}: the shape is chosen by the user, then the variable is standardized to have minimum equal to 0 and maximum equal to \eqn{x^*<1}, also chosen by the user (the purpose of this is to explore the out of sample prediction performance of the estimated model). The observations \eqn{x_1,\ldots,x_n} are simulated only once, and kept fixed as appropriate for a regression model which is conditional on the explanatory variable.
Additional data generating mechanisms may be specified by the user and given as an input to the function calling the shiny app (see examples).
Full help is available from within the shiny app.
}
\examples{
## Not run:
if (interactive()){
BadLM()
# function to generate Y
dist=function(n,my,parvet,par,x) {
my+parvet*rt(n,df=par[1])
}
# function to give the true value of the variance
varfun=function(my,parvet,par,x){
if (par[1]>2) {
veravar=parvet^2*par[1]/(par[1]-2)
} else {
veravar=-1
}
return(veravar)
}
# dist and varfun must have those argument where
# my is the vector mean of Y
# parvet is g() computed at x values
# par is a vector of two parameters
param=list(nome="Student-t (bis)", #name of dist for drop down menu (optional)
nomepar1="Gradi di libertà ", #name of parameter 1 (optional)
minpar1=1,maxpar1=30, #min/max of param 1 (needed)
valuepar1=10, #initial value of param1 (optional)
steppar1=0.1, #increment of param1 (optional)
enableVarFunPanel=TRUE #whether the panel to input g should appear
)
BadLM(dist.custom=dist,dist.custom.veravar = varfun,dist.custom.param=param)
dist=function(n,my,parvet,par,x) {
my+rnorm(n,0,sqrt(par[1]+par[2]*x^2))
}
# function to give the true value of the variance
varfun=function(my,parvet,par,x){
return(par[1]+par[2]*x^2)
}
# dist and varfun must have those argument where
# my is the vector mean of Y
# parvet is g() computed at x values
# par is a vector of two parameters
param=list(nome="N(.,b1+b2*x^2)", #name of dist for drop down menu (optional)
nomepar1="b1", #name of parameter 1 (optional)
minpar1=1,maxpar1=3, #min/max of param 1 (needed)
valuepar1=1, #initial value of param1 (optional)
steppar1=0.1, #increment of param1 (optional)
nomepar2="b2", #name of parameter 1 (optional)
minpar2=0,maxpar2=3, #min/max of param 1 (needed)
valuepar2=1, #initial value of param1 (optional)
steppar2=0.1, #increment of param1 (optional)
enableVarFunPanel=FALSE, #whether the panel to input g should appear
showVarFun=TRUE
)
BadLM(dist.custom=dist,dist.custom.veravar = varfun,dist.custom.param=param)
}
## End(Not run)
}
\author{
Francesco Pauli, \email{francesco.pauli@deams.units.it}
}
|
ba24aad063dcf38d190b85756870d5f1942e1d51
|
733a62f25f5a690d6099aa31b5f31393d57c09f4
|
/R/load_packages.R
|
3122a15647cffb804ab81c9187435560d76efcce
|
[
"MIT"
] |
permissive
|
guhjy/ck37r
|
3788d5e1bc85a71d8fb1718af6372f6d0ff35cb1
|
2e93bcf56fe53a885b7cb176e6d1a0aec4a88262
|
refs/heads/master
| 2020-12-31T07:34:31.647291
| 2017-03-13T20:25:58
| 2017-03-13T20:25:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
r
|
load_packages.R
|
#' Load a list of packages.
#'
#' Load packages and install them from CRAN if they aren't already available.
#'
#' @param pkgs Character vector of packages to load.
#' @param auto_install Install any packages that could not be loaded.
#' @param update Update packages where possible.
#' @param verbose If T display more detailed information during execution.
#' @param ... Any additional parameters to pass through to install.packages()
#'
#' @importFrom utils capture.output install.packages update.packages
#'
#' @examples
#'
#' # Load these 4 packages and install them if necessary.
#' load_packages(c("MASS", "SuperLearner", "tmle", "doParallel"), auto_install = TRUE)
#'
#' @export
load_packages = function(pkgs = NULL, auto_install = F, update = F,
verbose = F, ...) {
# Attempt an update first, in case it will help with installing new packages.
update_result = NULL
if (update) {
# Update any R packages that can be updated.
update_result = update.packages(ask = F, checkBuilt = T)
}
# Try to load each package, and save whether or not it succeeded.
capture.output({ result = sapply(pkgs, require, character.only=T, quietly=T) })
install_result = NULL
result_retry = NULL
# Return a helpful message and the install.packages command if needed.
if (sum(!result) > 0) {
cat("\n\nThese packages need to be installed:", paste(pkgs[!result], collapse=", "), "\n")
install_code = paste0('install.packages(c("', paste(pkgs[!result], collapse='", "'), '"))')
cat(install_code, "\n")
if (auto_install) {
cat("Auto-installing from repository:", getOption("repos")[1], "\n")
install_result = install.packages(pkgs[!result], ...)
# Try to load newly installed packages.
capture.output({ result_retry = sapply(pkgs[!result], require, character.only=T, quietly=T) })
}
} else {
install_code = ""
}
results = list(packages = pkgs, pkgs_result = result, pkgs_retry = result_retry,
install_code = install_code, update_result = update_result)
invisible(result)
}
|
24f5d54c657cb0d5d78bf5d7ccdba2c7da91b684
|
00cfb5369afb87eb8d471886e6078b9f67b8d25a
|
/dynamics.R
|
9b287c221ff788e10fd0a57de2851dd238105133
|
[] |
no_license
|
munrohannah/PhD_Scripts
|
b9c8a55c6657881c0b31affbdc1fd70fd7b193eb
|
65eedc7381f7aae563ed53f35026604a64163234
|
refs/heads/master
| 2022-11-11T06:12:00.322187
| 2020-06-30T16:49:10
| 2020-06-30T16:49:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,122
|
r
|
dynamics.R
|
# Tick dynamics analysis: negative-binomial GLMs of tick Count on Host and
# year, fitted separately for the three instar subsets.
# NOTE(review): the setwd() path is machine-specific -- run from the project
# directory instead where possible.
setwd("D:/Rfiles/TickPrev") #how to set my working directory
data<-read.csv("TickDynamics.csv",header=TRUE) #how to import a file, csv file, and has a header
head(data) # gives the first few lines of data
str(data) #check the data type and varible
#setting up each factor so not treated as a continous variable
data$Instar<-factor(data$Instar)
data$year<-factor(data$year)
data$dayOyear<-factor(data$dayOyear)
data$Date_Collected<-factor(data$Date_Collected)
#subset data
dataIN1<-subset(data,Instar==1)
dataIN2<-subset(data,Instar==2)
dataIN3<-subset(data,Instar==3)
library(MASS)
# Instar 1: Host and year models. `m1` is reused for every fit below, so only
# the last model survives for later inspection.
m1 <- glm.nb(Count ~ Host, data = dataIN1)
summary(m1)
anova(m1)
m1 <- glm.nb(Count ~ year, data = dataIN1)
summary(m1)
anova(m1)
# Instar 2: same pair of models.
m1 <- glm.nb(Count ~ Host, data = dataIN2)
summary(m1)
anova(m1)
m1 <- glm.nb(Count ~ year, data = dataIN2)
summary(m1)
anova(m1)
# Instar 3: Host, year, and year + 2-week-period models.
m1 <- glm.nb(Count ~ Host, data = dataIN3)
summary(m1)
anova(m1)
m1 <- glm.nb(Count ~ year, data = dataIN3)
summary(m1)
anova(m1)
m1 <- glm.nb(Count ~ year + X2week , data = dataIN3)
summary(m1)
anova(m1)
# NOTE(review): capital-A Anova() is car::Anova, but library(car) is never
# loaded in this script -- this line errors unless car is attached elsewhere.
Anova(m1,type="II")
|
579b09842908f99470fac911b46d2d42d99d14c2
|
4a5e95779efd63836de9a3af8c19d4dba1f0e748
|
/rcdk/man/getsmiles.Rd
|
8f9951b9e1e7ccb88ac13a5987e29f84d13cf6c1
|
[] |
no_license
|
yccai/cdkr
|
2ac693148d878c2a7d8536d8366c6952187bdb27
|
562d39639e0f1c1b2d669c86e45260290a3ef044
|
refs/heads/master
| 2021-01-01T16:20:19.728781
| 2016-12-27T11:45:49
| 2016-12-27T11:45:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,590
|
rd
|
getsmiles.Rd
|
\name{get.smiles}
\alias{get.smiles}
\title{
Get the SMILES for a Molecule
}
\description{
The function will generate a SMILES representation of an
IAtomContainer object. The default parameters of the CDK SMILES
generator are used. This can mean that for large ring systems the
method may fail. See CDK Javadocs for more information
}
\usage{
get.smiles(molecule, type = 'generic', aromatic = FALSE, atomClasses = FALSE)
}
\arguments{
\item{molecule}{A Java object of class \code{IAtomContainer}}
\item{type}{The type of SMILES to output. Possible values are
\enumerate{
\item generic - non-canonical SMILES string, different atom ordering produces different SMILES. No isotope or stereochemistry encoded.
\item unique - canonical SMILES string, different atom ordering produces the same* SMILES. No isotope or stereochemistry encoded.
\item isomeric - non-canonical SMILES string, different atom ordering produces different SMILES. Isotope and stereochemistry is encoded.
\item absolute - canonical SMILES string, different atom ordering produces the same SMILES. Isotope and stereochemistry is encoded.
}}
\item{aromatic}{If \code{TRUE} aromatic SMILES are generated. The default is to output Kekule form}
\item{atomClasses}{If \code{TRUE} include atom classes.}
}
\value{
An R character object containing the SMILES
}
\examples{
sp <- get.smiles.parser()
smiles <- c('CCC', 'CCN', 'CCN(C)(C)', 'c1ccccc1Cc1ccccc1','C1CCC1CC(CN(C)(C))CC(=O)CC')
mols <- parse.smiles(smiles)
}
\keyword{programming}
\author{Rajarshi Guha (\email{rajarshi.guha@gmail.com})}
|
66b9e221e5c47139604b33790ec813a541793656
|
cf26b183b4a36144637938283813abd4a23cb303
|
/man/R2Matlab.Rd
|
c53dbeb060e9448e0790ab7fb38b442034395827
|
[] |
no_license
|
cran/popdemo
|
5312fbff68852f6cbb602ac1b1abae05e8879719
|
2f6b882b1b0851d1942c5892071c956a5457a6a0
|
refs/heads/master
| 2023-03-16T02:09:54.158218
| 2021-11-16T13:20:02
| 2021-11-16T13:20:02
| 17,719,255
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,559
|
rd
|
R2Matlab.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R2Matlab.R
\name{R2Matlab}
\alias{R2Matlab}
\title{Convert matrices into Matlab style strings}
\usage{
R2Matlab(A, noquote = FALSE)
}
\arguments{
\item{A}{a numeric matrix of any dimension}
\item{noquote}{(optional) if \code{noquote=TRUE} then the returned character
vector is printed without quotes.}
}
\value{
Object of class character representing \code{A} in a Matlab style.
}
\description{
Convert \R objects of class matrix into character strings that represent the
matrix in a Matlab style
}
\details{
Matlab reads matrices using a unique one-line notation that can prove useful
for storage in databases and importing multiple matrices into a program at
once, amongst other applications. This notation is by row, with "[" and "]"
to specify the beginning and end of the matrix respectively, ";" to specify a
new row and a space between each matrix element. Thus, the \R matrix created
using \code{matrix(c(0,1,2,0.5,0.1,0,0,0.6,0.6), byrow=TRUE, ncol=3)} is
equivalent to [0 1 2;0.5 0.1 0;0 0.6 0.6].
\code{R2Matlab} takes an \R object of class matrix converts it into a
Matlab-style character string that may be useful for exporting into databases.
}
\examples{
# Create a 3x3 PPM
( A <- matrix(c(0,1,2,0.5,0.1,0,0,0.6,0.6), byrow=TRUE, ncol=3) )
# Code the matrix in a Matlab style
R2Matlab(A)
# Print without quotes
R2Matlab(A, noquote=TRUE)
}
\seealso{
\code{\link{Matlab2R}}
}
\concept{Matlab}
|
319f380ca96c7051df36ff76ca0da8f3874b5325
|
62f84d7157e0e3bfc57cc6d6942ea9205adc4463
|
/man/titer.toLog.Rd
|
e8637e0c29b6d68bb402cd1aadb439b40b1f26d2
|
[
"MIT"
] |
permissive
|
SamT123/acutilsLite
|
251da4cf955c05a4e52a6b10e59fa2876759ea4a
|
fb36cd0f0786b9a9822ebda76fe4a44538569c4b
|
refs/heads/master
| 2023-03-02T20:52:23.145170
| 2021-02-15T10:03:21
| 2021-02-15T10:03:21
| 315,282,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 228
|
rd
|
titer.toLog.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acdatabase_merging.R
\name{titer.toLog}
\alias{titer.toLog}
\title{titer to logtiter}
\usage{
titer.toLog(titers)
}
\description{
titer to logtiter
}
|
eac3cfe3696c9b2fcdab4df903ddbb98b30f3740
|
af8aeaa3b352ee10ffadc073c2cb9e6940465dac
|
/Meta_QC/3. Correlation plot of betas.r
|
30dce8822a1737c6f05547386993110aec3c0833
|
[] |
no_license
|
ammegandchips/PACE_Paternal_BMI
|
357895485b6c17eba3901c4ce5267e45cbe688dc
|
724d1fde1b04a0a98083ba6fae1c1b161134f600
|
refs/heads/master
| 2020-06-26T00:49:41.386778
| 2020-02-10T19:33:16
| 2020-02-10T19:33:16
| 96,999,961
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 642
|
r
|
3. Correlation plot of betas.r
|
# Cohort QC: correlation plot of effect estimates (betas) across all 12
# EWAS results files for a cohort.
time_point <-"birth"#or childhood; used only to build the output file name
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail loudly here.
require(corrplot)
require(plyr)
# Pull the "Effect" column (effect-size estimates) out of an EWAS results
# data frame.
extract.coefficients <- function(ewas.dataframe){
  effect.idx <- which(colnames(ewas.dataframe) == "Effect")
  ewas.dataframe[, effect.idx]
}
# Draw a pairwise correlation plot of the beta estimates across all results
# data frames in `cohort` (a named list) and save it as a PNG named
# "<cohort_name>.correlation.<time_point>.png".
# Relies on the global `time_point` and on extract.coefficients() above.
correlation.plot<-function(cohort,cohort_name){
  x <- data.frame(do.call(cbind, lapply(cohort,extract.coefficients)))
  # Bug fix: label the columns after the list actually passed in, not the
  # global `list.of.results` (which may differ from `cohort`).
  colnames(x) <- names(cohort)
  filename <- paste0(cohort_name,".correlation.",time_point,".png")
  png(filename,width=15,height=18,units="cm",res=300)
  # Close the device even if corrplot() errors.
  on.exit(dev.off(), add = TRUE)
  corrplot(cor(x),method="number",type="upper")
  title(cohort_name)
}
correlation.plot(list.of.results,"meta_models")
|
232acd2405e7f401e38a68728c20bf8a9cef2648
|
c6ae9c0e47903ee3d672c013292caf4b182c6a7c
|
/R_basics.r
|
c6ff2d9cb91913d29bec97539ebb80dcbe2dec75
|
[] |
no_license
|
Fahofah/R_Exercises
|
2332750623a78c90fd74d776f074b8db617fea71
|
7b83a4d5e3724de5a4d0f4cb1776d14bd3ca757d
|
refs/heads/master
| 2021-06-25T20:53:54.294313
| 2017-08-22T16:10:41
| 2017-08-22T16:10:41
| 100,287,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,062
|
r
|
R_basics.r
|
## CTRL+L --> CLEARS CONSOLE
## rm(list = ls())  (clears the workspace; avoid in scripts)
#BASIC exercises
#...exercise 4: identity function
# NOTE(review): this function literal is never assigned to a name, so it is
# evaluated and immediately discarded when the script is sourced.
function(theString){
  theString
}
#5,6,7: combine two numbers. If either is zero, hand back the other one;
# otherwise add them when isSum is TRUE and multiply them when it is FALSE.
# The result is printed (print() also returns it invisibly).
addThese <- function(num1, num2, isSum){
  if (num1 == 0) {
    ans <- num2
  } else if (num2 == 0) {
    ans <- num1
  } else if (isSum) {
    ans <- num1 + num2
  } else {
    ans <- num1 * num2
  }
  print(ans)
}
#8: apply an arbitrary function to a single piece of data.
quickFunc <- function(func, data) func(data)
quickFunc(function(vec){vec[[1]]},vec1)
#INTERMEDIATE exercises
#Blackjack: given two hands, return the one closest to 21 without going
# over. Invalid decks, double busts and draws are reported via print()
# (which returns the message string invisibly).
blackjack <- function(hand1, hand2){
  if (hand1 == 0 | hand2 == 0) {
    print("Invalid cards in deck, please clean deck and roll again")
  } else if (hand1 > 21 & hand2 > 21) {
    print("Both hands blown out")
  } else if (hand2 > 21) {
    hand1
  } else if (hand1 > 21) {
    hand2
  } else if (hand1 > hand2) {
    hand1
  } else if (hand2 > hand1) {
    hand2
  } else {
    print("It's a draw")
  }
}
#uniquesum: add three numbers, counting a duplicated pair only once; if all
# three are equal the result is 0. Behaviour kept exactly as the original
# exercise solution.
uniqueSum <- function(num1, num2, num3){
  if (num1 == num2 & num2 == num3) {
    return(0)
  }
  if (num1 == num2) {
    return(num1 + num3)
  }
  if (num1 == num3 | num2 == num3) {
    return(num1 + num2)
  }
  num1 + num2 + num3
}
#TooHot: is `temp` inside the comfortable range? The lower bound is 60 and
# the upper bound is 90, relaxed to 100 in summer. Returns TRUE/FALSE.
toohot <- function(temp, isSummer){
  hiLim <- 90
  if (isSummer) {
    hiLim <- 100
  }
  # Return the comparison directly instead of branching to T/F, which are
  # reassignable aliases of TRUE/FALSE.
  temp >= 60 && temp <= hiLim
}
#LeapYear: full Gregorian leap-year rule. Bug fix: the original only tested
# divisibility by 4, which wrongly classifies century years such as 1900
# (not a leap year), and contained a stray `1` in the else branch.
leapYear <- function(year){
  (year %% 4 == 0 && year %% 100 != 0) || year %% 400 == 0
}
#WriteToFile: write the even numbers 2..100 to evens.csv, read them back,
# add 1 (giving the odd numbers 3..101), and write those to odds.csv.
evens <- seq(2, 100, 2)  # vectorized; no need to grow a vector with c() in a loop
write.csv(evens, "evens.csv")
inNums <- read.csv("evens.csv")
# Column 2 of the round-tripped file holds the values (column 1 is the
# row-name column that write.csv adds).
odds <- inNums[2] + 1
write.csv(odds, "odds.csv")
#plot: base-graphics practice on the iris data set.
data(iris)
# Distribution of sepal width per species.
boxplot(Sepal.Width~Species,iris,ylab="Sepal Width")
# Scatter of petal length vs width; plotting symbol and colour are indexed
# by the Species factor so each species gets its own symbol/colour.
# NOTE(review): the x-axis label contains a typo ("Lenght").
plot(iris$Petal.Length,iris$Petal.Width,
     pch=c(15,16,17)[iris$Species],
     col=c("black","red","blue")[iris$Species],
     xlab = " Petal Lenght",ylab="Petal Width")
legend("topleft", title = "Species",legend = levels(iris$Species),
       pch=c(15,16,17),
       col=c("black","red","blue"))
#CO2 exercises (datasets::CO2)
#1 Type is an ordered factor
#2 overall mean uptake
mean_uptake<-mean(CO2$uptake)
#3 uptake by origin
boxplot(uptake~Type,CO2, ylab="uptake", main="CO2 Uptake per City")
#4 split by origin, dropping the Type column.
quebec_CO2<-subset(CO2,Type=="Quebec",select=names(CO2)!="Type")
# Bug fix: the level is spelled "Mississippi" in the CO2 data set; the old
# misspelling "Missisipi" matched nothing and produced an empty data frame.
missi_CO2<-subset(CO2,Type=="Mississippi",select=names(CO2)!="Type")
#5 **input (quebec_CO2$uptake,missi_CO2$uptake)**
# Compare the means of two numeric vectors and report (via print, which
# returns the message string invisibly) which one is larger.
mean_checker<- function(vec1,vec2){
  if (mean(vec1) > mean(vec2)) {
    # Typo fix: "bigegr" -> "bigger" in both messages.
    print("first set has bigger mean")
  } else if (mean(vec2) > mean(vec1)) {
    print("second set has bigger mean")
  } else {
    print("Both sets seem equal")
  }
}
#OrchardSprays: maximum potency decrease per treatment.
# Maximum decrease within each treatment level.
byTreat=aggregate(OrchardSprays$decrease,list(OrchardSprays$treatment), max)
colnames(byTreat)<-c("Treatment","Decrease")
# Treatment label whose maximum decrease is largest overall.
max_decrease= byTreat$Treatment[which.max(byTreat$Decrease)]
# NOTE(review): byTreat has one row per treatment, so this "boxplot" shows
# a single value per group.
boxplot(byTreat$Decrease~byTreat$Treatment, main="Decrease Per Treatment", xlab="Treatment",ylab="Decrease")
#Chicks: ChickWeight analysis.
data(ChickWeight)
# Keep only the weight, Chick id and Diet columns.
chicNweight<-subset(ChickWeight,select = names(ChickWeight)=="weight" | names(ChickWeight)=="Chick" | names(ChickWeight)=="Diet")
#~ overall diet performance, clues biggest wieght obtained by diet 3
# One point per observation; symbol and colour are indexed by Diet.
plot(chicNweight$weight,
     pch=c(16:19)[ChickWeight$Diet],
     col=c(1:4)[ChickWeight$Diet],
     xlab="Population", ylab="Chick Weight",main="Overall Diet Results")
legend("topleft", title = "Diet",legend = levels(ChickWeight$Diet),
       pch=c(16:19),
       col=c(1:4))
#group by chicks: per-chick vector of successive weight differences (diff).
byChick<-aggregate(weight ~ factor(Chick, levels = c(1:50)), data = ChickWeight, FUN = diff )
names(byChick)<-c("Chick","WeightChanges")
##calculating overall weight change from timestamp weightchanges
# NOTE(review): the loop reads byChick$WeightChange (no trailing "s") and
# only works through R's partial matching of $ names — fragile.
weight_change<-c()
for (i in 1:length(byChick$WeightChanges)){
  x<-byChick$WeightChange[i]
  # Mean of the per-interval changes, then sum() collapses it to a scalar.
  xx<-Reduce("+",x)/length(x)
  weight_change=c(weight_change, sum(xx))
}
byChick$WeightChange<-weight_change
# Per-diet subsets keeping weight, Time and Chick.
diet1<-subset(ChickWeight,Diet==1,select = names(ChickWeight)== "weight" | names(ChickWeight)== "Time" | names(ChickWeight)=="Chick")
diet2<-subset(ChickWeight,Diet==2,select = names(ChickWeight)== "weight" | names(ChickWeight)== "Time" | names(ChickWeight)=="Chick")
diet3<-subset(ChickWeight,Diet==3,select = names(ChickWeight)== "weight" | names(ChickWeight)== "Time" | names(ChickWeight)=="Chick")
diet4<-subset(ChickWeight,Diet==4,select = names(ChickWeight)== "weight" | names(ChickWeight)== "Time" | names(ChickWeight)=="Chick")
# Bug fix: install.packages() takes the package name as a string; the bare
# symbol `cowplot` raised "object 'cowplot' not found".
install.packages("cowplot")
# NOTE(review): the ggplot() calls below need ggplot2; older cowplot
# releases attached it automatically — confirm whether library(ggplot2)
# is needed here.
library(cowplot)
#for reordering according to factors, so displayed sorted in legend
diet1$Chick<- factor(diet1$Chick, levels=(min(diet1$Chick):max(diet1$Chick)))
diet2$Chick<- factor(diet2$Chick, levels=(min(diet2$Chick):max(diet2$Chick)))
diet3$Chick<- factor(diet3$Chick, levels=(min(diet3$Chick):max(diet3$Chick)))
diet4$Chick<- factor(diet4$Chick, levels=(min(diet4$Chick):max(diet4$Chick)))
# One spaghetti plot of weight over time per diet, one line per chick.
p1<-ggplot(diet1,aes(x=Time,y=weight, group=Chick)) + geom_line(aes(color=Chick)) + ggtitle("Diet 1 - Weight Gain Patterns Over Time") + xlab("Days") + ylab("Weight (g)")
p2<-ggplot(diet2,aes(x=Time,y=weight, group=Chick)) + geom_line(aes(color=Chick)) + ggtitle("Diet 2 - Weight Gain Patterns Over Time") + xlab("Days") + ylab("Weight (g)")
p3<-ggplot(diet3,aes(x=Time,y=weight, group=Chick)) + geom_line(aes(color=Chick)) + ggtitle("Diet 3 - Weight Gain Patterns Over Time") + xlab("Days") + ylab("Weight (g)")
p4<-ggplot(diet4,aes(x=Time,y=weight, group=Chick)) + geom_line(aes(color=Chick)) + ggtitle("Diet 4 - Weight Gain Patterns Over Time") + xlab("Days") + ylab("Weight (g)")
#multiplot: 2x2 grid of the four per-diet plots (cowplot::plot_grid).
plot_grid(p1,p2,p3,p4)
#get relative diet types into byChick
# NOTE(review): max() is not defined for unordered factors, and c() on
# factor elements may drop to integer codes on older R versions — this
# loop's behaviour depends on the R version; verify.
diets<-c()
for (i in 1:max(byChick$Chick)){
  set<-ChickWeight$Diet[which(ChickWeight$Chick==i)]
  diets<-c(diets,set[1])
}
byChick$diet<-diets
#barplot of the overall weight change per chick, coloured by diet
ggplot(byChick,aes(x=Chick,y=WeightChange, fill=diet))+geom_bar(stat="identity")+ylab("Weight Change") + ggtitle("Overall Weight Change of Chicks")
#Primes: all primes up to 300000.
# Bug fix: the original trial division only tested EVEN candidate divisors
# (seq(2, ..., 2)); an odd number is never divisible by an even number, so
# every odd composite (9, 15, 21, ...) was wrongly kept as "prime".
# Rewritten as a vectorized Sieve of Eratosthenes, which is also far faster
# than growing primlist with c() inside a loop.
limit <- 300000
is_prime <- rep(TRUE, limit)
is_prime[1] <- FALSE                     # 1 is not prime
for (p in 2:floor(sqrt(limit))) {
  if (is_prime[p]) {
    # Cross off multiples of p, starting at p^2 (smaller multiples were
    # already removed by smaller primes).
    is_prime[seq(p * p, limit, p)] <- FALSE
  }
}
primlist <- which(is_prime)
print(primlist)
##Salary Predict: adult-census income classification exercise.
train<-read.csv("censusData_train.csv")
colnames(train)<-c("Age","Workclass","Census","EducationLevel","EducationYears","MaritalStatus","Occupation","Relationship","Race","Sex","CapitalGain","CapitalLoss","HoursPerWeek","NativeCountry","Salary")
table(is.na(train)) #check whether empty values are present (there were none)
#shifting ? to NA
trainNA<-train
trainNA[trainNA == " ?"]<-NA #eg for 1 col --raw_data$Electrical[is.na(raw_data$Electrical)]<-"SBrkr"
##models (rpart decision trees)
# NOTE(review): rpart, rpart.plot, randomForest and caret are used below
# but never loaded in this snippet.
fit <- rpart(Salary ~ Sex +Workclass +Occupation + EducationLevel +CapitalGain + EducationYears + MaritalStatus, data = train[1:25000,], method="class")
fitNA <- rpart(Salary ~ Sex +Workclass +Occupation + EducationLevel +CapitalGain + EducationYears + MaritalStatus, data = trainNA[1:25000,], method="class")
# NOTE(review): trainWO_na is only defined further down — this line fails
# if the script is run top to bottom.
fitwo_NA<- rpart(Salary ~ Workclass +Occupation +CapitalGain + MaritalStatus,data = trainWO_na[1:25000,],method = "class")
# NOTE(review): fit1 is never defined; probably meant `fit`.
fit1
rpart.plot(fit, type = 3, extra = 101)
# Hold-out evaluation on rows 25001:32560 (column 15 is Salary).
checkfit<-predict(fit,train[25001:32560,],type="class")
table(train[25001:32560,15],predicted=checkfit)
checkfitNA<-predict(fitNA,train[25001:32560,],type="class")
table(train[25001:32560,15],predicted=checkfitNA)
checkfitwo_NA<-predict(fitwo_NA,train[25001:32560,],type="class")
table(train[25001:32560,15],predicted=checkfitwo_NA)
#Replace Unknown (?) values with NA
trainNA[train == " ?"]<- NA
#Create subset without NA values
trainWO_na<-train[complete.cases(train),]
##Random Forest Model
rf<-randomForest(Salary~ MaritalStatus+EducationYears+ CapitalGain+ Age + HoursPerWeek+ Workclass +EducationLevel + Occupation + Sex ,data=trainWO_na[1:25000,])
p1<-predict(rf,trainWO_na[25001:30161,])
confusionMatrix(p1,trainWO_na$Salary[25001:30161])
# ****** custom function for tabulating all columns in a dataframe ******
# Bug fix: the original was missing the function's closing brace, which left
# the remainder of the file parsed inside this function body.
# Prints each column name followed by a frequency table of its values;
# returns NULL invisibly (used for its printed output).
tabul_cols<- function(dataFrame){
  colmnnames<-colnames(dataFrame)
  # seq_along() is safe when the data frame has no columns (1:length would
  # iterate over c(1, 0)).
  for(i in seq_along(colmnnames)){
    print(colmnnames[i])
    print(table(dataFrame[colmnnames[i]]))
  }
}
##Day 2: Kaggle House Prices exercise.
# Absolute Windows paths — these only work on the original author's machine.
factored_data<-read.csv("C:\\Users\\Administrator\\Documents\\HousePrices Comp\\train.csv")
test_data<-read.csv("C:\\Users\\Administrator\\Documents\\HousePrices Comp\\test.csv")
# Print each column name of `dataFrame` followed by a frequency table of its
# values (duplicate of tabul_cols above). Returns NULL invisibly.
table_cols<- function(dataFrame){
  colmnnames<-colnames(dataFrame)
  # seq_along() avoids iterating over c(1, 0) for zero-column input.
  for(i in seq_along(colmnnames)){
    print(colmnnames[i])
    print(table(dataFrame[colmnnames[i]]))
  }
}
# NOTE(review): raw_data is not defined anywhere in this script (the data
# was read into factored_data above) — these two lines fail as written.
table(is.na(raw_data))
WoNA_data<-raw_data[complete.cases(raw_data),]
# Print, for each column of `dataFrame`, a TRUE/FALSE count of missing
# values. Returns NULL invisibly.
table_nas<- function(dataFrame){
  colmnnames<-colnames(dataFrame)
  # seq_along() is safe for zero-column data frames.
  for(i in seq_along(colmnnames)){
    print(colmnnames[i])
    print(table(is.na(dataFrame[colmnnames[i]])))
  }
}
# NOTE(review): raw_data is never defined in this script; the cleaning
# below presumably targeted the data frame read in as factored_data.
raw_data$LotFrontage[is.na(raw_data$LotFrontage)]<-0
# NOTE(review): sub_data is only created further down — ordering problem.
sub_data$GarageYrBlt[is.na(sub_data$GarageYrBlt)]<- 0
raw_data[is.na(raw_data)]<-"None"
# Drop the last row.
factored_data<- factored_data[-nrow(factored_data),]
rf1<-randomForest(raw_data$SalePrice[1:1000] ~ as.factor(raw_data$MSZoning[1:1000]) + raw_data$TotRmsAbvGrd[1:1000] + raw_data$YearBuilt[1:1000] + as.factor(raw_data$OverallQual[1:1000]),data=raw_data[1:1000,])
rff<-randomForest(SalePrice ~ +GarageYrBlt+OverallQual+HouseStyle+BldgType+ +Condition1+Condition2 + Neighborhood + LotArea,data=sub_data)
p1<-predict(rf1,raw_data[1001:1459,1:80])
# NOTE(review): ps1 is not defined until the last line of this section —
# this write fails on a clean top-to-bottom run.
write.csv(ps1, "C:\\Users\\Administrator\\Documents\\HousePrices Comp\\p5.csv")
# Drop the high-missingness columns before modelling.
sub_data<-subset(factored_data,select = names(factored_data)!= "Alley" & names(factored_data)!= "PoolQC" & names(factored_data)!= "Fence" & names(factored_data)!= "MiscFeature" & names(factored_data)!= "FireplaceQu" & names(factored_data)!= "GarageType" & names(factored_data)!= "GarageFinish" & names(factored_data)!= "GarageQual" & names(factored_data)!= "GarageCond" & names(factored_data)!= "BsmtQual" & names(factored_data)!= "BsmtCond" & names(factored_data)!= "BsmtExposure" & names(factored_data)!= "BsmtFinType1" & names(factored_data)!= "BsmtFinType2" )
sub_data$GarageYrBlt[is.na(sub_data$GarageYrBlt)==T]<-0
# NOTE(review): addNA() returns a modified factor; the result is discarded
# here, so this line has no effect.
addNA(sub_data$MasVnrArea) #to all na cols
rfsd<-randomForest(SalePrice ~ MSZoning + TotRmsAbvGrd + YearBuilt + OverallQual, data = sub_data, ntree=10000)
# NOTE(review): na.omit() on a fitted model object does nothing useful.
na.omit(rfsd)
ps1<-predict(rfsd,test_data)
|
7a013abcb1d64ef7db2bfa45efaa4767bfdf9c58
|
968dec6ad5fa38ebf41d69cfde6eeacc924f187d
|
/ML Rasmus/LES_sandomega_nvarer_8var.R
|
ca6018011b22ef14d89c7a08163cc13e8abec0f3
|
[] |
permissive
|
Anaconda95/SpecialeJR
|
0ac32f93274e3c3b1a29b58632ca6e241ead1824
|
88349f3cdcd27984ffd53b52ba7ad828db02a8fe
|
refs/heads/main
| 2023-07-13T09:03:19.887729
| 2021-08-10T08:52:24
| 2021-08-10T08:52:24
| 337,058,938
| 1
| 0
|
Apache-2.0
| 2021-02-08T14:08:20
| 2021-02-08T11:49:17
| null |
UTF-8
|
R
| false
| false
| 15,440
|
r
|
LES_sandomega_nvarer_8var.R
|
#clear workspace
# NOTE(review): rm(list=ls()) in a script wipes the user's whole session.
rm(list=ls())
library(mvtnorm)
setwd("~/Documents/GitHub/SpecialeJR /ML Rasmus")
#suppress scientific notation in printed numbers
options(scipen=999)
# Read the data (8 consumption groups).
# NOTE(review): the second read.csv overwrites the first — two machines'
# paths were kept; only the one whose file exists matters.
df<-read.csv("simpeldata8grup.csv",sep=';')
df<-read.csv("C:/specialeJR/ML Rasmus/simpeldata8grup.csv",sep=';')
#make prices and shares
# Prices: current-price expenditure divided by fixed-price expenditure.
df <- transform( df,
                 p1 = FOEDEVARER.OG.IKKE.ALKOHOLISKE.DRIKKEVARER/Faste.FOEDEVARER,
                 p2 = ALKOHOLISKE.DRIKKEVARER.OG.TOBAK/Faste.ALKOHOL,
                 p3 = BEKLAEDNING.OG.FODTOEJ/Faste.BEKLAEDNING,
                 p4 = BOLIGBENYTTELSE..ELEKTRICITET.OG.OPVARMNING/Faste.BOLIG.EL.OG.OPVARMNING,
                 p5 = MOEBLER/Faste.MOEBLER,
                 p6 = SUNDHED/Faste.SUNDHED,
                 p7 = TRANSPORT/Faste.TRANSPORT,
                 p8 = RESTAURANTER.OG.HOTELLER/Faste.RESTAURANTER.OG.HOTELLER
)
# Budget shares: current-price expenditure on each good divided by total
# current-price expenditure on the eight goods.
df <- transform( df,
                 w1 = FOEDEVARER.OG.IKKE.ALKOHOLISKE.DRIKKEVARER/Sumloeb,
                 w2 = ALKOHOLISKE.DRIKKEVARER.OG.TOBAK/Sumloeb,
                 w3 = BEKLAEDNING.OG.FODTOEJ/Sumloeb,
                 w4 = BOLIGBENYTTELSE..ELEKTRICITET.OG.OPVARMNING/Sumloeb,
                 w5 = MOEBLER/Sumloeb,
                 w6 = SUNDHED/Sumloeb,
                 w7 = TRANSPORT/Sumloeb,
                 w8 = RESTAURANTER.OG.HOTELLER/Sumloeb
)
# phat: prices divided by total expenditure.
df <- transform( df,
                 phat1=p1/Sumloeb,
                 phat2=p2/Sumloeb,
                 phat3=p3/Sumloeb,
                 phat4=p4/Sumloeb,
                 phat5=p5/Sumloeb,
                 phat6=p6/Sumloeb,
                 phat7=p7/Sumloeb,
                 phat8=p8/Sumloeb
)
# Arrange the data in plain matrices: shares w, scaled prices phat and
# fixed-price quantities x (26 years x 8 goods).
w = matrix(c(df$w1,df$w2,df$w3,df$w4,df$w5,df$w6,df$w7,df$w8), nrow=26, ncol=8)
phat = matrix(c(df$phat1,df$phat2,df$phat3,df$phat4,df$phat5,df$phat6,df$phat7,df$phat8), nrow=26, ncol=8)
x = matrix(c(df$x1,df$x2,df$x3,df$x4,df$x5,df$x6,df$x7,df$x8), nrow=26, ncol=8)
# x and phat are rescaled for better convergence during optimisation
# (unclear whether this is still necessary).
x <- x/10000
phat <- phat*10000
dims=dim(w)
# NOTE(review): T is reassigned here to the number of periods, shadowing
# the built-in alias T for TRUE for the rest of the script.
T=dims[1]
n=dims[2]
# Solve for the gammas that reproduce the desired starting alphas (e.g. set
# the target alphas to the budget shares in the last period). gamma_n is
# normalised to 0, so `par` holds only the first n-1 gammas.
# Objective: squared distance between the target shares and the shares
# implied by the logit transform of `par`.
gammafn <- function(par, alpha_goal) {
  implied_shares <- exp(par) / (1 + sum(exp(par)))
  sum((alpha_goal - implied_shares)^2)
}
# Recover starting gammas from the target alphas (last-period shares).
gammasol <- optim(par=rep(0,(n-1)),fn=gammafn, alpha_goal=w[T,1:(n-1)], method="BFGS",
                  control=list(maxit=5000))
print(gammasol)
gamma_start <- c(gammasol$par,0)
alpha_start <- exp(gamma_start)/sum(exp(gamma_start))
# Check that the implied alphas match the observed shares.
print(w[T,1:(n)])
print(alpha_start)
# Starting values for bstar: 40% of the smallest fixed-price consumption of
# each good over the years; b is interpreted in units of 10,000 2015-DKK.
b_start <- 0.4*apply(x, 2, min)
# Starting values for the covariance matrix of the share residuals.
a <- alpha_start
b <- b_start # b is time-invariant here
supernum <- 1-rowSums(phat %*% diag(b))
supernummat <- matrix(rep(supernum,n),ncol=n)
u <- w - phat %*% diag(b) - supernummat%*%diag(a)
# Drop one share equation; otherwise the covariance is singular.
uhat <- u[ ,1:(n-1)]
covar <- cov(uhat)
covar_start <- covar[lower.tri(covar,diag=TRUE)]
start_uhabit = c(gamma_start[1:(n-1)], b_start, covar_start)
# Bug fix: the original printed `start`, which is not a variable in this
# script (it resolves to the stats::start generic); print the start vector
# that was just built instead.
print(start_uhabit)
# Starting values for habit formation and autocorrelation.
habit=rep(0.4,n)
autocorr <- 0.6
start_habit = c(gamma_start[1:(n-1)], b_start, habit, covar_start, autocorr)
print(start_habit)
par=start_habit
# NOTE(review): start_uhabit is rebuilt below identically to the definition
# above; the duplication is kept as-is but is redundant.
start_uhabit = c(gamma_start[1:(n-1)], b_start,covar_start)
print(start_uhabit)
# Make a matrix symmetric by mirroring its lower triangle into the upper
# triangle.
makeSymm <- function(m) {
  upper <- upper.tri(m)
  m[upper] <- t(m)[upper]
  m
}
# Negative log-likelihood of the LES demand system.
#
# par  : parameter vector. Layout (n = number of goods):
#        gammas (n-1), bstar (n), [habit betas (n),] lower triangle of the
#        (n-1)x(n-1) error covariance, [AR(1) coefficient rho].
# w    : T x n matrix of observed budget shares.
# phat : T x n matrix of (scaled) prices.
# x    : T x n matrix of fixed-price quantities (scaled).
# habitform : 1 = habit formation + AR(1) errors, 0 = static model.
loglik <- function(par,w,phat,x,habitform) {
  # Panel dimensions.
  dims=dim(w)
  T=dims[1]
  n=dims[2]
  if (habitform==1){
    # --- With habit formation ---
    gamma <- c(par[1:(n-1)],0)          # gamma_n normalised to 0
    a <- exp(gamma)/sum(exp(gamma))     # logit keeps shares in (0, 1)
    bstar <- c(par[n:(2*n-1)])          # baseline committed quantities
    beta <- c(par[(2*n):(3*n-1)])       # habit parameters
    # With habit formation the first year is lost from the estimation.
    b <- matrix(rep(bstar,(T-1)),nrow=(T-1),ncol=n, byrow=TRUE) + x[1:(T-1),]%*%diag(beta)
    supernum <- 1-rowSums(phat[2:T,] * b)    # supernumerary income per period
    supernummat <- matrix(rep(supernum,n),ncol=n)
    u <- w[2:T,] - phat[2:T,]*b - supernummat%*%diag(a)
    # Drop one share equation; otherwise the covariance is singular.
    uhat <- u[ , 1:(n-1)]
    # AR(1) errors: e_t = u_t - rho * u_{t-1}; rho is the last parameter.
    ehat <- uhat[2:(T-1),]- par[((3*(n) + (n-1)*((n-1)+1)/2))]*uhat[1:(T-2),]
    # Rebuild the symmetric covariance matrix from its lower triangle.
    omega <- matrix(NA,(n-1),(n-1))
    omega[lower.tri(omega,diag=TRUE)] <- par[(3*n) : ((3*(n) + (n-1)*((n-1)+1)/2) - 1) ]
    omega<-makeSymm(omega)
    l1 = dmvnorm(x=ehat, mean=rep(0,n-1), sigma=omega, log=TRUE)
    return( -sum(l1) )
  }else if (habitform == 0) {
    # --- Static model (no habit formation) ---
    gamma <- c(par[1:(n-1)],0)
    a <- exp(gamma)/sum(exp(gamma))
    b <- c(par[n:(2*n-1)])              # b is time-invariant here
    supernum <- 1-rowSums(phat %*% diag(b))
    supernummat <- matrix(rep(supernum,n),ncol=n)
    u <- w - phat %*% diag(b) - supernummat%*%diag(a)
    uhat <- u[ ,1:(n-1)]
    omega <- matrix(NA,(n-1),(n-1))
    omega[lower.tri(omega,diag=TRUE)] <- par[(2*n) : ((2*(n) + (n-1)*((n-1)+1)/2) - 1) ]
    omega<-makeSymm(omega)
    # Bug fix: the original computed apply(uhat, 1, ...) %*% omegainv here,
    # but `omegainv <- solve(omega)` was commented out, so the line raised
    # "object 'omegainv' not found" before dmvnorm() was ever reached. The
    # quadratic form was dead code (dmvnorm handles it), so it is removed —
    # matching the habitform == 1 branch where it is commented out.
    l1 = dmvnorm(x=uhat, mean=rep(0,n-1), sigma=omega, log=TRUE)
    return( -sum(l1) )
  } else
    # Fail loudly on an invalid flag instead of just printing a message.
    stop("Set habitform = 1 or = 0")
}
# Maximise the likelihood of the static model. Works with BFGS and
# converges for a range of starting values, but the b estimates are very
# sensitive to the starting values.
sol_uhabit <- optim( par = start_uhabit, fn = loglik, habitform=0,
                     phat=phat, w=w, x=x, method="BFGS",
                     # lower = lower , upper= upper ,
                     control=list(maxit=5000,
                                  trace=99,
                                  ndeps = rep(1e-10,length(start_uhabit))) )
sol_gamma <- c(sol_uhabit$par[1:(n-1)],0)
# Undo the 1/10000 scaling so b is in kroner again.
sol_b <- sol_uhabit$par[n:(2*n-1)]*10000
sol_alpha <- exp(sol_gamma)/sum(exp(sol_gamma))
print(sol_alpha)
print(sol_b)
sol_b_mat <- matrix(rep(sol_b,(T-1)),nrow=(T-1),ncol=n, byrow=TRUE)
# Side-by-side comparison of committed quantities and actual consumption.
# NOTE(review): nrow=25/ncol=8 hard-code T-1 and n for this data set.
matrix(c(sol_b_mat,10000*x[2:(T),]),nrow=25,ncol=8, byrow=FALSE)
# Maximise the likelihood of the habit-formation model.
# (Author's note: L-BFGS-B cannot solve this one — slightly annoying.)
sol_habit <- optim( par = start_habit, fn = loglik, habitform=1,
                    phat=phat, w=w, x=x, method="BFGS",
                    control=list(maxit=5000,
                                 trace=6,
                                 ndeps = rep(1e-10,length(start_habit))) )
sol_gamma <- c(sol_habit$par[1:(n-1)],0)
sol_b <- sol_habit$par[n:(2*n-1)]*10000
sol_alpha <- exp(sol_gamma)/sum(exp(sol_gamma))
sol_beta <- sol_habit$par[(2*n):(3*n-1)]
print(sol_alpha)
print(sol_b)
print(sol_beta)
# Time-varying committed quantities: baseline plus habit term.
sol_b_mat <- matrix(rep(sol_b,(T-1)),nrow=(T-1),ncol=n, byrow=TRUE) + 10000*x[1:(T-1),]%*%diag(sol_beta)
# NOTE(review): nrow=25/ncol=8 hard-code T-1 and n for this data set.
matrix(c(sol_b_mat,10000*x[2:(T),]),nrow=25,ncol=8, byrow=FALSE)
# Bug fix: sol_b_mat has T-1 rows, so the price matrix must use rows 2:T
# (T-1 rows); the original phat[3:T,] had only T-2 rows and the elementwise
# product raised a non-conformable-arrays error. Rows 2:T also match the
# uhat computation below.
supernum <- 1-rowSums(phat[2:T,] * sol_b_mat/10000) # supernumerary income per period
supernummat <- matrix(rep(supernum,n),ncol=n)
# Residuals; there appears to be quite a lot of autocorrelation in them.
uhat <- w[2:T,] - phat[2:T,]*sol_b_mat/10000 - supernummat%*%diag(sol_alpha)
# Simple AR(1) filtering of the residuals with the estimated rho.
# NOTE(review): the 2:25 / 1:24 indices hard-code T-1 = 25.
ehat <- uhat[2:25,]- sol_habit$par[((3*(n) + (n-1)*((n-1)+1)/2))]*uhat[1:24,]
# Author's bottom line: the likelihood works via mvtnorm but not when
# written out by hand — apparently log(det(omega)) becomes NaN.
# Check against different starting values below.
# Robustness check (static model): re-run the optimisation over a small grid
# of starting values for the gammas and b's, collecting each solution.
# NOTE(review): this grid solves a 3-good model (2 gammas + 3 b's), which
# does not match the 8-good data prepared above — confirm which data set
# this section was written for.
gamma_start
b_start
# Without habit formation
min(x[,3])
gstart_1 = c(-1,-2)
gstart_2 = c(-2,-1)
bstart_1 = c(1,2.5)
bstart_2 = c(0.2,0.8)
bstart_3 = c(3,5)
# Accumulators seeded with a dummy row of 1s.
Solution_uhabit <- data.frame(Likeli=1,a1=1,a2=1,a3=1,b1=1,b2=1,b3=1,o1=1,o2=1,o3=1)
Start_uhabit <- data.frame(a1=1,a2=1,a3=1,b1=1,b2=1,b3=1)
for (i in bstart_1) {
  for (j in bstart_2) {
    for (k in bstart_3) {
      for (l in gstart_1) {
        for (q in gstart_2) {
          # A failed optimisation is reported and skipped, not fatal.
          tryCatch({
            sol <- optim(par = c(l,q,i,j,k,covar_start), fn = loglik, habitform=0,
                         phat=phat, w=w, x=x, method="BFGS",
                         control=list(maxit=5000,
                                      trace=99,
                                      ndeps = rep(1e-10,8)))
            print(sol)
            sol_gamma <- c(sol$par[1:2],0)
            sol_b <- sol$par[3:5]*10000
            sol_alpha <- exp(sol_gamma)/sum(exp(sol_gamma))
            # Renamed from `list`/`list1`, which shadowed base::list().
            sol_row <- list(Likeli=sol$value,a1=sol_alpha[1],
                            a2=sol_alpha[2],a3=sol_alpha[3],
                            b1=sol_b[1],b2=sol_b[2],
                            b3=sol_b[3],o1=sol$par[6],o2=sol$par[7],o3=sol$par[8])
            gamma_ini <- c(l,q,0)
            alpha_ini <- exp(gamma_ini)/sum(exp(gamma_ini))
            b_ini <- c(i,j,k)
            start_row <- list(a1=alpha_ini[1],
                              a2=alpha_ini[2],a3=alpha_ini[3],
                              b1=b_ini[1],b2=b_ini[2],
                              b3=b_ini[3])
            Solution_uhabit <- rbind(Solution_uhabit, sol_row)
            Start_uhabit <- rbind(Start_uhabit, start_row)
          }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
        }
      }
    }
  }
}
### With habit formation: same grid search for the habit model.
Solution_habit <- data.frame(Likeli=1,a1=1,a2=1,a3=1,b1=1,b2=1,b3=1,h1=1, h2=1, h3=1,o1=1,o2=1,o3=1)
Start_habit <- data.frame(a1=1,a2=1,a3=1,b1=1,b2=1,b3=1)
for (i in bstart_1) {
  for (j in bstart_2) {
    for (k in bstart_3) {
      for (l in gstart_1) {
        for (q in gstart_2) {
          tryCatch({
            # Habit betas all start at 0.2; 11 parameters in total.
            sol <- optim(par = c(l,q,i,j,k,0.2,0.2,0.2,covar_start), fn = loglik, habitform=1,
                         phat=phat, w=w, x=x, method="BFGS",
                         control=list(maxit=5000,
                                      trace=99,
                                      ndeps = rep(1e-10,11)))
            print(sol)
            sol_gamma <- c(sol$par[1:2],0)
            sol_b <- sol$par[3:5]*10000
            sol_alpha <- exp(sol_gamma)/sum(exp(sol_gamma))
            # Renamed from `list`/`list1`, which shadowed base::list().
            sol_row <- list(Likeli=sol$value,a1=sol_alpha[1],
                            a2=sol_alpha[2],a3=sol_alpha[3],
                            b1=sol_b[1],b2=sol_b[2],
                            b3=sol_b[3], h1=sol$par[6], h2=sol$par[7], h3=sol$par[8], o1=sol$par[9],o2=sol$par[10],o3=sol$par[11])
            gamma_ini <- c(l,q,0)
            alpha_ini <- exp(gamma_ini)/sum(exp(gamma_ini))
            b_ini <- c(i,j,k)
            start_row <- list(a1=alpha_ini[1],
                              a2=alpha_ini[2],a3=alpha_ini[3],
                              b1=b_ini[1],b2=b_ini[2],
                              b3=b_ini[3])
            Solution_habit <- rbind(Solution_habit, sol_row)
            Start_habit <- rbind(Start_habit, start_row)
          }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
        }
      }
    }
  }
}
### With habit formation and autocorrelation — gives quite different estimates.
Solution_habit_acorr <- data.frame(Likeli=1,a1=1,a2=1,a3=1,a4=1,
                                   b1=1,b2=1,b3=1,b4=1,
                                   h1=1, h2=1, h3=1, h4=1,
                                   o1=1,o2=1,o3=1,o4=1,o5=1,o6=1,
                                   rho=1)
Start_habit <- data.frame(a1=1,a2=1,a3=1,b1=1,b2=1,b3=1)
for (i in bstart_1) {
  for (j in bstart_2) {
    for (k in bstart_3) {
      for (l in gstart_1) {
        for (q in gstart_2) {
          tryCatch({
            # 4-good model: 3 gammas, 4 b's, 4 habit betas, 6 covariance
            # terms and the AR(1) coefficient rho (18 parameters).
            sol <- optim(par = c(l,l,q,i,j,j,k,0.2,0.2,0.2,0.2,covar_start,autocorr), fn = loglik, habitform=1,
                         phat=phat, w=w, x=x, method="BFGS",
                         control=list(maxit=5000,
                                      trace=99,
                                      ndeps = rep(1e-10,18)))
            print(sol)
            sol_gamma <- c(sol$par[1:3],0)
            sol_b <- sol$par[4:7]*10000
            sol_alpha <- exp(sol_gamma)/sum(exp(sol_gamma))
            # Renamed from `list`, which shadowed base::list(). The
            # start-value tracking present in the other loops was already
            # commented out here in the original.
            sol_row <- list(Likeli=sol$value,a1=sol_alpha[1],
                            a2=sol_alpha[2],a3=sol_alpha[3],a4=sol_alpha[4],
                            b1=sol_b[1],b2=sol_b[2],b3=sol_b[3], b4=sol_b[4],
                            h1=sol$par[8], h2=sol$par[9], h3=sol$par[10], h4=sol$par[11],
                            o1=sol$par[12],o2=sol$par[13],o3=sol$par[14], o4=sol$par[15], o5=sol$par[16], o6=sol$par[17],
                            rho=sol$par[18])
            Solution_habit_acorr <- rbind(Solution_habit_acorr, sol_row)
          }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
        }
      }
    }
  }
}
## Problem: b3 becomes negative
# Convert every column to character before export. NOTE(review): apply()
# returns a character MATRIX here, dropping the data-frame class.
Solution_uhabit <- apply(Solution_uhabit,2,as.character)
Solution_habit <- apply(Solution_habit,2,as.character)
# NOTE(review): the first two paths are absolute Windows paths; the third
# file is written to the current working directory.
write.csv(Solution_uhabit,"C:/specialeJR/Estimering/Solution_uhabit.csv", row.names = FALSE)
write.csv(Solution_habit,"C:/specialeJR/Estimering/Solution_habit.csv", row.names = FALSE)
write.csv(Solution_habit_acorr,"Solution_habit_acorr.csv", row.names = FALSE)
|
4d916fbdd2a6defc13b1a00754558da3a9a1318f
|
68562f910349b41cdf4432c0921940f0513ab516
|
/tests/testthat/helper-session.R
|
92589c75d8ca95a55194c14d445b0f127e7bdbf6
|
[
"MIT"
] |
permissive
|
gadenbuie/xaringanthemer
|
2990406aff24a458695c6e4793c891dff5feb506
|
85091cd16af5a938b6d927ff5f6b0fe990ee0e63
|
refs/heads/main
| 2022-09-15T18:32:49.954381
| 2022-08-20T18:03:58
| 2022-08-20T22:47:52
| 129,549,154
| 446
| 28
|
NOASSERTION
| 2022-08-20T16:58:02
| 2018-04-14T19:44:17
|
R
|
UTF-8
|
R
| false
| false
| 453
|
r
|
helper-session.R
|
# Run `.f` with `args` in a fresh R subprocess (callr::r_safe), started from
# an empty temporary working directory. The subprocess catches its own
# errors and ships the message back; any such error is re-raised here so
# tests see it in the parent session.
with_clean_session <- function(.f, args = list()) {
  sandbox <- tempfile()
  dir.create(sandbox)
  previous_wd <- setwd(sandbox)
  # Restore the working directory and remove the sandbox on exit.
  on.exit({setwd(previous_wd); unlink(sandbox, TRUE)})
  args$.f <- .f
  outcome <- callr::r_safe(function(.f, ...) {
    tryCatch(
      list(result = .f(...), error = NULL),
      error = function(e) list(result = NULL, error = e$message)
    )
  }, args)
  if (is.null(outcome$error)) {
    outcome$result
  } else {
    stop(outcome$error)
  }
}
|
d12b9e6ebd0c64db36466588bf65b32472f93e4a
|
3689d75f984128f39e0e609aad51619aeafb0905
|
/R/unify_rent_and_value.R
|
3ebf07c85e4828bd55fd09fadff31b1bf1be89e0
|
[] |
no_license
|
buchmayne/housing-policy-tools
|
29c2ff841e6093ab1dd9442dbbe10002c6f3327c
|
2748e86181d0ae4b02400d9c4274904db38af278
|
refs/heads/main
| 2023-03-11T03:04:55.464754
| 2021-02-10T20:01:07
| 2021-02-10T20:01:07
| 337,231,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,414
|
r
|
unify_rent_and_value.R
|
#' Internal helper that harmonises CHAS affordability bins for renters and
#' owners and joins the two tables.
#'
#' Renter rent bins "0-30%" and "30-50%" collapse into "0-50%"; owner home
#' value bins "80-100%" and "+100%" collapse into "+80%". Unit counts are
#' summed within the collapsed bins and the two sides are inner-joined on
#' their shared geography/affordability/income columns.
#'
#' @param renters_data renters data from CHAS already formatted
#' @param owners_data owners data from CHAS already formatted
#' @export
unify_rent_and_value <- function(renters_data, owners_data) {
  # Recode the renter bins, then total renter-occupied units per bin.
  renters_unified <- renters_data %>%
    dplyr::mutate(rent = dplyr::case_when(
      rent %in% c("0-30%", "30-50%") ~ "0-50%",
      rent == "50-80%" ~ "50-80%",
      rent == "+80%" ~ "+80%"
    )) %>%
    dplyr::group_by(
      geoid,
      name,
      state_code,
      state_abbreviation,
      state_name,
      rent,
      household_income
    ) %>%
    dplyr::summarise(renter_occ_hu = sum(renter_occ_hu)) %>%
    dplyr::ungroup() %>%
    dplyr::rename(affordability = rent)
  # Recode the owner bins, then total ownership units per bin.
  owners_unified <- owners_data %>%
    dplyr::mutate(home_value = dplyr::case_when(
      home_value %in% c("80-100%", "+100%") ~ "+80%",
      home_value == "0-50%" ~ "0-50%",
      home_value == "50-80%" ~ "50-80%"
    )) %>%
    dplyr::group_by(
      geoid,
      name,
      state_code,
      state_abbreviation,
      state_name,
      home_value,
      household_income
    ) %>%
    dplyr::summarise(ownership_units = sum(ownership_units)) %>%
    dplyr::ungroup() %>%
    dplyr::rename(affordability = home_value)
  # Join on all shared columns (the default for inner_join).
  dplyr::inner_join(renters_unified, owners_unified)
}
|
7e6140f3bc644b4519cf8519f9d3199d70cb5447
|
6a7947971ca15e8d3c4aa5b38b668562439a784c
|
/network_informed_clustering_function.R
|
10e944bc726da0b8ea15881bed49ca30be94ea2a
|
[] |
no_license
|
SutherlandRuss/TCGA
|
8679de5d1dbd210e847bab317355361cef1df326
|
b89cfcb4e4ecdb2ab66aa5a17a774606abf1b40c
|
refs/heads/master
| 2021-01-25T10:29:00.454489
| 2013-09-16T16:41:16
| 2013-09-16T16:41:16
| 11,202,576
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,954
|
r
|
network_informed_clustering_function.R
|
#' @param mutM A logical matrix genes are rows and samples are columns.
#' TRUE if the gene is mutated in the sample and FALSE otherwise.
#' @param geneAdj A list of genes that are adjacent to.....
#' @param geneID An lexicographically ordered list of genes.
#'
#' @return mutNet The logical matrix MutM updated using network information.
#'
#' Requires packages "igraph" "pam" and "fpc".
#'
#' The length(geneAdj)==length(geneID)==nrows(mutM)
#' The rownames(mutM)==geneID==names(geneAdj)
#'
#'
#'
# NOTE(review): hard-coded, machine-specific working directory -- prefer
# relative paths or a configurable argument so the script runs elsewhere.
setwd("C:/Users/rsutherland/Dropbox/PhD/tumour_classifier_data/colorectal_somatic_mutations/combined/1014_06_2013")
#setwd("C:/Users/rds/Dropbox/PhD/tumour_classifier_data/colorectal_somatic_mutations/combined/2428_06_2013")
# Inputs prepared elsewhere: logical gene x sample mutation matrix and the
# per-gene adjacency index -- presumably loaded before this point; TODO confirm.
mutM<-mutMatLogicalOrdered
geneAdj<-geneAdjIndex
# Tissue label per sample (e.g. "colon"), named by sample identifier.
isColon<- cancerType[,1]
names(isColon) <- cancerType[,2]
# Toy example (disabled): a 6-gene x 4-sample mutation matrix plus its gene
# adjacency list, kept for interactive testing of netInf(). Wrapped in
# if(FALSE) so it never executes when the script is sourced.
if(FALSE) {
geneID = c("A", "B", "C", "D", "E", "F")
mutM = cbind(S1 = c(T, F, F, F, T, F),
S2 = c(F, T, F, F, T, F),
S3 = c(T, F, F, F, F, T),
S4 = c(F, F, F, T, F, F))
rownames(mutM) = geneID
# Adjacency list: integer row indices of the neighbours of each gene.
geneAdj = list(A = c(2L, 5L),
B = c(1L, 3L, 4L),
C = c(2L, 4L),
D = c(2L, 3L),
E = c(1L),
F = integer())
}
#' Propagate mutations through the gene network.
#'
#' For every sample (column), any gene adjacent to a mutated gene is also
#' flagged as mutated, so the returned matrix is a network-smoothed version
#' of the input.
#'
#' @param mutM Logical gene x sample matrix (TRUE = mutated).
#' @param geneAdj List, parallel to the rows of mutM, of integer row indices
#'   of each gene's neighbours.
#' @return The updated logical matrix, same dimensions as mutM.
netInf <- function(mutM, geneAdj) {
  for (sample_idx in seq_len(ncol(mutM))) {
    mutated <- which(mutM[, sample_idx])
    if (length(mutated) > 0L) {
      # Union of all neighbours of the mutated genes in this sample.
      neighbours <- unique(unlist(geneAdj[mutated], use.names = FALSE))
      if (length(neighbours) > 0L) {
        mutM[neighbours, sample_idx] <- TRUE
      }
    }
  }
  mutM
}
##############################################################################
#using the above functions and plotting a comparison between before and after network inference
#result<-func1(mutM,geneAdj)
#resultdist<-dist(t(result),method= "binary")
#edist <- dist(t(result))
#countM0<-rowSums(mutM)
#countM1<-rowSums(result)
#smoothScatter(countM0,countM1)
#countS0<-colSums(mutM)
#countS1<-colSums(result)
#smoothScatter(countS0,countS1)
######################################################################
#pos = (which(mutM) - 1L) %% nrow(mutM) + 1L
#col = (which(mutM) - 1L) %/% nrow(mutM) + 1L
##split(pos, col)
#l = lapply(split(geneAdj[pos], col), unlist, use.names = FALSE)
#newcol = rep.int(seq_along(l), sapply(l, length))
#newtrue = unlist(l, use.names = FALSE)
#newidx = (newcol - 1) * nrow(mutM) + newtrue
#a = mutM
#a[newidx] <- TRUE
#a
#m
#identical(a,m)
#####################################################################################
#' Count, for every pair of samples (columns), the genes that are TRUE in
#' both. Only the lower triangle of the result is filled; the diagonal and
#' upper triangle remain NA.
#'
#' @param mutM Logical gene x sample matrix.
#' @return ncol(mutM) x ncol(mutM) matrix of shared-TRUE counts.
countMatch1 <- function(mutM) {
  n_samples <- ncol(mutM)
  matches <- matrix(NA, n_samples, n_samples)
  for (i in seq_len(n_samples - 1)) {
    for (j in (i + 1):n_samples) {
      # Genes mutated in both sample i and sample j.
      matches[j, i] <- sum(mutM[, i] & mutM[, j])
    }
  }
  matches
}
#' Vectorised variant of countMatch1: counts, per sample pair, the rows that
#' are TRUE in both columns. Fills only the lower triangle (NA elsewhere).
#'
#' @param mutM Logical gene x sample matrix.
#' @return ncol(mutM) x ncol(mutM) matrix of shared-TRUE counts.
countMatch2 <- function(mutM) {
  n_samples <- ncol(mutM)
  matches <- matrix(NA, n_samples, n_samples)
  for (i in seq_len(n_samples - 1)) {
    ref <- mutM[, i, drop = TRUE]
    rest <- mutM[, (i + 1):n_samples, drop = FALSE]
    # `ref` recycles down each remaining column, so this counts the rows
    # TRUE in both the reference column and every later column at once.
    matches[(i + 1):n_samples, i] <- colSums(rest & ref)
  }
  matches
}
#' Pairwise Jaccard similarity between all sample columns of `mutM`.
#'
#' Uses `clujaccard()` from the fpc package, which the caller must have
#' attached. Note the return value is a plain vector of the lower-triangle
#' similarities (NA diagonal/upper-triangle entries are dropped), not a
#' matrix. Run either this, or countMatch* followed by compDiss().
#'
#' @param mutM Logical gene x sample matrix.
#' @return Numeric vector of pairwise Jaccard similarities.
jaccardSim <- function(mutM) {
  n_samples <- ncol(mutM)
  sim <- matrix(NA, n_samples, n_samples)
  for (i in seq_len(n_samples - 1)) {
    for (j in (i + 1):n_samples) {
      sim[j, i] <- clujaccard(mutM[, i], mutM[, j])
    }
  }
  # Flatten, keeping only the computed (non-NA) entries in column-major order.
  sim[!is.na(sim)]
}
#' Convert a pairwise match-count matrix into a dissimilarity ("dist") object.
#'
#' @param countM Lower-triangular matrix of pairwise match counts (as built
#'   by countMatch1/countMatch2); NA elsewhere.
#' @param type 1 = 1 - count / (max count + 1); 2 = max count - count.
#' @param mutM The mutation matrix; only its column names are used, to label
#'   the returned object.
#' @return A "dist" object whose labels are colnames(mutM).
compDiss <- function(countM, type, mutM) {
  max_count <- max(countM, na.rm = TRUE)
  if (type == 1) {
    distM <- as.dist(1 - countM / (max_count + 1))
  } else if (type == 2) {
    distM <- as.dist(max_count - countM)
  } else {
    # Fail fast: previously an unknown `type` fell through and the function
    # crashed later with a cryptic "object 'distM' not found" error.
    stop("'type' must be 1 or 2", call. = FALSE)
  }
  # TODO: add Jaccard similarity as an additional `type` option.
  attr(distM, "Labels") <- colnames(mutM)
  distM
}
#' Cluster samples from a dissimilarity object and test for colon enrichment.
#'
#' @param distM A "dist" object (or, for cmethod == "pam", whatever pam()
#'   accepts as dissimilarities).
#' @param isColon Character vector of tissue labels ("colon" or other),
#'   named by sample identifier.
#' @param cmethod "pam" for partitioning around medoids, otherwise an
#'   agglomeration method name passed to hclust() (e.g. "ward").
#' @param cutN Number of clusters to produce.
#' @param title Plot title.
#' @return list(cluster x tissue contingency table, fisher.test result).
#'
#' NOTE(review): this function reads the GLOBAL `mutM` for sample names
#' (`colnames(mutM)`) instead of taking it as an argument, so it silently
#' depends on script-level state -- confirm the global is in sync with distM.
#' `pam()` comes from the cluster package, which must be attached elsewhere.
grouping<- function(distM,isColon,cmethod,cutN,title){
  if(cmethod=="pam"){
    # Partitioning around medoids directly on the dissimilarities.
    clustr<- pam(distM,k=cutN,diss=TRUE)
    clustCut<-clustr$clustering
    plot(clustr, main =title)
  }else{
    # Hierarchical clustering cut into cutN groups; colon samples are drawn
    # as "--------" in the dendrogram labels, all others as "o".
    clustr<-hclust(distM,method=cmethod)
    clustCut<-cutree(clustr,k=cutN)
    plot(clustr,main=title,labels = c("o", "--------")[(isColon[clustr$labels] == "colon") + 1])
  }
  names(clustCut)<-colnames(mutM)
  groupT<-table(as.integer(clustCut), isColon[names(clustCut)])
  # Fisher's exact test for association between cluster and tissue type.
  enrichment<-fisher.test(groupT)
  return(list(groupT,enrichment))
}
## Example pipeline: propagate mutations through the network, count pairwise
## matches, convert to a dissimilarity, then cluster and test for colon
## enrichment.
##example
a<-netInf(mutM,geneAdj)
b<-countMatch1(a)
c<-compDiss(b,1,mutM)
# NOTE(review): the plot title says "(number of samples - matchcount)" but
# compDiss() is called with type 1 (1 - count/(max+1)) -- confirm intended.
d<-grouping(c,isColon,"ward",5,"dissimilarity=(number of samples - matchcount) \n cluster method= ward k=2")
##for jaccards as d
# NOTE(review): this passes a plain vector of counts (not a "dist" object)
# as distM -- verify pam() receives what is intended here.
d<-grouping(b[!is.na(b)],isColon,"pam",2,"jaccards and two groups")
|
62d4bfa6980549cb7c22cdab1149f4b3ed887aa9
|
883fe42938c7d6b27ea646374df0aee4145f7774
|
/lab-5-SnarkyPuppy23/model.R
|
6068be884efd1e9e9d9a7bb0e5dfcdc780439bef
|
[] |
no_license
|
AaronNadell/COGS-319
|
36f64b89e1e5b7955ae42491551c6689a7fd8ddf
|
e6f7cb4f7d9924eadbbec5fcb492b081d5ddad03
|
refs/heads/master
| 2023-03-19T12:05:31.467953
| 2021-03-13T22:30:31
| 2021-03-13T22:30:31
| 207,401,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,854
|
r
|
model.R
|
library(dplyr)
library(ggplot2)
# this model will simulate a group of subjects completing the experiment described in the README.
# your job is to implement the reinforcement learning model
# experiment parameters
n.subjects <- 60 # how many simulated subjects to include
n.trials <- 30 # how many trials will each subject complete
prob.reward.A <- 0.8 # the probability of a positive reward for chosing option A
prob.reward.B <- 0.2 # the probability of a positive reward for chosing option B
reward.value <- 1 # the value of a positive reward
# model parameters
alpha <- 0.1 # controls the learning rate of the model
temperature <- 0.125 # controls the explore/exploit tradeoff of the decision rule
# implementing softmax ####
#' Softmax decision rule: probability of choosing option A.
#'
#' @param a.value Estimated value of option A.
#' @param b.value Estimated value of option B.
#' @param temperature Explore/exploit parameter: higher values make choices
#'   more random, lower values more greedy.
#' @return Probability of selecting option A, in (0, 1].
softmax.rule <- function(a.value, b.value, temperature){
  # Subtract the larger value before exponentiating so that a large
  # value/temperature ratio cannot overflow exp() to Inf (the original
  # Inf/Inf form returned NaN). Mathematically identical; pmax keeps the
  # computation elementwise for vector inputs.
  shift <- pmax(a.value, b.value)
  exp_a <- exp((a.value - shift)/temperature)
  exp_b <- exp((b.value - shift)/temperature)
  exp_a/(exp_a + exp_b)
}
softmax.rule(a.value=2, b.value=1, temperature=1) # should be 0.731
softmax.rule(a.value=10, b.value=1, temperature=5) # should be 0.858
# model ####
# this function should run one subject through all trials.
# you should store the choice that the model makes and
# the model's probability of choosing option 'a' at each trial.
# at the end, you will return a data frame that contains this information.
# this part of the code has been provided to you.
# for the model, follow the description in the README file.
#initialize values
A <- 0
B <- 0
choices.history <- c()
prob.a.history <-c()
#' Simulate one subject performing all n.trials of the two-option bandit task.
#'
#' Reads script-level globals: A, B (current option values, 0 at script
#' start), the empty accumulators choices.history / prob.a.history, and the
#' parameters n.trials, temperature, alpha, prob.reward.A, prob.reward.B.
#' Assignments to these names inside the function create LOCAL copies
#' (R copy-on-assign), so every call starts again from the global initial
#' values -- each subject is a "fresh model".
#' @return data.frame: trial index, choice ("a"/"b"), P(choose "a").
run.one.subject <- function(){
  for (i in 1:n.trials){ #start a loop or 1:n.trials
    #initialize values
    reward <- 0
    sample.1 <- softmax.rule(A,B, temperature) #compute probability of choosing 'a'
    sample.2 <- sample(c('a', 'b'), 1, prob=c(sample.1, (1-sample.1)), replace = T) #choose 'a' or 'b'
    if (sample.2 == 'a'){ #if sample chosen is 'a'
      reward <- sample(c(0, 1), 1, prob = c((1-prob.reward.A), prob.reward.A))#do they get a reward?
      A <- A + alpha*(reward - A) #update A's value based on reward or not
    }else if (sample.2 == 'b'){ #if sample chosen is 'b'
      reward <- sample(c(0, 1), 1, prob = c((1-prob.reward.B), prob.reward.B))#do they get a reward?
      B <- B + alpha*(reward - B) #update B's value based on reward or not
    }
    choices.history <- c(choices.history, sample.2) # please make sure that the values added to this are either 'a' or 'b'
    prob.a.history <- c(prob.a.history, sample.1)
  }
  return(data.frame(trial=1:n.trials, choice=choices.history, probability=prob.a.history))
}
# if you've implemented the model above correctly, then running this block of code with the
# default parameters (30 trials, prob.reward.A = 0.8, prob.reward.B = 0.2, reward.value = 1, alpha = 0.1, temperature = 0.125)
# should produce a final probability of selecting option A on trial 30 of 0.9975339. the model will also, by strange coincidence,
# choose option 'a' for every trial. This doesn't have to match up... may depend on how I implemented it...
set.seed(12604)
test.run.data <- run.one.subject()
# this code is provided to you to run multiple subjects, and aggregate their data into a single data frame
# note that it will cause R to display a WARNINGS message, but this is OK.
experiment.data <- NA # create variable to hold data.
for(s in 1:n.subjects){ # loop through each subject
subject.data <- run.one.subject() # run the subject with a fresh model
if(is.na(experiment.data)){ # if there is no data yet...
experiment.data <- subject.data # ... then make this subject's data the experiment.data
} else { # otherwise...
experiment.data <- rbind(experiment.data, subject.data) # ... add this subject's data to the end of the set
}
}
# this code uses the dplyr library to calculate the percentage of subjects who chose 'a' on each trial
# and to calculate the mean probability of selecting 'a' according to the model.
summarized.data <- experiment.data %>%
group_by(trial) %>%
summarize(choice_pct = sum(choice=="a")/n(), prob_mean = mean(probability))
# this code uses the ggplot2 library to make a plot similar to Figure 1 in the Pessiglione et al. (2006) paper.
ggplot(summarized.data, aes(x=trial, y=choice_pct))+
geom_point()+
geom_line(aes(y=prob_mean))+
ylim(c(0,1))+
labs(x="Trial Number", y="Modelled choices (%)")+
theme_bw()
########################################################
#PREDICTION ERROR
A <- 0
B <- 0
choices.history <- c()
prob.a.history <-c()
PE.history <-c()
#' Simulate one subject through all trials, tracking the reward prediction
#' error (PE) on every trial.
#'
#' Reads script-level globals: A, B (option values), the empty accumulators
#' choices.history / prob.a.history / PE.history, plus n.trials, temperature,
#' alpha, prob.reward.A and prob.reward.B. Local assignment means each call
#' starts from the global initial values.
#' @return data.frame with one row per trial: trial, choice ("a"/"b"),
#'   probability of choosing "a", and PE.
run.one.subject <- function(){
  for (i in 1:n.trials){
    reward <- 0
    # Softmax probability of 'a', then sample the actual choice.
    p.choose.a <- softmax.rule(A, B, temperature)
    choice <- sample(c('a', 'b'), 1, prob=c(p.choose.a, (1-p.choose.a)), replace = T)
    if (choice == 'a'){
      reward <- sample(c(0, 1), 1, prob = c((1-prob.reward.A), prob.reward.A))
      # PE is reward minus the PRE-update value estimate.
      PE <- reward - A
      A <- A + alpha*PE
    }else if (choice == 'b'){
      reward <- sample(c(0, 1), 1, prob = c((1-prob.reward.B), prob.reward.B))
      # Bug fix: PE is now computed from the pre-update value of B. The
      # original updated B first and then took PE = reward - B, so 'b'
      # trials recorded a post-update PE, inconsistent with the 'a' branch.
      # The value trajectory of B itself is unchanged.
      PE <- reward - B
      B <- B + alpha*PE
    }
    choices.history <- c(choices.history, choice)
    prob.a.history <- c(prob.a.history, p.choose.a)
    PE.history <- c(PE.history, PE)
  }
  return(data.frame(trial=1:n.trials, choice=choices.history, probability=prob.a.history, PE=PE.history))
}
experiment.data <- NA # create variable to hold data.
for(s in 1:n.subjects){ # loop through each subject
subject.data <- run.one.subject() # run the subject with a fresh model
if(is.na(experiment.data)){ # if there is no data yet...
experiment.data <- subject.data # ... then make this subject's data the experiment.data
} else { # otherwise...
experiment.data <- rbind(experiment.data, subject.data) # ... add this subject's data to the end of the set
}
}
summarized.data <- experiment.data %>%
group_by(trial) %>%
summarize(choice_pct = sum(choice=="a")/n(), prob_mean = mean(probability)/n(), PE_mean=mean(PE))
ggplot(summarized.data, aes(x=trial, y=PE_mean))+
geom_point()+
ylim(c(0,1))+
labs(x="Trial Number", y="PE")+
theme_bw()
# QUESTIONS ####
# 1. Try running the model with different values for alpha and temperature. What is the effect of each parameter
# on the final figure that is produced? Explain why these effects happen.
#with alpha = 0.5 and same Temp, The model curve aproached the assymptote sooner after
#approx. 10 trials
#with alpha = 0.001 and same Temp, The model's cure was linear and never approched the assymptote.
#with same alpha and Temp = 0.5, This made the model's curve more linear with more variability in % modelled choices
#which is expected.
#with same alpha and Temp = 0.001, This made the models curve approach 0.00 very quickly
#because the model has very low chances of choosing 'a' after it chose 'b' on the first round
# 2. Pessiglione et al. also included a condition where the reward was negative instead of positive. They plot
# the results as squares in Figure 1. Simulate this data. Can you match the general result? Why is the probability
# curve in both Figure 1 and your simulation less smooth for this simulation than when the reward is positive?
#it will inherently be less smooth because there's more variability for each stimulus
#more specifically on some trials the model may be thrown off because it might get a punishment
#for stimulus 'a' and thereby avoid it for a bit before learning that
# 'a' has the best probability for reward.
# 3. CHALLENGE (If you completed the rest of the lab relatively quickly, do this problem. If it took you plenty of
# effort to complete the model, you can choose whether to pursue this problem or not.):
# In the paper, the authors use the model's reward prediction error to find brain regions that
# correlate with this signal. Modify the model to save the reward prediction error on each step. Then plot
# the average reward prediction error for the 30 trials. Explain the shape of the graph. You may want to copy-and-paste
# the model code into a new file to do this.
# The model has decreasing reward prediction errors which makes sense because as the model
# learns, the predicted and actual values should become closer so their difference should
# go toward zero.
|
8f4b50dfe775085dd2a8414afd22a418eb918671
|
36596de50312413cb2d42223c05e009b4c2da50a
|
/recuperador_de_datos.R
|
8435826c6a5bbdee3aba5746611400dc1e34ee0f
|
[] |
no_license
|
RayanroBryan/rastreador_covid_19_costa_rica
|
dbf3692939ba956b7dc1c6f201cb57764591e71e
|
bf473160209afa9e35f2447ecdbc194146902634
|
refs/heads/master
| 2021-05-23T13:29:01.363605
| 2020-06-23T01:11:34
| 2020-06-23T01:11:34
| 253,311,030
| 1
| 0
| null | 2020-04-05T19:04:58
| 2020-04-05T19:04:57
| null |
UTF-8
|
R
| false
| false
| 18,227
|
r
|
recuperador_de_datos.R
|
# Dependencias
if(!require("pacman")) install.packages("pacman")
library(tidyverse)
library(lubridate)
library(echarts4r)
library(echarts4r.maps)
library(easynls)
library(reticulate)
####### Cargar/tratar datos -----------
#### llamado a data por cantones
tem_cr_caso_pc <- read.csv("datos/covid19_cantones_cr.csv")
#### limpia fechas
cr_caso_limpio <- tem_cr_caso_pc %>%
pivot_longer(-c(provincia, canton),
names_to = "fecha", values_to = "total") %>%
filter(canton != "DESCONOCIDO") %>%
filter(!is.na(total)) %>%
mutate(fecha = str_replace_all(fecha,"\\.", "-"),
fecha = str_remove(fecha,"X"),
fecha = as.Date(fecha, format = "%d-%m-%Y"))
cr_caso_provincia <- cr_caso_limpio %>%
group_by(provincia, fecha) %>%
summarize(total = sum(total))
#### almacena datos
saveRDS(cr_caso_limpio, file = "datos/cr_caso_limpio.RDS")
saveRDS(cr_caso_provincia, file = "datos/cr_caso_provincia.RDS")
#### carga datos generales
temp_casos_general <- read.csv("datos/covid19_general_cr.csv")
#### modificacion datos generales
temp_casos_general <- temp_casos_general %>%
mutate(anterior = lag(Confirmados),
anterior = if_else(is.na(anterior),
0,
as.numeric(anterior)),
casos_activos = Confirmados - Fallecidos - Recuperados,
casos_nuevos = Confirmados - anterior,
periodo_de_duplicacion = round(casos_activos/casos_nuevos, 0),
periodo_de_duplicacion = if_else(is.infinite(periodo_de_duplicacion),
0,
periodo_de_duplicacion),
casosdia = casos_nuevos + 1,
logcasos = log(casosdia),
dias = 1:nrow(temp_casos_general),
descartados_anterior = lag(Descartados),
descartados_anterior = if_else(is.na(descartados_anterior),
0,
as.numeric(descartados_anterior)),
descartados_por_dia = Descartados - descartados_anterior
)
temp_casos_general$Fecha <- as.Date(as.character(temp_casos_general$Fecha),
format = "%d/%m/%Y")
names(temp_casos_general)[names(temp_casos_general) == "casos_nuevos"] <- "Casos"
#### almacena datos generales
saveRDS(temp_casos_general, file = "datos/casos_general.RDS")
######### seccion "general" -----------------
ultima_fila = tail(temp_casos_general,1)
maximo_casos <- max(temp_casos_general$Casos)
#Codigo para obtener infectados por genero
genero <- factor(x = c("Hombres", "Mujeres"))
dfgeneros <- data.frame(Genero = genero,
Infectados = c(ultima_fila$Hombres, ultima_fila$Mujeres))
#Codigo para obtener infectados por nacionalidad
nacionalidad <- factor(x = c("Extranjeros", "Costarricenses"))
dfnacionalidad <- data.frame(Nacionalidad = nacionalidad,
Infectados = c(ultima_fila$Extranjeros, ultima_fila$Costarricenses))
#Codigo para obtener infectados por grupos etarios
dfedad <- data.frame(
Grupos = c("Adultos", "Adultos mayores", "Menores"),
Infectados = c(ultima_fila$Adultos, ultima_fila$Adultos.Mayores, ultima_fila$Menores)
)
#Grafico comparativo entre infectados por dia e infectados acumulados
graf_infectados <- temp_casos_general %>%
select(Fecha, Confirmados, Casos) %>%
`colnames<-`(c("Fecha", "Acumulados", "Diarios")) %>%
e_charts(Fecha) %>%
e_line(Acumulados) %>%
e_area(Diarios) %>%
e_tooltip(trigger = "axis") %>%
e_mark_point("Acumulados", data = list(type = "max")) %>%
e_mark_point("Diarios", data = list(type = "max")) %>%
e_legend(right = 0) %>%
e_title("Infectados", top = 0) %>%
e_x_axis(name = "Fecha", nameLocation = "center", nameGap = 40) %>%
e_y_axis(name = "Cantidad") %>%
e_text_style(fontSize = 12)
saveRDS(graf_infectados, file = "datos/graf_infectados.RDS")
#Grafico cantidad descartados
graf_descartados <- temp_casos_general %>%
e_charts(Fecha) %>%
e_line(Descartados, name = "Acumulados") %>%
e_area(descartados_por_dia, name = "Diarios") %>%
e_tooltip(trigger = "axis") %>%
e_mark_point("Acumulados", data = list(type = "max")) %>%
e_mark_point("Diarios", data = list(type = "max")) %>%
e_legend(right = 0) %>%
e_title("Descartados") %>%
e_x_axis(name = "Fecha", nameLocation = "center", nameGap = 40) %>%
e_y_axis(name = "Cantidad") %>%
e_text_style(fontSize = 12)
saveRDS(graf_descartados, file = "datos/graf_descartados.RDS")
#Mapa de calor: cantidad de infecciones por dia
graf_calendario <- temp_casos_general %>%
e_charts(Fecha) %>%
e_calendar(range = c(temp_casos_general[1,1], temp_casos_general[nrow(temp_casos_general), 1]),
dayLabel = list(nameMap = c("D", "L", "K", "M", "J", "V", "S")),
monthLabel = list(nameMap = c("Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Sep", "Oct", "Nov", "Dic")),
left = "25%",
width = "50%",
yearLabel = list(position = "right")) %>%
e_heatmap(Casos, coord_system = "calendar") %>%
e_visual_map(max = maximo_casos, top = 60) %>%
e_title("Calendario: nuevos casos por día") %>%
e_tooltip(formatter = htmlwidgets::JS("
function(params){
return('Fecha: ' + params.value[0] +
'</strong><br />Infectados: ' + params.value[1])
}
"))
saveRDS(graf_calendario, file = "datos/graf_calendario.RDS")
#Tabla top 10 cantones
cr_caso_limpio$canton <- as.character(cr_caso_limpio$canton)
tabla_top10 <- cr_caso_limpio %>%
group_by(canton) %>%
summarize(Casos = max(total),
first(provincia)) %>%
arrange(desc(Casos)) %>%
head(n = 10)
colnames(tabla_top10) <- c("Canton", "Infectados", "Provincia")
saveRDS(tabla_top10, file = "datos/tabla_top10.RDS")
#Grafico comparativo entre infectados por genero
graf_genero <- dfgeneros %>%
e_charts(Genero) %>%
e_bar(Infectados) %>%
e_title("Infectados según género") %>%
e_legend(right = 0) %>%
e_tooltip()
saveRDS(graf_genero, file = "datos/graf_genero.RDS")
#Grafico comparativo entre infectados por nacionalidad
graf_nacionalidad <- dfnacionalidad %>%
e_charts(Nacionalidad) %>%
e_pie(Infectados, radius = c("50%", "70%")) %>%
e_title("Infectados según nacionalidad") %>%
e_tooltip(axisPointer = list(type = "cross"))
saveRDS(graf_nacionalidad, file = "datos/graf_nacionalidad.RDS")
#Grafico comparativo infectados adultos, adultos mayores y menores
graf_edades <- dfedad %>%
e_charts(Grupos) %>%
e_pie(Infectados, radius = c("50%", "70%")) %>%
e_title("Infectados según grupo etario") %>%
e_tooltip(axisPointer = list(type = "cross"))
saveRDS(graf_edades, file = "datos/graf_edades.RDS")
######### Grafico hospitalizados
graf_hosp <- temp_casos_general %>%
select(Fecha, Hospitalizados, CI) %>%
`colnames<-`(c("Fecha", "Hospitalizados", "C.Intensivos")) %>%
filter(Fecha > "2020-03-30") %>%
e_charts(Fecha) %>%
e_bar(Hospitalizados) %>%
e_bar(C.Intensivos) %>%
e_tooltip(
trigger = "axis"
) %>%
e_x_axis(name = "Fecha", nameLocation = "center", nameGap = 40) %>%
e_y_axis(name = "Cantidad") %>%
e_text_style(fontSize = 12)
saveRDS(graf_hosp, file = "datos/graf_hosp.RDS")
######### definición de funciones para la sección de modelos -----------------
# Exponential growth model: cases at time t, given the initial value x0 and
# the per-step growth factor b. Vectorised over t.
estimacion <- function(x0, b, t) {
  x0 * b^t
}
# Gompertz growth curve evaluated at (1-indexed) time t.
# params = c(a, b, c): a = asymptote, b = displacement, c = growth rate;
# returns a * exp(-b * exp(-c * (t - 1))). Vectorised over t.
gompertz_mod <- function(params, t) {
  asymptote <- params[1]
  displacement <- params[2]
  growth_rate <- params[3]
  asymptote * exp(-displacement * exp(-growth_rate * (t - 1)))
}
# Logistic (sigmoid) growth curve. params = c(a, b, c): a = scale,
# b = midpoint, c = capacity; returns c / (1 + exp(-(x - b) / a)).
# Vectorised over x.
logistic_model <- function(params, x) {
  scale_p <- params[1]
  midpoint <- params[2]
  capacity <- params[3]
  capacity / (1 + exp(-(x - midpoint) / scale_p))
}
#' Forecast the date on which the epidemic curve flattens.
#'
#' Extends the fitted model one day at a time until two consecutive rounded
#' cumulative predictions are equal, i.e. the curve has flattened.
#'
#' @param params Fitted model parameters (3 values).
#' @param t Integer sequence of observed days, 1:n.
#' @param ultima_fecha Date of the last observation.
#' @param idmodel 2 = Gompertz model, 3 = logistic model.
#' @return Forecast flattening Date (ultima_fecha + extra days required).
cuando_acaba <- function(params, t, ultima_fecha, idmodel) {
  if (!idmodel %in% c(2, 3)) {
    # Fail fast: any other value previously fell through the if/else chain,
    # leaving `model_max` undefined (crash) -- or, had it been defined,
    # would have looped forever.
    stop("'idmodel' must be 2 (Gompertz) or 3 (logistic)", call. = FALSE)
  }
  extra_days <- 0
  repeat {
    model_max <- if (idmodel == 2) gompertz_mod(params, t) else logistic_model(params, t)
    finales <- tail(model_max, 2)
    # Flattened once the last two rounded predictions agree.
    if (round(finales[2]) == round(finales[1])) {
      break
    }
    t <- 1:(length(t) + 1)
    extra_days <- extra_days + 1
  }
  #return(data.frame(model_max, t))
  ultima_fecha + extra_days
}
######### seccion de modelo exponencial -----------------
#### crear modelo
modelo_log <- lm(logcasos~dias,data = temp_casos_general)
#### transfromar variables
x0 <- exp(modelo_log$coefficients[[1]])
b <- exp(modelo_log$coefficients[[2]])
#### datos ajustados
temp_casos_general <- temp_casos_general %>%
mutate(ajuste = estimacion(x0 = x0,
b = b,
t = (1:length(casosdia))),
casosdia = casosdia - 1)
names(temp_casos_general)[names(temp_casos_general) %in% c("casosdia","ajuste")] <- c("Casos Reales", "Estimado")
prediccion <- round(estimacion(x0 = x0,
b = b,
t = ((nrow(temp_casos_general) + 1):(nrow(temp_casos_general) + 7))),0)
prediccion <- data.frame(
"Casos_estimados"= prediccion,
time = ((nrow(temp_casos_general) + 1):(nrow(temp_casos_general) + 7))
)
prediccion <- prediccion %>%
mutate(Fecha = as.Date(temp_casos_general[1,"Fecha"] + days(time - 1), format = "%Y-%m-%d")) %>%
mutate(Fecha = paste(month(Fecha, label = TRUE, abbr = FALSE), day(Fecha))) %>%
select(
Fecha, Casos_estimados
)
colnames(prediccion) <- c("Fecha", "Casos diarios estimados")
#### almacena la tabla para el output
saveRDS(prediccion, file = "datos/prediccion.RDS")
ajuste_prediccion <- data.frame(
time = (1:(nrow(temp_casos_general) + 7)),
casos = round(estimacion(x0 = x0,b = b,t = (1:(nrow(temp_casos_general) + 7))))
)
colnames(ajuste_prediccion) <- c("time","Estimados")
#### agregar fechas
ajuste_prediccion <- ajuste_prediccion %>%
mutate(Fecha = as.Date(temp_casos_general[1,"Fecha"] + days(time - 1), format = "%Y-%m-%d")) %>%
mutate(Fecha = paste(month(Fecha, label = TRUE, abbr = FALSE), day(Fecha)))
general_temporal <- temp_casos_general %>%
mutate(Fecha = paste(month(Fecha, label = TRUE, abbr = FALSE), day(Fecha))) #Eliminar esto apenas se haya modificado la base de datos general
ajuste_prediccion <- ajuste_prediccion %>%
e_charts(Fecha) %>%
e_line(Estimados) %>%
e_tooltip(trigger = "axis") %>%
e_data(general_temporal) %>%
e_scatter(Casos, symbol_size = 7, name = "Confirmados") %>%
e_legend(right = 0) %>%
e_x_axis(name = "Fecha", nameLocation = "center", nameGap = 40) %>%
e_y_axis(name = "Diarios")
#### almacena el grafico para el output
saveRDS(ajuste_prediccion, file = "datos/ajuste_prediccion.RDS")
### Error medio absoluto exponencial
infoextra_exponencial<-data.frame(c("No aplica"),
round(mean(abs(general_temporal$`Casos Reales`- general_temporal$Estimado)),2),
c("No aplica")
)
colnames(infoextra_exponencial) <- c("Pronóstico del aplanamiento de la curva",
"Error medio absoluto (MAE)",
"Error porcentual medio absoluto (MAPE %)")
saveRDS(infoextra_exponencial,file="datos/infoextra_exponencial.RDS")
######### seccion de modelo gompertz -----------------
#### Cargar y preparar datos
df <- temp_casos_general %>%
select(Confirmados) %>%
tibble::rownames_to_column("Dia") %>%
mutate(Dia = lag(Dia),
Dia = if_else(is.na(Dia),
0,
as.numeric(Dia)
)
)
tiempo <- 1:(length(df$Dia) + 7)
tiemporeal <- 1:length(df$Dia)
ultima_fecha <- tail(temp_casos_general$Fecha, 1)
#### Ajustar modelo
modelo <- nlsfit(df,
model = 10,
start = c(a = 800,
b = 6,
c = 0.004
)
)
param_gompertz <- modelo$Parameters[1:3,1]
#### Graficar datos
modelado <- gompertz_mod(param_gompertz, tiempo)
predicciones_gompertz <- temp_casos_general %>%
select(Fecha) %>%
add_row(Fecha = seq.Date(ultima_fecha + 1,
length.out = 7,
by = "day")
) %>%
mutate(Estimados = round(modelado, 0),
Fecha = paste(month(Fecha, label = TRUE, abbr = FALSE), day(Fecha)))
modelo_gompertz <- predicciones_gompertz %>%
e_charts(Fecha) %>%
e_line(Estimados) %>%
e_data(general_temporal) %>% #Este es un data.frame con los casos reales acum. de covid y se crea en la seccion del modelo exponencial
e_scatter(Confirmados, symbol_size = 7) %>%
e_legend(right = 0) %>%
e_tooltip(trigger = "axis") %>%
e_x_axis(name = "Fecha", nameLocation = "center", nameGap = 40) %>%
e_y_axis(name = "Acumulados")
saveRDS(modelo_gompertz, file = "datos/modelo_gompertz.RDS")
#### Obtener salidas
colnames(predicciones_gompertz) <- c("Fecha", "Casos acum. estimados")
saveRDS(tail(predicciones_gompertz, 7), file = "datos/predicciones_gompertz.RDS")
infoextra_gompertz <- data.frame(as.character(cuando_acaba(param_gompertz, tiemporeal, ultima_fecha, 2)),
round(mean(abs(
general_temporal$Confirmados - modelado[1:nrow(general_temporal)]
)), 2),
round(mean( ( abs(
general_temporal$Confirmados - modelado[1:nrow(general_temporal)]
) / general_temporal$Confirmados ) ), 4)
)
colnames(infoextra_gompertz) <- c("Pronóstico del aplanamiento de la curva",
"Error medio absoluto (MAE)",
"Error porcentual medio absoluto (MAPE %)")
saveRDS(infoextra_gompertz, file = "datos/infoextra_gompertz.RDS")
######### seccion de modelo logistico -----------------
# NOTE(review): this section only works when run INTERACTIVELY. repl_python()
# opens a Python REPL and the lines below it are typed into that REPL; they
# are not valid R syntax, so sourcing this file non-interactively will fail.
dias<-temp_casos_general$dias
confirmados<-temp_casos_general$Confirmados
# Switch into an interactive Python session (reticulate).
repl_python()
import numpy as np
import scipy.optimize as optim
# Define the logistic function to optimise.
def logistic_model(x,a,b,c):
return c/(1+np.exp(-(x-b)/a))
# Random starting values and bounds for the three parameters.
p0=np.random.exponential(size=3)
bounds=(0,[100000.,10000.,4900000.])
# Fit the parameters to the original data (r.* reads objects from R).
ajuste = optim.curve_fit(logistic_model,r.dias,r.confirmados,p0=p0,bounds=bounds, maxfev = 1000)
# Leave the Python REPL.
exit
# Back in R: pull the fitted logistic parameters out via py$ (reads Python objects).
a=as.numeric(py$ajuste[[1]][1])
b=as.numeric(py$ajuste[[1]][2])
c=as.numeric(py$ajuste[[1]][3])
param_logistico <- c(a, b, c)
#Generar ajuste y predicciones del modelo logístico
ajuste_regresion_logistica <- logistic_model(params = param_logistico, x=(1:(nrow(temp_casos_general) + 7)))
#Generar DataFrame para el grafico de regresion logística
data_regresion_logistica <- data.frame(
Estimados = ajuste_regresion_logistica,
time = (1:(nrow(temp_casos_general) + 7))
)
#cambiar fecha para gráfico acumulado
ajuste_logistico_acum <- data_regresion_logistica%>%
mutate(
Fecha = as.Date(temp_casos_general[1,"Fecha"] + days(time - 1), format = "%Y-%m-%d"),
Estimados = round(Estimados)
) %>%
mutate(Fecha = paste(month(Fecha, label = TRUE, abbr = FALSE), day(Fecha))) %>%
select(
Fecha, Estimados
)
#gráfico acumulado
modelo_logistico <- ajuste_logistico_acum %>%
e_charts(Fecha) %>%
e_line(Estimados) %>%
e_data(general_temporal) %>% #Este es un data.frame con los casos reales acum. de covid y se crea en la seccion del modelo exponencial
e_scatter(Confirmados, symbol_size = 7) %>%
e_legend(right = 0) %>%
e_tooltip(trigger = "axis") %>%
e_y_axis(name = "Acumulados") %>%
e_x_axis(name = "Fecha",nameLocation="center",nameGap = 40)
colnames(ajuste_logistico_acum) <- c("Fecha", "Casos acum. estimados")
saveRDS(modelo_logistico,file = "datos/modelo_logistico.RDS")
predicciones_logistica <- ajuste_logistico_acum[((nrow(temp_casos_general) + 1):(nrow(temp_casos_general) + 7)) ,]
saveRDS(predicciones_logistica, file="datos/predicciones_logistica.RDS")
#Error medio absoluto logístico
infoextra_logistico <- data.frame(as.character(cuando_acaba(param_logistico, tiemporeal, ultima_fecha, idmodel = 3)),
round(mean(
abs(general_temporal$Confirmados - ajuste_regresion_logistica[1 : nrow(general_temporal)])),2),
round(mean( (
abs(general_temporal$Confirmados - ajuste_regresion_logistica[1 : nrow(general_temporal)]
) / general_temporal$Confirmados )
), 4)
)
colnames(infoextra_logistico)<-c("Pronóstico del aplanamiento de la curva",
"Error medio absoluto (MAE)",
"Error porcentual medio absoluto (MAPE %)")
saveRDS(infoextra_logistico,file="datos/infoextra_logistico.RDS")
######## seccion mapa -----------------
json <- jsonlite::read_json("mapas-json/provincias.geojson")
json$features <- json$features %>%
map(function(x){
x$properties$name <- x$properties$NPROVINCIA
return(x)})
prov_map <- cr_caso_provincia %>%
group_by(fecha) %>%
e_charts(provincia, timeline = TRUE) %>%
e_map_register("cr_provincia", json) %>%
e_map(total, map = "cr_provincia", name = "Confirmados") %>%
e_visual_map(min = 0,
max = max(cr_caso_provincia$total),
inRange = list(color = c('yellow','orange', 'orangered', 'red')),
show = TRUE) %>%
e_tooltip() %>%
e_timeline_opts(axis_type = "category",
playInterval = 1000,
currentIndex = length(unique(cr_caso_provincia$fecha))-1,
symbolSize = 4,
label = list(
show = FALSE
),
checkpointStyle = list(
symbol = "pin",
symbolSize = 25
))
saveRDS(prov_map, file = "datos/mapa_provincia.RDS")
|
34c06600148ab075acd841405e60f115ba80050e
|
fc7e1f1d1efc653df716dbad0e9047abc1c0039f
|
/CHEMDisplay.R
|
fbb41834d765193a0c1b666864c0eb4ffa8b0cf3
|
[] |
no_license
|
AspirinCode/ChemInformatics-1
|
07d926a6bb7176ee022c5b5ada26f36618a5256a
|
ff434ea508d393dcb4d389d250054dff9db184cb
|
refs/heads/master
| 2020-07-06T05:44:22.455752
| 2019-06-20T06:44:57
| 2019-06-20T06:44:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,844
|
r
|
CHEMDisplay.R
|
#' Build a styled dendrogram from a pairwise similarity matrix.
#'
#' @param simMatrix Square similarity matrix.
#' @param Namevector Labels applied to both rows and columns.
#' @return A dendrogram (complete linkage on 1 - similarity) with shrunken
#'   labels and viridis-coloured branches. Requires the dendextend and
#'   viridis packages.
displayClusterDend <- function(simMatrix, Namevector) {
  colnames(simMatrix) <- Namevector
  row.names(simMatrix) <- Namevector
  hCluster <- hclust(as.dist(1-simMatrix), method="complete")
  dend <- as.dendrogram(hCluster)
  # Bug fix: the styled result was previously discarded (the pipe's value
  # was never assigned), so the function returned the UNstyled dendrogram.
  dend <- dend %>%
    dendextend::set('labels_cex',0.5) %>%
    highlight_branches_col(viridis(100))
  return(dend)
}
#========================================================================
#' Draw a clustered heat map of pairwise compound dissimilarities.
#'
#' @param simMatrix Square similarity matrix.
#' @param compoundName Labels applied to both rows and columns.
#' @return The value returned by gplots::heatmap.2() (drawn as a side
#'   effect); single-linkage clustering on 1 - similarity orders both axes.
displayClusterHeatMap <- function(simMatrix, compoundName) {
  dimnames(simMatrix) <- list(compoundName, compoundName)
  dissimilarity <- 1 - simMatrix
  tree <- as.dendrogram(hclust(as.dist(dissimilarity), method = "single"))
  heatmap.2(dissimilarity,
            Rowv = tree,
            Colv = tree,
            col = colorpanel(40, "darkred", "yellow", "white"),
            density.info = "none",
            trace = "none")
}
#========================================================================
#' Scatter plot of maximum-common-substructure (MCS) size per drug.
#'
#' @param df Data frame with columns `drug` and `size`.
#' @param fileNameJPEG Output path; currently unused because the JPEG
#'   export is commented out.
#' @return A ggplot object.
displayFMCSSize <- function(df, fileNameJPEG) {
  # To export to file, wrap the plot in jpeg(fileNameJPEG, ...) / dev.off().
  mcs_plot <- ggplot(df, aes(df$drug, df$size)) +
    geom_point(shape = 21, colour = "#999999", fill = "#56B4E9", size = 4, stroke = 1) +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1),
          axis.text = element_text(size = 8)) +
    ylab("MCS Size (number of atoms)") +
    xlab("Drug")
  mcs_plot
}
#========================================================================
getNCBIFractionActivity <- function(drugTargetsMat, title=NULL) {
  # Bar chart of total fraction of activity per NCBI target identifier.
  #
  # drugTargetsMat: matrix/data frame with a `fraction_active` column and
  #                 target identifiers as row names
  # title:          optional plot title
  #
  # Returns a ggplot object.
  df <- as.data.frame(drugTargetsMat)
  # FIX: put the x variable in the data frame so aes() maps columns rather
  # than the external vector row.names(df) and df$fraction_active
  # (ggplot2 anti-pattern); the rendered plot is unchanged.
  df$target_id <- row.names(df)
  g <- ggplot(data = df, aes(x = target_id, y = fraction_active)) +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1),
          axis.text = element_text(size = 8)) +
    ylab("Total Fraction of Activity") +
    xlab("NCBI Target Identifier") +
    geom_bar(color = "#999999", fill = "#56B4E9", stat = "identity")
  if (!is.null(title)) {
    g <- g + ggtitle(title)
  }
  return(g)
}
|
49822620ce4ba6b4f37e0032ced58f41ea26338f
|
b4b5a5998f2fecc590a5df4026c0d12d83d46cf7
|
/man/add_grid_layer.Rd
|
55efde3f5980861269fb5a201fbd07f52eef07eb
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
crazycapivara/deckgl
|
50335a155c99307e40b21b2f82561f3cf15afb17
|
1741c8bc84d69e26694d670879911a0d2bb2c5c2
|
refs/heads/master
| 2023-04-03T06:19:35.377655
| 2023-03-26T10:46:36
| 2023-03-26T10:46:36
| 145,043,708
| 83
| 10
|
NOASSERTION
| 2023-03-26T10:46:41
| 2018-08-16T22:07:48
|
R
|
UTF-8
|
R
| false
| true
| 1,899
|
rd
|
add_grid_layer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers_grid-layer.R
\name{add_grid_layer}
\alias{add_grid_layer}
\title{Add a grid layer to the deckgl widget}
\usage{
add_grid_layer(
deckgl,
data = NULL,
properties = list(),
...,
id = "grid-layer"
)
}
\arguments{
\item{deckgl}{A deckgl widget object.}
\item{data}{The url to fetch data from or a data object.}
\item{properties}{A named list of properties with names corresponding to the properties defined
in the \href{https://deck.gl/#/documentation/deckgl-api-reference}{deckgl-api-reference}
for the given layer class. The \code{properties} parameter can also be an empty list. In this case
all props must be passed as named arguments.}
\item{...}{Named arguments that will be added to the \code{properties} object. Identical parameters
are overwritten.}
\item{id}{The unique id of the layer.}
}
\description{
The \code{GridLayer} renders a grid heatmap based on an array of points. It takes a constant size for each cell and projects points into cells.
The color and height of each cell are scaled by the number of points it contains.
}
\examples{
data("sf_bike_parking")
properties <- list(
filter = "spaces > 4",
visible = TRUE,
extruded = TRUE,
cellSize = 200,
elevationScale = 4,
getPosition = "@=[lng, lat]", #~lng + lat,
colorRange = RColorBrewer::brewer.pal(6, "YlOrRd"),
tooltip = "{{position.0}}, {{position.1}}<br/>Count: {{count}}"
)
deck <- deckgl(zoom = 11, pitch = 45, bearing = 35, element_id = "grid-layer") \%>\%
add_source("sf-bike-parking", sf_bike_parking) \%>\%
add_grid_layer(
source = "sf-bike-parking",
properties = properties
) \%>\%
add_control("Grid Layer") \%>\%
add_basemap() \%>\%
add_json_editor(wrap = 50, maxLines = 23)
if (interactive()) deck
}
\seealso{
\url{https://deck.gl/#/documentation/deckgl-api-reference/layers/grid-layer}
}
|
ecfcf370ca574dc3d2ce383968a70de4ea0909d8
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.management/man/configservice_put_external_evaluation.Rd
|
c677b8570dd2c1b15688311abdc3af8a2e8f6403
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 874
|
rd
|
configservice_put_external_evaluation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_put_external_evaluation}
\alias{configservice_put_external_evaluation}
\title{Put external evaluation}
\usage{
configservice_put_external_evaluation(ConfigRuleName,
ExternalEvaluation)
}
\arguments{
\item{ConfigRuleName}{[required]}
\item{ExternalEvaluation}{[required]}
}
\value{
An empty list.
}
\description{
Put external evaluation
}
\section{Request syntax}{
\preformatted{svc$put_external_evaluation(
ConfigRuleName = "string",
ExternalEvaluation = list(
ComplianceResourceType = "string",
ComplianceResourceId = "string",
ComplianceType = "COMPLIANT"|"NON_COMPLIANT"|"NOT_APPLICABLE"|"INSUFFICIENT_DATA",
Annotation = "string",
OrderingTimestamp = as.POSIXct(
"2015-01-01"
)
)
)
}
}
\keyword{internal}
|
2ff7def8feffb7ae3abaa29d5d76a93dafab8a5a
|
0e96f69fad9a8cd571eacecec2774e85c9d47454
|
/thirteen.R
|
4bfd745c0369f02bcb3b17fabf21fe953a97b3f9
|
[] |
no_license
|
Tarun-Sharma9168/R_programming
|
748e55b00682a07ff17b2e06aa7952e345141a7f
|
9c31a494d1047fab0ef1224ed6db07574931b6af
|
refs/heads/master
| 2020-09-26T14:36:18.442916
| 2019-12-06T07:51:38
| 2019-12-06T07:51:38
| 226,274,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
thirteen.R
|
# Bar chart comparing marks across five subjects labelled A-E.
marks=c(70,95,80,40,30)
# Vertical (horiz=FALSE) dark-red bars, one per subject, labelled via names.arg.
barplot(marks,main = "Comparing marks of the five subject",xlab="Subject",ylab="Marks",names.arg=c("A","B","C","D","E"),col="darkred",horiz=FALSE)
|
46ae850b7f31e73c82d3ffc523ebb94a13330474
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/glrt/R/Chisqstat3.R
|
019f8249a5a35b63dcb88442636016e2e6fc48d3
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
Chisqstat3.R
|
Chisqstat3 <- function(U, V, counts) {
  # Chi-squared statistic for score vector U with covariance matrix V,
  # scaled by the total count n = sum(counts).
  #
  # Returns c(statistic, degrees of freedom). A reduced statistic over the
  # first k-1 coordinates (df = k-1) is used when the first column of
  # `counts` is empty, or when every row of `counts` is identical (so all
  # row-wise column proportions coincide); otherwise the full k-dimensional
  # statistic (df = k) is used.
  #
  # Refactor of the original: the quadratic form was duplicated in three
  # branches (now a single helper), apply(counts, 2, sum) -> colSums, and
  # `=` -> `<-`. Numerical behaviour is unchanged.
  k <- length(U)
  n <- sum(counts)
  N <- colSums(counts)

  # Quadratic form U' V^{-1} U / n restricted to the first m coordinates;
  # returned together with its degrees of freedom m.
  stat <- function(m) {
    idx <- seq_len(m)
    c(t(U[idx]) %*% solve(V[idx, idx]) %*% U[idx] / n, m)
  }

  if (N[1] == 0) {
    return(stat(k - 1))
  }

  # Row-wise column proportions. Rows are compared with exact equality,
  # matching the original implementation (no floating-point tolerance).
  ratios <- t(apply(counts, 1, "/", N))
  rows_identical <- TRUE
  for (r in seq_len(k)[-1]) {
    if (any(ratios[r, ] != ratios[1, ])) {
      rows_identical <- FALSE
      break
    }
  }

  if (rows_identical) stat(k - 1) else stat(k)
}
|
47ae03281966baff213e27c997081c2c9fc18bce
|
66a863e177ecef6830da370498981f0bbb935446
|
/run_analysis.R
|
295c297884cf6d4f2f11aff9b7cc7f034c9ac12c
|
[] |
no_license
|
shahanuj855/Getting-Cleaning-Data-Project
|
e04c908653dfbc324eaaefdc7e2f828ae89c5954
|
3e6e98f074ddff1be92ae7c609c5a18d0e477fa0
|
refs/heads/master
| 2020-03-29T00:43:16.613816
| 2018-09-18T21:47:31
| 2018-09-18T21:47:31
| 149,348,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project.
# Downloads the UCI HAR dataset, merges the training and test sets, keeps
# only mean/std measurements, applies descriptive activity and variable
# names, and writes a tidy per-activity/per-subject summary.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, "./UCI HAR Dataset.zip")
unzip("UCI HAR Dataset.zip", exdir = getwd())

# Creating data frames for testing and training
library(data.table)

# Feature (column) names come from the second column of features.txt.
features <- read.csv('./UCI HAR Dataset/features.txt', header = FALSE, sep = ' ')
features <- as.character(features[, 2])

# Training set: measurements + activity codes + subject ids.
traindata_x <- read.table('./UCI HAR Dataset/train/X_train.txt')
traindata_activity <- read.csv('./UCI HAR Dataset/train/y_train.txt', header = FALSE, sep = ' ')
traindata_subject <- read.csv('./UCI HAR Dataset/train/subject_train.txt', header = FALSE, sep = " ")
train_data <- data.frame(traindata_subject, traindata_activity, traindata_x)
names(train_data) <- c("subject", "activity", features)

# Test set, assembled the same way.
testdata_x <- read.table('./UCI HAR Dataset/test/X_test.txt')
testdata_activity <- read.csv('./UCI HAR Dataset/test/y_test.txt', header = FALSE, sep = ' ')
testdata_subject <- read.csv('./UCI HAR Dataset/test/subject_test.txt', header = FALSE, sep = ' ')
test_data <- data.frame(testdata_subject, testdata_activity, testdata_x)
names(test_data) <- c("subject", "activity", features)

data.all <- rbind(train_data, test_data) # merge the training and testing dataset

# Keep subject, activity, and every mean/std measurement
# (+2 offset skips the subject and activity columns).
sub_meanStd <- grep('mean|std', features)
sub_data <- data.all[, c(1, 2, sub_meanStd + 2)]

# Replace numeric activity codes with descriptive labels.
activity.labels <- read.table('./UCI HAR Dataset/activity_labels.txt', header = FALSE)
activity.labels <- as.character(activity.labels[, 2])
# BUG FIX: this previously assigned to `data.sub`, an undefined object; the
# data frame holding the subset is named `sub_data`, so the activity codes
# were never relabelled.
sub_data$activity <- activity.labels[sub_data$activity]

# Expand abbreviated variable names into readable ones.
name.new <- names(sub_data)
name.new <- gsub("[(][)]", "", name.new)
name.new <- gsub("^t", "TimeDomain_", name.new)
name.new <- gsub("^f", "FrequencyDomain_", name.new)
name.new <- gsub("Acc", "Accelerometer", name.new)
name.new <- gsub("Gyro", "Gyroscope", name.new)
name.new <- gsub("Mag", "Magnitude", name.new)
name.new <- gsub("-mean-", "_Mean_", name.new)
name.new <- gsub("-std-", "_StandardDeviation_", name.new)
name.new <- gsub("-", "_", name.new)
names(sub_data) <- name.new

# Average every measurement by activity and subject and write the result.
# ROBUSTNESS: 3:ncol(sub_data) replaces the hard-coded 3:81, which silently
# breaks if the mean/std column selection ever changes.
data.tidy <- aggregate(sub_data[, 3:ncol(sub_data)],
                       by = list(activity = sub_data$activity, subject = sub_data$subject),
                       FUN = mean)
write.table(x = data.tidy, file = "data_tidy.txt", row.names = FALSE)
|
82f2c5bbcc6a93ad5dc779e49e7dbb117ca411f1
|
af84f4fb4bd7c41432482cce6170da3e6af3a130
|
/PE/9.R
|
a82f3f9d2aa77901ed03a08c225dc94f5d713e97
|
[] |
no_license
|
tkmckenzie/pan
|
a65fc375eea8171c9b64f8360c5cf7b152830b7d
|
5337e4c1d09f06f2043551e1dd1ec734aab75b49
|
refs/heads/master
| 2023-01-09T11:54:29.636007
| 2022-12-21T22:26:58
| 2022-12-21T22:26:58
| 156,240,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
9.R
|
# Project Euler 9: find the unique Pythagorean triple (a, b, c) with
# a + b + c = 1000 and report the product a * b * c.
# (The original `rm(list = ls())` was dropped: wiping the caller's
# workspace is a side effect a script should not have.)
max.N <- 1000
a.b <- combn(seq_len(max.N), 2)      # all pairs a < b, one pair per column
# BUG FIX: the original computed `a.b.sums` but then referenced an undefined
# variable `sums` when building a.b.c, so the script errored out.
a.b.sums <- colSums(a.b^2)           # a^2 + b^2 (vectorized; was apply)
a.b.c <- rbind(a.b, sqrt(a.b.sums))  # candidate c = sqrt(a^2 + b^2)
a.b.c.sums <- colSums(a.b.c)         # a + b + c per column
# Exact comparison is safe: sqrt of a perfect square is exact in doubles.
answer <- prod(a.b.c[, which(a.b.c.sums == max.N)])
print(answer)
|
82c874f44d8b7a9f089a974e36279fcfb9df1630
|
a717f53ded765a2e7cf52e829ae4a31d1b8cdd34
|
/R/msaeDB-Package.R
|
0b9646d84728d61c4505e196770b733593d4c9f4
|
[] |
no_license
|
zazaperwira/msaeDB
|
79f43dca177cd233fbc0c0c947be74ba245be660
|
070089d107c48caebc6024ae6bd615535f40033b
|
refs/heads/master
| 2023-03-31T01:45:38.180574
| 2021-04-01T03:08:05
| 2021-04-01T03:08:05
| 327,575,056
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,253
|
r
|
msaeDB-Package.R
|
#' msaeDB : Multivariate Small Area Estimation with Difference Benchmarking
#'
#' Implements Benchmarking Method for Multivariate Small Area Estimation under Fay Herriot Model. Multivariate Small Area Estimation (MSAE) is a development of Univariate Small Area Estimation that considers the correlation among response variables and borrows the strength from related areas and auxiliary variables to increase the effectiveness of sample size, the multivariate model in this package is based on multivariate model 1 proposed by Roberto Benavent and Domingo Morales (2016) <doi:10.1016/j.csda.2015.07.013>. Benchmarking in Small Area Estimation is a modification of Small Area Estimation model to guarantee that the aggregate weighted mean of the county predictors equals the corresponding weighted mean of survey estimates. Difference Benchmarking is the simplest benchmarking method but widely used by multiplying empirical best linear unbiased prediction (EBLUP) estimator by the common adjustment factors (J.N.K Rao and Isabel Molina, 2015).
#'
#' @section Author(s):
#' Zaza Yuda Perwira, Azka Ubaidillah
#'
#' \strong{Maintainer}: Zaza Yuda Perwira \email{221710086@@stis.ac.id}
#'
#' @section Functions:
#' \describe{
#'   \item{\code{\link{msaedb}}}{Produces EBLUPs, MSE, and Aggregation of Multivariate SAE with Difference Benchmarking}
#'   \item{\code{\link{saedb}}}{Produces EBLUPs, MSE, and Aggregation of Univariate SAE with Difference Benchmarking}
#'   \item{\code{\link{msaefh}}}{Produces EBLUPs and MSE of Multivariate SAE}
#'   \item{\code{\link{saefh}}}{Produces EBLUPs and MSE of Univariate SAE}
#'   \item{\code{\link{msaedbns}}}{Produces EBLUPs, MSE, and Aggregation of Multivariate SAE with Difference Benchmarking for non-sampled areas}
#'   \item{\code{\link{saedbns}}}{Produces EBLUPs, MSE, and Aggregation of Univariate SAE with Difference Benchmarking for non-sampled areas}
#'   \item{\code{\link{msaefhns}}}{Produces EBLUPs and MSE of Multivariate SAE for non-sampled areas}
#'   \item{\code{\link{saefhns}}}{Produces EBLUPs and MSE of Univariate SAE for non-sampled areas}
#'}
#' @section Reference:
#' \itemize{
#'   \item{Benavent, Roberto & Morales, Domingo. (2016). Multivariate Fay-Herriot models for small area estimation. Computational Statistics and Data Analysis 94 2016 372-390. <doi:10.1016/j.csda.2015.07.013>.}
#'   \item{Rao, J.N.K & Molina. (2015). Small Area Estimation 2nd Edition. New York: John Wiley and Sons, Inc.}
#'   \item{Steorts, Rebecca & Ghosh, Malay. (2013). On estimation of mean square Errors of Benchmarked Empirical Bayes Estimators. Article in Statistica Sinica April 2013. <doi:10.5705/ss.2012.053>.}
#'   \item{Ubaidillah, Azka et al. (2019). Multivariate Fay-Herriot models for small area estimation with application to household consumption per capita expenditure in Indonesia. Journal of Applied Statistics. 46:15. 2845-2861. <doi:10.1080/02664763.2019.1615420>.}
#'   \item{Permatasari, Novia. (2020). Pembangunan paket R pada model Fay Herriot multivariat untuk pendugaan area kecil (Bachelor Thesis). Jakarta: Polytechnic Statistics of STIS}
#' }
#'
#'
#' @docType package
#' @name msaeDB
#'
#' @import magic
#' @import MASS
#' @import stats
NULL
|
89355b3aa57b5a628e11f3f4f500e93747585ab6
|
6159b0aa17a0b3e6eb4edaf8021d70963a9bca96
|
/TCR_analyses/TCR_clonality_umaps.R
|
f6acd59c70947353b9b35163213e1d0bde9e7761
|
[
"MIT"
] |
permissive
|
IzarLab/fresh_vs_frozen_comparison
|
c9854a7cbeb1da5e627e664c4677a46502447338
|
02ed44b0851dd5391967339b4900aa3384b9932f
|
refs/heads/main
| 2023-04-15T04:12:50.648881
| 2023-02-24T19:57:53
| 2023-02-24T19:57:53
| 446,448,844
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,463
|
r
|
TCR_clonality_umaps.R
|
library(rlist)
library(ggplot2)
library(Seurat)
library(infercnv)
library(stringr)
### title: Create umaps of T cells with TCR clonality plotted in different colors according to frequency, for cutaneous melanoma, uveal melanoma primary, uveal melanoma metastasis, and sequential cutaneous melanoma samples under pembrolizumab therapy
### author: Yiping Wang date: 11/08/2022

#load list of rds objects corresponding to each dataset
# NOTE(review): foldersList holds only empty strings and is never read
# below — confirm whether it can be removed.
foldersList = c("",
"",
"",
"")
integrated_name_arr = c("reannotate_ribas_melanoma_merged_tcells","reannotate_uveal_melanoma_tcells_reintegrated_with_1_subclustered_dim_num_25_then_15","fresh_vs_frozen_tcells_reannotate_BI5","fresh_vs_frozen_tcells_reannotate_cpoi-uvealprimarydata")
dataset_names = c("ribas","UMEL","BI5","UM")
display_individual_clonalities = FALSE

# NOTE(review): only datasets 3 and 4 (BI5 and UM) are processed here, so
# the i==1 || i==2 palette branch further down is unreachable as written —
# presumably the loop range was narrowed at some point; verify intent.
for (i in 3:4) {
  seu = readRDS(paste0("/data/",integrated_name_arr[i],".rds"))
  DefaultAssay(seu) = "RNA"
  #extract specific seurat clusters or cell types corresponding to t-cells in each dataset
  if (dataset_names[i]=="ribas")
  {
    seu = subset(seu, seurat_clusters %in% c(0,1,2,3,4,6,7,9,10,11,12,14))
  }
  else if (dataset_names[i]=="UMEL")
  {
    seu = subset(seu, seurat_clusters %in% c("0","1_CD8","4","6","8"))
  }
  else if (dataset_names[i]=="BI5")
  {
    seu = subset(seu, orig.ident %in% c("CD45pos","5snseq"))
  }
  else if (dataset_names[i]=="UM")
  {
  }
  #if specific seurat clusters were selected, recluster and rerun umap
  if (dataset_names[i]=="ribas" || dataset_names[i]=="UMEL")
  {
    dim_num = 15
    seu <- ScaleData(object = seu)
    seu <- FindVariableFeatures(seu, selection.method = "vst", nfeatures = 2000)
    seu <- RunPCA(object = seu)
    seu <- FindNeighbors(seu, dims = 1:dim_num)
    seu <- FindClusters(seu)
    seu <- RunUMAP(object = seu, dims = 1:dim_num)
  }
  #assign TCR clonality groups to rds object using function
  source("fresh_vs_frozen_comparison/TCR_analyses/assign_TCR_clonality.R")
  seu = assign_TCR_clonality(seu, dataset_names[i])
  #for uveal melanoma metastasis data only, write out csv file containing clonality and clonality_group information
  if (dataset_names[i]=="UM")
  {
    writetable = data.frame(barcode = seu$barebarcodes, orig.ident = seu$orig.ident, clonality = seu$clonality, clonality_group = seu$clonality_group)
    write.table(writetable, "uveal_melanoma_tcell_clonality.csv", sep=",", quote=F, col.names=T, row.names=F)
  }
  #if display_individual_clonalities is TRUE, create list of cell barcodes corresponding to each clonality value
  #label list using human-friendly clonality labels
  if (display_individual_clonalities)
  {
    highlight_list = list()
    clone_numbers = sort(unique(seu$clonality))
    if (length(clone_numbers)!=0)
    {
      seu$dummy_index = 1:length(seu$orig.ident)
      for (i1 in 1:length(clone_numbers)) {
        highlight_list = list.append(highlight_list,colnames(seu)[seu$clonality==clone_numbers[i1]])
      }
      # zero-pad single-digit clonality values so the labels sort correctly
      display_numbers = as.character(clone_numbers)
      for (i1 in 1:length(display_numbers))
      {
        if (str_length(display_numbers[i1])==1)
        {
          display_numbers[i1] = paste0("0",display_numbers[i1])
        }
      }
      names(highlight_list) = paste0("Clonality: ", display_numbers)
    }
  }
  else
  {
    #otherwise, create list of cell barcodes corresponding to each clonality_group value
    unique_clonality_groups = unique(seu$clonality_group)
    highlight_list = list()
    for (agroup in unique_clonality_groups)
    {
      highlight_list = list.append(highlight_list, colnames(seu)[seu$clonality_group==agroup])
    }
    names(highlight_list) = unique_clonality_groups
  }
  #use gray for cell barcodes not matched with TCR sequencing, yellow-red spectrum for expanded clonotypes, and either blue for all unexpanded clones, or additionally black and purple for CD4 and CD8 unexpanded clones
  if (i==1 || i==2)
  {
    clonality_palette = c("gray","blue",colorRampPalette(c("yellow","red"))(4))
    point_scale = c(0.2,1,1,1,1,1)
    names(clonality_palette) = c("Unmatched with TCR sequencing","Unexpanded clones","Expanded clones with clonality 2","Expanded clones with clonality > 2 and <= 5","Expanded clones with clonality > 5 and <= 20","Expanded clones with clonality > 20")
    names(point_scale) = names(clonality_palette)
  }
  else if (i==3 || i==4)
  {
    clonality_palette = c("gray","black","purple","blue",colorRampPalette(c("yellow","red"))(4))
    point_scale = c(0.5,0.5,0.5,1,1,1,1,1)
    names(clonality_palette) = c("Unmatched with TCR sequencing","CD4+ T-cells unmatched with TCR sequencing","CD8+ T-cells unmatched with TCR sequencing","Unexpanded clones","Expanded clones with clonality 2","Expanded clones with clonality > 2 and <= 5","Expanded clones with clonality > 5 and <= 20","Expanded clones with clonality > 20")
    names(point_scale) = names(clonality_palette)
  }
  #print umaps of tcell by fresh/frozen status, original sample identity, and clonality group, as well as expression of cell cycle and exhaustion markers
  pdf(paste0(integrated_name_arr[i],"_clonality_umap.pdf"),height=7,width=12)
  umap_df = data.frame(UMAP_1=seu@reductions$umap[[,1]], UMAP_2=seu@reductions$umap[[,2]], clonality_group=seu$clonality_group)
  theme_set(theme_bw())
  print(DimPlot(seu, reduction = "umap", label = T, group.by = "fresh_frozen", repel = T, label.size = 3, shuffle = T) + guides(col = guide_legend(nrow = 30,override.aes = list(size=5))) + theme(legend.text=element_text(size=10)) + ggtitle("fresh_frozen"))
  print(DimPlot(seu, reduction = "umap", label = T, group.by = "orig.ident", repel = T, label.size = 3, shuffle = T) + guides(col = guide_legend(nrow = 30,override.aes = list(size=5))) + theme(legend.text=element_text(size=10)) + ggtitle("orig.ident"))
  print(ggplot(umap_df) + geom_point(aes(x=UMAP_1, y=UMAP_2, color=clonality_group, size=clonality_group)) + scale_color_manual(values=clonality_palette, breaks=names(clonality_palette)) + scale_size_manual(values=point_scale, breaks=names(point_scale)) + xlab("UMAP_1") + ylab("UMAP_2") + guides(color = guide_legend(title="Clonality"), size = "none"))
  # higher expression cutoff for the merged ribas dataset only
  if (integrated_name_arr[i]=="reannotate_ribas_melanoma_merged_tcells")
  {
    print(FeaturePlot(seu, features = c("TOP2A","MKI67","TOX","TCF7"), min.cutoff = "1", max.cutoff = "4"))
  }
  else
  {
    print(FeaturePlot(seu, features = c("TOP2A","MKI67","TOX","TCF7"), min.cutoff = "1", max.cutoff = "3"))
  }
  dev.off()
  #nonsense = nonsense+1
}
|
d298fc845acd70f4eb0d203211f14b465072a9f3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatstat/examples/nndist.lpp.Rd.R
|
46434bbb7a3a70174f37f982216458bc9f2a0164
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
nndist.lpp.Rd.R
|
library(spatstat)
### Name: nndist.lpp
### Title: Nearest neighbour distances on a linear network
### Aliases: nndist.lpp
### Keywords: spatial
### ** Examples
# Drop 12 uniformly random points onto the built-in example linear network.
X <- runiflpp(12, simplenet)
# Nearest-neighbour distance for each point on the network.
nndist(X)
nndist(X, k=2)  # distance to the second-nearest neighbour instead
|
f192d6bac1d90db06cef850a812cbc47372569d1
|
78dbd8fd96aed35aee83352e6c57e1dee727600c
|
/R_Scripts/total_trials_enrollment_count.R
|
f7fc972492ed88cf6e6221fb62336e0a00069a32
|
[
"MIT"
] |
permissive
|
kylienorwood/ClinicalTrialsViz
|
9b8cb92df05fc35f0428da7a6ed3ae944f783982
|
9e6083b6a3297fdb5b3b52e42d2d7f4ec54210f6
|
refs/heads/master
| 2020-06-06T01:04:11.580591
| 2019-07-29T18:26:48
| 2019-07-29T18:26:48
| 192,596,260
| 1
| 0
| null | 2019-06-18T18:54:05
| 2019-06-18T18:54:05
| null |
UTF-8
|
R
| false
| false
| 1,384
|
r
|
total_trials_enrollment_count.R
|
# Queries the AACT clinical-trials database for completed trials and plots a
# histogram of how many trials share each enrollment count.

# Install or load libraries
tryCatch({
  library(RPostgreSQL)
}, error = function(e) {
  # BUG FIX: the fallback previously installed and loaded "RPPostgreSQL"
  # (misspelled with a doubled P), so it could never recover; the CRAN
  # package is "RPostgreSQL".
  install.packages("RPostgreSQL")
  library(RPostgreSQL)
})
tryCatch({
  library(tidyverse)
}, error = function(e) {
  install.packages("tidyverse")
  library(tidyverse)
})

# Connect to database; credentials are read from environment variables so
# they are never hard-coded in the script.
drv <- dbDriver('PostgreSQL')
con <- dbConnect(drv,
                 dbname = "aact",
                 host = Sys.getenv("host"),
                 port = 5432,
                 user = Sys.getenv("userid"),
                 password = Sys.getenv("userpass"))

# Make ctgov schema public
dbExecute(con, "SET search_path TO ctgov,public")

# Query database for enrollment of completed trials
completed <- dbGetQuery(con, "SELECT enrollment, overall_status FROM Studies WHERE overall_status = 'Completed'")

# Summarize the total number of trials with the same enrollment count
completed <- completed %>%
  group_by(enrollment) %>%
  summarize(count = n())

# Create histogram to visualize the number of people enrolled in each completed trial
completed %>%
  filter(enrollment <= 20000) %>%
  ggplot() +
  geom_histogram(mapping = aes(enrollment), bins = 200, fill = 'black') +
  labs(title = "Total Number of Trials by Enrollment Count",
       caption = "This histogram helps visualize the general trend of how many trials have a certain number of people enrolled.") +
  theme(plot.title = element_text(hjust = 0.5))
|
8ec9f9bcd2b4e613abb6aa63ad72d80c698bad5c
|
bc9b42cd4cb22cd3eccd5d27838f48fec54f9d1e
|
/Rcode/temp.R
|
f00b089a7ff6eb95d758df3c81672b0881703fe6
|
[] |
no_license
|
mbh038/Waves
|
61dd0a0a3334dae6314899fa1d6c08b58ca0411c
|
ec829db267f17bf2477748be3cd7cad2fef54287
|
refs/heads/master
| 2021-01-19T03:59:46.710943
| 2017-05-24T15:25:08
| 2017-05-24T15:25:08
| 84,421,038
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,215
|
r
|
temp.R
|
# EnergySim: Monte-Carlo simulation of a small wind + solar generation mix
# against domestic electricity demand, using synthetic 10-minute resource
# time series drawn at random from pre-generated files.
windMW <- 20    # installed wind capacity, MW; was seq(0,50,5)
solarMWp <- 0   # installed solar capacity, MWp; was seq(0,50,5)

# Turbine power curve: column 1 = wind speed, column 2 = normalised output.
windPower <- read.table("../data/specs/windPowerCurve.csv", header = FALSE, sep = ",")

# read in demand files: per-house demand in W, scaled to `houses` and MW.
houses = 18000
ddata <- read.csv("../data/profiles/EESP/domDem10.csv")
demand <- houses * ddata$W / 1e6
rm(ddata)

sipfilepathstem <- "../data/synthetic/CamBSRN_Solar10minSyn/CamSolarSyn10min"
wipfilepathstem <- "../data/synthetic/CallywithWind10minSyn/Cally"
sipfilepathtail <- ".csv"
wipfilepathtail <- "_10min.csv"

# Build a zero-padded, three-digit input file name, e.g. stem + "007" + tail.
ipfilename <- function(file, ipfilepathstem, ipfilepathtail) {
  ipfilehandle <- as.character(file)
  if (file < 10) {
    ipfilehandle <- paste0("00", ipfilehandle)
  }
  if (file >= 10 && file < 100) {
    ipfilehandle <- paste0("0", ipfilehandle)
  }
  paste0(ipfilepathstem, ipfilehandle, ipfilepathtail)
}

# Normalised turbine output for a single wind-speed value (exact lookup in
# the power-curve table; assumes the speed appears in column 1 — TODO confirm).
wp <- function(x) {
  windPower[which(windPower[, 1] == x), 2]
}

## loop through randomly chosen solar and wind files
numTrials <- 10
trial <- 0
# BUG FIX: results were previously appended as res[i] — one *column* per
# trial in a data frame — so hist(res[,1]) plotted the four statistics of a
# single trial instead of the distribution of a statistic across trials.
# Store one row per trial in a preallocated matrix instead.
res <- matrix(NA_real_, nrow = numTrials, ncol = 4,
              dimnames = list(NULL, c("min.pbalance", "max.pbalance",
                                      "min.ebalance", "max.ebalance")))
for (i in 1:numTrials) {
  trial <- trial + 1
  wfile <- floor(100 * runif(1) + 1)   # random file index in 1..100
  sfile <- floor(100 * runif(1) + 1)
  print(paste("Trial: ", trial, " Solar file:", sfile, ", Wind file: ", wfile, sep = " "))
  wfilename <- ipfilename(wfile, wipfilepathstem, wipfilepathtail)
  sfilename <- ipfilename(sfile, sipfilepathstem, sipfilepathtail)
  wdata <- read.csv(wfilename)[, 2]
  sdata <- read.csv(sfilename)[, 2]

  # Wind output (MW): capacity times power-curve lookup per timestep.
  windop <- windMW * unlist(sapply(wdata, wp))
  # Solar output (MW): capacity times normalised irradiance (per 1000).
  solarop <- solarMWp * sdata / 1000
  totalop <- windop + solarop
  balance <- totalop - demand        # instantaneous surplus/deficit, MW
  ebalance <- cumsum(balance) / 6000 # cumulative energy balance, GWh

  res[i, ] <- c(min(balance), max(balance), min(ebalance), max(ebalance))
}

# Distributions of the per-trial extremes.
library(rafalib)
mypar(4, 1)
hist(res[, 1], breaks = 50, main = "min pbalance")
hist(res[, 2], breaks = 50, main = "max pbalance")
hist(res[, 3], breaks = 50, main = "min ebalance")
hist(res[, 4], breaks = 50, main = "max ebalance")
summary(res)

# Time-series views for the *last* simulated trial (windop/solarop/balance
# still hold the final iteration's values).
mypar(3, 1)
days <- seq(1, 1000) / 144  # 10-minute steps -> days
plot(days, demand[1:1000], type = "l",
     ylim = c(-12, 12),
     xlab = "Winter days",
     ylab = "Power (MW)")
lines(days, solarop[1:1000], col = "red")
lines(days, windop[1:1000], col = "blue")
lines(days, balance[1:1000], col = "green")

plot(days, demand[25001:26000], type = "l",
     ylim = c(-12, 12),
     xlab = "Summer days",
     ylab = "Power (MW)")
lines(days, solarop[25001:26000], col = "red")
lines(days, windop[25001:26000], col = "blue")
lines(days, balance[25001:26000], col = "green")

ydays <- seq(1, length(ebalance)) / 144
plot(ydays, ebalance, type = "l")

mypar(3, 1)
hist(demand)
hist(totalop)
hist(balance)
|
84a3f4922c32dbe5379e2a98e10982e40737f05c
|
83c489d0e7fca84beb4ecb5a8a3d7f666b076fcf
|
/man/TGGLMix.Rd
|
61b66689d8b7aa5f7c1606e64c8a2b8887200323
|
[] |
no_license
|
tohein/linearMTL
|
9f3a02640b5b6e51425f62005744ef3047683a06
|
bd27f8e90ea297ddca7c6f5e110e6b62ba0cff55
|
refs/heads/master
| 2021-03-27T14:40:25.364761
| 2018-11-09T15:37:36
| 2018-11-09T15:37:36
| 106,738,759
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,955
|
rd
|
TGGLMix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tggl_mixture.R
\name{TGGLMix}
\alias{TGGLMix}
\title{Fit a tree-guided group lasso mixture model (TGGLMix).}
\usage{
TGGLMix(X = NULL, task.specific.features = list(), Y, M, groups, weights,
lambda, gam = 1, homoscedastic = FALSE, EM.max.iter = 1000,
EM.epsilon = 1e-04, EM.verbose = 0, sample.data = FALSE,
TGGL.mu = 1e-05, TGGL.epsilon = 1e-05, TGGL.iter = 25)
}
\arguments{
\item{X}{N by J1 matrix of features common to all tasks.}
\item{task.specific.features}{List of features which are specific to each
task. Each entry contains an N by J2 matrix for one particular task (where
columns are features). List has to be ordered according to the columns of
Y.}
\item{Y}{N by K output matrix for every task.}
\item{M}{Number of Clusters.}
\item{groups}{Binary V by K matrix determining group membership: Task k in
group v iff groups[v,k] == 1.}
\item{weights}{V dimensional vector with group weights.}
\item{lambda}{Regularization parameter.}
\item{gam}{(Optional) Regularization parameter for component m will be lambda
times the prior for component m to the power of gam.}
\item{homoscedastic}{(Optional) Force variance to be the same for all tasks
in a component. Default is FALSE.}
\item{EM.max.iter}{(Optional) Maximum number of iterations for EM algorithm.}
\item{EM.epsilon}{(Optional) Desired accuracy. Algorithm will terminate if
change in penalized negative log-likelihood drops below EM.epsilon.}
\item{EM.verbose}{(Optional) Integer in {0,1,2}. verbose = 0: No output.
verbose = 1: Print summary at the end of the optimization. verbose = 2:
Print progress during optimization.}
\item{sample.data}{(Optional) Sample data according to posterior probability
or not.}
\item{TGGL.mu}{(Optional) Mu parameter for TGGL.}
\item{TGGL.epsilon}{(Optional) Epsilon parameter for TGGL.}
\item{TGGL.iter}{(Optional) Initial number of iterations for TGGL. Will be
increased incrementally to ensure convergence. When the number of samples
is much larger than the dimensionality, it can be beneficial to use a large
initial number of iterations for TGGL. This is because every run of TGGL
requires precomputation of multiple n-by-n matrix products.}
}
\value{
List containing
\item{models}{List of TGGL models for each component.}
\item{posterior}{N by M Matrix containing posterior probabilities.}
\item{prior}{Vector with prior probabilities for each component.}
\item{sigmas}{M by K Matrix with standard deviations for each component.}
\item{obj}{Penalized negative log-likelihood (final objective value).}
\item{loglik}{Likelihood for training data.}
\item{groups}{groups argument.}
\item{weights}{weights argument.}
\item{lambda}{lambda argument.}
}
\description{
Fit a tree-guided group lasso mixture model using a generalized EM
algorithm. May be trained on shared or task specific feature matrices.
}
\seealso{
\code{\link{TreeGuidedGroupLasso}}
}
|
31b0845941e08e15335b0f5427c56e4f7ee5440a
|
69e39dbd8fc9d4c1584d53ad1b1195f63783f68d
|
/cachematrix.R
|
91b70bc9d550a1c050659538442ff054a9be0c11
|
[] |
no_license
|
poplock10/Coursera_Course_Work
|
e12a890d8bcdfc4747f6521e8f1b7c2a1aacc064
|
9991f28e83539b97e473153370b41aa359bc9957
|
refs/heads/master
| 2021-01-17T13:40:08.712776
| 2016-05-27T15:49:18
| 2016-05-27T15:49:18
| 34,121,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
cachematrix.R
|
## Functions for caching the inverse of a matrix: makeCacheMatrix() builds
## a matrix wrapper object that can cache its inverse, and cacheSolve()
## computes the inverse or retrieves the cached copy.
# Creates a "cache matrix": a list of closures wrapping a matrix `x`
# together with a cached value `v` for its inverse.
#   set(y)        - replace the matrix and invalidate the cached inverse
#   get()         - return the stored matrix
#   setmatrix(s)  - store a computed inverse in the cache
#   getmatrix()   - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  v <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    # BUG FIX: this previously did m <<- NULL, clearing a variable that does
    # not exist in the closure, so a stale inverse survived a call to set().
    v <<- NULL
  }
  get <- function() x
  # Parameter kept as `solve` for backward compatibility with named calls,
  # even though it shadows base::solve inside this function.
  setmatrix <- function(solve) v <<- solve
  getmatrix <- function() v
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## Computes the inverse of the matrix held in a cache-matrix object created
## by makeCacheMatrix(), returning the cached inverse when available and
## computing (and caching) it otherwise.
cacheSolve <- function(x = matrix(), ...) {
  # BUG FIX: the original read, tested, and returned `m`, which was never
  # defined anywhere in this function; the cached value lives in `v`.
  v <- x$getmatrix()
  if (!is.null(v)) {
    message("retrieving cached data")
    return(v)
  }
  mat <- x$get()
  v <- solve(mat, ...)  # `...` forwards extra arguments to solve()
  x$setmatrix(v)
  v
}
|
9624b8d0fddb2687e73ede1e8abd986abc899650
|
a6d290acb31b1ec2df14f8713c11f61f254dbe33
|
/Ideas/geog4ga3.Rcheck/00_pkg_src/geog4ga3/man/Paez_Mart.Rd
|
c524a6263b6ced558f7b6eea2c814eba4c8930a5
|
[] |
no_license
|
snowdj/Spatial-Statistics-Course
|
c1e9f66af3515dfd5d9673702d08d2c2068dd97d
|
fbf4e815b5eecdb804a474444a8417ac1a39085a
|
refs/heads/master
| 2021-05-23T17:39:26.129887
| 2020-03-29T21:34:54
| 2020-03-29T21:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 672
|
rd
|
Paez_Mart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog4ga3.R
\docType{data}
\name{Paez_Mart}
\alias{Paez_Mart}
\title{Paez_Mart}
\format{A data frame with 395 rows and 3 variables}
\source{
Data frame developed by A Paez in 2008
}
\usage{
data(Paez_Mart)
}
\description{
A dataset containing planned locations of convenience stores in Toronto.
The variables are as follows:
}
\details{
\itemize{
\item x. list of x coordinates for a project to cover Toronto with convenience stores (\-79.54108--\-79.17440)
\item y. list of y coordinates for a project to cover Toronto with convenience stores (43.58793--43.84853)
}
}
\keyword{datasets}
|
7f6cb61e3b53e017d5f65dcc35550d6325404b4d
|
4bcf7ad50dd79c619ed271b32f31029e9495a128
|
/data/Lake/process_Lake_dataset.R
|
633c680fccfe80342b9aba93a148305865183f84
|
[] |
no_license
|
kevinc13/single-cell-deep-learning
|
11db2497654193433835522dbaae00a2cab29f18
|
3600353d798f03909655150ec6113f7f449b210b
|
refs/heads/master
| 2020-08-04T04:51:41.141189
| 2019-10-01T04:56:53
| 2019-10-01T04:56:53
| 212,012,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
process_Lake_dataset.R
|
# Load the serialized Lake single-cell dataset for downstream processing.
library(Biobase)
library(data.table)
library(scater)
# NOTE(review): setwd() in a script makes it machine-specific and
# non-portable; the readRDS() path below is relative to this directory.
setwd(paste("~/Documents/Research/XinghuaLuLab/single-cell-deep-learning/",
            "data/Lake", sep=""))
# Deserialize the raw dataset object (structure not visible from here).
lake <- readRDS("original/lake.rds")
|
8d875015d8b416c7045bd860fa93e3882d8b1986
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.application.integration/man/locationservice_batch_put_geofence.Rd
|
c8e4fe2fad0057a1fc664d4b96a30d0926acb5be
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 942
|
rd
|
locationservice_batch_put_geofence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locationservice_operations.R
\name{locationservice_batch_put_geofence}
\alias{locationservice_batch_put_geofence}
\title{A batch request for storing geofence geometries into a given geofence
collection, or updates the geometry of an existing geofence if a
geofence ID is included in the request}
\usage{
locationservice_batch_put_geofence(CollectionName, Entries)
}
\arguments{
\item{CollectionName}{[required] The geofence collection storing the geofences.}
\item{Entries}{[required] The batch of geofences to be stored in a geofence collection.}
}
\description{
A batch request for storing geofence geometries into a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.
See \url{https://www.paws-r-sdk.com/docs/locationservice_batch_put_geofence/} for full documentation.
}
\keyword{internal}
|
0c63ee16fc790c7cec0fd59b0000fca3749e68dd
|
9246290d41983050547f8c2547a4f7ac99b916b2
|
/man/createEnvDataFrame.Rd
|
35bfb740b75311070769396e8e551cd0a912cb9e
|
[] |
no_license
|
ABoVE-AotM/above
|
88f6ebbae5f31668ae14af0924fa5ae4c63f8548
|
38beba4117809c46b6c10deb1c4c005949ac1a6c
|
refs/heads/master
| 2021-05-22T11:54:21.681864
| 2020-05-28T05:04:29
| 2020-05-28T05:04:29
| 83,578,459
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,098
|
rd
|
createEnvDataFrame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createEnvDataDataFrame.R
\name{createEnvDataFrame}
\alias{createEnvDataFrame}
\title{Convert data frame to Movebank Env-Data request file}
\usage{
createEnvDataFrame(lats, lons, times, savefile = TRUE, fileout = NULL)
}
\arguments{
\item{savefile}{whether to save a csv file}
\item{fileout}{name of csv file to save}
\item{lats, lons}{vectors of latitude and longitude}

\item{times}{vectors of POSIX times}
}
\value{
Either nothing (if csv file saved) or the character string data frame with correct formatting.
}
\description{
Takes a data frame of latitudes, longitudes and times and generates the strictly formatted data frame needed to upload to Env-Data to obtain covariates for movebank.
}
\examples{
lats <- seq(38.8, 39.0, length = 40)
lons <- seq(-77.12, -76.91, length = 40)
times <- seq(ymd("2010-12-31"), ymd("2011-12-31"), length = 40)
example <- createEnvDataFrame(lats, lons, times, savefile = FALSE)
head(example)
}
\seealso{
createEnvDataGrid, createEnvDataRequest, uploadEnvDataRequest
}
|
6143fda2d2c0f5ea45643e0fc1e00c2d581c0242
|
d3be5a411632b06e90aa7f96ae77e2d47be68e9b
|
/Potionmaster/app.R
|
63d63f0a7c93a57e826629b08d8623a82cbecffc
|
[] |
no_license
|
mpawliwllevac/R-Scripts-Reference
|
737a9d6dbf53f7a68f0b88e581cfb7fcba501e15
|
eb7e452e4ab57424af0ea6d5b50eafc0b7a407b9
|
refs/heads/master
| 2022-12-27T15:07:23.322794
| 2020-10-04T15:57:58
| 2020-10-04T15:57:58
| 287,603,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,063
|
r
|
app.R
|
library(shiny)
library(tidyverse)
library(DT)
library(shinydashboard)
library(kableExtra)
library(dplyr)
# To upload:
# library(rsconnect)
# deployApp("C:/Users/Matthew/Dropbox/R Working Directory/RShiny/folder")
# Global ggplot2 theme applied to every plot rendered by the app.
theme_update(
  plot.margin= unit(c(0.25,0.25,0.25,0.25), "cm"),
  plot.title = element_text (colour="black", size=14,hjust = 0.5, face = "bold"),
  plot.subtitle = element_text (colour="black", size=10,hjust = 0.5),
  panel.background = element_rect(fill="NA"),
  panel.border = element_blank(),
  panel.spacing = unit(1, "lines"),
  panel.grid.major.y = element_line(colour="grey90"),
  panel.grid.minor.y = element_line(colour="NA"),
  panel.grid.major.x = element_line(colour="NA"),
  panel.grid.minor.x = element_line(colour="NA"),
  axis.text.y = element_text (colour="black", size=10, hjust=1),
  axis.title.y = element_text (colour="black", size=12, angle=90),
  axis.text.x = element_text (colour="black", size=10,angle=0),
  axis.title.x = element_text (colour="black", size=12),
  axis.ticks = element_blank(),
  legend.text = element_text (colour="black", size = 12),
  legend.position = ("right"),
  legend.title = element_blank(),
  legend.key = element_blank()
)
# Brand colours used throughout the plots.
gedsbGreen <- "#59AD46"
gedsbBlue <- "#04559F"
gedsbGreen2 <- "#8CE079"
gedsbBlue2 <- "#51A2EC"
# Province display names and their two-letter codes (parallel vectors,
# same order; used by plyr::mapvalues in the server).
new <- c("Alberta", "British Columbia", "Manitoba", "New Brunswick", "Newfoundland\n& Labrodor", "Nova Scotia", "Ontario", "Prince Edward Island", "Quebec", "Saskatchewan")
old <- c("AB", "BC", "MB", "NB", "NL", "NS", "ON", "PE", "QC", "SK")
# Simulated demographics and four assignment grades for 500 students.
# NOTE(review): sample() is used without set.seed(), so the data differ
# on every app start.
race_list <- c("Black", "Indigenous", "Latino", "Middle Eastern", "South-East Asian", "South Asian", "White")
samp_race <- sample(race_list, 500, replace = TRUE)
assign1_range <-c(40:100, 50:90, 70:85, 70:85)
assign1_grade <- sample(assign1_range, 500, replace=T)
assign2_range <- c(40:100, 60:90, 70:95, 75:85)
assign2_grade <- sample(assign2_range, 500, replace=T)
assign3_range <- c(40:100, 50:100, 65:100, 75:100)
assign3_grade <- sample(assign3_range, 500, replace=T)
assign4_range <- c(40:100, 50:100, 65:100, 75:100)
assign4_grade <- sample(assign4_range, 500, replace=T)
# Attach the simulated columns to the roster read from disk, compute each
# student's current average, and drop columns the app never uses.
samples <-read.csv("ca-500.csv") %>%
  bind_cols(race = samp_race, assign1 = assign1_grade, assign2 = assign2_grade, assign3 = assign3_grade, assign4 = assign4_grade) %>%
  mutate(`Current Average` = (assign1+assign2+assign3+assign4)/4) %>%
  select(-company_name, -phone2, -phone1, -web)
# NOTE(review): this icon() call's return value is discarded, so it
# appears to have no effect; possibly leftover experimentation.
icon(name = "pencil", class = "small_icon_test")
# Shiny server: filters the simulated student data by the sidebar's race
# and province selections, then renders the plots, value boxes and tables
# referenced from the UI.
server <- function(input, output, session) {
  # When run non-interactively, terminate R once the browser session ends.
  if (!interactive()) {
    session$onSessionEnded(function() {
      stopApp()
      q("no")
    })
  }
  # Currently selected races / provinces from the sidebar checkboxes.
  data_race <- reactive({ # we use data from reactive like a function but we can just have it output the finished product
    input$checkGroup
    #print(test)
    #test
  })
  data_prov <- reactive({
    input$checkprov
  })
  # Density plot of the filtered students' current averages.
  output$sampleplot <- renderPlot({
    samples %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      ggplot(aes(x=`Current Average`))+
      geom_density(fill = gedsbBlue2, color = gedsbBlue)+
      scale_y_continuous(limits = c(0,0.08))+
      scale_x_continuous(labels=c("50%", "60%", "70%", "80%", "90%", "100%"), limits = c(45,100), breaks = c(50, 60, 70, 80, 90, 100))+
      labs(title = "Distribution of Student Grades",
           x= "Grade", y= "Density")
  })
  # Histogram of the filtered students' current averages.
  output$histogram <- renderPlot({
    samples %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      ggplot(aes(x= `Current Average`))+
      geom_histogram(bins = 15, fill = gedsbBlue2, color = gedsbBlue)+
      scale_x_continuous(labels=c("50%", "60%", "70%", "80%", "90%", "100%"),limits = c(45,100), breaks = c(50, 60, 70, 80, 90, 100))+
      labs(title = "Distribution of Student Grades",
           x= "Grade", y= "Count")
  })
  # NOTE(review): "ibox" is never referenced in the UI below, so this
  # output appears unused.
  output$ibox <- renderInfoBox({
    infoBox(
      "Title",
      input$count,
      icon = icon("percentage")
    )
  })
  # Value box showing the mean grade of the current selection.
  output$vbox <- renderValueBox({
    valueBox(
      paste0("Class Avg: ",
             samples %>%
               filter(race %in% data_race(), province %in% data_prov()) %>%
               select(`Current Average`) %>%
               summarise(temp = round(mean(`Current Average`),2)), "% "
      ),
      subtitle = NULL,
      icon = icon("pencil")
    )
  })
  # Sidebar box showing how many students match the current filters.
  output$samp_size <- renderValueBox({
    valueBox("",
             paste0("Students Selected:",
                    samples %>%
                      filter(race %in% data_race(), province %in% data_prov()) %>%
                      select(`Current Average`) %>%
                      summarise(temp = n())
             ),
             color = "black"
    )
    # valueBox("title", 2+2)
  })
  # output$grade_boxplots <- renderPlot({
  #   samples %>%
  #     filter(race %in% data_race(), province %in% data_prov()) %>%
  #     select(starts_with("assign"), `Current Average`) %>%
  #     gather(key = assignment, value = mark) %>%
  #     mutate(mark = mark/100) %>%
  #     ggplot(aes(x=assignment, y=mark))+
  #     geom_boxplot(color = "#222D32", fill = gedsbBlue2)+
  #     labs(title = "Comparisons of Student Evaluations",
  #          x= "",
  #          y="Grade"
  #     )+
  #     scale_y_continuous(labels=scales::percent, limits = c(0.45,1), breaks = c(0.5, 0.6,0.7,0.8,0.9,1))
  # })
  #
  # Interactive raw-data table with per-column filters (no paging).
  output$tableDT <- DT::renderDataTable(samples %>% filter(race %in% data_race(), province %in% data_prov()) ,
                                        options = list(paging = F),
                                        rownames = F,
                                        filter = "top")
  # Violin + overlaid boxplot comparing the four assignments and the average.
  output$grade_violin <- renderPlot({
    samples %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      select(starts_with("assign"), `Current Average`) %>%
      gather(key = assignment, value = mark) %>%
      mutate(mark = mark/100) %>%
      ggplot(aes(x=assignment, y=mark))+
      labs(title = "Comparisons of Student Evaluations",
           x= "",
           y="Grade"
      )+
      scale_y_continuous(labels=scales::percent, limits = c(0.45,1), breaks = c(0.5, 0.6,0.7,0.8,0.9,1))+
      geom_violin(color = "#222D32", fill = gedsbBlue2)+
      geom_boxplot(alpha = 0.01, width = 0.15)
  })
  # HTML summary table: count and share of selected students per province.
  # (Plain function, as required by tableOutput + knitr::kable pipelines.)
  output$kable_prov <- function() {
    samples %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      mutate(province = plyr::mapvalues(province, from = old, to = new),
             province = factor(province, levels =new)) %>%
      group_by(province, .drop=FALSE) %>%
      summarise(n=n()) %>%
      mutate(prop = n/nrow(samples %>% filter(race %in% data_race(), province %in% data_prov())),
             temp = paste0(n, " (", round(prop, 2)*100, "%)")
      ) %>%
      arrange(desc(n)) %>%
      transmute(Province = province, `n(%)` = temp) %>%
      #spread(key = province, value = temp) %>%
      knitr::kable("html", caption = "Selected Students by Province") %>%
      kable_styling("striped", full_width = F)
  }
  # HTML summary table: count and share of selected students per race.
  output$kable_race <- function(){
    samples %>%
      mutate(race = factor(race, levels = race_list)) %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      group_by(race, .drop=FALSE) %>%
      summarise(n=n()) %>%
      mutate(prop = n/nrow(samples %>% filter(race %in% data_race(), province %in% data_prov())),
             temp = paste0(n, " (", round(prop, 2)*100, "%)")) %>%
      arrange(desc(n)) %>%
      transmute(Race = race, `n(%)` = temp) %>%
      #spread(key = race, value = temp) %>%
      knitr::kable("html", caption = "Selected Students by Race") %>%
      kable_styling("striped", full_width = F)
  }
  # HTML summary table: selected students binned into grade bands.
  output$kable_grade <- function(){
    samples %>%
      filter(race %in% data_race(), province %in% data_prov()) %>%
      mutate(grade_bin = ifelse(`Current Average` <50, "<50%",
                                ifelse(
                                  `Current Average` <60, "50% - 59%",
                                  ifelse(
                                    `Current Average` <70, "60% - 69%",
                                    ifelse(
                                      `Current Average` <80, "70% - 79%",
                                      ifelse(
                                        `Current Average` <90, "80% - 89%", "90% - 100%"))))),
             grade_bin = factor(grade_bin, levels = c("90% - 100%", "80% - 89%", "70% - 79%", "60% - 69%", "50% - 59%", "<50%"))
      ) %>%
      group_by(grade_bin, .drop = FALSE) %>%
      summarise(n=n()) %>%
      mutate(prop = n/nrow(samples %>% filter(race %in% data_race(), province %in% data_prov())),
             temp = paste0(n, " (", round(prop, 2)*100, "%)")) %>%
      transmute(`Current Grade` = grade_bin, `n(%)` = temp) %>%
      #spread(key = grade_bin, value = temp) %>%
      knitr::kable("html", caption = "Selected Students by Grade") %>%
      kable_styling("striped", full_width = F)
  }
  # "Select All" toggle for the race checkboxes: odd clicks select every
  # race, even clicks clear the group; click count 0 means untouched.
  observe({ #running this requires the session argument in the main server function
    if(input$selectall == 0) return(NULL)
    else if (input$selectall%%2 == 0)
    {
      updateCheckboxGroupInput(session,"checkGroup",choices=race_list)
    }
    else
    {
      updateCheckboxGroupInput(session,"checkGroup",choices=race_list, selected=race_list)
    }
  })
  # Same "Select All" toggle behaviour for the province checkboxes.
  observe({ #running this requires the session argument in the main server function
    if(input$selectallprov == 0) return(NULL)
    else if (input$selectallprov%%2 == 0)
    {
      updateCheckboxGroupInput(session,
                               "checkprov",
                               choices=list("Alberta" = "AB","British Columbia"= "BC","Manitoba"= "MB","New Brunswick"= "NB","Newfoundland & Labrodor"= "NL","Nova Scotia"= "NS","Ontario"= "ON","Prince Edward Island"= "PE","Quebec"= "QC","Saskatchewan"= "SK"))
    }
    else
    {
      updateCheckboxGroupInput(session,
                               "checkprov",
                               choices=list("Alberta" = "AB","British Columbia"= "BC","Manitoba"= "MB","New Brunswick"= "NB","Newfoundland & Labrodor"= "NL","Nova Scotia"= "NS","Ontario"= "ON","Prince Edward Island"= "PE","Quebec"= "QC","Saskatchewan"= "SK"),
                               selected=old)
    }
  })
}
####
# UI: dashboard with a filter sidebar (class, race, province) and four
# tabs: overview plots, evaluation comparison, summary tables, raw data.
ui <- dashboardPage(
  dashboardHeader(title = "Potionmaster Dashboard", titleWidth = 300),
  dashboardSidebar(
    valueBoxOutput("samp_size", width = 11),
    # NOTE(review): "classname" is not read anywhere in the server; the
    # class selector currently has no effect.
    selectInput("classname", label = h3("Class"), choices = c("Grade 10 AP", "Grade 8 Honors")),
    hr(),
    checkboxGroupInput("checkGroup", label = h3("Race"),
                       choices = list("Black", "Indigenous", "Latino", "Middle Eastern", "South-East Asian", "South Asian", "White"),
                       selected = race_list),
    actionLink("selectall","Select All"),
    checkboxGroupInput("checkprov", label = h3("Province"),
                       choices = list("Alberta" = "AB","British Columbia"= "BC","Manitoba"= "MB","New Brunswick"= "NB","Newfoundland & Labrodor"= "NL","Nova Scotia"= "NS","Ontario"= "ON","Prince Edward Island"= "PE","Quebec"= "QC","Saskatchewan"= "SK"),
                       selected = c("AB", "BC", "MB", "NB", "NL", "NS", "ON", "PE", "QC", "SK")),
    actionLink("selectallprov","Select All")
  ),
  dashboardBody(
    tabsetPanel(
      tabPanel(
        "Class Overview",
        fluidRow(
          tags$head(
            # Enlarge the font-awesome icons used inside value boxes.
            tags$style(HTML(".fa { font-size: 35px; }"))
          ),
          valueBoxOutput("vbox", width = 6)
        ),
        plotOutput("histogram"),
        plotOutput("sampleplot")
      ),
      tabPanel("Evaluations",
               # plotOutput("grade_boxplots"),
               plotOutput("grade_violin")
      ),
      tabPanel(
        "Summary Data",
        column(tableOutput("kable_prov"),width = 4),
        column(tableOutput("kable_race"),width = 4),
        tableOutput("kable_grade")
      ),
      tabPanel("Raw Student Data",
               DT::dataTableOutput("tableDT")
      )
    )
  ))
####
# Launch the application.
shinyApp(ui, server)
|
13eedfd1d1598a33f4873998c40d308509578218
|
0fd22566d3a72d3b0fc321e2a5d6803e4e88a9d9
|
/lib/Comparison/DiffBind.r
|
dfdf596acc9c6e3897831098972acc0785f0e956
|
[
"Apache-2.0"
] |
permissive
|
marisolrs/ngsperl
|
fdf0a8c0ad659782017dae309c4443d0f2cea313
|
6a26d2fa282a816a74bc7af1513466aca34709ea
|
refs/heads/master
| 2020-03-28T06:28:10.110835
| 2018-09-06T14:55:06
| 2018-09-06T14:55:06
| 147,836,773
| 0
| 0
| null | 2018-09-07T14:47:04
| 2018-09-07T14:47:04
| null |
UTF-8
|
R
| false
| false
| 1,994
|
r
|
DiffBind.r
|
# Differential binding analysis with DiffBind.
#
# Command line arguments:
#   1. configFile     - DiffBind sample sheet (tab separated, with header).
#   2. comparisonFile - tab separated table; per row: comparison name,
#                       condition of group 1, condition of group 2.
#   3. outputPrefix   - path prefix for the per-comparison result tables.
#
# For every comparison two files are written:
#   <prefix>.<name>.tsv     - all sites with counts and statistics.
#   <prefix>.<name>.sig.tsv - sites passing the FDR / fold-change filter.
options(bitmapType='cairo')

args = commandArgs(trailingOnly = TRUE)

# Thresholds used to call a site significant.
pvalue<-0.05
foldChange<-1.5

library(DiffBind)

DEBUG=0
if(!DEBUG){
  configFile=args[1]
  comparisonFile=args[2]
  outputPrefix=args[3]
}else{
  configFile="/scratch/shavertm/20170512_atac-seq/bwa_macs2callpeak_narrow_CallAsSingleEnd_diffbind/result/Genotypes/Genotypes.config.txt"
  comparisonFile="/scratch/shavertm/20170512_atac-seq/bwa_macs2callpeak_narrow_CallAsSingleEnd_diffbind/result/Genotypes/Genotypes.comparison.txt"
  outputPrefix="/scratch/shavertm/20170512_atac-seq/bwa_macs2callpeak_narrow_CallAsSingleEnd_diffbind/result/Genotypes/Genotypes"
}

cat("configFile=", configFile, "\n")
cat("comparisonFile=", comparisonFile, "\n")
cat("outputPrefix=", outputPrefix, "\n")

samplesheet<-read.table(configFile, sep="\t", header=TRUE, stringsAsFactors=FALSE)

# Build the binding matrix with raw read counts per site.
mb1<-dba(sampleSheet=samplesheet, bCorPlot=FALSE)
mb1<-dba.count(mb1,score=DBA_SCORE_READS)

# BUG FIX: the original passed `se="\t"`, which only worked through
# partial argument matching; spell out `sep`.
comparisons<-read.table(comparisonFile, sep="\t", header=TRUE, stringsAsFactors = FALSE)

for (idx in seq_len(nrow(comparisons))){
  compName<-comparisons[idx, 1]
  compPrefix<-paste0(outputPrefix, ".", compName)

  group1name<-comparisons[idx, 2]
  group2name<-comparisons[idx, 3]
  group1<-samplesheet$Condition == group1name
  group2<-samplesheet$Condition == group2name

  mb2<-dba.contrast(mb1,group1=group1, group2=group2, name1=group1name, name2=group2name, categories=c(DBA_CONDITION), minMembers=2)
  mb2<-dba.analyze(mb2, bSubControl=FALSE, bFullLibrarySize=TRUE, bTagwise=FALSE, bCorPlot=FALSE)
  # th=1 keeps every site in the report so the full table can be saved.
  res<-dba.report(mb2,bCounts=TRUE,th=1)
  write.table(as.data.frame(res,row.names=NULL),file=paste0(compPrefix, ".tsv"),quote=FALSE,sep="\t",row.names=FALSE)

  # BUG FIX: the original filter `(Fold >= fc) | (Fold <= fc)` is TRUE for
  # every site; significance requires at least a foldChange shift in
  # EITHER direction, i.e. |Fold| >= foldChange.
  select<-(!is.na(res$FDR)) & (res$FDR<pvalue) & (abs(res$Fold) >= foldChange)
  res_sig<-res[select,]
  write.table(as.data.frame(res_sig,row.names=NULL),file=paste0(compPrefix, ".sig.tsv"),quote=FALSE,sep="\t",row.names=FALSE)
}
|
f9e012fe0f3544100bf31228ba2605613683f0f5
|
3cc6265e82e373d377dae488831cfdb1caad1dfe
|
/codedepends/simple2.R
|
e6407f89ce1b19d7990a11a224bb17d0be4e2e41
|
[] |
no_license
|
clarkfitzg/phd_research
|
439ecc0d650da23bfad1e1a212e490c2746a6656
|
dfe46c49f6beba54389b0074e19f3c9b1ea04645
|
refs/heads/master
| 2020-04-15T14:02:03.890862
| 2019-09-20T02:33:07
| 2019-09-20T02:33:07
| 59,333,323
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
simple2.R
|
# Fri Jan 27 10:28:50 PST 2017
# Simple script as test case
# Fits a linear model to random data and plots the residuals.
# NOTE(review): no set.seed() call, so the output differs between runs.
n = 10
y = rnorm(n)
x = 1:n
# Never used
z = runif(n)
df_xy = data.frame(x = x , y = y)
fit = lm(y ~ x, data = df_xy)
# Residuals vs. x; opens the default graphics device.
plot(x, residuals(fit))
|
311b1c120cac70b293afe5c24d1c0f64ba5e1db3
|
442cd73dafb1a10df4d7546f20c4a912e56403cb
|
/test_repo.R
|
9843f8261af494bc5a3d8ebd360624903775cf71
|
[] |
no_license
|
devine8845/test_repo
|
d6a3fc115a98acf0ef4aea01b3af389998e73c55
|
ef9b052646494fdd8cfb8ace68ef698eac64b961
|
refs/heads/master
| 2020-04-18T08:08:35.853898
| 2019-01-24T15:55:48
| 2019-01-24T15:55:48
| 167,386,013
| 0
| 1
| null | 2019-01-24T15:55:49
| 2019-01-24T15:02:38
|
R
|
UTF-8
|
R
| false
| false
| 119
|
r
|
test_repo.R
|
# test
# devine8845/test_repo
# Smoke-test script: loads the tidyverse and prints two views of the
# built-in `starwars` dataset.
library(tidyverse)
starwars %>% glimpse()            # column overview
starwars %>% select(mass, species)  # two-column tibble, auto-printed
|
184804668b92b418a4c98356d1691f3994c39377
|
8fd3836f4292a8a3d917e9737f037afabb5db502
|
/R/consec.R
|
2c25de36578bef8ce83b25a345b65e11a43cf6cd
|
[] |
no_license
|
everydayduffy/climvars
|
beb8202b60db00012273a6fac50c8548274fcc1e
|
886a5d2642cc132642563ab3fffcc6b9994706cf
|
refs/heads/master
| 2023-06-12T15:42:48.936770
| 2021-07-09T08:59:09
| 2021-07-09T08:59:09
| 256,446,786
| 0
| 0
| null | 2020-04-17T08:32:12
| 2020-04-17T08:32:11
| null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
consec.R
|
#' consec: Calculate consecutive measurements below or above a specified
#' threshold.
#'
#' @description `consec` is used to find the longest run of consecutive
#' values above or below a user-defined threshold.
#'
#' @param x a vector of values for which a threshold is to be applied.
#' @param threshold a value with which to threshold the data in `x`.
#' @param below a logical indicating whether values above or below the threshold
#' are to be considered. Default = TRUE.
#'
#' @return a single numeric value of the longest run of measurements that adhere
#' to the threshold criteria, or 0 if no measurement meets the criteria.
#' @export
#'
#' @examples
#' # calculate waterlogged hours
#' set.seed(100)
#' rain <- rgamma(365,4) * rbinom(365,1,0.3)
#' evap <- rep(1.15,365)
#' cn <- 90
#' s_min <- 0.074
#' s_max <- 0.422
#' s_depth <- 0.5
#' sm <- soil_moisture(rain, evap, cn)
#' wlh <- consec(sm, threshold = 0.422, below = FALSE)
consec <- function(x, threshold, below = TRUE) {
  # Flag each measurement that satisfies the threshold criterion.
  # (Note the roxygen block previously documented this argument as `var`
  # although the parameter has always been `x`; docs fixed above.)
  meets <- if (below) x < threshold else x > threshold
  runs <- rle(as.integer(meets))
  hit <- which(runs$values == 1L)
  # BUG FIX: with no qualifying run the original returned max(integer(0))
  # == -Inf with a warning; return 0 instead.
  if (length(hit) == 0) {
    return(0L)
  }
  max(runs$lengths[hit])
}
|
a72de0ad5fcac56d4c7409fa70add294bcfe64b8
|
a21e9a2003bbee8b38607874cbe15db96bd12dd2
|
/clean.R
|
9cb65f5555c43c6922bb806345a8a8c1470f5534
|
[] |
no_license
|
aronlindberg/VOSS-Sequencing-Toolkit
|
53f6af8d46c5b026fecb1c0b32fdef3133da3d60
|
2fa54ed3c3f2d421eb35a5ced2b1e990b9e49f41
|
refs/heads/master
| 2016-09-08T01:21:43.117669
| 2013-09-24T14:23:46
| 2013-09-24T14:23:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
clean.R
|
# Insert data cleaning/transformation commands here. Possibly the parser (JSON->CSV->Sequence format)?
# Needs to result in a file named "cleaned_data.rdata", which is then loaded in do.R
|
48b62d18d45f6ae7363013be308b2aa0a20ad49d
|
bad538073a6ed5bd9f8f0653db02fdd433137380
|
/packages/RSuite/R/13_load_prj_parameters.R
|
4e0ae494448a317c920cf62f36b332c36006b7b8
|
[
"Apache-2.0"
] |
permissive
|
WLOGSolutions/RSuite
|
5372cfc031b518e38c765b574a99944324a46275
|
da56a5d1f4a835c9b84b6cebc1df6046044edebe
|
refs/heads/master
| 2021-06-08T14:07:27.322986
| 2021-05-03T14:09:11
| 2021-05-03T14:09:11
| 104,214,650
| 157
| 11
|
Apache-2.0
| 2021-05-03T08:27:54
| 2017-09-20T12:48:44
|
R
|
UTF-8
|
R
| false
| false
| 4,684
|
r
|
13_load_prj_parameters.R
|
#----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#
# Utilities for management of project parameters.
#----------------------------------------------------------------------------
#'
#' Loads project parameters from specified path.
#'
#' @param prj_path path to base directory of project (the one containing
#' PARAMETERS file). (type: character)
#' @return object of rsuite_project_parameters
#'
#' @keywords internal
#' @noRd
#'
load_prj_parameters <- function(prj_path) {
  assert(is.character(prj_path) && length(prj_path) == 1,
         "character(1) expected for prj_path")
  assert(file.exists(file.path(prj_path, "PARAMETERS")),
         "No project PARAMETERS file found at %s", prj_path)

  dcf <- read.dcf(file.path(prj_path, "PARAMETERS"))

  # Read a single field from the PARAMETERS dcf, falling back to `default`
  # when the field is absent. Replaces the repeated scalar ifelse() calls;
  # R's lazy evaluation keeps the same semantics (the default expression,
  # e.g. current_rver(), is only evaluated when the field is missing).
  get_field <- function(name, default) {
    if (name %in% colnames(dcf)) dcf[1, name] else default
  }

  params <- list(
    prj_path = rsuite_fullUnifiedPath(prj_path),
    rsuite_ver = get_field("RSuiteVersion", NA),
    r_ver = get_field("RVersion", current_rver()), # from 97_rversion.R
    # Date of CRAN snapshot to look for dependencies in.
    # if empty will use official CRAN and newest package versions available
    snapshot_date = get_field("SnapshotDate", ""),
    pkgs_path = rsuite_fullUnifiedPath(file.path(prj_path,
                                                 get_field("PackagesPath", "packages"))),
    script_path = rsuite_fullUnifiedPath(file.path(prj_path,
                                                   get_field("ScriptPath", "R"))),
    irepo_path = rsuite_fullUnifiedPath(file.path(prj_path, "deployment", "intrepo")),
    # Specifies where to put local project environment
    lib_path = rsuite_fullUnifiedPath(file.path(prj_path, "deployment", "libs")),
    # Specifies where to put user installed libraries
    sbox_path = rsuite_fullUnifiedPath(file.path(prj_path, "deployment", "sbox")),
    # Specifies where to put environment lock file
    lock_path = rsuite_fullUnifiedPath(file.path(prj_path, "deployment", "env.lock")),
    zip_version = get_field("ZipVersion", ""),
    project = get_field("Project", basename(prj_path)),
    artifacts = get_field("Artifacts", ""),
    excludes = get_field("Excludes", ""),
    # repo_adapters to use for the project
    repo_adapters = get_field("Repositories", "CRAN"),
    # This defines which type of packages are expected on the platform
    # and how to build project packages.
    pkgs_type = switch(get_os_type(),
                       windows = "win.binary",
                       macos = .Platform$pkgType, # e.g. mac.binary.el-capitan
                       "source"),
    aux_pkgs_type = switch(get_os_type(),
                           windows = "source",
                           macos = "source",
                           "binary"),
    bin_pkgs_type = switch(get_os_type(),
                           windows = "win.binary",
                           macos = .Platform$pkgType, # e.g. mac.binary.el-capitan
                           "binary")
  )

  # Ensure the deployment directories exist (created silently).
  if (!dir.exists(params$lib_path)) {
    dir.create(params$lib_path, recursive = TRUE, showWarnings = FALSE)
  }
  if (!dir.exists(params$sbox_path)) {
    dir.create(params$sbox_path, recursive = TRUE, showWarnings = FALSE)
  }

  # Project name with filesystem-unsafe characters replaced by '_'.
  params$get_safe_project_name <- function() {
    gsub("[\\/\"\'<>]+", "_", params$project)
  }

  # Names of the repo adapters configured in the Repositories field.
  params$get_repo_adapter_names <- function() {
    specs <- unlist(strsplit(params$repo_adapters, ","))
    return(names(parse_repo_adapters_spec(specs)))
  }

  # Argument attached to a given repo adapter (optionally restricted to
  # the ix-th spec), or `default` when none is given.
  params$get_repo_adapter_arg <- function(repo_adapter_name, default, ix) {
    specs <- unlist(strsplit(params$repo_adapters, ","))
    parsed_specs <- parse_repo_adapters_spec(specs)
    if (!is.na(ix)) {
      parsed_specs <- parsed_specs[ix]
    }
    arg <- parsed_specs[names(parsed_specs) == repo_adapter_name]
    arg[is.null(arg) || nchar(arg) == 0] <- default
    names(arg) <- NULL
    return(arg)
  }

  # Lazily initialize and return the internal repository path.
  params$get_intern_repo_path <- function() {
    intern_mgr <- repo_manager_dir_create(params$irepo_path, params$pkgs_type, params$r_ver)
    repo_manager_init(intern_mgr)
    return(rsuite_fullUnifiedPath(params$irepo_path))
  }

  class(params) <- "rsuite_project_params"
  return(params)
}
|
ff85d0a056a9d7e91f063094b27a2a97c40e3bbf
|
272defd587476d83f956c25bdfe2227d940dfe03
|
/R/ipcf-cross-all-st.R
|
38c6b9dab2f597a642e728c076af501606c426f1
|
[] |
no_license
|
antiphon/Kcross
|
cb867df8165b68835f464060b0a9c18685f3d491
|
4314df5a8f544f1f835a784582c1042e32af1431
|
refs/heads/master
| 2021-01-11T08:42:39.039491
| 2019-05-09T11:26:19
| 2019-05-09T11:26:19
| 76,651,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
r
|
ipcf-cross-all-st.R
|
#' Space-Time SOIRS Kcross-PCF
#'
#'
#' @param x point pattern as an n x (d+2) -matrix of coordinates
#' @param int intensity at points
#' @param r vector of spatial lags
#' @param t vector of temporal lags
#' @param bbox bounding box for translation correction
#' @param sigmas vector c(ss, st) with space and time bandwidth (sd of gaussian), respectively
#' @param do_correction translation correction? only sensible for cuboidal regions
#' @details
#' Input matrix x of dimensions n x (d+2): columns 1:d taken as space dimensions, d+1 taken as the time dimension and d+2 taken as the type.
#'
#' Note that the bandwidths are fixed for all pairs of types.
#' This is not optimal for highly imbalanced patterns.
#'
#' @return array of dimensions (ntype, ntype, nt, nr)
#'
#' @export
ipcf_st_cross_all_box <- function(x, int, r, t, bbox, sigmas, do_correction = TRUE) {
  xy <- as.matrix(x)
  n <- nrow(xy)
  d <- ncol(xy)
  # Last column of x encodes the (integer) type mark; see roxygen above.
  types <- as.integer(xy[,d])
  ntypes <- length(unique(types))
  # check int
  m <- "intensities needed. Vector, int per point."
  if(missing(int)) stop(m)
  if(length(int) != n) stop(m)
  # check bbox for trans
  # NOTE(review): this fallback spans columns 1:(d-1), i.e. space AND the
  # time column; confirm whether time belongs in the translation bbox.
  if(missing(bbox)) bbox <- apply(xy[,1:(d-1)], 2, range) # in case
  # check bandwidths
  m <- "smoothing bandwidths (gaussian sd's) needed. Vector of length 2."
  if(missing(sigmas)) stop(m)
  if(length(sigmas) != 2) stop(m)
  # go
  # Heavy lifting happens in compiled code; V is indexed per (r, t) pair
  # as V[[(ir-1)*nt + it]] (see the loop below).
  V <- c_ipcf_st_cross_2d_box(xy, bbox, ntypes, types,
                              int,
                              r, t,
                              sigmas,
                              as.integer( do_correction) )
  #
  # Window volume used in the intensity-weighted scaling below.
  vol <- prod( apply(bbox, 2, diff) )
  nt <- length(t)
  nr <- length(r)
  # scaling
  # Per-type scaling constants built from inverse intensities at the points.
  Ss <- sapply(split(int, types), function(x) vol/sum(1/x))
  S <- matrix(Ss, ntypes, ntypes)
  if(!do_correction) S <- S / vol
  # compile the pcf's
  if(ntypes > 1) {
    # Multitype case: (ntype, ntype, nt, nr) array, scaled matrix per lag.
    G <- array(dim = c(ntypes, ntypes, nt, nr))
    for(ir in 1:nr)
      for(it in 1:nt){
        G[,,it,ir] <- matrix(V[[(ir-1)*nt + it]], ncol = ntypes, nrow = ntypes) * S
      }
  }
  else{
    #browser()
    # Single type: collapse the result to an nr x nt matrix.
    # NOTE(review): the S scaling is not applied in this branch -- confirm
    # whether that is intentional.
    G <- t( matrix(unlist(V), nrow = nt ) )
  }
  # done
  G
}
|
33fd389a0c26c19562a3a7fd06165c9a4c12465a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rstanarm/R/predict.R
|
3f75cd24016b8865e95f27ae9223dfd5a459511c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,908
|
r
|
predict.R
|
# Part of the rstanarm package for estimating model parameters
# Copyright (C) 2015, 2016 Trustees of Columbia University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#' Predict method for stanreg objects
#'
#' This method is primarily intended to be used only for models fit using
#' optimization. For models fit using MCMC or one of the variational
#' approximations, see \code{\link{posterior_predict}}.
#'
#' @export
#' @templateVar stanregArg object
#' @template args-stanreg-object
#' @param ... Ignored.
#' @param newdata Optionally, a data frame in which to look for variables with
#' which to predict. If omitted, the model matrix is used.
#' @param type The type of prediction. The default \code{'link'} is on the scale
#' of the linear predictors; the alternative \code{'response'} is on the scale
#' of the response variable.
#' @param se.fit A logical scalar indicating if standard errors should be
#' returned. The default is \code{FALSE}.
#'
#' @return A vector if \code{se.fit} is \code{FALSE} and a list if \code{se.fit}
#' is \code{TRUE}.
#'
#' @seealso \code{\link{posterior_predict}}
#'
predict.stanreg <- function(object, ..., newdata = NULL,
                            type = c("link", "response"), se.fit = FALSE) {
  type <- match.arg(type)
  # Fast path: without standard errors or new data, the quantities already
  # stored on the fitted object can be returned directly.
  if (!se.fit && is.null(newdata)) {
    preds <- if (type == "link")
      object$linear.predictors else object$fitted.values
    return(preds)
  }
  # Response-scale predictions for MCMC / variational fits must come from
  # posterior_predict() instead; refuse them here.
  if (!used.optimizing(object) && type == "response")
    stop("type='response' should not be used for models estimated by MCMC",
         "\nor variational approximation. Instead, use posterior_predict() ",
         "to draw \nfrom the posterior predictive distribution.",
         call. = FALSE)
  # Design matrix (and offset) for the requested data.
  dat <- pp_data(object, newdata)
  stanmat <- as.matrix.stanreg(object)
  # Coefficient draws matching the design-matrix columns.
  beta <- stanmat[, seq_len(ncol(dat$x))]
  eta <- linear_predictor(beta, dat$x, dat$offset)
  if (type == "response") {
    inverse_link <- linkinv(object)
    eta <- inverse_link(eta)
    # NOTE(review): for polr fits with an "alpha" draw, each draw's inverse
    # link is raised to the power alpha -- confirm against rstanarm docs.
    if (is(object, "polr") && ("alpha" %in% colnames(stanmat)))
      eta <- apply(eta, 1L, FUN = `^`, e2 = stanmat[, "alpha"])
  }
  # Point estimate: mean over posterior draws, per observation.
  fit <- colMeans(eta)
  if (!se.fit)
    return(fit)
  # Standard error: sd over draws, per observation.
  se.fit <- apply(eta, 2L, sd)
  nlist(fit, se.fit)
}
|
3b8f8a5a6203d2a9b55c63a094bd385aa443ea2b
|
08481da2b6d3690aa157a161f9df284c802a5177
|
/man/create_nmrdata.Rd
|
3d2e4478ad26ae4766b6a5cb65a248f975f1f4cc
|
[
"MIT"
] |
permissive
|
brgordon17/coralclass
|
605dfedaaaf48dfd4ad589b6aaf3c7d0bfc44603
|
18de22b48a3bf0cff99c2c82bb206d92d5a53058
|
refs/heads/master
| 2020-06-24T11:01:51.980043
| 2020-06-15T11:02:49
| 2020-06-15T11:02:49
| 198,944,888
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,442
|
rd
|
create_nmrdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_nmrdata.R
\name{create_nmrdata}
\alias{create_nmrdata}
\title{create nmrdata.}
\usage{
create_nmrdata(savecsv = FALSE, saverda = TRUE, csvname = "nmrdata",
remove.vars = TRUE, vars = c("3.31"), ...)
}
\arguments{
\item{savecsv}{Logical indicating if output should be saved as a \code{.csv}
file to the current working directory}
\item{saverda}{Logical indicating if a .rda file should be saved to /data}
\item{csvname}{The name of the output .csv file to be saved if TRUE}
\item{remove.vars}{Logical indicating if user-defined variables should be
removed}
\item{vars}{A character vector of variables to be removed}
\item{...}{Other arguments passed on to individual methods}
}
\value{
Returns a dataframe of class tbl_df
}
\description{
\code{create_nmrdata()} preprocesses the 1H-NMR data used for modelling
in this PhD chapter.
}
\details{
\code{create_nmrdata()} takes the binned data from Bruker's Amix and creates
new categorical variables based on the sample ID's. It then removes any
unwanted variables as defined by the user (e.g. residual methanol or water
peaks) before saving the results to \code{./data}
}
\note{
\code{create_nmrdata()} was not intended to be used outside of this
package.
}
\seealso{
\href{https://www.bruker.com/products/mr/nmr/nmr-software/nmr-software/amix/overview.html}{Bruker AMIX}
}
\author{
Benjamin R. Gordon
}
|
1c6277f15315efd47b3a515ebccc0b84160ee144
|
011ef657c4c1fd79cb4ac68d86a9d04e70056e6e
|
/tests/testthat/test-Binary.R
|
e60e980c8b91090ec84cee4dd4cefe9d99ca64f2
|
[] |
no_license
|
cdcepi/predx
|
8cf3d52c6a2c75a277f5de08b8f8793810c74ee3
|
82917511064bdaebaf9982eec98be2169b8d1288
|
refs/heads/master
| 2021-06-24T16:26:32.315548
| 2019-12-27T16:35:16
| 2019-12-27T16:35:16
| 174,559,039
| 6
| 3
| null | 2019-10-28T16:40:07
| 2019-03-08T15:13:15
|
R
|
UTF-8
|
R
| false
| false
| 1,353
|
r
|
test-Binary.R
|
context("Binary")

# Every valid probability, including the 0 and 1 endpoints, must yield a
# Binary object.
test_that("Binary accepts probabilities", {
  for (p in c(0.9, 0, 1)) {
    expect_is(Binary(p), "Binary")
  }
})

# Anything other than a single numeric probability in [0, 1] is rejected.
test_that("Binary rejects non probabilities", {
  expect_silent(Binary(0.9))
  bad_inputs <- list("0.9", NA, -0.5, 1.1, c(0.3, 0.7))
  for (bad in bad_inputs) {
    expect_error(Binary(bad))
  }
})

# Data frames with a 'prob' column convert cleanly into predx objects.
test_that("Binary data frame objects convert to predx", {
  frames <- list(data.frame(prob = 0.1), data.frame(prob = 0.5))
  expect_silent(to_predx(frames, rep('Binary', 2)))
})

# JSON and data.frame generics round-trip the stored probability.
test_that("Generics function", {
  obj <- Binary(0.5)
  expect_equal(predx_to_json(obj), list(prob = 0.5))
  expect_equal(as.data.frame(obj), data.frame(prob = 0.5))
})

# CSV round trip preserves the forecast contents.
test_that("CSV import/export works", {
  fcast <- dplyr::tibble(target = 'x', predx_class = 'Binary',
                         predx = list(Binary(0.5)))
  path <- tempfile()
  export_csv(fcast, path)
  roundtrip <- import_csv(path)
  expect_equal(as.data.frame(roundtrip), as.data.frame(fcast))
})

# JSON round trip preserves the forecast contents.
test_that("JSON import/export works", {
  fcast <- dplyr::tibble(target = 'x', predx_class = 'Binary',
                         predx = list(Binary(0.5)))
  path <- tempfile()
  export_json(fcast, path)
  roundtrip <- import_json(path)
  expect_equal(as.data.frame(roundtrip), as.data.frame(fcast))
})
|
620980f425ee79053b1106ee6bbca4a44e78fc33
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bayesplot/examples/MCMC-traces.Rd.R
|
cfc788e252aa919a23a3f5ac7004392cad3eacbd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,587
|
r
|
MCMC-traces.Rd.R
|
# Auto-extracted example script (genthat) for bayesplot's MCMC trace plots.
# The '## Not run:' / '##D' lines mirror help-page examples that are left
# unexecuted on purpose (they require rstanarm and fitting a model).
library(bayesplot)
### Name: MCMC-traces
### Title: Trace plot (time series plot) of MCMC draws
### Aliases: MCMC-traces mcmc_trace mcmc_trace_highlight trace_style_np
### ** Examples
# some parameter draws to use for demonstration
x <- example_mcmc_draws(chains = 4, params = 6)
dim(x)
dimnames(x)
# trace plots of the betas
color_scheme_set("viridis")
mcmc_trace(x, regex_pars = "beta")
## No test:
color_scheme_set("viridisA")
mcmc_trace(x, regex_pars = "beta")
color_scheme_set("viridisC")
mcmc_trace(x, regex_pars = "beta")
## End(No test)
# mix color schemes
color_scheme_set("mix-blue-red")
mcmc_trace(x, regex_pars = "beta")
# use traditional ggplot discrete color scale
mcmc_trace(x, pars = c("alpha", "sigma")) +
  ggplot2::scale_color_discrete()
# zoom in on a window of iterations, increase line size,
# add tick marks, move legend to the top, add gray background
color_scheme_set("viridisA")
mcmc_trace(x[,, 1:4], window = c(100, 130), size = 1) +
  panel_bg(fill = "gray90", color = NA) +
  legend_move("top")
## Not run:
##D # parse facet label text
##D color_scheme_set("purple")
##D p <- mcmc_trace(
##D   x,
##D   regex_pars = "beta\\[[1,3]\\]",
##D   facet_args = list(labeller = ggplot2::label_parsed)
##D )
##D p + facet_text(size = 15)
##D
##D # mark first 100 draws as warmup
##D mcmc_trace(x, n_warmup = 100)
##D
##D # plot as points, highlighting chain 2
##D color_scheme_set("brightblue")
##D mcmc_trace_highlight(x, pars = "sigma", highlight = 2, size = 2)
##D
##D # for models fit using HMC/NUTS divergences can be displayed in the trace plot
##D library("rstanarm")
##D fit <- stan_glm(mpg ~ ., data = mtcars,
##D   # next line to keep example fast and also ensure we get some divergences
##D                 prior = hs(), iter = 400, adapt_delta = 0.8)
##D
##D # extract draws using as.array (instead of as.matrix) to keep
##D # chains separate for trace plot
##D posterior <- as.array(fit)
##D
##D # for stanfit and stanreg objects use nuts_params() to get the divergences
##D mcmc_trace(posterior, pars = "sigma", np = nuts_params(fit))
##D
##D color_scheme_set("viridis")
##D mcmc_trace(
##D   posterior,
##D   pars = c("wt", "sigma"),
##D   size = 0.5,
##D   facet_args = list(nrow = 2),
##D   np = nuts_params(fit),
##D   np_style = trace_style_np(div_color = "black", div_size = 0.5)
##D )
##D
##D color_scheme_set("viridis")
##D mcmc_trace(
##D   posterior,
##D   pars = c("wt", "sigma"),
##D   size = 0.8,
##D   facet_args = list(nrow = 2),
##D   divergences = nuts_params(fit),
##D   div_color = "black"
##D )
## End(Not run)
|
61a0051efad2cd9b65041309cdcd45da8070f4f6
|
7d38918f07c1ea0a00c3a1e0af7edec7aa17dde0
|
/man/rWBclimate.Rd
|
8ed4b0fb39d63212e193f0c22d84073233cdc3e0
|
[] |
no_license
|
cran/rWBclimate
|
8da14296a6330d0387a7c03c483dbd1b1282c907
|
90b18d09b5bbcb6cb09bf914f52ff90e66633aff
|
refs/heads/master
| 2021-01-19T03:18:37.288168
| 2014-04-18T00:00:00
| 2014-04-18T00:00:00
| 18,929,953
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 129
|
rd
|
rWBclimate.Rd
|
\docType{package}
\name{rWBclimate}
\alias{rWBclimate}
\alias{rWBclimate-package}
\title{rWBclimate}
\description{
rWBclimate
}
|
2c00a6f42c69761e9f540a1d124e2288fa51e7df
|
18d59963400b3e6b116c5ba12fb111cca0e6ff0c
|
/r-files/run-monte-carlo.R
|
f072f18eab51f5754f1d1611128d8abdf9f9347f
|
[
"Apache-2.0"
] |
permissive
|
kbrannan/Big-Elk-Cadmus-HydCal-Updated-WDM
|
10000608e9455e0c9b53a505a8dfff7788b8935e
|
7fc4fe34667fda0d0e5bbabcd7126423f726bf54
|
refs/heads/master
| 2020-12-18T12:34:52.880254
| 2016-08-03T22:40:15
| 2016-08-03T22:40:15
| 55,079,166
| 0
| 0
| null | 2016-05-09T22:35:50
| 2016-03-30T16:30:58
|
TeX
|
UTF-8
|
R
| false
| false
| 1,226
|
r
|
run-monte-carlo.R
|
## Script to run the Monte Carlo analysis for calib.pst.
## Follows the procedure recommended by John Doherty (2016-04-04):
## 1. Calibrate
## 2. Get the posterior covariance matrix using PREDUNC7
## 3. Sample that
## 4. Put each sample into a PEST control file using PARREP
## 5. Run ADDREG1 to add regularisation which tells the parameters to stay as close to initial values as possible.
## 6. Set PHIMLIM to a suitable value - a little above the best measurement objective function that you got for calibration.
## 7. Run PEST with /i switch using JCO obtained on basis of calibrated parameter field (first iteration is thus free)
## 8. Repeat steps 4-7.

## Path to the uncertainty-analysis directory (UNC path, forward slashes).
chr.unc.dir <- "\\\\deqhq1/tmdl/TMDL_WR/MidCoast/Models/Bacteria/HSPF/Big-Elk-Cadmus-HydCal-Updated-WDM/upd-uncert"
## Remember the current directory so it can be restored at the end of the script.
chr.curwd.dir <- getwd()

## File names
chr.pst.cal <- "calib.pst" ## PEST control with calibrated parameter values
chr.rec.cal <- "calib.rec" ## PEST record for calibrated parameter values

## Windows-style version of the path (forward slashes -> backslashes).
## BUG FIX: the original computed this gsub() and discarded the result;
## capture it once and reuse it below.
chr.unc.dir.win <- gsub("/", "\\\\", chr.unc.dir)

setwd(chr.unc.dir)
shell(paste0("cd ", chr.unc.dir.win))
## BUG FIX: the original 'shell("m:\ ; dir")' contained the invalid escape
## sequence "\ " and failed to parse; the backslash is now escaped properly.
## NOTE(review): intent appears to be "switch to M: then list files" - confirm.
shell("m:\\ ; dir")

## Reset back to the original working directory.
setwd(chr.curwd.dir)
|
16702f6cf7a7e21f2ea554471d853e1b68fa263f
|
f1c70a922f34dfb38bcd9b949db5f6da1a3ab57a
|
/man/format_numbers.Rd
|
6a7e7b9c990917fa97b716d495529b0ef13fe0a7
|
[
"MIT"
] |
permissive
|
neyhartj/neyhart
|
0d8705edd9888119ba58f1f5716f0b84858a87da
|
a33d6aae63446c1c6046cba35ea0fe0846ccf9af
|
refs/heads/master
| 2022-10-24T06:08:39.844403
| 2022-10-07T17:56:44
| 2022-10-07T17:56:44
| 60,227,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 447
|
rd
|
format_numbers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{format_numbers}
\alias{format_numbers}
\title{Format numbers for printing}
\usage{
format_numbers(x, signif.digits = 3L)
}
\arguments{
\item{x}{A numeric vector.}
\item{signif.digits}{An integer specifying the number of significant digits}
}
\description{
Formats numbers using specified significant digits for printing tables or graphs
}
|
5dd7391ab2f72fb1687fa85ae922b5f01b95d6f2
|
91622d34905a1ac8c585cc6205418e9b4e499167
|
/R/thetaUncon2thetaList.R
|
338bc686667460bb58ee3f7cd1f095794efd88c2
|
[] |
no_license
|
minghao2016/fHMM
|
46766503e2ee7c177b288bdd4b0221e67c559150
|
d5fe6baeb41f2f003ddf19c5619d51042d44ffd7
|
refs/heads/master
| 2023-03-12T22:58:13.100210
| 2021-02-23T14:55:05
| 2021-02-23T14:55:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
thetaUncon2thetaList.R
|
#' Bring unconstrained model parameters into list form
#'
#' @param thetaUncon Unconstrained model parameters in vector form.
#' @param controls A list of controls.
#' @return Constrained model parameters in list form.
thetaUncon2thetaList <- function(thetaUncon, controls) {
  # First constrain the parameter vector, then reshape it into a list.
  thetaCon <- thetaUncon2thetaCon(thetaUncon, controls)
  thetaCon2thetaList(thetaCon, controls)
}
|
90cadcaa2777889b69c054adefa73dfacbe0dd36
|
2a0d1fc07d673b8c7cf07c6596dac2630ae3fd7c
|
/scripts/geocache-weight.R
|
e5df348ed20f49fb25eb1dd5dac00cbf433a6f48
|
[] |
no_license
|
stelmacm/WNS
|
aef0320ac63d789590cc229e7f24e2fe248c036d
|
a455ce91c547c8174444d8fb811b4236c2ae38f3
|
refs/heads/master
| 2022-04-29T11:23:05.577810
| 2022-03-04T14:31:02
| 2022-03-04T14:31:02
| 241,282,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,471
|
r
|
geocache-weight.R
|
# Summary of geocache (GC) finds at the county level.
# County polygons are kept so their centroids can be extracted later.

# Pull out a unique list of county polygons, excluding California and
# Washington for now.
# BUG FIX: the original used `STATEPROV != c("California", "Washington")`,
# which recycles the two-element vector against the column and only excludes
# a row when its position happens to line up with the matching state.
uniq.df <- presence.df %>%
  filter(!(STATEPROV %in% c("California", "Washington")))
uniq.df <- uniq.df[!duplicated(uniq.df$county), ]

# Convert each county polygon to its centroid, then reproject to WGS84.
united.xy <- uniq.df$geoms %>% st_centroid() %>%
  st_transform(., "+proj=longlat +datum=WGS84")

# Number of distinct users with qualifying log types per county and year.
county_visits <- presence.scrape %>%
  filter(Type %in% c("Type.found_it", "Type.didnt_find_it", "Type.owner_maintenance", "Type.publish_listing")) %>%
  group_by(county, year) %>%
  distinct(User) %>%
  summarise(total = length(User))

# Keep only records that carry a geocache code.
just.gc <- presence.scrape %>% filter(!is.na(GC))

# Count users shared between every pair of counties, per year.
shared.users <- NULL
for (i.year in unique(just.gc$year)) {
  s <- filter(just.gc, year == i.year)
  for (county1 in unique(s$county)) {
    for (county2 in rev(unique(s$county))) {
      num.shared <- length(intersect(as.character(s[which(s$county == county1), ]$User),
                                     as.character(s[which(s$county == county2), ]$User)))
      # BUG FIX: the original bound the free variable `year` (not defined in
      # this scope) instead of the loop variable `i.year`; keep the column
      # name "year" because downstream code refers to shared.users$year.
      shared.users <- as.data.frame(rbind(shared.users,
                                          cbind(year = i.year, county1, county2, num.shared)))
    }
  }
}

# Fill in missing county pairs, drop self-pairs, and coerce column types
# (cbind() above produced character columns).
shared.users <- expand(shared.users, county1, county2, year) %>% left_join(shared.users)
shared.users <- shared.users[shared.users$county1 != shared.users$county2, ]
shared.users$year <- as.numeric(as.character(shared.users$year))
shared.users$county1 <- as.character(shared.users$county1)
shared.users$county2 <- as.character(shared.users$county2)

# Merge the pairwise counts back onto the original data.
all.shared.users <- presence.df %>%
  left_join(shared.users, by = c("year" = "year", "county" = "county1"))

# Put 0 where there was no traffic between caves.
all.shared.users$num.shared <- as.numeric(replace_na(all.shared.users$num.shared, 0))

# Binary incidence: 1 once a WNS confirmation year is recorded.
all.shared.users$incidence <- ifelse(all.shared.users$YR_CONFIRM == " ", 0, 1)

# Per-date counts of infected / uninfected counties, cumulative infections.
county_rate <- all.shared.users %>%
  arrange(date) %>%
  group_by(date) %>%
  summarise(county.inf.count = sum(incidence > 0),
            uninf.counties = sum(incidence == 0)) %>%
  mutate(inf.counties = cumsum(county.inf.count))

# Attach the cumulative counts back by date.
all.shared.users$inf.counties <- county_rate[match(all.shared.users$date, county_rate$date), ]$inf.counties
all.shared.users$uninf.counties <- county_rate[match(all.shared.users$date, county_rate$date), ]$uninf.counties

# Which counties are touching? (upper triangle only, one row per pair)
touching <- st_intersects(uniq.df$geoms, sparse = F)
touching.m <- as.matrix(touching)
rownames(touching.m) <- colnames(touching.m) <- uniq.df$county
touching.m2 <- reshape2::melt(touching.m)[reshape2::melt(upper.tri(touching.m))$value, ]
names(touching.m2) <- c("county", "county2", "touching")

# Merge GC weights with adjacency score: 1 = touching, 0 = not touching.
both.weights <- left_join(all.shared.users, touching.m2, by = c("county", "county2"))
both.weights$touching <- if_else(is.na(both.weights$touching), 0, 1)

# Save
write.csv(both.weights, "data/gc-shared-users.csv")

ggplot(uniq.df$geoms) +
  borders("world") +
  borders("state") +
  geom_sf(aes(fill = uniq.df$WNS_MAP_YR)) +
  coord_sf(xlim = c(-100, -60), ylim = c(32, 50)) +
  # coord_sf(xlim = c(-100, -57.5), ylim = c(35, 50)) +
  theme_bw()
|
256a77502ffe7aeee6f69433d3438622f0d5b921
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_query42_1344/query04_query42_1344.R
|
52fd4134e9e73bafad8d364820d0dabc424e86cd
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query04_query42_1344.R
|
2c849bc775f9c3b3738411302b579f9f query04_query42_1344.qdimacs 732 2172
|
6911a7c9b27561d20a3a275be80b7fcc3b286522
|
aa52164361900c4ce7c708c7a4cb28b03aa68970
|
/Models/Simple+Interactions+more-modified.R
|
161ebad333f6dced67d65c19574088be8412c56d
|
[
"MIT"
] |
permissive
|
kvdesai/wikipedia-challenge
|
3297561a48e7aa0eddf02e51e00e2864d300fc41
|
6d558b0b50c67cc37410ccefeba9471c1374ff94
|
refs/heads/master
| 2020-03-17T01:16:19.281604
| 2018-05-17T13:26:14
| 2018-05-17T13:26:14
| 133,146,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,766
|
r
|
Simple+Interactions+more-modified.R
|
# Wikipedia-challenge submission script: fits a segmented linear model of
# future edits and writes leaderboard predictions.
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace -
# an anti-pattern, kept here because downstream code may rely on a clean env.
#source('D:/ICDM11/mygit/Kalpit/not_so_simple9a.r')
rm(list = ls(all.names=T))
#setwd('D:/ICDM11/wikichallenge_data_all/Honourable Mention')
# FitValidatePredictAll() and friends come from this helper file.
source('Models/helper_functions.r')
data_train = read.csv("AllFeatureSets/training_features.csv", header = TRUE);
data_lead = read.csv("AllFeatureSets/leaderboard_features.csv", header = TRUE);
###################################################################################################
#### Fit our latest best model - 12/08/2011
###################################################################################################
y.train = data_train$edit;
x.train = data_train[, 1:119];
x.lead = data_lead;
#### More features
# Edit rate over the active lifetime (LSDT/FSDT presumably last/first seen
# dates in days - TODO confirm against the feature-building code).
x.train$rate = data_train$sum_edits/(data_train$LSDT- data_train$FSDT+1);
x.lead$rate = data_lead$sum_edits/(data_lead$LSDT- data_lead$FSDT+1);
### interaction 1: geometric mean of the two lagged edit counts
### (sqrt(a*b) and sqrt(a)*sqrt(b) are equal for non-negative inputs)
x.train$int1 = sqrt(x.train$edit_p*x.train$edit_pp);
x.lead$int1 = sqrt(x.lead$edit_p)*sqrt(x.lead$edit_pp);
### interaction 2: recent-activity product
x.train$int2 = x.train$UniqDays5months*x.train$Edits_mth_cutoff_minus_0;
x.lead$int2 = x.lead$UniqDays5months*x.lead$Edits_mth_cutoff_minus_0;
# Feature sets for the three editor segments.
# NOTE(review): model2 and model3 are identical - confirm that is intended.
model1 = c("edit_p", "edit_pp", "int2", "UniqDays5months", "Edits_mth_cutoff_minus_0", "edit_days_month_last", "rate", "sum_articles" );
model2 = c("edit_p", "edit_pp", "int1", "int2", "UniqDays5months", "Edits_mth_cutoff_minus_0", "edit_days_month_last", "rate", "sum_articles" );
model3 = c("edit_p", "edit_pp", "int1", "int2", "UniqDays5months", "Edits_mth_cutoff_minus_0", "edit_days_month_last", "rate", "sum_articles" );
#### Initialize beta (length = number of features + 1 for the intercept)
beta1 = rep(0, 9);
beta2 = rep(0, 10);
beta3 = rep(0, 10);
f19 = FitValidatePredictAll(y.train, x.train, x.lead, model1, model2, model3, beta1, beta2, beta3);
l19 = f19$pred.lead;
#mat = cbind(editor_id = data_lead[,1], solution = l19);
#write.csv(mat, file = "Simple+Interactions+more.csv", row.names = FALSE);
#### Generate training predictions
# x.train$seg is the editor segment - presumably assigned inside
# FitValidatePredictAll(); verify against helper_functions.r.
fit = rep(0, length(y.train));
fit[x.train$seg ==1] = as.matrix(cbind(1, x.train[x.train$seg ==1, model1]))%*%f19$seg1.beta;
fit[x.train$seg ==2] = as.matrix(cbind(1, x.train[x.train$seg ==2, model2]))%*%f19$seg2.beta;
fit[x.train$seg ==3] = as.matrix(cbind(1, x.train[x.train$seg ==3, model3]))%*%f19$seg3.beta;
# Edit counts cannot be negative; clip the linear-model output at zero.
fit[fit <0] =0;
#### Modify edits of high error editors
# Editors badly under-predicted (log error > 3) keep their observed count.
err = log(y.train+1) - log(fit +1);
sum(err >3)
high_err_editors = data_train$editor_id[err >3];
high_err_edits = data_train$edit[err >3];
l20 = l19;
for(j in 1:length(high_err_editors)){
  l20[data_lead$editor_id == high_err_editors[j]] = high_err_edits[j];
}
mat = cbind(editor_id = data_lead[,1], solution = l20);
write.csv(mat, file = "Simple+Interactions+more-modified.csv", row.names = FALSE);
|
a04652277eeae9930f3faa2b86749f5ca3b24ec3
|
411837f5378044985c31f1014b276857f358511e
|
/Dataset_videogamesales/vgsales_DecisionTree with Convert sales.r
|
302bcdc55cbcc5173e0da8e0ec9a1470b19e23e6
|
[
"Apache-2.0"
] |
permissive
|
siliconninja/CSSE286-VideoGameSaleTeam
|
6580555874f959967361e7901d959eec065efa6f
|
aced01e75f52b91858aab8e7103a12d8005c276b
|
refs/heads/main
| 2023-08-15T23:47:35.393873
| 2021-10-14T14:04:08
| 2021-10-14T14:04:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,385
|
r
|
vgsales_DecisionTree with Convert sales.r
|
# Load the video-game sales data set (vgsales.csv, Kaggle-style columns -
# TODO confirm schema against the source file).
vgsales <- read.csv("vgsales.csv")
table(vgsales$Genre)
# Fixed seed so the train/test split further down is reproducible.
set.seed(123)
str(vgsales)
# Drop columns by position: the first column twice in a row (each call
# removes the then-current first column), then the 4th remaining column.
vgsales <- vgsales[-1]
vgsales <- vgsales[-1]
vgsales <- vgsales[-c(4)]
# Discretize sales figures (in millions of units) into three bands:
#   > 1      -> "High"
#   < 0.1    -> "Low"
#   otherwise "Medium"  (boundaries 0.1 and 1 inclusive fall here)
# Vectorized via ifelse(), so it works element-wise on whole columns.
# FIX: the original assigned the result back to `x` (a pointless copy that
# also made the return value invisible); return the ifelse() result directly.
convert_sales <- function(x) {
  ifelse(x > 1, "High", ifelse(x < 0.1, "Low", "Medium"))
}
# Discretize every regional sales column into Low/Medium/High.
# IMPROVEMENT: convert_sales() is already vectorized (ifelse), so call it on
# the whole column instead of the original element-wise sapply() - identical
# result, far less overhead.
vgsales$NA_Sales <- convert_sales(vgsales$NA_Sales)
vgsales$EU_Sales <- convert_sales(vgsales$EU_Sales)
vgsales$JP_Sales <- convert_sales(vgsales$JP_Sales)
vgsales$Other_Sales <- convert_sales(vgsales$Other_Sales)
vgsales$Global_Sales <- convert_sales(vgsales$Global_Sales)

# Class distribution of the discretized columns.
table(vgsales$NA_Sales)
table(vgsales$EU_Sales)
table(vgsales$JP_Sales)
table(vgsales$Global_Sales)
table(vgsales$Other_Sales)

# 90/10 train/test split.
# BUG FIX: the row count was hard-coded as sample(16598, 14938); derive both
# numbers from the data so the script survives data set updates. With the
# original 16598-row file and set.seed(123) above, the draw is unchanged
# (floor(0.9 * 16598) == 14938).
train_sample <- sample(nrow(vgsales), floor(0.9 * nrow(vgsales)))
str(train_sample)
vgsales_train <- vgsales[train_sample, ]
vgsales_test <- vgsales[-train_sample, ]
# NOTE(review): one specific test row is dropped without explanation -
# presumably a problematic record; confirm against the data.
vgsales_test <- vgsales_test[-c(1439), ]

# Check that Genre proportions are similar across the splits.
prop.table(table(vgsales$Genre))
prop.table(table(vgsales_train$Genre))
prop.table(table(vgsales_test$Genre))

# Fit a C5.0 decision tree predicting Genre from the remaining columns
# (column 6 is excluded from the predictors, as in the original).
library(C50)
vgsales_model <- C5.0(vgsales_train[-6], as.factor(vgsales_train$Genre))
vgsales_model
summary(vgsales_model)

vgsales_pred <- predict(vgsales_model, vgsales_test)

# Cross-tabulate actual vs. predicted genre.
library(gmodels)
CrossTable(vgsales_test$Genre, vgsales_pred,
           prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,
           dnn = c('actual Genre', 'predicted Genre'))
|
7d8868fb26dbdbbe5d0f2e63585273aefa268ac2
|
ea6a12f3aa403ff73b4146104e52ab06acb3c190
|
/run_analysis.R
|
ecea0d1215d42f47ecffe9809fa7ab6f0acb36eb
|
[] |
no_license
|
louislouisloui/project_week4
|
dd040c799612f4e015163fdadc8322d098a02c32
|
5f614f0f89bfb1f70d1877fe7cd602ccef6665f5
|
refs/heads/master
| 2021-01-10T08:17:43.419836
| 2016-03-04T00:36:09
| 2016-03-04T00:36:09
| 52,871,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,546
|
r
|
run_analysis.R
|
# Read the UCI HAR *test* partition and return one table with the activity
# code, subject id, and all 561 named feature columns, bound column-wise.
# NOTE(review): require() is used for loading here; library() would fail
# loudly if dplyr is missing - consider switching.
extract_test <- function() {
  require (dplyr)
  # Load activity codes, subject ids, raw measurements, and feature names.
  tbl_y_test <- tbl_df(read.table(file = "./data/UCI HAR Dataset/test/y_test.txt"))
  tbl_subject_test <- tbl_df(read.table(file = "./data/UCI HAR Dataset/test/subject_test.txt"))
  tbl_x_test <- tbl_df(read.table(file = "./data/UCI HAR Dataset/test/X_test.txt"))
  tbl_features <- tbl_df(read.table(file = "./data/UCI HAR Dataset/features.txt"))
  # Label the single-column tables.
  names(tbl_y_test) <- "ActivityNumber"
  names(tbl_subject_test) <- "SubjectNumber"
  # Feature names contain characters illegal in R names (e.g. "()"), so
  # force syntactic, unique column names.
  names(tbl_x_test) <- make.names(names = as.character(tbl_features$V2), unique = TRUE)
  # Column-bind everything; the assignment makes the return value invisible.
  result_test <- cbind(tbl_y_test,tbl_subject_test,tbl_x_test)
}
# Read the UCI HAR *train* partition and return one table with the activity
# code, subject id, and all named feature columns, bound column-wise.
extract_train <- function () {
  require (dplyr)
  # Load activity codes, subject ids, raw measurements, and feature names.
  activity_codes <- tbl_df(read.table(file = "./data/UCI HAR Dataset/train/y_train.txt"))
  subject_ids <- tbl_df(read.table(file = "./data/UCI HAR Dataset/train/subject_train.txt"))
  measurements <- tbl_df(read.table(file = "./data/UCI HAR Dataset/train/X_train.txt"))
  feature_names <- tbl_df(read.table(file = "./data/UCI HAR Dataset/features.txt"))
  # Label the single-column tables.
  names(activity_codes) <- "ActivityNumber"
  names(subject_ids) <- "SubjectNumber"
  # Force syntactic, unique column names for the measurement table.
  names(measurements) <- make.names(names = as.character(feature_names$V2), unique = TRUE)
  # Column-bind everything; assignment keeps the return value invisible,
  # matching extract_test().
  result_train <- cbind(activity_codes, subject_ids, measurements)
}
# Stack the test rows on top of the training rows into one combined table.
join_test_train <- function() {
  require (dplyr)
  # Final assignment keeps the return value invisible, like the extractors.
  combined <- rbind(extract_test(), extract_train())
}
# Build the combined table and keep only the mean/std feature columns,
# plus the activity and subject identifier columns.
filter_mean_std <- function() {
  require(dplyr)
  full_tbl <- join_test_train()
  # Positions of every column whose name mentions "mean" or "std".
  mean_std_cols <- grep(names(full_tbl), pattern = "mean|std", value = FALSE)
  full_tbl <- select(full_tbl, ActivityNumber, SubjectNumber, mean_std_cols)
}
# Replace the numeric ActivityNumber column with a human-readable
# ActivityName column looked up from activity_labels.txt.
name_activities <- function() {
  require(dplyr)
  # Extract the activity numbers from the filtered table.
  tbl <- filter_mean_std()
  activities_number <- select(tbl, ActivityNumber)
  activities_labels <- read.table(file = "./data/UCI HAR Dataset/activity_labels.txt")
  # Index the label column by activity number (labels are assumed to be in
  # 1..n order in the file - TODO confirm). lapply over the one-column data
  # frame yields a one-element list of looked-up names.
  activities_name <- lapply(activities_number, function(x) {activities_labels$V2[x]})
  names(activities_name) <- "ActivityName"
  # Append the names and drop the numeric code column.
  tbl <- cbind(tbl,activities_name)
  tbl <- select(tbl, -ActivityNumber)
}
# Rewrite column names into UpperCamelCase-style descriptive names:
# leading "f"/"t" become "Frequency"/"Time", and ".std"/".mean" become
# "Std"/"Mean". Only the first match per name is replaced (sub, not gsub).
tidy_names <- function () {
  require(dplyr)
  #We are going to follow the UpperCamelCase naming convention
  #Get the table
  tbl <- name_activities()
  #Tidy the spatial dimension
  names(tbl) <- unlist(lapply(names(tbl),function(x){sub(x = x,pattern="^f", replacement = "Frequency")}))
  names(tbl) <- unlist(lapply(names(tbl),function(x){sub(x = x,pattern="^t", replacement = "Time")}))
  # NOTE(review): the "." in ".std"/".mean" is a regex wildcard, so any
  # character before std/mean matches. For this data set the preceding
  # character is always a literal dot, so the result is the same, but
  # "\\.std" / "\\.mean" would be safer.
  names(tbl) <- unlist(lapply(names(tbl),function(x){sub(x = x,pattern=".std", replacement = "Std")}))
  names(tbl) <- unlist(lapply(names(tbl),function(x){sub(x = x,pattern=".mean", replacement = "Mean")}))
  # Final assignment keeps the return value invisible.
  result <- tbl
}
# Produce the tidy summary: the mean of every measurement column, grouped
# by activity name and subject number.
# NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
# summarise(across(everything(), mean)) is the current equivalent.
average_bygroup <- function () {
  require(dplyr)
  # Get the table
  tbl <- tidy_names()
  # Group the data
  tbl <- group_by(tbl, ActivityName, SubjectNumber)
  # Generate the table of per-group means (assignment -> invisible return).
  tbl <- summarise_each(tbl,funs(mean))
}
|
d7f5ba68aa071b542b0802550312e483af67d045
|
c53ed8d506bd5b805ab284b854841af32ad51c42
|
/man/atomic.Rd
|
02c6821e08296f50fdafefe490a2ba33b176e2f0
|
[] |
no_license
|
hdhe/animaker
|
478b5b3da0e2e272301bad00d50e104cf380d105
|
b7f4481312d90609ea1807f32e3eb29860d1253c
|
refs/heads/master
| 2020-05-29T11:39:47.299017
| 2013-06-10T05:55:30
| 2013-06-10T05:55:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 816
|
rd
|
atomic.Rd
|
\name{atomic}
\alias{atomic}
\title{
Create an atomic animation
}
\description{
An atomic animation describes an animation by providing two key
parameters, the starting time and the duration. A label can be given
to describe what an animation is supposed to be doing.
}
\usage{
atomic(start = 0, durn = 0, label = labelSel())
}
\arguments{
\item{start}{
The starting time of the animation.
}
\item{durn}{
The duration of the animation.
}
\item{label}{
A label used to describe the animation.
}
}
\details{
If \code{durn} is \code{NA}, the duration of the animation will fill
to the remaining space in its container.
}
\value{
An atomic animation object.
}
\author{
Paul Murrell
}
\examples{
a <- atomic(start = 2, durn = 4, label = "a")
b <- atomic(durn = 3, label = "b")
a
}
|
234f972715bfff71d898de79d7723d7f4252d2e2
|
16bacff32811d40d33b9c9a5a8a9cce76e2478a2
|
/wilcox_tests/wilcox_test_RLCC.R
|
3d898bbde6d1e18d40177bc8a9529b0d79811464
|
[
"MIT"
] |
permissive
|
wangpanqiao/BioScripts
|
c6f792035a782dfbdac01da3e0069fef6a49227a
|
8285ae1078f2be42f2c92f85850d58ebcfd69ef6
|
refs/heads/master
| 2022-04-12T16:28:34.944651
| 2020-02-28T18:47:37
| 2020-02-28T18:47:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,485
|
r
|
wilcox_test_RLCC.R
|
# Wilcoxon rank-sum comparison of case vs. control RLCC values (in days),
# with a rank plot of both groups.
# NOTE(review): install.packages() inside an analysis script installs on
# every run - better moved to a setup step; likewise require() -> library(),
# and the hard-coded setwd() ties the script to one machine.
install.packages("coin")
require(coin)
library(ggplot2)
getwd()
setwd("d:\\Hernan\\Proyectos\\Por-Usuario\\Gaby\\")
# Control group values are read from an external file; case values inline.
grupo_control_RLCC <- scan("joined_2019-12-20_20-15-51_nsp.txt")
grupo_caso_RLCC <- c(3650, 730, 2555, 365, 2190, 1460, 1825, 730, 2920, 4380, 2920, 1095, 1095, 1095, 730, 2920, 365, 1825, 3285, 4015, 1095, 365, 2920, 730, 2555, 1095, 2920, 1460, 2190, 1460, 4380, 2190, 2920, 2190, 730, 1825, 2920, 730, 3285, 2555, 2555, 1095, 3650, 730, 365, 1825, 2555, 1825, 2920, 2190, 2190, 3650, 4015, 3285, 730, 2920, 365, 1460, 365, 4380, 2190, 2920, 1460, 2555, 1460, 1460, 730, 1460, 2555, 1460, 1825, 730)
# grp_control_RLCC <- as.numeric(grupo_control_RLCC)
# Total sample size. NOTE(review): `ts` shadows stats::ts in this session.
ts <- length(grupo_caso_RLCC) + length(grupo_control_RLCC)
# Long-format frame: group label, value, and a dummy y = 0 for the rank plot.
datos <- data.frame(
  grupo = rep(c("CASO_RLCC", "CONTROL_RLCC"),
              c(length(grupo_caso_RLCC), length(grupo_control_RLCC))),
  valores = c(grupo_caso_RLCC, grupo_control_RLCC),
  cordenada_y = rep(0, ts))
# Exact and approximate (Monte Carlo) versions of the same test.
r_exact <- wilcox_test(
  valores ~ grupo,
  data = datos,
  distribution = "exact",
  conf.int = 0.95)
r_approx <- wilcox_test(
  valores ~ grupo,
  data = datos,
  distribution = "approximate",
  conf.int = 0.95)
# One-dimensional scatter of both groups along the value axis.
ggplot(
  data = datos,
  aes(x = valores, y = cordenada_y)) +
  geom_point(aes(colour = grupo), size = 3) +
  ylab("") +
  xlab("rango") +
  theme_bw() +
  theme(axis.text.y = element_blank()) +
  ggtitle("Muestras procedentes de la misma poblacion")
|
c90c51b520979bd5531d9021b1f276c04f4c45e8
|
34304457fbc594cd3e20847a6c03c0feee0de6ab
|
/analyses/decsensSimsFstarAuerbach.R
|
e363ae996d8cc02627ad80f88c5a98fb98db2e9f
|
[] |
no_license
|
lizzieinvancouver/decsens
|
0d5acea1c671e1c6cf6b93a96e9019520e6e4230
|
41fc5fa662bac88ca390d4804073f14c6b8087d9
|
refs/heads/master
| 2023-07-21T22:55:34.257871
| 2023-07-13T10:21:12
| 2023-07-13T10:21:12
| 233,109,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,303
|
r
|
decsensSimsFstarAuerbach.R
|
## Wrote 15 Oct 2020 ##
## From J Auerbach's 12 Oct 2020 email ##
# Simulates leaf-out under a thermal-sum model: each "year" draws 100 daily
# temperatures ~ N(5, 1); leaf-out is the first day the cumulative sum of
# temperature exceeds the threshold. For each threshold the script records
# cov(mean temperature, leaf-out day) and var(mean temperature), then plots
# both on a dual-axis line chart. The two halves differ only in how mean
# temperature is defined (up to leaf-out vs. over all 100 days).
# NOTE(review): no set.seed(), so results vary between runs; axes=F / par(new=T)
# use the reassignable T/F shorthands (TRUE/FALSE would be safer).
#Plot 1 (model from section 1.1)
# Example plot for Section 2.1
thresholds <- seq(100, 400, length.out = 10)
cov_experiment <- var_experiment <- numeric(length(thresholds))
for(experiment in seq_along(cov_experiment)) {
  mean_temp <- leaf_out <- numeric(1e5)
  for(year in seq_along(mean_temp)) {
    temp <- rnorm(100, 5)
    # which.min(!cond) returns the index of the first TRUE in cond, i.e. the
    # first day the cumulative temperature exceeds the threshold.
    leaf_out[year] <- which.min(!cumsum(temp) > thresholds[experiment])
    # Mean temperature only over the days up to leaf-out.
    mean_temp[year] <- mean(temp[1:leaf_out[year]])
  }
  cov_experiment[experiment] <- cov(mean_temp, leaf_out)
  var_experiment[experiment] <- var(mean_temp)
}
# Dual-axis plot: covariance (left axis, blue) and variance (right, red).
par(mar = c(5, 5, 3, 5))
plot(thresholds, cov_experiment, axes=F,
     ylim=range(cov_experiment),
     xlab="threshold", ylab = "covariance", type="l",
     col="blue", main="",xlim=range(thresholds))
axis(side = 1)
axis(side = 2)
par(new=T)
plot(thresholds, var_experiment, axes=F,
     ylim=range(var_experiment), type="l", lty = 2,
     xaxt = "n", yaxt = "n", ylab = "", xlab = "",
     col="red", main="",xlim=range(thresholds))
axis(side = 4)
mtext("variance", side = 4, line = 3)
legend("topleft", c("covariance", "variance"),
       col = c("blue", "red"), lty = c(1, 2))
#Plot 2 (model from section 1.2)
# Identical simulation except mean_temp uses ALL 100 days, not just those
# before leaf-out. (Deliberate near-duplication of the block above.)
thresholds <- seq(100, 400, length.out = 10)
cov_experiment <- var_experiment <- numeric(length(thresholds))
for(experiment in seq_along(cov_experiment)) {
  mean_temp <- leaf_out <- numeric(1e5)
  for(year in seq_along(mean_temp)) {
    temp <- rnorm(100, 5)
    leaf_out[year] <- which.min(!cumsum(temp) > thresholds[experiment])
    mean_temp[year] <- mean(temp)
  }
  cov_experiment[experiment] <- cov(mean_temp, leaf_out)
  var_experiment[experiment] <- var(mean_temp)
}
par(mar = c(5, 5, 3, 5))
plot(thresholds, cov_experiment, axes=F,
     ylim=range(cov_experiment),
     xlab="threshold", ylab = "covariance", type="l",
     col="blue", main="",xlim=range(thresholds))
axis(side = 1)
axis(side = 2)
par(new=T)
plot(thresholds, var_experiment, axes=F,
     ylim=range(var_experiment), type="l", lty = 2,
     xaxt = "n", yaxt = "n", ylab = "", xlab = "",
     col="red", main="",xlim=range(thresholds))
axis(side = 4)
mtext("variance", side = 4, line = 3)
legend("topleft", c("covariance", "variance"),
       col = c("blue", "red"), lty = c(1, 2))
|
49171d67c6737b817bee95f5f26c6c9473217c50
|
ed8739273f94dffb05f22913bd36ccae4e520196
|
/3. Getting and Clearing Data/summarizing_data.R
|
5584559a380950052a80b184093ee8a95f41dc37
|
[] |
no_license
|
martafd/datasciencecoursera
|
5b5a24e62f5e5d01968d33cb8a46379682569446
|
5104e6b0439558683ea9fd03a8e562ae473ffff4
|
refs/heads/master
| 2020-05-25T12:47:07.023734
| 2016-10-08T19:01:53
| 2016-10-08T19:01:53
| 65,119,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,916
|
r
|
summarizing_data.R
|
# Course notes: summarizing and transforming data with base R plus Hmisc/plyr.
# Downloads the Baltimore City restaurants dataset and explores it step by step.
setwd('/home/marta/Documents/Data_Science/3. Getting and Clearing Data')
fileUrl <- 'https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD'
download.file(fileUrl, destfile = './restaurants.csv', method = 'curl')
data <- read.csv('restaurants.csv')

# First look at the data: top/bottom rows, column summaries, structure.
head(data, 3)
tail(data, 3)
summary(data)
str(data) ## structure

# Quantiles of a numeric column; na.rm = TRUE guards against missing values.
quantile(data$councilDistrict, na.rm = TRUE)
quantile(data$councilDistrict, probs = c(0.4, 0.7, 0.95))

# Frequency tables; useNA = 'ifany' adds an NA count when NAs are present.
table(data$zipCode, useNA = 'ifany')
table(data$councilDistrict, data$zipCode) ## to know relation between councilDistrict and zipCode

# Checking for missing values.
sum(is.na(data$councilDistrict)) ## check missing values
any(is.na(data$councilDistrict))
# NOTE(review): parentheses look misplaced here — this tests is.na() of a
# logical comparison; probably meant all(data$zipCode > 0, na.rm = TRUE).
all(is.na(data$zipCode > 0))
colSums(is.na(data))
all(colSums(is.na(data)) == FALSE)

# Row subsetting with %in% (never returns NA, unlike ==).
table(data$zipCode %in% c('21212'))
data[data$zipCode %in% c('21212'),]

# Cross tabulation on the built-in UCB admissions data.
data(UCBAdmissions)
df <- as.data.frame(UCBAdmissions)
summary(df)
xt <- xtabs(Freq ~ Gender + Admit, data = df)

# Flat contingency table across all warpbreaks factors.
warpbreaks$replicate <- rep(1:9, len = 54)
xt <- xtabs(breaks~., data = warpbreaks)
ftable(xt)

# Memory footprint of an object.
fakeData <- rnorm(1e5)
object.size(fakeData)
print(object.size(fakeData), units = 'Mb')

# Creating new (derived) variables on the data frame.
data$nearMe <-data$zipCode %in% c('21212', '21213')
table(data$nearMe)
data$zipWrong <- ifelse(data$zipCode < 0, TRUE, FALSE) ## true if zipCode<0
table(data$zipWrong, data$zipCode < 0)

# Binning a numeric variable: cut() with explicit quantile breaks,
# then Hmisc::cut2() which chooses breaks itself for g groups.
data$zipGroups <- cut(data$zipCode, breaks = quantile(data$zipCode))
table(data$zipGroups)
table(data$zipGroups, data$zipCode)
library(Hmisc)
data$zipGroups <- cut2(data$zipCode, g = 5)
table(data$zipGroups)

# Factor variables: creation, level ordering, and numeric coding.
data$zcf <- factor(data$zipCode)
head(data$zcf, 10)
class(data$zcf)
yesno <- sample(c('yes', 'no'), size = 10, replace = TRUE)
yesnofactor <- factor(yesno, levels = c('yes', 'no'))
relevel(yesnofactor, ref = 'yes')
as.numeric(yesnofactor)

## using the mutate(changing) function
library(Hmisc)
library(plyr)
data2 <- mutate(data, zipGroups = cut2(zipCode, g =5))
table(data2$zipGroups)
|
a979766e1a5e77a0d6b4ba6a54e09ca0f5f9cef5
|
1c13cd855fefb41c98fa2e8c9cfda5e2bd1a503e
|
/man/getDevianceResiduals.Rd
|
bed30726100e8fac45b3c706fffa8206706b6350
|
[] |
no_license
|
daniellemccool/REMThesis
|
08867411e58ff5680e69d3aa1350e1537dfa586b
|
82a2578a096fd8c1c86b49ebb50d6b7b0eaefe22
|
refs/heads/master
| 2021-01-15T13:18:10.158596
| 2014-09-15T10:31:15
| 2014-09-15T10:31:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
rd
|
getDevianceResiduals.Rd
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
\name{getDevianceResiduals}
\alias{getDevianceResiduals}
\title{Calculate Deviance Residuals}
\usage{
getDevianceResiduals(fits)
}
\arguments{
\item{fits}{}
}
\description{
Calculates the deviance residuals
}
\examples{
getDevianceResiduals()
}
\keyword{deviance}
\keyword{residuals}
|
9c8bdd51d098a4ad00edf23570a09c9821bd7631
|
fd365694237edb699e53eef04f1c3c0ff649f3c8
|
/man/opal.file_mv.Rd
|
f6e34dbaf6006e85d568671c5284d827799bbe2d
|
[] |
no_license
|
obiba/opalr
|
f73a0eb0280bc768b47711d6a1a08ce0eded7ce1
|
5ca4936deae7e3410db5ee6a02df7994ff5fa336
|
refs/heads/master
| 2023-08-03T06:18:07.954481
| 2023-07-21T06:58:07
| 2023-07-21T06:58:07
| 166,788,279
| 3
| 3
| null | 2021-05-13T15:50:49
| 2019-01-21T09:45:41
|
R
|
UTF-8
|
R
| false
| true
| 1,411
|
rd
|
opal.file_mv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opal.file.R
\name{opal.file_mv}
\alias{opal.file_mv}
\title{Move and/or rename a file}
\usage{
opal.file_mv(opal, source, destination)
}
\arguments{
\item{opal}{Opal object.}
\item{source}{Path to the file in the Opal file system.}
\item{destination}{New path to the file in the Opal file system.}
}
\description{
Move and/or rename a file or a folder in the Opal file system.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
# move a file to another folder
opal.file_mv(o, '/home/administrator/export/some-data.csv', '/home/userx/deliverables')
# rename a file
opal.file_mv(o, '/home/administrator/export/some-data-20170123.csv',
'/home/administrator/export/some-data.csv')
# move and rename a file
opal.file_mv(o, '/home/administrator/export/some-data-20170123.csv',
'/home/userx/deliverables/some-data.csv')
opal.logout(o)
}
}
\seealso{
Other file functions:
\code{\link{opal.file_cp}()},
\code{\link{opal.file_download}()},
\code{\link{opal.file_ls}()},
\code{\link{opal.file_mkdir_tmp}()},
\code{\link{opal.file_mkdir}()},
\code{\link{opal.file_read}()},
\code{\link{opal.file_rm}()},
\code{\link{opal.file_unzip}()},
\code{\link{opal.file_upload}()},
\code{\link{opal.file_write}()},
\code{\link{opal.file}()}
}
\concept{file functions}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.