blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0d576ed4566aa9fb44bb6299f5e75abc0dc2b0c
|
f694ae8dde4e35f83913a70b85e427983823be59
|
/scratch/2015-01-22-scratch.r
|
53ea30d3bbe465bcd03b35b4b708158c5cabba87
|
[] |
no_license
|
wes-brooks/opticalww
|
b27cc1763f5d3d3be03e142c696310589798e540
|
0ed1afa4a983865fbdd65fc3667a54b60322cfdb
|
refs/heads/master
| 2021-05-29T14:15:47.726895
| 2015-02-26T16:55:17
| 2015-02-26T16:55:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,236
|
r
|
2015-01-22-scratch.r
|
# Scratch analysis of excitation-emission matrix (EEM) fluorescence data.
# Assumes a data frame `ss.eem` (plus indx4 and wide1..wide3) already
# exists in the workspace and that reshape2 (acast) and magrittr (%>%)
# are attached -- none of that is set up in this file.
#find the columns corresponding to excitation-emission data
# Column names look like "f<excite>.<emit>" with 3-digit frequencies.
indx = grepl("f(\\d{3})\\.(\\d{3})", colnames(ss.eem)) %>% which
#set up a data frame with the excitation-emission frequencies
matches = gregexpr("\\d{3}", colnames(ss.eem)[indx])
freqs = regmatches(colnames(ss.eem)[indx], matches) %>% as.data.frame %>% t %>% as.data.frame
rownames(freqs) = NULL
colnames(freqs) = c("excite", "emit")
# Columns come out as factors; map factor levels back to numeric values.
freqs = within(freqs, {
excite <- as.numeric(levels(excite)[excite])
emit <- as.numeric(levels(emit)[emit])
})
#for observation i, extract the values of the excitation-emission spectrum
# 55 observations on a 41 x 156 excite/emit grid -- TODO confirm dims.
a = array(NA, c(55,41,156))
eem = matrix(NA,0,3)
for (i in 1:55) {
temp = cbind(freqs, t(ss.eem[i,indx]))
rownames(temp) = NULL
colnames(temp)[3] = 'val'
#eem = rbind(eem, temp)
# Cast long (excite, emit, val) triples to a wide excite-by-emit matrix.
wide = acast(temp, excite~emit)
a[i,,] = wide
}
# Collect the spectra for the observations listed in indx4 (defined elsewhere).
eem2 = freqs
for (i in indx4) {
temp = t(ss.eem[i,indx])
eem2 = cbind(eem2, temp)
}
# Mean spectrum across those observations (columns 3+ are the spectra).
r.mean = cbind(freqs, rowMeans(eem2[,3:ncol(eem2)]))
colnames(r.mean)[3] = 'val'
wide4 = acast(r.mean, excite~emit)
# Shared z/colour range; wide1..wide3 must come from earlier scratch work.
zz = range(c(wide1, wide2, wide3, wide4), na.rm=TRUE)
# Per-frequency anomaly: subtract the mean spectrum row-wise.
anomaly = sweep(eem2[,3:ncol(eem2)], 1, r.mean$val, '-')
anomaly = cbind(freqs, anomaly)
# NOTE(review): after the cbind above, anomaly[,1] is the `excite`
# column, not an anomaly column -- confirm anomaly[,3] was not intended.
an1 = cbind(freqs, anomaly[,1])
|
4c99ea6ea6f586fdc615d23f0f6641917def3f68
|
e2101543bf3421c7d7b5bdc5fcd0513701f85b67
|
/Assignments/2 - R Programming/Assignment2.R
|
e57dbbbf8f1513c6a915f801e1a8605252015456
|
[] |
no_license
|
kdivis/DataScienceSpecialization
|
92f13b6d1876fadd12c0b14affaf04aa0cc574a6
|
9e30f72034684171b6cb9b1d01e144f92a2b7a1c
|
refs/heads/master
| 2021-01-10T08:53:19.975677
| 2015-11-04T03:24:11
| 2015-11-04T03:24:11
| 45,509,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,471
|
r
|
Assignment2.R
|
##R Programming: Assignment 2
# -K. Divis (July 2015)
# -Created as part of Coursera Data Science Specialization
# Build a "cache-aware vector": a list of closures wrapping a numeric
# vector `x` together with a cached value of its mean.
#
# Returned list:
#   set(y)     -- replace the stored vector and invalidate the cache
#   get()      -- return the stored vector
#   setmean(m) -- store a computed mean in the cache
#   getmean()  -- return the cached mean, or NULL if not yet computed
#
# The closures share this function's environment, so `<<-` updates the
# stored vector/cache rather than creating locals.
makeVector = function(x = numeric()) {
  cached = NULL
  set = function(y) {
    # New data invalidates any previously cached mean.
    x <<- y
    cached <<- NULL
  }
  get = function() x
  setmean = function(mean) {
    cached <<- mean
  }
  getmean = function() cached
  list(set = set, get = get, setmean = setmean, getmean = getmean)
}
# Return the mean of the data held in a makeVector-style object `x`,
# computing it at most once.  On a cache hit a "getting cached data"
# message is emitted and the stored value returned; on a miss the mean
# is computed (extra arguments are forwarded to mean()) and written
# back into the object's cache before being returned.
cachemean = function(x, ...) {
  cached = x$getmean()
  if (is.null(cached)) {
    # Cache miss: compute from the stored data and memoise the result.
    cached = mean(x$get(), ...)
    x$setmean(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
6a83734c8932358302e3a195f42ea26f9126cae3
|
1b4df2f1c29fb7bf251098ce8b4779cc9d73d7ef
|
/man/rotate_dot_plot_dendrogram.Rd
|
c2ae1d3205ddc824394d0ce4a2f3b4ab6713a3b8
|
[] |
no_license
|
Simon-Leonard/FlexDotPlot
|
a00c8547cac9a8cf22992cf59934e57fd0e1fbae
|
d6cf3048060370fefa637a21d4e1a82d974af74f
|
refs/heads/master
| 2022-06-23T20:45:35.391673
| 2022-06-17T09:10:28
| 2022-06-17T09:10:28
| 252,197,677
| 25
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,697
|
rd
|
rotate_dot_plot_dendrogram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rotate_dot_plot_dendrogram.R
\encoding{UTF-8}
\name{rotate_dot_plot_dendrogram}
\alias{rotate_dot_plot_dendrogram}
\title{Interactively rotate dendrograms from dot_plot outputs}
\usage{
rotate_dot_plot_dendrogram(dot_plot_output, axis_to_rotate = c("x", "y"))
}
\arguments{
\item{dot_plot_output}{Output from the \code{\link{dot_plot}} function}
\item{axis_to_rotate}{Dendrogram to rotate: "x" or "y"}
}
\value{
Print and return rotated dot plot
}
\description{
Take an output from the dot_plot function and allow interactive dendrogram rotation with the dendextend package
}
\examples{
# Perform dot_plot
if(interactive()){
library(FlexDotPlot)
data(CBMC8K_example_data)
# Run dot_plot
dotplot_output = dot_plot(data.to.plot=CBMC8K_example_data, size_var="RNA.avg.exp.scaled",
dend_x_var=c("RNA.avg.exp.scaled","ADT.avg.exp.scaled"),
dend_y_var=c("RNA.avg.exp.scaled","ADT.avg.exp.scaled"),
dist_method="euclidean",hclust_method="ward.D", do.return = TRUE)
# The following command has to be run when the user
#is running example("rotate_dot_plot_dendrogram") only.
dotplot_output$command=call("dot_plot", data.to.plot=as.name("CBMC8K_example_data"),
size_var="RNA.avg.exp.scaled",
dend_x_var=c("RNA.avg.exp.scaled","ADT.avg.exp.scaled"),
dend_y_var=c("RNA.avg.exp.scaled","ADT.avg.exp.scaled"),
dist_method="euclidean",hclust_method="ward.D", do.return = TRUE)
# y dendrogram rotation
r1=rotate_dot_plot_dendrogram(dotplot_output, axis_to_rotate = "y")
# add x dendrogram rotation to previous result
#r2=rotate_dot_plot_dendrogram(r1, axis_to_rotate = "x")
}
}
\author{
Simon Leonard - simon.leonard@univ-rennes1.fr
}
|
c50282079a7aaeeea1b95ed8058af113bda4de9a
|
7af1ca1589f16ee9e1ca03ea0de7605a25a879cd
|
/CreatePackage.R
|
4ee34dc4eb7480528fe95281439ed419558d9549
|
[] |
no_license
|
dispersing/HexGrid
|
2298919432ce48857f72e98634aa21145c0e6a30
|
5369c0c7c955c90b7ffb674ba1ba073375cfec57
|
refs/heads/master
| 2021-01-17T12:50:00.296049
| 2016-07-01T14:33:45
| 2016-07-01T14:33:45
| 57,258,495
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
CreatePackage.R
|
# Helper script for building the HexGrid package with devtools/roxygen2.
library(devtools)
library(roxygen2)
# Parent directory holding the package (machine-specific path).
pkg.dir <- "~/Dropbox/Projects/HexGrid_Package"
setwd(pkg.dir)
# One-time scaffolding / documentation steps, kept here for reference:
# create("HexGrid")
# document(paste(pkg.dir, "/HexGrid", sep = ""))
|
1f545a045734f148e783662a7082c39bbccbc58d
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlepubsubv1beta2.auto/man/PushConfig.attributes.Rd
|
dd15ec52bc384e05aee6d82aa4cef02b48786de7
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,431
|
rd
|
PushConfig.attributes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pubsub_objects.R
\name{PushConfig.attributes}
\alias{PushConfig.attributes}
\title{PushConfig.attributes Object}
\usage{
PushConfig.attributes()
}
\value{
PushConfig.attributes object
}
\description{
PushConfig.attributes Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Endpoint configuration attributes. Every endpoint has a set of API supported attributes that can be used to control different aspects of the message delivery. The currently supported attribute is `x-goog-version`, which you can use to change the format of the push message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the envelope (i.e. its fields and metadata). The endpoint version is based on the version of the Pub/Sub API. If not present during the `CreateSubscription` call, it will default to the version of the API used to make such call. If not present during a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The possible values for this attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.
}
\seealso{
Other PushConfig functions: \code{\link{PushConfig}}
}
|
75ffe2913c2c543713eee42f612659369d850163
|
aba55c7ed6c36fa9e3058378758471219a9268ad
|
/income_quintiles/income_quintiles_master_script.R
|
fcab90594fb550ab11221c5bf6c91a4c3492fa8e
|
[] |
no_license
|
sjkiss/CES_Analysis
|
b69165323d992808a9d231448bcc3fe507b26aee
|
4c39d30f81cbe01b20b7c72d516051fc3c6ed788
|
refs/heads/master
| 2023-08-18T15:18:44.229776
| 2023-08-07T22:49:06
| 2023-08-07T22:49:06
| 237,296,513
| 0
| 1
| null | 2020-05-07T19:19:08
| 2020-01-30T20:12:01
|
R
|
UTF-8
|
R
| false
| false
| 1,983
|
r
|
income_quintiles_master_script.R
|
# Census income quintiles master script.
# Sources the per-census-year scripts (each must create the
# quintile_average_<year> and quintiles_<year> objects), writes the
# quintile boundaries to Results/quintile_boundaries.csv, and saves a
# line plot of average real household income by quintile.
# Requires dplyr, here, ggplot2 and car (Recode) to be attached.
source("income_quintiles/income_quintiles_1971.R")
source("income_quintiles/income_quintiles_1981.R")
source("income_quintiles/income_quintiles_1986.R")
source("income_quintiles/income_quintiles_1991.R")
source("income_quintiles/income_quintiles_1996.R")
#This is a check that the yearly tables stack cleanly.
# NOTE(review): 1991/1996 are skipped here, and quintile_average_2001 is
# not created by any script sourced above -- confirm whether a 2001
# script should also be sourced.
quintile_average_1971 %>%
  bind_rows(., quintile_average_1981) %>%
  bind_rows(., quintile_average_1986) %>%
  bind_rows(., quintile_average_2001)
#print out the boundaries: one row per quintile boundary, 4 per year
df71<-data.frame(Year=rep(1971, 4),boundary= quintiles_1971, quintile=c(seq(1,4,1)))
df81<-data.frame(Year=rep(1981, 4),boundary= quintiles_1981, quintile=c(seq(1,4,1)))
df86<-data.frame(Year=rep(1986, 4),boundary= quintiles_1986, quintile=c(seq(1,4,1)))
df91<-data.frame(Year=rep(1991, 4),boundary= quintiles_1991, quintile=c(seq(1,4,1)))
df96<-data.frame(Year=rep(1996, 4),boundary= quintiles_1996, quintile=c(seq(1,4,1)))
df01<-data.frame(Year=rep(2001, 4), boundary=quintiles_2001, quintile=c(seq(1,4,1)))
bind_rows(df71, df81) %>%
  bind_rows(., df86) %>%
  bind_rows(., df91) %>%
  bind_rows(., df96) %>%
  bind_rows(., df01) %>%
  write.csv(., file=here("Results", "quintile_boundaries.csv"))
list.files()
ls()
# List workspace objects whose names begin with "quintile".
# (Fix: the original `ls %>% starts_with('quintile')` piped the function
# object `ls` into tidyselect's starts_with(), which errors; ls() with a
# regex `pattern` performs the intended filtering.)
ls(pattern = "^quintile")
quintile_average_1971
# Stack all years, drop rows with missing quintile, reverse the quintile
# coding (5 -> 1 ... 1 -> 5), and plot average income per quintile over time.
quintile_average_1971 %>%
  bind_rows(., quintile_average_1981) %>%
  bind_rows(., quintile_average_1986) %>%
  bind_rows(., quintile_average_1991) %>%
  bind_rows(., quintile_average_1996) %>%
  bind_rows(., quintile_average_2001) %>%
  filter(., !is.na(quintile)) %>%
  mutate(quintile=Recode(quintile, "5=1; 4=2; 3=3; 2=4; 1=5")) %>%
  ggplot(., aes(x=year, y=avg, group=quintile))+geom_line(aes(linetype=quintile))+labs(title="Average Real Total Household Income By Quintile, Canada, 1971-2001", y="Average", x="Year")+theme_bw()->income_inequality
income_inequality %>%
  ggsave(., filename=here("Plots", "average_income_by_quintile.png"))
#Read in from the Statistics Canada Quintile Boundary file
|
c68e5fbd569cc403b4ce09578bbe11ce41b0001a
|
f250476a3355c700099a93dada2be754c93a834d
|
/R-Code-Day4 - All.R
|
deed2919a09da1dd07dc4d28313194d9a5ebff76
|
[] |
no_license
|
balluhardik96/Data-Visualisation
|
63cf4afc50d1dbf2e30bfbca5d816775eddb9e92
|
1f13c3889e3f0aa2f204a1147c70d3f64350604b
|
refs/heads/master
| 2021-07-16T06:05:20.655893
| 2020-05-18T22:36:22
| 2020-05-18T22:36:22
| 143,678,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,859
|
r
|
R-Code-Day4 - All.R
|
# Teaching script (day 4): time series, candlestick, pie, treemap and
# stacked-area charts with ggplot2/plotly.  Reads fixed Windows paths
# under E://Training data// and fetches AAPL from Yahoo -- runnable only
# in that environment.
############### Time series plot ####################################################################
# Data Set used: Economics data set
# Source of data set: R built-in data set
# Problem statement: Plot the unemployment rate over the year.
library(ggplot2)
data1 = economics
chart1 = ggplot(data1, aes(x=date, y = unemploy)) + geom_line()
chart1
# Problem statement 2: Want to change the line thickness based on unemployment % over the population
data1$rate = round((data1$unemploy/data1$pop)*100, digits = 2)
chart2 = ggplot(data1, aes(x = date, y = unemploy)) + geom_line(aes(size = rate))
chart2
# Problem statement 3: Plotting multiple line charts
chart4 = ggplot(data1, aes(x = date)) +
geom_line(aes(y = unemploy), col = "Red") +
geom_line(aes(y = pce), col = "Green")
chart4
# Or: map colour inside aes() so a legend is generated automatically.
chart4 = ggplot(data1, aes(x = date)) +
geom_line(aes(y = unemploy, color = "Unemployment")) +
geom_line(aes(y = pce, color = "Price"))
chart4
# Melting the data frame on date to plot all the variables in one layer
library(reshape2)
library(dplyr)
data1 = data1[,1:6]
data2 = melt(data1, id = "date")
data2.1 = filter(data2, variable == "pce" | variable == "unemploy")
chart5 = ggplot(data2, aes(x = date, y = value, col = variable)) + geom_line()
chart5
chart5 + scale_color_manual(labels = c("pce", "unemploy"),
values = c("pce"="Red", "unemploy"="Green"))
# My chart is showing data over a period of 10 years. I want to show for each year
library(lubridate)
# One break every 12 monthly observations, labelled with the year.
brks <- data1$date[seq(1, length(data1$date), 12)]
lbls <- lubridate::year(brks)
chart4 + scale_x_date(labels = lbls, breaks = brks) + theme(axis.text.x = element_text(angle = 90))
###################################### Candle stick chart #####################################
# Data set: Stock market data from Yahoo
# Data Source: Yahoo
# Problem statement: Analyse the stock price of the last 30 days
# How to get the data from yahoo
library(quantmod)
getSymbols("AAPL",src='yahoo')
# basic example of ohlc charts
df <- data.frame(Date=index(AAPL),coredata(AAPL))
df <- tail(df, 30)
library(plotly)
# The live download above is overwritten by a local CSV snapshot here.
df = read.csv("E://Training data//DV for Batch 3//Data Set//Stockmarket.csv")
df = tail(df, 30)
p = plot_ly(data = df, x = ~Date, type="candlestick",
open = ~AAPL.Open, close = ~AAPL.Close,
high = ~AAPL.High, low = ~AAPL.Low) %>% layout(title = "Basic Candlestick Chart")
p
## Custom colours for increasing/decreasing candles
# NOTE: `i` and `d` shadow common loop-variable names further down a
# session; harmless here but easy to trip over interactively.
i = list(line = list(color = 'Green'))
d = list(line = list(color = 'Red'))
p = plot_ly(data = df, x = ~Date, type="candlestick",
open = ~AAPL.Open, close = ~AAPL.Close,
high = ~AAPL.High, low = ~AAPL.Low, increasing = i, decreasing = d) %>% layout(title = "Basic Candlestick Chart")
p
########### Pie chart ####################################################################
# Data Set used: Cost per event and cost per athlete in the Olympics.
# Source of Data : data.world
# Problem statement : To identify the cost per event in the olympics category wise.
library(plotly)
library(dplyr)
data1 = read.csv("E://Training data//DV for Batch 3//Data Set//Cost.csv")
data_final = data1 %>% group_by(Type) %>% summarise(Total_Cost = sum(Cost.per.event..mio..USD))
pie = plot_ly(data_final, labels = ~Type, values = ~Total_Cost, type = 'pie',
textposition = 'inside', textinfo = 'label+percent', showlegend = FALSE,
hoverinfo = 'text', text = ~paste('$', Total_Cost, ' millions')) %>%
layout(title = 'Expense on Olympic')
pie
################################# Tree Map ##############################################
# Data Set used: ODI
# Source of Data : data.world
# Problem statement: Plot the average score rate for the top 50 Indian players.
library(treemapify)
library(readxl)
odi = read_excel("E://Training data//DV for Batch 3//Data Set//odi-batting-analysis.xlsx")
indian_players_summary = odi %>% filter(Country=='India') %>% group_by(Player) %>% summarise(Total_Runs = sum(Runs, na.rm=T), Avg_SR=mean(ScoreRate, na.rm=T)) %>% arrange(-Total_Runs) %>%
head(50)
indian_players_summary
g = ggplot(indian_players_summary, aes(area=Total_Runs, label=Player, fill=-Avg_SR)) + geom_treemap()
g = g + geom_treemap_text()
g
############ Stacked Area chart ############################################
# Time Series Plot From a Data Frame
# Data Set used: Economics data set
# Source of data set: R built-in data set
# Problem statement: To draw stacked area chart for Unemployment and Price
data1 = economics
library(ggplot2)
chart6 = ggplot(data1, aes(x=date)) +
geom_area(aes(y=unemploy, fill="Unemployment")) +
geom_area(aes(y=pce, fill="Price"))
chart6
|
49dfdd06bdaba1fc4e636914ce6189f5d1ad60cf
|
177296a4370c7578dcad54cca9c7401c8ed4a87e
|
/Scripts/R_scripts/masterPlot_summStats_severalIndependentRuns.R
|
40442a29dab66d5264c3c987cf5a99c165940d93
|
[
"MIT"
] |
permissive
|
diegoharta/prdm9_2020
|
f6769af8fc29e55a72632c3865aa78e4764f54f3
|
44d9be7c678a7b8c0ca29ddb66a33c9c143465da
|
refs/heads/master
| 2023-01-31T17:18:07.437443
| 2020-12-14T17:55:49
| 2020-12-14T17:55:49
| 304,371,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,454
|
r
|
masterPlot_summStats_severalIndependentRuns.R
|
# masterPlot_summStats_severalIndependentRuns.R
# Builds a multi-panel base-graphics "master plot" of PZIFE summary
# statistics (real/effective mutation rates, Prdm9 diversity,
# recombination activity, selection coefficient, ZnF-array stats),
# averaged over several independent runs labelled _s0, _s1, ...
# Each later section of the script fills one panel of the layout below.
# Generates one figure (pdf() call currently disabled).
setwd(paste("~/Documents/Projects/PZIFE/C_scripts_and_data/dataFromCluster/TrialRun_2018_09_29/s5ff",sep=""))
label="N1000_t200_s5ff"
# PRINT PDF (enable to write the figure to a pdf file)
#pdf(paste("masterPlot_",label,".pdf",sep=""), width=8, height= 9)
# Colour-blind-friendly palette; one colour per U value in the legend.
arr_color=c("#009E73", "#e79f00", "#0072B2", "#9ad0f3", "#D55E00",
"#CC79A7", "#F0E442","#000000")
# Point sizes per X value; the second assignment overrides the first.
tamany=c(1.2,1.5,1.8,2)
tamany=c(1,1,1,1,1)
# Panel layout matrix; the second definition (11 panels) wins.
# NOTE(review): `m` is later reused as a row index into bStats
# (bStats[m,13]); indexing with this layout matrix looks unintended --
# confirm which rows were meant there.
m <- rbind(c(1,1,1,1),c(2,2,2,2), c(3,3,4,4),c(5,5,6,6),c(7,7,8,8),c(9,9,10,10))
m <- rbind(c(1,1,1,1),c(2,2,3,3), c(4,4,5,5),c(6,6,7,7),c(8,8,9,9),c(10,10,11,11))
layout(m,heights = c(1,2,2,2,2,3))
# m <- rbind(c(1,1,1,1),c(2,2,3,3), c(4,4,5,5),c(6,6,7,7),c(8,8,9,9))
# layout(m,heights = c(1,2,2,2,3))
par(mar = c(0.5,5, 0.5, 0.5))
#,xaxs="i")
# Plotting symbols per D/C value.
tipo=c(22,23,24,25,1)
tipo2=c(15,16,17,18)
# Parameter grids for the simulations; each simID/paths block overrides
# the previous one, so only the last set (s5ff) is actually used.
simID="N1000_t400_s10m"
paths="~/Documents/Projects/PZIFE/C_scripts_and_data/dataFromCluster/TrialRun_2018_09_14/s10m"
arrayP=c(1)
arrayE=c(6,5,4)
arrayD=c(3,2,1,0)
arrayU=c(5,4,3,2)
simID="N1000_t200_s1dd"
paths="~/Documents/Projects/PZIFE/C_scripts_and_data/dataFromCluster/TrialRun_2018_09_29/s1dd"
arrayP=c(1)
arrayE=c(6,5,4)
arrayD=c(2,1,0)
arrayU=c(6,4)
simID="N1000_t200_s5ff"
paths="~/Documents/Projects/PZIFE/C_scripts_and_data/dataFromCluster/TrialRun_2018_09_29/s5ff"
arrayP=c(1)
arrayE=c(6,5,4)
arrayD=c(2,1,0)
arrayU=c(6,4)
intervalo=1000
nsims=5
Propor=1.5
# Panel 1: legend only.
plot.new()
legend("top",c("C=0.04", "C=0.4", "C=4", "U=0.000004", "U=0.0004", "X=0.000004","X=0.00004","X=0.0004"),ncol = 8,
col=c(1,1,1,arr_color[1],arr_color[2],1,1,1),pch=c(tipo[1],tipo[2],tipo[3],NA,NA,tipo[5],tipo[5],tipo[5]),
lty=c(NA,NA,NA,1,1,NA,NA,NA),pt.cex=c(1,1,1,2,2,tamany[1],tamany[2],tamany[3]),
lwd=c(NA,NA,NA,3,3,NA,NA,NA),bty = "n",x.intersp=0.05)
#legend("top",c("C=0.004", "C=0.04", "C=0.4" , "U=0.00004", "U=0.0004","U=0.004","X=0.00004","X=0.004","X=0.4"),ncol = 9,
#col=c(1,1,1,arr_color[1],arr_color[2],arr_color[3],1,1,1),pch=c(tipo[1],tipo[2],tipo[3],NA,NA,NA,tipo[4],tipo[4],tipo[4]),
#lty=c(NA,NA,NA,1,1,1,NA,NA,NA),pt.cex=c(1,1,1,2,2,2,tamany[1],tamany[2],tamany[3]),
#lwd=c(NA,NA,NA,3,3,3,NA,NA,NA),bty = "n",x.intersp=0.05)
# Accumulators for per-run values, filled inside the panel loops below.
valoresEff=c()
valoresReal=c()
valores=c()
valores2=c()
valores3=c()
init=0
limX=98
Prop=5
tamanyo=.8
ymax=0.01
xmin=-100
xmax=1000
count=0
# Panel 2: real mutation rate (log y axis); empty frame, points added
# by the loop in the next section.
plot(-100,10000,ylim=c(.00001,ymax),
xlim=c(xmin,xmax),ylab="Real mutation rate",xlab="",xaxt="n",las=2,log="y")
# Panel 2 fill: loop over the parameter grid (p, D/C, U, X) and the
# nsims independent runs; for runs that finished cleanly (no
# std_error.dat and last std_output.txt line starting with "alpha"),
# read per-run statistics, then plot the across-run mean +/- one sd.
for(i in 1:length(arrayP)){
count=0
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# Run labels are 0-based (_s0 .. _s4).
# NOTE(review): after this, valoresReal[uu] assigns at indices
# 0..nsims-1; assigning to index 0 is a silent no-op in R, so the
# first run's value is dropped -- confirm valoresReal[uu+1] intended.
uu=uu-1
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
# if(count == 1 && i==1) {text(1.5,1000,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
#Real mutation rate for this run
gralStats=c()
mutRate=c()
mutEffRate=c()
mutRealRate=c()
epsilonRatio=c()
bStats=read.table(paste("generalStatistics_",label,".dat",sep=""))
profile=read.table(paste("profile_",label,".dat",sep=""),header = TRUE)
# NOTE(review): `m` here is the layout matrix defined at the top,
# not a scalar row index -- confirm intended rows of bStats.
mutRealRate=(bStats[m,13])/((profile$Generations-profile$BurnIn))/(2*profile$PopulationSize)*(4*profile$PopulationSize)
valoresReal[uu]=mutRealRate
}
}
}
# Mean and sd across the runs of this parameter combination.
prom=mean(as.numeric(valoresReal))
stand=sd(as.numeric(valoresReal))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 3: effective mutation rate (same loop structure as panel 2,
# reading column 9 of generalStatistics instead of column 13).
init=0
limX=98
Prop=5
tamanyo=.8
ymax=100
xmax=1000
count=0
plot(-100,10000,ylim=c(.05,ymax),
xlim=c(xmin,xmax),ylab="Effective mutation rate",xlab="",xaxt="n",las=2,log="y")
for(i in 1:length(arrayP)){
count=0
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
# if(count == 1 && i==1) {text(1.5,1000,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
#Effective mutation rate for this run
gralStats=c()
mutRate=c()
mutEffRate=c()
mutRealRate=c()
epsilonRatio=c()
bStats=read.table(paste("generalStatistics_",label,".dat",sep=""))
profile=read.table(paste("profile_",label,".dat",sep=""),header = TRUE)
# NOTE(review): `m` is the layout matrix here too -- confirm rows.
mutEffRate=bStats[m,9]/((profile$Generations-profile$BurnIn))/(2*profile$PopulationSize)*(4*profile$PopulationSize)
valoresEff[uu]=mutEffRate
}
}
}
promEff=mean(as.numeric(valoresEff))
standEff=sd(as.numeric(valoresEff))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,promEff,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,promEff+standEff,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,promEff-standEff,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 4: Prdm9 diversity, averaged over the last 3/4 of each run's
# time series (the first quarter is presumably burn-in -- confirm).
init=0
limX=98
Prop=5
tamanyo=.8
ymax=8
count=0
plot(-100,-100,ylim=c(0,ymax),xlim=c(xmin,xmax),ylab="Prdm9 diversity",xlab="",xaxt="n",las=2)
for(i in 1:length(arrayP)){
count=0
# for(j in 1:length(arrayU)){
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
#PRDM9 DIVERSITY
divt=read.table(paste("prdmDiversity_",label,".dat",sep=""))
promDiv=mean(as.numeric(divt[1,(ncol(divt)/4):ncol(divt)]))
#points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15),promDiv,col=arr_color[l],pch=tipo[k],cex=tamany[o])
valores[uu]=promDiv
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 5: recombination activity (as in Latrille et al. 2017), again
# averaged over the last 3/4 of each run's time series.
ymax=1
count=0
plot(-100,-100,ylim=c(0,ymax),xlim=c(xmin,xmax),ylab="Rec. Activity",xlab="",xaxt="n",las=2)
for(i in 1:length(arrayP)){
count=0
# for(j in 1:length(arrayU)){
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
#PRINT RECOMBINATION ACTIVITY AS LATRILLE ET AL 2017
rec=read.table(paste("recombinationActivity_",label,".dat",sep=""))
promRec=mean(as.numeric(rec[1,(ncol(rec)/4):ncol(rec)]))
#points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15),promRec,col=arr_color[l],pch=tipo[k],cex=tamany[o])
valores[uu]=promRec
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 6: scaled selection coefficient (4N*s, as in Latrille et al.
# 2017), log y axis.
ymax=2000
count=0
plot(-100,1,ylim=c(1,ymax),xlim=c(xmin,xmax),ylab="Selection coefficient",xlab="",xaxt="n",las=2,log="y")
for(i in 1:length(arrayP)){
count=0
# for(j in 1:length(arrayU)){
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
# if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
#Selection coefficient as Latrille et al 2017.
# NOTE(review): `profile` is not re-read in this loop, so
# profile$PopulationSize is whatever was read last in an earlier
# panel's loop -- confirm it is constant across runs.
sel=read.table(paste("selectionCoefficient_",label,".dat",sep=""))
promSel=mean(as.numeric(sel[1,(ncol(sel)/4):ncol(sel)]))*4*profile$PopulationSize
# points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15),promSel,col=arr_color[l],pch=tipo[k],cex=tamany[o])
valores[uu]=promSel
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 7: mean amino-acid diversity at Prdm9 binding residues, i.e.
# the fraction of ZnF diversity carried by the relevant residues.
ymax=1
count=0
plot(-100,-100,ylim=c(0,ymax),xlim=c(xmin,xmax),ylab="AA Div. at Binding",xlab="",xaxt="n",las=2)
for(i in 1:length(arrayP)){
count=0
# for(j in 1:length(arrayU)){
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
#MEAN AA DIVERSITY AT PRDM9 BINDING SITES
divW=read.table(paste("meanDiversityWithinZnf_",label,".dat",sep=""))
if(any(is.na(divW)) == FALSE){
Res = profile$RelevantResidues*2+1;
aaDivInPrdm9Binding=mat.or.vec(1,nrow(divW))
for(nn in 1:nrow(divW)){
# Columns 2, 4, 6 hold the relevant-residue diversities.
sumRelRes=divW[nn,2]+divW[nn,4]+divW[nn,6]
if(sum(divW[nn,])!=0){
aaDivInPrdm9Binding[nn]=sumRelRes/sum(divW[nn,])
}
else{
# Zero total diversity: fall back to 0.5 (uninformative).
aaDivInPrdm9Binding[nn]=0.5
}
}
# for(m in 1:(nrow(divW)/nrow(sel))){
promAADiv=mean(as.numeric(aaDivInPrdm9Binding[(ncol(aaDivInPrdm9Binding)/4):ncol(aaDivInPrdm9Binding)]))
valores[uu]=promAADiv
}
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 8: dispersion (variance/mean) of the ZnF array-size histogram.
ymax=2
count=0
plot(-100,-100,ylim=c(0,ymax),xlim=c(xmin,xmax),ylab="Dispersion in array",xlab="",xaxt="n",las=2)
for(i in 1:length(arrayP)){
count=0
# for(j in 1:length(arrayU)){
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
a=read.table(paste("histogramOfSizeOfZnfArray_",label,".dat",sep=""),header=FALSE)
# Drop the first two (label) columns, keep the histogram counts.
novA=a[,3:length(a)]
avs=mean(as.numeric(unlist(novA)))
vars=sd(as.numeric(unlist(novA)))^2
# Index of dispersion: variance over mean.
promDisp=vars/avs
valores[uu]=promDisp
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
# Panel 9: heterozygosity (1 - sum of squared frequencies) of ZnF array
# sizes, averaged over runs and time windows.
ymax=1
count=0
plot(-100,-100,ylim=c(-0.5,ymax),xlim=c(xmin,xmax),ylab="Hetz in array",xlab="",xaxt="n",las=2)
for(i in 1:length(arrayP)){
count=0
for(k in 1:length(arrayD)){
for(l in 1:length(arrayU)){
countE=0
for(o in 1:length(arrayE)){
for(uu in 1:nsims){
p=arrayP[i]
X=arrayE[o]
D=arrayD[k]
C=D
U=arrayU[l]
# 0-based run label; see NOTE on valores[0] no-op in panel 2.
uu=uu-1
#U=C+2
label=paste("prueba_pzife_1.97_",simID,"_p",p,"_X",X,"_D",D,"_C",C,"_U",U,"_s",uu,sep="")
setwd(paste(paths,"/",label,sep=""))
if(count == 1 && i==1) {text(1.5,xmax-1,label,cex=1)}
er=("std_error.dat")
val=file.info(er)$size
if(is.na(val) == TRUE){
fileName <- "std_output.txt"
conn <- file(fileName,open="r")
linn <-readLines(conn)
eco=(linn[length(linn)])
close(conn)
substr(eco, 1,5)
if(substr(eco,1,5)=="alpha"){
a=read.table(paste("histogramOfSizeOfZnfArray_",label,".dat",sep=""),header=FALSE)
novA=a[,3:length(a)]
longA=(profile$Generations-profile$BurnIn)/intervalo
# NOTE(review): novNovA is sized with nrow(sel) left over from the
# selection-coefficient panel and is never used below -- confirm it
# can be removed.
novNovA=mat.or.vec(nrow(sel),longA)
hetz=mat.or.vec(profile$Runs,longA)
for(gg in 1:profile$Runs){
for(hh in 1:longA){
numbers <- unlist(novA[((gg-1)*longA+hh),])
suma=0
for(jj in 1:length(table(numbers))){
# Frequency of each array size in this time window.
freq=(table(numbers)[[jj]])/longA
suma=suma+freq*freq
}
hetz[gg,hh]=1-suma
}
}
promHetz=(mean(hetz))
valores[uu]=promHetz
}
}
}
prom=mean(as.numeric(valores))
stand=sd(as.numeric(valores))
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom,col=arr_color[l],pch=tipo[k],cex=tamany[o])
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom+stand,col=1,pch=1,cex=0.3)
points(((i-1)*1000+((k-1)*250)+((l-1)*60)+(o-1)*15)*Propor,prom-stand,col=1,pch=1,cex=0.3)
countE=countE+1
count=count+1
}
}
}
}
## ---- Panel: mean ZnF array size ----------------------------------------
## Same parameter scan; plots the mean array size per cell, with +/- 1 SD.
par(mar = c(5, 5, 0.5, 0.5), xaxs = "i")
ymax = 20
count = 0
plot(-100, -100, ylim = c(0, ymax), xlim = c(xmin, xmax),
     ylab = "Size or array", xlab = "", xaxt = "n", las = 2)
for (i in 1:length(arrayP)) {
  count = 0
  # for(j in 1:length(arrayU)){
  for (k in 1:length(arrayD)) {
    for (l in 1:length(arrayU)) {
      countE = 0
      for (o in 1:length(arrayE)) {
        for (uu in 1:nsims) {
          p = arrayP[i]
          X = arrayE[o]
          D = arrayD[k]
          C = D
          U = arrayU[l]
          # Simulation labels are numbered from 0, hence `uu - 1`.
          # Bug fix: the loop index used to be decremented in place, which
          # silently dropped replicate 0 and left valores[nsims] stale.
          label = paste("prueba_pzife_1.97_", simID, "_p", p, "_X", X,
                        "_D", D, "_C", C, "_U", U, "_s", uu - 1, sep = "")
          setwd(paste(paths, "/", label, sep = ""))
          if (count == 1 && i == 1) { text(1.5, xmax - 1, label, cex = 1) }
          er = "std_error.dat"
          val = file.info(er)$size
          if (is.na(val)) {
            fileName <- "std_output.txt"
            conn <- file(fileName, open = "r")
            linn <- readLines(conn)
            eco = linn[length(linn)]
            close(conn)
            if (substr(eco, 1, 5) == "alpha") {
              a = read.table(paste("histogramOfSizeOfZnfArray_", label,
                                   ".dat", sep = ""), header = FALSE)
              novA = a[, 3:length(a)]
              # Mean array size over all runs/generations (the redundant
              # double unlist() of the original is dropped).
              promSize = mean(as.numeric(unlist(novA)))
              valores[uu] = promSize
            }
          }
        }
        prom = mean(as.numeric(valores))
        stand = sd(as.numeric(valores))
        xpos = ((i - 1) * 1000 + ((k - 1) * 250) + ((l - 1) * 60) +
                  (o - 1) * 15) * Propor
        points(xpos, prom, col = arr_color[l], pch = tipo[k], cex = tamany[o])
        points(xpos, prom + stand, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom - stand, col = 1, pch = 1, cex = 0.3)
        countE = countE + 1
        count = count + 1
      }
    }
  }
}
## ---- Panel: mutation-type proportions (gene conversion / ZnF / point) --
## Reads generalStatistics_<label>.dat per finished run; columns 10-12 are
## event counts, column 13 the total (assumed single-row file -- confirm).
par(mar = c(5, 5, 0.5, 0.5), xaxs = "i")
ymax = 1
count = 0
plot(-100, -100, ylim = c(0, ymax), xlim = c(xmin, xmax),
     ylab = "Mut. proportions", xlab = "", xaxt = "n", las = 2)
for (ii in 1:length(arrayP)) {
  count = 0
  # for(j in 1:length(arrayU)){
  for (kk in 1:length(arrayD)) {
    for (ll in 1:length(arrayU)) {
      countE = 0
      for (oo in 1:length(arrayE)) {
        for (uu in 1:nsims) {
          p = arrayP[ii]
          X = arrayE[oo]
          D = arrayD[kk]
          C = D
          U = arrayU[ll]
          # Simulation labels are numbered from 0, hence `uu - 1`.
          # Bug fix: the loop index used to be decremented in place, which
          # silently dropped replicate 0 (valores[0] <- ... is a no-op).
          label = paste("prueba_pzife_1.97_", simID, "_p", p, "_X", X,
                        "_D", D, "_C", C, "_U", U, "_s", uu - 1, sep = "")
          setwd(paste(paths, "/", label, sep = ""))
          # Bug fix: the guard previously tested `i` (a leftover index from
          # the earlier panels) instead of this panel's loop index `ii`.
          if (count == 1 && ii == 1) { text(1.5, xmax - 1, label, cex = 1) }
          er = "std_error.dat"
          val = file.info(er)$size
          if (is.na(val)) {
            fileName <- "std_output.txt"
            conn <- file(fileName, open = "r")
            linn <- readLines(conn)
            eco = linn[length(linn)]
            close(conn)
            if (substr(eco, 1, 5) == "alpha") {
              bStats = read.table(paste("generalStatistics_", label,
                                        ".dat", sep = ""))
              # [[ ]] extracts plain numbers; the original `bStats[12]/...`
              # produced 1x1 data frames that degraded `valores` to a list.
              promGeneConv = bStats[[12]] / bStats[[13]]
              promZnf = bStats[[10]] / bStats[[13]]
              promPoint = bStats[[11]] / bStats[[13]]
              valores[uu] = promGeneConv
              valores2[uu] = promZnf
              valores3[uu] = promPoint
            }
          }
        }
        prom = mean(as.numeric(valores))
        stand = sd(as.numeric(valores))
        prom2 = mean(as.numeric(valores2))
        stand2 = sd(as.numeric(valores2))
        prom3 = mean(as.numeric(valores3))
        stand3 = sd(as.numeric(valores3))
        xpos = ((ii - 1) * 1000 + ((kk - 1) * 250) + ((ll - 1) * 60) +
                  (oo - 1) * 15) * Propor
        points(xpos, prom, col = arr_color[ll], pch = tipo[kk], cex = tamany[oo])
        points(xpos, prom + stand, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom - stand, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom2, col = arr_color[ll], pch = tipo2[kk], cex = tamany[oo])
        points(xpos, prom2 + stand2, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom2 - stand2, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom3, col = arr_color[ll], pch = tipo3[kk], cex = tamany[oo])
        points(xpos, prom3 + stand3, col = 1, pch = 1, cex = 0.3)
        points(xpos, prom3 - stand3, col = 1, pch = 1, cex = 0.3)
        countE = countE + 1
        count = count + 1
      }
    }
  }
}
axis(1, at = c(500, 1500, 2500, 3500), labels = c("0.1", "0.01", "0.1", "1"))
title(xlab = "Alpha", sub = label)
#}
#dev.off()
|
db73bcb24b12aaab2cd5976772d7fcb4486122a0
|
05304ecee805e10390c185513306d4db02ba81b5
|
/NormalCompara.R
|
5a40f8ed8a7108131f1b1aaee2f51a453f9ec4f4
|
[] |
no_license
|
Cefor/eleicoes-gerais-2014-AL-PR
|
5fea5187e2d4bcc292edd561f4fba1729679a76a
|
6efdf7aacf083fffa08cea5d0cf1a0ccba21ad86
|
refs/heads/master
| 2023-02-19T22:13:50.270110
| 2023-02-10T19:34:31
| 2023-02-10T19:34:31
| 69,697,156
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,118
|
r
|
NormalCompara.R
|
# Draw two normal densities (media1/dp1 in black, media2/dp2 in red) on a
# shared axis and hatch, for each curve, the region implied by the
# confidence level `nc` and relation `rc`:
#   rc = "=" -> central region, "<" -> lower tail, ">" -> upper tail.
dnormalComp <- function(media1 = 0, dp1 = 1, media2 = 0, dp2 = 1, nc = .95, rc = "=")
{
  # Common x-range covering +/- 4 SD of both distributions.
  lim <- range(media1 + c(-4, 4) * dp1, media2 + c(-4, 4) * dp2)
  x <- seq(lim[1], lim[2], by = 0.01)

  dens1 <- function(v) dnorm(v, media1, dp1)
  dens2 <- function(v) dnorm(v, media2, dp2)

  # Plot the taller curve first so neither curve is clipped.
  if (dens1(media1) >= dens2(media2)) {
    plot(x, dens1(x), ylab = "Densidade", xlab = "x",
         main = "Curva Normal", type = "l", lwd = 2)
    lines(x, dens2(x), lwd = 2, col = "red")
  } else {
    plot(x, dens2(x), ylab = "Densidade", xlab = "x",
         main = "Curva Normal", type = "l", lwd = 2, col = "red")
    lines(x, dens1(x), lwd = 2)
  }

  # Baseline at zero.
  lines(lim, c(0, 0))

  # Vertical markers at each mean.
  lines(c(media1, media1), c(-1, dens1(media1)), lwd = 4, type = "l")
  lines(c(media2, media2), c(-1, dens2(media2)), lwd = 4, type = "l", col = "red")

  # Region bounds for each curve, depending on the relation `rc`.
  if (rc == "=") {
    z <- qnorm(nc + (1 - nc) / 2)
    xI11 <- media1 - z * dp1
    xI12 <- media1 + z * dp1
    xI21 <- media2 - z * dp2
    xI22 <- media2 + z * dp2
  } else if (rc == "<") {
    xI11 <- media1 - 4 * dp1
    xI12 <- media1 + qnorm(1 - nc) * dp1
    xI21 <- media2 - 4 * dp2
    xI22 <- media2 + qnorm(1 - nc) * dp2
  } else if (rc == ">") {
    xI11 <- media1 + qnorm(nc) * dp1
    xI12 <- media1 + 4 * dp1
    xI21 <- media2 + qnorm(nc) * dp2
    xI22 <- media2 + 4 * dp2
  }

  # Hatch [lo, hi] under density `dens`: dashed borders plus 20 thin
  # vertical strokes (same drawing sequence as the original code).
  hatch <- function(lo, hi, dens, col) {
    step <- (hi - lo) / 20
    pos <- lo + step
    lines(c(pos, pos), c(-1, dens(pos)), col = col, lty = 4, lwd = 2)
    while (pos < hi) {
      lines(c(pos, pos), c(0, dens(pos)), col = col, lwd = 0.5)
      pos <- pos + step
    }
    lines(c(pos, pos), c(-1, dens(pos)), col = col, lty = 4, lwd = 2)
  }
  hatch(xI11, xI12, dens1, "black")
  hatch(xI21, xI22, dens2, "red")
}
|
9218d0804b12d64102a7e11de10aa8dc535f732a
|
807e5c79815760e935694563f247235aed81ab51
|
/devdataprod-016/quizzes/quiz1/server.R
|
3cb567fa67fc8dcd301993c1dbd018a752004930
|
[] |
no_license
|
gitrons62/coursera
|
19233553c261e371daf740f9fb18ad945033285e
|
4ff47d16e843b500f6f88a88b024a86d3093350a
|
refs/heads/master
| 2020-04-05T23:45:54.522845
| 2015-01-10T21:34:06
| 2015-01-10T21:34:06
| 23,267,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
server.R
|
# Shiny server for the quiz app: echoes each UI input back as a text output.
# NOTE(review): hard-coded, user-specific setwd() breaks portability; prefer
# launching the app from its own directory (e.g. shiny::runApp("<path>")).
setwd("~/R/coursera/devdataprod-016/quizzes")
shinyServer(function(input,output){
# Mirror the text/numeric/radio/slider inputs defined in ui.R.
output$myname=renderText(input$name)
output$myn2 =renderText(input$n2)
output$mygender=renderText(input$gender)
# Slider value is echoed with a short prefix label.
output$myslide1=renderText(paste("value is:",input$slide1))
}
)
|
2e5bb84746da56d2fbeaed8289e759f38ee8f6e1
|
13eb5ef9e429d6eb25739047ccda3932a580cd07
|
/knn_algorithm.R
|
77e2dfe2181bc8de36e88b0805e9ed26a4ac6522
|
[] |
no_license
|
FlonairLenz/knn_r
|
094f7e1863a98aed88e20b8bac72e4052c9eb2a7
|
4bcab4b79feb860a9e9da1cb79d7b90aaf87ea76
|
refs/heads/master
| 2020-03-13T14:05:40.834449
| 2018-04-26T16:14:14
| 2018-04-26T16:14:14
| 131,151,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
knn_algorithm.R
|
# Euclidean distance between two observations, ignoring the last entry of
# each vector (the slot that carries the class label).
# Vectorized replacement for the original element-by-element loop; also
# safe for length-1 input (returns 0 instead of indexing with 1:0).
distance <- function(x_i, x_j) {
  idx <- seq_len(length(x_i) - 1)
  sqrt(sum((x_i[idx] - x_j[idx])^2))
}

# k-nearest-neighbour classification of `x_i` against the rows of `points`.
# `points` must carry the class label in column `l`; the function returns
# (as a character string) the label most frequent among the k nearest rows.
knn <- function(k, x_i, points) {
  x_i <- as.numeric(x_i)
  n <- nrow(points)
  # Preallocate result vectors (previously grown with c() in the loop).
  distances <- numeric(n)
  categories <- character(n)
  for (j in seq_len(n)) {
    distances[j] <- distance(x_i, as.numeric(points[j, ]))
    # Explicit character coercion avoids factor-code surprises when `l`
    # is a factor column.
    categories[j] <- as.character(points[j, ]$l)
  }
  # Keep the k rows with the smallest distances and majority-vote on labels
  # (ties broken by sort order, as in the original implementation).
  nearest <- data.frame(distances, categories)[order(distances), ][seq_len(k), ]
  names(sort(summary(as.factor(nearest$categories)), decreasing = TRUE)[1])
}
|
f65c8512171bf3dc0f97f2a1b92f956f985238e3
|
eaa49ba6013f548f5db9ee9921e1c62f91451569
|
/LinearModelingLectureNotes2016.R
|
78a29e5a03ce8923eac24d102938ed3fe29599b0
|
[] |
no_license
|
vasishth/LM
|
775d90c14105157d6f17bb7dc346ea852b293e87
|
3b5d686485a7f14360db49f16cdaf93f6f2dbf42
|
refs/heads/master
| 2020-12-24T05:21:49.157988
| 2020-07-22T08:37:58
| 2020-07-22T08:37:58
| 38,547,250
| 19
| 10
| null | 2016-06-07T19:30:03
| 2015-07-04T20:47:50
|
R
|
UTF-8
|
R
| false
| false
| 32,501
|
r
|
LinearModelingLectureNotes2016.R
|
## ----include=FALSE-------------------------------------------------------
library(knitr)
# set global chunk options, put figures into folder
options(replace.assign=TRUE,show.signif.stars=FALSE)
opts_chunk$set(fig.path='figures/figure-', fig.align='center', fig.show='hold')
options(replace.assign=TRUE,width=75)
opts_chunk$set(dev='postscript')
options(show.signif.stars=FALSE)
library(lme4)
## ----cdfbinomial---------------------------------------------------------
## sample size
n<-10
## prob of success
p<-0.5
probs<-rep(NA,11)
for(x in 0:10){
## Cumulative Distribution Function:
probs[x+1]<-round(pbinom(x,size=n,prob=p),digits=2)
}
## ----echo=TRUE-----------------------------------------------------------
## Plot the CDF:
plot(1:11,probs,xaxt="n",
xlab="Prob(X<=x)",
main="CDF")
axis(1,at=1:11,labels=0:10)
## ------------------------------------------------------------------------
pbinom(1,size=10,prob=0.5)-pbinom(0,size=10,prob=0.5)
choose(10,1) * 0.5 * (1-0.5)^9
## ----pdfbinomial---------------------------------------------------------
## P(X=0)
dbinom(0,size=10,prob=0.5)
## ------------------------------------------------------------------------
## Plot the pdf:
plot(1:11,
dbinom(0:10,size=10,prob=0.5),
main="PDF",
xaxt="n")
axis(1,at=1:11,labels=0:10)
## ----normaldistr,echo=FALSE,fig.width=6----------------------------------
plot(function(x) dnorm(x), -3, 3,
main = "Normal density",ylim=c(0,.4),
ylab="density",xlab="X")
## ------------------------------------------------------------------------
pnorm(Inf)-pnorm(-Inf)
pnorm(2)-pnorm(-2)
pnorm(1)-pnorm(-1)
## ------------------------------------------------------------------------
pnorm(2)
## ------------------------------------------------------------------------
x<-0:10
## expectation in our binomial example:
sum(x*dbinom(x,size=10,prob=0.5))
## ----gamma,echo=FALSE,fig.width=6----------------------------------------
## fn refers to the fact that it
## is a function in R, it does not mean that
## this is the gamma function:
## Gamma(shape = 1, rate = 1) density, i.e. Exponential(1).
## The hand-rolled formula
##   lambda * e^(-lambda*x) * (lambda*x)^(alpha-1) / gamma(alpha)
## is exactly stats::dgamma(x, shape = alpha, rate = lambda).
gamma.fn<-function(x){
  dgamma(x, shape = 1, rate = 1)
}
x<-seq(0,4,by=.01)
plot(x,gamma.fn(x),type="l")
## ----chisq,echo=FALSE,fig.width=6----------------------------------------
## Gamma(shape = 4, rate = 1/2) density -- equivalently the chi-squared
## density with df = 8 (the original comment's n = 4 pairs of df).
## Replaces the hand-rolled formula with the equivalent stats::dgamma call.
gamma.fn<-function(x){
  dgamma(x, shape = 4, rate = 1 / 2)
}
x<-seq(0,100,by=.01)
plot(x,gamma.fn(x),type="l")
## ------------------------------------------------------------------------
(x<-rbinom(3,size=10,prob=0.5))
## ----likfun0,echo=TRUE,fig.width=6---------------------------------------
## probability parameter fixed at 0.5
theta<-0.5
prod(dbinom(x,size=10,prob=theta))
## probability parameter fixed at 0.1
theta<-0.1
prod(dbinom(x,size=10,prob=theta))
## probability parameter fixed at 0.9
theta<-0.9
prod(dbinom(x,size=10,prob=theta))
## let's compute the product for
## a range of probabilities:
theta<-seq(0,1,by=0.01)
store<-rep(NA,length(theta))
for(i in 1:length(theta)){
store[i]<-prod(dbinom(x,size=10,prob=theta[i]))
}
plot(1:length(store),store,xaxt="n",xlab="theta",
ylab="f(x1,...,xn|theta")
axis(1,at=1:length(theta),labels=theta)
## ------------------------------------------------------------------------
(x<-rbinom(3,size=10,prob=0.1))
## ----likfun,echo=TRUE,fig.width=6----------------------------------------
theta<-seq(0,1,by=0.01)
store<-rep(NA,length(theta))
for(i in 1:length(theta)){
store[i]<-prod(dbinom(x,size=10,prob=theta[i]))
}
plot(1:length(store),store,xlab="theta",
ylab="f(x1,...,xn|theta",xaxt="n")
axis(1,at=1:length(theta),labels=theta)
## ----echo=FALSE,include=FALSE--------------------------------------------
#hindi10<-read.table("datacode/hindi10.txt",header=T)
#hindi10a<-hindi10[,c(1,3,13,22,24,25,26,27,28,29,32,33)]
#write.table(hindi10a,file="datacode/hindi10a.txt")
## ----echo=FALSE----------------------------------------------------------
hindi10<-read.table("datacode/hindi10a.txt",header=T)
colnames(hindi10)
summary(hindi10$TFT)
hindi10<-subset(hindi10,TFT>0)
summary(hindi10$TFT)
## ------------------------------------------------------------------------
hist(log(hindi10$TFT),freq=FALSE)
## ------------------------------------------------------------------------
(xbar<-mean(log(hindi10$TFT)))
(xvar<-var(log(hindi10$TFT)))
## ------------------------------------------------------------------------
xvals<-seq(0,12,by=0.01)
plot(xvals,dnorm(xvals,
mean=xbar,
sd=sqrt(xvar)),
type="l",ylab="density",xlab="x")
## ----empdist,echo=TRUE,fig.width=6---------------------------------------
## The empirical distribution and
## our theoretical distribution:
hist(log(hindi10$TFT),freq=FALSE)
xvals<-seq(0,4000,by=0.01)
lines(xvals,dnorm(xvals,
mean=xbar,sd=sqrt(xvar)))
## ----solutionex1,echo=FALSE,include=FALSE--------------------------------
xbar2<-mean(hindi10$TFT)
xvar2<-var(hindi10$TFT)
hist(hindi10$TFT,freq=FALSE)
lines(xvals,dnorm(xvals,
mean=xbar2,sd=sqrt(xvar2)))
## Sample distrn is truncated at 0.
## ------------------------------------------------------------------------
## define negative log lik:
## Negative log-likelihood of a Normal(m, s) model for `data`.
## theta = c(mean, sd); the returned value is what optim() minimises.
## (Unused locals `x` and `n` from the original were removed.)
nllh.normal<-function(theta,data){
  m<-theta[1]
  s<-theta[2]
  # Sum of log densities, negated because optim() minimises.
  -sum(dnorm(data,mean=m,sd=s,log=TRUE))
}
## example output:
nllh.normal(theta=c(40,4),log(hindi10$TFT))
## find the MLEs using optim:
## need to specify some starting values:
opt.vals.default<-optim(theta<-c(500,50),
nllh.normal,
data=log(hindi10$TFT),
hessian=TRUE)
## result of optimization:
(estimates.default<-opt.vals.default$par)
## compare with MLE:
xbar
## bias corrected sd:
sqrt(xvar)
## ----sampleexp,fig.width=6-----------------------------------------------
n_rep<-1000
samp_distrn_mean<-rep(NA,n_rep)
for(i in 1:n_rep){
x<-rexp(1000)
samp_distrn_mean[i]<-mean(x)
}
op<-par(mfrow=c(1,2),pty="s")
hist(x,xlab="x",ylab="density",freq=FALSE,main="Exponentially distributed data")
hist(samp_distrn_mean,xlab="x",ylab="density",freq=FALSE,
main="Sampling distribution of mean")
## ----sampunif,fig.width=6------------------------------------------------
n_rep<-1000
samp_distrn_mean<-rep(NA,n_rep)
for(i in 1:n_rep){
x<-runif(1000)
samp_distrn_mean[i]<-mean(x)
}
op<-par(mfrow=c(1,2),pty="s")
hist(x,xlab="x",ylab="density",freq=FALSE,main ="Sampling from uniform")
hist(samp_distrn_mean,xlab="x",ylab="density",freq=FALSE,
main="Sampling from uniform")
## ----ratesofchange,echo=F,fig.width=6------------------------------------
op<-par(mfrow=c(1,2),pty="s")
plot(function(x) dnorm(x,log=F,sd=0.001), -3, 3,
main = "Normal density",#ylim=c(0,.4),
ylab="density",xlab="X")
plot(function(x) dnorm(x,log=F,sd=10), -3, 3,
main = "Normal density",#ylim=c(0,.4),
ylab="density",xlab="X")
## ----estimatedSE,fig.width=6---------------------------------------------
## analytic calculation of SE from a single expt:
## number of heads in 100 coin tosses:
n<-100
p<-0.5
(x<-rbinom(1,n=n,prob=p))
hat_p <- sum(x)/n
(SE_2<-(hat_p*(1-hat_p))/n)
(SE<-sqrt(SE_2))
## by repeated sampling:
samp_distrn_means<-rep(NA,1000)
for(i in 1:1000){
x<-rbinom(1,n=n,prob=p)
samp_distrn_means[i]<-sum(x)/n
}
hist(samp_distrn_means,xlab="x",ylab="density",
freq=F,main="The sampling distribution (binomial)")
## this is the SE of the SDSM:
sd(samp_distrn_means)
## ----samplingdistrnmeans_setup_variables,echo=FALSE----------------------
nsim<-1000
n<-100
mu<-500
sigma<-100
## ----samplingdistrnmeans_runloop-----------------------------------------
nsim<-1000
n<-100
mu<-500
sigma<-100
samp_distrn_means<-rep(NA,nsim)
samp_distrn_var<-rep(NA,nsim)
for(i in 1:nsim){
x<-rnorm(n,mean=mu,sd=sigma)
samp_distrn_means[i]<-mean(x)
samp_distrn_var[i]<-var(x)
}
## ----samplingdistrnmeans_fig,fig.width=6,echo=FALSE----------------------
op<-par(mfrow=c(1,2),pty="s")
hist(samp_distrn_means,main="Samp. distrn. means",
freq=F,xlab="x",ylab="density")
hist(samp_distrn_var,main="Samp. distrn. sd",
freq=F,xlab="x",ylab="density")
## ------------------------------------------------------------------------
## estimate from simulation:
sd(samp_distrn_means)
## estimate from a single sample of size n:
sigma/sqrt(n)
## ----variancesdsm--------------------------------------------------------
## estimate from simulation:
sd(samp_distrn_var)
## theoretical value:
(sqrt(2)*sigma^2)/sqrt(n)
## ----confint1------------------------------------------------------------
## lower bound:
mu-(2*sigma/sqrt(n))
## upper bound:
mu+(2*sigma/sqrt(n))
## ----confint2,fig.width=6------------------------------------------------
lower<-rep(NA,nsim)
upper<-rep(NA,nsim)
for(i in 1:nsim){
x<-rnorm(n,mean=mu,sd=sigma)
lower[i]<-mean(x) - 2 * sd(x)/sqrt(n)
upper[i]<-mean(x) + 2 * sd(x)/sqrt(n)
}
## check how many CIs contain mu:
CIs<-ifelse(lower<mu & upper>mu,1,0)
table(CIs)
## 95% CIs contain true mean:
table(CIs)[2]/sum(table(CIs))
## ------------------------------------------------------------------------
(X<-matrix(c(rep(1,8),rep(c(-1,1),each=4),
rep(c(-1,1),each=2,2)),ncol=3))
library(Matrix)
## full rank:
rankMatrix(X)
## det non-zero:
det(t(X)%*%X)
## ------------------------------------------------------------------------
y<-as.matrix(hindi10$TFT)
x<-log(hindi10$word_len)
m0<-lm(y~x)
## design matrix:
X<-model.matrix(m0)
head(X,n=4)
## (X^TX)^{-1}
invXTX<-solve(t(X)%*%X)
## estimated beta:
(beta<-invXTX%*%t(X)%*%y)
## estimated variance of beta:
(hat_sigma<-summary(m0)$sigma)
(hat_var<-hat_sigma^2*invXTX)
## ------------------------------------------------------------------------
## hat rho:
-21.61/(sqrt(31.36)*sqrt(16.88))
## ------------------------------------------------------------------------
round(summary(m0)$coefficients[,1:3],
digits=3)
## ----tvsnormal,fig.width=6-----------------------------------------------
range <- seq(-4,4,.01)
op<-par(mfrow=c(2,2),pty="s")
for(i in c(2,5,15,20)){
plot(range,dnorm(range),type="l",lty=1,
xlab="",ylab="",
cex.axis=1)
lines(range,dt(range,df=i),lty=2,lwd=1)
mtext(paste("df=",i),cex=1.2)
}
## ------------------------------------------------------------------------
summary(m0)$coef
## ------------------------------------------------------------------------
2*pnorm(210.78,mean=0,sd=sqrt(31.36),
lower.tail=FALSE)
2*pt(210.78/sqrt(31.36),df=length(y)-1,
lower.tail=FALSE)
## ----typesandm,cache=TRUE,echo=TRUE--------------------------------------
## probable effect size derived from past studies:
D<-15
## SE from the study of interest:
se<-46
stddev<-se*sqrt(37)
nsim<-10000
drep<-rep(NA,nsim)
for(i in 1:nsim){
drep[i]<-mean(rnorm(37,mean=D,sd=stddev))
}
##power: a depressingly low 0.056
pow<-mean(ifelse(abs(drep/se)>2,1,0))
## which cells in drep are significant at alpha=0.05?
signif<-which(abs(drep/se)>2)
## Type S error rate | signif: 19%
types_sig<-mean(drep[signif]<0)
## Type S error rate | non-signif: 37%
types_nonsig<-mean(drep[-signif]<0)
## Type M error rate | signif: 7
typem_sig<-mean(abs(drep[signif])/D)
## Type M error rate | not-signif: 2.3
typem_nonsig<-mean(abs(drep[-signif])/D)
## ------------------------------------------------------------------------
x<-1:10
y<- 10 + 2*x+rnorm(10,sd=10)
## ----simulatelm,fig.width=6----------------------------------------------
plot(x,y)
## ------------------------------------------------------------------------
## null hypothesis model:
m0<-lm(y~1)
## alternative hypothesis model:
m1<-lm(y~x)
## ------------------------------------------------------------------------
lambda<- -2*(logLik(m0)-logLik(m1))
## observed value:
lambda[1]
## critical value:
qchisq(0.95,df=1)
# p-value:
pchisq(lambda[1],df=1,lower.tail=FALSE)
## ------------------------------------------------------------------------
anova(m0,m1)
## ------------------------------------------------------------------------
sqrt(anova(m0,m1)$F[2])
summary(m1)$coefficients[2,3]
## ------------------------------------------------------------------------
X<-matrix(rep(1,10),ncol=1)
##
t(X)%*%X
## ------------------------------------------------------------------------
library(car)
vif(lm(TFT~syll_len+word_len,hindi10))
## ------------------------------------------------------------------------
m<-lm(TFT ~ word_complex + word_freq + type_freq+
word_bifreq + type_freq+
word_len + IC + SC,
hindi10)
summary(m)
round(vif(m),digits=3)
## ----residualslm,fig.width=6---------------------------------------------
library(car)
qqPlot(residuals(m))
## ----normalityresiduals,fig.width=6--------------------------------------
op<-par(mfrow=c(1,2),pty="s")
x<-1:100
y1<- 10 + 2*x+rchisq(100,df=1)
qqPlot(residuals(lm(y1~x)))
y2<- 10 + 2*x+rnorm(100,sd=10)
qqPlot(residuals(lm(y2~x)))
## ------------------------------------------------------------------------
nsim<-1000
n<-100
x<-1:n
store_y1_results<-rep(NA,nsim)
store_y2_results<-rep(NA,nsim)
for(i in 1:nsim){
e<-rchisq(n,df=1)
e<-scale(e,scale=F)
y1<- 10 + 0.01*x + e
m1<-lm(y1~x)
store_y1_results[i]<-summary(m1)$coefficients[2,4]
y2<- 10 + 0.01*x + rnorm(n,sd=1.2)
m2<-lm(y2~x)
store_y2_results[i]<-summary(m2)$coefficients[2,4]
}
## power
y1_results<-table(store_y1_results<0.05)
y1_results[2]/sum(y1_results)
y2_results<-table(store_y2_results<0.05)
y2_results[2]/sum(y2_results)
## ----acftest,fig.width=6-------------------------------------------------
acf(residuals(m))
## ----lmdiagnostics,fig.width=6-------------------------------------------
op<-par(mfrow=c(2,2),pty="s")
plot(m)
## ----boxcox1,fig.width=6-------------------------------------------------
## generate some non-normally distributed data:
data<-rchisq(100,df=1)
m<-lm(data~1)
qqPlot(residuals(m))
## ----boxcox2,fig.width=6-------------------------------------------------
library(MASS)
## suggests log:
boxcox(m)
m<-lm(log(data)~1)
## ------------------------------------------------------------------------
(beetle<-read.table("datacode/beetle.txt",header=TRUE))
## ------------------------------------------------------------------------
(beetle$propn.dead<-beetle$killed/beetle$number)
## ------------------------------------------------------------------------
with(beetle,plot(dose,propn.dead))
## ------------------------------------------------------------------------
fm<-lm(propn.dead~scale(dose,scale=FALSE),beetle)
summary(fm)
## ------------------------------------------------------------------------
with(beetle,plot(scale(dose,scale=FALSE),
propn.dead))
abline(coef(fm))
## ------------------------------------------------------------------------
fm1<-glm(propn.dead~dose,
binomial(logit),
weights=number,
data=beetle)
summary(fm1)
## ----propndeadplot,fig.width=6-------------------------------------------
plot(propn.dead~dose,beetle)
points(fm1$fitted~dose,beetle,pch=4)
## ------------------------------------------------------------------------
## compute log odds of death for
## concentration 1.7552:
x<-as.matrix(c(1, 1.7552))
#log odds:
(log.odds<-t(x)%*%coef(fm1))
## ------------------------------------------------------------------------
### compute CI for log odds:
## Get vcov matrix:
(vcovmat<-vcov(fm1))
## x^T VCOV x for dose 1.7552:
(var.log.odds<-t(x)%*%vcovmat%*%x)
## ------------------------------------------------------------------------
##lower
log.odds-1.96*sqrt(var.log.odds)
##upper
log.odds+1.96*sqrt(var.log.odds)
## ------------------------------------------------------------------------
## eta=xbeta:
eta.i<- -60+35*beetle$dose
## ------------------------------------------------------------------------
n.i <- beetle$number
## IRLS weight for the logit link: w_ii = n * mu * (1 - mu), written via
## the odds exp(eta); vectorized over eta.i.
w.ii.fn<-function(n.i,eta.i){
  odds <- exp(eta.i)
  (n.i * odds) / (1 + odds)^2
}
w.iis<-w.ii.fn(n.i,eta.i)
##weights matrix:
W<-diag(as.vector(w.iis))
## ------------------------------------------------------------------------
mu.i<-exp(eta.i)/(1+exp(eta.i))
z.i<-eta.i + ((beetle$propn.dead-mu.i))/
(mu.i*(1-mu.i))
## ------------------------------------------------------------------------
##The design matrix:
col1<-c(rep(1,8))
X<-as.matrix(cbind(col1,beetle$dose))
## update coefs:
eta.i<-solve(t(X)%*%W%*%X)%*%
t(X)%*%W%*%z.i
## ------------------------------------------------------------------------
glm1<-glm(propn.dead~dose,binomial(logit),
weights=number,data=beetle)
## ------------------------------------------------------------------------
summary(glm1)
## ----propndead2,fig.width=6----------------------------------------------
# beta.hat is (-60.71745 , 34.27033)
(eta.hat<- -60.71745 + 34.27033*beetle$dose)
(mu.hat<-exp(eta.hat)/(1+exp(eta.hat)))
# compare mu.hat with observed proportions
plot(mu.hat,beetle$propn.dead)
abline(0,1)
## ----propndead3,fig.width=6----------------------------------------------
null.glm<-glm(propn.dead~1,binomial(logit),
weights=number,data=beetle)
summary(null.glm)
plot(beetle$dose,beetle$propn.dead,xlab="log concentration",
ylab="proportion dead",main="minimal model")
points(beetle$dose,null.glm$fitted,pch=4)
## ----propndead4,fig.width=6----------------------------------------------
dose.glm<-glm(propn.dead~dose,binomial(logit),
weights=number,data=beetle)
summary(dose.glm)
plot(beetle$dose,beetle$propn.dead,xlab="log concentration",
ylab="proportion dead",main="dose model")
points(beetle$dose,dose.glm$fitted,pch=4)
## ------------------------------------------------------------------------
anova(null.glm,dose.glm)
## ------------------------------------------------------------------------
anova(dose.glm)
## ------------------------------------------------------------------------
deviance(null.glm)
## critical value:
qchisq(0.95,df=7)
## ------------------------------------------------------------------------
deviance(dose.glm)
qchisq(0.95,df=6)
## ----residualsglm,fig.width=6--------------------------------------------
op<-par(mfrow=c(2,2),pty="s")
plot(dose.glm)
## ----qqnormglm,fig.width=6-----------------------------------------------
op<- par(mfrow=c(2,2),pty="s")
plot(dose.glm$resid,
xlab="index",ylab="residuals",main="Index plot")
qqnorm(dose.glm$resid,main="QQ-plot")
hist(dose.glm$resid,xlab="Residuals",main="Histogram")
plot(dose.glm$fit,dose.glm$resid,xlab="Fitted values",
ylab="Residuals",
main="Residuals versus fitted values")
## ----loadnoisedeg--------------------------------------------------------
noisedeg<-read.table("datacode/noisedeg.txt")
## ------------------------------------------------------------------------
## returning to our noise data (noisedeg):
## here's an important fact about our data:
# different subjects have different means for no.noise and noise
# and different means for the three levels of deg
t(means.noise<-with(noisedeg,tapply(rt,list(subj,noise),mean)))
t(means.deg<-with(noisedeg,tapply(rt,list(subj,deg),mean)))
## ----xyplotnoisedeg,fig.width=6------------------------------------------
## We can visualize these differences graphically:
library(lattice)
## noise by subject (data points):
print(xyplot(rt~noise|subj,
panel=function(x,y,...){panel.xyplot(x,y,type="r")},noisedeg))
## ----xyplotnoisedeg2,fig.width=6-----------------------------------------
## same as above, but for deg:
print(xyplot(rt~deg|subj,
panel=function(x,y,...){panel.xyplot(x,y,type="r")},noisedeg))
## ------------------------------------------------------------------------
## fit a separate linear model for subject s1:
s1data<-subset(noisedeg,subj=="s1")
lm(rt~noise,s1data)
## ------------------------------------------------------------------------
## do the same for each subject using a for-loop
subjects<-paste("s",rep(1:10),sep="")
for(i in subjects){
sdata<-subset(noisedeg,subj==i)
lm(rt~noise,sdata)
}
## ------------------------------------------------------------------------
library(lme4)
lmlist.fm1<-lmList(rt~noise|subj,noisedeg)
print(lmlist.fm1$s1)
## ----noisedegplot,fig.width=6--------------------------------------------
plot(as.numeric(noisedeg$noise)-1,
noisedeg$rt,axes=F,
xlab="noise",ylab="rt")
axis(1,at=c(0,1),
labels=c("no.noise","noise"))
axis(2)
subjects<-paste("s",1:10,sep="")
for(i in subjects){
abline(lmlist.fm1[[i]])
}
abline(lm(rt~noise,noisedeg),lwd=3,col="red")
## ------------------------------------------------------------------------
t.test(coef(lmlist.fm1)[2])
## ------------------------------------------------------------------------
## the following command fits a linear model,
## but in addition estimates between-subject variance:
summary(m0.lmer<-lmer(rt~noise+(1|subj),noisedeg))
## ------------------------------------------------------------------------
ranef(m0.lmer)
## ----ranefsplot,fig.width=6----------------------------------------------
print(dotplot(ranef(m0.lmer,condVar=TRUE)))
## ----ranefsnoisedeg,fig.width=6------------------------------------------
a<-fixef(m0.lmer)[1]
newa<-a+ranef(m0.lmer)$subj
ab<-data.frame(newa=newa,b=fixef(m0.lmer)[2])
plot(as.numeric(noisedeg$noise)-1,noisedeg$rt,xlab="noise",ylab="rt",axes=F)
axis(1,at=c(0,1),labels=c("no.noise","noise"))
axis(2)
for(i in 1:10){
abline(a=ab[i,1],b=ab[i,2])
}
abline(lm(rt~noise,noisedeg),lwd=3,col="red")
## ------------------------------------------------------------------------
summary(m1.lmer<-lmer(rt~noise+(1+noise|subj),noisedeg))
## ----ranefsnoisedeg2,fig.width=6-----------------------------------------
(a<-fixef(m1.lmer)[1])
(b<-fixef(m1.lmer)[2])
newa<-a+ranef(m1.lmer)$subj[1]
newb<-b+ranef(m1.lmer)$subj[2]
## make this into a data frame:
ab<-data.frame(newa=newa,b=newb)
plot(as.numeric(noisedeg$noise)-1,noisedeg$rt,xlab="noise",ylab="rt",axes=F,
main="varying intercepts and slopes for each subject")
# Continuation of a by-subject regression plot: axes, one fitted line per
# subject, and the pooled ordinary-least-squares fit overlaid in red.
axis(1,at=c(0,1),labels=c("no.noise","noise"))
axis(2)
for(i in 1:10){
abline(a=ab[i,1],b=ab[i,2])
}
abline(lm(rt~noise,noisedeg),lwd=3,col="red")
## ----echo=FALSE,fig.width=6----------------------------------------------
# Side-by-side comparison: complete pooling (left) vs. varying intercepts
# and slopes from the fitted lmer model (right).
op<- par(mfrow=c(1,2),pty="s")  # NOTE(review): `op` is saved but par(op) is never restored
plot(as.numeric(noisedeg$noise)-1,noisedeg$rt,axes=F,xlab="noise",ylab="rt",main="ordinary linear model")
axis(1,at=c(0,1),labels=c("no.noise","noise"))
axis(2)
subjects<-paste("s",1:10,sep="")
# One per-subject line from the lmList fits (no pooling).
for(i in subjects){
abline(lmlist.fm1[[i]])
}
abline(lm(rt~noise,noisedeg),lwd=3,col="red")
# Subject-specific intercepts/slopes = fixed effects + BLUPs (partial pooling).
a<-fixef(m1.lmer)[1]
b<-fixef(m1.lmer)[2]
newa<-a+ranef(m1.lmer)$subj[1]
newb<-b+ranef(m1.lmer)$subj[2]
ab<-data.frame(newa=newa,b=newb)
plot(as.numeric(noisedeg$noise)-1,noisedeg$rt,axes=F,
main="varying intercepts and slopes",xlab="noise",ylab="rt")
axis(1,at=c(0,1),labels=c("no.noise","noise"))
axis(2)
for(i in 1:10){
abline(a=ab[i,1],b=ab[i,2])
}
abline(lm(rt~noise,noisedeg),lwd=3,col="red")
## ------------------------------------------------------------------------
# Random intercepts and slopes by subject (default treatment contrasts).
m<-lmer(rt~noise + (1+noise|subj),noisedeg)
summary(m)
## ------------------------------------------------------------------------
# Refit after switching `noise` from treatment contrasts to sum (-1/+1) contrasts.
contrasts(noisedeg$noise)
## set to sum contrasts:
contrasts(noisedeg$noise)<-contr.sum(2)
contrasts(noisedeg$noise)
m<-lmer(rt~noise + (1+noise|subj),noisedeg)
summary(m)
## ------------------------------------------------------------------------
# Same model with a hand-coded +/-1 contrast and uncorrelated random
# effects (the double-bar `||` syntax suppresses the correlation parameter).
c1<-ifelse(noisedeg$noise=="noise",-1,1)
m<-lmer(rt~c1 + (c1||subj),noisedeg)
summary(m)
## ------------------------------------------------------------------------
# Box-Hunter-Hunter shoe-wear data: cell-means model (no intercept, one
# mean per material) with a random intercept per subject.
BHHshoes<-read.table("datacode/BHHshoes.txt")
lm.full<-lmer(wear~material-1+
(1|Subject),
data = BHHshoes)
## ------------------------------------------------------------------------
# Reconstruct the fixed-effect correlation by hand from the raw
# material-A / material-B wear measurements.
b1.vals<-subset(BHHshoes,
material=="A")$wear
b2.vals<-subset(BHHshoes,
material=="B")$wear
vcovmatrix<-var(cbind(b1.vals,b2.vals))
## get covariance from off-diagonal:
covar<-vcovmatrix[1,2]
sds<-sqrt(diag(vcovmatrix))
## correlation of fixed effects:
covar/(sds[1]*sds[2])
#cf:
covar/((0.786*sqrt(10))^2)
## ------------------------------------------------------------------------
# Binomial likelihood of 46 successes in 100 trials at theta = 0.5.
dbinom(46, 100, 0.5)
## ----betaeg,echo=F,fig.width=6-------------------------------------------
# Beta(a, b) densities for increasing a = b: the density concentrates
# around 0.5 as the parameters grow.
plot(function(x)
dbeta(x,shape1=2,shape2=2), 0,1,
main = "Beta density",
ylab="density",xlab="X",ylim=c(0,3))
text(.5,1.1,"a=2,b=2")
plot(function(x)
dbeta(x,shape1=3,shape2=3),0,1,add=T)
text(.5,1.6,"a=3,b=3")
plot(function(x)
dbeta(x,shape1=6,shape2=6),0,1,add=T)
text(.5,2.6,"a=6,b=6")
## ----binomplot,echo=F,fig.width=6----------------------------------------
# Likelihood of y = 46 out of n = 100 as a function of theta.
theta=seq(0,1,by=0.01)
plot(theta,dbinom(x=46,size=100,prob=theta),
type="l",main="Likelihood")
## ----betaforbinom,echo=F,fig.width=6-------------------------------------
# Beta(46, 54): the likelihood re-expressed as a beta density.
plot(function(x)
dbeta(x,shape1=46,shape2=54),0,1,
ylab="",xlab="X")
## ----binomexample1,echo=F,fig.width=6------------------------------------
# Overlay likelihood (red), Beta(2,2) prior (dashed), and the conjugate
# Beta(48, 56) posterior on one set of axes.
##lik:
plot(function(x)
dbeta(x,shape1=46,shape2=54),0,1,
ylab="",xlab="X",col="red")
## prior:
plot(function(x)
dbeta(x,shape1=2,shape2=2), 0,1,
main = "Prior",
ylab="density",xlab="X",add=T,lty=2)
## posterior
plot(function(x)
dbeta(x,shape1=48,shape2=56), 0,1,
main = "Posterior",
ylab="density",xlab="X",add=T)
legend(0.1,6,legend=c("post","lik","prior"),
lty=c(1,1,2),col=c("black","red","black"))
## ------------------------------------------------------------------------
# Discrete Bayes: likelihood of y = 1 success in n = 1 trial at each of
# four candidate theta values.
y<-1
n<-1
thetas<-seq(0.2,0.8,by=0.2)
likelihoods<-rep(NA,4)
for(i in 1:length(thetas)){
likelihoods[i]<-dbinom(y,n,thetas[i])
}
## ------------------------------------------------------------------------
sum(likelihoods)
## ------------------------------------------------------------------------
# Uniform prior over the four candidate thetas.
(priors<-rep(0.25,4))
## ------------------------------------------------------------------------
# Posterior = (likelihood x prior) / normalizing constant.
liks.times.priors<-likelihoods * priors
## normalizing constant:
sum.lik.priors<-sum(liks.times.priors)
posteriors<- liks.times.priors/sum.lik.priors
## ------------------------------------------------------------------------
# Same computation with more data: y = 15 successes in n = 20 trials.
n<-20
y<-15
priors<-rep(0.25,4)
likelihoods<-rep(NA,4)
for(i in 1:length(thetas)){
likelihoods[i]<-dbinom(y,n,thetas[i])
}
liks.priors<-likelihoods * priors
sum.lik.priors<-sum(liks.priors)
(posteriors<- liks.priors/sum.lik.priors)
## ------------------------------------------------------------------------
posteriors
## ------------------------------------------------------------------------
# Discrete Bayes over six equally spaced thetas, data y = 15 of n = 20.
thetas<-seq(0,1,by=0.2)
priors<-rep(1/6,6)
y<-15
n<-20
likelihoods<-rep(NA,6)
for(i in 1:length(thetas)){
likelihoods[i]<-dbinom(y,n,thetas[i])
}
liks.priors<-likelihoods * priors
sum.lik.priors<-sum(liks.priors)
(posteriors<- liks.priors/sum.lik.priors)
## ------------------------------------------------------------------------
# Same grid with minimal data: a single trial with one success.
thetas<-seq(0,1,by=0.2)
priors<-rep(1/6,6)
y<-1
n<-1
j<-6 ## no. of thetas
likelihoods<-rep(NA,6)
for(i in 1:length(thetas)){
likelihoods[i]<-dbinom(y,n,thetas[i])
}
liks.priors<-likelihoods * priors
sum.lik.priors<-sum(liks.priors)
posteriors<- liks.priors/sum.lik.priors
## ----echo=F--------------------------------------------------------------
# Beta(9.2, 13.8) prior density.
x<-seq(0,1,length=100)
plot(x,dbeta(x,shape1=9.2,shape2=13.8),type="l")
## ----echo=F--------------------------------------------------------------
# Likelihood of 15 successes in 20 trials over a fine theta grid.
thetas<-seq(0,1,length=100)
probs<-rep(NA,100)
for(i in 1:100){
probs[i]<-dbinom(15,20,thetas[i])
}
plot(thetas,probs,main="Likelihood of y|theta_j",type="l")
## ----likbetaexample2,echo=F,fig.width=6----------------------------------
# Likelihood as a Beta(15, 5) density.
x<-seq(0,1,length=100)
plot(x,dbeta(x,shape1=15,shape2=5),type="l")
## ----fig.keep='none',echo=F----------------------------------------------
# Conjugate update: posterior shapes are prior shapes plus successes/failures.
thetas<-seq(0,1,length=100)
a.star<-9.2+15
b.star<-13.8+5
plot(thetas,dbeta(thetas,
shape1=a.star,
shape2=b.star),
type="l")
## ----fig.keep='none',echo=F----------------------------------------------
# Prior, likelihood, and posterior stacked in one column of panels.
par(mfrow=c(3,1))
## prior
# NOTE(review): `x` and `thetas` hold identical grids here, so mixing them
# below is harmless but confusing.
plot(thetas,dbeta(x,shape1=9.2,shape2=13.8),
type="l",
main="Prior")
## lik
probs<-rep(NA,100)
for(i in 1:100){
probs[i]<-dbinom(15,20,thetas[i])
}
plot(thetas,probs,main="Likelihood of y|theta_j",type="l")
## post
x<-seq(0,1,length=100)
a.star<-9.2+15
b.star<-13.8+5
plot(x,dbeta(x,shape1=a.star,shape2=b.star),type="l",
main="Posterior")
## ----echo=F,include=F----------------------------------------------------
# Overlay prior (solid), binomial likelihood (dashed), and posterior
# (bold dotted) for a beta prior given by mean m and standard deviation s,
# with k successes out of n trials.
# NOTE(review): the dotted name `plot.it` can be mistaken for an S3 plot
# method for class "it"; a name without a dot would be safer.
plot.it<-function(m=0.4,s=0.1,k=15,n=20){
## compute a,b from the mean/sd parameterization of the beta distribution
a.plus.b<-((m*(1-m))/s^2)-1
a<-a.plus.b*m
b<-a.plus.b-a
##prior
thetas<-seq(0,1,length=100)
plot(thetas,dbeta(thetas,shape1=a,shape2=b),type="l",main="",ylab="")
probs<-dbinom(k,n,thetas)
lines(thetas,probs,type="l",lty=2)
## post
a.star<-a+k
b.star<-b+(n-k)
lines(thetas,dbeta(thetas,shape1=a.star,shape2=b.star),lty=3,lwd=3,type="l")
}
plot.it()
plot.it(m=0.5,s=0.4,k=15,n=20)
## ----fig1,echo=F,fig.width=6---------------------------------------------
# Gamma(10000/225, 100/225) prior for the Poisson rate example below.
x<-0:200
plot(x,dgamma(x,10000/225,100/225),type="l",lty=1,main="Gamma prior",ylab="density",cex.lab=2,cex.main=2,cex.axis=2)
## ------------------------------------------------------------------------
## load data:
# NOTE(review): `data` shadows the base function utils::data within this script.
data<-c(115,97,79,131)
# Conjugate Gamma-Poisson update for the shape parameter: the prior
# shape `a` grows by the total observed count. (Shadows the numeric
# `a.star` assigned earlier in this script.)
a.star <- function(a, data) {
  a + sum(data)
}
# Conjugate Gamma-Poisson update for the rate parameter: the prior
# rate `b` grows by the number of observations. (Shadows the numeric
# `b.star` assigned earlier in this script.)
b.star <- function(b, n) {
  b + n
}
# Analytic conjugate update: Gamma(10000/225, 100/225) prior + 4 Poisson counts.
new.a<-a.star(10000/225,data)
new.b<-b.star(100/225,length(data))
## post. mean
post.mean<-new.a/new.b
## post. var:
post.var<-new.a/(new.b^2)
# Sequential updating: fold in one additional observation.
new.data<-c(200)
new.a.2<-a.star(new.a,new.data)
new.b.2<-b.star(new.b,length(new.data))
## new mean
new.post.mean<-new.a.2/new.b.2
## new var:
new.post.var<-new.a.2/(new.b.2^2)
## ----echo=T--------------------------------------------------------------
# Same model fitted by MCMC via JAGS: y[i] ~ Poisson(theta) with the
# Gamma prior above. The model file is written out with cat().
## specify data:
dat<-list(y=c(115,97,79,131))
## model specification:
cat("
model
{
for(i in 1:4){
y[i] ~ dpois(theta)
}
##prior
## gamma params derived from given info:
theta ~ dgamma(10000/225,100/225)
}",
file="datacode/poissonexample.jag" )
## specify variables to track
## the posterior distribution of:
track.variables<-c("theta")
## load rjags library:
library(rjags,quietly=T)
## define model:
pois.mod <- jags.model(
data = dat,
file = "datacode/poissonexample.jag",
n.chains = 4,
n.adapt =2000 ,quiet=T)
## run model:
# 50000 iterations thinned by 50 -> 1000 retained samples per chain.
pois.res <- coda.samples( pois.mod,
var = track.variables,
n.iter = 50000,
thin = 50 )
## ------------------------------------------------------------------------
## summarize and plot:
plot(pois.res)
## ----echo=TRUE-----------------------------------------------------------
print(summary(pois.res))
## ----fig3,echo=F---------------------------------------------------------
## lik:
# Overlay the Poisson likelihood (solid black), the gamma prior (dashed),
# and the JAGS posterior approximated as a gamma (red).
x<-0:200
plot(x,dpois(x,lambda=mean(dat$y)),type="l",ylim=c(0,.1),ylab="")
## normal approximation:
#lines(x,dnorm(x,mean=mean(dat$y),sd=sqrt(mean(dat$y))),lty=2,col="red",lwd=3)
## gamma for the likelihood:
#a/b=105.5, a/b^2=105.5
## a = 105.5*b and a=105.5*b^2
## 105.5*b = 105.5*b^2
## 105.5=105.5 * b -> b=1
## a=105.5, b=1
#lines(x,dgamma(x,shape=105.5,rate=1),
# lty=1,col="red",lwd=3)
## prior: gamma(10000/225,100/225)
lines(0:200,dgamma(0:200,shape=10000/225, rate = 100/225),
lty=2)
#posterior from JAGS:
# NOTE(review): 466.44/4.44 are hard-coded from an earlier run's summary.
lines(0:200,dgamma(0:200,shape=466.44, rate = 4.44),col="red",lwd=3)
legend(x=150,y=0.08,legend=c("lik","prior","post"),
lty=c(1,2,1),col=c("black","red","red"))
## ------------------------------------------------------------------------
# Extension: a fifth observation assumed to come from a doubled rate,
# y[5] ~ Poisson(2 * theta).
dat2<-list(y=c(115,97,79,131,200))
## model specification:
cat("
model
{
for(i in 1:4){
y[i] ~ dpois(theta)
}
y[5] ~ dpois(2*theta)
##prior
## gamma params derived from given info:
theta ~ dgamma(10000/225,100/225)
}",
file="datacode/poisexample2.jag" )
## specify variables to track
## the posterior distribution of:
track.variables<-c("theta")
## define model:
poisex2.mod <- jags.model(
data = dat2,
file = "datacode/poisexample2.jag",
n.chains = 4,
n.adapt =2000 ,quiet=T)
## run model:
poisex2.res <- coda.samples( poisex2.mod,
var = track.variables,
n.iter = 100000,
thin = 50 )
## ------------------------------------------------------------------------
print(summary(poisex2.res))
|
e693220681d6be057bcd9fb500f6a20eaab2ba0b
|
eb609bed8415c07c74967efb85c9ab063d6754a5
|
/R/toJSON.AAAgeneric.R
|
b9dca46a784cb3e8a7fbb1e1dd5db019aae845f8
|
[] |
no_license
|
cran/opencpu.encode
|
44015acd51f7fd1fb41abdef96eaf193883a8e49
|
922e50864ece2c4a778af2603a329b5bab1958b0
|
refs/heads/master
| 2016-09-06T10:53:08.837639
| 2011-08-05T00:00:00
| 2011-08-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,408
|
r
|
toJSON.AAAgeneric.R
|
# Defines the asJSON S4 generic used by all serialization methods in
# this package.
#
# Author: jeroen
###############################################################################
#This file is called AAA so that it will be run first.
#' Serialize an R object to JSON.
#'
#' This is a slightly modified version of the asJSON function in RJSONIO. This function is mostly for internal use.
#' Please use opencpu.encode instead.
#'
#' @importFrom RJSONIO fromJSON
#' @importFrom base64 encode decode
#' @export fromJSON
#' @export asJSON
#'
#' @aliases asJSON,ANY-method asJSON,AsIs-method asJSON,character-method asJSON,integer-method
#' asJSON,list-method asJSON,logical-method asJSON,matrix-method asJSON,NULL-method
#' asJSON,numeric-method asJSON,scalar-method
#' @param x the object to be serialized
#' @param container if the object always should be in a json array, even if it has length 1.
#' @param collapse a string that is used as the separator when combining the individual lines of the generated JSON content
#' @param ... further arguments passed on to the individual \code{asJSON} methods
#' @return A valid JSON string
#'
#' @note All encoded objects should pass the validation at www.jsonlint.org
#' @references
#' \url{http://www.jsonlint.org}
#' @author Jeroen Ooms \email{jeroen.ooms@@stat.ucla.edu}
#' @examples jsoncars <- opencpu.encode(cars);
#' cat(jsoncars);
#' identical(opencpu.decode(jsoncars), cars);
setGeneric("asJSON",
function(x, container = TRUE, collapse = "\n", ...){
standardGeneric("asJSON")
}
);
|
50d590267b4080dfb4e58a9a78a5975821b668ba
|
bea761df375b43eed5edaf219cf7f3f5d2cbaba3
|
/man/ovl4.Rd
|
1f8270fedc36e179e6f109dbf1e4424be5998465
|
[] |
no_license
|
cran/activity
|
e33f70b7f26e62b4e16dbac3677db0558da046bc
|
8e86e73148d0285a89962441d267ac840cd9505b
|
refs/heads/master
| 2023-03-23T14:32:57.830107
| 2023-03-02T16:20:05
| 2023-03-02T16:20:05
| 24,599,140
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 937
|
rd
|
ovl4.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity_code.r
\name{ovl4}
\alias{ovl4}
\title{Index of overlap between circular distributions.}
\usage{
ovl4(fit1, fit2)
}
\arguments{
\item{fit1, fit2}{Fitted activity models of class actmod created using function fitact.}
}
\value{
Scalar overlap index (specifically Dhat4).
}
\description{
Calculates Dhat4 overlap index (see reference) between two kernel distributions.
}
\details{
Uses linear interpolation to impute values from kernel distributions.
}
\examples{
data(BCItime)
oceAct <- fitact(subset(BCItime, species=="ocelot")$time*2*pi)
broAct <- fitact(subset(BCItime, species=="brocket")$time*2*pi)
ovl4(oceAct, broAct)
}
\references{
Ridout, M.S. & Linkie, M. (2009) Estimating overlap of daily activity patterns from camera trap data. Journal of Agricultural Biological and Environmental Statistics, 14, 322-337.
}
|
d3f1eb723c8f88758cf7b96827929f9433fb50f9
|
bdc8a312721efe4f4d41fed1a6b5cfb03f04f1a8
|
/Diatom_counts/Basic_clusters.R
|
79db45a4b011d90eb38bf4355dd7a01b7d8e1c24
|
[] |
no_license
|
sergemayombo/Diatoms
|
1a09063e72787855820305bbc5136ad4d8662e81
|
03c7d37f3616261f2718af9c8f5cb9626fbb7f5a
|
refs/heads/master
| 2020-04-22T20:52:32.113273
| 2019-05-27T12:55:26
| 2019-05-27T12:55:26
| 170,654,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,179
|
r
|
Basic_clusters.R
|
# Epiphytic diatoms associated with the South African kelp
# Diatom data exploration, analysis and presentation.
# Mayombo Ntambwe
# 11th February 2018
library(tidyverse)
library(vegan)
library(cluster)
library(ggplot2)
library(ggdendro)
library(tcltk)
library(BiodiversityR)
library(readr)
# read in the data
counts <- read_csv("Diatom_counts_tidy.csv")
# select only some columns (genus-level counts); na.omit() drops rows
# with any missing value
counts.spec <- counts %>%
select(Site, Host, Replicate, Host_spp, Host_size, Genus, Density) %>%
na.omit()
# Species-level subset of the raw tidy data (rows with NAs dropped).
# Fix: the second pipe was written as `%/%` (R's integer-division
# operator), which errors at run time when applied to a data frame and
# a function; it must be the magrittr pipe `%>%`.
counts.spec1 <- Diatom_counts_tidy %>%
  select(Site, Host, Replicate, Host_spp, Host_size, Species, Density) %>%
  na.omit()
# Genus- and species-level long-format subsets.
counts.gen <- counts %>%
select(Site, Host, Replicate, Host_spp, Host_size, Genus, Density) %>%
na.omit()
counts.dens <- counts %>%
select(Site, Host, Replicate, Host_spp, Host_size, Species, Density) %>%
na.omit()
summary(counts.spec)
# make into a wide data frame (one column per genus; absent = 0)
counts.spec <- spread(counts.spec, key = Genus, value = Density, fill = 0)
# select only some columns
counts <- counts %>%
select(Site, Host, Replicate, Host_spp, Host_size, Species, Genus, Density) %>%
na.omit()
diat_counts <- Diatom_counts_tidy %>%
select(Site, Host, Replicate, Host_spp, Host_size, Species, Genus, Density) %>%
na.omit()
# make into a wide data frame (one column per species)
counts <- spread(counts, key = Species, value = Density, fill = 0)
# presence/absence only: transform genus columns (7:41), then cluster on
# binary Bray-Curtis (= Sorensen) dissimilarity with Ward linkage
counts.spec.bin <- decostand(counts.spec[, 7:41], method = "pa")
counts.spec.dis1 <- vegdist(counts.spec.bin, method = "bray", binary = TRUE)
counts.spec.clst1 <- hclust(counts.spec.dis1, method = "ward.D2")
par(mfrow = c(2, 1))
plot(counts.spec.clst1, labels = counts$Host_spp, hang = -1)
# NOTE(review): `counts.clst1` is never defined anywhere in this script —
# only `counts.spec.clst1` exists. This and the plots below will error;
# presumably `counts.spec.clst1` was intended.
plot(counts.clst1, labels = counts$Host_size, hang = -1)
par(mfrow = c(1, 1))
# Bray-Curtis with cell densities
counts.dis2 <- vegdist(counts[, 7:41], method = "bray")
counts.clst2 <- hclust(counts.dis2, method = "ward.D2")
par(mfrow = c(2, 2))
# presence/absence only
plot(counts.clst1, labels = counts$Host_spp, hang = -1, ann = TRUE, xlab = "Host species",
main = "Presence/absence")
plot(counts.clst1, labels = counts$Host_size, hang = -1, ann = TRUE, xlab = "Host age",
main = "Presence/absence")
# Bray-Curtis (densities)
plot(counts.clst2, labels = counts$Host_spp, hang = -1, ann = TRUE, xlab = "Host species",
main = "Cell density")
plot(counts.clst2, labels = counts$Host_size, hang = -1, ann = TRUE, xlab = "Host age",
main = "Cell density")
par(mfrow = c(1, 2))
par(mfrow = c(1, 2))
# More complex and imaginative analyses are possible, as well as ordination if desired
# Analyses of diatom community strtuctures on the South African kelps
# Shannon and Simpson diversity index based on presence/absence data
head(counts.bin)
tail(counts.bin)
names(counts.bin)
ncol(counts.bin)
nrow(counts.bin)
# Shannon diversity index
shann <- diversity(counts.bin)
# Simpson diveristy index
simp <- diversity(counts.bin, "simpson")
par(mfrow = c(2,2))
hist(shann)
hist(simp)
# Pair-wise distance mesures between samples based on presence/absence data
bray = vegdist(counts.bin, "bray")
gower = vegdist(counts.bin, "gower")
hist(bray)
hist(gower)
par(mfrow = c(1,2))
# Shannon and Simpson diversity index based on abundace data
counts.spec.abund <- counts.spec[, 7:41]
head(counts.spec.abund)
tail(counts.abund)
names(counts.abund)
ncol(counts.abund)
nrow(counts.abund)
# Shannon diversity index
shann_abund <- diversity(counts.abund)
# Simpson diveristy index
simp_abund <- diversity(counts.abund, "simpson")
par(mfrow = c(2,2))
hist(shann_abund)
hist(simp_abund)
# Pair-wise distance mesures between samples based on presence/absence data
bray_abund = vegdist(counts.abund, "bray")
gower_abund = vegdist(counts.abund, "gower")
hist(bray_abund)
hist(gower_abund)
par(mfrow = c(1,2))
# Rarefaction (Rarefy and rarecurve functions) based on species abundance data
specnumber(counts.abund)
sp.abund_1 <- rowSums(counts.abund)
raremax_1 <- min(rowSums(counts.abund))
raremax_1
range(rowSums(counts.abund))
rowSums(counts.abund)
Srare_1 <- rarefy(counts.abund, raremax_1)
par(mfrow = c(1,2))
plot(sp.abund_1, Srare_1, xlab = "Observed No. of species", ylab = "Rarefied No. of species")
abline(0, 1)
rarecurve(counts.abund, step = 20, col = "Blue", cex = 0.6)
# Species accumulation curve (sample-based rarefaction via vegan::specaccum)
rowSums(counts.spec.abund)
par(mfrow = c(1,2))
diat_sp.acc = specaccum(counts.spec.abund, method = "rarefaction")
names(diat_sp.acc)
plot(diat_sp.acc, xvar = "individual", main = "individual based accumulator")
plot(diat_sp.acc, ci.type = "polygon",xlab = "Replicate", main = "confidence polygon", ci.col = "gray50")
diat_sp.acc1 = specaccum(counts.spec.bin, method = "rarefaction")
names(diat_sp.acc1)
par(mfrow = c(1,2))
plot(diat_sp.acc1, xvar = "individual", main = "individual based accumulator")
plot(diat_sp.acc1, ci.type = "polygon", main = "confidence polygon", xlab = "Replicate", ci.col = "gray50")
# Fit non-linear model to species accumulation curves
diat_sp.acc_random = specaccum(counts.spec.bin, method = "random")
diat_sp.acc_nlm = fitspecaccum(diat_sp.acc_random, model = "arrhenius")
names(diat_sp.acc_nlm)
par(mfrow = c(1,1))
plot(diat_sp.acc_nlm, xlab = "Replicate", col = "gray70")
boxplot(diat_sp.acc_random, add = TRUE, xlab = "Replicate", main = "Fit non-linear model to diatom taxa accumulation curves", pch = "+", col = "gray80")
# Comparison between species area curves for subsets of community data
# Species accumulation model on Ecklonia maxima
# NOTE(review): `counts.abund` is undefined here (see earlier note);
# `counts.spec.abund` is presumably intended throughout.
specaccum(counts.abund[counts$Host == "E_max_A",])
accumresult(counts.abund, y = counts, factor = "Host", level = "E_max_A")
accumresult(counts.abund, y = counts, factor = "Host", level = "E_max_J")
accumresult(counts.abund, y = counts, factor = "Host", level = "L_pal_A")
accumresult(counts.abund, y = counts, factor = "Host", level = "L_pal_J")
accumcomp(counts.spec.abund, y = counts.spec, factor = "Host", method = "exact", conditioned = TRUE)
accumcomp(counts.spec.abund, y = counts.spec, factor = "Host", xlim = c(0, 7), plotit = T)
?accumcomp
dim(counts.abund)
dim(counts)
# Species richness and 95% confidence intervals for kelp associated diatom assemblages using four incidence-based estimators
specpool(counts, pool = counts$Host)
(diat_pool_counts = poolaccum(counts.abund))
plot(diat_pool_counts)
# Plotting with ggplot2
library(grid)
library(gridExtra)
ggplot(data = diat_counts, aes(x = diat_counts$Host, y = diat_counts$Density, colour = diat_counts$Diatom_genus))+
geom_point()
# Comparing subsets of my data for estimated species richness
diat_counts$index = 1:length(diat_counts$Host)
diat_counts.index = as.list(unstack(diat_counts, form = index ~ Host))
diat_counts.index
pacc = function(x, data,...) {poolaccum(data[x,])}
diat_counts.sp = lapply(diat_counts.index, FUN = pacc, data = diat_counts)
diat_counts.sp
diat_counts.sp$E_max_A
par(mfrow = c(2,2))
plot(diat_counts.sp$E_max_A)
plot(diat_counts.sp$E_max_J)
plot(diat_counts.sp$L_pal_A)
plot(diat_counts.sp$L_pal_J)
par(mfrow = c(1,2))
# Abundance-based richness estimation
eacc = function(x, data,...) {estaccumR(data[x,])}
# NOTE(review): this lapply iterates over `diat_counts` (the data frame's
# columns); by analogy with the block above, `diat_counts.index` was
# presumably intended.
diat_counts.spe = lapply(diat_counts, FUN = eacc, data = diat_counts)
# Detrended correspondance analysis
library(vegan)
ord <- decorana(counts.spec[, 7:41])
ord
summary(ord)
# Non-metric multidimensional scaling (NMDS)
ord1 <- metaMDS(counts.spec[, 7:41])
ord1
plot(ord1, type = "n")
points(ord1, display = "sites", cex = 1.2, pch = 21, col = "gray50", bg = "gray70")
ggplot() +
geom_point(data = counts.spec.mds1, aes(x = NMDS1, y = NMDS2, fill = Host), pch = 21, size = 3, colour = NA)
# Analysis of similarity (ANOSIM)
# Host species
counts.ano <- with(counts, anosim(counts.dis1, Host_spp))
plot(counts.ano)
counts.ano
summary(counts.ano)
# Host size
counts.ano1 <- with(counts, anosim(counts.dis1, Host_size))
plot(counts.ano1)
counts.ano1
summary(counts.ano1)
# Host
counts.ano2 <- with(counts, anosim(counts.dis1, Host))
plot(counts.ano2)
counts.ano2
summary(counts.ano2)
# Similarity percentages (SIMPER)
# Host species
sim <- with(counts, simper(counts.abund, Host_spp))
summary(sim)
sim
# Host age
sim1 <- with(counts, simper(counts.abund, Host_size))
summary(sim1)
# Host
sim2 <- with(counts, simper(counts.abund, Host))
summary(sim2)
sim2
# Plotting species abundances
?ggplot
ggplot(data = Diatom_counts_tidy, aes(x = Host, y = Density, fill = Genus)) +
geom_bar(stat = "identity", position = position_dodge()) +
labs(x = "Host", y = "Density [cells/mm^2]")
ggplot(data = diat_counts, aes(x = Host, y = Density, fill = Diatom_genus)) +
geom_bar(stat = "identity", position = position_dodge()) +
labs(x = "Host", y = "Density (mm-2)")
ggplot(counts.spec, aes(Genera, Density, fill = Species)) +
geom_bar(stat = "identity") +
facet_grid(.~Host, drop = TRUE, scales = "free", space = "free_x") +
theme_bw() + ylab("Density") + xlab("Samples") +
scale_y_continuous(expand = c(0,0))+theme(strip.background = element_rect(fill="gray85"))+theme(panel.margin = unit(0.3, "lines")) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
ggplot(counts.av, aes(Genus, Density, fill = Genus)) +
geom_bar(stat = "identity") +
facet_grid(.~Host, drop = TRUE, scales = "free", space = "free_x") +
theme_bw() + ylab("Diatom density (cells/square millimeter)") + xlab("Diatom genus") +
scale_y_continuous(expand = c(0,0))+theme(strip.background = element_rect(fill="gray85"))+theme(panel.margin = unit(0.3, "lines")) +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
# NOTE(review): this `function()` has no braced body, so R silently takes
# the NEXT expression — `library(Rmisc)` — as the function body. The later
# ggsave() call below then passes the result of library() as the plot.
# Almost certainly not what was intended; the definition looks abandoned.
ggplot_alternative <- function()
# Summary stats:
library(Rmisc)
library(ggplot2)
counts.av <- summarySE(counts.spec, measurevar = "Density", groupvars = c("Host", "Genus"), na.rm = TRUE)
counts.av
# Plotting mean diatom abundances with error bars
ggplot(counts.av, aes(Genus, Density, fill = Genus)) +
geom_bar(stat = "identity") +
geom_errorbar(aes(ymin = Density-se, ymax = Density+se), size = .3, width = .2, position = position_dodge(.9)) +
facet_grid(.~Host, drop = TRUE, scales = "free", space = "free_x") +
theme_bw() + ylab("Diatom density (cells/square millimeter)") + xlab("Diatom genus") +
scale_y_continuous(expand = c(0,0))+theme(strip.background = element_rect(fill="gray85"))+theme(panel.margin = unit(0.3, "lines")) +
scale_fill_hue(name = "Diatom genus") +
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
ggsave(
"ggtest.png",
ggplot_alternative(),
width = 18,
height = 15,
units = "cm",
dpi = 600
)
# Ordination: Basic method
# Non-metric multidimensional scaling
library(vegan)
library(MASS)
# NOTE(review): install.packages() with no argument opens an interactive
# package chooser; it should not be in a script.
install.packages()
counts.spec.bin <- decostand(counts.spec[, 7:41], method = "pa")
counts.spec.abund <- counts.spec[, 7:41]
# NMDS with abundance data
counts.spec.mds1 <- metaMDS(counts.spec.abund, distance = "euclidean", k = 3, autotransform = TRUE)
names(counts.spec.mds1)
counts.spec.mds1
# NMDS with p/a data
counts.spec.mds2 <- metaMDS(counts.spec.bin, distance = "euclidean", k = 3, autotransform = TRUE)
names(counts.spec.mds2)
counts.spec.mds2
# NMDS plot : Sites/samples are shown by black circles, the taxa by red crosses
par(mfrow = c(1,2))
plot(counts.spec.mds1)
plot(counts.spec.mds2)
par(mfrow = c(1,2))
ordiplot(counts.spec.mds1, type = "t")
ordiplot(counts.spec.mds2, type = "t")
plot(counts.spec.mds1, type = "n")
points(counts.spec.mds1, display = "sites", cex = 1.2, pch = 21, col = "gray50", bg = "gray70")
plot(counts.spec.mds2, type = "n")
points(counts.spec.mds2, display = "sites", cex = 1.2, pch = 21, col = "gray50", bg = "gray70")
ggplot(counts.spec.mds1) +
geom_point(aes(x = NMDS1, y = NMDS2, col = Replicate, Shape = Host))
library(grid)
# NOTE(review): the closing paren after strsplit(counts[, 8]) is misplaced —
# the "," ends up as unlist()'s second argument instead of strsplit()'s
# split pattern. Presumably strsplit(counts[, 8], ",") was intended.
counts.keep <- as.numeric(unlist(strsplit(counts[, 8]), ","))
counts.fit <- envfit(counts.spec.mds1, counts[ , 2, drop = F], perm = 999, na.rm = TRUE)
counts.fit
df <- scores(counts.spec.mds1, display = c("sites"))
ggplot() +
geom_point(data = counts.spec.mds1, aes(NMDS1, NMDS2, colour = "Host"))
plot(df)
?split
# PCA (unconstrained rda)
counts.spec.pca <- rda(counts.spec.abund)
counts.spec.pca
plot(counts.spec.pca)
# NOTE(review): apply() over an rda object is unlikely to work; the
# variances live in the eigenvalues (see summary(counts.spec.pca)).
sum(apply(counts.spec.pca, 2, var))
biplot(counts.spec.pca, scaling = -1)
citation()
|
f45e2b624387fe63c888aca90ec0ca89a4e990c0
|
dab05df8a6ddf8947638c2bc2c3b5946d13771e2
|
/man/Web2.Rd
|
0277a153077faae9e05789bb40897ecc478286fe
|
[
"MIT"
] |
permissive
|
tpemartin/econR
|
2011047b7ef100b27fffd99148a7698ce7f99930
|
5df4fd5bf61b417b9860b3efc7ff20339e694fe4
|
refs/heads/master
| 2023-09-05T03:34:20.354596
| 2021-11-23T12:22:42
| 2021-11-23T12:22:42
| 335,521,237
| 0
| 4
| null | 2021-03-17T07:18:16
| 2021-02-03T05:48:23
|
HTML
|
UTF-8
|
R
| false
| true
| 229
|
rd
|
Web2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webapp2.R
\name{Web2}
\alias{Web2}
\title{web instance generator}
\usage{
Web2()
}
\value{
A web application instance object; see \code{R/webapp2.R} for details.
}
\description{
web instance generator
}
\examples{
\dontrun{Web2()}
}
|
4848891d5bc8dbfd9a136885f79a1d9f59dd1b84
|
051fd5c23ce8ebbedc506ac9028d08b3d3a45e25
|
/scripts/OC_2011.R
|
338d17b6afbd9b43e08d3b540ce7dfd97a8a2a16
|
[] |
no_license
|
TheresaStoll/Practical_session_own_data
|
cc6d13b2964045bc716b3f24e968afdd87bd58d2
|
58730c5e4a99a25b55328efd0b193334226ec26c
|
refs/heads/master
| 2020-08-09T08:29:23.546214
| 2019-10-10T05:38:42
| 2019-10-10T05:38:42
| 214,048,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
OC_2011.R
|
library(tidyverse)
#to check working directory
getwd()
#to set the working directory
#use the Tools | Change Working Dir... menu (Session | Set Working Directory on a mac).
#This will also change directory location of the Files pane.
#https://support.rstudio.com/hc/en-us/articles/200711843-Working-Directories-and-Workspaces
#to tidy up data
#Import raw data
#import data predictions from Jambeck paper
Jambeck_data_tidyverse <- read_csv("data/FullDataWPredictions.csv")
glimpse(Jambeck_data_tidyverse)
#use Lauren's approach
#header = TRUE - makes info in first row the header/info for labels
#na.strings = c("NA","") and na = c("NA","") tells R to treat both NA and empty strings
#in columns of character data to missing. This is actually the default, but I include
#it because it is possible to change the code for missing data when you read a data
#file into R.
Jambeck_data <- read.csv("data/FullDataWPredictions.csv", header = TRUE, na.strings=c("", "NA"))
Jambeck_data
glimpse(Jambeck_data)
#import OC data for 2011
OC_2011_tidyverse <- read_csv("data/GPSPPM_2011.csv")
nrow(OC_2011_tidyverse)
#nrow = to check how many rows the data has
#number of rows: 6291
#use Lauren's approach
OC_2011 <- read.csv("data/GPSPPM_2011.csv", header = TRUE, na.strings = c("", "NA"))
OC_2011
nrow(OC_2011)
#number of rows: 6291
#Clean data:
#standardize names/units and find locations where only partial info included ####
|
310ebf6e6769f02eed4a0e1b7d19324a1ca41f9f
|
5b7a0942ce5cbeaed035098223207b446704fb66
|
/man/lsGetSurveyProperties.Rd
|
f47aa8c89b15ca650d7b09f396ac4f8dd6461f39
|
[
"MIT"
] |
permissive
|
k127/LimeRick
|
4f3bcc8c2204c5c67968d0822b558c29bb5392aa
|
a4d634981f5de5afa5b5e3bee72cf6acd284c92a
|
refs/heads/master
| 2023-04-11T21:56:54.854494
| 2020-06-19T18:36:05
| 2020-06-19T18:36:05
| 271,702,292
| 0
| 1
| null | 2020-06-12T03:45:14
| 2020-06-12T03:45:14
| null |
UTF-8
|
R
| false
| true
| 1,321
|
rd
|
lsGetSurveyProperties.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lsGetSurveyProperties.R
\name{lsGetSurveyProperties}
\alias{lsGetSurveyProperties}
\title{Get survey properties}
\usage{
lsGetSurveyProperties(
surveyID,
properties = NULL,
lsAPIurl = getOption("lsAPIurl"),
sessionKey = NULL
)
}
\arguments{
\item{surveyID}{ID of the survey}
\item{properties}{\emph{(optional)} A vector with the particular property names to request, otherwise get all settings}
\item{lsAPIurl}{\emph{(optional)} The URL of the \emph{LimeSurvey RemoteControl 2} JSON-RPC API}
\item{sessionKey}{\emph{(optional)} Authentication token, see \code{\link{lsGetSessionKey}()}}
}
\value{
A list of survey properties or a single property
}
\description{
Get properties of a survey. All internal properties of a survey are available.
}
\examples{
\dontrun{
lsGetSurveyProperties("123456")
lsGetSurveyProperties("123456", properties = list("anonymized"))
lsGetSurveyProperties("123456", properties = list("adminemail",
"anonymized"))
}
}
\references{
\itemize{
\item \url{https://api.limesurvey.org/classes/remotecontrol_handle.html#method_get_survey_properties}
\item \url{https://api.limesurvey.org/classes/Survey.html} (for a list of available properties)
}
}
|
16dfa7a95b262daf083dcef169d91a8ea2cc8563
|
961523bd8d12ac6e6f5fa5a6c85bda7261d6035f
|
/computational_statistics/hw4.R
|
2a7e8a8cb3d0ef4b14a8dbdd20fe9576d0513fbd
|
[] |
no_license
|
keepproceeding/Study_R
|
d7a58bd230a1c05ea0dd6dd00551395012f186a1
|
06397f5b2aee2041e7228f2baeb40574b09de782
|
refs/heads/master
| 2023-03-02T02:35:36.589620
| 2021-02-14T14:20:51
| 2021-02-14T14:20:51
| 334,445,440
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
r
|
hw4.R
|
#1
# Lehmer "minimal standard" linear congruential generator:
#   seed_{k+1} = 16807 * seed_k mod (2^31 - 1),
# scaled by 2^31 - 1 to give pseudo-uniform draws in (0, 1).
#
# Fixes relative to the original:
#  * the result is returned instead of assigned into the global
#    workspace with `<<-` (the callers below only use the return value);
#  * the output vector is preallocated instead of grown element-wise;
#  * the sample size is a parameter (default 100, matching the old
#    hard-coded loop), a backward-compatible generalization.
random_uni <- function(seed, n = 100) {
  x <- numeric(n)  # preallocate
  for (i in seq_len(n)) {
    seed <- (16807 * seed) %% 2147483647
    x[i] <- seed / 2147483647  # scale into (0, 1)
  }
  x
}
# Inspect the generated stream, then test it: Kolmogorov-Smirnov against
# a uniform sample, and a runs test for serial independence.
random_uni(2020)
ks.test(random_uni(2020),runif(100))
# NOTE(review): install.packages() inside a script re-installs on every
# run; installation should be done once, interactively.
install.packages("snpar")
library('snpar')
runs.test(random_uni(2020))
#2
# P(X >= 1) for X ~ Binomial(6, 0.2), estimated from 1000 simulations
# two ways, then compared with the exact value 1 - 0.8^6.
length(which(rbinom(1000,6,0.2)>=1))/1000
mean(rbinom(1000,6,0.2)>=1)
0.74-(1-(0.8)^6)
# Monte Carlo estimate of the crossing probability in Buffon's needle
# experiment: a needle of length `lofneedle` dropped on parallel lines
# spaced `distance` apart crosses a line when
#   (centre offset) < (l/2) * sin(angle).
#
# @param n         number of simulated needle drops
# @param lofneedle needle length
# @param distance  spacing between the parallel lines
# @return estimated crossing probability (a single numeric)
Buffon <- function(n, lofneedle, distance) {
  half_needle <- lofneedle / 2
  half_gap <- distance / 2
  # Draw the two uniforms in the same order as before (position, then angle)
  # so seeded runs reproduce identical results.
  u_pos <- runif(n)
  u_angle <- runif(n)
  crossed <- u_pos * half_gap < half_needle * sin(u_angle * pi)
  mean(crossed)
}
# Integrand for the analytic Buffon's-needle probability:
# (2 / (pi * d)) * (l / 2) * sin(x) with l = 15, d = 20.
f <- function(x) {
  scale_const <- 2 / (pi * 20)
  scale_const * 15 * sin(x) / 2
}
integrate(f,0,pi)
result<-c()
result[5]<-Buffon(5000,15,20)
result[4]<-Buffon(1000,15,20)
result[3]<-Buffon(100,15,20)
result[2]<-Buffon(50,15,20)
result[1]<-Buffon(10,15,20)
abs(result-integrate(f,0,pi)$value)
barplot(abs(result-integrate(f,0,pi)$value),main="Buffon",names=c("10","50","100","1000","5000"),xlab="n")
|
ce08f8d8c406686c369edc12d9fd7967aa0d6fdd
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.ses/man/create_configuration_set.Rd
|
96ccb834310eafe88946fcfc3ec20132941a6096
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 856
|
rd
|
create_configuration_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ses_operations.R
\name{create_configuration_set}
\alias{create_configuration_set}
\title{Creates a configuration set}
\usage{
create_configuration_set(ConfigurationSet)
}
\arguments{
\item{ConfigurationSet}{[required] A data structure that contains the name of the configuration set.}
}
\description{
Creates a configuration set.
}
\details{
Configuration sets enable you to publish email sending events. For information about using configuration sets, see the \href{http://docs.aws.amazon.com/ses/latest/DeveloperGuide/monitor-sending-activity.html}{Amazon SES Developer Guide}.
You can execute this operation no more than once per second.
}
\section{Accepted Parameters}{
\preformatted{create_configuration_set(
ConfigurationSet = list(
Name = "string"
)
)
}
}
|
07815479c6b0d82177d1945f755ea5007d81c63b
|
e6fe0284ac73cb98b1ffd4cd4831bd65d43bd546
|
/PredictiveTextAnalyticsToCourtRoom.R
|
8e53126e02031237d8fc9919ddc45cafedd6f4af
|
[] |
no_license
|
zahidmak/dataAnalysis
|
08df3707647e6aa08721d0827f288e980028f90d
|
88814f11ba41f07275be26d1330c0b86219a5bc3
|
refs/heads/master
| 2021-04-30T22:37:07.599997
| 2016-10-17T02:25:25
| 2016-10-17T02:25:25
| 71,092,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,192
|
r
|
PredictiveTextAnalyticsToCourtRoom.R
|
# Predictive coding demo: classify Enron e-mails as "responsive" to a
# document request using a bag-of-words CART model (tm + rpart + ROCR).
# NOTE(review): rm(list=ls()) wipes the user's workspace — avoid in scripts.
rm(list=ls())
emails=read.csv("C:/Users/Zahid/Downloads/energy_bids.csv", stringsAsFactors=FALSE)
str(emails)
# Inspect a couple of labelled examples and the class balance.
emails$email[1]
emails$responsive[1]
emails$email[2]
emails$responsive[2]
table(emails$responsive)
# --- Text preprocessing -------------------------------------------------
library(tm)
corpus=Corpus(VectorSource(emails$email))
corpus[[1]]
# Lowercase, strip punctuation and English stopwords, stem.
# NOTE(review): with modern tm, plain `tolower` should be wrapped in
# content_transformer(); as written this can corrupt the corpus structure.
corpus=tm_map(corpus, tolower)
corpus=tm_map(corpus, removePunctuation)
corpus=tm_map(corpus, removeWords, stopwords("english"))
corpus=tm_map(corpus, stemDocument)
# Document-term matrix, then drop terms absent from > 97% of documents.
dtm=DocumentTermMatrix(corpus)
dtm
dtm=removeSparseTerms(dtm,0.97)
labeledTerms=as.data.frame(as.matrix(dtm))
labeledTerms$responsive=emails$responsive
str(labeledTerms)
# --- 70/30 train/test split, stratified on the outcome ------------------
library(caTools)
set.seed(144)
split=sample.split(labeledTerms$responsive, 0.7)
train = subset(labeledTerms, split==TRUE)
test = subset(labeledTerms, split==FALSE)
# --- CART classification tree -------------------------------------------
library(rpart)
library(rpart.plot)
emailCART= rpart(responsive~., data=train, method="class")
prp(emailCART)
# Predicted class probabilities; column 2 is P(responsive).
pred=predict(emailCART, newdata=test)
pred[1:10,]
pred.prob= pred[,2]
# Confusion matrix at a 0.5 cutoff vs. the baseline class distribution.
table(test$responsive, pred.prob>=0.5)
table(test$responsive)
# --- ROC curve and AUC ---------------------------------------------------
library(ROCR)
predROCR= prediction(pred.prob, test$responsive)
ROCRperf= performance(predROCR, "tpr", "fpr")
plot(ROCRperf, colorize=TRUE)
performance(predROCR, "auc")@y.values
|
66b4be1d3df20bbbda171144273e0c92cdff82a5
|
4664b04b2bfc82ed82e300822f61229932cbfd32
|
/tools/phylotime_tools.R
|
fb68cf62e4e1a67f2f14b7ca790120b40816fb49
|
[] |
no_license
|
DomBennett/Project-EPI
|
d71bac553c5c988b5f7798f062faa329dafd62ac
|
f464dc1c53643ee101063e4d9c4cb7b06f0a4b83
|
refs/heads/master
| 2021-05-01T10:40:55.384250
| 2018-04-16T09:57:33
| 2018-04-16T09:57:33
| 14,775,462
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
phylotime_tools.R
|
# Assign `val` into node_obj[[txid]][[nm]]; if a value is already present,
# store the running mean of the old and new values instead.
# NOTE(review): `node_obj` and `txid` are free variables — the target taxon
# is whatever `txid` happens to be in the calling environment, not a
# parameter. Confirm this is intended.
# BUG(review): `<-` inside a function modifies a *local* copy of node_obj,
# so the update is lost when the function returns; `<<-` (or assignment
# into an explicit environment) was almost certainly intended.
assgnWMean <- function(val, nm) {
# assign a value to node_obj multiple times
# if value already present, work out mean
if(is.null(node_obj[[txid]][[nm]])) {
node_obj[[txid]][[nm]] <- val
} else {
# otherwise get mean
node_obj[[txid]][[nm]] <-
(node_obj[[txid]][[nm]] + val)/2
}
}
# Load the i-th tree from an .RData file under the global `tree_dir`.
# The file defines either a single object `tree` (returned regardless of
# `i`) or a list `trees` (the i-th element is returned).
getTree <- function(i, tree_file) {
# looks up ith tree in tree_file
# trees are saved as RData to save processing
# if tree is not updated, tree is updated
# load() populates this function's frame with the file's objects.
load(file=file.path(tree_dir, tree_file))
if('tree' %in% ls()) {
return(tree)
}
tree <- trees[[i]]
# Drop the (possibly large) list before returning the single tree.
rm(trees)
tree
}
# Number of trees stored in `tree_file` (under the global `tree_dir`):
# 1 if the file holds a single `tree`, otherwise the 'ntrees' entry of
# the `trees` list.
getNtrees <- function(tree_file) {
load(file=file.path(tree_dir, tree_file))
if('tree' %in% ls()) {
return(1)
}
# NOTE(review): single brackets return a one-element *list*, not a scalar
# — callers expecting a number may want trees[['ntrees']].
trees['ntrees']
}
# Fair-proportion-style metric without bigmemory: each node's span (branch
# length, slot "spn") is divided equally among its descendant tips, and
# the shares are summed per requested tip.
# `tree` is a treeman TreeMan object (uses getNdSlt/getNdKids and the
# @all/@ntips/@nall/@tips slots); `tids` selects the tips to report.
calcFrPrp2 <- function(tree, tids, progress="none") {
# treeman function without bigmemory
.calc <- function(i) {
id <- tree@all[i]
spn <- getNdSlt(tree, "spn", id)
kids <- getNdKids(tree, id)
if(length(kids) == 0) {
# Tip node: keeps its whole span.
spn_shres[i, id] <<- spn
} else {
# Internal node: span shared equally among its descendant tips.
spn_shre <- spn/length(kids)
spn_shres[i, kids] <<- spn_shre
}
}
# Rows = nodes, columns = tips; filled via `<<-` from .calc above.
spn_shres <- matrix(0, ncol=tree@ntips, nrow=tree@nall)
colnames(spn_shres) <- tree@tips
plyr::m_ply(.data=data.frame(i=1:tree@nall), .fun = .calc,
.progress=progress)
# Total span share accumulated by each requested tip.
colSums(spn_shres[, tids])
}
|
a1fd5a5a354bdea295d122cd5234411b02b8ddb4
|
ffc352b6c70d8a9ee7712cede9afeba5a028fc06
|
/scripty.R
|
02e8cbe13439126b9e98e000d2690db340589fb1
|
[] |
no_license
|
ssarapark/polity-genderdiscrimination
|
11bbed90166c47287a743eca16fd817e6d36bffb
|
f1fc1b385d0eca81396e43275a1c9eb61eec680e
|
refs/heads/main
| 2023-02-06T14:03:50.636798
| 2020-12-13T02:28:49
| 2020-12-13T02:28:49
| 320,966,006
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
scripty.R
|
# Read OECD gender-related indicator data (one row per country x subject x
# year), keeping only the columns used downstream.
oecd <- read_csv("finalproject/oecd_data.csv",
col_types = cols(LOCATION = col_character(),
INDICATOR = col_character(),
SUBJECT = col_character(),
MEASURE = col_character(),
FREQUENCY = col_character(),
TIME = col_double(),
Value = col_double(),
'Flag Codes' = col_logical())) %>%
select(LOCATION, SUBJECT, TIME, Value) %>%
# NOTE(review): arrange() with no sort variables is a no-op.
arrange()
# Pre-built country/continent lookup previously joined to the OECD data.
oecd_country_continent <- read_csv("finalproject/oecd_country_continent.csv",
col_types = cols(X1 = col_double(),
LOCATION = col_character(),
SUBJECT = col_character(),
TIME = col_double(),
Value = col_double(),
Continent = col_character()))
# Full V-Dem country-year dataset (column types guessed by read_csv).
V_Dem <- read_csv("finalproject/V-Dem-CY-Full+Others-v10.csv")
# Regress the gender civil-liberties index on the electoral-democracy
# (polyarchy) index.
model <- lm(v2x_gencl ~ v2x_polyarchy,
data = V_Dem)
summary(model)
|
925f88e5ee9b24804412d8760fcee50f751e2bbf
|
fe254ef6be0bd316d41b6796ef28f1c9e1d5551e
|
/R/aDist.R
|
e691c5571da84b8a779465c279da76aa2592a5d0
|
[] |
no_license
|
matthias-da/robCompositions
|
89b26d1242b5370d78ceb5b99f3792f0b406289f
|
a8da6576a50b5bac4446310d7b0e7c109307ddd8
|
refs/heads/master
| 2023-09-02T15:49:40.315508
| 2023-08-23T12:54:36
| 2023-08-23T12:54:36
| 14,552,562
| 8
| 6
| null | 2019-12-12T15:20:57
| 2013-11-20T09:44:25
|
C++
|
UTF-8
|
R
| false
| false
| 4,076
|
r
|
aDist.R
|
#' Aitchison distance
#'
#' Computes the Aitchison distance between two observations, between two data
#' sets or within observations of one data set.
#'
#' This distance measure accounts for the relative scale property of
#' compositional data. It measures the distance between two compositions if
#' \code{x} and \code{y} are vectors. It evaluates the sum of the distances between
#' \code{x} and \code{y} for each row of \code{x} and \code{y} if \code{x} and
#' \code{y} are matrices or data frames. It computes a n times n distance matrix (with n
#' the number of observations/compositions) if only \code{x} is provided.
#'
#'
#' The underlying code is partly written in C and allows a fast computation also for
#' large data sets whenever \code{y} is supplied.
#'
#' @aliases aDist iprod
#' @param x a vector, matrix or data.frame
#' @param y a vector, matrix or data.frame with equal dimension as \code{x} or NULL.
#' @return The Aitchison distance between two compositions or between two data
#' sets, or a distance matrix in case code{y} is not supplied.
#' @author Matthias Templ, Bernhard Meindl
#' @export
#' @seealso \code{\link{pivotCoord}}
#' @references Aitchison, J. (1986) \emph{The Statistical Analysis of
#' Compositional Data} Monographs on Statistics and Applied Probability.
#' Chapman and Hall Ltd., London (UK). 416p.
#'
#' Aitchison, J. and Barcelo-Vidal, C. and Martin-Fernandez, J.A. and
#' Pawlowsky-Glahn, V. (2000) Logratio analysis and compositional distance.
#' \emph{Mathematical Geology}, \bold{32}, 271-275.
#'
#' Hron, K. and Templ, M. and Filzmoser, P. (2010) Imputation of missing values
#' for compositional data using classical and robust methods
#' \emph{Computational Statistics and Data Analysis}, vol 54 (12), pages
#' 3095-3107.
#' @keywords math arith
#' @examples
#'
#' data(expenditures)
#' x <- xOrig <- expenditures
#' ## Aitchison distance between two 2 observations:
#' aDist(x[1, ], x[2, ])
#'
#' ## Aitchison distance of x:
#' aDist(x)
#'
#' ## Example of distances between matrices:
#' ## set some missing values:
#' x[1,3] <- x[3,5] <- x[2,4] <- x[5,3] <- x[8,3] <- NA
#'
#' ## impute the missing values:
#' xImp <- impCoda(x, method="ltsReg")$xImp
#'
#' ## calculate the relative Aitchsion distance between xOrig and xImp:
#' aDist(xOrig, xImp)
#'
# Two modes (see roxygen block above):
#  * y supplied: total Aitchison distance between corresponding rows of
#    x and y, computed by the package's C routine "da".
#  * y NULL: full n-x-n distance object via Euclidean distance on the
#    centred-logratio (clr) transform of x.
`aDist` <-
function(x, y = NULL){
if(!is.null(y)){
# Promote bare compositions (vectors) to 1-row matrices.
if(is.vector(x)) x <- matrix(x, ncol=length(x))
if(is.vector(y)) y <- matrix(y, ncol=length(y))
n <- dim(x)[1]
p <- D <- dim(x)[2]
rn <- rownames(x)
# The C routine expects the matrices flattened row-major, hence t().
matOrig <- as.numeric(t(x))
matImp <- as.numeric(t(y))
dims <- as.integer(c(n, p))
rowDists <- as.numeric(rep(0.0, n))
distance <- as.numeric(0.0)
# [[5]] extracts the accumulated total distance from the .C output list.
out <- .C("da",
matOrig,
matImp,
dims,
rowDists,
distance,
PACKAGE="robCompositions", NUOK=TRUE
)[[5]]
# Retired pure-R implementation of the pairwise distance matrix:
# } else if(is.null(y) & method == "R"){
# out <- matrix(, ncol = n, nrow = n)
# gms <- apply(x, 1, function(x) gm(as.numeric(x)))
# for(i in 1:(n-1)){
# for(j in (i+1):n){
# out[i, j] <- out[j, i] <-
# sqrt(sum((log(as.numeric(x[i, ]) / gms[i]) -
# log(as.numeric(x[j, ]) / gms[j]))^2))
# }
# }
# diag(out) <- 0
# rownames(out) <- colnames(out) <- rn
} else {
if(is.vector(x)) x <- matrix(x, ncol=length(x))
n <- dim(x)[1]
p <- D <- dim(x)[2]
rn <- rownames(x)
# Aitchison distance equals Euclidean distance in clr coordinates.
out <- dist(cenLR(x)$x.clr)
}
return(out)
}
#' @rdname aDist
#' @export
#' @examples
#' data("expenditures")
#' aDist(expenditures)
#' x <- expenditures[, 1]
#' y <- expenditures[, 2]
#' aDist(x, y)
#' aDist(expenditures, expenditures)
# Inner product of two compositions in log-ratio space.
# NOTE: the formula is known to be incorrect (see the warning, kept for
# compatibility with existing behaviour).
#
# @param x,y compositions of equal length
# @return a single numeric inner-product value
iprod <- function(x, y){
  warning("wrong formula, has to be fixed.")
  D <- length(x)
  if(D != length(y)) stop("x and y should have the same length")
  # Consecutive log-ratios of each composition (drop last / drop first).
  lr_x <- log(as.numeric(x[-D]) / as.numeric(x[-1]))
  lr_y <- log(as.numeric(y[-D]) / as.numeric(y[-1]))
  sum(lr_x * lr_y) / D
}
|
ce546cc37c5941301f8bbe7dd40db27f2ff83235
|
829ddd5de43968ccd4da02cdce17bf5a5a343a57
|
/src/a/phytometer/plantcounts.R
|
34ff20eff5311e05d0eede6b1630ffeaeda80b37
|
[] |
no_license
|
martinzbinden/droughtlegacy_old
|
cd99f453f804474423f109a7d666f106d8ea6090
|
97154c99f83adde0f94948867d51ce8e3c3e2b23
|
refs/heads/master
| 2021-01-19T22:33:34.521179
| 2014-11-20T22:07:36
| 2014-11-20T22:07:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 771
|
r
|
plantcounts.R
|
##example dataset
## 11 means more than 10 -> means may not be accurate!
# Fixed: `place` previously had 14 entries while every other column had 12,
# so data.frame() errored ("arguments imply differing number of rows").
df <- data.frame(
  place = c(rep("ZOL", 4), rep("ZOL", 2), rep("THU", 6)),
  plot = c(rep(1, 4), rep(2, 2), rep(1, 6)),
  subplot = c(rep(2, 4), rep(3, 2), rep(2, 6)),
  species = c(rep("linum", 6), rep("linum", 4), rep("silene", 2)),
  rep = c(1:4, 1:2, 1:4, 1:2),
  count = c(4, 7, 9, 2, 0, 1, 7, 10, 11, 11, 9, 7)
)
# instead read csv-file
# NOTE(review): the path ends in ".R" and spells "plancounts" (missing a
# "t") — confirm the intended data file before running.
df <- read.csv("data/a/phytometer/plancounts.R", sep = ";") # maybe more options needed, e.g. header etc.
str(df)
df
# Per (place, plot, subplot, species) group: total, mean and full summary.
aggregate(count ~ place + plot + subplot + species, df, sum)
aggregate(count ~ place + plot + subplot + species, df, mean)
aggregate(count ~ place + plot + subplot + species, df, summary)
## to do: statistics (plots with at least y plants etc.)
# Fixed: `mean(df$)` was a syntax error; the overall mean count is intended.
mean(df$count)
# Fixed: base table() does not accept a formula; xtabs() builds the
# plot-wise count table.
xtabs(count ~ plot, df)
# (Removed two stray REPL leftovers: a bare `n` and `rep("c", 3)`.)
|
922617cbbeb98bc20463a84ad9b9e0910a8212ac
|
8ce288d090c16bfc69402e69ee2c6a3cd540fb12
|
/inst/unitTests/test_dim.R
|
501e9d2b2a779c14c97353e2fe4169f30180521e
|
[] |
no_license
|
MattNapsAlot/bigDataFrame
|
df57bb9dc71ea6f3b2e21bb4c50f6a04d17271b4
|
68ef32d618559aa5a615601b85c2101b0f5c5137
|
refs/heads/master
| 2020-05-16T22:43:34.991909
| 2012-01-03T04:17:29
| 2012-01-03T04:17:29
| 3,043,211
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
r
|
test_dim.R
|
# RUnit fixture hook run before each test in this file; nothing to prepare.
.setUp <-
function()
{
}
# RUnit fixture hook run after each test; nothing to clean up.
.tearDown <-
function()
{
}
# Tests the dim<- contract of BigDataFrame: a fresh object reports 0 x 0,
# an invalid length-3 dim assignment is ignored, and once dimensions are
# established further dim<- calls are ignored.
unitTestSetDims <-
function()
{
df <- new(Class="BigDataFrame", hdfFile=tempfile(fileext=".h5"))
checkTrue(all(dim(df) == 0))
checkEquals(length(dim(df)), 2)
# A 3-element dim vector is rejected; dims remain 0 x 0.
dim(df) <- c(0,0,0)
checkTrue(all(dim(df) == c(0,0)))
# NOTE(review): this expects c(1,1) to be ignored on the same object —
# presumably 0 x 0 already counts as "set"; confirm against the class.
dim(df) <- c(1,1)
checkTrue(all(dim(df) == c(0,0)))
df <- new(Class="BigDataFrame", hdfFile=tempfile(fileext=".h5"))
checkTrue(all(dim(df) == 0))
# First valid assignment sticks; subsequent assignments are no-ops.
dim(df) <- c(10,20)
checkTrue(all(dim(df) == c(10,20)))
dim(df) <- c(1,1)
checkTrue(all(dim(df) == c(10,20)))
}
|
c08c3c63f8fc25267ec3700febf6d589f39d6365
|
b6f822b70438a41ff973fb0bacad20321805606a
|
/figures/Encelia_maps.R
|
d7b5a012ec157e43ddbc5a0fde9470d141907a0f
|
[] |
no_license
|
singhal/encelia_phylogeny
|
213a89e178ce754bf2264a5097fa67aec96c5b33
|
962ffe685835c6dc95399f763a6681ec502a1f85
|
refs/heads/main
| 2023-02-11T03:17:03.128569
| 2021-01-14T00:06:41
| 2021-01-14T00:06:41
| 329,459,390
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,199
|
r
|
Encelia_maps.R
|
library(ggplot2)
library(cowplot)
library(rnaturalearth)
library("rnaturalearthdata")
theme_set(theme_cowplot())
library(readr)
library(dplyr)
library(tidyr)
library(RColorBrewer)
library(ggtree)
world <- ne_countries(scale = "small", returnclass = "sf")
# sampled points
spoints = read_csv("~/Dropbox/Encelia/analysis/spatial_analyses/georef_samples.csv")
x = read_csv("~/Dropbox/Encelia/ddRAD/analysis/encelia_samples_v4.csv")
spoints = inner_join(spoints, x, by = c("sample" = "sample"))
# pts
pts = read_csv("~/Dropbox/Encelia/analysis/spatial_analyses/encelia/all_points_thinned.csv")
pts2 = pts[grep("Enc", pts$species), ]
pts3 = pts2[which(pts2$species == "Encelia canescens"), ]
pts4 = pts2[which(pts2$species != "Encelia canescens"), ]
########################
# tree plot
########################
t = ggtree::read.tree("~/Dropbox/Encelia/analysis/phylogeny/concatenated/RAxML_bipartitions.boot")
outs = c("XYL", "ENC-1", "ENC-2")
t1 = root(t, outs, resolve.root = T)
t2 = drop.tip(t1, outs)
t3 = read.tree(text = write.tree(ladderize(t2)))
lins = unique(dplyr::pull(x[match(t3$tip.label, x$sample), "lineage"]))
lins2 = data.frame(lineage = x[match(t3$tip.label, x$sample), "lineage"],
tips = t3$tip.label, stringsAsFactors = F)
getPalette = colorRampPalette(brewer.pal(12, "Set3"))
cols = getPalette(length(lins))
names(cols) = sort(lins)
linnames = gsub("Encelia_", "E. ", lins)
linnames = gsub("_", " ", linnames)
tt = ggtree(t3)
for (i in 1:length(lins)) {
tips = lins2[lins2$lineage == lins[i], "tips"]
node = findMRCA(t3, tips, type = 'node')
tt = tt + geom_hilight(node= node, fill = cols[lins[i]], alpha=0.5) +
geom_cladelabel(node, linnames[i], fontface = "italic", offset=0,
barsize = NA, angle=0, offset.text=0.0005,
align = T, size = 1)
}
tt = tt + xlim(0, 0.05) +
geom_point2(aes(subset = !is.na(as.numeric(label)) & as.numeric(label) >= 95),
size = 0.2) +
geom_point2(aes(subset = !is.na(as.numeric(label)) & as.numeric(label) < 95),
size = 0.7, fill = "white", shape = 21)
spmaps = lins
spmaps = spmaps[!spmaps %in% c("Encelia_farinosa_phenicodonta",
"Encelia_californica2",
"Encelia_virginensis2",
"Encelia_frutescens_glandulosa")]
spmaps = gsub("\\d+", "", spmaps)
spmaps = gsub("_", " ", spmaps)
spmaps[which(spmaps == "Encelia actoni")] = "Encelia actoni"
spmaps[which(spmaps == "Encelia frutescens frutescens")] = "Encelia frutescens"
spmaps[which(spmaps == "Encelia farinosa farinosa")] = "Encelia farinosa"
spplots = vector("list", length(spmaps))
tips2n = gsub("Encelia ", "E. ", spmaps)
spoints[spoints$lineage.x == "Encelia actonii", "lineage.x"] = "Encelia actoni"
pts2[pts2$species == "Encelia actonii", "species"] = "Encelia actoni"
ptsalpha = 1 - log(table(pts2$species)) / 8
names(cols) = gsub("_", " ", names(cols))
spoints$lineage.y = gsub("_", " ", spoints$lineage.y)
for (i in 1:length(spmaps)) {
if (spmaps[i] == "Encelia canescens") {
xlim = c(-80, -50)
ylim = c(-40, 10)
} else {
xlim = c(-120.51, -108.7)
ylim = c(24.33, 37.9)
}
sub = spoints %>% filter(lineage.x == spmaps[i])
spplots[[i]] = ggplot(data = world) +
geom_sf(color = "gray80", fill = "gray80") +
xlim(xlim) +
ylim(ylim) +
geom_point(data = pts2 %>% filter(species == spmaps[i]),
aes(Longitude, Latitude), size = 0.5,
alpha = ptsalpha[spmaps[i]]) +
geom_point(data = sub,
aes(LONGITUDE, LATITUDE, fill = lineage.y),
size = 1.8, shape = 21) + ggtitle(tips2n[i]) +
scale_fill_manual(values = cols[unique(sub$lineage.y)]) +
theme_void() +
theme(plot.title = element_text(size=10, face="italic", hjust = 0.5),
legend.position = "none")
}
# special ones
# farinosa, californica, virginensis, frutescens
layout <- "
ADBC
AEF#
AGHI
AJK#
ALM#
"
png("~/Dropbox/Encelia/manuscripts/Encelia_Phylogeny/figures/Encelia_maps_gray.png", width = 8, height = 6, units = "in", res = 200)
tt + spplots + plot_layout(design = layout, widths = c(4, 1, 1, 1))
dev.off()
|
462bb09bd6998a1ab68159e5e97db809a7b78360
|
13fcf4ad90ebdaf4cb04cfd0c3453e57cd32d851
|
/man/getDoTerm.Rd
|
780d5558044f496360e195417c358397e24c5f38
|
[] |
no_license
|
cran/DOSim
|
eaf99f78e9dee69d12811ff5cd8726d957fa3aaf
|
a8d198f75aa4e90910612c42c4092f3bb1763819
|
refs/heads/master
| 2021-01-19T07:54:29.405777
| 2012-02-12T00:00:00
| 2012-02-12T00:00:00
| 17,717,409
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
rd
|
getDoTerm.Rd
|
\name{getDoTerm}
\alias{getDoTerm}
\title{
Get DO term's name
}
\description{
Returns the list of DO term's name associated to each DO ID.
}
\usage{
getDoTerm(dolist)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dolist}{
character vector of DO IDs
}
}
\value{
List with entry names for each DO ID. Each entry contains a character represents DOID's term name.
}
\author{
Jiang Li<\email{riverlee2008@gmail.com}>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{getDoAnno}}
}
\examples{
################################
#Example
terms<-c("DOID:934","DOID:1579")
res<-getDoTerm(terms)
print(res)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
0cd58dde90ad759ed96357dc08046e61cd88ba8d
|
17207b55047c6a7141fae2c6c0a3326a114206be
|
/man/Canonicalization-class.Rd
|
55f4cbae7c8716724084a75351b9b54efec54325
|
[
"Apache-2.0"
] |
permissive
|
bedantaguru/CVXR
|
eab395c4262b8404c11d8ba6196d0368a0b1887f
|
b1b9b0cb98ab909bc3781e96d3720dde37706dbd
|
refs/heads/master
| 2020-11-30T00:04:26.138136
| 2019-12-11T21:31:28
| 2019-12-11T21:31:28
| 230,246,740
| 1
| 0
| null | 2019-12-26T10:43:28
| 2019-12-26T10:43:27
| null |
UTF-8
|
R
| false
| true
| 1,620
|
rd
|
Canonicalization-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reductions.R
\docType{class}
\name{Canonicalization-class}
\alias{Canonicalization-class}
\alias{.Canonicalization}
\alias{perform,Canonicalization,Problem-method}
\alias{invert,Canonicalization,Solution,InverseData-method}
\alias{canonicalize_tree,Canonicalization-method}
\alias{canonicalize_expr,Canonicalization-method}
\title{The Canonicalization class.}
\usage{
\S4method{perform}{Canonicalization,Problem}(object, problem)
\S4method{invert}{Canonicalization,Solution,InverseData}(object, solution, inverse_data)
\S4method{canonicalize_tree}{Canonicalization}(object, expr)
\S4method{canonicalize_expr}{Canonicalization}(object, expr, args)
}
\arguments{
\item{object}{A \linkS4class{Canonicalization} object.}
\item{problem}{A \linkS4class{Problem} object.}
\item{solution}{A \linkS4class{Solution} to a problem that generated the inverse data.}
\item{inverse_data}{An \linkS4class{InverseData} object that contains the data encoding the original problem.}
\item{expr}{An \linkS4class{Expression} object.}
\item{args}{List of arguments to canonicalize the expression.}
}
\description{
This class represents a canonicalization reduction.
}
\section{Methods (by generic)}{
\itemize{
\item \code{perform}: Recursively canonicalize the objective and every constraint.
\item \code{invert}: Performs the reduction on a problem and returns an equivalent problem.
\item \code{canonicalize_tree}: Recursively canonicalize an Expression.
\item \code{canonicalize_expr}: Canonicalize an expression, w.r.t. canonicalized arguments.
}}
|
7d2e0696382f169ca77ece3d7edf92f5bc0f3858
|
ec2c6eb45f2155d66f4daa62cb9cf60211b769d7
|
/factors.R
|
d3bff1a4aef2825c2b3e9550f74fd1a46d67d32c
|
[] |
no_license
|
mindcrime/LearningR
|
dedf431d39d4622e6d601b395bbae9147cab1e41
|
04de699585927edc73797bf78a4b5bf18ce45a7e
|
refs/heads/master
| 2020-05-09T18:10:34.277064
| 2015-12-25T12:14:59
| 2015-12-25T12:14:59
| 40,801,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
factors.R
|
# Working with factors
# Filename: factors.R
# Author: prhodes
###############################################################################
# factors are for storing "categorical" data (ex, "male" and "female" or "small", "medium", "large", etc.)
# if you create a data frame with a column of text data, R will assume by
# default that the text is categorical and convert to factors
heights <- data.frame(
height_cm = c( 153, 181, 150, 172, 165, 149, 174, 169, 198, 163),
gender = c( "female", "male", "female", "male", "male",
"female", "female", "male", "male", "female" )
)
heights
# confirm that gender is a factor now
class( heights$gender )
heights$gender
# "female" and "male" have been defined as the "levels" of our factor
# so now we can't do this
heights$gender[1] <- "Female" # note the uppercase F
# this would give us "invalid factor level, NA generated"
# another way to see the levels
levels( heights$gender )
# if we just want a count of the levels
nlevels( heights$gender )
# You can also create factors explicitly like so:
gender_char = c( "female", "male", "female", "male", "male",
"female", "female", "male", "male", "female" )
gender_fac <- factor( gender_char )
levels( gender_fac )
str( gender_fac )
|
303de41ae2d97bd805f7244e3668ecbc4e5f788f
|
ed69b9b15821c94608e172cd7113e9a73d51c1c3
|
/FLVoters/FLVoters.R
|
70f0944497595814648e6591ab29ba4faacb5780
|
[] |
no_license
|
anhnguyendepocen/Econ_5043
|
410e85a5fae43d2a653052204273fb5e3808a43b
|
592451ca6c2fef2cd22dc905c8acaad51d896860
|
refs/heads/master
| 2021-09-14T09:01:58.774073
| 2018-05-10T21:45:12
| 2018-05-10T21:45:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,160
|
r
|
FLVoters.R
|
'''
Note that there were some parts that were buggy
'''
# NOTE(review): the triple-quote block above is a Python-style docstring;
# it only parses in R by accident (as adjacent string literals) — replace
# with # comments.
#Load Data
#census data
cnames<-read.csv("cnames.csv")
head(cnames)
#Florida census data
FLCensus<-read.csv("FLCensusDem.csv")
head(FLCensus)
FLVoters<-read.csv("FLVoters.csv")
head(FLVoters)
FLCensusVTD<-read.csv("FLCensusVTD.csv")
head(FLCensusVTD)
#1) split the data by races
white <- subset(FLVoters, subset = (race == "white"))
#2) match the florida data and the census data using surname
w.index <- match(white$surname, cnames$surname)
head(w.index)
#3) For the sample of whites, the maximum of conditional probabilities should be the conditional
# probability of being white given the surname max{pctwhite, pctblack, pctapi, pcthispanic}
# BUG(review): "pctwhite" appears twice and "pctblack" is missing from this
# vector, so the row-wise max below never considers Black percentages.
vars<-c("pctwhite", "pctwhite", "pctapi", "pctaian", "pct2prace", "pcthispanic")
#4) the success rates are defined as the instances when these two are indeed the same
comparison <- apply(cnames[w.index,vars], 1, max) == cnames$pctwhite[w.index]
head(comparison)
#Calculations on data
#matrix_voters<-matrix(c(1:5, 11:15), nrow = 5, ncol = 2)
#mean of the rows
#apply(matrix_voters, 1, mean)
#mean of the columns
#apply(matrix_voters, 2, mean)
#use function on data
#divide all values by 2
#apply(matrix_voters, 1:2, function(x) x/2)
#5) repeat the process for 3 and 4 for other races
#Black
black <- subset(FLVoters, subset = (race == "black"))
w.index.b <- match(black$surname, cnames$surname)
# BUG(review): uses w.index (the whites' index) instead of w.index.b, so
# the comparison runs over the wrong rows.
comparison.b <- apply(cnames[w.index,vars], 1, max) == cnames$pctblack[w.index]
head(comparison.b)
#Asian
asian <- subset(FLVoters, subset = (race == "asian"))
a.index <- match(asian$surname, cnames$surname)
# NOTE(review): pctaian is American Indian/Alaska Native in Census surname
# files; Asian/Pacific Islander is pctapi — confirm which was intended.
comparison.a <- apply(cnames[a.index,vars], 1, max) == cnames$pctaian[a.index]
head(comparison.a)
#Hispanic
hispanic <- subset(FLVoters, subset = (race == "hispanic"))
h.index <- match(hispanic$surname, cnames$surname)
comparison.h <- apply(cnames[h.index,vars], 1, max) == cnames$pcthispanic[h.index]
head(comparison.h)
#Backing out information from compiled information
#P[race|surname,residence] = P[surname|race, residence] * P[race|residence] / P[surname|residence]
#relaxing the model we can solve for the alternative function
#we are missing P[surname|race, residence] but can approximate it using P[surname|race]
#this means, that we assume that residence has no impact on surname
#Question 1
#P[race | surname] and P[surname] from Census data
# BUG(review): weighted.mean's formal argument is `w`, not `weights`; the
# named argument below is swallowed by `...` and ignored, so these are
# plain unweighted means — likely the "Race Prop not populating" issue.
race.prop <- apply(FLCensus[,c("white", "black", "api", "hispanic", "others")],
2,
weighted.mean,
weights = FLCensus$total.pop)
##############################
# Race Prop not populating
##############################
race.prop
total.count<-sum(cnames$count)
# P[surname | race] columns, Bayes-inverted from P[race | surname].
cnames$names.white <- (cnames$pctwhite/100) * (cnames$count/total.count)/race.prop["white"]
cnames$names.black <- (cnames$pctblack/100) * (cnames$count/total.count)/race.prop["black"]
cnames$names.hispanic <- (cnames$pcthispanic/100) * (cnames$count/total.count)/race.prop["hispanic"]
cnames$names.api <- (cnames$pctapi/100) * (cnames$count/total.count)/race.prop["api"]
cnames$names.other <- (cnames$pctothers/100) * (cnames$count/total.count)/race.prop["others"]
#Merge data together
#P[race|surname,residence] = P[surname|race, residence] * P[race|residence] / P[surname|residence]
FLVoters2<-merge(x=FLVoters, y=FLCensus, by=c("county", "VTD"), all = FALSE)
head(FLVoters2)
index2<- match(FLVoters2$surname, cnames$surname)
# BUG(review): columns were created as names.* above but are referenced as
# name.* here (and "name.others" vs "names.other"), so these are all NULL
# and name.residence becomes garbage.
FLVoters2$name.residence <- cnames$name.white[index2]*FLVoters2$white +
cnames$name.black[index2]*FLVoters2$black +
cnames$name.hispanic[index2]*FLVoters2$hispanic +
cnames$name.api[index2]*FLVoters2$api +
cnames$name.others[index2]*FLVoters2$others
# NOTE(review): incomplete statement — predict.* columns are never created
# anywhere in this script, so everything below that uses them fails.
FLVoters2$predict
vars2<- c("predict.white", "predict.black", "predict.hispanic", "predict.api", "predict.others")
whites2<-subset(FLVoters2, subset = (race == "white"))
# BUG(review): `white2` is undefined (the object is `whites2`).
mean(apply(white2[, vars2], 1, max) == whites2$predict.white)
black<-subset(FLVoters2, subset = (race == "black"))
# BUG(review): `blacks` and `blacks2` are undefined (the object is `black`).
mean(apply(blacks[, vars2], 1, max) == blacks2$predict.black)
|
1635f8ccfe3cce6da0025e405e333f19a9614eb2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/suncalc/examples/getMoonTimes.Rd.R
|
7f421f9ee6b9fcf16ddd28af03a4f5aa9d70d9f8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
getMoonTimes.Rd.R
|
library(suncalc)
### Name: getMoonTimes
### Title: Get Moon times
### Aliases: getMoonTimes
### ** Examples
# one date
getMoonTimes(date = Sys.Date(), lat = 47.21, lon = -1.557, tz = "CET")
# multiple date + subset
getMoonTimes(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
keep = c("rise", "set", "alwaysUp"),
lat = 47.21, lon = -1.557, tz = "CET")
# multiple coordinates
data <- data.frame(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
lat = c(rep(50.1, 10), rep(49, 10)),
lon = c(rep(1.83, 10), rep(2, 10)))
getMoonTimes(data = data, tz = "CET")
|
64834ad393bb9a3e5a252138e776f5b179b1e7ee
|
567ae9b443f9e3599b8f19d8616ad75a924eecef
|
/HW1.R
|
9a425d823afb7db7cf4bf823daa46f978682daa0
|
[] |
no_license
|
sdmurff/STATS-240P
|
40c811eeebd60f728b37c77ddced2dc33f5478c5
|
f547281b4843aec8192b2890588af8245cc7d770
|
refs/heads/master
| 2022-08-10T23:14:09.419415
| 2022-07-29T03:49:13
| 2022-07-29T03:49:13
| 43,308,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,755
|
r
|
HW1.R
|
## @knitr HW1point6
#### Exercise 1.6 ####
X <- read.table('http://web.stanford.edu/~xing/statfinbook/_BookData/Chap06/w_logret_3stocks.txt', header=T)
# Convert Date column from a factor to an R date
X[,1]<-as.Date(X[,1],"%m/%d/%Y")
## @knitr a1_6_1
#### 1.6 (a) ####
# Plot Pfizer returns and add lines to highlight behavior before and after March 8, 1999
plot(x=X$Date,y=X$PFE)
abline(v=as.Date('1999-03-08'))
abline(h=0)
## @knitr a1_6_2
# Box plot makes difference more apparent
boxplot(X$PFE[1:897],X$PFE[897:length(X$PFE)],names = c("Before March 8, 1999","After March 8, 1999"))
abline(h=0)
## @knitr b1_6_1
#### 1.6 (b) ####
# Create independent variables
X$less.than.t0<-ifelse(X$Date<as.Date('1999-03-08'),1,0)
X$more.than.t0<-ifelse(X$Date>=as.Date('1999-03-08'),1,0)
# Run regression and view output
fit.full<-lm(PFE ~ less.than.t0 + more.than.t0 - 1,data=X)
summary(fit.full)
## @knitr b1_6_2
confint(fit.full,level=0.95)
## @knitr c1_6_1
#### 1.6 (c) ####
# Fit the reduced model
X$x1.plus.x2<-X$less.than.t0+X$more.than.t0
# (Continuation of Exercise 2.1: reduced model fit and F-test against fit.full,
# which is defined earlier in this file.)
fit.reduced<-lm(PFE ~ x1.plus.x2 - 1,data=X)
summary(fit.reduced)
# Use anova to carry out an F-test
anova(fit.reduced,fit.full)
## @knitr HW2point2
#### Exercise 2.2 ####
# Read in data from website (monthly swap-rate data)
X <- read.table('http://web.stanford.edu/~xing/statfinbook/_BookData/Chap02/m_swap.txt', skip=1, header=T)
## @knitr a2_2_1
#### 2.2 (a) ####
# Do manual PCA with covariance matrix
# Center a vector on its mean (no scaling by standard deviation)
standardize<-function(x){(x-mean(x))}
# Standardize data. Since all variables are in the same units, scaling by standard deviation is not necessary
# Note: X[2:length(X)] drops the first (date) column; length() of a data frame is its column count
X.standardized<-apply(X[2:length(X)],2,standardize)
X.covar<-cov(X.standardized)
X.eig<-eigen(X.covar)
# Use R function princomp to do PCA
cov.PCA<-princomp(X[2:length(X)])
# Compare manual PCA with that from princomp
summary(cov.PCA)
# Standard deviation (square roots of the eigenvalues)
formatC(sqrt(X.eig$values),format='f',digits = 6)
# Proportion of Variance
formatC(X.eig$values/sum(X.eig$values),format='f',digits = 6)
# Cumulative Proportion
formatC(cumsum(X.eig$values/sum(X.eig$values)),format='f',digits = 6)
# Plot the Variance
screeplot(cov.PCA)
## @knitr b2_2_1
#### 2.2 (b) ####
# Do manual PCA with correlation matrix
X.corr<-cor(X.standardized)
X.eig<-eigen(X.corr)
# Use R function princomp to do PCA with correlation matrix
corr.PCA<-princomp(X[2:length(X)], cor=T)
# Compare manual PCA with correlation matrix with that from princomp
summary(corr.PCA)
# Standard deviation
formatC(sqrt(X.eig$values),format='f',digits = 6)
# Proportion of Variance
formatC(X.eig$values/sum(X.eig$values),format='f',digits = 6)
# Cumulative Proportion
formatC(cumsum(X.eig$values/sum(X.eig$values)),format='f',digits = 6)
# Plot the variance
screeplot(corr.PCA)
## @knitr next
# NOTE(review): cor.PCA duplicates the corr.PCA call above (same input, cor=T);
# confirm which name is used downstream before removing either.
cor.PCA<-princomp(X[2:length(X)], cor=T)
## @knitr c2_2_1
#### 2.2 (c) ####
# Read in daily and monthly data in order to compare PCA results
D<-read.table("http://web.stanford.edu/~xing/statfinbook/_BookData/Chap02/d_swap.txt",skip=1,header=T)
M<-read.table('http://web.stanford.edu/~xing/statfinbook/_BookData/Chap02/m_swap.txt',skip=1,header=T)
#Replicate Results from Section 2.2.3 (daily data, first-differenced then centered)
D.diff<-apply(D,2,diff)
D.diff.center<-scale(D.diff,center=T,scale=F)
cov.D.diff<-princomp(D.diff.center)
corr.D.diff<-princomp(D.diff.center,cor=T)
# Results from monthly swap file as calculated for (a) and (b) of this problem (not differenced)
M.center<-scale(M[2:length(M)],center=T,scale=F)
cov.M<-princomp(M.center)
corr.M<-princomp(M.center,cor=T)
# Comparison 1: Results from Section 2.2.3 to results from non-differenced version of monthly swap data.
# PCA Comparison with Covariance Matrix
summary(cov.D.diff)
summary(cov.M)
# PCA Comparison with Correlation Matrix
summary(corr.D.diff)
summary(corr.M)
## @knitr c2_2_2
# Comparison 2: Results from Section 2.2.3 to results from differenced version of monthly swap data.
# Results from monthly swap file after differencing
M.diff<-apply(M[2:length(M)],2,diff)
M.diff.center<-scale(M.diff,center=T,scale=F)
cov.M.diff<-princomp(M.diff.center)
corr.M.diff<-princomp(M.diff.center,cor=T)
# PCA Comparison with Covariance Matrix
summary(cov.D.diff)
summary(cov.M.diff)
# PCA Comparison with Correlation Matrix
summary(corr.D.diff)
summary(corr.M.diff)
## @knitr HW2point3
# Exercise 2.3: daily log returns of 12 stocks
S <- read.table('http://stanford.edu/~xing/statfinbook/_BookData/Chap01/d_logret_12stocks.txt', header=T)
# Eliminate Date Column for simplicity
S[,1]<-NULL
S.center<-scale(S,center=T,scale=F)
## @knitr c2_3_1
#### Exercise 2.3 ####
#### 2.3 (a) ####
# Run PCA analysis using princomp function with Covariance Matrix
cov.S<-princomp(S.center)
summary(cov.S)
## @knitr c2_3_2
#### 2.3 (b) ####
# Run PCA analysis using princomp function with Correlation Matrix
corr.S<-princomp(S.center,cor=T)
summary(corr.S)
|
4d2bc8a39b5fc9d17723871c8fb16a5c244c2276
|
52ccefaad3cbdd746065b0501962516207646140
|
/Chpp 6.R
|
96b375fc35c5e9543829ddd9870e64c45009cf80
|
[] |
no_license
|
Pushpit07/R--Programming--Cotton
|
030274e563db60e605ffa36ff0b9e2b112b0f7c3
|
4828e806533d30fffa6bdab160154264b95c7357
|
refs/heads/master
| 2023-04-14T22:10:09.622979
| 2021-04-24T04:30:31
| 2021-04-24T04:30:31
| 336,554,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,874
|
r
|
Chpp 6.R
|
# Demo script: R environments, function arguments, and lexical scoping.
# ---- Creating an environment and assigning into it three ways ----
an_environment <- new.env()
an_environment[["pythag"]] <- c(12, 15, 20, 21)
an_environment$root <- polyroot(c(6, -5, 1))
assign(
"moonday",
weekdays(as.Date("1969/07/20")),
an_environment
)
# ---- Reading back and listing the environment's contents ----
an_environment[["pythag"]]
an_environment$root
ls(envir = an_environment)
ls.str(envir = an_environment)
exists("pythag", an_environment)
# ---- Converting between environments and lists ----
(a_list <- as.list(an_environment))
as.environment(a_list)
list2env(a_list)
# ---- Parent environments: exists() searches parents unless inherits = FALSE ----
nested_environment <- new.env(parent = an_environment)
exists("pythag", nested_environment)
exists("pythag", nested_environment, inherits = FALSE)
# <<- assigns into the enclosing (here: global) environment
non_stormers <<- c(3, 7, 8, 13, 17, 18, 21)
get("non_stormers", envir = globalenv())
head(ls(envir = baseenv()), 20)
rt
# ---- Functions: positional vs named arguments ----
hypotenuse <- function(x, y) {
sqrt(x^2 + y^2)
}
hypotenuse(3,4)
hypotenuse(y = 24, x = 7)
# Redefinition with default argument values
hypotenuse <- function(x = 5, y = 12) {
sqrt(x ^ 2 + y ^ 2)
}
hypotenuse()
# ---- Inspecting a function's formal arguments ----
formals(hypotenuse)
args(hypotenuse)
formalArgs(hypotenuse)
hypotenuse
# Defaults may refer to other arguments (lazy evaluation)
normalize <- function(x, m = mean(x), s = sd(x)) {
(x - m) / s
}
normalized <- normalize(c(1, 3, 6, 10, 15))
mean(normalized)
sd(normalized)
normalize(c(1, 3, 6, 10, 15))
# NA input propagates: mean/sd become NA without na.rm
normalize(c(1, 3, 6, 10, NA))
# Defaults may even refer to arguments defined later in the signature
normalize <- function(x, m = mean(x, na.rm = na.rm), s = sd(x, na.rm = na.rm), na.rm = FALSE)
{
(x - m) / s
}
normalize(c(1, 3, 6, 10, NA))
normalize(c(1, 3, 6, 10, NA), na.rm = TRUE)
# ---- do.call: invoke a function with arguments supplied as a list ----
do.call(hypotenuse, list(x = 3, y = 4)) #same as hypotenuse(3, 4)
dfr1 <- data.frame(x = 1:5, y = rt(5, 1))
dfr2 <- data.frame(x = 6:10, y = rf(5, 1, 1))
dfr3 <- data.frame(x = 11:15, y = rbeta(5, 1, 1))
do.call(rbind, list(dfr1, dfr2, dfr3)) #same as rbind(dfr1, dfr2, dfr3)
do.call(function(x, y) x + y, list(1:5, 5:1))
# ecdf() returns a function (a closure)
(emp_cum_dist_fn <- ecdf(rnorm(50)))
is.function(emp_cum_dist_fn)
plot(emp_cum_dist_fn)
# ---- Lexical scoping: free variable y is looked up at call time ----
h <- function(x) {
x*y
}
h(9)
y <- 16
h(9)
# Local y (assigned ~half the time) shadows the global y when present
h2 <- function(x) {
if(runif(1) > 0.5) y <- 12
x*y
}
replicate(10, h2(9))
|
a9bc3ff07a51e0d2cdbf4e6f2b423e6fa5235909
|
c88a451c5dd8dab775fc61e2c007bdcdde33149e
|
/ui.R
|
3d3921d5bc8e3ab39516818237c0c4c2996caf3d
|
[] |
no_license
|
davidmanero/Shiny-Spanish-Books
|
0682fdc6525493adad187543b19fc9cf94fdfb3b
|
ea0bdbd8e77faacd058ab55615bc536740d74b73
|
refs/heads/master
| 2021-01-10T12:51:14.390477
| 2015-09-27T18:22:25
| 2015-09-27T18:22:25
| 43,258,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,854
|
r
|
ui.R
|
library(shiny)
# Shiny UI: a navbar app with four tabs -- project description ("Detail"),
# the interactive word cloud ("Application"), a walkthrough of the analysis
# steps ("Analysis"), and a source-code link ("SourceCode").
shinyUI(
navbarPage("Best Spanish Books Word Cloud",
# Tab 1: static description, instructions, inspiration and data sources
tabPanel("Detail",
h2("Spanish Best Books Word Cloud"),
hr(),
h3("Description"),
helpText("This is an application that gives a Word Cloud Analysis of Some of the Best Spanish Books",
" ever wroten. In the application you can choose the minimum frequency of the words, and",
" the maximum number of words in the Cloud"),
hr(),
h3("Instructions"),
helpText("In the Application Tab you can choose the Book you want to analize, then click in Change Button",
" an the analysis start. First the Corpus is indexed and then some transformations are done.",
" The Word Cloud is done with the parameters in the input panel."),
hr(),
h3("Inspiration"),
p("I'm working in this type of analysis in Twitter with a huge number of tweets around a hashtag."),
p("But this kind of analysis is not able to do in a Shiny applications beacause of the tweet downloads."),
p("So I found in a blog this kind of application done in Shiny. I have change the cloud analysis, prepare it to the spanish language, and change a little the IU layout (but not really much in the application, most in the panel visualization) "),
hr(),
h3("Source"),
p("The Books you can find in Gutenberg Project:"),
p("Don Quijote: http://www.gutenberg.org/cache/epub/2000/pg2000.txt"),
p("El Lazarillo de Tormes: http://www.gutenberg.org/cache/epub/320/pg320.txt"),
p("La Celestina: http://www.gutenberg.org/cache/epub/1619/pg1619.txt"),
p("The idea of word cloud for Books: http://pirategrunt.com/2013/12/11/24-days-of-r-day-11/"),
p("The Shiny Application part: http://shiny.rstudio.com/gallery/word-cloud.html")
),
# Tab 2: the interactive app -- book selector plus frequency/word-count
# sliders in the sidebar, word-cloud plot in the main panel.
# NOTE(review): `books` (the choices vector) is not defined in this file;
# presumably it comes from global.R or server.R -- confirm.
tabPanel("Application",
#fluidPage(
# Application title
# titlePanel("Spanish Best Books Word Cloud"),
sidebarLayout(
# Sidebar with a slider and selection inputs
sidebarPanel(
selectInput("selection", "Choose a book:",
choices = books),
actionButton("update", "Change"),
hr(),
sliderInput("freq",
"Minimum Frequency:",
min = 1, max = 50, value = 15),
sliderInput("max",
"Maximum Number of Words:",
min = 1, max = 300, value = 100)
),
# Show Word Cloud
mainPanel(
plotOutput("plot")
)
)
),
# Tab 3: prose description of the text-mining pipeline
tabPanel("Analysis",
h3("Load the entire book"),
helpText("The first thing done is to read the book from a txt file (thanks to Gutenberg Project),",
" this will be the base for the analysis. The name of the book is compared to the list",
" of the books that we have downloaded. In order to not break."),
hr(),
h3("Corpus construction"),
helpText("Then some modifications and preparations are done to the text: ",
" lower any upper case, take punctations, numbers, and some stop words from spanish."),
hr(),
h3("Word Matrix"),
helpText("With the tm library we can do Corpus and transform this list of words in a Matrix. ",
" This MAtrix is necesary for the WordCloud Application."),
hr(),
h3("Word Cloud Plot"),
helpText("The Word Cloud Plot is done using the wordcloud library, with the wordcloud_rep function",
" than plots the information with minimum frequency an the number of words plotted.")
),
# Tab 4: link to the repository
tabPanel("SourceCode",
p("Shiny Spanish Books"),
a("https://github.com/davidmanero/Shiny-Spanish-Books/")
)
)
)
|
001fc26e340aac9f65976936f34f13e10822178c
|
0f43b7df4006ca85de76f5209b85aa39c649150f
|
/R/export_csv.R
|
cbd33d857cb8a09c603b6d354871061e80367bb4
|
[] |
no_license
|
sybrendeuzeman/WIOD_package
|
29c66e8b17415236421c534c309c1f712cf860ac
|
edcb5485bd4d49b9a6310d7dfc172f1d8864366c
|
refs/heads/master
| 2020-07-02T04:44:56.131660
| 2020-01-23T18:22:31
| 2020-01-23T18:22:31
| 201,419,749
| 1
| 0
| null | 2020-01-23T18:22:32
| 2019-08-09T07:54:13
|
R
|
UTF-8
|
R
| false
| false
| 2,051
|
r
|
export_csv.R
|
# Function to extract measures from iots into a .csv file
# Created by: Sybren Deuzeman
# Maintained by: GGDC
# First version May 2019
# Current version: 11 June 2019
#' @import tcltk
#' @title Make a CSV file from measures in IOTs
#' @description Function to extract measures from IOTs into a CSV file
#' @param measures: vector with the names of the measures
#' @param iots: list of input-output tables for which measures are already calculated
#' @param long: Whether data is in long or wide format.
#' @param filename: Where to store the data
#'
#' @details
#' filename = "choose" is default and will prompt a file-choose dialog.
#'
#' First element in vector will be used to find the description of the data
#' Make sure the output of the measures are of the same length.
#' (i.e. all need to be on e.g. country-industry, country or industry level)
#'
#' @seealso
#' export_dataframe(): [export_dataframe()]
#'
#' @examples
#' \dontrun{
#' iots <- load_iots("WIOD2013", 2000:2001)
#' iots <- oniots(wiod_gii, iots)
#'
#' Not specifying directory prompts a file-choose dialog
#' export_csv("gii", iots)
#' export_csv("gii", iots, long = TRUE)
#'
#' Save table in working directory
#' export_csv("gii", iots, filename = "myresults.csv")
#'
#' Or specify a directory:
#' export_csv("gii", iots, filename = "D:/Research/myresults.csv")
#' }
#'
#' @export
export_csv <- function(measures, iots, long = FALSE, filename = "choose"){
# Create the dataframe
df <- export_dataframe(measures, iots, long)
# Either choose directory and filename via dialog box or use existing filename:
if (filename == "choose"){
filename <- tclvalue(tkgetSaveFile(filetypes = "{ {Comma Seperated Values} {*.csv} }", defaultextension = ".csv", initialdir = getwd()))
}
# Save data:
if (filename != ""){
write.table(df, file = filename, row.names = FALSE, sep=';', dec = ".")
# Print filename such that it can be copied and checked
print("Table saved to")
print(filename)
}
else warning("No file selected. Data not saved")
}
|
505531c39d735fc177c75f046b7ecf3889016315
|
fde3f786a46570dcdc728538f756d6e3f30045eb
|
/R/QM12-02D/02-anaResults.R
|
eb2c33bc49554f93a647c11f15764f3fd820aa0d
|
[] |
no_license
|
jrminter/snippets
|
3fcb155721d3fd79be994c9d0b070859447060d7
|
bb3daaad5c198404fef6eaa17b3c8eb8f91a00ed
|
refs/heads/master
| 2021-01-13T14:20:51.451786
| 2017-04-29T05:26:11
| 2017-04-29T05:26:11
| 16,347,209
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,132
|
r
|
02-anaResults.R
|
# 02-anaResults.R
# Compare AIF vs SEM inner-diameter measurements per nozzle for one chip,
# then produce per-segment plots (PNG) and summary tables (LaTeX via xtable).
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- avoid when
# this script is sourced from another session.
rm(list=ls())
library(xtable)
# Chip / analysis configuration (edit these per run)
str.id <- 'FY26H-09-01'
# str.mode <- 'raw'
str.mode <- 'ellipse'
str.wd <- "~/work/qm12-02d-02/R/R/"
i.digits <- 4
# png dimentions
png.w=800
png.h=600
# should not need to change below here...
# set up functions
# compute the standard error
# Standard error of the mean: sqrt(var(x) / n).
# NOTE: this masks base::stderr() (the standard-error connection) in this script.
stderr <- function(x) sqrt(var(x)/length(x))
# plot and analyze the results from segment 0....n
# Plots AIF (red triangles) vs SEM (blue dots) inner diameters for the nozzles
# of one segment (A..E) and returns a 3x2 data frame of segment statistics
# (rows SEM/AIF/Delta; columns avg, se), rounded to the global i.digits.
# Returns invisibly NULL when str.seg is not one of A..E.
plotSegment <- function( df, # dataframe
chip.id, # chip number
str.seg, # seg ID A,...
str.mode, # mode (raw, ellipse)
noz.per.seg=512, #
do.title=TRUE, # print a title
legend.offset.x=0, # offset from center
legend.offset.y=0) # of the legend
{
# Map the segment letter to its [nLo, nHi) nozzle-number window.
nLo <- -1
nHi <- -1
df.seg <- NA
if(str.seg=='A')
{
nLo <- 0
nHi <- noz.per.seg
}
# NOTE(review): segment A spans nozzles 0..noz.per.seg-1 (strict `<` below),
# but segments B-E use nHi = k*noz.per.seg - 1, which excludes the LAST
# nozzle of each of those segments (e.g. 1023 for B). Confirm whether the
# "-1" is intended or an off-by-one.
if(str.seg=='B')
{
nLo <- noz.per.seg
nHi <- 2*noz.per.seg-1
}
if(str.seg=='C')
{
nLo <- 2*noz.per.seg
nHi <- 3*noz.per.seg-1
}
if(str.seg=='D')
{
nLo <- 3*noz.per.seg
nHi <- 4*noz.per.seg-1
}
if(str.seg=='E')
{
nLo <- 4*noz.per.seg
nHi <- 5*noz.per.seg-1
}
if(nLo > -1)
{
# Filter to nozzle.number >= nLo ...
x <- df$nozzle.number[df$nozzle.number >= nLo]
aif <- df$aif.dia.inner[df$nozzle.number >= nLo]
sem <- df$sem.dia.inner[df$nozzle.number >= nLo]
delta <- df$delta[df$nozzle.number >= nLo]
df.t <- data.frame(nozzle.number=x,
aif.dia.inner=aif,
sem.dia.inner=sem,
delta=delta)
# ... then to nozzle.number < nHi, leaving only this segment's nozzles.
x <- df.t$nozzle.number[df.t$nozzle.number < nHi]
aif <- df.t$aif.dia.inner[df.t$nozzle.number < nHi]
sem <- df.t$sem.dia.inner[df.t$nozzle.number < nHi]
delta <- df.t$delta[df.t$nozzle.number < nHi]
# Axis ranges covering both measurement series.
x.t <- c(min(x), max(x))
y.t <- c(min(min(aif), min(sem)),
max(max(aif), max(sem)))
str.title=paste('Segment', str.seg, 'from chip', chip.id,
'with', str.mode, 'boundaries')
if(do.title)
{
plot(x.t, y.t, type='n', xlab='nozzle number',
ylab='diameter', main=str.title)
}
else
{
plot(x.t, y.t, type='n', xlab='nozzle number',
ylab='diameter')
}
# Legend anchor: plot center plus the caller-supplied offsets.
p.c.x <- mean(x.t)
p.c.y <- mean(y.t)
points(x, aif, pch=17, col='red')
points(x, sem, pch=19, col='blue')
legend(p.c.x + legend.offset.x,
p.c.y + legend.offset.y,
legend =c('AIF', 'SEM'),
col=c('red', 'blue'),
pch=c(17, 19) )
# Per-segment means and standard errors (stderr defined above).
sem.avg <- mean(sem)
sem.se <- stderr(sem)
aif.avg <- mean(aif)
aif.se <- stderr(aif)
delta.avg <- mean(delta)
delta.se <- stderr(delta)
avg <- c(sem.avg, aif.avg, delta.avg)
se <- c(sem.se, aif.se, delta.se)
df.seg <- data.frame(avg=avg, se=se)
rownames(df.seg) <- c('SEM', 'AIF', 'Delta')
# return the segment statistics
# (rounded to the global i.digits set in the configuration section)
df.seg <- round(df.seg, i.digits)
df.seg
}
}
# Convert a nozzle name like "SegB_N17" (case-insensitive) into an absolute
# nozzle number: segment offset (A=0, B=noz.per.seg, ... E=4*noz.per.seg)
# plus the within-segment index. Returns NA for any name that does not
# match the "Seg<letter>_N<number>" shape or whose letter is not A-E.
nozzleNumber <- function(nozzleName, noz.per.seg=512)
{
  result <- NA
  pieces <- strsplit(nozzleName, "_")[[1]]
  if (length(pieces) == 2)
  {
    seg.token <- toupper(pieces[1])
    noz.token <- toupper(pieces[2])
    # "SEGB" splits on "SEG" into c("", "B"); take the trailing letter.
    seg.pieces <- strsplit(seg.token, "SEG")[[1]]
    if (length(seg.pieces) == 2)
    {
      seg.index <- match(seg.pieces[2], c("A", "B", "C", "D", "E"))
      if (!is.na(seg.index))
      {
        # "N17" splits on "N" into c("", "17"); take the trailing digits.
        noz.pieces <- strsplit(noz.token, "N")[[1]]
        if (length(noz.pieces) == 2)
        {
          # Nozzle numbers are zero based within each segment.
          result <- (seg.index - 1) * noz.per.seg + as.numeric(noz.pieces[2])
        }
      }
    }
  }
  result
}
# ---- Load SEM measurements for this chip/mode and build the working frame ----
setwd(str.wd)
str.file <- paste('../dat/sem/', str.mode,'/', str.id, '.csv', sep='')
data <- read.csv(str.file, header = TRUE, as.is=T)
noz.name <- data[, 1]
sem.dia.inner <- round(data[, 3], i.digits)
sem.dia.outer <- round(data[, 4], i.digits)
the.name <- noz.name[1]
# Translate "SegX_Nnn" names into absolute nozzle numbers.
nozzle.number <- sapply(noz.name, nozzleNumber)
df <- data.frame(chip=str.id,
nozzle.number=nozzle.number,
sem.dia.inner=sem.dia.inner,
sem.dia.outer=sem.dia.outer,
aif.dia.inner=0)
rownames(df) <- NULL
rm(data)
# ---- Merge in the AIF measurements, indexed by nozzle number ----
str.file <- paste('../dat/aif-sys1/', str.id, '.txt', sep='')
data <- read.csv(str.file, header=F, as.is=T)
# now get the aif
for(i in 1:nrow(df))
{
nn <- df$nozzle.number[i]
# n.b. row numbers are 1 based, nozzle numbers are 0 based...
aif <- data[nn+1, 1]
df$aif.dia.inner[i] <- aif
}
# delta = AIF minus SEM inner diameter, per nozzle
df$delta <- df$aif.dia.inner - df$sem.dia.inner
print(tail(df))
print(summary(df$delta))
# ---- Segment A: screen plot, PNG for the report, LaTeX table ----
statsA <- plotSegment(df, str.id, 'A', str.mode, noz.per.seg=512,
do.title=T,
legend.offset.x=0,
legend.offset.y=0)
# Create the plot with a larger point size
str.png <- paste('../../TeX/png/',str.id,'-SegA-',
str.mode, '.png', sep='')
png(str.png, pointsize=24, width=png.w, height=png.h)
statsA <- plotSegment(df, str.id, 'A', str.mode, noz.per.seg=512,
do.title=F,
legend.offset.x=0,
legend.offset.y=0)
dev.off()
# Write the segment-A statistics as a LaTeX table via xtable.
str.tex <- paste('../../TeX/methods/',str.id,'-SegA-',
str.mode, '.tex', sep='')
str.label <- paste('tab:ana',str.id,'A-',str.mode ,sep='')
str.align <- '|r|r|r|'
str.caption <- paste('Analysis of', str.id, 'Segment A',
'with', str.mode, 'boundaries')
xt.dig <- c( i.digits, i.digits, i.digits)
xt <- xtable(statsA, digits=xt.dig, caption=str.caption,
label=str.label, align=str.align)
sink(str.tex)
print(xt)
sink()
str.ei <- '\\endinput'
cat(str.ei, file=str.tex, sep='\n', append=T)
# ---- Segment E: same sequence as segment A ----
statsE <- plotSegment(df, str.id, 'E', str.mode, noz.per.seg=512,
do.title=T,
legend.offset.x=0,
legend.offset.y=0)
# Create the plot with a larger point size
str.png <- paste('../../TeX/png/',str.id,'-SegE-',
str.mode, '.png', sep='')
png(str.png, pointsize=24, width=png.w, height=png.h)
statsE <- plotSegment(df, str.id, 'E', str.mode, noz.per.seg=512,
do.title=F,
legend.offset.x=0,
legend.offset.y=0)
dev.off()
str.tex <- paste('../../TeX/methods/',str.id,'-SegE-',
str.mode, '.tex', sep='')
str.label <- paste('tab:ana',str.id,'E-',str.mode ,sep='')
str.align <- '|r|r|r|'
str.caption <- paste('Analysis of', str.id, 'Segment E',
'with', str.mode, 'boundaries')
xt.dig <- c( i.digits, i.digits, i.digits)
xt <- xtable(statsE, digits=xt.dig, caption=str.caption,
label=str.label, align=str.align)
sink(str.tex)
print(xt)
sink()
str.ei <- '\\endinput'
cat(str.ei, file=str.tex, sep='\n', append=T)
|
7b1f51cc4f4763c08b9261b04edf76569a04c340
|
b9c5fe4799dfd8b0e73f604158f4e7071974fa85
|
/ogbox.r
|
e5b08ade46b80eedd72239d31c3face2571416c6
|
[] |
no_license
|
oganm/Rotation-3
|
50b12ea35d3ee611f11d227d96a83714e62c49f8
|
f0fecc0c64efe95a1a732c2ca73a3cbbb69cf114
|
refs/heads/master
| 2021-01-13T02:36:22.277925
| 2014-11-07T00:13:36
| 2014-11-07T00:13:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,018
|
r
|
ogbox.r
|
# Apply several pattern -> replacement substitutions to x in sequence.
#
# patterns and replacements are parallel character vectors; substitution i is
# applied to the result of substitution i-1, so order matters. The remaining
# flags are forwarded to gsub() by name.
#
# Fixes: seq_along() so an empty `patterns` is a no-op (1:length() would have
# produced c(1, 0) and called gsub with an NA pattern); flags forwarded by
# name instead of positionally.
gsubMult = function(patterns, replacements, x,
                    ignore.case = FALSE, perl = FALSE, fixed = FALSE, useBytes = FALSE) {
  for (i in seq_along(patterns)){
    x = gsub(patterns[i], replacements[i], x,
             ignore.case = ignore.case, perl = perl,
             fixed = fixed, useBytes = useBytes)
  }
  return(x)
}
#paste to directory, now replaced by modified +. here for historical reasons
# Concatenate arguments with no separator (equivalent to paste0()).
dpaste = function (...){
paste(..., sep='')
}
#remove this when you can. dangerous for many things.
# Global override of "+": when either operand is a character vector this
# concatenates strings (Python style); otherwise it falls through to the
# primitive numeric "+". Several functions below (listSet, and historically
# listParse/listStr) depend on this override, so removing it requires
# rewriting those callers first.
"+" = function(x, y) {
if(is.character(x) | is.character(y)) {
return(paste(x, y, sep = ""))
} else {
.Primitive("+")(x, y)
}
}
# Return the absolute path of the directory `step` levels above the current
# working directory, with a trailing "/".
#
# The path is resolved by temporarily changing the working directory;
# on.exit() now guarantees the original directory is restored even if a
# setwd('..') fails part-way (the original left the session stranded).
getParent = function(step = 1){
  wd = getwd()
  on.exit(setwd(wd), add = TRUE)
  for (i in seq_len(step)){
    setwd('..')
  }
  paste(getwd(), '/', sep = '')
}
# Indices of the list elements that contain `object` (via %in%).
# Returns integer(0) when no element matches.
#
# Replaces the original grow-a-vector-in-a-loop implementation with a single
# vapply + which pass.
findInList = function(object, aList){
  hits <- vapply(aList, function(element) object %in% element, logical(1))
  unname(which(hits))
}
# Total number of atomic elements across all members of aList.
listCount = function(aList){
  flattened <- unlist(aList)
  length(flattened)
}
# Drop NA entries from a vector.
trimNAs = function(aVector) {
  aVector[!is.na(aVector)]
}
# Drop every entry of aVector that appears in e.
trimElement = function (aVector,e){
  keep <- !(aVector %in% e)
  aVector[keep]
}
# Nesting depth of a list, following the FIRST element at each level:
# listDepth(list(1)) is 1, listDepth(list(list(1))) is 2, etc.
#
# Replaces the original eval(parse(text = "deList[[1]][[1]]...")) probing
# with direct [[1]] descent -- same result, no string evaluation.
listDepth = function(deList){
  step = 1
  node = deList[[1]]
  while (typeof(node) == "list"){
    node = node[[1]]
    step = step + 1
  }
  step
}
#source
#http://www.r-bloggers.com/a-quick-way-to-do-row-repeat-and-col-repeat-rep-row-rep-col/
# Stack n copies of x as the ROWS of an n x length(x) matrix.
repRow <- function(x, n){
  matrix(x, nrow = n, ncol = length(x), byrow = TRUE)
}
# Stack n copies of x as the COLUMNS of a length(x) x n matrix.
repCol <- function(x, n){
  matrix(x, nrow = length(x), ncol = n)
}
# Repeat each element of aVector n times in place:
# repIndiv(c(1, 2), 3) -> 1 1 1 2 2 2.
#
# This is exactly rep(aVector, each = n); the original preallocate-and-slice
# loop is replaced with it. rep() preserves the input type and handles empty
# input (the old loop mis-indexed for length-0 vectors).
repIndiv = function (aVector, n){
  rep(aVector, each = n)
}
# http://stackoverflow.com/questions/6513378/create-a-variable-capturing-the-most-frequent-occurence-by-group
# Most frequent value of x; the value seen first wins ties.
# NOTE: this masks base::mode() (which reports storage mode).
mode <- function(x) {
  distinct <- unique(x)
  counts <- tabulate(match(x, distinct))
  distinct[which.max(counts)]
}
#load that bloody function no matter what
# Attach a package, installing it first if it is missing.
# `name` is captured unevaluated via substitute(), so both insist(pkg) and
# insist("pkg") work.
# NOTE(review): has heavy side effects (install.packages, library attach) and
# sleeps 5 seconds after installing -- presumably to let the install settle;
# not suitable for non-interactive/package code.
insist = function(name){
name = substitute(name)
name = as.character(name)
if (!require(name, character.only = T)) {
install.packages(name)
Sys.sleep(5)
library(name, character.only = T, logical.return = F)
}
}
#direct text eval
# Evaluate a string as R code and return the result.
# Deliberate eval(parse(text = ...)) helper -- use sparingly; evaluation
# happens in this function's own frame, not the caller's.
teval = function(daString){
eval(parse(text=daString))
}
# Navigate a nested list by successive [[ ]] subscripts: daArray holds the
# index to take at each level, in order, so listParse(x, c(2, 1)) returns
# x[[2]][[1]].
#
# Bug fix: the original looped `for (i in daArray)` but then subscripted with
# daArray[i], treating each index VALUE as a POSITION within daArray. That
# scrambled any path that is not its own position vector (c(2, 1) was read
# as c(1, 2)). Also drops eval(parse(text = ...)) and the dependence on the
# overridden string "+" operator in favor of direct descent.
listParse = function (daList, daArray){
  for (i in daArray){
    daList = daList[[i]]
  }
  daList
}
#returns the final step as a list
# Like listParse(), but the FINAL subscript uses single brackets, so the
# result is a length-1 sub-list (names preserved):
# listParseW(x, c(1, 2)) returns x[[1]][2].
#
# Rewritten from eval(parse(text = ...)) string building (which also relied
# on the overridden string "+" operator) to direct descent; behavior for
# non-empty paths is unchanged.
listParseW = function (daList, daArray){
  k <- length(daArray)
  if (k > 1){
    for (i in daArray[seq_len(k - 1)]){
      daList <- daList[[i]]
    }
  }
  daList[daArray[k]]
}
# sets the list element
# Assign `something` at the nested position daArray inside the list variable
# passed (unevaluated) as the first argument: listSet(x, c(2, 1), v) performs
# x[[2]][[1]] <- v.
# NOTE(review): the assignment is built as text ("x[[2]][[1]]<<-something")
# and evaluated with <<-, so it mutates the variable in an ENCLOSING
# (typically global) scope, not a local copy. Depends on the string "+"
# override defined above.
listSet = function(daList,daArray ,something){
name = substitute(daList)
name = as.character(name)
out = ''
for (i in daArray){
out = out + '[[' + i + ']]'
}
eval(parse(text = name + out + '<<-something'))
}
# Build the "[[i1]][[i2]]..." subscript string for a path of indices.
# Rewritten with paste() so it no longer depends on the dangerous global
# "+" string override (which the author flagged for removal).
listStr = function(daArray){
  parts <- paste('[[', daArray, ']]', sep = '')
  paste(parts, collapse = '')
}
# Same, but the final index uses single-bracket subscripting:
# listStrW(c(3, 4)) -> "[[3]][4]".
listStrW = function(daArray){
  k <- length(daArray)
  head.part <- ''
  if (k > 1){
    head.part <- paste(paste('[[', daArray[seq_len(k - 1)], ']]', sep = ''),
                       collapse = '')
  }
  paste(head.part, '[', daArray[k], ']', sep = '')
}
#lovely Pythonlike + operator that pastes strings and concatanes lists
#concatanate to preallocated. only works for non zero values and with numeric or boolean stuff
# Copy y into the first zero-filled slot of a preallocated vector x and
# return the result (x itself is not modified; copy semantics).
"%c%" = function (x, y){
  first.free = which(x == 0)[1]
  x[seq(first.free, length.out = length(y))] = y
  x
}
# turn every member of daList to a color from the palette
# Each distinct value (in order of first appearance) is mapped to the next
# palette entry; returns the per-element color vector.
toColor = function(daList, palette = rainbow(20)){
  daList = as.factor(daList)
  seen = unique(daList)
  colors = vector(length = length(daList))
  for (k in seq_along(seen)){
    colors[daList == seen[k]] = palette[k]
  }
  colors
}
#to use with ggplot violins. adapted from http://stackoverflow.com/questions/17319487/median-and-quartile-on-violin-plots-in-ggplot2
# Five summary points for a violin overlay: Tukey-style whiskers
# (Q1 - 1.5*IQR, Q3 + 1.5*IQR) clamped to the observed data range, plus the
# quartiles themselves (ymin = Q1, y = median, ymax = Q3).
median.quartile <- function(x){
  quartiles <- quantile(x, probs = c(0.25, 0.5, 0.75))
  spread <- quartiles[3] - quartiles[1]
  lower <- max(quartiles[1] - 1.5 * spread, min(x))
  upper <- min(quartiles[3] + 1.5 * spread, max(x))
  out <- c(lower, quartiles, upper)
  names(out) <- c("whisDown", "ymin", "y", "ymax", "whisUp")
  return(out)
}
# Quartiles of x, renamed for ggplot's ymin / y / ymax aesthetics.
threeQuartile <- function(x){
  setNames(quantile(x, probs = c(0.25, 0.5, 0.75)),
           c("ymin", "y", "ymax"))
}
# Alias: len() behaves exactly like length().
len <- length
# Coefficient of variation of x, as a percentage: 100 * sd / mean.
coVar <- function(x) ( 100*sd(x)/mean(x) )
|
e93bc25352b43e4d6646e105f2b0348d64f93bd1
|
ed400c77295c4e95a26576a5dd8aac436aa0e2f9
|
/code/library/generateVarpepSAAVs/generateVarpepSAAVs.R
|
df8d0bfc5f31fe5aa02b07bb04856f0778ff71cb
|
[
"Apache-2.0"
] |
permissive
|
KnowledgeCaptureAndDiscovery/wings-genomics
|
960819a6d96108a4b2e145e13fbe92ac3f382393
|
0e6387eecd0ab7af24290d3441785a02ef19adf9
|
refs/heads/main
| 2023-04-09T23:30:08.322715
| 2022-06-30T18:16:59
| 2022-06-30T18:16:59
| 350,090,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,087
|
r
|
generateVarpepSAAVs.R
|
# Format and generate variant peptide and SAAV data
# Builds a SAAV identifier column (protein:variant:dbSNP), moves it to the
# front, renames the peptide-format column to the peptide-sequence name,
# writes the peptide-level table, then aggregates (sum) the remaining numeric
# columns by the group-by columns and writes the SAAV-level table.
# Returns list(peptide-level df, SAAV-level df).
# NOTE(review): writes to var.pep.outfile / var.saav.outfile, which are NOT
# parameters and are only assigned LOCALLY inside generateVarpepSAAVs() below
# -- as written this will fail with "object not found" unless those names
# exist in the global environment. Confirm and pass them as arguments.
formatVarpepAndSAAVs <- function(var.pep.df, saav.colname,
pro.colname, var.colname,
dbsnp.colname, pep.format.colname,
pep.seq.colname, to.remove.colnames,
group.by.cols) {
# Compose the SAAV key as "protein:variant:dbsnp".
var.pep.df[,saav.colname] <- do.call(paste, c(var.pep.df[c(pro.colname, var.colname, dbsnp.colname)], sep=":"))
# Move the SAAV key to the first column.
var.pep.df <- var.pep.df[,c(saav.colname,
colnames(var.pep.df)[!colnames(var.pep.df)
%in%
saav.colname])]
colnames(var.pep.df)[colnames(var.pep.df) == pep.format.colname] <- pep.seq.colname
write.table(var.pep.df, var.pep.outfile, row.names = F, sep = "\t", quote = F)
## SAAV level table
group.by.cols.df <- var.pep.df[group.by.cols]
var.saav.df <- aggregate(x = var.pep.df[,!colnames(var.pep.df)
%in% c(to.remove.colnames,
group.by.cols)],
by = group.by.cols.df,
FUN = sum)
write.table(var.saav.df, var.saav.outfile, row.names = F, sep = "\t", quote = F)
return(list(var.pep.df, var.saav.df))
}
# CLI entry point: reads positional command-line arguments, loads the variant
# peptide table, and delegates to formatVarpepAndSAAVs() to write the peptide-
# and SAAV-level output tables.
generateVarpepSAAVs <- function() {
args = commandArgs(trailingOnly=TRUE)
var.pep.file <- args[1]
pep.format.colname <- args[2]
pro.colname <- args[3]
dbsnp.colname <- args[4]
var.colname <- args[5]
saav.colname <- args[6]
pep.seq.colname <- args[7]
var.pep.outfile <- args[8]
var.saav.outfile <- args[9]
var.pep.df <- read.delim(var.pep.file, header = T, stringsAsFactors = F, check.names = F)
# NOTE(review): to.remove.cols.str and group.by.cols.str are never assigned
# in this file -- presumably they were meant to come from args[10]/args[11];
# as written these two lines will fail with "object not found". Confirm.
to.remove.colnames <- unlist(strsplit(to.remove.cols.str, ","))
group.by.cols <- unlist(strsplit(group.by.cols.str, ","))
print("Format the input variant peptide table to generate SAAV and variant peptide tables...")
var.pep.saav.df.list <- formatVarpepAndSAAVs(var.pep.df, saav.colname,
pro.colname, var.colname,
dbsnp.colname, pep.format.colname,
pep.seq.colname, to.remove.colnames,
group.by.cols)
print("Done!")
print(paste("Filenames:", var.saav.outfile, "and", var.pep.outfile))
}
# Run immediately when the script is executed.
generateVarpepSAAVs()
|
912877987c9d3d3aa38bbc7718b496deb4159f58
|
0c4ede35db089f24d1c5e26c2a92d485debe681d
|
/man/check_files.Rd
|
d1d2f0f33a8e57f5c763b27e9b544eb4c4b9fb89
|
[] |
no_license
|
cran/assertable
|
9b1c3032d47dcb19be1249198302d54f71eb8c2f
|
06f179e4dc06b55d4f2612782f098a580dfd7bc2
|
refs/heads/master
| 2021-08-07T15:46:52.715033
| 2021-01-27T05:30:15
| 2021-01-27T05:30:15
| 78,937,106
| 0
| 1
| null | 2020-04-21T17:55:27
| 2017-01-14T11:47:59
|
R
|
UTF-8
|
R
| false
| true
| 2,097
|
rd
|
check_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_files.R
\name{check_files}
\alias{check_files}
\title{Check for the existence of a vector of files, optionally repeated for a set amount of time.}
\usage{
check_files(filenames, folder = "", warn_only = FALSE,
continual = FALSE, sleep_time = 30, sleep_end = (60 * 3),
display_pct = 75)
}
\arguments{
\item{filenames}{A character vector of filenames (specify full paths if you are checking files that are not in present working directory)}
\item{folder}{An optional character containing the folder name that contains the files you want to check (if used, do not include folderpath in the filenames characters). If not specified, will search in present working directory.}
\item{warn_only}{Boolean (T/F), whether to end with a warning message as opposed to an error message if files are still missing at the end of the checks.}
\item{continual}{Boolean (T/F), whether to only run once or to continually keep checking for files for \emph{sleep_end} minutes. Default = F.}
\item{sleep_time}{numeric (seconds); if \emph{continual} = T, specify the number of seconds to wait in-between file checks. Default = 30 seconds.}
\item{sleep_end}{numeric (minutes); if \emph{continual} = T, specify number of minutes to check at \emph{sleep_time} intervals before terminating. Default = 180 minutes.}
\item{display_pct}{numeric (0-100); at what percentage of files found do you want to print the full list of still-missing files? Default = 75 percent of files.}
}
\value{
Prints the number of files that match. If \emph{warn_only} = T, returns a character vector of missing files
}
\description{
Given a character vector of filenames, check how many of them currently exist.
Optionally, can keep checking for a specified amount of time, at a given frequency
}
\examples{
\dontrun{
for(i in 1:3) {
data <- CO2
data$id_var <- i
write.csv(data,file=paste0("file_",i,".csv"),row.names=FALSE)
}
filenames <- paste0("file_",c(1:3),".csv")
check_files(filenames)
}
}
|
6327c9318b65f9d7b3dc12624a95f59f0a0dd621
|
73fc537bb4ca79f15edebcbfef0c90878666380c
|
/man/dateCheck.Rd
|
78f5e783694cf1779c943561588a4e47bb90933b
|
[] |
no_license
|
cran/ensembleBMA
|
b3012f476e3c7e44580edb9fb23e06bec7fce12c
|
2bbb7ed69a64dd97b55a40d832b19fbc77e89b10
|
refs/heads/master
| 2022-09-16T14:20:24.345306
| 2022-09-02T06:20:05
| 2022-09-02T06:20:05
| 17,695,812
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
rd
|
dateCheck.Rd
|
\name{dateCheck}
\alias{dateCheck}
\alias{getHH}
\title{
Checks date format.
}
\description{
Checks that the character form of a vector of dates conforms to
YYYYMMDDHH or YYYYMMDD.
}
\usage{
dateCheck(YYYYMMDDHH)
}
\arguments{
\item{YYYYMMDDHH}{
A character vector (or its factor equivalent) of dates which
should be in the form YYYYMMDDHH or YYYYMMDD,
in which YYYY specifies the year, MM the month,
DD the day, and (optionally) HH the hour.
}
}
\value{
A logical vector indicating whether or not each element of YYYYMMDDHH
has the correct format.
}
\details{
If both YYYYMMDDHH and YYYYMMDD are present,
the YYYYMMDD dates are assumed to be in error
even if HH == 00 for all of the longer dates. \cr
Requires the \code{chron} library.
}
\seealso{
\code{\link{ymdhTOjul},\link{julTOymdh}}
}
\examples{
dateCheck(c("2008043000", "20080431", "20080501"))
}
\keyword{chron}
% docclass is function
|
25e4a994fdec9c19cfc82130cdfec5e7b0cb29fc
|
07f9ba53c35091bb55094a0244a2701056bd3323
|
/man/player_scores.Rd
|
4344948cf993fcfe15623bc355cb05f7dd0220d5
|
[] |
no_license
|
MrDAndersen/mfl2R
|
cd1b9e4ed479c4cc7595a4cb5f8a5fd01deed4e7
|
fa3496dbffaf7a55e2b990709e75a9122f8ae5a9
|
refs/heads/master
| 2022-12-12T15:53:21.890433
| 2022-11-30T15:08:19
| 2022-11-30T15:08:19
| 249,209,957
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 333
|
rd
|
player_scores.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/league.R
\name{player_scores}
\alias{player_scores}
\title{Player scores for active league}
\usage{
player_scores(
week = NULL,
season = NULL,
player_id = NULL,
position = NULL,
status = NULL,
rules = NULL,
count = NULL
)
}
\description{
Retrieves player scores for the active league, optionally filtered by week,
season, player id, position, status, scoring rules, and number of results.
}
|
12bba0104306d6a9b26fb36efd02dac42efce2ce
|
1f33a90808ba87ebfabaeea8ce74f52af2a6a6f9
|
/example-points.R
|
9ee83dee8d80362ea5bf54c1a52f49fa50abf4c8
|
[] |
no_license
|
kelvingl/osm-examples
|
771dc811067895f750b011d90e453cda5d4bb4b7
|
bc3c62ceb58bc8b9cf8652d2f4331a9e378801aa
|
refs/heads/master
| 2021-05-15T09:22:18.610099
| 2017-10-30T10:03:01
| 2017-10-30T10:03:01
| 108,037,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 815
|
r
|
example-points.R
|
# Install package - CAUTION: requires the gdal-bin and libgdal-dev system packages on Linux
# NOTE(review): install.packages() inside a script runs on every execution;
# consider guarding with requireNamespace().
install.packages(c("OpenStreetMap"))
# Load package
library(OpenStreetMap)
# Build a simple world map
map <- openmap(
upperLeft = c(85.0, -180.0),
lowerRight = c(-85.0, 180.0),
type = "osm"
)
# Reproject the map so it can be plotted with latitude/longitude coordinates
mapLatLon = openproj(map)
# Plot the projection
plot(mapLatLon)
# Dataset : http://datadrivensecurity.info/blog/pages/dds-dataset-collection.html
dataset <- read.delim(
file = "marx-geo.csv",
sep = ","
)
# Color points by protocol (default red; ICMP blue, UDP green)
dataset$color <- "red"
dataset[dataset$proto == "ICMP",]$color <- "blue"
dataset[dataset$proto == "UDP",]$color <- "green"
# Draw the points on top of the map
points(
x = dataset$longitude,
y = dataset$latitude,
col = dataset$color,
cex = .8
)
|
df5bd6b5d1e9f3e0c2e9299e9cb2073134e18f0a
|
8bb44eb98bfd9163de85e190ab809998498ef5b3
|
/forecasting/forecasting_class_examples/7SARIMA(1).R
|
79e85e156d4720e8775fb0a7733e7c8705ab0563
|
[] |
no_license
|
Tom-a-Hawk/VCU_DAPT2018
|
c63e416d7d2ada00f6d4822828e257560fa4ef95
|
0c8972037ed06c4d88c738eb488cfb72a1b69919
|
refs/heads/master
| 2021-07-24T20:57:13.676108
| 2017-11-02T21:01:10
| 2017-11-02T21:01:10
| 108,593,161
| 0
| 0
| null | 2017-11-02T20:57:47
| 2017-10-27T20:34:22
|
Python
|
UTF-8
|
R
| false
| false
| 1,147
|
r
|
7SARIMA(1).R
|
library("forecast")
Amtrak.data <- read.csv("C:\\Users\\jrmerric\\Dropbox\\Teaching\\Exec Ed\\Decision Analytics\\Forecasting\\2017\\Amtrak data.csv")
ridership.ts <- ts(Amtrak.data$Ridership, start = c(1991,1), end = c(2004, 3), freq = 12)
plot(ridership.ts)
nValid <- 36
nTrain <- length(ridership.ts) - nValid
train.ts <- window(ridership.ts, start = c(1991, 1), end = c(1991, nTrain))
valid.ts <- window(ridership.ts, start = c(1991, nTrain + 1), end = c(1991, nTrain + nValid))
tsdisplay(train.ts)
diff.train.ts <- diff(train.ts, lag = 1)
tsdisplay(diff.train.ts)
fitSARIMA <- auto.arima(train.ts)
summary(fitSARIMA)
Box.test(residuals(fitSARIMA), lag=24, fitdf=1, type="Ljung-Box")
residualSARIMA <- arima.errors(fitSARIMA)
tsdisplay(residualSARIMA)
forecastSARIMA <- forecast(fitSARIMA, level=c(80,95), h=nValid)
plot(forecastSARIMA)
par(mfrow = c(2, 1))
hist(forecastSARIMA$residuals, ylab = "Frequency", xlab = "Fit Error", bty = "l", main = "")
hist(valid.ts - forecastSARIMA$mean, ylab = "Frequency", xlab = "Forecast Error", bty = "l", main = "")
accuracy(forecastSARIMA$mean, valid.ts)
|
690f569abb26130a65c50f65e966e7eb9b01b17f
|
e7b6bc6856ce7e42dceae1de7da92dbc9667dadc
|
/man/SeqDataFrame-class.Rd
|
31a38c87726c59e16c41a023f1685b85b4944f18
|
[] |
no_license
|
cran/distrSim
|
78ec9de63d081d6bcba03289582e3607b94cf05b
|
6ca1b59990b53198b52d5e42f904c817842f7200
|
refs/heads/master
| 2022-11-20T09:17:24.943963
| 2022-11-12T21:10:02
| 2022-11-12T21:10:02
| 17,695,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,454
|
rd
|
SeqDataFrame-class.Rd
|
\name{SeqDataFrames-class}
\docType{class}
\alias{SeqDataFrames-class}
\alias{SeqDataFrames}
\alias{seqDataFrames}
\alias{obsdimnames}
\alias{obsdimnames-method}
\alias{obsdimnames,SeqDataFrames-method}
\alias{obsdimnames<-,SeqDataFrames-method}
\alias{names,SeqDataFrames-method}
\alias{names<-,SeqDataFrames-method}
\alias{runnames}
\alias{runnames-method}
\alias{runnames,SeqDataFrames-method}
\alias{runnames<-,SeqDataFrames-method}
\alias{print,SeqDataFrames-method}
\alias{show,SeqDataFrames-method}
\alias{rbind}
\alias{rbind-method}
\alias{rbind,ANY-method}
\alias{rbind,SeqDataFrames-method}
\title{Class "SeqDataFrames" }
\description{
An object of type "SeqDataFrames" is a list of data frames, all of which
with the same numbers and names of columns (ideally with the same data-types
for the columns), but with possibly varying number of rows;
with correponding index operators it behaves like a three-dimensional array
with dimensions sample size x observation dimension x runs.
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{SeqDataFrames(...)}, where the \dots are a list of dataframes
with according column structure.
}
\section{Slots}{
\describe{
\item{\code{data}:}{
a list of data frames}
}
}
\details{There is a validity method checking for each member of the list being
a data frame and for the accordance of the
column structures of the data frames.}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "SeqDataFrames")}:
returns (slices of) the data}
\item{[<-}{\code{signature(x = "SeqDataFrames")}:
modifies (slices of) the data}
\item{print}{\code{signature(x = "SeqDataFrames", obs0 = NULL,
dims0 = NULL, runs0 = NULL, short = FALSE, ...)}:
slices can be printed and, if argument \code{short== TRUE}
only a bounded number of dimensions is shown.
}
\item{show}{\code{signature(object = "SeqDataFrames")}: a call to
\code{print(x)}}
\item{names}{\code{signature(x = "SeqDataFrames")}:
returns the names of the runs}
\item{runnames}{\code{signature(x = "SeqDataFrames")}:
returns the names of the runs}
\item{obsdimnames}{\code{signature(x = "SeqDataFrames")}:
returns the names of the observation dimensions}
\item{obsDim}{\code{signature(x = "SeqDataFrames")}:
returns the dimension of the observations}
\item{runs}{\code{signature(x = "SeqDataFrames")}:
returns the number of runs}
\item{samplesize}{\code{signature(x = "SeqDataFrames")}:
returns the size of the samples for each run}
\item{rbind}{\code{signature(x = "SeqDataFrames")}:
concatenates different a list of \code{SeqDataFrames} object (with
the same column structure) to a new object of class \code{SeqDataFrames}
to do so we mask the \code{rbind} method from package \pkg{base}}
}
}
\author{
Thomas Stabla \email{statho3@web.de},\cr
Florian Camphausen \email{fcampi@gmx.de},\cr
Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de},\cr
Matthias Kohl \email{Matthias.Kohl@stamats.de}
}
\seealso{
\code{\link{[-methods}}
\code{\link{print-methods}}
\code{\link{summary-methods}}
}
%\examples{}
\keyword{manip}
\concept{S4 data class}
\concept{S4 simulation class}
|
2bfd16e564a35821a666dc2f2cef3189f9b777be
|
97ba3a8e81ddeca9a60f1b042e0c6db0e612f84e
|
/R/OutOfSampleTesting.R
|
335dc4c395d99c8866b443735ce19fbc011ea54a
|
[] |
no_license
|
avnit/Project
|
d3bfa9d45b9c44ad89bee2dc77ba3bb8dc21e153
|
6bd24921178dbd0a1b6ac0281c78a2f3f6f7f097
|
refs/heads/master
| 2020-04-11T10:44:34.628821
| 2016-02-20T14:58:37
| 2016-02-20T14:58:38
| 51,187,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 563
|
r
|
OutOfSampleTesting.R
|
# OutOfSampleTesting.R -- driver script for an out-of-sample backtest.
# Sets global parameters, then sources the scripts that initialize the
# portfolio, define helper functions, run the strategy, and simulate.
# NOTE(review): the variables below are presumably read as globals by the
# sourced scripts -- renaming any of them would break those scripts; confirm.
#Parameters
initDate = "2001-01-01"
# In sample
from="2012-01-01"
to = "2013-01-01"
#decisions
# Entry/exit thresholds for the trading rules; the units depend on the
# indicator each one is compared against in the sourced strategy code.
BuyChange<-0.1
sellChange<--0.2
buyRSi<-70
sellRsi<-30
BuyCci<-60
SellCci<-20
buyBbanbs<-0.7
sellBbands<-0.3
thresholdVol <- 0
# starting equity for the simulated portfolio
initEq = 50000
# initialize the portfolio and download data
source('~/Project/R/initialize.R')
# initialize the functions that are required for quant start
source('~/Project/R/functions.R')
# call quant start and get all the data into env
source('~/Project/R/ProjectStart.R')
# call Monte Carlo simulator
source('~/Project/R/MonteCarlo.R')
|
4627438308308972b9ff63afc45be023f584caba
|
e3bb40da9819ee080f19b7cb710edd029f18baa3
|
/plot3.R
|
74b365fe5b36a4fd31da792b8aa04e5fde8d09fb
|
[] |
no_license
|
diegocaggiano/Exploratory_Data_Analysis
|
8a97d30a834bf45f1df49c3b79f1030e65d32608
|
dab529c0115eac60fe35eca7f75af6b76d0943a2
|
refs/heads/master
| 2020-03-31T07:06:00.080271
| 2018-10-24T01:00:05
| 2018-10-24T01:00:05
| 152,008,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
plot3.R
|
# plot3.R -- plot the three energy sub-metering series for 2007-02-01 and
# 2007-02-02 from the UCI "Individual household electric power consumption"
# data set, writing the result to plot3.png.

# Load libraries
library(readr)
library(dplyr)
library(chron)
library(lubridate)

# Load dataset from file.
# BUG FIX: the raw file is ";"-separated but uses "." as its decimal mark.
# read.csv2 defaults to dec = ",", which leaves the numeric columns
# (e.g. Sub_metering_1) as character; dec = "." makes them parse as numeric.
ds <- read.csv2(file = "household_power_consumption.txt", header = TRUE,
                sep = ";", dec = ".", na.strings = c("?"),
                stringsAsFactors = FALSE)

# Create dataset with new field DateandTime combining the Date and Time columns
hhpc_total <- mutate(ds, DateandTime = as.POSIXct(paste(ds$Date, ds$Time), format = "%d/%m/%Y %H:%M:%S"))

# Keep only the two required days (the strict '<' on Feb 3rd retains all of
# Feb 1st and Feb 2nd)
hhpc <- filter(hhpc_total, hhpc_total$DateandTime >= ymd("2007-02-01") & hhpc_total$DateandTime < ymd("2007-02-03"))

# Create PNG file in the current directory
png(filename = "plot3.png", width = 480, height = 480)

# Make plot: one line per sub-metering channel
with(hhpc, plot(DateandTime, Sub_metering_1, type = "l", col = "black", ylab = "Energy sub metering"))
lines(hhpc$DateandTime, hhpc$Sub_metering_2, type = "l", col = "red")
lines(hhpc$DateandTime, hhpc$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), lwd = c(2.5, 2.5), col = c("black", "red", "blue"))

# Close file
dev.off()
|
72e52368b3ae9e3937259f379cd98fb5052b410b
|
56d937e8df4e5a6bf6b8f45acd68a2efdcf9d1e1
|
/main.R
|
561994514cea98bbdc3c2e9157732e9ece521179
|
[] |
no_license
|
efcaguab/pollen-competition
|
d46b970979db61dfce1e807f9ee0b59671a178b8
|
bb0db4eaa5010d126df111c3fa877a3a5f1fcd29
|
refs/heads/master
| 2022-04-19T14:47:43.818651
| 2020-03-28T05:58:44
| 2020-03-28T05:58:44
| 83,257,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,715
|
r
|
main.R
|
# main.R -- drake workflow driver for the pollen-competition project:
# cleans the raw field data, builds bootstrapped model replicates, runs the
# PCA / facilitation analyses, renders the paper, and exports figure data.
# Prepare workspace -------------------------------------------------------
pkgconfig::set_config("drake::strings_in_dots" = "literals")
library(magrittr)
library(foreach)
library(drake)
# load functions
# NOTE(review): sources every file under code/; `f` only captures the
# (unused) return values, and full.names = T uses T instead of TRUE.
f <- lapply(list.files("code", full.names = T), source)
# number of bootstrap replicates used throughout the plans below
n_replicates <- 99
# variance-stabilising transformation applied to pollen counts (log(x + 1))
transformation <- function(x) log(x + 1)
# Configuration -----------------------------------------------------------
# read config.yaml; bib_retrieved records when the bibliography was fetched
configuration_plan <- drake_plan(
config = yaml::read_yaml(file_in("config.yaml")),
bib_retrieved = config$bibliography_retrieved
)
# Clean data --------------------------------------------------------------
# plan to clean data
# each target reads one raw csv and returns a tidy table; armonised_data
# reconciles species names across all of them
clean_data_plan <- drake_plan(
sites = site_data(file_in('./data/raw/marrero-estigmatic_pollen.csv'), file_in('./data/raw/site_names.csv')),
deposition = clean_deposition(file_in('./data/raw/marrero-estigmatic_pollen.csv'), sites),
visitation_quant = clean_visitation_quant(file_in('./data/raw/marrero-quantitative_visits.csv'), sites),
visitation_qual = clean_visitation_qual(file_in('./data/raw/marrero-qualitative_visits.csv'), sites),
transfer = clean_transfer(file_in('./data/raw/marrero-pollen_transfer.csv'), sites),
abundance = clean_abundance(file_in('./data/raw/marrero-abundance.csv'), sites),
random_effects = readr::read_csv(file_in("./data/raw/random_effects.csv")),
armonised_data = armonise_species_names(deposition, visitation_quant, visitation_qual, transfer, abundance)
)
# derive per-analysis data frames (deposition, abundance, visitation,
# transfer) and summary quantities from the harmonised data
format_data_plan <- drake_plan(
dep_frame = extract_dep_frame(armonised_data),
abu_frame = extract_abu_frame(armonised_data),
plant_rel_abu = calculate_relative_abundance(abu_frame, dep_frame),
# plant_pheno_overlap = calculate_phenology_overlap(abu_frame, dep_frame),
vis_frame = extract_vis_frame(armonised_data),
degree = get_degree(vis_frame, dep_frame),
shar_pol = get_shared_pol(vis_frame),
tra_frame = extract_tra_frame(armonised_data),
pollen_dominance = get_pollen_dominance(tra_frame, vis_frame),
pollen_contribution = get_pollen_contribution(tra_frame)
)
# functional-trait space: trait matrices and species uniqueness/originality
traits_plan <- drake_plan(
plant_traits = read_plant_traits(file_in('data/raw/plant_traits.csv')),
trait_matrices = make_trait_matrices(plant_traits, abu_frame, TRUE, TRUE),
species_coords = get_species_coords(trait_matrices, weighted = TRUE),
unq_frame = get_species_uniqueness(species_coords),
org_frame = get_species_originality(species_coords, abu_frame)
)
# impute missing values for each predictor used in the models
imputation_plan <- drake_plan(
imputed_degree_legacy = impute_degree(degree),
imputed_degree = impute_shared(shar_pol),
imputed_abundance = impute_abundace(plant_rel_abu),
imputed_originality = impute_originality(org_frame),
imputed_pollen = impute_pollen_dominance(pollen_dominance),
imputed_pollen_legacy = impute_pollen_contrib(pollen_contribution)
)
# Basic analyses ----------------------------------------------------------
# simple comparisons of conspecific pollen gain, globally and per site
basic_analyses_plan <- drake_plan(
consp_self = model_conspecific_self(dep_frame),
significant_gain_global = mann_withney_part_df(
dplyr::filter(dep_frame, pollen_category == 'conspecific'),
by = 'recipient',
var = 'treatment',
conf.int = T),
significant_gain_site = mann_withney_part_df(
dplyr::filter(dep_frame, pollen_category == 'conspecific'),
by = c('recipient', 'site_name'),
var = 'treatment',
conf.int = T)
)
# Bootstrap models --------------------------------------------------------
# one resampled data set per replicate (expands to targets rep_1 .. rep_99)
boot_replicates <- drake_plan(
rep = data_replicate(
dep_frame,
imputed_abundance,
imputed_pollen,
imputed_degree,
imputed_originality,
sites,
transformation,
N)) %>%
evaluate_plan(rules = list(N = 1:n_replicates))
# fit candidate random-effect structures to every replicate
random_models <- drake_plan(
random_mod = run_random_models(rep_N, random_effects)
) %>%
evaluate_plan(rules = list(N = 1:n_replicates))
glanced_random_models <- random_models %>%
gather_plan(., gather = "glance_random_models", target = "glanced_random")
# select the best random-effect structure across replicates
random_summaries <- drake_plan(
best_random = best_random_effect(glanced_random, random_effects)
)
# fit the fixed-effect candidate models using the chosen random structure
fixed_models <- drake_plan(
fixed_mod = run_model(rep_N, best_random)) %>%
evaluate_plan(rules = list(N = 1:n_replicates))
glanced_fixed_models <- fixed_models %>%
gather_plan(., gather = "glance_fixed_models", target = "glanced_fixed")
tidied_fixed_models <- fixed_models %>%
gather_plan(., gather = "tidy_fixed_models", target = "tidied_fixed")
# rank the fixed-effect model formulas by fit statistics
aic_plan <- drake_plan(
model_formula_ranking = get_best_fixed_model_formula(glanced_fixed)
)
model_corr <- fixed_models %>%
gather_plan(., gather = "get_model_correlations", target = "model_correlations")
het_con_linear_fit <- fixed_models %>%
gather_plan(., gather = "get_model_linear_fits", target = "model_linear_fits")
het_con_linear_fit_sp <- fixed_models %>%
gather_plan(., gather = "get_model_linear_fits_species", target = "model_linear_fits_species")
# model formula treated as the best model by the summary targets below
best_model_formula <- "pollen_gain ~ abn + poc + deg + org"
# summaries across the bootstrap: effect sizes, variable importance, R2
fixed_summaries <- drake_plan(
wilcox_glo_com = global_vs_community(glanced_fixed, model_formula = best_model_formula),
summary_effects = get_summary_effects(tidied_fixed),
coefficient_averages = get_coefficient_averages(tidied_fixed, model_formula_ranking, N = 99),
variable_importance = get_variable_importance(model_formula_ranking),
r2_values = calc_model_r2_values(model_formula_ranking, glanced_fixed)
)
# model-based predictions for the quantity/quality trade-off figure
predictions <- drake_plan(
trade_off_predictions = trade_off_pred(
tidied_fixed,
wilcox_glo_com,
list(imputed_abundance, imputed_pollen, imputed_degree, imputed_originality),
chosen_criteria = "r2c",
model_formula = best_model_formula)
)
model_plans <- rbind(
random_models, glanced_random_models,
random_summaries,
fixed_models, glanced_fixed_models, tidied_fixed_models,
model_corr,
het_con_linear_fit, het_con_linear_fit_sp,
fixed_summaries,
predictions
)
# PCA of species-level predictors plus permutation tests on distances
pca_plan <- drake_plan(
pca_data = get_pca_data(plant_rel_abu, pollen_contribution, degree, org_frame, sites),
pcas = get_pca(pca_data, imputation_variants = 0:2),
random_plant_distances = all_randomisations_plant_name(pcas, 99),
random_site_distances = all_randomisations_site_name(pcas, 99),
permanova_plant_distances = get_permanova(random_plant_distances, "plant_name"),
permanova_site_distances = get_permanova(random_site_distances, "site_name"),
fig_pca = plot_pca(pcas, chosen_threshold = 0),
fig_distances = plot_permanova_dist(permanova_plant_distances, permanova_site_distances)
)
# facilitation models (random slopes) and their figures
facilitation_plan <- drake_plan(
facilitation_models = model_facilitation(dep_frame),
fig_pca_contrib = plot_pca_variances_and_contributions(pcas, chosen_threshold = 0),
facilitation_random_effects = extract_random_effects(facilitation_models),
facilitation_plot_df = get_facilitation_plot_df(dep_frame, facilitation_random_effects),
fig_random_slopes = plot_random_slopes(facilitation_plot_df)
)
# combine every analysis sub-plan into one plan
analyses_plan <- rbind(
clean_data_plan,
traits_plan,
format_data_plan,
imputation_plan,
boot_replicates,
model_plans,
aic_plan,
basic_analyses_plan,
pca_plan,
facilitation_plan
)
# Paper -------------------------------------------------------------------
# every figure shown in the manuscript and supplement
figure_plan <- drake_plan(
fig_model_results_global = make_fig_model_results_global(tidied_fixed),
fig_con_hetero_gain = make_fig_con_hetero_gain(tidied_fixed, model_linear_fits, model_formula_ranking, model_linear_fits_species),
fig_hetero_con = make_fig_con_hetero_empirical(dep_frame),
open_bagged_model = model_open_bagged(dep_frame),
coef_open_bagged = get_coef_open_bagged(open_bagged_model),
con_con_plot_df = get_con_con_plot_df(coef_open_bagged),
fig_con_con = plot_bagged_vs_open_conspecific(con_con_plot_df),
fig_proportion_vs_variables = make_fig_proportion_vs_variables(trade_off_predictions),
fig_pollen_density = make_fig_pollen_density(dep_frame),
fig_pollen_density_diff = make_fig_pollen_density_diff(rep_1),
fig_abundance = make_fig_abundance(plant_rel_abu, sites),
fig_all_model_results = make_fig_all_model_results(tidied_fixed, sites, model_formula_ranking),
fig_community_global_scatter = make_fig_community_global_scatter(plant_rel_abu, org_frame, degree, sites, pollen_contribution),
fig_effect_quant_qual = make_fig_effect_quant_qual(summary_effects, model_formula_ranking),
fig_coefficient_averages = make_fig_coefficient_avarages(coefficient_averages, variable_importance),
fig_average_qual_quant = make_fig_average_quant_qual(coefficient_averages),
fig_correlation = make_fig_correlation(rep_1),
fig_var_importance = plot_variable_importance(variable_importance),
fig_coef_avg = plot_coefficient_averages(coefficient_averages, variable_importance)
)
# word counts, bibliography and rendered pdf outputs for the manuscript
reporting_plan <- drake_plan(
graphical_abstract_small = export_graphical_abstract(fig_distances, file_out("paper/graphical-abstract-small.png"), 150),
graphical_abstract_big = export_graphical_abstract(fig_distances, file_out("paper/graphical-abstract-big.png"), 300),
references = get_bibliography(
"https://raw.githubusercontent.com/efcaguab/phd-bibliography/master/pollen-competition.bib",
file_out("paper/bibliography.bib"), bib_retrieved),
abstract = readLines(file_in("./paper/abstract.md")),
keywords = process_keywords(file_in("./paper/keywords.md")),
acknowledgements = readLines(file_in("./paper/acknowledgements.md")),
intro_line_number = get_line_number(file_in("paper/manuscript.Rmd"), "# Introduction"),
abs_wordcount = count_words(file_in("paper/abstract.md")),
msc_wordcount = count_words(file_in('paper/manuscript.Rmd'), lines_to_ignore = 1:intro_line_number),
n_references = count_references(file_in('paper/manuscript.Rmd'), lines_to_ignore = 1:intro_line_number, refs_to_exclude = "@ref"),
n_displays = count_displays(file_in('paper/manuscript.Rmd'), lines_to_ignore = 1:intro_line_number),
msc_title = get_yaml_title(file_in('paper/manuscript.Rmd')),
render_pdf(knitr_in('paper/supp-info.Rmd'), file_out('paper/supp-info.pdf'), clean_md = FALSE),
render_pdf(file_in('paper/draft-info.Rmd'), file_out('paper/draft-info.pdf'), clean_md = FALSE),
render_pdf(knitr_in('paper/manuscript.Rmd'), file_out('paper/manuscript.pdf'), clean_md = FALSE),
knitr::knit2pdf(knitr_in("paper/cover-letter.Rnw"), output = file_out("paper/cover-letter.tex"))
)
paper_plan <- rbind(
figure_plan,
reporting_plan
)
# Export for thesis -------------------------------------------------------
dir.create("data/processed/plot_data", showWarnings = FALSE, recursive = TRUE)
# save the plot-ready objects as ASCII rds files for reuse in the thesis
export_figure_data_plan <- drake::drake_plan(
saveRDS(object = variable_importance,
file = drake::file_out("data/processed/plot_data/variable_importance.rds"),
ascii = TRUE, compress = FALSE),
saveRDS(object = coefficient_averages,
file = drake::file_out("data/processed/plot_data/coefficient_averages.rds"),
ascii = TRUE, compress = FALSE),
saveRDS(object = pcas,
file = drake::file_out("data/processed/plot_data/pcas.rds"),
ascii = TRUE, compress = FALSE),
saveRDS(object = permanova_plant_distances,
file = drake::file_out("data/processed/plot_data/permanova_plant_distances.rds"),
ascii = TRUE, compress = FALSE),
saveRDS(object = permanova_site_distances,
file = drake::file_out("data/processed/plot_data/permanova_site_distances.rds"),
ascii = TRUE, compress = FALSE)
)
# Make all ----------------------------------------------------------------
# set up plan
project_plan <- rbind(
configuration_plan,
analyses_plan,
paper_plan,
export_figure_data_plan
)
project_config <- drake_config(project_plan)
# vis_drake_graph(project_config, targets_only = T)
# execute plan
# make(project_plan, parallelism = "parLapply", jobs = 3)
make(project_plan)
|
dd19a06be6ee78d09fc756075d280989fcc1ac42
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledataflowv1b3.auto/man/SeqMapTask.userFn.Rd
|
80ebce96dcfa77319d9bf65b8763f1b10bb55285
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 461
|
rd
|
SeqMapTask.userFn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{SeqMapTask.userFn}
\alias{SeqMapTask.userFn}
\title{SeqMapTask.userFn Object}
\usage{
SeqMapTask.userFn()
}
\value{
SeqMapTask.userFn object
}
\description{
SeqMapTask.userFn Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The user function to invoke.
}
\seealso{
Other SeqMapTask functions: \code{\link{SeqMapTask}}
}
|
dde10120c43722c6efe914376cc3b1c2da65be34
|
6ff4577459aec8c589bab40625301f7eefc82e73
|
/R/lmWrapper-glmer.R
|
720e408e4fdaf9e4c3dd790ef2b71c9405de655c
|
[] |
no_license
|
lagzxadr/MAST
|
f1cb34efdb42d2c4eb2b6383eff02193a8e69409
|
a079646898349315a676b56b6a77ca7dd17ec449
|
refs/heads/master
| 2021-04-27T16:27:16.229846
| 2017-12-22T16:19:32
| 2017-12-22T16:19:32
| 122,302,743
| 1
| 0
| null | 2018-02-21T06:59:29
| 2018-02-21T06:59:29
| null |
UTF-8
|
R
| false
| false
| 12,892
|
r
|
lmWrapper-glmer.R
|
## This is a horrible hack, and should be rewritten to use the lmer internals.
## But in the meantime: we make the fixed effects model matrix, then call the formula method for lmer/glmer
## This is to allow us to do arbitrary LRT tests and add/drop columns of the design
## Details:
## Invariants:
## 1. model.matrix contains fixed effects--so whenever we set the model.matrix, we'll delete the random effects
## 2. formula contains full model (fixed and random), so we can update it normally
## 3. Random portion of the model will be parsed off
## Construction:
## 1 & 2
## Fitting:
## establish pseudodesign and mutilate the formula
# Split a mixed-model formula into its random- and fixed-effects pieces.
# Returns a list of length-1 character vectors:
#   vars:   random-effects variables joined with '+' (usable by model.frame)
#   REform: the random-effects part of the formula, terms re-parenthesised
#   FEform: the fixed-effects part ('1' when there are none)
getREvars <- function(Formula){
  term_labels <- labels(terms(Formula))
  is_random <- grepl('|', term_labels, fixed = TRUE)
  ## collapse all random-effect variables into one additive expression
  re_vars <- gsub('[|]+', '+', paste(term_labels[is_random], collapse = '+'))
  ## keep the portion of the formula that specified the random effects
  re_formula <- paste(sprintf('(%s)', term_labels[is_random]), collapse = '+')
  fe_formula <- paste(term_labels[!is_random], collapse = '+')
  ## intercept-only fallback when there are no fixed-effect terms
  if (trimws(fe_formula) == '') fe_formula <- '1'
  list(vars = re_vars, REform = re_formula, FEform = fe_formula)
}
# Join a character vector of term labels into a one-sided formula string,
# e.g. c("a", "b") -> "~a+b". A length-1 input is passed through unchanged.
toAdditiveString <- function(string){
  joined <- if (length(string) > 1) paste(string, collapse = '+') else string
  paste0('~', joined)
}
# Convert a character vector of term labels into a one-sided formula object,
# e.g. c("a", "b") -> ~a+b.
# FIX: the original's last statement was an assignment (`string <- ...`),
# which made the return value invisible; return the formula directly.
toAdditiveFormula <- function(string){
  as.formula(toAdditiveString(string))
}
##' @export
##' @describeIn LMERlike update the formula or design matrix
##' @param formula. \code{formula}
##' @param design something coercible to a \code{data.frame}
setMethod('update', signature=c(object='LMERlike'), function(object, formula., design, ...){
if(!missing(formula.)){
# merge the new terms into the stored (mixed) formula
object@formula <- update.formula(object@formula, formula.)
}
reComponents <- getREvars(object@formula)
if(!missing(design)){
object@design <- as(design, 'data.frame')
}
# rebuild the fixed-effects model matrix from the fixed part only; the
# model.matrix<- replacement method also refreshes @pseudoMM with the
# random-effect grouping variables
model.matrix(object) <- model.matrix(as.formula(paste0('~', reComponents$FEform)), object@design, ...)
# invalidate any previous fits: both components must be refit after an update
object@fitC <- object@fitD <- numeric(0)
object@fitted <- c(C=FALSE, D=FALSE)
object
})
# On construction, parse the random/fixed split of @formula and build the
# fixed-effects model matrix from @design (the model.matrix<- replacement
# method also populates @pseudoMM).
setMethod('initialize', 'LMERlike', function(.Object, ...){
.Object <- callNextMethod()
reComponents <- getREvars(.Object@formula)
model.matrix(.Object) <- model.matrix(as.formula(paste0('~', reComponents$FEform)), .Object@design)
.Object
})
# Replacement method: store the fixed-effects model matrix (via the parent
# method), then rebuild @pseudoMM -- a data frame holding the fixed-effect
# columns alongside the raw random-effect grouping variables, so that
# lmer/glmer can later be called on it with a column-name ("mutilated")
# formula.
setReplaceMethod('model.matrix', signature=c(object='LMERlike'), function(object, value){
reComponents <- getREvars(object@formula)
object <- callNextMethod()
object@pseudoMM <- as.data.frame(cbind(model.matrix(object),
model.frame(toAdditiveFormula(reComponents$vars), object@design)))
object
})
## lmerMM <- function (formula, data = NULL, REML = TRUE, control = lmerControl(),
## start = NULL, verbose = 0L, subset, weights, na.action, offset,
## contrasts = NULL, devFunOnly = FALSE, modelMatrix, ...)
## {
## mc <- mcout <- match.call()
## missCtrl <- missing(control)
## if (!missCtrl && !inherits(control, "lmerControl")) {
## if (!is.list(control))
## stop("'control' is not a list; use lmerControl()")
## warning("passing control as list is deprecated: please use lmerControl() instead",
## immediate. = TRUE)
## control <- do.call(lmerControl, control)
## }
## if (!is.null(list(...)[["family"]])) {
## warning("calling lmer with 'family' is deprecated; please use glmer() instead")
## mc[[1]] <- quote(lme4::glmer)
## if (missCtrl)
## mc$control <- glmerControl()
## return(eval(mc, parent.frame(1L)))
## }
## mc$control <- control
## mc[[1]] <- quote(lme4::lFormula)
## lmod <- eval(mc, parent.frame(1L))
## lmod$X <- modelMatrix
## mcout$formula <- lmod$formula
## lmod$formula <- NULL
## devfun <- do.call(mkLmerDevfun, c(lmod, list(start = start,
## verbose = verbose, control = control)))
## if (devFunOnly)
## return(devfun)
## opt <- optimizeLmer(devfun, optimizer = control$optimizer,
## restart_edge = control$restart_edge, boundary.tol = control$boundary.tol,
## control = control$optCtrl, verbose = verbose, start = start,
## calc.derivs = control$calc.derivs, use.last.params = control$use.last.params)
## cc <- checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv,
## lbound = environment(devfun)$lower)
## mkMerMod(environment(devfun), opt, lmod$reTrms, fr = lmod$fr,
## mcout, lme4conv = cc)
## }
## glmerMM <- function (formula, data = NULL, family = gaussian, control = glmerControl(),
## start = NULL, verbose = 0L, nAGQ = 1L, subset, weights, na.action,
## offset, contrasts = NULL, mustart, etastart, devFunOnly = FALSE, modelMatrix,
## ...)
## {
## if (!inherits(control, "glmerControl")) {
## if (!is.list(control))
## stop("'control' is not a list; use glmerControl()")
## msg <- "Use control=glmerControl(..) instead of passing a list"
## if (length(cl <- class(control)))
## msg <- paste(msg, "of class", dQuote(cl[1]))
## warning(msg, immediate. = TRUE)
## control <- do.call(glmerControl, control)
## }
## mc <- mcout <- match.call()
## if (is.character(family))
## family <- get(family, mode = "function", envir = parent.frame(2))
## if (is.function(family))
## family <- family()
## if (isTRUE(all.equal(family, gaussian()))) {
## warning("calling glmer() with family=gaussian (identity link) as a shortcut to lmer() is deprecated;",
## " please call lmer() directly")
## mc[[1]] <- quote(lme4::lmer)
## mc["family"] <- NULL
## return(eval(mc, parent.frame()))
## }
## mc[[1]] <- quote(lme4::glFormula)
## glmod <- eval(mc, parent.frame(1L))
## glmod$X <- modelMatrix
## mcout$formula <- glmod$formula
## glmod$formula <- NULL
## devfun <- do.call(mkGlmerDevfun, c(glmod, list(verbose = verbose,
## control = control, nAGQ = 0)))
## if (nAGQ == 0 && devFunOnly)
## return(devfun)
## if (is.list(start) && !is.null(start$fixef))
## if (nAGQ == 0)
## stop("should not specify both start$fixef and nAGQ==0")
## opt <- optimizeGlmer(devfun, optimizer = control$optimizer[[1]],
## restart_edge = if (nAGQ == 0)
## control$restart_edge
## else FALSE, boundary.tol = if (nAGQ == 0)
## control$boundary.tol
## else 0, control = control$optCtrl, start = start, nAGQ = 0,
## verbose = verbose, calc.derivs = FALSE)
## if (nAGQ > 0L) {
## start <- updateStart(start, theta = opt$par)
## devfun <- updateGlmerDevfun(devfun, glmod$reTrms, nAGQ = nAGQ)
## if (devFunOnly)
## return(devfun)
## opt <- optimizeGlmer(devfun, optimizer = control$optimizer[[2]],
## restart_edge = control$restart_edge, boundary.tol = control$boundary.tol,
## control = control$optCtrl, start = start, nAGQ = nAGQ,
## verbose = verbose, stage = 2, calc.derivs = control$calc.derivs,
## use.last.params = control$use.last.params)
## }
## cc <- if (!control$calc.derivs)
## NULL
## else {
## if (verbose > 10)
## cat("checking convergence\n")
## checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv,
## lbound = environment(devfun)$lower)
## }
## mcout <- call('LMERlike')
## mkMerMod(environment(devfun), opt, glmod$reTrms, fr = glmod$fr,
## mcout, lme4conv = cc)
## }
##' @include AllClasses.R
##' @include AllGenerics.R
##' @param silent mute some warnings emitted from the underlying modeling functions
##' @rdname fit
# Fit the two-part hurdle model: a linear mixed model on the positive
# ("continuous", C) observations and a binomial mixed model on the
# zero/non-zero indicator ("discrete", D).
setMethod('fit', signature=c(object='LMERlike', response='missing'), function(object, response, silent=TRUE, ...){
prefit <- .fit(object)
# NOTE(review): `pos` (used below) is not defined in this method; it is
# presumably created in this environment by .fit() -- confirm.
if(!prefit){
if(!silent) warning('No positive observations')
return(object)
}
fitArgsC <- object@fitArgsC
fitArgsD <- object@fitArgsD
## Mutilate the formula and replace it with the colnames of the fixed effects
##
reComp <- getREvars(object@formula)
protoForm <- sprintf('~ 0 + %s + %s',
paste(escapeSymbols(colnames(model.matrix(object))), collapse='+'),
reComp$REform)
# continuous part models the response itself; discrete part models response>0
formC <- as.formula(paste0('response ', protoForm))
formD <- as.formula(paste0('response>0', protoForm))
dat <- cbind(response=object@response, object@pseudoMM)
# bLMERlike objects use the Bayesian-regularised blme fitters instead of lme4
if(inherits(object, 'bLMERlike')){
cfun <- blme::blmer
dfun <- blme::bglmer
} else{
cfun <- lme4::lmer
dfun <- lme4::glmer
}
if(any(pos)){
# continuous component: LMM on the positive observations only (ML, not REML)
datpos <- dat[pos,]
object@fitC <- do.call(cfun, c(list(formula=formC, data=quote(datpos), REML=FALSE), fitArgsC))
ok <- length(object@fitC@optinfo$conv$lme4)==0
object@fitted['C'] <- TRUE
if(!ok){
# record the optimizer message; under strict convergence mark the fit failed
object@optimMsg['C'] <- object@fitC@optinfo$conv$lme4$messages[1]
object@fitted['C'] <- !object@strictConvergence
}
}
if(!all(pos)){
# discrete component: logistic GLMM on the zero/non-zero indicator
object@fitD <- do.call(dfun, c(list(formula=formD, data=quote(dat), family=binomial()), fitArgsD))
# NOTE(review): the next line reads conv$lme (typo for conv$lme4?) and is
# immediately overwritten two lines below -- it appears to be dead code.
object@fitted['D'] <- length(object@fitD@optinfo$conv$lme)==0
ok <- length(object@fitD@optinfo$conv$lme4)==0
object@fitted['D'] <- TRUE
if(!ok){
object@optimMsg['D'] <- object@fitD@optinfo$conv$lme4$messages[[1]]
object@fitted['D'] <- !object@strictConvergence
}
}
if(!silent & !all(object@fitted)) warning('At least one component failed to converge')
object
})
#' @describeIn LMERlike return the variance/covariance of component \code{which}
#' @param object \code{LMERlike}
#' @param which \code{character}, one of 'C', 'D'.
#' @param ... In the case of \code{vcov}, ignored. In the case of \code{update}, passed to \code{model.matrix}.
#' @return see the section "Methods (by generic)"
setMethod('vcov', signature=c(object='LMERlike'), function(object, which, ...){
stopifnot(which %in% c('C', 'D'))
# start from the default (full-size) matrix so unfitted components still
# return a matrix of the expected dimension
vc <- object@defaultVcov
if(which=='C' & object@fitted['C']){
V <- vcov(object@fitC)
} else if(which=='D' & object@fitted['D']){
V <- vcov(object@fitD)
} else{
# unfitted component: nothing to copy in
V <- matrix(nrow=0, ncol=0)
}
# strip the backticks introduced by the mutilated-formula column names
nm <- str_replace_all(colnames(V), fixed('`'), '')
dimnames(V) <- list(nm, nm)
# overwrite only the entries actually estimated by the fit
ok <- colnames(V)
vc[ok,ok] <- as.numeric(V)
vc
})
if(getRversion() >= "2.15.1") globalVariables(c('fixef', 'lmer', 'glmer'))
#' @describeIn LMERlike return the coefficients. The horrendous hack is attempted to be undone.
#' @param singular \code{logical}. Should NA coefficients be returned?
setMethod('coef', signature=c(object='LMERlike'), function(object, which, singular=TRUE, ...){
    stopifnot(which %in% c('C', 'D'))
    ## Default: NA for every fixed-effect column, so an unfitted component
    ## still yields a full-length, correctly named coefficient vector.
    co <- setNames(rep(NA, ncol(model.matrix(object))), colnames(model.matrix(object)))
    if (which == 'C' && object@fitted['C']) {
        co <- fixef(object@fitC)
    } else if (which == 'D' && object@fitted['D']) {
        ## BUG FIX: the original 'else if' tested only fitted['D'], so asking
        ## for an unfitted continuous ('C') component silently returned the
        ## discrete component's coefficients. Mirror the branch logic used by
        ## the vcov method.
        co <- fixef(object@fitD)
    }
    if (!singular) co <- co[!is.na(co)]
    conm <- names(co)
    ## because of backtick shenanigans in the mutilated-formula column names
    names(co) <- str_replace_all(conm, fixed('`'), '')
    co
})
##' @describeIn LMERlike return the log-likelihood of the continuous (C) and
##' discrete (D) components; an unfitted component contributes 0.
setMethod('logLik', signature=c(object='LMERlike'), function(object){
    ll <- c(C = 0, D = 0)
    for (comp in c('C', 'D')) {
        if (object@fitted[comp]) {
            ll[comp] <- logLik(slot(object, paste0('fit', comp)))
        }
    }
    ll
})
# Degrees of freedom per component: the 'df' attribute of its logLik when the
# component is fitted, else 0.
# NOTE(review): ifelse evaluates both alternatives eagerly, so
# logLik(object@fitC)/logLik(object@fitD) run even for unfitted components
# (where fitC/fitD are numeric(0)) -- confirm this is only reached after fit().
setMethod('dof', signature=c(object='LMERlike'), function(object){
setNames(ifelse(object@fitted, c(attr(logLik(object@fitC), 'df'), attr(logLik(object@fitD), 'df')), c(0,0)), c('C', 'D'))
})
# Collect coefficients, covariance matrices, fit statistics and convergence
# flags for both components into the list structure expected downstream.
# NOTE(review): rowm()/torowm() are not defined in this file; presumably
# package helpers shaping values into single-row matrices -- confirm.
setMethod('summarize', signature=c(object='LMERlike'), function(object, ...){
li <- list(coefC=coef(object, which='C'), vcovC=vcov(object, 'C'),
deviance=rowm(deviance(object@fitC), deviance(object@fitD)),
df.null=rowm(nobs(object@fitC),nobs(object@fitD)),
dispersion=rowm(sigma(object@fitC), NA),
coefD=coef(object, which='D'), vcovD=vcov(object, 'D'),
loglik=torowm(logLik(object)),
converged=torowm(object@fitted))
# residual df = null df minus the number of estimated (non-NA) coefficients
li[['df.resid']] <- li[['df.null']]-c(sum(!is.na(li[['coefC']])), sum(!is.na(li[['coefD']])))
li[['dispersionNoshrink']] <- li[['dispersion']]
li
})
|
cc6203a079fc4cde41e884f4b2424e781d3245f0
|
c61ab862399d908d556ee7af346ab8fbbd9777b4
|
/man/plot_hierarchy_shape.Rd
|
6bf8b1614a41f24f91df410c25cb54ba7d64d0e5
|
[] |
no_license
|
cran/aniDom
|
539593cb59e77747a5da8cf15673049b63d862c1
|
cb39e082602cb1e640a0a8b39057664500adcc7a
|
refs/heads/master
| 2021-07-15T10:27:05.038337
| 2021-03-06T22:50:36
| 2021-03-06T22:50:36
| 81,476,233
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
rd
|
plot_hierarchy_shape.Rd
|
\name{plot_hierarchy_shape}
\alias{plot_hierarchy_shape}
\title{
Plots the shape of a dominance hierarchy from empirical data
}
\description{
This function takes a set of winners and losers from observed interactions and plots the probability of the dominant individual in an interaction winning given the difference in rank to the subordinate in the same interaction.
}
\usage{
plot_hierarchy_shape(identity, rank, winners, losers, fitted = FALSE)
}
\arguments{
\item{identity}{
A vector containing the identities of all individuals in the data.
}
\item{rank}{
A vector giving the ranks for each individual (in the same order as the identities).
}
\item{winners}{
A vector giving the identity of the winner for each interaction.
}
\item{losers}{
A vector giving the identity of the loser for each interaction in the same order as the winners.
}
\item{fitted}{
A Boolean (TRUE/FALSE) describing whether to add a fitted line to the plot
}
}
\details{
This function is useful for examining how the probability of winning is shaped by the difference in rank. The shape of this graph provides information about the shape of the dominance hierarchy.
}
\value{
This function will return the data for x (difference in rank) and y (probability of dominant winning) coordinates of the plot as a data frame.
}
\references{
Sanchez-Tojar, A., Schroeder, J., Farine, D.R. (in prep) Methods for inferring dominance hierarchies and estimating their uncertainty.
}
\author{
Written by Damien R. Farine & Alfredo Sanchez-Tojar
Maintainer: Damien R. Farine <damien.farine@ieu.uzh.ch>
}
\examples{
par(mfrow=c(1,2))
# Set population size
N <- 20
# Set shape parameters
a = 15
b = 3
# See what this looks like
plot_winner_prob(1:N,a,b)
# Generate some input data
data <- generate_interactions(N,400,a,b)
# See what the hierarchy looks like from the output data
winners <- data$interactions$Winner
losers <- data$interactions$Loser
identities <- data$hierarchy$ID
ranks <- data$hierarchy$Rank
shape <- plot_hierarchy_shape(identities,ranks,winners,losers,fitted=TRUE)
# Data is contained in shape
shape
}
|
9c9086b82be9b15282a1db10a69758c5b25b0b53
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mipfp/examples/confint.mipfp.Rd.R
|
f7f7d0987589aa6bb78ec5892dd18360af2b1435
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 741
|
r
|
confint.mipfp.Rd.R
|
# Example script (extracted from the mipfp confint.mipfp help page):
# estimate a 2-way table from its margins with IPF, then compute CIs.
library(mipfp)
### Name: confint.mipfp
### Title: Computing confidence intervals for the mipfp estimates
### Aliases: confint.mipfp
### Keywords: multivariate
### ** Examples
# true contingency (2-way) table
true.table <- array(c(43, 44, 9, 4), dim = c(2, 2))
# generation of sample, i.e. the seed to be updated
seed <- ceiling(true.table / 10)
# desired targets (margins)
target.row <- apply(true.table, 2, sum)
target.col <- apply(true.table, 1, sum)
# storing the margins in a list
target.data <- list(target.col, target.row)
# list of dimensions of each marginal constraint
target.list <- list(1, 2)
# using ipfp
res <- Estimate(seed, target.list, target.data)
# computing and printing the confidence intervals
print(confint(res))
|
5afb2415c0f0396e3521c60461278be423e9ca2b
|
5ffa646540a7a377795a5bf93bdc2f269605932d
|
/R/build_site.R
|
d51db8d34f599aa2e8994264337b04a64f56c61d
|
[] |
no_license
|
melanie-demeure/stateoftheRinRennes
|
ec852091d9f0b29860f7b4b4c99b380445797ebd
|
b1256229b4c645e1dafa4ff8d31164c2d9d35b93
|
refs/heads/master
| 2022-12-23T02:13:12.459567
| 2020-09-28T13:47:35
| 2020-09-28T13:47:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
build_site.R
|
### Given the problems encountered with lapply in xaringan files generation, avoid lapply and go for a loop
# Render every post's Rmd individually, then build the whole site.
# NOTE(review): pattern = '.Rmd' is a regex, so the dot matches any
# character; '\\.Rmd$' would match only the intended extension.
for(f in list.files('_posts/', recursive = TRUE, pattern = '.Rmd',
full.names = TRUE))
rmarkdown::render(f)
rmarkdown::render_site(encoding = 'UTF-8')
|
242b2489924eb0f34a91e11a08f140c3e99c57bd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lessR/examples/simCLT.Rd.R
|
bac3ab8ea992851ccad8e541afa30dee04c5e791
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
r
|
simCLT.Rd.R
|
# Example script (extracted from the lessR simCLT help page): simulate
# sampling distributions to illustrate the Central Limit Theorem.
library(lessR)
### Name: simCLT
### Title: Pedagogical Simulation for the Central Limit Theorem
### Aliases: simCLT
### Keywords: central limit theorem
### ** Examples
# plot of the standardized normal
# and corresponding sampling distribution with ns=1000 samples
# each of size 2
simCLT(ns=1000, n=2)
# plot of the uniform dist from 0 to 4
# and corresponding sampling distribution with ns=1000 samples
# each of size 2
simCLT(ns=1000, n=2, p1=0, p2=4, type="uniform", bin.width=0.01)
# save the population and sample distributions to pdf files
simCLT(100, 10, pdf=TRUE)
|
0c37b98867a6fb9c50d51fec17e9d287bac277c0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/IMIFA/examples/G_priorDensity.Rd.R
|
75e5f5b4f31987543118f2f0cab609b19d425466
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
G_priorDensity.Rd.R
|
# Example script (extracted from the IMIFA G_priorDensity help page):
# visualise Dirichlet / Pitman-Yor process priors on the number of clusters.
library(IMIFA)
### Name: G_priorDensity
### Title: Plot Pitman-Yor / Dirichlet Process Priors
### Aliases: G_priorDensity
### Keywords: plotting
### ** Examples
# Plot Dirichlet process priors for different values of alpha
(DP <- G_priorDensity(N=50, alpha=c(3, 10, 25)))
# Non-zero discount requires loading the "Rmpfr" library
# require("Rmpfr")
# Verify that these alpha/discount values produce Pitman-Yor process priors with the same mean
# G_expected(N=50, alpha=c(19.23356, 6.47006, 1), discount=c(0, 0.47002, 0.7300045))
# Now plot them to examine tail behaviour as discount increases
# (PY <- G_priorDensity(N=50, alpha=c(19.23356, 6.47006, 1), discount=c(0, 0.47002, 0.7300045)))
|
58b3e5ac6babfa7c85892f6c7e343ae38385f473
|
1e3728c33d28b9d6da73d9b5d0b195ae0f9e8cac
|
/Clustering/R Scripts/StackCols.R
|
17911f404919efde3ad2b27487138aa192e42ab8
|
[] |
no_license
|
saurabh-devgun-iiitb/StackOverflowAnalytics
|
b2b705c3a6ec21550c07f30c4419e4ff00e4dc58
|
82d5a9dcd4adeaf14fb69952b3280e164dade702
|
refs/heads/master
| 2021-01-21T06:25:24.448403
| 2017-02-26T20:06:29
| 2017-02-26T20:06:29
| 83,233,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
StackCols.R
|
# Interactively pick the technology-usage csv; first row holds column names.
# FIX: spell out header = TRUE (T is an ordinary, reassignable variable).
tech <- read.csv(file.choose(), header = TRUE)
View(tech)
#function for calculating Cramer's v
# Cramer's V measure of association between two categorical variables.
#
# Args:
#   y, x: factors (or vectors coercible to factors) of equal length.
# Returns: Cramer's V in [0, 1] (named numeric; the name is inherited
#   from the chi-squared statistic).
# Side effect: prints the chi-squared statistic, as before.
cramer <- function(y, x){
  # BUG FIX: nlevels() on a non-factor (e.g. a character column read with
  # stringsAsFactors = FALSE) returns 0, which made min(K-1, L-1) negative
  # and the result NaN. Coerce to factors first.
  y <- as.factor(y)
  x <- as.factor(x)
  K <- nlevels(y)
  L <- nlevels(x)
  n <- length(y)
  # No continuity correction: V is defined from the raw chi-squared statistic.
  chi2 <- chisq.test(y, x, correct = FALSE)
  print(chi2$statistic)
  v <- sqrt(chi2$statistic / (n * min(K - 1, L - 1)))
  return(v)
}
#similarity matrix
# Pairwise Cramer's V between all column pairs; diagonal stays 1.
sim <- matrix(1,nrow=ncol(tech),ncol=ncol(tech))
rownames(sim) <- colnames(tech)
colnames(sim) <- colnames(tech)
# Fill the upper triangle and mirror it (V is symmetric).
for (i in 1:(nrow(sim)-1)){
  for (j in (i+1):ncol(sim)){
    y <- tech[,i]
    x <- tech[,j]
    sim[i,j] <- cramer(y,x)
    sim[j,i] <- sim[i,j]
  }
}
#distance matrix
# Dissimilarity = 1 - similarity.
dissim <- as.dist(1-sim)
#clustering
# Hierarchical clustering of columns with Ward's method; hang = -1 aligns
# all leaf labels at the bottom of the dendrogram.
tree <- hclust(dissim,method="ward.D")
plot(tree, hang=-1)
|
701fac5cda5f3781ffcb4062d723f2df1fdeb0e6
|
652a00e139bf9cf1ad32ebbb812e195d6f1ce276
|
/fetchGO.R
|
06585422fb5186ff907eb118296e82529cf25f70
|
[] |
no_license
|
scalefreegan/R-tools
|
7354dc5906b4082f8f097f182e6e8defeb996bc4
|
3b6bea3918e5f11b6f836ae582ff5fab5d9eadee
|
refs/heads/master
| 2021-01-10T21:01:00.650652
| 2017-11-09T14:39:55
| 2017-11-09T14:39:55
| 2,478,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,247
|
r
|
fetchGO.R
|
# Download a file to a local path, creating parent directories as needed.
#
# A (re-)download is attempted when mode is "wb" (always overwrite), when
# the destination does not exist, or when it is exactly 118 bytes
# (presumably the size of an "empty result" page -- TODO confirm).
#
# Args:
#   f:     destination file path.
#   url:   source URL.
#   msg:   optional message printed before downloading.
#   mode:  write mode passed to download.file().
#   quiet: passed through to download.file().
#   ...:   further arguments forwarded to download.file().
# Returns: 0 when no download was attempted, otherwise the result of
#   try(download.file(...)).
dlf <- function (f, url, msg = NULL, mode = "wb", quiet = F, ...)
{
    status <- 0
    needs_fetch <- mode == "wb" || !file.exists(f) ||
        file.info(f)$size == 118
    if (needs_fetch) {
        dest_dir <- dirname(f)
        if (!file.exists(dest_dir))
            try(dir.create(dest_dir, recursive = T))
        if (!is.null(msg))
            cat(msg, "\n")
        status <- try(download.file(url, destfile = f, mode = mode,
            quiet = quiet, ...))
    }
    # Close any connections download.file may have left open.
    closeAllConnections()
    status
}
# Download GO annotations for the current species from MicrobesOnline and
# build a topGO gene -> GO mapping.
#
# Relies on an active `e` (egrin) environment: reads e$rsat.species,
# e$taxon.id, e$genome.info and e$ratios, and calls e$get.synonyms() to
# translate accessions to the row names of the ratios matrix.
#
# Args:
#   IdOverride: optional MicrobesOnline taxonomy id used instead of
#               e$taxon.id.
# Returns: a gene -> GO list as produced by topGO::readMappings().
# Side effects: creates ./data/, downloads annotation files, writes a
#   .map file.
load_go_microbes_online <- function(IdOverride=NULL) {
  require(topGO)
  # Currently requires an active egrin env for the species of interest
  cat("Using GO annotations from MicrobesOnline...\n")
  cat("Storing results in ./data/...\n")
  try(dir.create("./data"))
  if (!is.null(IdOverride)) {
    fname <- paste("data/", e$rsat.species, "/microbesonline_geneontology_",
                   IdOverride, ".named", sep = "")
    err <- dlf(fname, paste("http://www.microbesonline.org/cgi-bin/genomeInfo.cgi?tId=",
                            IdOverride, ";export=tab", sep = ""),mode="wb")
  } else {
    fname <- paste("data/", e$rsat.species, "/microbesonline_geneontology_",
                   e$taxon.id, ".named", sep = "")
    # NOTE(review): the double comma below passes a missing `msg` argument
    # to dlf(); it falls back to the default, but looks accidental.
    err <- dlf(fname, paste("http://www.microbesonline.org/cgi-bin/genomeInfo.cgi?tId=",
                            e$taxon.id, ";export=tab", sep = ""),,mode="wb")
    # Fall back to the organism id when the taxonomy-id download returned
    # nothing (missing file or the 118-byte empty page).
    if (e$genome.info$org.id$V1[1] != e$taxon.id && (!file.exists(fname) || file.info(fname)$size ==
                                                     118)) {
      fname <- paste("data/", e$rsat.species, "/microbesonline_geneontology_",
                     e$genome.info$org.id$V1[1], ".named", sep = "")
      err <- dlf(fname, paste("http://www.microbesonline.org/cgi-bin/genomeInfo.cgi?oId=",
                              e$genome.info$org.id$V1[1], ";export=tab", sep = ""),mode="wb")
    }
  }
  if (file.exists(fname))
    cat("Succesfully fetched GO annotations. Parsing...\n")
  f <- read.delim(fname)
  # try to match appropriate names
  # use accession to pull out names that overlap with ratios matrix
  # remove entries without accession
  f <- f[which(sapply(f[,"accession"],nchar)>1),]
  syns <- e$get.synonyms(f[,"accession"])
  # Keep, for each accession, the first synonym present in the ratios matrix.
  syns.trans <- lapply(seq(1,length(syns)),function(i){syns[[i]][syns[[i]]%in%rownames(e$ratios[[1]])][1]})
  ind <- which(sapply(syns.trans,length)>0)
  # Write a two-column (gene, GO) mapping file for topGO::readMappings().
  fname.map <- paste("data/", e$rsat.species, "/microbesonline_geneontology_",
                     e$genome.info$org.id$V1[1], ".map", sep = "")
  write.table(data.frame(unlist(syns.trans[ind]),f[ind,"GO"]),fname.map,sep="\t",quote=F,row.names=F,col.names=F)
  gene2go <- readMappings(fname.map)
  return(gene2go)
}
# Read a previously written gene -> GO mapping file.
#
# Args:
#   file: path to a mapping file in topGO::readMappings() format
#         (gene id, tab, comma-separated GO ids).
# Returns: named list mapping gene ids to GO id vectors.
load_topgo_map <- function(file) {
  require(topGO)
  gene2go <- readMappings(file)
  return(gene2go)
}
# Build a topGOdata object for an enrichment analysis.
#
# Args:
#   genes:    character vector of genes of interest.
#   gene2go:  gene -> GO mapping list (see load_topgo_map()).
#   ontology: one of "BP", "MF", "CC"; defaults to "BP".
# Returns: a "topGOdata" object ready for topGO::runTest().
get_topGO_object <- function(genes,gene2go,ontology=c("BP","MF","CC")[1]) {
  require(topGO)
  # genes is a vector containing genes of interest
  # Binary factor over the full gene universe: 1 = gene of interest.
  geneList <- factor(as.integer(names(gene2go)%in%genes))
  names(geneList) <- names(gene2go)
  GOdata <- new("topGOdata", ontology = ontology, allGenes = geneList, annot = annFUN.gene2GO, gene2GO = gene2go)
  #GOdata can be used directly for analysis, e.g.
  # test <- runTest(GOdata,algorithm="classic",statistic="fisher")
  # results <- GenTable(GOdata,test,topNodes=10)
  return(GOdata)
}
|
0150d7eb67c1444923ff6956bd6099c402777d8e
|
9497ffe6f9feb5d740c18293844aa59d381c1a41
|
/man/writeMgf.Rd
|
87055e2ec2ad77765e93592b216632444d3f530b
|
[
"MIT"
] |
permissive
|
ohgane/ShotgunLipidomicsR
|
63efe1413c92d865665bfb9cabc67d6d90ba867a
|
b657c419eb2a4fd192c5b1e7db90fc79290f00ff
|
refs/heads/master
| 2021-01-21T14:32:55.459687
| 2016-07-28T08:28:30
| 2016-07-28T08:28:30
| 59,251,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 603
|
rd
|
writeMgf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeMgf.R
\name{writeMgf}
\alias{writeMgf}
\title{Export an MGF file from a file name, precursor m/z, and a peak table}
\usage{
writeMgf(file, precursor, peak.table, charge = "+1")
}
\arguments{
\item{file}{A file name for MGF file to be exported.}
\item{precursor}{A precursor m/z value.}
\item{peak.table}{A data.frame with 2 column (mz, int).}
\item{charge}{Charge of the precursor. Character (default "+1").}
}
\description{
Exports an MGF file given a file name, a precursor m/z value, and a peak table.
}
|
a11879ddfad2b64fa119be39e6ccdd977f6afc80
|
6cc2112b30258c1174fe3b2b1d7ca178ac769f16
|
/metrics.R
|
763f998ab0dd08abcffd559653060c034b09f956
|
[] |
no_license
|
tmuffly/obgynlit
|
42c0f67c698b297839bc096ec10dfa9e3c07327a
|
7988fa49ec989dd9c94baf526754a273c14655e1
|
refs/heads/master
| 2020-03-21T11:52:41.648343
| 2018-06-25T00:41:57
| 2018-06-25T00:41:57
| 138,526,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,075
|
r
|
metrics.R
|
# Compute author-level bibliometric indices (I-10, HC, HI-norm, HI-annual,
# citation totals) from a combined publications table, then persist the
# result. H-indices themselves are computed by an external python script.
## utilities ---------------------------------------------------------------
library(tidyverse)
library(fs)
library(futile.logger)
not_null <- negate(is_null)
`%<>%` <- magrittr::`%<>%`
## read data ---------------------------------------------------------------
#DATA_DIR <- path("/media", "garrett", "ExtraDrive1", "data", "europepmc")
#setwd("~/Dropbox/Pubmedsearch/Scraper/Version from 6.22.2018")
DATA_DIR <- path("~/Dropbox/Pubmedsearch/Scraper/Version from 6.22.2018")
# flog.info("Reading in %s", path(DATA_DIR, "combined.rds.bz"))
# combined <- read_rds(path(DATA_DIR, "combined.rds.bz"))
flog.info("Reading in %s", path(DATA_DIR, "combined_unnested.csv"))
combined <- data.table::fread(path(DATA_DIR, "combined_unnested.csv"),
                              header = T,
                              nThread = 4)
dim(combined)
str(combined %>% ungroup %>% select(id:bookid))
combined %<>% rename(pubCount = n)
## I-10 index --------------------------------------------------------------
# Number of an author's publications with >= 10 citations.
flog.info("Getting I-10 index.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(I10_index = sum(citedByCount >= 10))
## hc-index ----------------------------------------------------------------
# Age-weighted citation score; NOTE(review): divides by (2018 - pubYear),
# which is Inf for 2018 publications -- confirm intended.
flog.info("Getting HC-index.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(hc_index = citedByCount * 4 / (2018L - pubYear))
## hi-norm -----------------------------------------------------------------
# get author count for hi-norm
# Approximated as number of commas in the author string plus one.
flog.info("Getting author count.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(authorCount = str_count(authorString, ",") + 1L)
# get max pubYear
# get min pubYear
flog.info("Getting min and max publication years.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(maxPubYear = max(pubYear),
         minPubYear = min(pubYear))
# hi-norm
# NOTE(review): denominator is zero when an author has a single
# publication year -- confirm intended.
flog.info("Getting HI-norm.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(hi_norm = citedByCount / authorCount / (maxPubYear - minPubYear))
## hi-annual ---------------------------------------------------------------
# Hand the per-publication data to the python h-index script via feather.
flog.info("Writing results to %s", path(DATA_DIR, "data.feather"))
combined %>%
  ungroup %>%
  group_by(full_name, `NPI Number`, pmid) %>%
  select(minPubYear, maxPubYear, pubYear, pmid, citedByCount) %>%
  feather::write_feather(path(DATA_DIR, "data.feather"))
## create h_indices via python
flog.warn("Setting up H-indices data structure via python.")
# system("~/path/to/my/python h_indices.py")
system("~/anaconda3/envs/myenv/bin/python h_indices.py")
flog.info("Reading in python results from %s", path(DATA_DIR, "h_indices.feather"))
h_indices <- feather::read_feather(path(DATA_DIR, "h_indices.feather"))
# One row per year, 1943-2018, one column per author (per the python
# script's output -- TODO confirm).
years <- seq.int(1943L, 2018L)
h_indices$year <- years
flog.info("Creating H-indices.")
h_indices <- tidyr::gather(h_indices, "author", "h_index", -year)
# HI-annual: mean year-over-year increase of the h-index.
hi_annual <- h_indices %>%
  group_by(author) %>%
  fill(h_index) %>%
  mutate(lag_h_index = lag(h_index),
         h_index_diff = h_index - lag_h_index) %>%
  summarise(hi_annual = mean(h_index_diff, na.rm = TRUE))
flog.info("Getting Hi-annual.")
combined %<>%
  ungroup() %>%
  left_join(hi_annual, by = c("full_name" = "author"))
## total citations ------------------------------------------------------------
flog.info("Getting total citations.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(citeCount = sum(citedByCount, na.rm = TRUE))
## max citations ------------------------------------------------------------
flog.info("Getting max citations.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(maxCite = max(citedByCount, na.rm = TRUE))
## first publication ----------------------------------------------------------
flog.info("Getting first publication.")
combined %<>%
  group_by(full_name, `NPI Number`) %>%
  mutate(firstPub = min(pubYear, na.rm = TRUE))
## write results --------------------------------------------------------------
flog.info("Writing results to %s", path(DATA_DIR, "combined.fst"))
combined %>% fst::write_fst(path(DATA_DIR, "combined.fst"), compress = 100)
|
a4af4f7a1cd2fe235e0c41ad205b0725ddae5764
|
1e9d0ad51afcb498a1e784c832ebe0c6ad3d2d00
|
/shiny/ui.R
|
479be3b2d316ac6dab874ca1deeb7fdccebb9a0a
|
[
"MIT"
] |
permissive
|
MatijaGH/APPR-2017-18
|
6c4d4a701c80bcee6f489c7cf8e7c47e8d8225c9
|
b1e9e23cb03c6a815970ca9b5619e0d8b4737691
|
refs/heads/master
| 2022-02-18T05:12:04.648116
| 2019-08-21T09:19:03
| 2019-08-21T09:19:03
| 111,707,870
| 0
| 0
| null | 2017-11-22T16:29:34
| 2017-11-22T16:29:34
| null |
UTF-8
|
R
| false
| false
| 1,706
|
r
|
ui.R
|
# Shiny UI: dashboard analysing the influence of oil prices on selected
# economies (labels in Slovene). The data objects referenced below
# (BDP, valute, uvoz.izvoz) are presumably loaded in global.R -- TODO
# confirm they are in scope when the UI is built.
library(shiny)
# shinyUI(fluidPage(
#
#   titlePanel("Slovenske občine"),
#
#   tabsetPanel(
#     tabPanel("Velikost družine",
#              DT::dataTableOutput("druzine")),
#
#     tabPanel("Število naselij",
#              sidebarPanel(
#                uiOutput("pokrajine")
#              ),
#              mainPanel(plotOutput("naselja")))
#   )
# ))
shinyUI(fluidPage(
  titlePanel('Analiza vpliva cen nafte na izbrana gospodarstva'),
  tabsetPanel(
    # Oil price over time.
    tabPanel('Cena nafte',
             mainPanel(plotOutput('graf.cen'))),
    # GDP per selected country.
    tabPanel('BDP',
             sidebarPanel(
               selectInput('Drzava', label = 'Izberi drzavo',
                           choices = unique(BDP$Drzava))),
             mainPanel(plotOutput('graf.BDP'))
    ),
    # World GDP comparison for a selected year.
    tabPanel('Primerjava BDPja po svetu',
             sidebarPanel(
               selectInput('Leto', label = 'Leto',
                           choices = unique(BDP$Leto))),
             mainPanel(plotOutput('BDPsvet'))),
    # Currency values.
    tabPanel('Vrednost valut',
             sidebarPanel(
               radioButtons('Valuta', label = 'Izberi valuto',
                            choices = unique(valute$Valuta))
             ),
             mainPanel(plotOutput('graf.valuta'))),
    # Import/export table per country.
    tabPanel('Uvoz in izvoz',
             sidebarPanel(
               selectInput('Drzava1', label = 'Izberi drzavo',
                           choices = unique(uvoz.izvoz$Drzava)),
               radioButtons('UvozIzvoz', label = 'Uvoz ali izvoz?',
                            choices = unique(uvoz.izvoz$tip)
               )
             ),
             mainPanel(tableOutput('tabela.uvoz.izvoz')))
  )
)
)
|
f811307e216e20160eb3d0a27cad72c67902d572
|
ba54c637f784a2b6ddf16697c58d483af7054f07
|
/DMR.r
|
3c2edc7f08676eb15cf11adb1c2f234d48d24b59
|
[] |
no_license
|
mfaisalshahid/Data-Mining
|
fa18bea3ed148c99520a0a73abdf30d291cf1892
|
828cfa79f764889f573b18f2c88fe4874c1e2467
|
refs/heads/master
| 2022-12-08T03:57:48.704055
| 2020-08-30T00:15:43
| 2020-08-30T00:15:43
| 291,365,480
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,540
|
r
|
DMR.r
|
# Housing-price regression exercise: hold-out evaluation (Part 1) and
# 5-fold cross-validation (Part 2).
# NOTE(review): PerformanceAnalytics and caret are loaded but not used
# below -- confirm before removing.
library(tidyverse)
library(PerformanceAnalytics)
library(caret)
# Hard-coded user-specific path to the assignment data.
housing_price <- read.csv("/Users/muhammadshahid/Desktop/ASS05_Data.csv")
df <- data.frame(housing_price)
# Part1
# Evaluate a linear model of SalePrice on all other columns using a single
# random train/test split.
#
# Args:
#   df: data frame containing a SalePrice column plus predictors.
#   p:  fraction of rows used for training (e.g. 0.7).
# Returns: numeric vector c(training MSE, R-squared, adjusted R-squared,
#   test-set average squared error).
part1 <- function(df, p){
  # Shuffle the rows, then split at the p-th fraction.
  n_rows <- nrow(df)
  shuffled <- df[sample(n_rows), ]
  cut_point <- round(p * n_rows)
  train_set <- shuffled[seq_len(cut_point), ]
  test_set <- shuffled[(cut_point + 1):n_rows, ]
  # Fit on the training portion.
  fit <- lm(SalePrice ~ ., data = train_set)
  fit_summary <- summary(fit)
  mse <- mean(fit$residuals^2)
  # Out-of-sample average squared error on the held-out rows.
  ase <- mean((predict(fit, test_set) - test_set$SalePrice)^2)
  c(mse, fit_summary$r.squared, fit_summary$adj.r.squared, ase)
}
# Repeat the 70/30 hold-out evaluation five times.
for (i in 1:5){
  print(part1(df, 0.7))
}
# Part2: 5-fold cross-validation.
# BUG FIX: the original split(df, sample(1:1460, 5, replace=F)) recycled
# five random labels over the rows, so rows 1, 6, 11, ... always landed in
# the same fold (a cyclic, not random, partition) and the row count 1460
# was hard-coded. Assign each row a random fold label instead.
sets <- split(df, sample(rep_len(1:5, nrow(df))))
# Preallocate the fold holders instead of growing vectors in the loop.
folds_test <- vector("list", length(sets))
folds_train <- vector("list", length(sets))
for (i in seq_along(sets)){
  # sets[i] keeps the list wrapper so as.data.frame() matches the original
  # column-prefixing behaviour (columns are renamed below anyway).
  folds_test[[i]] <- as.data.frame(sets[i])
  folds_train[[i]] <- as.data.frame(do.call(rbind, sets[-i]))
}
# Column names are re-applied because as.data.frame() above prefixes them.
columnnames <- c("LotArea","TotalBsmtSF","GarageCars","SalePrice","AGE","TotalArea")
for (i in seq_along(folds_train)){
  train <- as.data.frame(folds_train[i])
  test <- as.data.frame(folds_test[i])
  colnames(test) <- columnnames
  colnames(train) <- columnnames
  lin_model_1 <- lm(SalePrice ~ ., data = train)
  mse <- mean(lin_model_1$residuals^2)
  rsq <- summary(lin_model_1)$r.squared
  arsq <- summary(lin_model_1)$adj.r.squared
  pred <- predict(lin_model_1, test)
  ase <- mean((pred-test$SalePrice)**2)
  print(c(mse, rsq, arsq, ase))
  # BUG FIX: average() does not exist in R and crashed the loop; report the
  # mean of the four metrics (of questionable meaning, kept for parity with
  # the original intent).
  print(mean(c(mse, rsq, arsq, ase)))
}
|
98873415f87bd0c34ca16dc6ededb273a0e74b1a
|
ed009043cc51f25c4d2bcb1a365a9f5b9ad4c8b8
|
/tests/r/mran/verify
|
b15da154761c64328d0a03649476ef4a96a970c7
|
[
"BSD-3-Clause"
] |
permissive
|
data-workspaces/dws-repo2docker
|
2ad54b357e0d567be30111d7836ad2f405142905
|
4d8736d7e3d79b8cdfa1f644f590aa7fdede183b
|
refs/heads/master
| 2022-05-07T02:22:28.559789
| 2022-03-10T15:35:11
| 2022-03-10T15:35:11
| 209,244,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
verify
|
#!/usr/bin/env Rscript
# Smoke test for the built image: prints the R version and exits with a
# non-zero status unless MRAN is the configured CRAN mirror.
library('testthat')
print(version)
# Fail if MRAN isn't the configured CRAN mirror
if (!(startsWith(options()$repos["CRAN"], "https://mran.microsoft.com"))) {
  quit("yes", 1)
}
|
|
803f1705ccb194cfd29154bc5d9ce399d9c950d1
|
8b60c33ca0d37d67a0ee3ccb1aa464fec537ccfc
|
/code/DataPreparation.R
|
fa15909bc55943f48041852b1099cd5355697805
|
[] |
no_license
|
alicebalard/Article_IntensityEimeriaHMHZ
|
7df0a14685d5bb84b9ae13020ee24b930e04974c
|
82f6b424646645f4c7953c9f48c64e54020dd3c4
|
refs/heads/master
| 2022-10-02T20:27:12.079801
| 2022-09-14T11:06:29
| 2022-09-14T11:06:29
| 205,409,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,285
|
r
|
DataPreparation.R
|
# Installation
## Packages
list.of.packages <- c("parasiteLoad",
                      "bbmle",
                      "devtools",
                      "optimx", # for bbmle it needs to be required(?)
                      "ggplot2",
                      "VennDiagram",
                      "fitdistrplus", # evaluate distribution
                      "epiR", # Sterne's exact method
                      "simpleboot", # BS
                      "plyr", # revalue and other
                      "ggmap",
                      "gridExtra",# several plots in one panel
                      "wesanderson", # nice colors
                      "cowplot",# several plots in one panel
                      "ggpubr")
# Install any packages not yet present, then attach them all.
# Returns (invisibly at top level) a logical vector of attach successes.
ipak <- function(pkg){
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(new.pkg))
    install.packages(new.pkg, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
ipak(list.of.packages)
## Reinstall the package in case I updated it
# devtools::install_github("alicebalard/parasiteLoad@v2.0")
devtools::install_github("alicebalard/parasiteLoad") # version with full Gtest Chisquare
library(parasiteLoad)
## Install_github case
if(!"legendMap" %in% installed.packages()[,"Package"]){
  devtools::install_github("3wen/legendMap")
}
library(legendMap)
# Fit a distribution to data, capturing failures instead of erroring.
#
# Args:
#   x:       numeric vector of observations.
#   distrib: distribution name understood by MASS::fitdistr() (e.g.
#            "normal", "poisson", "negative binomial").
# Returns: list(fit, loglik, AIC). On failure, fit is the string
#   "fit failed" and loglik/AIC hold explanatory strings.
tryDistrib <- function(x, distrib){
  # deals with fitdistr error:
  fit <- tryCatch(MASS::fitdistr(x, distrib), error=function(err) "fit failed")
  return(list(fit = fit,
              loglik = tryCatch(fit$loglik, error=function(err) "no loglik computed"),
              # BUG FIX: fitdistr objects carry no $aic element, so fit$aic
              # was always NULL (NULL extraction does not error, so the
              # tryCatch never fired). Compute the AIC from the fitted
              # object via its logLik method instead.
              AIC = tryCatch(stats::AIC(fit), error=function(err) "no aic computed")))
}
# Compare candidate distributions for a data vector.
#
# Args:
#   x:         numeric vector.
#   distribs:  names accepted by MASS::fitdistr(); their fit summaries
#              are printed.
#   distribs2: names accepted by fitdistrplus::fitdist(); used for the
#              diagnostic plots.
# Side effects: prints the fitdistr results and draws a 2x2 panel of
#   density / CDF / QQ / PP comparison plots.
findGoodDist <- function(x, distribs, distribs2){
  l = lapply(distribs, function(i) tryDistrib(x, i))
  names(l) <- distribs
  print(l)
  listDistr <- lapply(distribs2, function(i){
    if (i %in% "t"){
      # Student's t needs a starting value for the df parameter.
      fitdistrplus::fitdist(x, i, start = list(df =2))
    } else {
      fitdistrplus::fitdist(x,i)
    }}
  )
  par(mfrow=c(2,2))
  denscomp(listDistr, legendtext=distribs2)
  cdfcomp(listDistr, legendtext=distribs2)
  qqcomp(listDistr, legendtext=distribs2)
  ppcomp(listDistr, legendtext=distribs2)
  par(mfrow=c(1,1))
}
## Define functions used for data analysis
# Base map tiles for the study area (Stamen toner-lite via ggmap);
# fetched once and reused by plotMap().
area <- get_map(location = c(12, 51.5, 15, 53.5),
                source = "stamen", maptype = "toner-lite")
# Plot capture locations on the base map, coloured by hybrid index (HI:
# blue = low, red = high). Expects df with Longitude, Latitude and HI.
plotMap <- function(df){
  ggmap(area) +
    geom_point(data = df, shape = 21, size = 2,
               aes(Longitude, Latitude, fill = HI), alpha = .4) + # set up the points
    scale_fill_gradient("Hybrid\nindex", high="red",low="blue") +
    theme_bw() +
    # White rectangle as a backdrop for the scale bar drawn next
    # (scale_bar comes from the legendMap package).
    geom_rect(xmin = 12, xmax = 12.7, ymin = 51.5, ymax = 51.9, fill = "white") +
    scale_bar(lon = 12.1, lat = 51.5, arrow_length = 10, arrow_distance = 20,
              distance_lon = 20, distance_lat = 7, distance_legend = 10,
              dist_unit = "km", orientation = TRUE, legend_size = 2,
              arrow_north_size = 4) +
    theme(legend.position = 'none', axis.ticks=element_blank())
}
# Summary statistics in the style of quantitative parasitology:
# prevalence (with Sterne's exact 95% CI), abundance (median over all
# hosts) and intensity (median over infected hosts only).
#
# Args:
#   x: numeric vector of per-host parasite counts.
# Side effects: prints the formatted summary via cat().
# Returns: NULL -- cat() returns NULL, so `Result` carries no value.
#   NOTE(review): return the statistics in a list if a value is wanted.
myQuantitativeParasitology <- function(x){
  intensity <- round(median(x[x>0]),3)
  abundance <- round(median(x), 3)
  max <- max(x)
  Ni <- length(x)
  NiPos <- length(x[x>0])
  # Confidence intervals for prevalence calculated with Sterne's exact method
  # (se/sp = 1 assumes a perfect diagnostic test).
  sternetest <- epiR::epi.prev(pos = length(x[x > 0]), tested = length(x),
                               se = 1, sp=1, conf.level = .95, method = "sterne")
  cilow <- sternetest$ap["lower"]
  cihigh <- sternetest$ap["upper"]
  prevalence <- sternetest$ap["est"]
  ## Printout results
  Result <- cat(paste0("Prevalence % [CI 95%] (N infected hosts/ N hosts)\n",
                       round(prevalence,1), " [", round(cilow,1), "-", round(cihigh,1), "]",
                       " (", NiPos, "/", Ni, ")\n",
                       "Abundance (Max parasite load)\n",
                       round(abundance,1), " (", max, ")\n",
                       "Intensity (Max parasite load)\n",
                       round(intensity,1), " (", max, ")"))
  return(Result)
}
## Prepare datasets for each analysis
# Load datasets from parasiteLoad
WATWMdata <- read.csv("https://raw.githubusercontent.com/alicebalard/parasiteLoad/master/data/WATWMdata.csv", na.strings = c("", " ", NA))
BALdata <- read.csv("https://raw.githubusercontent.com/alicebalard/parasiteLoad/master/data/BALdata.csv", na.strings = c("", " ", NA))
# Keep individuals with hybrid index and sex
WATWMdata <- WATWMdata[!is.na(WATWMdata$HI) & !is.na(WATWMdata$Sex),]
# pinworms "where are the wormy mice"
pinwormsdata_watwm <- WATWMdata[!is.na(WATWMdata$Aspiculuris.Syphacia),]
# Shifted (count + 1) column -- presumably for models needing strictly
# positive loads; TODO confirm intended use.
pinwormsdata_watwm$`Aspiculuris.Syphacia+1` <-
  pinwormsdata_watwm$Aspiculuris.Syphacia + 1
# Presence/absence indicator for oxyurid worms.
pinwormsdata_watwm$presence_oxyurids <- 1
pinwormsdata_watwm$presence_oxyurids[pinwormsdata_watwm$Aspiculuris.Syphacia == 0] <- 0
BALdata <- BALdata[!is.na(BALdata$HI) & !is.na(BALdata$Sex),]
BALdata$Status[BALdata$Status %in% "BA"] <- NA # error
BALdata$Status[is.na(BALdata$Status)] <- "adult" # NAs in the field data status were adults
BALdata$Status <- droplevels(BALdata$Status)
# Summarise a capture data frame: sample size, sex ratio, captures per
# year, per-farm animal counts (farms are defined at 0.0001 degree
# resolution), and the latitude/longitude extent.
#
# Args:
#   df: data frame with Sex, Year, farm, Latitude and Longitude columns.
# Returns: named list (same element names and order as before).
getinfotab <- function(df){
  per_farm <- table(df$farm)
  # 1.96 * SE-style half-width of the per-farm counts.
  farm_ci <- qnorm(0.975) * sd(per_farm) / sqrt(sum(per_farm))
  list(Nmice = nrow(df),
       SexRatio = table(df$Sex),
       tableYear = table(df$Year),
       Nfarms = length(per_farm),
       meanAnimalperfarm = mean(per_farm),
       medianAnimalperfarm = median(per_farm),
       sdAnimalperfarm = farm_ci,
       latrange = range(df$Latitude),
       lonrange = range(df$Longitude))
}
# Quick sanity printouts of the WATWM dataset.
nrow(WATWMdata)
table(WATWMdata$Sex)
## HERE PREPARE THE CLEAN TABLE THAT IS USED FOR EACH ANALYSIS OF THIS ARTICLE (sup table S1)
# Hybrid-index genotype markers and the helminth taxa scored per mouse.
markersHI <- c("mtBamH", "YNPAR", "X332", "X347", "X65", "Tsx", "Btk", "Syap1", "Es1C", "Gpd1C", "Idh1C", "MpiC", "NpC", "Sod1C")
listWorms <- c("Aspiculuris_Syphacia", "Hymenolepis", "Taenia", "Trichuris", "Heterakis", "Mastophorus")
cleanData <- BALdata[c("Mouse_ID", "Sex", "Longitude", "Latitude", "Year", "farm", "Status",
                       markersHI, "HI_NLoci", "HI", listWorms,
                       "Body_weight", "Body_length", "Tail_length", "Capture",
                       "delta_ct_ilwe_MminusE", "delta_ct_cewe_MminusE", "eimeriaSpecies")]
# Normalise the Eimeria species labels.
cleanData$eimeriaSpecies <- as.character(cleanData$eimeriaSpecies)
cleanData$eimeriaSpecies[cleanData$eimeriaSpecies %in% c("Other", "Negative")] <- "no sp. identified"
# All 6 double infections were E.ferrisi in cecum and E.falciformis in ileum
cleanData$eimeriaSpecies[grep("Double", cleanData$eimeriaSpecies)] <- "E_ferrisi_cecum_E_vermiformis_ileum"
# Remove embryos N=7 mice used in no part of the study
embryos <- cleanData[grep("E", cleanData$Mouse_ID),"Mouse_ID"]
cleanData <- cleanData[!cleanData$Mouse_ID %in% embryos,]
# Verify the number of HI markers
# (stored count should equal the number of non-NA marker genotypes)
cleanData$HI_NLoci <- as.numeric(gsub("HI ", "", cleanData$HI_NLoci))
table(cleanData$HI_NLoci == apply(cleanData, 1, function(x) sum(!is.na(x[markersHI]))))
cleanData[is.na(cleanData$HI_NLoci) |
            cleanData$HI_NLoci != apply(cleanData, 1, function(x) sum(!is.na(x[markersHI]))),]
# Remove 3 mice with few markers used in no part of the study (SK_2891, SK_3153-5, Sk3173)
cleanData <- cleanData[!cleanData$Mouse_ID %in% c("SK_2891", "SK_3153-5", "Sk3173"),]
# Correct the 2 wrong HI_NLoci (AA_0164, AA_0171)
cleanData$HI_NLoci <- apply(cleanData, 1, function(x) sum(!is.na(x[markersHI])))
# Indicate which mouse used in which part of the study: see further sections
cleanData$UsedForMap <- "no"
cleanData$UsedForEimeriaRes <- "no"
cleanData$UsedForPinwormsRes <- "no"
cleanData$UsedForEimeriaImpactHealth <- "no"
cleanData$UsedForPinwormsImpactHealth <- "no"
##### Geneland map
diploidMarkers <- c("Es1C", "Gpd1C", "Idh1C", "MpiC", "NpC", "Sod1C")
# use for map all individuals with 6 diploid markers
# (row sum of NAs == 0 means all six markers were genotyped)
cleanData$UsedForMap <- rowSums(is.na(cleanData[diploidMarkers]))
cleanData$UsedForMap[cleanData$UsedForMap %in% 0] <- "yes"
cleanData$UsedForMap[cleanData$UsedForMap != "yes"] <- "no"
##### Eimeria qpcr #####
# Keep mice with a qPCR delta-ct in cecum or ileum tissue; use the
# per-mouse maximum of the two tissues.
qpcrdata <- cleanData[!is.na(cleanData$delta_ct_cewe_MminusE) | !is.na(cleanData$delta_ct_ilwe_MminusE),]
df <- qpcrdata[, c("delta_ct_cewe_MminusE", "delta_ct_ilwe_MminusE")]
qpcrdata$delta_ct_max_MminusE <- apply(df, 1, function(x){max(x, na.rm = T)})
rm(df)
# threshold of detection by qPCR = -5. Then we add -5 to all to have positive values
qpcrdata$delta_ct_max_MminusE[qpcrdata$delta_ct_max_MminusE <= -5] <- -5
# 0 will be non infected :
qpcrdata$`delta_ct_max_MminusE+5` <- qpcrdata$delta_ct_max_MminusE + 5
# 1 will be non infected :
qpcrdata$`delta_ct_max_MminusE+6` <- qpcrdata$delta_ct_max_MminusE + 6
# presence/absence
qpcrdata$presence_eimeria_tissues <- 1
qpcrdata$presence_eimeria_tissues[qpcrdata$delta_ct_max_MminusE == -5] <- 0
qpcrdata$presence_eimeria_tissues <- as.factor(qpcrdata$presence_eimeria_tissues)
table(qpcrdata$presence_eimeria_tissues)
# Indicator for mice where E. ferrisi was identified by species typing.
qpcrdata$presence_eferrisi_identified <- 0
qpcrdata$presence_eferrisi_identified[grep("ferrisi", qpcrdata$eimeriaSpecies)] <- 1
table(qpcrdata$presence_eferrisi_identified)
getinfotab(qpcrdata)
# for model intensity
qpcr_intensity_data <- qpcrdata[qpcrdata$`delta_ct_max_MminusE+5` > 0,]
cleanData$UsedForEimeriaRes[cleanData$Mouse_ID %in% qpcrdata$Mouse_ID] <- "yes"
##### All mice that were investigated for pinworms were also investigated for other helminths #####
pinwormsdata_bal <- cleanData[!is.na(cleanData$Aspiculuris_Syphacia),]
# For mice with partial worm counts, treat the missing taxa as zero.
idToCorrect <- pinwormsdata_bal[rowSums(is.na(pinwormsdata_bal[,listWorms])) > 0, "Mouse_ID"]
cleanData[cleanData$Mouse_ID %in% idToCorrect, listWorms][
  is.na(cleanData[cleanData$Mouse_ID %in% idToCorrect, listWorms])] <- 0
pinwormsdata_bal <- cleanData[!is.na(cleanData$Aspiculuris_Syphacia),]
# Shifted (count + 1) column, mirroring the WATWM dataset above.
pinwormsdata_bal$`Aspiculuris.Syphacia+1` <-
  pinwormsdata_bal$Aspiculuris_Syphacia + 1
pinwormsdata_bal$presence_oxyurids <- 1
pinwormsdata_bal$presence_oxyurids[pinwormsdata_bal$Aspiculuris_Syphacia == 0] <- 0
pinwormsdata_bal$presence_oxyurids <- as.factor(pinwormsdata_bal$presence_oxyurids)
getinfotab(pinwormsdata_bal)
cleanData$UsedForPinwormsRes[cleanData$Mouse_ID %in% pinwormsdata_bal$Mouse_ID] <- "yes"
##### Body condition index in Eimeria qpcr #####
# Compute a body-condition index as the residuals of a body-weight ~
# body-length * sex regression (size-independent by construction).
#
# Args:
#   df: data frame with Body_length, Body_weight, Sex, Status, HI.
# Returns: list(df, myplot) -- the filtered data frame augmented with
#   `predicted` and `residuals` columns, and a ggplot of actual vs
#   predicted weights faceted by sex.
getBodyCondition <- function(df){
  df <- df[!is.na(df$Body_length) & !is.na(df$Body_weight) & !is.na(df$Sex),]
  # Remove pregnant/post partum and juveniles
  df <- df[!df$Status %in% c("young", "pregnant"),]
  # Drop implausibly small body lengths (units presumably mm -- TODO confirm).
  df <- df[df$Body_length > 50,]
  # Regression of BM/BS. Advantage: independant of size!!
  # Step 1: fit the model
  fitRes <- lm(Body_weight ~ Body_length * Sex, data = df)
  # Step 2: obtain predicted and residual values
  df$predicted <- predict(fitRes) # Save the predicted values
  df$residuals <- residuals(fitRes) # Save the residual values -> to be used as indices!
  # # plot of residuals by sex
  # Plot the actual and predicted values (supplementary figure)
  myplot <- ggplot2::ggplot(df, ggplot2::aes(x = Body_length, y = Body_weight)) +
    ggplot2::geom_smooth(method = "lm", se = FALSE, color = "lightgrey") + # Plot regression slope
    ggplot2::geom_segment(ggplot2::aes(xend = Body_length, yend = predicted)) +
    ggplot2::geom_point(size = 4, pch = 21, alpha = .8,
                        aes(fill = HI)) +
    ggplot2::scale_fill_gradient(low = "blue", high = "red")+
    ggplot2::geom_point(ggplot2::aes(y = predicted), shape = 1) +
    ggplot2::facet_grid(~ Sex, scales = "free_x") + # Split panels here by `iv`
    ggplot2::theme_bw() # Add theme for cleaner look
  return(list(df, myplot))
}
# Apply to the qPCR (Eimeria) subset and record which mice were used.
body_data_eimeria <- getBodyCondition(qpcrdata)[[1]]
figResEimeria <- getBodyCondition(qpcrdata)[[2]]
cleanData$UsedForEimeriaImpactHealth[cleanData$Mouse_ID %in% body_data_eimeria$Mouse_ID] <- "yes"
##### Body condition index in pinworms #####
body_data_pinworms <- getBodyCondition(pinwormsdata_bal)[[1]]
figResWorm <- getBodyCondition(pinwormsdata_bal)[[2]]
cleanData$UsedForPinwormsImpactHealth[cleanData$Mouse_ID %in% body_data_pinworms$Mouse_ID] <- "yes"
# clean farms
# Anonymise farm identifiers as consecutive integers before export.
cleanData$farm <- as.numeric(factor(cleanData$farm))
write.csv(cleanData, "../data/cleanedData.csv", row.names = F)
|
1c9895f06e47e22f749460a4f8c02706fab6a58b
|
68b7f425408cb4188dccf7c91558370c6a91ae04
|
/ORIGAMI/man/make_map.Rd
|
671b6b518062197626a6097022f7864572f74384
|
[] |
no_license
|
qlu-lab/ORIGAMI
|
19af8636263a433088fbc1d228541e0ba1ee5d52
|
8c317adeb59f27c47bae9c1c3e17cc10d22bebf2
|
refs/heads/master
| 2021-07-01T05:09:03.045889
| 2021-02-05T20:41:20
| 2021-02-05T20:41:20
| 220,383,273
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 482
|
rd
|
make_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_map.R
\name{make_map}
\alias{make_map}
\title{make_map}
\usage{
make_map(chr, ref_file, rs_file, output_path)
}
\arguments{
\item{chr}{chromosome number}
\item{ref_file}{Ref file of your vcf data}
\item{rs_file}{Reference file contains SNP and BP information}
\item{output_path}{output file path}
}
\description{
Generates map files that link base-pair (BP) positions and rsIDs together.
}
\keyword{map}
|
a64a2a351790e0c3865fc4a4a4ae0de5a42a337c
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/man/createView.Rd
|
ac737115db7d841fd02d290962b2d39b5cd5440d
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 571
|
rd
|
createView.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constructAbstractSQL.R
\name{createView}
\alias{createView}
\title{Create View}
\usage{
createView(pViewName, pSelect, pDatabase = getOption("ResultDatabaseFL"), ...)
}
\arguments{
\item{pViewName}{Name of view}
\item{pSelect}{SELECT clause for view creation}
\item{pDatabase}{Name of the database}
}
\value{
Name of view if operation is successful
}
\description{
Create an in-database view from a SELECT clause
}
\examples{
vres <- createView("myview120","SELECT * FROM tblmatrixmulti")
}
|
1d682bfdaf8b2fcc22549be9d7aac99dc5dfd089
|
857a7f4229065a77df1c5b856530b7db880255f0
|
/lib/confusionMatrix.R
|
66c8d386a7f65ea35a120622d39ea9ca17e47222
|
[] |
no_license
|
XJieWei/Fall2018-Project4-sec1-grp7
|
e43758bc3e0e2dabb354674fbbaa9bf28a028d01
|
40d15604f8ea5d311336fa057ec592c4e02dc781
|
refs/heads/master
| 2020-04-10T18:26:52.042257
| 2018-11-29T02:52:38
| 2018-11-29T02:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,514
|
r
|
confusionMatrix.R
|
# library(stringr)
# library(tm)
# library(dplyr)
# library(tidytext)
# library(broom)
# Build a 36x36 character-level OCR confusion matrix (digits 0-9 plus
# lowercase a-z) from paired ground-truth and Tesseract output files.
# Only lines with the same word count and words with the same character
# count are compared, so insertions/deletions are skipped entirely.
#
# Args:
#   truthTrain: paths to the ground-truth text files.
#   ocrTrain:   paths to the corresponding Tesseract output files.
# Returns: the confusion matrix (truth rows, OCR columns); also saved to
#   ../output/confusionMatrix.RData.
# NOTE(review): characters outside [0-9a-z] (punctuation etc.) make
#   match() return NA and would error on indexing -- confirm the inputs
#   are restricted to alphanumerics.
commonMistakes <- function(truthTrain, ocrTrain){
  # Read each file whole, then split into lines.
  groundTruth <- array(NA, c(1, length(truthTrain)))
  for(x in 1:length(truthTrain)) groundTruth[,x] <- readChar(truthTrain[x], file.info(truthTrain[x])$size)
  groundTruth <- strsplit(groundTruth,"\n")
  tesseract <- array(NA, c(1, length(ocrTrain)))
  for(x in 1:length(ocrTrain)) tesseract[,x] <- readChar(ocrTrain[x], file.info(ocrTrain[x])$size)
  tesseract <- strsplit(tesseract,"\n")
  # plotting documents and see where lines differ in length
  # pdf("plots.pdf", width=100, height=100)
  # par(mfrow = c(10,10))
  # for(i in 1: length(files)) barplot(nchar(tesseract[[i]]) / nchar(groundTruth[[i]]))
  # dev.off()
  possibilities <- c(0:9, letters)
  confMat <- matrix(0, 36, 36, dimnames = list(possibilities, possibilities))
  # Walk file -> line -> word -> character, counting case-insensitive
  # mismatches between aligned characters.
  for(i in 1:length(truthTrain)){
    truthBag <- strsplit(groundTruth[[i]]," ")
    tessBag <- strsplit(tesseract[[i]]," ")
    for(j in 1 : min(length(truthBag), length(tessBag))){
      if(length(truthBag[[j]]) == length(tessBag[[j]])){
        truthWords <- truthBag[[j]]
        tessWords <- tessBag[[j]]
        for(k in 1: length(truthWords)){
          if((nchar(truthWords[k]) == nchar(tessWords[k]))){
            truthLetters <- unlist(strsplit(truthWords[k], ""))
            tessLetters <- unlist(strsplit(tessWords[k], ""))
            for(l in 1:length(truthLetters)){
              if(truthLetters[l] != tessLetters[l]){
                confMat[match(tolower(truthLetters[l]), possibilities),
                        match(tolower(tessLetters[l]), possibilities)] =
                  confMat[match(tolower(truthLetters[l]), possibilities),
                          match(tolower(tessLetters[l]), possibilities)] + 1
              }
            }
          }
        }
      }
    }
  }
  save(confMat, file = "../output/confusionMatrix.RData")
  return(confMat)
}
#
# # tesseract <- ""
# # for(x in files) tesseract <- paste(tesseract, readChar(x, file.info(x)$size))
# # tesseract <- strsplit(tesseract,"\n")[[1]]
#
# barplot(nchar(groundTruth) - nchar(tesseract))
# length(groundTruth) - length(tesseract) # 14 missing lines in tesseract
#
# # cbind(groundTruth[580:600], tesseract[580:600])
# # length(groundTruth)
# # length(tesseract)
# # groundTruth[(29758 - 2): (29758 + 2)]
# # tesseract[(29758 - 2): (29758 + 2)]
#
#
# truth <- groundTruth
# ocr <- tesseract
#
# # recursive method
#
# offsetting <- function(truth, ocr){
# abnormal <- nchar(truth) > 3 * nchar(ocr)
# for(i in 2:length(abnormal)){
# if(abnormal[i] & ocr[i] != ""){
# print(i)
# ocr <- c(ocr[1:(i - 1)], "", ocr[(i):length(ocr)])
# offsetting(truth, ocr)
# }
# }
# return(ocr)
# }
# tessNew <- offsetting(groundTruth, tesseract)
# comp <- nchar(groundTruth) - nchar(tessNew)
# barplot(comp)
#
#
# # normal method
#
# offsetSpots <- c()
# abnormal <- nchar(truth) > 3 * nchar(ocr) # arbitrary
#
# for(i in 2:length(abnormal)){
# if(abnormal[i]){
# ocr <- c(ocr[1:(i - 1)], "", ocr[(i):length(ocr)])
# abnormal <- nchar(truth) > 3 * nchar(ocr)
# offsetSpots <- c(offsetSpots, i)
# }
# }
#
# comp <- nchar(truth[-offsetSpots]) - nchar(ocr[-offsetSpots])
# comp <- nchar(truth) - nchar(ocr)
# barplot(comp)
#
# length(offsetSpots) # 14 lines were insserted to tesseract
#
# cbind(tail(truth, 50), tail(ocr, 50))
#
#
# cbind(tail(groundTruth), tail(tesseract))
|
31bd4781a4c04d03426edb35c9ce5f1aa487072d
|
4346f0677cfd7f8994a34eb105fbb8459e0da810
|
/man/checkBreakPoints.Rd
|
28f7e543b93f1fdfa16068d47060cc0e1aa86fa1
|
[] |
no_license
|
cran/handwriter
|
cd5ae5358c81b96d34e11fd2bc7964a1489c9885
|
ae51bdcde5692e5d51a3035049517e5a11e5b508
|
refs/heads/master
| 2023-07-12T18:13:37.321916
| 2021-08-16T15:20:02
| 2021-08-16T15:20:02
| 393,098,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 733
|
rd
|
checkBreakPoints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JunctionDetection.R
\name{checkBreakPoints}
\alias{checkBreakPoints}
\title{checkBreakPoints}
\usage{
checkBreakPoints(candidateNodes, allPaths, nodeGraph, terminalNodes, dims)
}
\arguments{
\item{candidateNodes}{possible breakpoints}
\item{allPaths}{list of paths}
\item{nodeGraph}{graph of nodes; call the getNodeGraph function}
\item{terminalNodes}{nodes at the endpoints of the graph}
\item{dims}{graph dimensions}
}
\value{
a graph without breakpoints and separated letters
}
\description{
Internal function called by processHandwriting that eliminates breakpoints based on rules to try to coherently separate letters.
}
|
781e44a6a91014443466ffb429f9214d829e8d06
|
d62d9ea2f6aa749fa48455bddbd3208279ce6449
|
/man/plot_diet.Rd
|
bf408f307a668a6ef982d30593028633b5f39901
|
[] |
no_license
|
jporobicg/atlantistools
|
3bffee764cca1c3d8c7a298fd3a0b8b486b7957e
|
75ea349fe21435e9d15e8d12ac8060f7ceef31a2
|
refs/heads/master
| 2021-01-12T03:06:55.821723
| 2017-05-26T04:03:33
| 2017-05-26T04:03:33
| 78,160,576
| 1
| 0
| null | 2017-05-25T23:35:23
| 2017-01-06T00:51:21
|
R
|
UTF-8
|
R
| false
| true
| 2,184
|
rd
|
plot_diet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-diet.R
\name{plot_diet}
\alias{plot_diet}
\title{Plot contribution of diet contents for each functional group.}
\usage{
plot_diet(bio_consumed, species = NULL, wrap_col = "agecl",
combine_thresh = 7)
}
\arguments{
\item{bio_consumed}{Consumed biomass of prey groups by predatorgroup and agecl in tonnes
for each timestep and polygon. Dataframe with columns 'pred', 'agecl', 'polygon', 'time', 'prey'.
Consumed biomass in [t] is stored in column 'atoutput'. Should be generated with
\code{link{calculate_consumed_biomass}}.}
\item{species}{Character string giving the acronyms of the species you aim to plot. Default is
\code{NULL} resulting in all available species being plotted.}
\item{wrap_col}{Character specifying the column of the dataframe to be used as multipanel plot.
Default is \code{"agecl"}.}
\item{combine_thresh}{Number of different categories to plot. Lets say predator X has eaten
20 different prey items. If you only want to show the 3 most important prey items set
\code{combine_thresh} to 3. As rule of thumb values < 10 are useful otherwise to many
colors are used in the plots. Default is \code{7}.}
}
\value{
List of grobs composed of ggplot2 objects.
}
\description{
Visualize diet proportions form predator and prey perspective. The upper panel
plot shows the predator perspective while the lower panel plot shows the prey perspective
for a given group. Please note that this function only works with models
based on the trunk code. Bec_dev models should use \code{\link{plot_diet_bec_dev}} to get an indication
of the feeding interactions.
}
\examples{
\dontrun{
plots <- plot_diet(ref_bio_cons, wrap_col = "agecl")
gridExtra::grid.arrange(plots[[1]])
gridExtra::grid.arrange(plots[[7]])
# Use names() to get the species names!
names(plots)
}
plot <- plot_diet(ref_bio_cons, species = "Small planktivorous fish", wrap_col = "agecl")
gridExtra::grid.arrange(plot[[1]])
}
\seealso{
Other plot functions: \code{\link{plot_bar}},
\code{\link{plot_boxes}},
\code{\link{plot_diet_bec_dev}}, \code{\link{plot_line}},
\code{\link{plot_rec}}, \code{\link{plot_species}}
}
|
8683f96091680eccc87e2588b34dae549a3243e6
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH2/EX2.2.15/Ex2_2_15.R
|
3d84d065092b03c2014147c990fe1bbf73bf4e4e
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 622
|
r
|
Ex2_2_15.R
|
# PAGE=50 -- Schaum's Statistics Ex 2.2.15:
# build the "or more" cumulative frequency distribution of weekly wages
# and draw the corresponding ogive.
a <- c(250, 260, 270, 280, 290, 300, 310)  # lower class boundaries
b <- c(259.99, 269.99, 279.99, 289.99, 299.99, 309.99, 319.99)  # upper class boundaries (kept for reference, unused below)
# NOTE: `c` here shadows base::c as a *data* object; function calls c(...)
# still resolve to the base function, so this works, but it is fragile.
c <- c(a, 320)  # boundaries extended with the final "320 or more" point
c
n <- c(8, 10, 16, 14, 10, 5, 2)  # class frequencies
n <- c(n, 0)                     # nobody earns 320 or more
n1 <- sum(n)                     # total number of observations
n4 <- rep(c, n)                  # boundaries expanded by class frequency
n4
# "Or more" cumulative frequencies: for each boundary, the number of wages at
# or above it. Replaces the original hand-unrolled d0..d6 running sums, which
# computed exactly rev(cumsum(rev(n))) followed by a trailing zero.
d <- rev(cumsum(rev(n)))
d
c
e <- rep("or more", 8)
y <- matrix(c(c, e, d), ncol = 3, byrow = FALSE)
colnames(y) <- c("Wages", " ", "or more cf")
rownames(y) <- rep(" ", 8)
y <- as.table(y)
y
n4 <- rep(c, d)  # boundaries expanded by "or more" counts, for the ogive
n4
plot(table(n4), type = 'c', xlab = 'WAGES', ylab = 'CF')
|
6650dd7f06d72c15dd6638df2d157a72882d7797
|
8b394510187514efb88c8e0c4608a151e221dbc7
|
/ui.R
|
4ecbf765088c3b56142881193b850eecdde4905d
|
[] |
no_license
|
itoDreamer/apmTool2
|
98d76772baa5cc0b879940e43940d30af4c84164
|
343ce11a7ca5d38a1239aa400e9721654b2d992e
|
refs/heads/master
| 2022-11-19T05:39:07.586401
| 2020-07-20T19:36:52
| 2020-07-20T19:36:52
| 279,189,620
| 0
| 0
| null | 2020-07-13T02:28:07
| 2020-07-13T02:28:07
| null |
UTF-8
|
R
| false
| false
| 23,396
|
r
|
ui.R
|
library(shinydashboard)
library(DT)
library(shiny)
library(shinyWidgets)
library(rhandsontable)
library(quantmod)
# Dashboard UI for the Portfolio Allocation demo.
# Layout: header + sidebar menu (About / Theory / Backtest / Disclaimers);
# each sidebar (sub)item maps one-to-one to a tabItem in the body.
# NOTE(review): `date_choices` must be defined before this UI is built
# (presumably in global.R) — confirm.
# Fixes vs. previous revision: repeated `allign =` attribute typo corrected to
# `align =` (the misspelled attribute was silently ignored by browsers, so the
# affected widgets were never centered/right-aligned); user-facing typos
# "perferctly", "harverst", "Europian" corrected; dead commented-out widget
# duplicates removed.
shinyUI(dashboardPage(
  skin = "black",
  dashboardHeader(title = "Portfolio Allocation Demo"),
  dashboardSidebar(
    sidebarUserPanel("", img(src = "carey.png", width = "80%")),
    br(),
    sidebarMenu(
      menuItem("About", tabName = "about", icon = icon("book")),
      menuItem("Theory", tabName = "theory", icon = icon("graduation-cap"),
               menuSubItem("Risk/Return Ratio", tabName = "theory_1"),
               menuSubItem("Optimal Portfolio", tabName = "theory_2"),
               menuSubItem("Performance Measures", tabName = "theory_3")),
      menuItem("Backtest", tabName = "backtest", icon = icon("line-chart"),
               menuSubItem("Your Allocation", tabName = "user_port"),
               menuSubItem("Allocation Comparison", tabName = "opt_port"),
               menuSubItem("ALM Comparison Simulation", tabName = "sim_port")),
      menuItem("Disclaimers", tabName = "discl", icon = icon("exclamation-triangle"))
    )
  ),
  dashboardBody(
    tabItems(
      #### ABOUT PAGE
      tabItem(
        tabName = "about",
        fluidRow(column(3, h2("About the Application"))),
        fluidRow(column(6, div(
          br(), br(),
          p("This Shiny App was developed for the Advanced Portfolio Management course from Carey Business School of Johns Hopkins University."),
          p("The application illustrates the key principles of portfolio optimization."),
          p("In Theory section we talk about diversification and portfolio composition. Also, we introduce key performance measures that are later used in our backtesting."),
          p("Backtesting section allows user to choose a portfolio comprised of up to 10 assets from quantmod package as well as to choose a desired rebalancing schedule. The resulting portfolio is compared to S&P500 performance and the performance of the portfolio consisting of 60% S&P500, 10% Treasury Bonds, and 30% Corporate Bonds as a proxy of typical 60/40 portfolio."),
          p("The user can select a date range for which the backtesting is performed (don't forget to press Backtest button). On Allocation Comparison tab the user portfolio is compared to two optimal portfolios for the same date range: a portfolio with the same return and lower risk, and a portfolio with the same risk and higher return."),
          p("Please be informed that information in this application is provided for illustrative purposes only and does not constitute financial advice. For more information please see the Disclaimer."),
          p("The author of the first version of this application is Dr. Mikhail Stukalo, who has over 15 years of experience in financial markets."),
          p("The author of current version is Qiang Sheng, who has solid background for quantitative finance, machine learning and algorithmic trading.")
        )))
      ),
      #### Legal Disclaimer Page
      tabItem(tabName = "discl", div(htmlOutput("disclaimer"))),
      #### Risk/Return Page
      tabItem(
        tabName = "theory_1",
        fluidPage(
          h1("Risk/Return Ratio"),
          p("In 1952 Harry Markowitz suggested that assets should be evaluated based on their risk/return ratio.
            For the purposes of this app, I look at the asset returns measured by corresponding indices in 1Q2000
            - 2Q2020. "),
          p("The assets are:"),
          p(em("Equities:")),
          tags$div(tags$ul(
            tags$li("S&P 500"),
            tags$li("MSCI European Stock Index"),
            tags$li("MSCI Emerging Market Stock Index"))),
          p(em("Bonds:")),
          tags$div(tags$ul(
            tags$li("Barclays US Treasury Total Return Index"),
            tags$li("Barclays US Corporate Bonds Total Return Index"))),
          p(em("Real Estate:")),
          tags$div(tags$ul(
            tags$li("Dow Jones Real Estate Index"))),
          tabsetPanel(
            tabPanel("Whole Period", br(), plotlyOutput("graph1")),
            tabPanel("By Years", plotlyOutput("graph2")),
            tabPanel("Compound Return", plotlyOutput("graph3"))
          )
        )
      ),
      #### Optimal portfolio page
      tabItem(
        tabName = "theory_2",
        fluidPage(fluidRow(
          column(6,
                 h1("Optimal portfolio"),
                 p("Asset returns are not perfectly correlated. Therefore, we can combine assets into portfolios, and harvest
                   the results of the diversification."),
                 p("However, diversification is not limitless. For each expected risk there will be a portfolio with
                   a maximum achievable return. The graph below shows risk/return profiles of simulated portfolios (gray) and
                   a line (blue) depicting portfolios offering highest return for a given risk."),
                 p("In Harry Markowitz (1952) framework, such line is called the Efficient Frontier. However, Markowitz' theory
                   assumes that investors hold long-short portfolios. In our analysis, we limit ourselves to long-only portfolios,
                   as it is the type of portfolios retail investors usually hold. Therefore, we will refer to portfolios on this line as
                   'Optimal Portfolios', and the line itself as the 'Optimal Line'."),
                 br(),
                 plotlyOutput("graph4"))
        ))
      ),
      tabItem(tabName = "theory_3",
              fluidRow(column(8, div(htmlOutput("measures"))))),
      #### Your allocation Page: up to 12 tickers (pp1-pp12) with weight
      #### sliders rendered server-side (p1ui-p12ui).
      tabItem(
        tabName = "user_port",
        fluidRow(div(column(6, h4("Select Portfolio Allocation:", align = "center")),
                     column(2, h4("Modify Expected Return:", align = "center")),
                     column(1, h4("Select Rebalance Schedule:", align = "left")),
                     column(3, h4("Allocation", align = "center")))),
        fluidRow(div(column(1),
                     column(1, downloadButton("downloadData", "Download")),
                     column(1, fileInput("p1upload", NULL,
                                         buttonLabel = "Upload",
                                         multiple = TRUE,
                                         accept = ".csv")),
                     column(1, switchInput(inputId = "auto", label = "AUTO", value = TRUE,
                                           onLabel = "ON", offLabel = "OFF", size = "mini",
                                           width = "100%")),
                     column(2, align = "right", h5(textOutput("currentsum"))))),
        fluidRow(
          column(1, align = "left",
                 textAreaInput("pp1", label = NULL, "SPY", height = "40px", resize = "none"),
                 textAreaInput("pp2", label = NULL, "PRESX", height = "40px", resize = "none"),
                 textAreaInput("pp3", label = NULL, "EEM", height = "40px", resize = "none"),
                 textAreaInput("pp4", label = NULL, "DGS10", height = "40px", resize = "none"),
                 textAreaInput("pp5", label = NULL, "LQD", height = "40px", resize = "none"),
                 textAreaInput("pp6", label = NULL, "IYR", height = "40px", resize = "none")),
          column(2, align = "left",
                 uiOutput("p1ui"),
                 uiOutput("p2ui"),
                 uiOutput("p3ui"),
                 uiOutput("p4ui"),
                 uiOutput("p5ui"),
                 uiOutput("p6ui")),
          column(1, align = "left",
                 textAreaInput("pp7", label = NULL, "PSP", height = "40px", resize = "none"),
                 textAreaInput("pp8", label = NULL, "DFGBX", height = "40px", resize = "none"),
                 textAreaInput("pp9", label = NULL, "", height = "40px", resize = "none"),
                 textAreaInput("pp10", label = NULL, "", height = "40px", resize = "none"),
                 textAreaInput("pp11", label = NULL, "", height = "40px", resize = "none"),
                 textAreaInput("pp12", label = NULL, "", height = "40px", resize = "none")),
          column(2, align = "left",
                 uiOutput("p7ui"),
                 uiOutput("p8ui"),
                 uiOutput("p9ui"),
                 uiOutput("p10ui"),
                 uiOutput("p11ui"),
                 uiOutput("p12ui")),
          column(2, rHandsontableOutput("table5")),
          column(1, align = "left",
                 fluidRow(
                   radioButtons(inputId = "rebalance",
                                label = NULL,
                                choices = c("Monthly", "Quarterly", "Annually", "Never"),
                                selected = "Never")),
                 fluidRow(
                   br(),
                   actionBttn("update", label = "FetchData", color = "primary"),
                   hr(),
                   actionBttn("go", label = "Backtest", color = "primary"))),
          column(3,
                 div(plotlyOutput("graph5"), align = "center", style = "height:250px"))),
        fluidRow(column(12,
                        div(sliderTextInput(
                          inputId = "date_range", label = h4("Time interval:"), width = "80%",
                          choices = date_choices, selected = range(date_choices),
                          grid = TRUE, dragRange = FALSE
                        ), align = "center"))),
        fluidRow(column(6, h4("Compound Return", align = "center")),
                 column(6, h4("Performance Measures", align = "center"))),
        fluidRow(column(6, div(plotlyOutput("graph6"), align = "center")),
                 column(6, div(tableOutput("bt_table1"), align = "center")))
      ),
      #### Allocation Comparison Page
      tabItem(
        tabName = "opt_port",
        fluidRow(column(4, h4("Your Allocation", align = "center")),
                 column(4, h4("Similar Return", align = "center")),
                 column(4, h4("Similar Risk", align = "center"))),
        fluidRow(column(4, br(), br(), div(plotlyOutput("graph7"), align = "center")),
                 column(4, br(), br(), div(plotlyOutput("graph8"), align = "center")),
                 column(4, br(), br(), div(plotlyOutput("gra9" == "never", "graph9"), align = "center"))),
        fluidRow(column(6, h4("Compound Return", align = "center")),
                 column(6, h4("Performance Measures", align = "center"))),
        fluidRow(column(6, div(plotlyOutput("graph10"), align = "center")),
                 column(6, div(br(), tableOutput("bt_table2"), align = "center")))
      ),
      ### ALM Comparison Page
      tabItem(
        tabName = "sim_port",
        fluidRow(column(9, h4("Simulations", align = "center")),
                 column(3, h4("Liability Cashflow", align = "center"))),
        fluidRow(
          column(9, wellPanel(
            fluidRow(column(3,
                            strong("Historical annually log return (%)"),
                            verbatimTextOutput("simu11")),
                     column(3,
                            strong("Expected annually log return (%)"),
                            verbatimTextOutput("simu12")),
                     column(1),
                     column(1, downloadButton("p3download", "Download"), align = "center"),
                     column(1),
                     column(1, fileInput("p3upload", NULL,
                                         buttonLabel = "Upload",
                                         multiple = TRUE,
                                         accept = ".csv"), align = "right")),
            div(plotOutput("graph11_2"), align = "center"),
            br(),
            div(plotOutput("graph11"), align = "center"),
            div(plotOutput("graph12"), align = "center"),
            div(plotOutput("graph13"), align = "center"))),
          column(3,
                 wellPanel(
                   div(
                     selectInput("simuWay", "Choose a scenario:",
                                 c("default", "Recently Retired", "Pre Retired", "Couple and Young Kids", "Cook County", "Custom"),
                                 selected = "default"),
                     actionBttn("go2", label = "Run Sim", color = "primary"),
                     align = "left"),
                   br(),
                   div(
                     fluidRow(column(4, rHandsontableOutput("table3"), align = "left"),
                              column(1, rHandsontableOutput("table4"), align = "center")))))
        )
      )
    )
  )
))
|
cb00563cde3ef9a9052c84f9bf89d8f24af3d4b2
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555316-test.R
|
c3477f4f62ba4b3efdaf101d5e609c503dc33a69
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
1610555316-test.R
|
# libFuzzer/valgrind regression case for biwavelet:::rcpp_row_quantile:
# a 5x6 matrix whose first column holds extreme / denormal doubles, with
# quantile q = 0; the call must run without memory errors.
fuzz_case <- list(
  data = structure(
    c(-8.65145885556673e+303, 2.27541883785622e-317, 1.32548933609124e-309,
      1.67141905462553e-112, 3.52953630161737e+30,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    .Dim = 5:6
  ),
  q = 0
)
result <- do.call(biwavelet:::rcpp_row_quantile, fuzz_case)
str(result)
|
c6ae6917bed48876f97b015d193708657b78bc25
|
7fd95b701e50edbe16f9bb349ad240acf2a1df73
|
/scripts/results.R
|
bdb34b9504d3d562b0cade48a7a211fbedc29f17
|
[] |
no_license
|
ryali93/SedimentYield
|
75817d6868ea0e13d9bd0cbdf95f931874c6f44d
|
e93216a3df2cc1689e992e80138d1b0b14cf9417
|
refs/heads/master
| 2020-04-07T13:59:32.709522
| 2019-06-09T11:31:30
| 2019-06-09T11:31:30
| 158,430,004
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,387
|
r
|
results.R
|
# rm(list=ls())  # intentionally disabled: never clear the user's workspace from a script
library(dplyr)
library(reshape2)
library(ggplot2)

## ---- Model results vs. observed sediment yield (PEJEZA) --------------------
df <- read.csv("E:/TESIS/process/results.csv", sep = ",", header = TRUE)
# drop warm-up / incomplete years
df2 <- df %>% filter(!ts %in% c("1981-01-01", "1982-01-01", "1983-01-01",
                                "1985-01-01", "2016-01-01"))
# drop all c3 scenario columns plus bookkeeping columns
df3 <- df2 %>% select(-r_c3_ls1_sdr1, -r_c3_ls1_sdr2, -r_c3_ls1_sdr3, -r_c3_ls1_sdr4,
                      -r_c3_ls2_sdr1, -r_c3_ls2_sdr2, -r_c3_ls2_sdr3, -r_c3_ls2_sdr4,
                      -r_c3_ls3_sdr1, -r_c3_ls3_sdr2, -r_c3_ls3_sdr3, -r_c3_ls3_sdr4,
                      -X, -ts)
# meltdf <- melt(df3, id="ts")
# ggplot(meltdf, aes(x=ts, y=value,colour=variable,group=variable)) + geom_line()
# unit conversion (1.65 presumably bulk density t/m3; confirm remaining factors)
df4 <- df3 / (1.65 * 1000000 * 7.5)
anos <- data.frame(anos = seq(as.Date("1984-01-01"), as.Date("2015-01-01"), by = "year"))
anos <- filter(anos, anos != "1985-01-01")
df5 <- cbind(anos, df4)
# observed annual sediment yield at PEJEZA (NA = no record for that year)
PEJEZA <- c(NA, NA, NA, 2, 3, 4, 5, 11, 16, 17, 21, 22, 60, 62, 63, 64, NA, NA,
            NA, NA, 80, 82, NA, NA, NA, NA, NA, NA, 104.53, NA, NA)
df6 <- cbind(df5, PEJEZA)
# candidate scenarios retained for comparison against the observations
results <- df6 %>% select(r_c5_ls3_sdr4,
                          r_c2_ls3_sdr4,
                          r_c4_ls1_sdr1,
                          r_c6_ls1_sdr4,
                          r_c6_ls3_sdr2,
                          PEJEZA)
results <- cbind(anos, results)
results
plot(results)

## ---- Alternative results file (with missing values) ------------------------
df <- read.csv("E:/TESIS/results/results2_missing.csv", sep = ";", header = TRUE)
anos <- data.frame(anos = seq(as.Date("1988-01-01"), as.Date("2015-01-01"), by = "year"))
df2 <- cbind(anos, df)
meltdf <- melt(df2, id = "anos")
# BUG FIX: `+` previously started the following lines, which splits a ggplot
# chain into separate (invalid) expressions; `+` must end the preceding line.
ggplot(meltdf, aes(x = anos, y = value, colour = variable, group = variable)) +
  geom_line() +
  geom_boxplot(alpha = 0)
ggplot(meltdf, aes(x = value, y = variable, group = variable)) +
  geom_bar(stat = "identity", position = "dodge")
ggplot(meltdf, aes(x = anos, y = value, group = variable)) +
  geom_boxplot()

# install only when missing instead of unconditionally on every run
if (!requireNamespace("PerformanceAnalytics", quietly = TRUE)) {
  install.packages("PerformanceAnalytics")
}
library("PerformanceAnalytics")
chart.Correlation(df, histogram = TRUE, pch = 19)
library(corrplot)
# BUG FIX: corrplot() expects a correlation matrix, not the raw data frame;
# use = "complete.obs" is needed because this file contains missing values
corrplot(cor(df, use = "complete.obs"))
df
col <- colorRampPalette(c("blue", "white", "red"))(20)
heatmap(x = cor(df, use = "complete.obs"), col = col, symm = TRUE)
# cor() only uses the first element of `method`; be explicit about it
corr <- cor(df, method = "pearson", use = "complete.obs")
corrplot(corr, type = "upper", order = "hclust",
         tl.col = "black", tl.srt = 45)
# NOTE(review): the lines below referenced columns (`mortes`, `date`, `trmt`)
# and an object `dtm` that are not defined anywhere in this script; disabled
# so the script can run end-to-end.
# p <- ggplot(data = df2, aes(x = anos, y = mortes, group = interaction(date, trmt)))
# p + geom_boxplot(aes(fill = factor(dtm$trmt)))
|
d0397fd95d2fd2e0de3017cdf946643982a91368
|
60a99dc425d9edca7b3dec562f5cf6367d9c61ec
|
/prettyGraphs/man/contributionBars.Rd
|
81b09b641f5fbb4d31bbde595defa80f1385894f
|
[] |
no_license
|
LukeMoraglia/ExPosition1
|
e7718ae848608f1dc3934513c6588f53f2c45a7f
|
a69da6c5b0f14ef9fd031b98c3b40b34dad5240f
|
refs/heads/master
| 2022-12-31T17:45:10.909002
| 2020-10-22T19:45:49
| 2020-10-22T19:45:49
| 255,486,130
| 0
| 1
| null | 2020-10-22T18:08:38
| 2020-04-14T02:01:12
|
R
|
UTF-8
|
R
| false
| false
| 1,779
|
rd
|
contributionBars.Rd
|
\name{contributionBars}
\alias{contributionBars}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
contributionBars
}
\description{
Produces bar charts for multivariate analyses. Plots the contribution to the variance from each data point for up to two axes (components).
}
\usage{
contributionBars(factor_scores, contributions, x_axis = 1, y_axis = 2, col = NULL,
main = NULL, upper = 'steelblue4', lower = 'firebrick2', threshold = 0,
sortContributions = TRUE, pretty = FALSE, show.bg.bars = FALSE)
}
\arguments{
\item{factor_scores}{
The factor scores, or x- and y-axis points of a data set.
}
\item{contributions}{
The amount of contribution to the variance (between 0-1) by each data point.
}
\item{x_axis}{
Which axis is the x-axis? Default is 1.
}
\item{y_axis}{
Which axis is the y-axis? Default is 2.
}
\item{col}{
A single-column matrix of colors for each data point.
}
\item{main}{
A title to be placed at the top of the graph.
}
\item{upper}{
The color used to identify the upper bound items that contribute above average variance.
}
\item{lower}{
The color used to identify the lower bound items that contribute above average variance.
}
\item{threshold}{
A threshold (between 0-1) to draw upper and lower bounds for important items. Default is 1/number of items.
}
\item{sortContributions}{
boolean, if TRUE, items will be sorted by contribution. if FALSE, items will appear in their row order.
}
\item{pretty}{
a boolean. If FALSE, use the current version. If TRUE, make the bars with \code{\link{prettyBars}}.
}
\item{show.bg.bars}{
a boolean. Only used if \code{pretty} is TRUE. If TRUE, background bars are plotted for a fill effect.
}
}
\author{
Derek Beaton
}
\keyword{ graphs }
\keyword{ multivariate }
|
a3e516e5b4d26ba90f788888f733e2624fbd8566
|
2e49deee0b0e8060e08af969baa9b1fc0d8a6c77
|
/app.R
|
228a65d6b299f583f6e9cfa7aff64a9a9ca440e7
|
[] |
no_license
|
DrMattG/AcademicCVShinyDashboard
|
8d1d0e49c6633336ac9fe13cc0f7cebb3bfc8388
|
e14ff6ea0f13abc04ab8fa1922cef287a3a1f9e2
|
refs/heads/master
| 2021-07-17T02:56:27.074653
| 2020-08-04T12:30:11
| 2020-08-04T12:30:11
| 197,767,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,502
|
r
|
app.R
|
# Source the data-refresh helpers and regenerate the cached .RDS inputs
# (Profile.RDS, pubs.RDS, predH.RDS, citeyr.RDS, d.RDS, timeline_dat.RDS)
# that the server function reads at startup.
source("code/Data_save.R")
updateData()
# load the required packages
library(shiny)
require(shinydashboard)
library(tidyverse)
library(scholar)
library(timevis)
library(tm)
library(wordcloud)
library(rsconnect)
library(readr)
###########################
#Build shinyapp
#UI
# Single-page shinydashboard UI for the academic-CV app. Every *Output id
# below ("timeline", "value1".."value3", "cites", "Words", "table",
# "predictH") is rendered in the server function further down.
ui <- dashboardPage(
dashboardHeader(
title = "Dr Matthew J Grainger",
titleWidth = 300
),
dashboardSidebar(
sidebarMenu(
menuItem("Academic CV", tabName = "dashboard", icon = icon("dashboard")),
menuItem("My website", icon = icon("send",lib='glyphicon'),
href = "https://uncertainecologist.netlify.com/")
)
),
dashboardBody(
tabsetPanel(
id = "tabs",
tabPanel(
title = "My academic CV",
value = "page1",
# Row 1: interactive CV timeline (timevis)
fluidRow(
box("My timeline", timevisOutput("timeline"), width=12)
),
# Row 2: headline metrics (affiliation, total citations, h-index)
fluidRow(
infoBoxOutput("value1"),
infoBoxOutput("value2"),
infoBoxOutput("value3")
),
# Row 3: citations-per-year bar chart and abstract word cloud
fluidRow(
box(
title = "Citations over time"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,plotOutput("cites", height = "650px")
),
box(
title="Wordcloud of Abstracts"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,plotOutput("Words", height = "650px")
)
),
# Row 4: sortable publication table and predicted h-index plot
fluidRow(
box(title="My publications"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,selectInput("sort_on",
"Choose variable to sort on",
choices = c("Title" = "Title",
"Year" = "Year",
"Journal"="Journal"))
,tableOutput('table')
),
box(title="My predicted H-Index"
,status = "primary"
,solidHeader = TRUE
,collapsible = TRUE
,plotOutput('predictH')))
)
)
)
)
# create the server functions for the dashboard
# Server logic: loads the cached scholar data written by updateData() and
# wires each output id used in `ui` to its render function.
server <- function(input, output) {
#some data manipulation to derive the values of boxes
# Cached inputs produced by code/Data_save.R::updateData()
profile<-readRDS("Profile.RDS")
pubs<-readRDS("pubs.RDS")
predH<-readRDS("predH.RDS")
cite.yr<-readRDS("citeyr.RDS")
d<-readRDS("d.RDS")
timeline_dat<-readRDS("timeline_dat.RDS")
# Publication table with presentation-ready column names
pubstab<-pubs %>%
select(title, journal, number, year) %>%
mutate(year=round(year)) %>%
rename("Title"=title, "Journal"=journal, "Journal & Page numbers"=number, "Year"=year)
#reactive data
# Re-sorts the publication table whenever the "sort on" selector changes
sortTable <- reactive({
pubstab[do.call(order, pubstab[as.character(input$sort_on)]),]
})
# NOTE(review): WoSpapers is read here but not referenced anywhere below —
# confirm whether it is still needed before removing.
WoSpapers <- read_delim("WoSpapers.csv", ";", escape_double = FALSE, trim_ws = TRUE)
#predict_h_index(GS_id)
output$timeline <- renderTimevis({
timevis(timeline_dat)
})
#creating the valueBoxOutput content
output$value1 <- renderInfoBox({
infoBox("Affiliation:", profile$affiliation,
icon = icon("briefcase",lib='font-awesome')
,color = "purple")})
output$value2 <- renderInfoBox({
infoBox('Total citations:', profile$total_cites,
icon = icon("book-reader",lib='font-awesome')
,color = "purple")})
output$value3 <- renderInfoBox({
infoBox("H-Index:", profile$h_index,
icon = icon("hospital-symbol",lib='font-awesome')
,color = "purple")})
#creating the plotOutput content
# Citations per year as a bar chart
output$cites <- renderPlot({
ggplot(cite.yr, aes(year,cites)) +
geom_bar(stat='identity',fill=colors()[35])+
ylab("number of citations")+
xlab("Year")+
scale_x_continuous(
breaks = c(2005,2006,2007,2008,2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017,2018,2019,2020))+
theme_classic()
})
# Word cloud of abstract word frequencies (d: word/freq data frame)
output$Words<-renderPlot({
wordcloud(d$word, d$freq, scale = c(3,1),min.freq=8,colors=brewer.pal(8, "Dark2"))
} )
output$table <- renderTable(sortTable(), digits = 0)
# Predicted h-index trajectory from scholar::predict_h_index
output$predictH<-renderPlot({
ggplot(predH, aes(years_ahead,h_index, colour="red")) +
geom_point()+
geom_smooth()+
ylab("Potential H index")+
xlab("Years ahead")+
theme_classic()+
theme(legend.position = "none")
})
}
# Launch the Shiny application using the UI and server defined above.
shinyApp(ui, server)
|
2835e9256613118a9a3cebab991caef3e433ff30
|
0d5fb58c69e80a16f7ce8496bff1d37dd5040763
|
/water_param_DEoptim.R
|
f4feba52ee6230a844a32fd766d4f694686ff3b4
|
[] |
no_license
|
kevinwolz/hisafe-calibration
|
b64c731b16cc811ecc58b0867682a43e1a9e9249
|
46067ed2200d73c0daef06017f7eab91274ea865
|
refs/heads/master
| 2023-07-14T09:06:49.274190
| 2019-03-23T19:40:46
| 2019-03-23T19:40:46
| 110,268,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,913
|
r
|
water_param_DEoptim.R
|
### hisafe water module parameter optimization
### Author: Kevin J. Wolz
library(hisafer)
library(tidyverse)
library(parallel)
library(DEoptim)
library(plotly)
## --- Run configuration -------------------------------------------------------
N.ITER <- 3    # DEoptim generations (itermax)
TRACE <- TRUE  # print DEoptim progress each generation
YEARS <- 1994  # simulation year(s); length(YEARS) drives nbSimulations
# NOTE(review): downstream branches only test METHOD == "ALL" / "TOTAL";
# "DISCRETE" matches neither, so STICS/rmse would never be assigned — confirm.
METHOD <- "DISCRETE" # TOTAL
CROPS <- c("durum-wheat-restinclieres", "weed-restinclieres", "rape", "winter-pea")
# NOTE(review): dead assignment — BASE.PATH is overwritten below before use.
BASE.PATH <- "/Users/kevinwolz/Desktop/RESEARCH/ACTIVE_PROJECTS/HI-SAFE/hisafe_testing/"
REFERENCE.PROFILES <- c("cells", "voxels", "voxelsDetail", "plot")  # Hi-sAFe output profiles to export
DELETE <- TRUE  # delete each candidate simulation's folder after scoring
input.path <- "./raw_data/"
BASE.PATH <- "./output/water_param_optimization/"
# Calibration bounds/steps per parameter (columns incl. param.name, param.min, param.mx)
PARAMS <- read_csv(paste0(input.path, "crop_water_calibration_parameters.csv"), col_types = cols())
for(CROP in CROPS) {
#CROP = CROPS[1]
# Per-crop output directory: <BASE.PATH>/<crop>/<method>/
PATH <- paste0(BASE.PATH, CROP, "/", METHOD, "/")
dir.create(PATH, showWarnings = FALSE, recursive = TRUE)
# Parameters shared by the reference and all candidate simulations
common.params <- list(nbSimulations = length(YEARS),
                      waterTable = 0,
                      simulationYearStart = YEARS[1],
                      mainCropSpecies = paste0(CROP, ".plt"),
                      mainCropItk = paste0(CROP, ".tec"),
                      interCropSpecies = paste0(CROP, ".plt"),
                      interCropItk = paste0(CROP, ".tec"),
                      layers = layer_params(template = "monocrop",
                                            thick = c(0.4, 0.4, 0.6, 0.6)))
##### REFERENCE SIMULATION #####
# Reference run with STICS water extraction ON; its uptake is the calibration target.
ref.hip <- define_hisafe(path = PATH,
                         template = "monocrop",
                         profiles = REFERENCE.PROFILES,
                         SimulationName = "stics",
                         sticsWaterExtraction = 1,
                         bulk.pass = common.params)
build_hisafe(ref.hip, plot.scene = FALSE, summary.files = FALSE)
run_hisafe(ref.hip, capsis.path = "/Applications/Capsis/")
ref.hop <- read_hisafe(path = PATH,
                       simu.name = "stics",
                       profiles = REFERENCE.PROFILES,
                       show.progress = FALSE,
                       read.inputs = FALSE)
# Export the reference LAI series; candidate runs force this LAI via laiFileName.
lai.output <- ref.hop$cells %>%
  dplyr::select(Day, Month, Year, JulianDay, lai)
write_delim(lai.output, paste0(PATH, "lai.obs"), delim = "\t")
# Growing-season window: first/last day with positive LAI
GROWTH.DATES <- ref.hop$cells %>%
  dplyr::filter(lai > 0) %>%
  .$Date %>%
  range()
# Calibration target: per-voxel/day uptake ("ALL") or season total ("TOTAL").
# NOTE(review): METHOD = "DISCRETE" falls through both branches (STICS unset).
if(METHOD == "ALL") {
  STICS <- ref.hop$voxels %>%
    dplyr::filter(Date >= GROWTH.DATES[1], Date <= GROWTH.DATES[2]) %>%
    dplyr::select(Date, z, cropWaterUptake) %>%
    dplyr::rename(stics = cropWaterUptake)
} else if(METHOD == "TOTAL") {
  STICS <- ref.hop$voxels %>%
    dplyr::filter(Date >= GROWTH.DATES[1], Date <= GROWTH.DATES[2]) %>%
    .$cropWaterUptake %>%
    sum()
}
##### WATER COMP FUNCTION #####
# DEoptim objective: given a candidate vector of 7 crop water parameters,
# run one Hi-sAFe simulation with STICS water extraction disabled (forcing the
# reference LAI) and score its water uptake against the STICS reference.
# Reads globals: PATH, common.params, METHOD, STICS, GROWTH.DATES, DELETE.
# Returns the per-voxel/day RMSE (METHOD "ALL") or the absolute difference of
# season totals (METHOD "TOTAL"); appends candidate + score to log_file.txt.
# NOTE(review): if METHOD matches neither branch, `rmse` is never defined and
# the trailing cat()/return() error out — confirm intended METHOD value.
water_comp <- function(params) {
# log the raw candidate vector (tab-separated) before renaming
cat(paste0("\n", paste(params, collapse = "\t")), file = paste0(PATH, "log_file.txt"), append = TRUE)
# map positional candidate values onto named Hi-sAFe parameters;
# element 5 is an offset: max potential = min potential + offset
params <- list(cropRootDiameter = params[1],
               cropRootConductivity = params[2],
               cropAlpha = params[3],
               cropMinTranspirationPotential = params[4],
               cropMaxTranspirationPotential = params[4] + params[5],
               cropBufferPotential = params[6],
               cropLongitudinalResistantFactor = params[7])
# unique simulation folder name derived from the parameter values
NAME <- gsub("0\\.", "", paste("sim", paste(params, collapse = "_"), sep = "_"))
hip <- define_hisafe(path = PATH,
                     template = "monocrop",
                     profiles = "voxelsOptim",
                     SimulationName = NAME,
                     sticsWaterExtraction = 0,
                     laiFileName = "lai.obs",
                     bulk.pass = c(common.params, params))
build_hisafe(hip, plot.scene = FALSE)
# copy the reference LAI series into the candidate's simulation folder
dum <- file.copy(paste0(PATH, "lai.obs"), paste0(hip$path, "/", NAME, "/lai.obs"))
run_hisafe(hip, capsis.path = "/Applications/Capsis/", quietly = TRUE)
hop <- read_hisafe(hip,
                   profiles = "voxelsOptim",
                   show.progress = FALSE,
                   read.inputs = FALSE)
# optionally reclaim disk space once outputs are in memory
if(DELETE) dum <- unlink(paste0(hip$path, "/", NAME), recursive = TRUE)
if(METHOD == "ALL") {
  HISAFE <- hop$voxels %>%
    dplyr::filter(Date >= GROWTH.DATES[1], Date <= GROWTH.DATES[2]) %>%
    dplyr::select(Date, z, cropWaterUptake) %>%
    dplyr::rename(hisafe = cropWaterUptake)
  # RMSE across all (Date, z) voxel-days vs. the STICS reference
  rmse <- HISAFE %>%
    dplyr::left_join(STICS, by = c("Date", "z")) %>%
    dplyr::mutate(sqdif = (hisafe - stics) ^ 2) %>%
    dplyr::summarize(rmse = sqrt(mean(sqdif))) %>%
    .$rmse
} else if(METHOD == "TOTAL") {
  HISAFE <- hop$voxels %>%
    dplyr::filter(Date >= GROWTH.DATES[1], Date <= GROWTH.DATES[2]) %>%
    .$cropWaterUptake %>%
    sum()
  # absolute difference of growing-season totals
  rmse <- abs(STICS - HISAFE)
}
# append the score on the same log line as the candidate
cat(paste0("\t", rmse), file = paste0(PATH, "log_file.txt"), append = TRUE)
return(rmse)
}
##### OPTIMIZATION #####
# INITIAL.POP <- as.matrix(expand.grid(c(0.005, 0.065, 0.02),
# c(0.20, 0.12, 0.1),
# c(-27000, -24000, -30000),
# c(25000, 20000)))
# Snap a DEoptim candidate onto the calibration grid: each of the 7 crop water
# parameters is rounded to its own step size, making the search effectively
# discrete. Passed to DEoptim() as `fnMap`.
#
# @param x numeric vector of length >= 7 (only elements 1:7 are mapped).
# @return `x` with elements 1:7 rounded to their grid.
mappingFun <- function(x) {
  # step sizes for: root diameter, root conductivity, alpha, min transpiration
  # potential, max-min offset, buffer potential, longitudinal resistance factor
  steps <- c(0.001, 0.000001, 0.01, 1000, 1000, 0.01, 1)
  x[1:7] <- round(x[1:7] / steps) * steps
  x
}
# Differential-evolution search over the bounded, grid-mapped parameter space.
# Seed fixed for reproducibility of the stochastic optimizer.
set.seed(333)
# NOTE(review): bounds come from columns `param.min` / `param.mx` — confirm
# the CSV really uses "param.mx" (not "param.max").
DEout <- DEoptim(fn = water_comp,
                 lower = PARAMS$param.min,
                 upper = PARAMS$param.mx,
                 control = DEoptim.control(itermax = N.ITER,
                                          trace = TRACE),
                 fnMap = mappingFun)
#DEout$optim
#DEout$member
# Best member as a one-row tibble named by parameter, then persisted.
out <- DEout$optim$bestmem %>%
  matrix(nrow = 1) %>%
  as_tibble()
names(out) <- PARAMS$param.name
write_csv(out, paste0(PATH, CROP, "_", METHOD, "_optimized_water_params.csv"))
save(DEout, file = paste0(PATH, CROP, "_", METHOD, "_Water_Param_Optimization.RData"))
##### TEST FINAL SOLUTION #####
# Re-run Hi-sAFe with the optimized parameter set ("new_winner") and the
# previously calibrated set from the CSV ("old_winner") side-by-side, then
# merge both with the STICS reference run for the diagnostic plots below.
old.winner <- as.numeric(PARAMS[CROP])
new.winner <- DEout$optim$bestmem
# each entry pairs (new, old); element 5 is the max-min potential offset
params <- list(cropRootDiameter = c(new.winner[1], old.winner[1]),
               cropRootConductivity = c(new.winner[2], old.winner[2]),
               cropAlpha = c(new.winner[3], old.winner[3]),
               cropMinTranspirationPotential = c(new.winner[4], old.winner[4]),
               cropMaxTranspirationPotential = c(new.winner[4] + new.winner[5], old.winner[4] + old.winner[5]),
               cropBufferPotential = c(new.winner[6], old.winner[6]),
               cropLongitudinalResistantFactor = c(new.winner[7], old.winner[7]))
win.hip <- define_hisafe(path = PATH,
                         exp.name = "hisafe",
                         template = "monocrop",
                         profiles = REFERENCE.PROFILES,
                         SimulationName = c("new_winner", "old_winner"),
                         sticsWaterExtraction = 0,
                         laiFileName = "lai.obs",
                         bulk.pass = c(common.params, params))
build_hisafe(win.hip, plot.scene = FALSE)
# BUG FIX: lai.obs was written to PATH (see write_delim above), not BASE.PATH;
# the previous paste0(BASE.PATH, "lai.obs") pointed at a non-existent file and
# file.copy() silently returned FALSE, so the winner runs had no forced LAI.
dum <- file.copy(paste0(PATH, "lai.obs"), paste0(win.hip$path, "/", c("new_winner", "old_winner"), "/lai.obs"))
run_hisafe(win.hip,
           capsis.path = "/Applications/Capsis/",
           parallel = TRUE,
           num.cores = 2,
           quietly = TRUE)
win.hop <- read_hisafe(win.hip,
                       profiles = REFERENCE.PROFILES,
                       show.progress = FALSE,
                       read.inputs = FALSE)
# combine reference + both winners into one hop object for joint plotting
hop <- hop_merge(ref.hop, win.hop)
hop$exp.path <- PATH
dum <- purrr::map(paste0(PATH, c("voxels", "cells")), dir.create, showWarnings = FALSE)
##### PLOTS #####
# Voxel depths (m) to include in the voxel diagnostic plots.
voxels.to.plot <- seq(0.1, 1.1, 0.2)
# NOTE(review): these calls previously passed `date.min` twice; the second
# value is clearly the end of the plotting window, so it is passed as
# `date.max` here (a duplicate named argument either errors or leaves the
# window unbounded on the right).
diag_hisafe_voxels(hop,
                   output.path = PATH,
                   date.min = paste0(min(YEARS), "-12-01"),
                   date.max = paste0(max(YEARS + 1), "-7-01"),
                   X = voxels.to.plot,
                   facet.simu = FALSE,
                   facet.z = TRUE)
# Cell-level time-series diagnostics over the same window.
diag_hisafe_ts(hop,
               profile = "cells",
               output.path = PATH,
               date.min = paste0(min(YEARS), "-12-01"),
               date.max = paste0(max(YEARS + 1), "-7-01"))
# Plot-level time-series diagnostics over the same window.
diag_hisafe_ts(hop,
               profile = "plot",
               output.path = PATH,
               date.min = paste0(min(YEARS), "-12-01"),
               date.max = paste0(max(YEARS + 1), "-7-01"))
## GA Diagnostics
# Best RMSE per generation of the differential-evolution search.
DE <- tibble(RMSE = DEout$member$bestvalit) %>%
  mutate(Generation = 1:nrow(.))
# Trajectory of the best objective value across generations.
de.plot <- ggplot(DE, aes(x = Generation, y = RMSE)) +
  geom_line() +
  theme_hisafe_ts() +
  theme(panel.grid = element_blank())
ggsave_fitmax(paste0(PATH, CROP, "_", METHOD, "_RMSE_GA_Trajectory.png"), de.plot)
# plot_hisafe_voxels(hop,
# variable = "cropWaterUptake",
# date.min = paste0(min(YEARS), "-12-01"),
# date.min = paste0(max(YEARS + 1), "-7-01"),
# X = voxels.to.plot,
# facet.simu = FALSE,
# facet.z = TRUE)
# test.plot <- ggplot(filter(hop$voxels, z <= 1.1),
# aes(x = Date,
# y = cropNitrogenUptake,
# color = SimulationName)) +
# geom_line(size = 1, na.rm = TRUE) +
# scale_x_date(limits = lubridate::ymd(c(paste0(YEAR, "-12-01"),
# paste0(YEAR + 1, "-7-01")))) +
# facet_wrap(~z, ncol = 1) +
# scale_color_manual(values = c("black", "red", "blue"))
# ggplotly(test.plot)
# test.plot <- ggplot(hop$plot,
# aes(x = Date,
# y = mainCropMeanBiomass,
# color = SimulationName)) +
# geom_line(size = 1, na.rm = TRUE) +
# scale_x_date(limits = lubridate::ymd(c(paste0(YEAR, "-12-01"),
# paste0(YEAR + 1, "-7-01")))) +
# scale_color_manual(values = c("black", "red", "blue"))
# ggplotly(test.plot)
#
#
# plot_hop <- hop %>%
# hop_filter(c("stics", "new_winner"))# %>%
# #hop_rename(c("stics", "new_winner"), c("stics", "hisafe"))
#
# for(i in names(hop$voxels)[13:60]) {
# voxel.plot <- ggplot(filter(plot_hop$voxels, z <= 1.1),
# aes_string(x = "Date",
# y = i,
# color = "SimulationName")) +
# geom_line(size = 1, na.rm = TRUE) +
# scale_x_date(limits = lubridate::ymd(c(paste0(YEAR, "-12-01"),
# paste0(YEAR + 1, "-7-01")))) +
# facet_wrap(~z, ncol = 1) +
# scale_color_manual(values = c("black", "red", "blue"))
# ggsave_fitmax(paste0(PATH, "voxels/", i, ".png"), voxel.plot)
# }
#
# for(i in names(hop$cells)[13:28]) {
# cell.plot <- ggplot(plot_hop$cells,
# aes_string(x = "Date",
# y = i,
# color = "SimulationName")) +
# geom_line(size = 1, na.rm = TRUE) +
# scale_x_date(limits = lubridate::ymd(c(paste0(YEAR, "-12-01"),
# paste0(YEAR + 1, "-7-01")))) +
# scale_color_manual(values = c("black", "red", "blue"))
# ggsave_fitmax(paste0(PATH, "cells/", i, ".png"), cell.plot)
# }
#
# for(i in names(hop$plot)[11:155]) {
# plot.plot <- ggplot(plot_hop$plot,
# aes_string(x = "Date",
# y = i,
# color = "SimulationName")) +
# geom_line(size = 1, na.rm = TRUE) +
# scale_x_date(limits = lubridate::ymd(c(paste0(YEAR, "-12-01"),
# paste0(YEAR + 1, "-7-01")))) +
# scale_color_manual(values = c("black", "red", "blue"))
# ggsave_fitmax(paste0(PATH, "plot/", i, ".png"), plot.plot)
# }
}
|
0cfdf419f76a2b42087ab29977f6405d033df44e
|
62c1dd454d9ce2046792545d1cbcce0af0285d93
|
/R/preview.R
|
17c5b60eab287bf010a45407ff091efaa971a5b5
|
[] |
no_license
|
davidallen02/employment-situation
|
a9ab3a0a024612be58e4e15ccbb1e2b3f134f73b
|
ff9a8805fc72a1a65cdccae5cb693e2f1766fe71
|
refs/heads/master
| 2021-07-14T03:41:49.476051
| 2021-02-01T17:15:58
| 2021-02-01T17:15:58
| 232,154,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,210
|
r
|
preview.R
|
# Build a one-slide preview image of consensus estimates for the upcoming
# US Employment Situation release: nonfarm payrolls, U-3 unemployment rate,
# and annual growth in average hourly earnings.
library(magrittr)
# source('./R/functions/read_data.R')
# source('./R/functions/ppt_output.R')
# Pull Bloomberg survey-median consensus for the three headline series and
# keep only the most recent date.
# NOTE(review): pamngr is an internal package; get_data() appears to return a
# data frame keyed by a "dates" column -- confirm against the package.
consensus <- pamngr::get_data("nfp tch", flds = "BN_SURVEY_MEDIAN") %>%
  dplyr::left_join(pamngr::get_data('usurtot', flds = "BN_SURVEY_MEDIAN"), by = "dates") %>%
  dplyr::left_join(pamngr::get_data('ahe yoy%', flds = "BN_SURVEY_MEDIAN"), by = "dates") %>%
  set_colnames(c('date','payrolls','u3','ahe')) %>%
  dplyr::filter(date == max(date))
# Human-readable release period, e.g. "January 2021".
current.period <- consensus %>%
  dplyr::select(date) %>%
  dplyr::pull() %>%
  format('%B %Y')
# Large burgundy text grobs for each headline number.
payrolls <- consensus %>%
  dplyr::select(payrolls) %>%
  dplyr::pull() %>%
  paste0('k') %>%
  grid::textGrob(gp = grid::gpar(fontsize = 40, col = '#850237'),
                 just = 'center')
u3 <- consensus %>%
  dplyr::select(u3) %>%
  dplyr::pull() %>%
  paste0('%') %>%
  grid::textGrob(gp = grid::gpar(fontsize = 40, col = '#850237'),
                 just = 'center')
ahe <- consensus %>%
  dplyr::select(ahe) %>%
  dplyr::pull() %>%
  paste0('%') %>%
  grid::textGrob(gp = grid::gpar(fontsize = 40, col = '#850237'),
                 just = 'center')
# Title, date, and column-subtitle grobs.
title <- 'Employment Situation\n' %>% grid::textGrob(
  gp = grid::gpar(fontsize = 50, fontface = 'bold'),
  just = 'top'
)
date <- paste0(current.period, '\nConsensus Estimates') %>% grid::textGrob(
  gp = grid::gpar(fontsize = 35)
)
subtitle.1 <- paste0('Monthly Change\nin Nonfarm Payrolls') %>%
  grid::textGrob(gp = grid::gpar(fontsize = 30),
                 just = 'top')
subtitle.2 <- paste0('U-3 Unemployment\nRate') %>%
  grid::textGrob(gp = grid::gpar(fontsize = 30),
                 just = 'top')
subtitle.3 <- 'Annual Growth in Avg\nHourly Earnings' %>%
  grid::textGrob(gp = grid::gpar(fontsize = 30),
                 just = 'top')
blank <- grid::textGrob(' ')
# Layout matrix: title row, date row, three subtitles, three values, spacer.
lay <- rbind(c(1,1,1),
             c(2,2,2),
             c(3,4,5),
             c(6,7,8),
             c(9,9,9))
# Arrange all grobs and write the slide image via the internal ppt helper.
p <- gridExtra::grid.arrange(title,
                             date,
                             subtitle.1, subtitle.2, subtitle.3,
                             payrolls, u3, ahe,
                             blank,
                             layout_matrix = lay) %>%
  pamngr::ppt_output('preview.png')
|
039b0625b3377f35562245f8f7eca7320ee5589a
|
00be44c6e49e7f0e948bb202457240467665480e
|
/R_templates/meta_bag.R
|
29defa2d657f4aec16cb38adf60dd2dd6223f31e
|
[] |
no_license
|
AkiraKane/scharf-personal
|
b9469d76e026255283f99d66c5cb0e17456bf8b5
|
b6b6560bc8ac5033871e6e64cb2920b6b14f30bd
|
refs/heads/master
| 2021-01-19T20:58:07.745222
| 2016-09-22T15:12:26
| 2016-09-22T15:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
meta_bag.R
|
#' Bagged meta-model builder.
#'
#' Draws `n_models` bootstrap samples of the training rows and hands each
#' bag to `worker_bee`, which is responsible for fitting and saving a model
#' in `model_dir`.
meta_bag <- function(
  Xtrain,     # matrix of training data (data columns only)
  Xtest,      # matrix of test data (data columns only)
  y,          # target variable(s), aligned with rows of Xtrain
  model_dir,  # output directory; created here if it doesn't exist
  n_models,   # total number of bagged models to construct
  worker_bee) # function(train, test, y, idx, model_dir, m) that saves a model in model_dir
{
  # Make the output directory if it doesn't exist (plain `if` instead of a
  # scalar ifelse(), which is an antipattern for side effects).
  if (!dir.exists(model_dir)) {
    dir.create(model_dir)
  }
  # Train and test must have the same number of columns.
  stopifnot(ncol(Xtrain) == ncol(Xtest))
  # seq_len() is safe when n_models == 0 (1:0 would iterate twice).
  for (m in seq_len(n_models)) {
    cat('\n', 'working on model', m, '\n')
    # Seed per model so each bag is reproducible.
    set.seed(m)
    # Bootstrap: sample row indexes with replacement (full bag).
    idx <- sample(nrow(Xtrain), nrow(Xtrain), replace = TRUE)
    worker_bee(
      train = Xtrain,
      test = Xtest,
      y = y,
      idx = idx,
      model_dir = model_dir,
      m = m)
  }
}
|
73fadd4dd63097d2a007f20704cbdb566337920d
|
55c2eaf5f65b863bf6efd0cd5c6f29dce85dac30
|
/R/utils.R
|
a3f4ba6020b305780ef0008895c2a1229785da66
|
[] |
no_license
|
renanlf/distrstats
|
244d6209ae67ae77ac81c9299b2b2d136ee49b36
|
f2d09708ecf3cb6482595963291158c04f977384
|
refs/heads/master
| 2023-06-30T12:09:02.085495
| 2021-08-07T02:08:51
| 2021-08-07T02:08:51
| 393,548,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
utils.R
|
# Wrap a label and its values into a simple dataset object (a named list
# with elements `name` and `values`).
as.dataset <- function(name, values) {
  dataset <- list()
  dataset$name <- name
  dataset$values <- values
  dataset
}
# Bundle a probability distribution's components into a plain list.
#   name:        distribution label
#   pdf, cdf:    density and cumulative distribution functions
#   nparams:     number of parameters of the distribution
#   lower/upper: support bounds
# Fix: the original had a trailing comma after `upper = upper,`, which makes
# list() fail at call time with an "argument is empty" error.
as.distribution <- function(name, pdf, cdf, nparams, lower, upper){
  list(
    name = name,
    pdf = pdf,
    cdf = cdf,
    nparams = nparams,
    lower = lower,
    upper = upper
  )
}
|
1b753bae631f20d554bcdb283cc4ce5f03443280
|
9320966521bd97b3eb88207fe53b4054568a45d2
|
/OUTROS/IC/PROJETOS/Classificador-IMDB/RECOMMENDER-IMDB/Rmd-RF/outros/q10.R
|
d21d7677c549d73a003ad993233ccd4df607cacd
|
[] |
no_license
|
eupimenta/textmining_pt
|
e70a6b7ff901953a479f5bf3957572f83ed61773
|
2b3a45f92e5be28bbbd31134b998b27721072b59
|
refs/heads/master
| 2020-04-29T01:35:08.215762
| 2019-06-19T17:40:56
| 2019-06-19T17:40:56
| 175,735,476
| 0
| 0
| null | 2019-06-19T17:40:57
| 2019-03-15T02:37:13
|
HTML
|
UTF-8
|
R
| false
| false
| 828
|
r
|
q10.R
|
# Scratch analysis of a missing-data summary.
# NOTE(review): `missing_data`, `colors`, `colors2`, and `fruit` are defined
# elsewhere (or in the interactive session) -- this script is not standalone.
n <- nrow(missing_data)
dat <- missing_data
# Create a function to generate a continuous colour palette (red -> blue).
rbPal <- colorRampPalette(c('red','blue'))
# Add a column of colour values based on binned percent_missing.
dat$Col <- rbPal(10)[as.numeric(cut(dat$percent_missing, breaks = 10))]
plot(dat$percent_missing, dat$percent_missing, pch = 20, col = dat$Col)
# Build a demo data set for plotting.
# Fix: the original had a stray bare token `db` as the first line inside
# mutate(), which made the whole call a parse error; it has been removed.
ddb_NULL <- data.frame(x = seq_len(n) - 1) %>%
  mutate(
    y = 10 + x + 10 * sin(x),
    y = round(y, 1),
    z = (x*y) - median(x*y),
    e = 10 * abs(rnorm(length(x))) + 2,
    e = round(e, 1),
    low = y - e,
    high = y + e,
    value = y,
    name = sample(missing_data$variables, size = n),
    color = rep(colors, length.out = n),
    segmentColor = rep(colors2, length.out = n)
  )
missing_data
fruit[str_length(fruit) <= 5]
|
8640592da1a6439fd80eaf9d3a6e0597dcc1dace
|
f67642256737632b0e4a794af02f2df1aee726b8
|
/man/is.diag_resid.Rd
|
11be1a62647554794eca1f684d6611b789c9efb8
|
[] |
no_license
|
SMAC-Group/exts
|
0a430cc0df20e85903e55eb1ed5c8be76c3c6d8a
|
0aa78daff83dd4dca9fc3e166afbd2a3d726966d
|
refs/heads/master
| 2020-04-17T05:48:48.245078
| 2016-11-14T02:14:00
| 2016-11-14T02:14:00
| 67,654,379
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 353
|
rd
|
is.diag_resid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{is.diag_resid}
\alias{is.diag_resid}
\title{Check whether an object is of class \code{diag_resid}}
\usage{
is.diag_resid(x)
}
\arguments{
\item{x}{A \code{diag_resid} object}
}
\value{
A \code{boolean} indicating \code{TRUE} or \code{FALSE}
}
\description{
Performs a check to see whether the object inherits from the \code{diag_resid} class
}
|
14c37ed30811d817d1da2be04b559863096ab8d8
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615830926-test.R
|
fdbf3182bef6c48035d201ce9eb1207fd155e2f8
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 545
|
r
|
1615830926-test.R
|
# Auto-generated fuzz/valgrind regression input for
# meteor:::ET0_ThornthwaiteWilmott. The values deliberately include NaN,
# +/-Inf, denormals, and an empty `latitude` vector to stress edge cases.
testlist <- list(doy = c(3.01409667740156e-243, -3.20180237041553e-60, 3.01425161743895e-243, NaN, -6.8576842040592e+303, 1.36446060005412e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(NA, 1.69650597018431e+95, Inf, Inf, -6.90488421149407e-258, -Inf, -5.08375287921281e-258, -1.07070466668111e-257, 0))
# Invoke the internal C++ wrapper with the fuzzed arguments and inspect the result.
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
b2c9cddfe12e938723d4671f50ebaba4255ff16e
|
de83e0fb6c3ce2dd4c2dda13cc54f33fb03cfc5a
|
/packages/BfgGiStats/man/differential_gi_analysis.Rd
|
1ac3f90504e33ff8a63a84881a0e5e81616ccb52
|
[] |
no_license
|
a3cel2/BFG_GI_stats
|
1636daadad2ec5e2957be3f0fcb93ae00a58f882
|
365561db2ccb6a097961045bb745d22e4f5687ee
|
refs/heads/master
| 2021-05-08T07:30:27.955184
| 2018-04-28T23:07:30
| 2018-04-28T23:07:30
| 106,863,220
| 0
| 1
| null | 2018-01-29T16:32:24
| 2017-10-13T19:06:04
|
R
|
UTF-8
|
R
| false
| true
| 1,134
|
rd
|
differential_gi_analysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/differential_gi_calls.R
\name{differential_gi_analysis}
\alias{differential_gi_analysis}
\title{Differential Genetic Interaction analysis}
\usage{
differential_gi_analysis(gi_data, fdr_cutoff = 0.05, delta_gi_cutoff = 0,
require_sign_change = T, nn_pair_type = "broad", make_plots = F)
}
\arguments{
\item{gi_data}{processed genetic interaction data}
\item{fdr_cutoff}{false discovery rate cutoff for calling significant GI changes}
\item{delta_gi_cutoff}{effect size cutoff (delta GIS) on top of fdr_cutoff}
\item{require_sign_change}{require different GI classifications in order to call a differential interaction; True or False}
\item{nn_pair_type}{"null distribution" of GI scores used to
compute FDR. Either 'broad' for all interactions with neutral
pairs or 'narrow' for only neutral-neutral pairs}
\item{make_plots}{make histogram of Z scores while executing?}
}
\value{
a data frame with differential genetic interaction comparisons, conditions pairs sorted in alphabetical order
}
\description{
Differential Genetic Interaction analysis
}
|
2262e6a6303cd0cafdbdc95b757a4c70f0e6378b
|
0710a223ae6b7bc07b2e9dcfbd7e166256715c54
|
/chelu_mapamundi.R
|
e1d047f0e038cfe7e000ba63b87887e1eb824aa2
|
[] |
no_license
|
JuanmaMedina/random_projects
|
fa324912b61e3786744e68bdb0e1cdd4e4f18698
|
a72a6962ed58681cbdf4ecc498f6e55229c092b0
|
refs/heads/master
| 2020-09-21T13:54:47.960251
| 2019-11-29T08:35:46
| 2019-11-29T08:35:46
| 224,808,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,394
|
r
|
chelu_mapamundi.R
|
# Create data frame with iso3 country codes and a drug measure
# x data frame (DF) edition instructions:
# Step 1: Identify and write the iso3 codes of interest countries in the "country" column
# Step 2: Associate countries with desired drug measure in the "drug_measure" column
# Fix: the Netherlands entry was "NDL", which is not a valid ISO3 code and
# would silently fail to join onto the world map; the correct code is "NLD".
x <- data.frame(country = c("AUS", "JPN", "FIN", "CZE", "POL", "AUT", "USA", "GBR", "IRL",
"DEU", "DNK", "FRA", "NLD", "BEL", "ESP", "HRV", "SVN", "NOR",
"ITA", "HUN", "ROU", "BGR", "GRC", "TUR", "CHE", "ARE"),
drug_measure = c(5, 1, 2, 1, 1, 3, 4, 4, 5, 11, 1, 1, 2, 2, 4,
4, 1, 1, 3, 1, 1, 2, 1, 1, 3, 2))
# Inspect data
head(x)
# Present frequency data on a world map
# https://slcladal.github.io/maps.html
library(rworldmap)
# Get map
worldmap <- getMap(resolution = "coarse")
# Plot worldmap --> TO-DO: adjust dimensions to optimize resolution
# (use TRUE rather than the reassignable alias T)
plot(worldmap, col = "lightgrey",
     fill = TRUE, border = "darkgray",
     xlim = c(-180, 180), ylim = c(-90, 90),
     bg = "aliceblue",
     asp = 1, wrap = c(-180, 180))
# Combine DF with map --> automatic association of DF to pre-recorded map
# (joins on ISO3 codes in the "country" column of x)
drugMap <- joinCountryData2Map(x, joinCode = "ISO3", nameJoinColumn = "country")
# def. map parameters, e.g. def. colors
mapParams <- mapCountryData(drugMap,
                            # Match this param with the "drug_measure" column <--
                            nameColumnToPlot = "drug_measure",
                            oceanCol = "azure2",
                            catMethod = "categorical",
                            missingCountryCol = gray(.8),
                            colourPalette = c("coral",
                                              "coral2",
                                              "coral3", "orangered",
                                              "orangered3", "orangered4"),
                            addLegend = FALSE,
                            mapTitle = "",
                            border = NA)
# Add legend and display map
do.call(addMapLegendBoxes, c(mapParams,
                             x = 'bottom',
                             title = "Drug measure",
                             horiz = TRUE,
                             bg = "transparent",
                             bty = "n"))
|
1427110e0901230ec30fa908cb05659d93978115
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/RobustCalibration/man/rcalibration_MS.Rd
|
c60c8c46626e3dea4a530c96eb31dc1b68a7d29c
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,361
|
rd
|
rcalibration_MS.Rd
|
\name{rcalibration_MS}
\alias{rcalibration_MS}
%\alias{show.rgasp}
\alias{rcalibration_MS-method}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Setting up the robust Calibration model for multiple sources data
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Setting up the Calibration model for estimating the parameters via MCMC for multiple sources.
%The range and noise-variance ratio parameters are given and/or have been estimated.
}
\usage{
rcalibration_MS(design, observations, p_theta=NULL, index_theta=NULL,
X=as.list(rep(0,length(design))),
have_trend=rep(FALSE,length(design)),
simul_type=rep(0, length(design)),
input_simul=NULL, output_simul=NULL,
simul_nug=rep(FALSE,length(design)),math_model=NULL,
theta_range=NULL, sd_proposal_theta=rep(0.05,p_theta),
sd_proposal_cov_par=NULL,
S=10000,S_0=1000,
discrepancy_type=rep('S-GaSP',length(design)),
kernel_type=rep('matern_5_2',length(design)),
tilde_lambda=rep(1/2,length(design)),
a=NULL,b=NULL,alpha=NULL,
output_weights=NULL)
% \S4method{show}{rgasp}(object)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{design}{a list of observed inputs from multiple sources.
%% ~~Describe \code{design} here~~
}
\item{observations}{ a list of experimental data from multiple sources.
%% ~~Describe \code{response} here~~
}
\item{index_theta}{ a list of vectors for the index of calibration parameter contained in each source.
%% ~~Describe \code{response} here~~
}
\item{p_theta}{an integer about the number of parameters, which should be specified by the user.
%% ~~Describe \code{response} here~~
}
\item{X}{a list of matrices of the mean/trend discrepancy between the reality and math model for multiple sources.
%% ~~Describe \code{trend} here~~
}
\item{have_trend}{a vector of bool value meaning whether we assume a mean/trend discrepancy function.
%% ~~Describe \code{trend} here~~
}
\item{simul_type}{a vector of integers about the math model/simulator for multiple sources. If the simul_type is 0, it means we use the RobustGaSP R package to build an emulator for emulation. If the simul_type is 1, it means the function of the math model is given by the user. When simul_type is 2 or 3, the mathematical model is the geophysical model for Kilauea Volcano. If the simul_type is 2, it means it is for the ascending mode InSAR data; if the simul_type is 3, it means it is for the descending mode InSAR data.
}
\item{input_simul}{a list of matices, each having dimension D x (p_x+p_theta) being the design for emulating the math model. It is only useful if the ith value of simul_type is 0 for the ith source, meaning that we emulate the output of the math model.
}
\item{output_simul}{a list of vectors, each having dimension D x 1 being the math model outputs on the design (input_simul). It is only useful if the ith value of simul_type is 0 for the ith source, meaning that we emulate the output of the math model.
}
\item{simul_nug}{a vectors of bool values meaning whether we have a nugget for emulating the math model/simulator for this source. If the math model is stochastic, we often need a nugget. If simul_Nug is TRUE, it means we have a nugget for the emulator. If simul_Nug is FALSE, it means we do not have a nugget for the emulator.
%% ~~Describe \code{trend} here~~
}
\item{math_model}{a list of functions of the math models provided by the user for multiple sources. It is only useful if simul_type is 1, meaning that we know the math model and it can be computed fast. If the evaluation the math model is computationally slow, one should set simul_type to be 0 to emulate the math model.
}
\item{theta_range}{a p_theta x 2 matrix of the range of the calibration parameters. The first column is the lower bound and the second column is the upper bound. It should be specified by the user if the simul_type is 0.
}
\item{sd_proposal_theta}{a vector of the standard deviation of the proposal distribution for the calibration parameters in MCMC.
}
\item{sd_proposal_cov_par}{a list of vectors of the standard deviation of the proposal distribution for range and nugget parameters in MCMC for each source.
}
\item{S}{an integer about about how many posterior samples to run.
}
\item{S_0}{an integer about about the number of burn-in samples.
}
\item{discrepancy_type}{a vector of characters about the type of the discrepancy for each source. If it is 'no-discrepancy', it means no discrepancy function. If it is 'GaSP', it means the GaSP model for the discrepancy function. If it is 'S-GaSP', it means the S-GaSP model for the discrepancy function.}
\item{kernel_type}{a vector of characters about the type of the discrepancy.type of kernel for each source. \code{matern_3_2} and \code{matern_5_2} are \code{Matern kernel} with roughness parameter 3/2 and 5/2 respectively. \code{pow_exp} is power exponential kernel with roughness parameter alpha. If \code{pow_exp} is to be used, one needs to specify its roughness parameter alpha.}
\item{tilde_lambda}{a vector numeric values about how close the math model to the reality in squared distance when the S-GaSP model is used for modeling the discrepancy for each source.}
\item{a}{a vector of the prior parameter for multiple sources.}
\item{b}{a vector of the prior parameter for multiple sources.}
\item{alpha}{a list of vectors of roughness parameters in the kernel for multiple sources.}
\item{output_weights}{a list of vectors of the weights of the outputs for multiple sources.}
% \item{post_sample}{a matrix of the posterior samples after burn-in.}
% \item{post_value}{a vector of the posterior values after burn-in.}
% \item{accept_S}{a vector of the number of proposed samples of the calibation parameters are accepted in MCMC. The first value is the number of proposed calibration parameters are accepted in MCMC. The second value is the number of proposed range and nugget parameters are accepted, if \code{discrepancy_type} is specified as 'GaSP' or 'S-GaSP'.}
% \item{count_boundary}{a vector of the number of proposed samples of the calibation parameters are outside the range and they are rejected directly.}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%expand here the details.
%}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\code{rcalibration_MS} returns an S4 object of class \code{rcalibration_MS} (see \code{rcalibration_MS-class}).
%If there is an emulator (i.e. simul_type is 0), \code{rcalibration} returns a list with
%\item{rcalibration}{an S4 object of class \code{rcalibration} (see \code{rcalibration-class}.}
%\item{emulator}{an S4 object of class \code{rgasp} produced by RobustGaSP R Package .}
}
\references{
%% ~put references to the literature/web site here ~
A. O'Hagan and M. C. Kennedy (2001), \emph{Bayesian calibration of computer models}, \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology}, \bold{63}, 425-464.
K. R. Anderson and M. P. Poland (2016), \emph{Bayesian estimation of magma supply, storage, and eruption rates using a multiphysical volcano model: Kilauea volcano, 2000-2012.}. \emph{Earth and Planetary Science Letters}, \bold{447}, 161-171.
K. R. Anderson and M. P. Poland (2017), \emph{Abundant carbon in the mantle beneath Hawaii}. \emph{Nature Geoscience}, \bold{10}, 704-708.
M. Gu (2016), \emph{Robust Uncertainty Quantification and Scalable Computation for Computer Models with Massive Output}, Ph.D. thesis., Duke University.
M. Gu and L. Wang (2017) \emph{Scaled Gaussian Stochastic Process for Computer Model Calibration and Prediction}. arXiv preprint arXiv:1707.08215.
M. Gu (2018) \emph{Jointly Robust Prior for Gaussian Stochastic Process in Emulation, Calibration and Variable Selection
}. arXiv preprint arXiv:1804.09329.
}
\author{
\packageAuthor{RobustCalibration}
Maintainer: \packageMaintainer{RobustCalibration}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#------------------------------------------------------------------------------
# An example for calibrating mathematical models for data from multiple sources
#------------------------------------------------------------------------------
library(RobustCalibration)
##reality
test_funct<-function(x){
sin(pi*x/2)+2*cos(pi*x/2)
}
##math model from two sources
math_model_source_1<-function(x,theta){
sin(theta*x)
}
math_model_source_2<-function(x,theta){
cos(theta*x)
}
input1=seq(0,2,2/(10-1))
input2=seq(0,3,3/(15-1))
##
output1=test_funct(input1)+rnorm(length(input1), sd=0.01)
output2=test_funct(input2)+rnorm(length(input2), sd=0.02)
plot(input1, output1)
plot(input2, output2)
design=list()
design[[1]]=as.matrix(input1)
design[[2]]=as.matrix(input2)
observations=list()
observations[[1]]=output1
observations[[2]]=output2
p_theta=1
theta_range=matrix(0,p_theta,2)
theta_range[1,]=c(0, 8)
simul_type=c(1,1)
math_model=list()
math_model[[1]]=math_model_source_1
math_model[[2]]=math_model_source_2
## calibrating two mathematical models for these two sources
model_sgasp=rcalibration_MS(design=design, observations=observations, p_theta=1,
simul_type=simul_type,math_model=math_model,
theta_range=theta_range,
S=10000,S_0=2000,
discrepancy_type=rep('S-GaSP',length(design)))
plot(model_sgasp@post_theta[,1],type='l')
mean(model_sgasp@post_theta[,1])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
ebd180019db5f38e1fd9d8368f533de27f583dc9
|
2cc39aa3e019de9ab77124a6c812ec48934929ad
|
/R/template.R
|
c6b3e3a9645cc229d7c8d087f6337eb7ad6addb5
|
[] |
no_license
|
cran/liGP
|
d997e4794866b42cca165c3c11397b3ba80d98a9
|
b79f54ecd535f48f4e4c390925302380bf5deb25
|
refs/heads/master
| 2023-06-15T12:07:39.187428
| 2021-07-17T05:00:02
| 2021-07-17T05:00:02
| 382,082,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,481
|
r
|
template.R
|
eps <- sqrt(.Machine$double.eps)  # machine-precision jitter used as a default elsewhere

## build_ipTemplate:
##
## Builds a local inducing-point template: optimizes M inducing points
## (via ALC or weighted IMSE) in a size-N neighborhood around the median
## of the design X, then returns the design centered at the origin so it
## can be shifted to any prediction location.
##
## Arguments:
##   X, Y            design matrix and responses (or supply `reps`)
##   M               number of inducing points
##   N               local neighborhood size
##   theta           lengthscale; if NULL, set from neighborhood distances
##   g               nugget
##   method          'wimse' (default) or 'alc' optimization criterion
##   ip_bounds       2-row matrix of bounds for inducing point locations
##   integral_bounds 2-row matrix of integration bounds ('wimse' only)
##   num_thread      threads used for the multistart optimization
##   num_multistart  number of optimization restarts
##   w_var           weight variance passed to the wIMSE criterion
##   epsK, epsQ      jitter added to the K and Q matrices for stability
##   reps            FALSE, TRUE (detect replicates in X via find_reps), or
##                   a precomputed replicate list with X0/Z0/mult/Z/Zlist
##   verbose         print optimization progress
##
## Returns list(Xm.t = centered template, Xn = neighborhood, Xc = center,
## time = elapsed seconds).
build_ipTemplate <- function(X = NULL, Y = NULL, M, N, theta = NULL, g = 1e-4,
                             method = c('wimse', 'alc'),
                             ip_bounds = NULL, integral_bounds = NULL,
                             num_thread = 1, num_multistart = 20, w_var = NULL,
                             epsK = sqrt(.Machine$double.eps), epsQ = 1e-5,
                             reps = FALSE, verbose = TRUE){
  ## Resolve the method up front: selects 'wimse' when the default vector is
  ## left untouched and errors informatively on invalid input (the previous
  ## `!method %in% ...` test produced a length-2 condition for the default).
  method <- match.arg(method)

  ## Collect data from X,Y or from a replicate list (find_reps format)
  if(is.list(reps)){
    if(is.null(reps$X0)) stop('reps doesn\'t include \'X0\' in list')
    if(is.null(reps$Z0)) stop('reps doesn\'t include \'Z0\' in list')
    if(is.null(reps$mult)) stop('reps doesn\'t include \'mult\' in list')
    if(is.null(reps$Z)) stop('reps doesn\'t include \'Z\' in list')
    if(is.null(reps$Zlist)) stop('reps doesn\'t include \'Zlist\' in list')
    reps_list <- reps
    X <- reps$X0
    Y <- reps$Z0
  } else if (is.null(X) | is.null(Y)){
    stop('X and Y are required')
  } else if (reps){
    Xorig <- X; Y_orig <- Y
    reps_list <- find_reps(X, Y)
    X <- reps_list$X0
    Y <- reps_list$Z0
  } else reps_list <- FALSE

  ###### Sanity checks ######
  if(nrow(X) != length(Y))  ## Number of entries check
    stop('Number of entries in Y doesn\'t match nrows of X')
  if (M > N)
    warning('Number of inducing points (M) > Neighborhood size (N)')
  if(N > nrow(X))  ## Neighborhood cannot be bigger than data size
    stop('N is greater than the number of rows in X')
  if(!is.null(theta))
    if (theta <= 0)
      stop('theta should be a positive number')
  if (g < 0)
    stop('g must be positive')
  if(method == 'wimse'){
    Xrange <- apply(X, 2, range)
    if (!is.null(integral_bounds))
      if(sum(Xrange[1,] < integral_bounds[1,]) > 0 ||
         sum(Xrange[2,] > integral_bounds[2,]) > 0)
        stop('X outside integration bounds')
    if(is.null(integral_bounds))
      integral_bounds <- Xrange
  }
  if(!is.null(ip_bounds)){
    if(nrow(ip_bounds) != 2)
      stop('ip_bounds should be a matrix with two rows')
    if(sum(ip_bounds[1,] < ip_bounds[2,]) < ncol(X))
      stop('At least one dimensions bounds in ip_bounds is incorrect')
  }
  if(num_multistart < 0 | num_multistart %% 1 != 0)
    stop('num_multistart is not a positive integer')

  ## Checks that number of threads is valid
  if(num_thread %% 1 != 0 | num_thread < 1)
    stop('num_thread is not a positive integer')
  if (num_thread > num_multistart){
    warning(paste("num_thread > num_multistart. num_thread set to", num_multistart))
    num_thread <- num_multistart
  }
  available_cores <- detectCores()
  if (num_thread > available_cores){
    warning(paste("num_thread exceeds number of available cores.",
                  "num_thread set to", available_cores))
    num_thread <- available_cores
  }
  if (epsK <= 0)
    stop('epsK should be a positive number')
  if (epsQ <= 0)
    stop('epsQ should be a positive number')

  ## For timing
  t1 <- proc.time()[3]

  ## Build the neighborhood at the center (median) of the design.
  ## BUG FIX: the replicate branch previously stored the neighborhood in
  ## 'reps_n_list' while the optimizer calls below read 'rep_n_list',
  ## leaving the latter undefined; both branches now populate 'rep_n_list'.
  Xc <- matrix(apply(X, 2, median), nrow = 1)
  if(is.list(reps_list)){
    rep_n_list <- build_neighborhood(N, Xc, reps_list = reps_list)
    Xn <- rep_n_list$Xn
    Yn <- rep_n_list$Yn
  } else {
    rep_n_list <- NULL
    neighborhood <- build_neighborhood(N, Xc, X, Y)
    Xn <- neighborhood$Xn; Yn <- neighborhood$Yn
  }
  neighborhood_box <- apply(Xn, 2, range)
  if(is.null(ip_bounds))
    ip_bounds <- neighborhood_box
  ## Default lengthscale: squared 10% quantile of neighborhood distances
  if (is.null(theta)) theta <- quantile(dist(Xn), .1)^2

  if (method == 'alc'){
    ## Builds inducing point design by optimizing ALC
    p <- optIP.ALC(Xc = Xc, Xref = NULL, M = M, Xn = Xn,
                   Yn = Yn, theta = theta, g = g,
                   ip_bounds = ip_bounds, num_thread = num_thread,
                   num_multistart = num_multistart,
                   verbose = verbose, epsQ = epsQ, epsK = epsK,
                   rep_list = rep_n_list)
  } else {
    ## Builds inducing point design by optimizing weighted IMSE
    p <- optIP.wIMSE(Xn = Xn, M = M, theta = theta,
                     g = g, w_mean = Xc, ip_bounds = ip_bounds,
                     integral_bounds = integral_bounds, w_var = w_var,
                     num_multistart = num_multistart, verbose = verbose,
                     epsQ = epsQ, epsK = epsK, mult = rep_n_list$mult)
  }
  ## Center the optimized design at the origin (common to both branches)
  Xm.t <- sweep(p$Xm, 2, Xc)

  ## For timing
  t2 <- proc.time()[3]
  return(list(Xm.t = Xm.t, Xn = Xn, Xc = Xc, time = t2 - t1))
}
## scale_ipTemplate:
##
## Scales a inducing points design in [0,1]^d to fill local neighborhood.
## Returns template design centered at the origin.
## scale_ipTemplate:
##
## Rescales a space-filling design on [0,1]^d into an inducing-point
## template that fills the local neighborhood around the design's median
## point. The returned template is centered at the origin and always
## includes the origin itself as its first row.
scale_ipTemplate <- function(X, N, space_fill_design,
                             method = c('qnorm', 'chr')){
  start_time <- proc.time()[3]

  ## ---- input validation ----
  if(N > nrow(X))  ## neighborhood cannot exceed the data size
    stop('N is greater than the number of rows in X')
  if (ncol(space_fill_design) != ncol(X))
    stop('A space filling design was supplied with an incorrect ',
         'number of columns.')
  if (nrow(space_fill_design) > N)
    warning('Size of space_filling_design > Neighborhood size (N)')
  if (!method %in% c('qnorm','chr'))
    stop('A valid method was not given. Choices include: qnorm, chr')

  ## ---- neighborhood around the design's median point ----
  center <- matrix(apply(X, 2, median), nrow = 1)
  Xn <- build_neighborhood(N, center, X)$Xn
  box <- apply(Xn, 2, range)

  d <- ncol(X)
  if(method == 'qnorm'){
    ## Spread points via the inverse normal CDF: per-dimension sd chosen so
    ## the farthest neighborhood edge sits at three standard deviations.
    sd_per_dim <- apply(abs(sweep(box, 2, center)), 2, max) / 3
    scaled <- qnormscale(space_fill_design, rep(0, d), sd_per_dim)
  } else {
    ## Stretch the unit design onto a circumscribed hyperrectangle.
    scaled <- sweep(space_fill_design - .5, 2, box[2,] - box[1,], '*')
  }
  ## Prepend the origin (the local center) to the template.
  Xm.t <- rbind(rep(0, d), scaled)

  end_time <- proc.time()[3]
  return(list(Xm.t = Xm.t, Xn = Xn, time = end_time - start_time))
}
## qnormscale:
##
## The function that scales X to center around mean
## with standard deviations determined by sd, which can be vectorized
## qnormscale:
##
## Pushes each column of X (entries interpreted as probabilities) through
## the inverse normal CDF, centered at `mean` with spread `sd`. Both
## `mean` and `sd` may be a single scalar (recycled across all columns)
## or a vector with one entry per column of X.
qnormscale <- function(X, mean, sd){
  d <- ncol(X)

  ## Recycle scalars to one value per column; reject other length mismatches.
  if (length(mean) == 1) {
    mean <- rep(mean, d)
  } else if (length(mean) != d) {
    stop("X and mean dimension mismatch")
  }
  if (length(sd) == 1) {
    sd <- rep(sd, d)
  } else if (length(sd) != d) {
    stop("X and sd dimension mismatch")
  }

  ## Transform each column independently through qnorm.
  for (j in seq_len(d)) {
    X[, j] <- qnorm(X[, j], mean = mean[j], sd = sd[j])
  }
  X
}
## build_gauss_measure_ipTemplate:
##
## Creates an inducing points design based on a local neighborhood
## for a Gaussian measure slice. Inducing points are optimized with
## wIMSE, and then returned centered at the origin
## build_gauss_measure_ipTemplate:
##
## Builds an inducing-point template for prediction along a Gaussian
## measure (a one-dimensional slice with Gaussian weight): constructs a
## local neighborhood around the slice at the center of the design,
## optimizes M inducing points with weighted IMSE, and returns the design
## centered at the origin.
##
## Arguments mirror build_ipTemplate, plus:
##   gauss_sd   per-dimension sd of the measure; must be non-zero in
##              exactly one dimension
##   seq_length number of reference points along the measure's slice
##
## Returns list(Xm.t, Xn, Xc, gauss_sd (adjusted), time).
build_gauss_measure_ipTemplate <- function(X = NULL, Y = NULL, M, N, gauss_sd,
                                           theta = NULL, g = 1e-4,
                                           seq_length = 20, ip_bounds = NULL,
                                           integral_bounds = NULL,
                                           num_multistart = 20,
                                           epsK = sqrt(.Machine$double.eps),
                                           epsQ = 1e-5, reps = FALSE,
                                           verbose = TRUE){
  ## Collect data from X,Y or from a replicate list (find_reps format)
  if(is.list(reps)){
    if(is.null(reps$X0)) stop('reps doesn\'t include \'X0\' in list')
    if(is.null(reps$Z0)) stop('reps doesn\'t include \'Z0\' in list')
    if(is.null(reps$mult)) stop('reps doesn\'t include \'mult\' in list')
    if(is.null(reps$Z)) stop('reps doesn\'t include \'Z\' in list')
    if(is.null(reps$Zlist)) stop('reps doesn\'t include \'Zlist\' in list')
    reps_list <- reps
    X <- reps$X0
    Y <- reps$Z0
  } else if (is.null(X) | is.null(Y)){
    stop('X and Y are required')
  } else if (reps){
    Xorig <- X; Y_orig <- Y
    reps_list <- find_reps(X, Y)
    X <- reps_list$X0
    Y <- reps_list$Z0
  } else reps_list <- FALSE

  ###### Sanity checks ######
  if(nrow(X) != length(Y))  ## Number of entries check
    stop('Number of entries in Y doesn\'t match nrows of X')
  if (M > N)
    warning('Number of inducing points (M) > Neighborhood size (N)')
  if(N > nrow(X))  ## Neighborhood cannot be bigger than data size
    stop('N is greater than the number of rows in X')
  nonzero_dim <- which(gauss_sd != 0)
  if(length(nonzero_dim) > 1)
    stop('The Gaussian measure can only have a non-zero gauss_sd ',
         'in one dimension.')
  if(length(gauss_sd) != ncol(X))
    stop('The number of entries and gauss_sd and ncol(X) do not match')
  if(!is.null(theta))
    if (theta <= 0)
      stop('theta should be a positive number')
  if (g < 0)
    stop('g must be positive')
  Xrange <- apply(X, 2, range)
  if (!is.null(integral_bounds))
    if(sum(Xrange[1,] < integral_bounds[1,]) > 0 ||
       sum(Xrange[2,] > integral_bounds[2,]) > 0)
      stop('X outside integration bounds')
  if(is.null(integral_bounds))
    integral_bounds <- Xrange
  if(!is.null(ip_bounds)){
    if(nrow(ip_bounds) != 2)
      stop('ip_bounds should be a matrix with two rows')
    if(sum(ip_bounds[1,] < ip_bounds[2,]) < ncol(X))
      stop('At least one dimensions bounds in ip_bounds is incorrect')
  }
  ## BUG FIX: the invalid-num_multistart branch previously had no stop()
  ## of its own — the epsK check became its body, so a bad num_multistart
  ## passed silently and epsK was only validated in that broken branch.
  if(num_multistart < 0 | num_multistart %% 1 != 0)
    stop('num_multistart is not a positive integer')
  if (epsK <= 0)
    stop('epsK should be a positive number')
  if (epsQ <= 0)
    stop('epsQ should be a positive number')

  ## For timing
  t1 <- proc.time()[3]

  ##-----------------------------------------
  ## Builds neighborhood at center of design
  Xc <- matrix(apply(X, 2, median), nrow = 1)

  ## Reference set for the Gaussian measure: a sequence spanning +/- 2
  ## gauss_sd along the single non-degenerate dimension, fixed at the
  ## center in every other dimension.
  ndim <- ncol(X)
  dfs <- list()
  for (i in 1:ndim){
    if (i == nonzero_dim) {
      dfs[[i]] <- seq(Xc[,i] - 2*gauss_sd[i], Xc[,i] + 2*gauss_sd[i],
                      length = seq_length)
    } else {
      dfs[[i]] <- Xc[,i]
    }
  }
  Xc_measure <- as.matrix(expand.grid(dfs[1:ndim]))

  ## Neighborhood: the N design points closest to the measure's slice.
  ## BUG FIX: 'closest_indices' is also needed below when replicates are
  ## supplied, so define it in the N == nrow(X) branch as well.
  if(N == nrow(X)){
    closest_indices <- rep(TRUE, nrow(X))
    Xn <- X
  } else {
    xx_dists <- distance(Xc_measure, X)
    min_dists <- apply(xx_dists, 2, min)
    quant <- quantile(min_dists, N/nrow(X))
    closest_indices <- min_dists < quant
    Xn <- X[closest_indices,]
  }
  neighborhood_box <- apply(Xn, 2, range)
  Xnc_theta <- darg(NULL, Xn)$start

  ## Give the degenerate dimensions a small non-zero sd, proportional to
  ## the neighborhood's aspect ratio, so they carry some weight.
  nonzero2zero.ratio <- (neighborhood_box[2, -nonzero_dim] -
                           neighborhood_box[1, -nonzero_dim])/
    (neighborhood_box[2, nonzero_dim] - neighborhood_box[1, nonzero_dim])
  gauss_sd[-nonzero_dim] <- nonzero2zero.ratio*gauss_sd[nonzero_dim]

  if(is.list(reps_list)){
    rep_n_list <- list(mult = reps_list$mult[closest_indices],
                       Z = matrix(c(unlist(reps_list$Zlist[closest_indices]))))
  } else rep_n_list <- NULL
  if(is.null(ip_bounds))
    ip_bounds <- neighborhood_box

  ##------------------------------------------------------------
  ## Optimize the inducing points by weighted IMSE; on numerical failure,
  ## retry with progressively larger jitter (epsQ first, then epsK).
  ## NOTE(review): the loop condition tests the original epsQ/epsK values,
  ## so the epsK branch is only reachable when epsQ >= 1e-3 at entry —
  ## preserved as-is; confirm this is the intended escalation order.
  Xm.wimse <- try(optIP.wIMSE(Xn = Xn, M = M, theta = Xnc_theta, g = g,
                              w_mean = Xc, w_var = gauss_sd^2,
                              ip_bounds = ip_bounds,
                              integral_bounds = integral_bounds,
                              num_multistart = num_multistart, verbose = verbose,
                              epsQ = epsQ, epsK = epsK, mult = rep_n_list$mult)$Xm,
                  silent = TRUE)
  increase_epsK <- increase_epsQ <- 1
  while (class(Xm.wimse)[1] == 'try-error' & (epsK < 1e-3 & epsQ < 1e-3)) {
    if (epsQ < 1e-3){
      Xm.wimse <- try(optIP.wIMSE(Xn = Xn, M = M, theta = Xnc_theta, g = g,
                                  w_mean = Xc, w_var = gauss_sd^2,
                                  ip_bounds = ip_bounds,
                                  integral_bounds = integral_bounds,
                                  num_multistart = num_multistart, verbose = verbose,
                                  epsQ = epsQ*(10^increase_epsQ),
                                  epsK = epsK, mult = rep_n_list$mult)$Xm, silent = TRUE)
      increase_epsQ <- increase_epsQ + 1
    } else {
      increase_epsQ <- 1
      Xm.wimse <- try(optIP.wIMSE(Xn = Xn, M = M, theta = Xnc_theta, g = g,
                                  w_mean = Xc, w_var = gauss_sd^2,
                                  ip_bounds = ip_bounds,
                                  integral_bounds = integral_bounds,
                                  num_multistart = num_multistart, verbose = verbose,
                                  epsQ = epsQ, epsK = epsK*(10^increase_epsK),
                                  mult = rep_n_list$mult)$Xm, silent = TRUE)
      increase_epsK <- increase_epsK + 1
    }
  }
  ## Center the template at the origin
  Xm.t <- sweep(Xm.wimse, 2, Xc)

  ## For timing
  t2 <- proc.time()[3]
  return(list(Xm.t = Xm.t, Xn = Xn, Xc = Xc, gauss_sd = gauss_sd, time = t2 - t1))
}
## scale_gauss_measure_ipTemplate:
##
## Scales a inducing points design in [0,1]^d to fill local neighborhood.
## Returns template design centered at the origin.
## scale_gauss_measure_ipTemplate:
##
## Scales an inducing points design in [0,1]^d to fill the local
## neighborhood of a Gaussian-measure slice. Returns the template centered
## at the origin, along with the neighborhood (Xn), the center (Xc), the
## adjusted gauss_sd and the elapsed time.
##
## Arguments:
##   X                 design matrix
##   N                 local neighborhood size
##   gauss_sd          per-dimension sd of the measure; must be non-zero in
##                     exactly one dimension
##   space_fill_design space-filling design on [0,1]^d to be rescaled
##   method            'qnorm' (inverse normal CDF spread) or 'chr'
##                     (circumscribed hyperrectangle)
##   seq_length        number of reference points along the measure's slice
scale_gauss_measure_ipTemplate <- function(X, N, gauss_sd,
                                           space_fill_design,
                                           method = c('qnorm','chr'),
                                           seq_length=20){
  t1 <- proc.time()[3]  # for timing

  ###### Sanity checks ######
  if(N > nrow(X)) ## Neighorhood cannot be bigger than data size
    stop('N is greater than the number of rows in X')
  nonzero_dim <- which(gauss_sd!=0)
  if(length(nonzero_dim) > 1)
    stop('The Gaussian measure can only have a non-zero gauss_sd ',
         'in one dimension.')
  if(length(gauss_sd) != ncol(X))
    stop('The number of entries and gauss_sd and ncol(X) do not match')
  if (ncol(space_fill_design) != ncol(X))
    stop('A space filling design was supplied with an incorrect ',
         'number of columns.')
  if (nrow(space_fill_design) > N)
    warning('Size of space_filling_design > Neighborhood size (N)')
  if (!method %in% c('qnorm','chr'))
    stop('A valid method was not given. Choices include: qnorm, chr')

  ##-----------------------------------------
  ## Builds neighborhood at center of design
  Xc <- matrix(apply(X, 2, median), nrow=1)

  # Construct reference set for Gaussian measure: a sequence spanning
  # +/- 2 gauss_sd along the non-degenerate dimension, fixed at the
  # center in every other dimension.
  ndim <- ncol(X)
  dfs <- list()
  for (i in 1:ndim){
    if (i == nonzero_dim) {
      dfs[[i]] <- seq(Xc[,i] - 2*gauss_sd[i], Xc[,i] + 2*gauss_sd[i],
                      length=seq_length)
    } else{
      dfs[[i]] <- Xc[,i]
    }
  }
  Xc_measure <- as.matrix(expand.grid(dfs[1:ndim]))

  # Build Xc neighborhood: the N design points closest to the measure's
  # reference set (or all points when N == nrow(X)).
  if(N == nrow(X)){
    Xn <- X
  } else{
    xx_dists <- distance(Xc_measure, X)
    min_dists <- apply(xx_dists, 2, min)
    quant <- quantile(min_dists, N/nrow(X))
    closest_indices <- min_dists < quant
    Xn <- X[closest_indices,]
  }
  neighborhood_box <- apply(Xn, 2, range)
  # NOTE(review): Xnc_theta is computed here but never used in this
  # function — possibly left over from build_gauss_measure_ipTemplate;
  # confirm before removing.
  Xnc_theta <- darg(NULL, Xn)$start

  ## Change gauss_sd to allow some weight in dimensions where it's zero:
  ## scale by the neighborhood's aspect ratio relative to the non-zero dim.
  nonzero2zero.ratio <- (neighborhood_box[2,-nonzero_dim] -
                           neighborhood_box[1,-nonzero_dim])/
    (neighborhood_box[2,nonzero_dim] - neighborhood_box[1,nonzero_dim])
  gauss_sd[-nonzero_dim] <- nonzero2zero.ratio*gauss_sd[nonzero_dim]

  if(method == 'qnorm'){
    ## Scales design by inverse normal CDF: per-dimension sd chosen so the
    ## farthest neighborhood edge sits at three standard deviations.
    dist_from_Xc <- sweep(neighborhood_box, 2, Xc)
    qnorm_sd <- apply(abs(dist_from_Xc), 2, max)/3
    Xm.qnorm <- qnormscale(space_fill_design, rep(0, ncol(X)), qnorm_sd)
    Xm.t <- rbind(rep(0,ncol(Xn)), Xm.qnorm)  # prepend the origin row
  } else {
    ## Scales design to a circumscribed hyperrectangle
    Xm.t <- sweep(space_fill_design - .5, 2,
                  neighborhood_box[2,] - neighborhood_box[1,], '*')
    Xm.t <- rbind(rep(0, ncol(Xn)), Xm.t)  # prepend the origin row
  }
  ## For timing
  t2 <- proc.time()[3]
  return(list(Xm.t=Xm.t, Xn=Xn, Xc=Xc, gauss_sd=gauss_sd, time=t2 - t1))
}
|
f3cbd9c397377e6b379b77469d630f5ed6b882b2
|
2d1866e3a065b074f7a0a8029d170f204c9faa18
|
/inst/doc/indexing.R
|
0d7213d886957091ea49e2e0fb0f2b3ef8f23b15
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
cran/nhdplusTools
|
4f5af6385b022269d5e71407f76faaaadac6de25
|
ce7303826ccc687527ef6ca8292df92aba9253fd
|
refs/heads/master
| 2023-09-03T19:21:32.397941
| 2023-08-31T07:40:05
| 2023-08-31T09:30:59
| 236,631,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,946
|
r
|
indexing.R
|
## ----setup, include = FALSE---------------------------------------------------
# knitr setup chunk (this file is purled from the package vignette): chunks
# are only evaluated and cached when BUILD_VIGNETTES is "TRUE".
library(nhdplusTools)
local <- (Sys.getenv("BUILD_VIGNETTES") == "TRUE")
if(local) {
  # Persistent cache inside the nhdplusTools data dir for local builds
  cache_path <- file.path(nhdplusTools_data_dir(), "index_v")
} else {
  cache_path <- tempdir()
}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  fig.width=6,
  fig.height=4,
  eval=local,   # chunks run only when building locally
  cache=local,
  cache.path=(cache_path)
)
# Save current options so the teardown chunk can restore them.
oldoption <- options(scipen = 9999,
                     "rgdal_show_exportToProj4_warnings"="none")

## ----nhdplus_path_setup, echo=FALSE, include=FALSE----------------------------
# Copy the packaged sample geopackage into a scratch working directory.
library(dplyr, warn.conflicts = FALSE)
work_dir <- file.path(nhdplusTools_data_dir(), "index_vignette")
dir.create(work_dir, recursive = TRUE)
source(system.file("extdata/sample_data.R", package = "nhdplusTools"))
file.copy(sample_data,
          file.path(work_dir, "natseamless.gpkg"))

## ----nhdplus_path, echo=TRUE--------------------------------------------------
# Point nhdplusTools at the sample data and read the layers used below.
library(nhdplusTools)
nhdplus_path(file.path(work_dir, "natseamless.gpkg"))
flowlines <- sf::read_sf(nhdplus_path(), "NHDFlowline_Network")
gages <- sf::read_sf(nhdplus_path(), "Gage")
## ----get_indexes--------------------------------------------------------------
# Index each gage to its single nearest flowline within 200 m, after
# projecting to EPSG:5070 (CONUS Albers) so the radius is in meters.
indexes <- get_flowline_index(sf::st_transform(flowlines, 5070), # albers
                              sf::st_transform(sf::st_geometry(gages), 5070),
                              search_radius = units::set_units(200, "meters"),
                              max_matches = 1)
# Re-attach the gage geometry, keeping one row per gage via the id join.
indexes <- left_join(sf::st_sf(id = c(1:nrow(gages)),
                               geom = sf::st_geometry(gages)),
                     indexes, by = "id")
plot(sf::st_geometry(sf::st_zm(flowlines)))
plot(sf::st_geometry(indexes), add = TRUE)

## ----analyze_index------------------------------------------------------------
# Compare the computed indexes against the COMID/REACHCODE attributes that
# ship with the NHDPlus gages layer.
p_match <- 100 * length(which(indexes$COMID %in% gages$FLComID)) / nrow(gages)
paste0(round(p_match, digits = 1),
       "% were found to match the COMID in the NHDPlus gages layer")
p_match <- 100 * length(which(indexes$REACHCODE %in% gages$REACHCODE)) / nrow(gages)
paste0(round(p_match, digits = 1),
       "% were found to match the REACHCODE in the NHDPlus gages layer")
# For gages matched to the same reach, summarize the difference between the
# computed measure and the reference measure.
matched <- cbind(indexes,
                 dplyr::select(sf::st_drop_geometry(gages),
                               REACHCODE_ref = REACHCODE,
                               COMID_ref = FLComID,
                               REACH_meas_ref = Measure)) %>%
  dplyr::filter(REACHCODE == REACHCODE_ref) %>%
  dplyr::mutate(REACH_meas_diff = REACH_meas - REACH_meas_ref)
hist(matched$REACH_meas_diff, breaks = 100,
     main = "Difference in measure for gages matched to the same reach.")
round(quantile(matched$REACH_meas_diff,
               probs = c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1)),
      digits = 2)

## ----get_indexes_precise------------------------------------------------------
# Repeat the indexing with precision = 10 (presumably a 10 m geometry
# densification — confirm against the get_flowline_index docs).
indexes <- get_flowline_index(flowlines,
                              sf::st_geometry(gages),
                              search_radius = units::set_units(0.1, "degrees"),
                              precision = 10)
indexes <- left_join(data.frame(id = seq_len(nrow(gages))), indexes, by = "id")

## ----analyze_inde_precise-----------------------------------------------------
# Same match-rate and measure-difference summary, for the precise run.
p_match <- 100 * length(which(indexes$COMID %in% gages$FLComID)) / nrow(gages)
paste0(round(p_match, digits = 1),
       "% were found to match the COMID in the NHDPlus gages layer")
p_match <- 100 * length(which(indexes$REACHCODE %in% gages$REACHCODE)) / nrow(gages)
paste0(round(p_match, digits = 1),
       "% were found to match the REACHCODE in the NHDPlus gages layer")
matched <- cbind(indexes,
                 dplyr::select(sf::st_set_geometry(gages, NULL),
                               REACHCODE_ref = REACHCODE,
                               COMID_ref = FLComID,
                               REACH_meas_ref = Measure)) %>%
  dplyr::filter(REACHCODE == REACHCODE_ref) %>%
  dplyr::mutate(REACH_meas_diff = REACH_meas - REACH_meas_ref)
hist(matched$REACH_meas_diff, breaks = 100,
     main = "Difference in measure for gages matched to the same reach.")
round(quantile(matched$REACH_meas_diff,
               probs = c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1)), digits = 2)
## ----multi--------------------------------------------------------------------
# Ask for up to 10 candidate matches per gage, then inspect gage 42.
all_indexes <- get_flowline_index(flowlines,
                                  sf::st_geometry(gages),
                                  search_radius = units::set_units(0.01, "degrees"),
                                  max_matches = 10)
indexes <- left_join(sf::st_sf(id = 42,
                               geom = sf::st_geometry(gages)[42]),
                     all_indexes[all_indexes$id == 42, ], by = "id")
plot(sf::st_geometry(sf::st_buffer(indexes, 500)), border = NA)
plot(sf::st_geometry(indexes), add = TRUE)
plot(sf::st_geometry(sf::st_zm(flowlines)), col = "blue", add = TRUE)
indexes

## ----disamb-------------------------------------------------------------------
# Disambiguate the candidates by comparing each gage's drainage area
# (DASqKm) to the flowline's total drainage area (TotDASqKM).
unique_indexes <- disambiguate_flowline_indexes(
  all_indexes,
  flowlines[, c("COMID", "TotDASqKM"), drop = TRUE],
  data.frame(ID = seq_len(nrow(gages)),
             area = gages$DASqKm))
unique_index <- left_join(sf::st_sf(id = 42,
                                    geom = sf::st_geometry(gages)[42]),
                          unique_indexes[unique_indexes$id == 42, ], by = "id")
plot(sf::st_geometry(sf::st_buffer(indexes, 500)), border = NA)
plot(sf::st_geometry(indexes), add = TRUE)
# Candidate flowlines in grey; the disambiguated winner in blue.
plot(sf::st_geometry(sf::st_zm(flowlines[flowlines$COMID %in% indexes$COMID,])),
     col = "grey", lwd = 3, add = TRUE)
plot(sf::st_geometry(sf::st_zm(flowlines[flowlines$COMID %in% unique_index$COMID,])),
     col = "blue", add = TRUE)
unique_index

## ----waterbodies--------------------------------------------------------------
# Rebuild gage points from their lon/lat site columns (WGS84) and plot them
# together with flowlines and waterbodies.
waterbody <- sf::read_sf(nhdplus_path(), "NHDWaterbody")
gages <- sf::st_drop_geometry(gages) %>%
  dplyr::filter(!is.na(LonSite)) %>%
  sf::st_as_sf(coords = c("LonSite", "LatSite"), crs = 4326)
plot(sf::st_geometry(sf::st_zm(flowlines)))
plot(sf::st_geometry(waterbody), add = TRUE)
plot(sf::st_geometry(gages), add = TRUE)
## ----index_waterbodies--------------------------------------------------------
# Index gages to flowlines (200 m search) and to waterbodies, then combine.
flowline_indexes <- left_join(data.frame(id = seq_len(nrow(gages))),
                              get_flowline_index(
                                sf::st_transform(flowlines, 5070),
                                sf::st_geometry(sf::st_transform(gages, 5070)),
                                search_radius = units::set_units(200, "m")), by = "id")
indexed_gages <- cbind(dplyr::select(gages,
                                     orig_REACHCODE = REACHCODE,
                                     orig_Measure = Measure,
                                     FLComID,
                                     STATION_NM),
                       flowline_indexes,
                       get_waterbody_index(
                         ## FIX: qualify the sf functions — sf is never
                         ## attached in this script, so the previous bare
                         ## st_transform()/st_drop_geometry() calls would
                         ## fail; every other call here uses the sf:: prefix.
                         sf::st_transform(waterbody, 5070),
                         sf::st_transform(gages, 5070),
                         sf::st_drop_geometry(flowlines),
                         search_radius = units::set_units(200, "m")))
plot(sf::st_geometry(sf::st_zm(flowlines)))
plot(sf::st_geometry(waterbody), add = TRUE)
plot(sf::st_geometry(indexed_gages), add = TRUE)
dplyr::select(sf::st_drop_geometry(indexed_gages), near_wb_COMID, near_wb_dist, in_wb_COMID, outlet_fline_COMID)

## ----teardown, include=FALSE--------------------------------------------------
# Restore saved options and remove the scratch directory unless building
# vignettes locally.
options(oldoption)
if(Sys.getenv("BUILD_VIGNETTES") != "TRUE") {
  unlink(work_dir, recursive = TRUE)
}
|
850d667a57ab8de922a055d70344b1ff18786282
|
4231f527b668f4f082f617c679fef87cedb15bfa
|
/R/models-nlme.R
|
ce56f497f99716879750f5e5f1b4558b4ee96692
|
[
"MIT"
] |
permissive
|
camroach87/1901-nlmets
|
86329614e5d543e6d8a5ad4ccb56c9ae5ee09f89
|
bd72f855c36e989c3537f93d4adca8e27f7cd652
|
refs/heads/master
| 2023-01-29T20:02:53.934713
| 2020-12-11T02:37:56
| 2020-12-11T02:37:56
| 218,911,330
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,139
|
r
|
models-nlme.R
|
# Note: predict.lme throws errors if formula is created in `lme` function call.
# Need to use eval and substitute to get around this.
# Reference: https://stat.ethz.ch/pipermail/r-help/2003-January/029199.html
## Fit a separate quadratic model of log(wh) on scaled_temperature for each
## building id (bid). Returns a nested tibble of (bid, fit) tagged with the
## "ind_lm" class so predict.ind_lm dispatches on it.
fit_ind_lm <- function(data) {
  fits <- data %>%
    group_by(bid) %>%
    nest() %>%
    mutate(fit = map(data, function(d) {
      lm(log(wh) ~ poly(scaled_temperature, degree = 2), data = d)
    })) %>%
    select(bid, fit)
  class(fits) <- c("ind_lm", "tbl_df", "tbl", "data.frame")
  fits
}
## Fit a separate natural-spline model of log(wh) on the supplied weather
## variables (right-hand side built by get_terms) for each building id.
## Same (bid, fit) / "ind_lm" return contract as fit_ind_lm.
fit_ind_ns <- function(data, wvars) {
  rhs <- get_terms(wvars)
  fits <- data %>%
    group_by(bid) %>%
    nest() %>%
    mutate(fit = map(data, function(d) {
      lm(paste("log(wh) ~", rhs), data = d)
    })) %>%
    select(bid, fit)
  class(fits) <- c("ind_lm", "tbl_df", "tbl", "data.frame")
  fits
}
## Predict method for the "ind_lm" per-building model collection: nests the
## new data by building id, joins on each building's stored fit, and
## predicts with that building's own model. Returns a plain numeric vector.
predict.ind_lm <- function(object, newdata) {
  # FIXME: if more than one value coming in here the values might get out of
  # alignment after nesting and unnesting. I.e. the returned predictions
  # follow the nested/join ordering, not necessarily the row order of
  # `newdata` — TODO confirm before relying on row alignment.
  newdata %>%
    group_by(bid) %>%
    nest() %>%
    inner_join(object, by = "bid") %>%                     # attach each bid's fit
    mutate(pred = map2(fit, data, ~ predict(.x, .y))) %>%  # per-bid predictions
    unnest(pred) %>%
    pull(pred)                                             # flatten to numeric
}
## Pooled model: a single lm across all buildings, with a fixed effect per
## building id (bid) plus the shared weather terms from get_terms.
fit_pool <- function(data, wvars) {
  rhs <- get_terms(wvars)
  formula_text <- paste("log(wh) ~ bid +", rhs)
  lm(formula_text, data = data)
}
## Random-intercept model: log(wh) on the spline weather terms with a
## per-building (bid) random intercept, fit by maximum likelihood.
## The eval/substitute wrapper inlines the formula object into the lme call
## because predict.lme errors when the formula is created inside the call
## (see the note at the top of this file).
fit_ri <- function(data, wvars) {
  terms <- get_terms(wvars)
  form <- as.formula(paste("log(wh) ~ ", terms))
  eval(substitute(
    lme(form,
        data = data,
        random = ~ 1 | bid,   # random intercept per building
        method = "ML",
        control = lmeControl(opt = "optim",
                             msMaxIter=100,
                             # per nlme docs: return the fit instead of
                             # erroring when convergence is not reached
                             returnObject = TRUE)),
    list(form=form)
  ))
}
## Random intercept-and-slope model: like fit_ri, but each building also
## gets a random slope in scaled_temperature. Uses the same eval/substitute
## formula inlining (predict.lme workaround — see top-of-file note).
fit_ris <- function(data, wvars) {
  terms <- get_terms(wvars)
  form <- as.formula(paste("log(wh) ~ ", terms))
  eval(substitute(
    lme(form,
        data = data,
        random = ~ scaled_temperature | bid,  # intercept + temp slope per bid
        method = "ML",
        control = lmeControl(opt = "optim",
                             msMaxIter=100,
                             returnObject = TRUE)),
    list(form=form)
  ))
}
## Subject-specific-curve model: random natural-spline (df = 3)
## coefficients of scaled_temperature per building, so each building gets
## its own smooth temperature response. Same eval/substitute formula
## inlining as the other lme fits (predict.lme workaround).
fit_ssc <- function(data, wvars) {
  terms <- get_terms(wvars)
  form <- as.formula(paste("log(wh) ~ ", terms))
  eval(substitute(
    lme(form,
        data = data,
        random = ~ ns(scaled_temperature, df = 3) | bid,  # per-bid spline curve
        method = "ML",
        control = lmeControl(opt = "optim",
                             msMaxIter=100,
                             returnObject = TRUE)),
    list(form=form)
  ))
}
## Subject-specific-curve model with building attributes: as fit_ssc, plus
## fixed effects for building characteristics (base building feed, DX
## system, electric element heating, central distance).
fit_ssc_attr <- function(data, wvars) {
  terms <- get_terms(wvars)
  form <- as.formula(paste("log(wh) ~ ", terms,
                           "+ basebldngfeedonly + dxsystem + electricelementheating",
                           "+ centraldist"))
  eval(substitute(
    lme(form,
        data = data,
        random = ~ ns(scaled_temperature, df = 3) | bid,  # per-bid spline curve
        method = "ML",
        control = lmeControl(opt = "optim",
                             msMaxIter=100,
                             returnObject = TRUE)),
    list(form=form)
  ))
}
## Build the fixed-effect right-hand-side terms for the weather variables:
## a natural spline (df = 3) per variable, joined with " + ".
## scaled_temperature is excluded because it is modelled as a
## subject-specific curve in the lme fits above.
##
## FIX: previously returned numeric 1 in the intercept-only case but a
## character string otherwise; now always returns a character scalar ("1"
## for intercept only), which is identical once paste()-ed into a formula
## by every caller in this file.
get_terms <- function(wvars) {
  # Remove scaled_temperature as it is modelled as a subject specific curve
  wvars <- wvars[wvars != "scaled_temperature"]
  if (length(wvars) == 0) {
    return("1")  # intercept-only right-hand side
  }
  paste(paste0("ns(", wvars, ", df = 3)"), collapse = " + ")
}
# fit_ssc_ar1 <- function(data, wvars) {
# terms <- get_terms(wvars)
# form <- as.formula(paste("log(wh) ~ ", terms))
#
# eval(substitute(
# lme(form,
# data = data,
# random = ~ ns(scaled_temperature, df = 3) | bid,
# correlation = corAR1(),
# method = "ML",
# control = lmeControl(opt = "optim",
# msMaxIter=100,
# returnObject = TRUE)),
# list(form=form)
# ))
# }
#
#
# fit_ssc_ar1_attr <- function(data, wvars) {
# terms <- get_terms(wvars)
# form <- as.formula(paste("log(wh) ~ ", terms,
# "+ basebldngfeedonly + dxsystem + electricelementheating",
# "+ centraldist"))
#
# eval(substitute(
# lme(form,
# data = data,
# random = ~ ns(scaled_temperature, df = 3) | bid,
# correlation = corAR1(),
# method = "ML",
# control = lmeControl(opt = "optim",
# msMaxIter=100,
# returnObject = TRUE)),
# list(form=form)
# ))
# }
#' Fit SSCAR(1) model
#'
#' Fits a subject specific curve model with autocorrelation structure for residuals.
#'
#' TODO: Needs a predict method for the sscar1 class.
#' - Should be returned from this function as its own class.
#' - In the predict function create the new Z and Z.subject matrices. Based off training knot positions.
#' - Add ident as a variable in the dataframe
#' - Make sure knot positions in the test data are the same as for the test. Should
#' probably include these knots as an attribute
#'
#' @param data
#'
#' @return
#' @export
#'
#' @examples
# fit_sscar1 <- function(data) {
# data$ident <- 1
# x <- as.numeric(data$scaled_temperature)
# K <- 2
# K.subject <- 1
#
# knots <- quantile(unique(x), seq(0,1,length=K+2))[-c(1,K+2)]
# # Z <- outer(x, knots, "-")
# Z <- outer(x, knots, "-")
# Z <- (Z*(Z>0))^3
# knots.subject <- quantile(unique(x), seq(0, 1, length=K.subject+2))
# knots.subject <- knots.subject[-c(1,K.subject+2)]
# Z_sub <- outer(x, knots.subject, "-")
# Z_sub <- (Z_sub*(Z_sub>0))^3
#
# # TODO: Convert Z and Z_sub to dataframes. Rename columns automatically and then column bind to data.
# data$Z1 <- Z[,1]
# data$Z2 <- Z[,2]
# data$Z_sub <- Z_sub
#
# fit <- nlme::lme(wh ~ poly(scaled_temperature, 2),
# data = data,
# random = list(ident = pdIdent(~Z1+Z2-1),
# bid = pdSymm(~scaled_temperature),
# bid = pdIdent(~Z_sub-1)),
# correlation = corAR1(value = .5),
# control = lmeControl(opt = "optim"))
#
# fit
# }
|
887ed549f86811716c950d500388bf65f7bfab42
|
b06a918eb2c1a3b147a124dd204a41dbbf12ed46
|
/man/print.FSA.Rd
|
3bc79fa15c9c0c41ffa57595c6b54a14b9e7bbf8
|
[] |
no_license
|
joshuawlambert/rFSA
|
712cd31dfa0ba7641b20d9120227e328d4dc7c6b
|
b0986bb2534f550f6b6a4215d107254c370910d9
|
refs/heads/master
| 2021-07-13T14:13:30.222459
| 2021-06-30T16:49:59
| 2021-06-30T16:49:59
| 95,580,239
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 726
|
rd
|
print.FSA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.FSA.R
\name{print.FSA}
\alias{print.FSA}
\title{Printing function for FSA solutions}
\usage{
\method{print}{FSA}(x, ...)
}
\arguments{
\item{x}{FSA object to print details about.}
\item{...}{arguments to be passed to other functions.}
}
\value{
A list containing the feasible solution formulas, the original fitted model formula, the criterion function used, and details on the number of times each solution was converged to.
}
\description{
Printing function for FSA solutions
}
\examples{
#use mtcars package see help(mtcars)
data(mtcars)
colnames(mtcars)
fit<-lmFSA(formula="mpg~hp*wt",data=mtcars,fixvar="hp",
quad=FALSE,m=2,numrs=10,save_solutions=FALSE,cores=1)
print(fit)
}
|
35f4b88be92e09db93921f9dc737fcc0d6479da2
|
303ecdc998923dc101dfc42b8dbf42853ce7a7ec
|
/man/ClassifierModels.Rd
|
88bb2589c494fc9c905b76679009591799888db6
|
[] |
no_license
|
mattdneal/FAIMSToolkit
|
7e2640eb979110c2fca1cad639beb78fb9b25be4
|
751bfba992587bb7e5edba272a3890b088e19e33
|
refs/heads/master
| 2021-01-11T17:37:52.367113
| 2018-12-01T13:54:33
| 2018-12-01T13:54:33
| 79,808,086
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,071
|
rd
|
ClassifierModels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crossValidation.R
\name{ClassifierModels}
\alias{ClassifierModels}
\title{Run a set of classification models on input training, test data sets}
\usage{
ClassifierModels(data.train, targetValues, data.test, models, kFolds = 2,
repeats = 5, tuneLength = 5, verbose = F)
}
\arguments{
\item{data.train}{a data frame of training data}
\item{targetValues}{a logical vector}
\item{data.test}{a data frame of test data (columns must match \code{data.train})}
\item{models}{a list of \link{caret::train} models to run}
\item{kFolds}{number of folds for model selection within each fold}
\item{repeats}{number of repeats for model selection within each fold}
\item{tuneLength}{number of parameters to tune}
\item{verbose}{verbose output if TRUE}
}
\value{
a data frame containing prediction probabilities for each classification algorithm.
These are the predicted probabilities that \code{targetValues==TRUE}
}
\description{
Run a set of classification models on input training, test data sets
}
|
5c52a7abad1ebabf7ff9d7eccefe0fc6b0741c95
|
7da6f203762c9c23d83ca262cf1ae2d03d51b228
|
/03_gacd/w1/quiz.R
|
318a1cd0c1289ddcf3c7362b8d9860637f6d14bf
|
[] |
no_license
|
josoriov/ds-coursera
|
ace55d97b44707283bd0a4739a4829ce0bdce41c
|
7018e0af76ba87cf7887636070bc8980393ed3fa
|
refs/heads/master
| 2022-11-16T02:10:58.242363
| 2020-07-15T22:22:01
| 2020-07-15T22:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
quiz.R
|
# Getting and Cleaning Data — week 1 quiz scratch work.

# 1
# dat <- read.csv("housing.csv")
# a <- dat(dat, VAL==24)
# Rta = 53

# 3
# RTA = 36534720

# 4: parse the restaurants XML feed and flag Baltimore restaurants whose
# zipcode is "21231" (sum(a, na.rm = TRUE) gives the quiz answer).
# FIX: use library() instead of require() — require() only warns (returns
# FALSE) when the package is missing, letting the script fail later.
library(XML)
dat <- xmlParse("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml")
xml_data <- xmlToList(dat)
xml_data <- xml_data[[1]]   # descend into the document's first (row) node
a <- unlist(lapply(xml_data, function(x) x$zipcode == "21231"))

# 5
# library("data.table")
# dat <- fread(file="data_survey.csv")
# dat[, mean(pwgtp15), by=SEX]
|
7df1aa47af4e55d0b35de46ba3a520a5778dbfd8
|
db7430d5693c5aa8e9f9f7affd41df058407d084
|
/man/RcmdrPlugin.RiskDemo-package.Rd
|
d72ad4cb6d9eb4586cde09dda129ba450eb97e84
|
[] |
no_license
|
arolluom/RcmdrPlugin.RiskDemo
|
8fbb654f97b764099dc7f7bd7991d4a0d730baf4
|
a66816c9f30ebd7b398ac5cb8a01c951ff0b5c05
|
refs/heads/master
| 2021-01-15T19:59:54.356246
| 2017-08-09T17:45:30
| 2017-08-09T17:45:30
| 93,251,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
rd
|
RcmdrPlugin.RiskDemo-package.Rd
|
\name{RcmdrPlugin.RiskDemo-package}
\alias{RcmdrPlugin.RiskDemo-package}
\alias{RcmdrPlugin.RiskDemo}
\docType{package}
\title{
R Commander Plug-in for Risk Demonstration
}
\description{
R Commander plug-in to demonstrate various actuarial and financial risks. It includes valuation of bonds and stocks, portfolio optimization, classical ruin theory and demography.
}
\details{
\tabular{ll}{
Package: \tab RcmdrPlugin.RiskDemo\cr
Type: \tab Package\cr
Version: \tab 1.9\cr
Date: \tab 2017-08-09\cr
License: \tab GPL (>= 2)\cr
LazyLoad: \tab yes\cr
}
}
\author{
Arto Luoma
Maintainer: Arto Luoma <arto.luoma@wippies.com>
}
\keyword{ package }
|
152ea90956166573dd68f3c9a2d6849996690cc3
|
9721b7e97328faf3e4dafaa24d70310129d52b01
|
/R/permcoefs.plsRnp.R
|
e73e89ae4f8e1730b37b051b24e888b5b55c995b
|
[] |
no_license
|
kongdd/plsRglm
|
77dd10e804ec3606d914aae22a863a497497cd18
|
dfa4e54ea02bca8bf04d29bb65dc7dba611927c9
|
refs/heads/master
| 2022-02-19T20:26:29.672362
| 2019-10-01T10:41:55
| 2019-10-01T10:41:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
permcoefs.plsRnp.R
|
## permcoefs.plsRnp:
##
## One permutation replicate of the PLSR coefficient estimates: regresses
## the permuted response (rows `ind` of column 1 of dataRepYtt) on the full
## score matrix (remaining columns), maps the component coefficients back to
## the original predictors through `wwetoile`, and prepends a zero
## intercept. Returns `ifbootfail` when the solve fails or any coefficient
## reaches `maxcoefvalues` in absolute value. (`nt` and `modele` belong to
## the common bootstrap interface but are unused here.)
permcoefs.plsRnp <- function(dataRepYtt, ind, nt, modele, maxcoefvalues, wwetoile, ifbootfail) {
  y_perm <- dataRepYtt[ind, 1]   # permuted response
  Tb <- dataRepYtt[, -1]         # component scores (all rows, unpermuted)
  ## OLS of the permuted response on the scores; may fail numerically.
  tempCb <- try(solve(t(Tb) %*% Tb) %*% t(Tb) %*% y_perm, silent = TRUE)
  tempcoefs <- rbind(Intercept = 0, wwetoile %*% tempCb)
  ok <- FALSE
  try(ok <- is.numeric(tempcoefs) && all(abs(tempcoefs) < maxcoefvalues),
      silent = TRUE)
  if (ok) {
    tempcoefs
  } else {
    ifbootfail
  }
}
|
5b3b0c37e5dfcaa1ce2697953b8caace0a0f0714
|
7a9a8fb85481a80124bb1004eb3f4cfb46cdbede
|
/modeling1.R
|
8318789beb43987ee910f03eba8a4a6748738ec1
|
[] |
no_license
|
xinyizhao123/Predicting-Future-Ambient-Ozone
|
6459a9eef144bbf68416522f1987cf60f87af6bd
|
1b682e4fcc16f443b4d3d8c9216cb5f823ac2986
|
refs/heads/master
| 2020-05-25T14:58:23.133324
| 2016-10-06T00:52:51
| 2016-10-06T00:52:51
| 69,671,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,937
|
r
|
modeling1.R
|
#######################################################
# This program is to perform modeling for the dataset
# Programmer: Xinyi Zhao
# Date: 01/23/2016
#######################################################
#setwd("C:/Users/zhaohexu/Dropbox/Ozone project")
#setwd("C:/Users/Hitomi/Dropbox/Ozone project")
#install.packages("leaps")
library(ggplot2)
library(leaps)
# Load the study data and keep Harris county only
st <- read.csv("study1.csv", stringsAsFactors = FALSE)
st <- st[-c(1)] # drop first column (presumably a row-index column written by write.csv — verify)
st <- st[st$county == "Harris", ]
unique(st$siteID) # printed for inspection only; result is not stored
# transformation of response variable
st$so <- sqrt(st$ozone) # square root transformed
st$lo <- log(st$ozone) # log-transformed
st$co <- (st$ozone)^(1/3) # cube root transformed
# create higher order terms of predictors (polynomial, log, root forms
# of NOx and VOC, used later for model selection)
st$N2 <- st$NOx^2
st$V2 <- st$VOC^2
st$N3 <- st$NOx^3
st$V3 <- st$VOC^3
st$Nl <- log(st$NOx) # zero values
st$Vl <- log(st$VOC)
st$Ns=sqrt(st$NOx)
st$Vs=sqrt(st$VOC)
st$Nc=st$NOx^(1/3)
st$Vc=st$VOC^(1/3)
##### plot (unadjusted association)
# Exploratory scatterplots of raw ozone against each candidate
# predictor, with a loess smoother (stat_smooth) showing the
# unadjusted trend.
ggplot(st, aes(x=VOC, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and VOC") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=NOx, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and NOx") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=temp, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and Temperature") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=humid, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and Relative Humidity") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=prcp, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and Precipitation") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=solar, y=ozone)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Ozone and Solar Radiation") + theme(axis.title = element_text(size = 15.5)) +
  theme(plot.title = element_text(size = 19)) + theme(axis.text = element_text(size = 13))
##### transformation of response variable
# NOTE(review): these three lines duplicate the transformations already
# computed above; they are harmless but redundant.
st$so <- sqrt(st$ozone) # square root transformed
st$lo <- log(st$ozone) # log-transformed
st$co <- (st$ozone)^(1/3) # cube root transformed
### histogram
# Compare the distribution of ozone under each transformation
qplot(st$ozone, geom="histogram", main = "Histogram for Non-transformed Ozone",
      fill=I("grey50"), col=I("black"), xlab = "Ozone")
qplot(st$so, geom="histogram", main = "Histogram for Square-root Transformed Ozone",
      fill=I("grey50"), col=I("black"), xlab = "Square-root Ozone")
qplot(st$co, geom="histogram", main = "Histogram for Cube-root Transformed Ozone",
      fill=I("grey50"), col=I("black"), xlab = "Cube-root Ozone")
qplot(st$lo, geom="histogram", main = "Histogram for Log Transformed Ozone",
      fill=I("grey50"), col=I("black"), xlab = "Log Ozone")
### scattering plot
# Transformed response against each predictor, with loess smoother
# square root
ggplot(st, aes(x=VOC, y=so)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Square-root Ozone and VOC") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Square-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=NOx, y=so)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Square-root Ozone and NOx") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Square-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
# cube root
ggplot(st, aes(x=VOC, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and VOC") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=NOx, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and NOx") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=temp, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and Temperature") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=humid, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and Relative Humidity") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=prcp, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and Precipitation") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=solar, y=co)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Cube-root Ozone and Solar Radiation") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Cube-root Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
# log
ggplot(st, aes(x=VOC, y=lo)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Log Ozone and VOC") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Log Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
ggplot(st, aes(x=NOx, y=lo)) + geom_point(size=3.5, shape=20) + stat_smooth() +
  ggtitle("Log Ozone and NOx") + theme(axis.title = element_text(size = 15.5)) +
  ylab("Log Ozone") + theme(plot.title = element_text(size = 19)) +
  theme(axis.text = element_text(size = 13))
# model
# Compare response transformations by fitting simple models on VOC and
# inspecting the residual diagnostic plots.
fit1 = lm(ozone~VOC, data=st)
summary(fit1)
plot(fit1)
# square root transformed
fit2 = lm(so~VOC, data=st)
summary(fit2)
plot(fit2)
# log-transformed
fit3 = lm(lo~VOC, data=st)
summary(fit3)
plot(fit3)
#### choose cube-root
################################
#    Univariate association    #
################################
# Simple linear models of cube-root ozone on each predictor, with
# coefficient summaries and 95% confidence intervals.
fit1.1 = lm(co~VOC, data=st)
summary(fit1.1)
confint(fit1.1)
fit1.2 = lm(co~NOx, data=st)
summary(fit1.2)
confint(fit1.2)
fit1.3 = lm(co~temp, data=st)
summary(fit1.3)
confint(fit1.3)
fit1.4 = lm(co~humid, data=st)
summary(fit1.4)
confint(fit1.4)
fit1.5 = lm(co~prcp, data=st)
summary(fit1.5)
confint(fit1.5)
fit1.6 = lm(co~solar, data=st)
summary(fit1.6)
confint(fit1.6)
##################################
#   multivariate association     #
##################################
##### interaction
# Chunk tests: compare interaction models against their main-effects
# counterparts with partial F-tests (anova of nested models).
fit0.00 = lm(co~VOC*NOx, data=st)
summary(fit0.00) # FIX: was summary(fit0) — fit0 is never defined in this script
fit0.01 = lm(co~VOC+NOx, data=st)
anova(fit0.00, fit0.01)# not significant
# Joint test of all pairwise interactions between the NOx terms and the
# VOC terms (including the transformed/higher-order versions).
fit0.0 = lm(co~VOC+NOx+V2+V3+N3+N2+Vl+Ns+Nc+Vs+Vc, data=st)
fit0.1 = lm(co~VOC*NOx+V2*NOx+V3*NOx+Vl*NOx+Vs*NOx+Vc*NOx+
              VOC*N2+V2*N2+V3*N2+Vl*N2+Vs*N2+Vc*N2+
              VOC*N3+V2*N3+V3*N3+Vl*N3+Vs*N3+Vc*N3+
              VOC*Ns+V2*Ns+V3*Ns+Vl*Ns+Vs*Ns+Vc*Ns+
              VOC*Nc+V2*Nc+V3*Nc+Vl*Nc+Vs*Nc+Vc*Nc, data=st)
summary(fit0.1)
anova(fit0.0, fit0.1) # not significant
# interaction not significant
##### only main exposures included
# Candidate main-effects models, compared on adjusted R^2, AIC and BIC.
# 1
fit2 = lm(co~VOC+NOx, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 2
fit2 = lm(co~VOC+NOx+N2, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 3
fit2 = lm(co~VOC+NOx+V2, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 4
fit2 = lm(co~VOC+NOx+V2+N2, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 5
fit2 = lm(co~VOC+NOx+V3, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 6
fit2 = lm(co~VOC+NOx+N3, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
# 7
fit2 = lm(co~VOC+NOx+N2+V2+N3+Vl, data=st)
summary(fit2)$adj.r.squared; AIC(fit2); BIC(fit2)
### All possible model selection
# Best-subsets search (leaps::regsubsets), visualised by adjusted R^2
# and BIC.
leaps=regsubsets(co~VOC+NOx+V2+V3+N3+N2+Vl+Ns+Nc+Vs+Vc, data=st, nbest=5)
plot(leaps, scale="adjr2")
plot(leaps, scale="bic")
# write.csv(st, "s1.csv")
# (go to SAS....)
### model selected: NOx, NOx^3 and NOx^1/3
fit3 = lm(co~NOx+N3+Nc, data=st)
summary(fit3)
plot(fit3)
###### interaction test
# Each sub-block below tests whether a transformed VOC term modifies
# the NOx effects in the selected model fit3 (co ~ NOx + N3 + Nc),
# using partial F-tests (anova of nested models) for 1, 2 and 3
# interaction terms at a time.
## chunk test
fit4 = lm(co~NOx+N3+Nc
          +NOx:VOC+N3:VOC+Nc:VOC
          +NOx:V2+N3:V2+Nc:V2
          +NOx:V3+N3:V3+Nc:V3
          +NOx:Vs+N3:Vs+Nc:Vs
          +NOx:Vc+N3:Vc+Nc:Vc
          +NOx:Vl+N3:Vl+Nc:Vl, data=st)
summary(fit4)
anova(fit3, fit4) # not significant jointly
## VOC as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:VOC+N3:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:VOC+Nc:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:VOC+Nc:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:VOC+Nc:VOC+N3:VOC, data=st)
summary(fit4); anova(fit3, fit4) # not significant
## VOC^2 as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:V2+N3:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:V2+Nc:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:V2+Nc:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:V2+Nc:V2+N3:V2, data=st)
summary(fit4); anova(fit3, fit4) # not significant
## VOC^3 as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:V3+N3:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:V3+Nc:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:V3+Nc:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:V3+Nc:V3+N3:V3, data=st)
summary(fit4); anova(fit3, fit4) # not significant
## sqrt(VOC) as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vs+N3:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:Vs+Nc:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vs+Nc:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vs+Nc:Vs+N3:Vs, data=st)
summary(fit4); anova(fit3, fit4) # not significant
## VOC^(1/3) as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vc+N3:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:Vc+Nc:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vc+Nc:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vc+Nc:Vc+N3:Vc, data=st)
summary(fit4); anova(fit3, fit4) # not significant
## log(VOC) as effect modifier
# 1 interaction
fit4 = lm(co~NOx+N3+Nc+NOx:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+Nc:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 2 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vl+N3:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+NOx:Vl+Nc:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
fit4 = lm(co~NOx+N3+Nc+N3:Vl+Nc:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
# 3 interactions
fit4 = lm(co~NOx+N3+Nc+NOx:Vl+Nc:Vl+N3:Vl, data=st)
summary(fit4); anova(fit3, fit4) # not significant
### other possible models
|
19036c562e6420da1fa3e897142d0b746d0e74f9
|
8dcb923dea78fa398f185c06b5975d259a29f7c3
|
/modules/met_data/prepare_tair_rh_par_data.R
|
fbdc0af8ef67fe6ce038eb1e78bb04a78470a045
|
[] |
no_license
|
mingkaijiang/EucFACE_Carbon_Budget
|
ce69f2eb83066e08193bb81d1a0abc437b83dc8d
|
11abb2d6cd5e4121879ddecdf10ee5ba40af32ad
|
refs/heads/master
| 2020-09-03T03:43:00.823064
| 2020-01-15T02:14:51
| 2020-01-15T02:14:51
| 219,377,500
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,140
|
r
|
prepare_tair_rh_par_data.R
|
prepare_tair_rh_par_data <- function(timestep) {
  # Download EucFACE air temperature / relative humidity / PAR data,
  # aggregate to hourly, daily and monthly means, save all three
  # aggregations to CSV under R_other/, and return the one requested.
  #
  # Args:
  #   timestep: one of "Monthly", "Daily" or "Hourly".
  # Returns: a data frame with the time column plus AirT (K), RH, PAR.
  # Side effects: writes three CSV files.
  #
  # FIX: validate the requested timestep up front; the original silently
  # returned NULL for an unrecognised value.
  timestep <- match.arg(timestep, c("Monthly", "Daily", "Hourly"))

  #### Download the data - takes time to run
  myDF <- download_tair_rh_par_data()

  #### Ring number is embedded in the Source string ("FACE_R<ring>_T1...")
  myDF$Ring <- sub("FACE_R", "", myDF$Source)
  myDF$Ring <- sub("_T1.*", "", myDF$Ring)
  myDF$Ring <- as.numeric(myDF$Ring)

  myDF <- myDF[order(myDF$DateTime), ]

  ### Grouping keys: first day of month, calendar date and date-hour.
  # hour() is presumably lubridate, loaded elsewhere — verify.
  myDF$Month <- format(as.Date(myDF$Date), "%Y-%m")
  myDF$Month <- as.Date(paste0(myDF$Month, "-1"), format = "%Y-%m-%d")
  myDF$DateHour <- as.POSIXct(paste0(myDF$Date, " ", hour(myDF$DateTime), ":00:00"), format = "%Y-%m-%d %H:%M:%S")

  myDF$AirTc_Avg <- as.numeric(myDF$AirTc_Avg)
  myDF$RH_Avg <- as.numeric(myDF$RH_Avg)
  myDF$LI190SB_PAR_Den_Avg <- as.numeric(myDF$LI190SB_PAR_Den_Avg)

  vars <- c("AirTc_Avg", "RH_Avg", "LI190SB_PAR_Den_Avg")

  ### Hourly, daily and monthly means (na.rm is forwarded to mean).
  # FIX: dropped the original's keep.names=T argument — aggregate() has
  # no such parameter; it was silently swallowed by mean()'s "...".
  hDF <- aggregate(myDF[vars], by = myDF[c("DateHour")], FUN = mean, na.rm = TRUE)
  dDF <- aggregate(myDF[vars], by = myDF[c("Date")], FUN = mean, na.rm = TRUE)
  mDF <- aggregate(myDF[vars], by = myDF[c("Month")], FUN = mean, na.rm = TRUE)

  ### Colnames
  colnames(hDF) <- c("DateHour", "AirT", "RH", "PAR")
  colnames(dDF) <- c("Date", "AirT", "RH", "PAR")
  colnames(mDF) <- c("Month", "AirT", "RH", "PAR")

  ### Air temperature from degree C to K
  hDF$AirT <- hDF$AirT + 273.15
  dDF$AirT <- dDF$AirT + 273.15
  mDF$AirT <- mDF$AirT + 273.15

  ### Save data
  write.csv(hDF, "R_other/tair_rh_par_data_hourly.csv", row.names = FALSE)
  write.csv(dDF, "R_other/tair_rh_par_data_daily.csv", row.names = FALSE)
  write.csv(mDF, "R_other/tair_rh_par_data_monthly.csv", row.names = FALSE)

  ### Return the requested aggregation
  switch(timestep,
         Monthly = mDF,
         Daily = dDF,
         Hourly = hDF)
}
|
aed6651573253b01213e25d6526e551157f12107
|
6baba64a7bdb5879768da2302a23608be28cf3ee
|
/FormerLabMembers/Linh/sdmt_analyses/funcs_long/find_cpt.R
|
a2608976e4e850d41bda8af215f8fd646c95094e
|
[] |
no_license
|
bielekovaLab/Bielekova-Lab-Code
|
8db78141b6bebb0bbc08ea923655a676479992fa
|
369db2455344660e4012b605a4dc47c653a5c588
|
refs/heads/master
| 2023-04-20T01:01:50.556717
| 2021-05-04T12:44:37
| 2021-05-04T12:44:37
| 261,584,751
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 570
|
r
|
find_cpt.R
|
find_cpt <- function(data, patient, c_input, b0_input, b1_input) {
  # Fit the change-point model fx() (defined elsewhere) to a single
  # patient's longitudinal data by nonlinear least squares.
  #
  # Args:
  #   data:    data frame with columns PID, x and y
  #   patient: the PID to fit
  #   c_input, b0_input, b1_input: starting values for the nls fit
  # Returns: a data frame (one row per observation) with PID, x, y,
  #          fitted values and the rounded change-point estimate.
  pat <- patient
  # FIX: subset the patient's rows once; the original filtered the whole
  # data frame twice (once for y, once for x).
  dat <- data %>% filter(PID == pat)
  y <- dat$y
  x <- dat$x
  # warnOnly = TRUE keeps non-converged fits from aborting the run
  mod <- nls(y ~ fx(x, c, b0, b1),
             start = c("c" = c_input, "b0" = b0_input, "b1" = b1_input),
             control = nls.control(warnOnly = TRUE, minFactor = 1e-20, maxiter = 1000))
  out <- summary(mod)
  # coefficients[1] is the estimated change point c
  fin <- data.frame(rep(pat, length(y)), x, y, predict(mod),
                    rep(round(out$coefficients[1]), length(y)))
  colnames(fin) <- c("PID", "x", "y", "pred_y", "cpt")
  return(fin)
}
|
2e009cfcda6c77e24f9832625f83a08f445bbf8d
|
0a5932d0914152af939fd112158ac9f73901e41e
|
/R/compare_sources.R
|
a128f2d82c79136c9753c5027ec95b6f26b8cf97
|
[] |
no_license
|
andrewcparnell/simmr
|
558b745360b13b413eb895585161c8e38e09cda2
|
8967f5d9800c20d3817cc2e8cf5195e96c6520a6
|
refs/heads/master
| 2023-08-31T10:10:27.187056
| 2023-08-21T10:13:21
| 2023-08-21T10:13:21
| 40,361,152
| 21
| 8
| null | 2023-08-01T10:53:37
| 2015-08-07T13:13:05
|
R
|
UTF-8
|
R
| false
| false
| 7,153
|
r
|
compare_sources.R
|
#' Compare dietary proportions between multiple sources
#'
#' This function takes in an object of class \code{simmr_output} and creates
#' probabilistic comparisons between the supplied sources. The group number can
#' also be specified.
#'
#' When two sources are specified, the function produces a direct calculation
#' of the probability that the dietary proportion for one source is bigger than
#' the other. When more than two sources are given, the function produces a set
#' of most likely probabilistic orderings for each combination of sources. The
#' function produces boxplots by default and also allows for the storage of the
#' output for further analysis if required.
#'
#' @param simmr_out An object of class \code{simmr_output} created from
#' \code{\link{simmr_mcmc}} or \code{\link{simmr_ffvb}}.
#' @param source_names The names of at least two sources. These should match
#' the names exactly given to \code{\link{simmr_load}}.
#' @param group The integer values of the group numbers to be compared. If not
#' specified assumes the first or only group
#' @param plot A logical value specifying whether plots should be produced or
#' not.
#'
#' @import ggplot2
#' @importFrom reshape2 "melt"
#'
#' @return If there are two sources, a vector containing the differences
#' between the two dietary proportion proportions for these two sources. If
#' there are multiple sources, a list containing the following fields:
#' \item{Ordering }{The different possible orderings of the dietary proportions
#' across sources} \item{out_all }{The dietary proportions for these sources
#' specified as columns in a matrix}
#' @author Andrew Parnell <andrew.parnell@@mu.ie>
#' @seealso See \code{\link{simmr_mcmc}} for complete examples.
#' @examples
#' \donttest{
#' data(geese_data_day1)
#' simmr_1 <- with(
#' geese_data_day1,
#' simmr_load(
#' mixtures = mixtures,
#' source_names = source_names,
#' source_means = source_means,
#' source_sds = source_sds,
#' correction_means = correction_means,
#' correction_sds = correction_sds,
#' concentration_means = concentration_means
#' )
#' )
#'
#' # Plot
#' plot(simmr_1)
#'
#' # Print
#' simmr_1
#'
#' # MCMC run
#' simmr_1_out <- simmr_mcmc(simmr_1)
#'
#' # Print it
#' print(simmr_1_out)
#'
#' # Summary
#' summary(simmr_1_out)
#' summary(simmr_1_out, type = "diagnostics")
#' summary(simmr_1_out, type = "correlations")
#' summary(simmr_1_out, type = "statistics")
#' ans <- summary(simmr_1_out, type = c("quantiles", "statistics"))
#'
#' # Plot
#' plot(simmr_1_out, type = "boxplot")
#' plot(simmr_1_out, type = "histogram")
#' plot(simmr_1_out, type = "density")
#' plot(simmr_1_out, type = "matrix")
#'
#' # Compare two sources
#' compare_sources(simmr_1_out, source_names = c("Zostera", "Grass"))
#'
#' # Compare multiple sources
#' compare_sources(simmr_1_out)
#' }
#'
#' @export
compare_sources <- function(simmr_out,
                            source_names = simmr_out$input$source_names,
                            group = 1,
                            plot = TRUE) {
  # S3 generic: dispatch on class(simmr_out); the implementation lives
  # in compare_sources.simmr_output().
  UseMethod("compare_sources")
}
#' @export
compare_sources.simmr_output <- function(simmr_out,
                                         source_names = simmr_out$input$source_names,
                                         group = 1,
                                         plot = TRUE) {
  # Compare dietary proportions between sources within a group, both as
  # textual output and (optionally) as boxplots.
  #
  # With exactly two sources:
  # - report the probability that one source's proportion exceeds the other's
  # - return the posterior distribution of the difference
  # - optional boxplot of the two sources
  # With more than two sources:
  # - report the most likely orderings of the sources
  # - optional boxplot of all of them

  # Validate inputs (checkmate-style assertions)
  assert_character(source_names,
    min.len = 2,
    any.missing = FALSE
  )
  assert_true(all(source_names %in% simmr_out$input$source_names))
  assert_numeric(group,
    len = 1,
    any.missing = FALSE
  )
  assert_logical(plot)

  # Two-source case: direct probabilistic comparison
  if (length(source_names) == 2) {
    # Posterior draws of the dietary proportion for each source
    match_names <- match(source_names, simmr_out$input$source_names)
    out_all_src_1 <- simmr_out$output[[group]]$BUGSoutput$sims.list$p[, match_names[1]]
    out_all_src_2 <- simmr_out$output[[group]]$BUGSoutput$sims.list$p[, match_names[2]]
    # Posterior of the difference between the two proportions
    out_diff <- out_all_src_1 - out_all_src_2
    # FIX: message() concatenates its arguments with NO separator, so the
    # original output ran the words and source names together
    # ("proportion ofZostera> ..."); spaces are now inside the strings.
    message(
      "Prob ( proportion of ", source_names[1],
      " > proportion of ", source_names[2],
      " ) = ", round(mean(out_diff > 0), 3)
    )
    if (plot) {
      # Stupid fix for packaging ggplot things
      Source <- Proportion <- NULL
      df <- data.frame(Proportion = c(out_all_src_1, out_all_src_2), Source = c(rep(source_names[1], length(out_all_src_1)), rep(source_names[2], length(out_all_src_2))))
      p <- ggplot(df, aes(x = Source, y = Proportion, fill = Source)) +
        geom_boxplot(alpha = 0.5, outlier.size = 0) +
        theme_bw() +
        theme(legend.position = "none") +
        ggtitle(paste("Comparison of dietary proportions for sources", source_names[1], "and", source_names[2]))
      print(p)
    }
  }

  # More-than-two-sources case: most likely orderings
  if (length(source_names) > 2) {
    # Posterior draws for all requested sources, one column per source
    match_names <- match(source_names, simmr_out$input$source_names)
    out_all <- simmr_out$output[[group]]$BUGSoutput$sims.list$p[, match_names]
    # Ranking of the sources within each posterior draw
    ordering_num <- t(apply(out_all, 1, order, decreasing = TRUE))
    Ordering <- rep(NA, length = nrow(ordering_num))
    for (i in seq_along(Ordering)) Ordering[i] <- paste0(source_names[ordering_num[i, ]], collapse = " > ")
    if (simmr_out$input$n_groups > 1) cat("Results for group:", group, "\n")
    message("Most popular orderings are as follows:\n")
    tab <- t(t(sort(table(Ordering, dnn = NULL), decreasing = TRUE)))
    colnames(tab) <- "Probability"
    # Truncate very long ordering tables to the top 30
    if (nrow(tab) > 30) {
      print(round(tab[1:30, ] / length(Ordering), 4))
    } else {
      print(round(tab / length(Ordering), 4))
    }
    if (plot) {
      # Stupid fix for packaging ggplot things
      Source <- Proportion <- NULL
      df <- reshape2::melt(out_all)[, 2:3]
      colnames(df) <- c("Source", "Proportion")
      p <- ggplot(df, aes(x = Source, y = Proportion, fill = Source)) +
        scale_fill_viridis(discrete = TRUE) +
        geom_boxplot(alpha = 0.5, outlier.size = 0) +
        theme_bw() +
        theme(legend.position = "none") +
        ggtitle(paste("Comparison of dietary proportions between sources"))
      print(p)
    }
  }

  # Return invisibly so the textual summary stays the visible output
  if (length(source_names) == 2) {
    if (plot) {
      invisible(list(out_diff = out_diff, plot = p))
    } else {
      invisible(list(out_diff = out_diff))
    }
  } else {
    if (plot) {
      invisible(list(Ordering = Ordering, out_all = out_all, plot = p))
    } else {
      invisible(list(Ordering = Ordering, out_all = out_all))
    }
  }
}
|
9243ba678025e54bbcb7b7a300dc99c55b32864c
|
a206f33c8cbd90abf2f400f79b233b4e56c89f23
|
/clusterizacion/clusterizacion.R
|
0d93c7b54b47795dbc30029c3d871070dfba9b94
|
[] |
no_license
|
andresrabinovich/algoritmos-geneticos
|
60a403860fcad3932e5f18bad23a6ac9312c12f1
|
6b3923981c2f51ed735451f735dd12e1c63a0d75
|
refs/heads/master
| 2021-01-10T13:46:18.419293
| 2015-07-15T12:53:51
| 2015-07-15T12:53:51
| 36,511,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,023
|
r
|
clusterizacion.R
|
# TODO
# Figure out how to implement the sample() function in C!
#///////////////
# CONFIGURATION
#///////////////
#-------------------------------------------------------
# Genetic-algorithm settings
# (NOTE(review): the original header labelled this block
# "dataset configuration" — the two labels were swapped)
#-------------------------------------------------------
poblacion = 100;
pm = c(0.1, 0.1, 0.1); # mutation probabilities: c(point, merge, split)
pc = 0.1; # probability of single-point crossover
generaciones = 2500;
corridas = 1;
pp = 4; # tournament size: how many individuals are drawn when picking parents
#-------------------------------------
# Dataset settings
#-------------------------------------
dim_red = 3; # lattice nodes are not data points; clusters are built around them
puntos_por_cluster = 20;
parametro_de_red = 1;
ancho_del_cluster = 0.1; # cluster extent in x
alto_del_cluster = 0.1; # cluster extent in y
k_max = 20; # maximum number of clusters to search for
k_min = 2; # minimum number of clusters to search for
soluciones_de_elite = 4; # elitism: the best solutions pass unchanged to the next generation
# Fix the random seed for reproducible results
set.seed(123457)
#////////////////////
# LIBRARY LOADING
#////////////////////
library(fpc);     # calinhara() used by the fitness function
library(cluster); # silhouette() (commented-out fitness alternative)
#/////////////////////////////////
# GENERIC FUNCTIONS START HERE
#/////////////////////////////////
#------------------
# Fitness function
#------------------
# Score a clustering (one cluster label per point) with the
# Calinski-Harabasz index — higher is better. `distancia` is unused by
# this variant but kept so the silhouette alternative can be swapped in.
calcular_fitness <- function(puntos, cromosoma, distancia){
  # Alternative: average silhouette width
  # summary(silhouette(cromosoma, distancia))$avg.width
  calinhara(puntos, cromosoma)
}
#-------------------
# Mutation operator
#-------------------
# Applies three mutation operators to a chromosome (one cluster id per
# data point):
#   pm[1]: per-locus probability of re-assigning a point to a random cluster
#   pm[2]: probability of merging two randomly chosen clusters
#   pm[3]: probability of splitting a randomly chosen cluster in two
mutar <- function(cromosoma, pm, k_max, k_min){
  longitud_cromosoma = length(cromosoma);
  # (1) Point mutation: each locus flips to a random cluster with prob pm[1]
  mutaciones = runif(longitud_cromosoma);
  for(posicion in 1:longitud_cromosoma){
    if(mutaciones[posicion] <= pm[1]){
      cromosoma[posicion] = sample(1:k_max, 1);
    }
  }
  # (2) Merge two clusters with probability pm[2]
  if(runif(1) <= pm[2]){
    # Which clusters are present in the solution
    clusters_en_cromosoma = unique(cromosoma);
    # Only merge while the solution keeps more than k_min clusters
    if(length(clusters_en_cromosoma) > k_min){
      # Pick two clusters at random and fuse them
      clusters_a_mergear = sample(clusters_en_cromosoma, 2, replace=FALSE);
      cromosoma[which(cromosoma==clusters_a_mergear[2])]=clusters_a_mergear[1];
    }
  }
  # (3) Split one cluster with probability pm[3]: half of its points
  # are reassigned to a randomly chosen cluster id
  if(runif(1) <= pm[3]){
    clusters_en_cromosoma = unique(cromosoma);
    # FIX: sample(x, 1) on a length-1 numeric x samples from 1:x, so a
    # single-cluster chromosome could "split" an id that is not even
    # present. Indexing via sample.int() always picks an existing cluster
    # (and draws identically to sample(x, 1) when length(x) > 1).
    cluster_a_dividir = clusters_en_cromosoma[sample.int(length(clusters_en_cromosoma), 1)];
    elementos_del_cluster_a_dividir = which(cromosoma==cluster_a_dividir);
    cluster_nuevo = sample(1:k_max, 1);
    cromosoma[elementos_del_cluster_a_dividir[1:ceiling(length(elementos_del_cluster_a_dividir)/2)]]=cluster_nuevo;
  }
  return (cromosoma);
}
#--------------------
# Crossover operator
#--------------------
# Single-point crossover: with probability pc, swap the prefixes of the
# two parent chromosomes up to a random locus. Each child is then
# "repaired" so it keeps at least k_min distinct clusters.
cruzar <- function(cromosomas_padres, pc, k_min, k_max){
  # Children start as exact copies of the parents (returned unchanged
  # when no crossover happens)
  cromosomas_hijos = cromosomas_padres;
  # Cross the two chromosomes with probability pc
  if(runif(1) <= pc){
    # Pick a locus and swap everything up to (and including) it
    posicion = sample(1:length(cromosomas_padres[1,]), 1);
    cromosomas_hijos[1, 1:posicion] = cromosomas_padres[2, 1:posicion];
    cromosomas_hijos[2, 1:posicion] = cromosomas_padres[1, 1:posicion];
    # Force the children to have at least k_min clusters
    for(i in 1:2){
      clusters_a_elegir = c(1:k_max);
      if(length(unique(cromosomas_hijos[i, ])) < k_min){
        # NOTE(review): this repair step looks fragile —
        # table(...)[2] > 2 inspects only the second cluster's count, and
        # which(...)[[1]] then indexes the chromosome by a table position
        # rather than a locus; verify it really guarantees k_min clusters.
        clusters_a_elegir = clusters_a_elegir[-unique(cromosomas_hijos[i, ])];
        cromosomas_hijos[i, which(table(cromosomas_hijos[i, ])[2] > 2)[[1]]] = sample(clusters_a_elegir, 1);
      }
    }
  }
  return (cromosomas_hijos);
}
#------------------------------------------------------------------
# Tournament selection: pick a pair of parents based on fitness
#------------------------------------------------------------------
# Draws pp candidate solutions at random and returns the indices of the
# two fittest among them (fitness-proportional sampling was a commented
# alternative in the original).
#
# BUG FIX: the original removed the first winner with
# fitness = fitness[-pareja[1]], which shifts every index after it, so
# the second lookup indexed the wrong fitness values (and could return
# the same individual twice). Masking the winner with -Inf keeps all
# indices aligned and guarantees two distinct parents.
elegir_pareja <- function(fitness, pp){
  candidatos <- sample(seq_along(fitness), pp, replace=FALSE);
  pareja = c(0,0);
  pareja[1] = candidatos[which.max(fitness[candidatos])];
  fitness[pareja[1]] <- -Inf; # exclude the first winner, indices unchanged
  pareja[2] = candidatos[which.max(fitness[candidatos])];
  return (pareja);
}
#---------------------------------------
# Generate the initial (synthetic) dataset
#---------------------------------------
# Builds a dim_red x dim_red square lattice and scatters
# puntos_por_cluster uniformly-jittered points around each lattice node,
# giving dim_red^2 ground-truth clusters.
#
# NOTE(review): the fourth parameter is spelled `ancho_del_clustero`
# (typo) but the body reads `ancho_del_cluster`, which resolves to the
# GLOBAL of that name; the argument itself is never forced, which is the
# only reason the call site works. Consider renaming the parameter and
# actually using it.
generar_dataset <- function(dim_red, puntos_por_cluster, parametro_de_red, ancho_del_clustero, alto_del_cluster){
  # Generate the equally spaced lattice (node coordinates in two columns)
  a <- seq(1, dim_red*parametro_de_red, parametro_de_red);
  red <- matrix(data=a, nrow=dim_red^2, ncol=2);
  red[, 1] <- rep(a, each=dim_red);
  # Generate the data points around the lattice
  puntos_en_la_red <- dim_red^2;
  total_de_puntos <- puntos_en_la_red * puntos_por_cluster;
  # Cluster points: uniform jitter around each lattice node
  puntos <- matrix(0, nrow=total_de_puntos, ncol=2);
  puntos[, 1] <- runif(total_de_puntos, -ancho_del_cluster, ancho_del_cluster) + rep(red[, 1], each=puntos_por_cluster);
  puntos[, 2] <- runif(total_de_puntos, -alto_del_cluster, alto_del_cluster) + rep(red[, 2], each=puntos_por_cluster);
  return (puntos);
}
#////////////////////
# MAIN PROGRAM START
#////////////////////
# Generate the dataset
# FIX: the original passed the undefined name `ancho_del_clustero`; it
# only "worked" because R's lazy evaluation never forced that argument
# (the function body reads the global `ancho_del_cluster` directly).
puntos <- generar_dataset(dim_red, puntos_por_cluster, parametro_de_red, ancho_del_cluster, alto_del_cluster);
total_de_puntos = nrow(puntos);
# Distance matrix between the points
matriz_de_disimilaridad = dist(puntos);
# Blank matrix that will hold the chromosomes of the new population
# after each pass. A chromosome is an ordered vector assigning each
# position (each point) one of the possible cluster ids.
cromosomas_nuevos = matrix(0, ncol=total_de_puntos, nrow=poblacion);
# Vector (column matrix) holding the fitness of each chromosome
fitness = matrix(0, ncol=1, nrow=poblacion);
# Number of crossings per iteration
cruzas = c(1: as.integer(poblacion/2));
# Per-generation logs of fitness and best N (cluster count), with errors
registro_de_fitness = matrix(0, ncol=1, nrow=generaciones);
registro_de_error_fitness = matrix(0, ncol=1, nrow=generaciones);
registro_de_n = matrix(0, ncol=1, nrow=generaciones);
registro_de_error_n = matrix(0, ncol=1, nrow=generaciones);
# Target fitness: the fitness of the ground-truth labelling
fitness_objetivo = calcular_fitness(puntos, rep(c(1:dim_red^2), each=puntos_por_cluster), matriz_de_disimilaridad);
# Start the clock to measure execution time
comienzo_de_reloj <- proc.time()
# Run loop
for(corrida in 1:corridas){
  # Random initial population: each gene is a cluster id in 1..k_max
  cromosomas = matrix(sample(1:k_max, poblacion*total_de_puntos, replace=TRUE), ncol=total_de_puntos);
  # Generation loop
  for(generacion in 1:generaciones){
    # Evaluate the fitness of every chromosome
    for(cromosoma in 1:poblacion){
      fitness[cromosoma] = calcular_fitness(puntos, cromosomas[cromosoma, ], matriz_de_disimilaridad);
    }
    registro_de_fitness[generacion] = mean(fitness);
    registro_de_error_fitness[generacion] = sd(fitness)
    # Elitism: remember the best solutions so they pass on unchanged
    if(soluciones_de_elite){
      indice_mejores_soluciones = sort(fitness, index.return=TRUE)$ix[length(fitness):(length(fitness)-soluciones_de_elite + 1)];
      mejores_soluciones = cromosomas[indice_mejores_soluciones, ];
    }
    # Progress report (every generation — generacion %% 1 is always 0)
    if(generacion%%1 == 0){
      ibestf<-which.max(fitness)
      # nn = number of distinct clusters in each chromosome
      nn <- apply(cromosomas,1,function(x){ return(length(unique(x)))})
      cat(paste("generacion:",generacion,"- fitness mean:sd:max:optimo",
                signif(mean(fitness),2),signif(sd(fitness),2),
                signif(fitness[ibestf],2),
                signif(fitness_objetivo, 2),
                "\n"))
      cat(paste(" - N mean:sd:max",
                mean(nn),sd(nn),nn[ibestf],"\n\n"))
      registro_de_n[generacion] = nn[ibestf];
      registro_de_error_n[generacion] = sd(nn);
    }
    # After 10 generations, extrapolate the total run time
    if(generacion == 10){
      cat(paste("Tiempo estimado de ejecucion: ",((proc.time() - comienzo_de_reloj)[1]/10*generaciones),"\n"));
    }
    # Crossover: fitter chromosomes are more likely to be crossed.
    # Select poblacion/2 pairs.
    pareja_actual = 1; # index of the next child slot; local to this loop
    for(cruza in cruzas){
      # Pick the pair of parents
      pareja = elegir_pareja(fitness, pp);
      # Cross them, producing two children
      hijos = cruzar(cromosomas[pareja, ], pc, k_min, k_max);
      # Store both children in the new population
      cromosomas_nuevos[pareja_actual, ] = hijos[1, ];
      cromosomas_nuevos[pareja_actual+1, ] = hijos[2, ];
      # Advance by two slots
      pareja_actual = pareja_actual + 2;
    }
    # The new population replaces the current one
    cromosomas = cromosomas_nuevos;
    # Mutate the new chromosomes
    for(cromosoma in 1:poblacion){
      cromosomas[cromosoma, ] = mutar(cromosomas[cromosoma, ], pm, k_max, k_min);
    }
    # Elitism: discard any changes made to the elite solutions
    if(soluciones_de_elite) {
      cromosomas[indice_mejores_soluciones, ] = mejores_soluciones;
    }
  }
}
# Report the elapsed execution time
print(proc.time() - comienzo_de_reloj);
# Show the best-fitness solutions
graphics.off();
soluciones_buenas = which(fitness == max(fitness));
#ibestf<-soluciones_buenas[which(apply(cromosomas[soluciones_buenas, ], 1, function(x){length(unique(x))}) == (dim_red^2))[1]];
ibestf<-soluciones_buenas[1];
plot(puntos[,1],puntos[,2]);
points(puntos[,1],puntos[,2],col=rainbow(length(unique(cromosomas[ibestf, ])))[cromosomas[ibestf, ]],pch=20);
dev.new();
plot(silhouette(cromosomas[ibestf, ], matriz_de_disimilaridad));
x = 1:generaciones;
dev.new()
plot(x, registro_de_fitness);
arrows(x, registro_de_fitness-registro_de_error_fitness, x, registro_de_fitness+registro_de_error_fitness, length=0.05, angle=90, code=3)
dev.new()
plot(x, registro_de_n);
#Con esto grafica barras de error
arrows(x, registro_de_n-registro_de_error_n, x, registro_de_n+registro_de_error_n, length=0.05, angle=90, code=3)
|
8ef100fa494ee797aea518908f34eb134ebfb440
|
0c55f047f3a80bb94c6a7ad050c9c44e60a73fb9
|
/LogisticGrowth/remove_model_failures.R
|
1e0043d87687ae9173d9e38152bd324db5f881a9
|
[] |
no_license
|
Tuc-Nguyen/HLF-Robot-Image-Analysis-2.1
|
3d2ab9476656ab9d2a4518512bb34c23da39381c
|
bdb652a1169fb2f66117ddda971e8fabe07dd037
|
refs/heads/master
| 2022-04-14T06:01:16.323714
| 2020-03-26T16:36:22
| 2020-03-26T16:36:22
| 250,310,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 866
|
r
|
remove_model_failures.R
|
# remove_model_failures.R
#
# Read the logistic-growth model results, drop rows where the model fit
# failed (flagged with the string "OMIT" in the CC column), convert the
# model-output columns to numeric, and write the cleaned table out.
#
# Fixes: library() instead of require() (require() returns FALSE instead of
# erroring when the package is missing); TRUE/FALSE instead of the
# reassignable T/F shortcuts; <- for assignment.
library(ggplot2)  # NOTE(review): ggplot2 does not appear to be used below -- confirm before removing

modeled_file <- "logistic_growth.tab"

# Column classes for read.table. The model-output columns (CC, R, MinSize,
# Corr) must be read as character first because failed fits contain "OMIT".
classes <- c("numeric", "numeric", "character", "factor",
             # Row      Col        Name         Media
             "factor", "factor", "factor", "character",
             # Temp    Array(1-6) Condition    Well
             "character", "character", "character", "character")
             # CC         R            MinSize      Corr

modeled_df <- read.table(modeled_file, header = TRUE, colClasses = classes, sep = "\t")

# Drop the model failures.
modeled_df <- subset(modeled_df, modeled_df$CC != "OMIT")

# Now that the "OMIT" rows are removed, the model columns can be converted
# to numeric format.
modeled_df$CC      <- as.numeric(modeled_df$CC)
modeled_df$R       <- as.numeric(modeled_df$R)
modeled_df$MinSize <- as.numeric(modeled_df$MinSize)
modeled_df$Corr    <- as.numeric(modeled_df$Corr)

write.table(modeled_df, "logistic_growth.omit_errors.tab", sep = "\t", row.names = FALSE)
|
22857fe5f4beadf3e2af0f6eb99605a657cadcfc
|
9d8b86b2a20d5fd3c31a3bce56e7f52312187be1
|
/R/z.score.R
|
0fab959d7a188948c3ddf463efb0479efdc00b15
|
[] |
no_license
|
hms-dbmi/Rcupcake
|
d4141be5394de83340f476392defa11477fda1ee
|
2f87f7c771ceb0da7813a90529c973e1f028b6e8
|
refs/heads/master
| 2022-01-17T06:03:15.038438
| 2019-07-02T23:44:11
| 2019-07-02T23:44:11
| 81,849,992
| 2
| 5
| null | 2018-04-06T15:36:32
| 2017-02-13T17:08:40
|
HTML
|
UTF-8
|
R
| false
| false
| 6,390
|
r
|
z.score.R
|
#' Transform continuous into categorical variables and generate a new \code{cupcakeData} object.
#'
#' Given an object of class \code{cupcakeData}, it transforms continuous into categorical
#' variables by applying a Z-score. As a result a new \code{cupcakeData} object is generated.
#' Note that if the number of individuals is lower than 5000 a Shapiro test is done to test
#' the normal distribution, otherwise a Kolmogorov-Smirnov test is performed.
#'
#'
#' @param input Object of \code{cupcakeData} class.
#' @param nfactor By default 10. Change it into other number if you consider there is any
#' categorical variable with more than nfactor values.
#' @param zscoreCutOff Z-score cut-off to categorize the continuous variable. By default it is set
#' to -2 and 2.
#' @param verbose By default \code{FALSE}. Change it to \code{TRUE} to get an
#' on-time log from the function.
#' @return A \code{cupcakeData} class object with the continuous variable transformed into a categorical
#' variable, if possible.
#' @examples
#' load(system.file("extdata", "RcupcakeExData.RData", package="Rcupcake"))
#' z.score( input = RcupcakeExData,
#'          verbose = FALSE
#'        )
#' @export z.score
z.score <- function( input, zscoreCutOff = c(-2, 2), nfactor = 10, verbose = FALSE ){
    if( verbose == TRUE){
        message("Checking the input object")
    }
    # Validate the input class; stop() carries no message because the
    # explanation was already emitted via message() above.
    checkClass <- class(input)[1]
    if(checkClass != "cupcakeData"){
        message("Check the input object. Remember that this
            object must be obtained after applying the queryPheno
            function to your input file. The input object class must
            be:\"cupcakeData\"")
        stop()
    }
    # tt: patient-level result table; ph: table of phenotype variables.
    tt <- input@iresult
    ph <- input@phenotypes
    for( i in 1:nrow( ph )){
        # Column of tt holding the i-th phenotype.
        pcolumn <- which(colnames(tt) == as.character(ph[i,1]))
        # A variable with <= nfactor distinct values is treated as categorical
        # and left untouched.
        if( length( unique( tt[,pcolumn])) <= nfactor){
            if( verbose == TRUE){
                message( as.character(ph$variable[i]), " phenotype is considered as a categorical variable")
                message( "Z-score will not be applied to ", as.character(ph$variable[i]), " variable")
            }
        }else{
            if( verbose == TRUE){
                message( as.character(ph$variable[i]), " phenotype is considered as a continuous variable")
                message("Checking is the variable follows a normal distribution")
            }
            # Normality test: Shapiro-Wilk for small samples, KS otherwise.
            if( nrow( tt ) < 5000 ){
                normalDist <- shapiro.test(as.numeric(tt[,pcolumn]))
            }else{
                # NOTE(review): rnorm(n) with a vector n draws length(n) random
                # standard-normal values, so this KS test compares a *fresh
                # random sample* against pnorm rather than the data column
                # itself. Presumably ks.test(scale(as.numeric(tt[,pcolumn])),
                # 'pnorm') was intended -- confirm before changing.
                normalDist <- ks.test(x=rnorm(as.numeric(tt[,pcolumn])),y='pnorm',alternative='two.sided')
            }
            # NOTE(review): p < 0.05 *rejects* normality, yet this branch goes
            # on to compute the z-score while the else branch reports "does not
            # follow a normal distribution". The condition appears inverted
            # relative to the log messages -- confirm the intended semantics.
            if( normalDist$p.value < 0.05){
                if( verbose == TRUE){
                    message("Z-score will be estimated for this variable")
                }
                # Keep only rows with both Age and the phenotype present.
                selection <- tt[! is.na(tt$Age),]
                selection <- selection[! is.na( selection[,pcolumn]),]
                if( verbose == TRUE){
                    message("Checking if there is correlation between age and ", as.character(ph$variable[i]))
                }
                correlationsTest <- cor.test(as.numeric(selection[, pcolumn]), as.numeric(selection$Age))
                if( correlationsTest$p.value < 0.05){
                    if( verbose == TRUE){
                        message("There is a correlation between ", colnames(selection)[pcolumn],
                                " variable and age.")
                        message("Fitting linear model")
                    }
                    # Age-adjust the phenotype: z-score the residuals of a
                    # linear model on age instead of the raw values.
                    lm1<- lm(as.numeric(selection[, pcolumn]) ~ as.numeric(selection$Age), data= selection)
                    selection$lm1 <- lm1$residuals
                    contVariable <- selection$lm1
                    # pcolumn is re-pointed at selection's "lm1" column here,
                    # but it is later used to index tt (see tt[j, pcolumn-1]
                    # below).
                    pcolumn <- which(colnames(selection) == "lm1")
                }else{
                    if( verbose == TRUE){
                        message("There is not a correlation between ", colnames(selection)[pcolumn],
                                " variable and age.")
                    }
                    contVariable <- as.numeric(selection[, pcolumn])
                }
                #2. population parameter calculations
                # Population (not sample) standard deviation: sd() rescaled by
                # sqrt((n-1)/n). NA values are removed from both statistics.
                pop_sd <- sd(contVariable, na.rm = TRUE)*sqrt((length(contVariable)-1)/(length(contVariable)))
                pop_mean <- mean(contVariable, na.rm = TRUE)
                selection$zScore <- NA
                selection$zScoreCat <- NA
                # Categorize each patient as "under"/"normal"/"over" using the
                # z-score cut-offs.
                for( z in 1:nrow( selection )){
                    if(! is.na(selection[z,pcolumn])){
                        selection$zScore[z] <- ( as.numeric(selection[z,pcolumn]) - pop_mean) / pop_sd
                        if( selection$zScore[z] <= zscoreCutOff[1]){
                            selection$zScoreCat[z] <- "under"
                        }else if( selection$zScore[z] >= zscoreCutOff[2]){
                            selection$zScoreCat[z] <- "over"
                        }else{
                            selection$zScoreCat[z] <- "normal"
                        }
                    }
                }
                # Write the categorical value back into tt; patients filtered
                # out above get NA.
                # NOTE(review): the target column is pcolumn-1, i.e. the column
                # *before* the phenotype -- and after the correlated-with-age
                # branch, pcolumn indexes selection's "lm1" column, not tt.
                # This indexing looks fragile; verify which tt column is meant
                # to receive the categories.
                for( j in 1:nrow(tt)){
                    if( tt$patient_id[j] %in% selection$patient_id){
                        tt[j,pcolumn-1] <- selection[selection$patient_id == tt$patient_id[j], "zScoreCat"]
                    }else{
                        tt[j,pcolumn-1] <- NA
                    }
                }
            }
            else{
                if( verbose == TRUE){
                    message("The variable ", as.character(ph$variable[i]), " does not follow a normal distribution")
                    message("Z-score will not be estimated for this variable")
                }
            }
        }
    }
    input@iresult <- tt
    return( input )
}
|
27ad7a4ec2260a14cc4095ce803ce30e8367ada3
|
257cc65928167620b1d10ca750cd71ddac0452c5
|
/SoundMetric_QDA.R
|
a6e22389bce9a2d974ee880d97ef2a4f60da686c
|
[] |
no_license
|
kbellisa/MIR-sound-ecology
|
f9ab74230b4f4cac79a38063b23451fcb9d497d7
|
aa94e088a89db786889b397673b60f11b3bbce4d
|
refs/heads/main
| 2023-05-05T22:02:25.553297
| 2021-06-03T15:59:10
| 2021-06-03T15:59:10
| 373,556,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,898
|
r
|
SoundMetric_QDA.R
|
##################################################################################
# MIR Study (no removal of outliers)
# USES linear and quadratic discriminant analysis
# Research question: Can we use spectral features to determine soundscape class?
# Kristen Bellisario
# with additional help from Zhao Zhao, Cristan Graupe, and Jack VanSchaik
#
# Fixes: the reassignable T/F shortcuts replaced with TRUE/FALSE throughout
# (header=T, CV=T, key=F, Rowv=F, add=T).
##################################################################################
# Required packages
library(rgl)
library(ggplot2)
library(colorspace)
library(vegan)   # adonis, betadisper, vegdist
library(MASS)    # lda, qda
library(gplots)  # heatmap.2

# 180 sound files with 23 ms and 3 s window lengths
# (note: the label states 1 s and was labeled incorrectly)
d.1 <- read.csv("TestFeatures_180.csv", header = TRUE)
rownames(d.1) <- d.1[, 1]
# Scale the feature columns (3:12); keep the file ID in column 1.
d.s.1 <- data.frame(cbind(d.1[, 1], scale(d.1[, 3:12])))
rownames(d.s.1) <- d.s.1[, 1]

# COMPLETE FEATURE GROUPS FOR EACH FRAME - only running LDA on the 3 s model
# [optimized sound recording length] // outliers included
model_1.t <- cbind(d.s.1[, 1], d.s.1[, c(2, 4, 6, 8, 10)])
names(model_1.t) <- c("ID", "Centroid1", "Skew1", "Slope1", "Spread1", "Var1")

# KMEANS PREDICTORS - sorted by soundfile
# Each observation has three predictions when classes overlap; the goal is to
# find out which class is dominant in single class membership.
# Note: "3e" is an internal naming convention for the dataset group.
p_set.t <- read.csv("test_data_v2.csv", header = TRUE)
pred_p.t <- p_set.t[, c(1, 3)]
factors3e.1 <- as.factor(pred_p.t[, 2])  # factor levels 1, 3, 4, 5, 6, 7
full.3e.1.p1 <- cbind(factors3e.1, model_1.t[, c(3, 5, 6)])

########## SPECIES FACTORS / SPECIES COUNTS
# Species factors
factors3e.1
# Species counts: the predictors without the leading class factor
spc.3e.1.p1 <- full.3e.1.p1[, -1]

########### MODEL: spc.3e
# PERMANOVA of the spectral features against the k-means classes.
# NOTE(review): permControl() is deprecated in the permute package in favor of
# how(), and adonis() is deprecated in recent vegan in favor of adonis2().
# Kept as-is to preserve behavior -- confirm package versions before changing.
class.adon <- adonis(spc.3e.1.p1 ~ factors3e.1, method = "gower", data = full.3e.1.p1,
                     control = permControl(strata = factors3e.1), permutations = 999, by = "terms")

###### PERMUTATION TEST
# Multivariate dispersion per class, as a diagnostic for the PERMANOVA.
adon.disp <- betadisper(vegdist(spc.3e.1.p1, method = "gower"), factors3e.1)
boxplot(adon.disp, col = "blue", main = "Beta Dispersion sp.3e.3.p1", cex.main = 1, cex.axis = .8, cex.lab = 0.8)

########### LDA
# NOTE(review): this lda() fit is immediately overwritten by the qda() call
# below, so its result is only visible interactively.
disc.class3e.1.p1 <- lda(spc.3e.1.p1, factors3e.1)  # raw

########### QDA FOR CONFUSION MATRIX / CLASS ASSESSMENT
# Quadratic discriminant analysis with outliers included;
# jackknife (leave-one-out) cross-validation, used for predictive value.
disc.class3e.1.p1 <- qda(spc.3e.1.p1, factors3e.1, CV = TRUE)  # raw

############ CONFUSION MATRIX
assess3 <- table(factors3e.1, disc.class3e.1.p1$class)
diag(prop.table(assess3, 1))    # per-class accuracy
sum(diag(prop.table(assess3)))  # overall accuracy
colnames(assess3) <- c("1", "3", "4", "5", "6", "7")
rownames(assess3) <- c("1", "3", "4", "5", "6", "7")
heatmap.2(assess3, dendrogram = "none", trace = "none", key = FALSE, srtCol = 0, Rowv = FALSE,
          Colv = FALSE, cexRow = .7, cexCol = .7, cellnote = assess3, notecol = "black",
          col = topo.colors(50), add = TRUE, main = "Results")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.